code (stringlengths 20 to 1.05M) | apis (sequence) | extract_api (stringlengths 75 to 5.24M)
---|---|---|
from torch import nn
import torch
import torch.nn.functional as F
class DoubleAttentionLayer(nn.Module):
"""
    Implementation of the Double Attention Network (A^2-Nets), NeurIPS 2018.
"""
def __init__(self, in_channels: int, num_frames: int, reconstruct = True):
"""
Parameters
----------
in_channels
reconstruct: `bool` whether to re-construct output to have shape (B, in_channels, L, R)
"""
super(DoubleAttentionLayer, self).__init__()
self.in_channels = in_channels
self.c_m = in_channels // 4
self.c_n = in_channels // 4
self.reconstruct = reconstruct
self.avg_pool = nn.AdaptiveAvgPool3d((num_frames, 1, 1))
self.convA = nn.Conv3d(in_channels, self.c_m, kernel_size = 1)
self.convB = nn.Conv3d(in_channels, self.c_n, kernel_size = 1)
self.convV = nn.Conv3d(in_channels, self.c_n, kernel_size = 1)
if self.reconstruct:
self.conv_reconstruct = nn.Conv1d(self.c_m, in_channels, kernel_size = 1)
def forward(self, x: torch.Tensor):
"""
Parameters
----------
        x: `torch.Tensor` of shape (B, C, T, H, W)
        Returns
        -------
        `torch.Tensor` of shape (B, C, T, H, W): the input re-weighted by the double-attention response
        """
batch_size, c, t, h, w = x.size()
assert c == self.in_channels, 'input channel not equal!'
        A = self.avg_pool(self.convA(x)).view(batch_size, self.c_m, t)  # (B, c_m, t) after 1x1 conv and spatial pooling
        B = self.avg_pool(self.convB(x)).view(batch_size, self.c_n, t)  # (B, c_n, t)
        V = self.avg_pool(self.convV(x)).view(batch_size, self.c_n, t)  # (B, c_n, t)
tmpA = A.view(batch_size, self.c_m, t)
attention_maps = B.view(batch_size, self.c_n, t)
attention_vectors = V.view(batch_size, self.c_n, t)
attention_maps = F.softmax(attention_maps, dim = -1) # softmax on the last dimension to create attention maps
# step 1: feature gathering
global_descriptors = torch.bmm(tmpA, attention_maps.permute(0, 2, 1)) # (B, c_m, c_n)
# step 2: feature distribution
        attention_vectors = F.softmax(attention_vectors, dim = 1)  # (B, c_n, t), attention over the c_n dimension
        tmpZ = torch.matmul(global_descriptors, attention_vectors)  # (B, c_m, t)
tmpZ = tmpZ.view(batch_size, self.c_m, t)
if self.reconstruct: tmpZ = self.conv_reconstruct(tmpZ)
tmpZ = tmpZ.unsqueeze(3).unsqueeze(3)
tmpZ = tmpZ.expand_as(x)
return tmpZ * x | [
"torch.nn.functional.softmax",
"torch.nn.AdaptiveAvgPool3d",
"torch.matmul",
"torch.nn.Conv1d",
"torch.nn.Conv3d"
] | [((666, 706), 'torch.nn.AdaptiveAvgPool3d', 'nn.AdaptiveAvgPool3d', (['(num_frames, 1, 1)'], {}), '((num_frames, 1, 1))\n', (686, 706), False, 'from torch import nn\n'), ((728, 775), 'torch.nn.Conv3d', 'nn.Conv3d', (['in_channels', 'self.c_m'], {'kernel_size': '(1)'}), '(in_channels, self.c_m, kernel_size=1)\n', (737, 775), False, 'from torch import nn\n'), ((799, 846), 'torch.nn.Conv3d', 'nn.Conv3d', (['in_channels', 'self.c_n'], {'kernel_size': '(1)'}), '(in_channels, self.c_n, kernel_size=1)\n', (808, 846), False, 'from torch import nn\n'), ((870, 917), 'torch.nn.Conv3d', 'nn.Conv3d', (['in_channels', 'self.c_n'], {'kernel_size': '(1)'}), '(in_channels, self.c_n, kernel_size=1)\n', (879, 917), False, 'from torch import nn\n'), ((1791, 1824), 'torch.nn.functional.softmax', 'F.softmax', (['attention_maps'], {'dim': '(-1)'}), '(attention_maps, dim=-1)\n', (1800, 1824), True, 'import torch.nn.functional as F\n'), ((2083, 2118), 'torch.nn.functional.softmax', 'F.softmax', (['attention_vectors'], {'dim': '(1)'}), '(attention_vectors, dim=1)\n', (2092, 2118), True, 'import torch.nn.functional as F\n'), ((2182, 2233), 'torch.matmul', 'torch.matmul', (['global_descriptors', 'attention_vectors'], {}), '(global_descriptors, attention_vectors)\n', (2194, 2233), False, 'import torch\n'), ((985, 1032), 'torch.nn.Conv1d', 'nn.Conv1d', (['self.c_m', 'in_channels'], {'kernel_size': '(1)'}), '(self.c_m, in_channels, kernel_size=1)\n', (994, 1032), False, 'from torch import nn\n')] |
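A minimal smoke test for the `DoubleAttentionLayer` defined in the snippet above, assuming that class is available in scope and PyTorch is installed; the batch, channel and temporal/spatial sizes are arbitrary illustration values.

```python
import torch

layer = DoubleAttentionLayer(in_channels=64, num_frames=8)
x = torch.randn(2, 64, 8, 16, 16)   # (B, C, T, H, W)
out = layer(x)                    # attention-weighted copy of the input
print(out.shape)                  # torch.Size([2, 64, 8, 16, 16]), same shape as x
```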
import penman
from penman.surface import (
Alignment,
RoleAlignment,
alignments,
role_alignments,
)
codec = penman.PENMANCodec()
def test_alignments(isi_aligned):
g = codec.decode('(a :ARG~1 (b / beta~2))')
assert alignments(g) == {
('b', ':instance', 'beta'): Alignment((2,)),
}
assert role_alignments(g) == {
('a', ':ARG', 'b'): RoleAlignment((1,)),
}
assert codec.encode(g, indent=None) == '(a :ARG~1 (b / beta~2))'
g = codec.decode(isi_aligned[0])
assert alignments(g) == {
('d', ':instance', 'drive-01'): Alignment((2,), prefix='e.'),
('h', ':instance', 'he'): Alignment((1,), prefix='e.'),
('c', ':instance', 'care-04'): Alignment((3,), prefix='e.'),
}
assert role_alignments(g) == {}
assert codec.encode(g) == (
'(d / drive-01~e.2\n'
' :ARG0 (h / he~e.1)\n'
' :manner (c / care-04~e.3\n'
' :polarity -))')
| [
"penman.surface.role_alignments",
"penman.surface.alignments",
"penman.surface.RoleAlignment",
"penman.PENMANCodec",
"penman.surface.Alignment"
] | [((126, 146), 'penman.PENMANCodec', 'penman.PENMANCodec', ([], {}), '()\n', (144, 146), False, 'import penman\n'), ((242, 255), 'penman.surface.alignments', 'alignments', (['g'], {}), '(g)\n', (252, 255), False, 'from penman.surface import Alignment, RoleAlignment, alignments, role_alignments\n'), ((331, 349), 'penman.surface.role_alignments', 'role_alignments', (['g'], {}), '(g)\n', (346, 349), False, 'from penman.surface import Alignment, RoleAlignment, alignments, role_alignments\n'), ((528, 541), 'penman.surface.alignments', 'alignments', (['g'], {}), '(g)\n', (538, 541), False, 'from penman.surface import Alignment, RoleAlignment, alignments, role_alignments\n'), ((767, 785), 'penman.surface.role_alignments', 'role_alignments', (['g'], {}), '(g)\n', (782, 785), False, 'from penman.surface import Alignment, RoleAlignment, alignments, role_alignments\n'), ((297, 312), 'penman.surface.Alignment', 'Alignment', (['(2,)'], {}), '((2,))\n', (306, 312), False, 'from penman.surface import Alignment, RoleAlignment, alignments, role_alignments\n'), ((383, 402), 'penman.surface.RoleAlignment', 'RoleAlignment', (['(1,)'], {}), '((1,))\n', (396, 402), False, 'from penman.surface import Alignment, RoleAlignment, alignments, role_alignments\n'), ((587, 615), 'penman.surface.Alignment', 'Alignment', (['(2,)'], {'prefix': '"""e."""'}), "((2,), prefix='e.')\n", (596, 615), False, 'from penman.surface import Alignment, RoleAlignment, alignments, role_alignments\n'), ((651, 679), 'penman.surface.Alignment', 'Alignment', (['(1,)'], {'prefix': '"""e."""'}), "((1,), prefix='e.')\n", (660, 679), False, 'from penman.surface import Alignment, RoleAlignment, alignments, role_alignments\n'), ((720, 748), 'penman.surface.Alignment', 'Alignment', (['(3,)'], {'prefix': '"""e."""'}), "((3,), prefix='e.')\n", (729, 748), False, 'from penman.surface import Alignment, RoleAlignment, alignments, role_alignments\n')] |
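A small standalone sketch of the penman surface-alignment calls exercised by the test above: decode a string carrying ISI-style `~e.N` markers and read the alignments back off the graph. The graph string itself is made up for illustration.

```python
import penman
from penman.surface import alignments

g = penman.PENMANCodec().decode('(w / want-01~e.1 :ARG0 (b / boy~e.0))')
for triple, alignment in alignments(g).items():
    print(triple, '->', alignment)   # e.g. ('w', ':instance', 'want-01') -> Alignment((1,), prefix='e.')
```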
# -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2017-11-10 15:46
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('information', '0008_auto_20171110_1527'),
]
operations = [
migrations.RenameField(
model_name='information',
old_name='media_audio',
new_name='media_audios',
),
migrations.RenameField(
model_name='information',
old_name='media_gallery',
new_name='media_galleries',
),
migrations.RenameField(
model_name='information',
old_name='media_image',
new_name='media_images',
),
migrations.RenameField(
model_name='information',
old_name='media_video',
new_name='media_videos',
),
]
| [
"django.db.migrations.RenameField"
] | [((296, 397), 'django.db.migrations.RenameField', 'migrations.RenameField', ([], {'model_name': '"""information"""', 'old_name': '"""media_audio"""', 'new_name': '"""media_audios"""'}), "(model_name='information', old_name='media_audio',\n new_name='media_audios')\n", (318, 397), False, 'from django.db import migrations\n'), ((450, 556), 'django.db.migrations.RenameField', 'migrations.RenameField', ([], {'model_name': '"""information"""', 'old_name': '"""media_gallery"""', 'new_name': '"""media_galleries"""'}), "(model_name='information', old_name='media_gallery',\n new_name='media_galleries')\n", (472, 556), False, 'from django.db import migrations\n'), ((609, 710), 'django.db.migrations.RenameField', 'migrations.RenameField', ([], {'model_name': '"""information"""', 'old_name': '"""media_image"""', 'new_name': '"""media_images"""'}), "(model_name='information', old_name='media_image',\n new_name='media_images')\n", (631, 710), False, 'from django.db import migrations\n'), ((763, 864), 'django.db.migrations.RenameField', 'migrations.RenameField', ([], {'model_name': '"""information"""', 'old_name': '"""media_video"""', 'new_name': '"""media_videos"""'}), "(model_name='information', old_name='media_video',\n new_name='media_videos')\n", (785, 864), False, 'from django.db import migrations\n')] |
# -*- coding: utf-8 -*-
# Copyright 2016 Yelp Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
from __future__ import unicode_literals
import pytest
from sqlalchemy.exc import IntegrityError
from schematizer.models.source import Topic
from schematizer_testing import factories
from tests.models.base_model_test import GetAllModelTestBase
from tests.models.testing_db import DBTestCase
class TestGetAllTopics(GetAllModelTestBase):
def create_topic(self, topic_no):
source_bar = factories.get_or_create_source(
namespace_name='foo',
source_name='bar',
owner_email='<EMAIL>'
)
return factories.create_topic(
topic_name='topic_{}'.format(topic_no),
namespace_name=source_bar.namespace.name,
source_name=source_bar.name
)
entity_model = Topic
create_entity_func = create_topic
assert_func_name = 'assert_equal_topic'
class TestTopicModel(DBTestCase):
def test_valid_cluster_type(self, biz_source):
cluster_type = 'scribe'
topic = factories.create_topic(
topic_name='yelp.biz_test.1',
namespace_name=biz_source.namespace.name,
source_name=biz_source.name,
cluster_type=cluster_type
)
assert topic.cluster_type == cluster_type
def test_empty_cluster_type(self, biz_source):
with pytest.raises(IntegrityError):
factories.create_topic(
topic_name='yelp.biz_test.1',
namespace_name=biz_source.namespace.name,
source_name=biz_source.name,
cluster_type=None
)
| [
"schematizer_testing.factories.create_topic",
"pytest.raises",
"schematizer_testing.factories.get_or_create_source"
] | [((1036, 1134), 'schematizer_testing.factories.get_or_create_source', 'factories.get_or_create_source', ([], {'namespace_name': '"""foo"""', 'source_name': '"""bar"""', 'owner_email': '"""<EMAIL>"""'}), "(namespace_name='foo', source_name='bar',\n owner_email='<EMAIL>')\n", (1066, 1134), False, 'from schematizer_testing import factories\n'), ((1616, 1776), 'schematizer_testing.factories.create_topic', 'factories.create_topic', ([], {'topic_name': '"""yelp.biz_test.1"""', 'namespace_name': 'biz_source.namespace.name', 'source_name': 'biz_source.name', 'cluster_type': 'cluster_type'}), "(topic_name='yelp.biz_test.1', namespace_name=\n biz_source.namespace.name, source_name=biz_source.name, cluster_type=\n cluster_type)\n", (1638, 1776), False, 'from schematizer_testing import factories\n'), ((1940, 1969), 'pytest.raises', 'pytest.raises', (['IntegrityError'], {}), '(IntegrityError)\n', (1953, 1969), False, 'import pytest\n'), ((1983, 2130), 'schematizer_testing.factories.create_topic', 'factories.create_topic', ([], {'topic_name': '"""yelp.biz_test.1"""', 'namespace_name': 'biz_source.namespace.name', 'source_name': 'biz_source.name', 'cluster_type': 'None'}), "(topic_name='yelp.biz_test.1', namespace_name=\n biz_source.namespace.name, source_name=biz_source.name, cluster_type=None)\n", (2005, 2130), False, 'from schematizer_testing import factories\n')] |
from django_rtk.utils.get_setting_or import get_setting_or_throw
from django_rtk.utils.import_class import import_class
_validator = None
def get_validator():
global _validator
if _validator is None:
cls_path = get_setting_or_throw("VALIDATOR")
cls = import_class(cls_path)
_validator = cls()
return _validator
| [
"django_rtk.utils.get_setting_or.get_setting_or_throw",
"django_rtk.utils.import_class.import_class"
] | [((230, 263), 'django_rtk.utils.get_setting_or.get_setting_or_throw', 'get_setting_or_throw', (['"""VALIDATOR"""'], {}), "('VALIDATOR')\n", (250, 263), False, 'from django_rtk.utils.get_setting_or import get_setting_or_throw\n'), ((278, 300), 'django_rtk.utils.import_class.import_class', 'import_class', (['cls_path'], {}), '(cls_path)\n', (290, 300), False, 'from django_rtk.utils.import_class import import_class\n')] |
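The snippet above is a lazy, module-level singleton loader: the "VALIDATOR" setting names a class by its dotted path, which is imported and instantiated on first use and then cached. A usage sketch, assuming the setting is configured somewhere in the project's Django settings (not shown here):

```python
validator = get_validator()           # first call imports the configured class and instantiates it
assert get_validator() is validator   # later calls return the same cached instance
```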
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
from numbers import Number
import os
from datetime import datetime
class ThermocoupleStatistics:
@classmethod
def from_folders(cls,
path: "str",
unit1_name: "str" = 'unit 1',
unit2_name: "str" = 'unit 2',
time_col: "str" = "datetime",
old_time_col: "str" = "time",
ftype: "str" = ".csv"
)->("ThermocoupleStatistics", "list"):
"""
from_folders class method
=========================
PURPOSE
=======
        This method provides a quick way to convert all TCReader-generated thermocouple data (in csv format)
        into pandas DataFrame objects using only the folders containing these data.
        Because the dual version of TCReader is intended to be used with exactly 2 TCs simultaneously, both
        TC unit names are required, or else the analysis will not be performed.
TODO- could be generalized to handle any amount of input from TC
Args:
path: (*str)- relative or direct path to the desired measurement datasets
            unit1_name: (*str)- name of the first TC/CK-unit
            unit2_name: (*str)- name of the second TC/CK-unit
            time_col: (*str)- name of the new date + time column in a YYYY-MM-DD HH:MM:ss format
            old_time_col: (*str)- name of the current time column (can be adjusted if the output csv's are changed)
            ftype: (*str)- file type in which the TCReader-generated data is stored (default csv- won't work with others)
Returns:
ThermocoupleStatistics object
"""
devices_params = []
unit_df1 = []
unit_df2 = []
for file in os.listdir(path):
if file.endswith(ftype):
device_params = {}
get_data = file.split("-")
unit = get_data[0]
_date = file.split(" ")[-1].split(".")[0]
try:
device_params["ck_unit"] = get_data[0]
device_params["date"] = _date
if len(get_data) == 6:
device_params["port"] = get_data[1]
device_params["tc_name"] = get_data[2]
except Exception as e:
print(f"An error has occurred:\n{e}")
devices_params.append(device_params)
if unit == unit1_name:
_path = os.path.join(path, file)
df1 = pd.read_csv(_path, engine = "c")
df1[time_col] = df1[old_time_col].apply(lambda row: row + f" {_date}")
df1[time_col] = pd.to_datetime(df1[time_col])
unit_df1.append(df1)
elif unit == unit2_name:
_path = os.path.join(path, file)
df2 = pd.read_csv(_path, engine = "c")
df2[time_col] = df2[old_time_col].apply(lambda row: row + f" {_date}")
df2[time_col] = pd.to_datetime(df2[time_col])
unit_df2.append(df2)
tc_df1 = pd.concat(unit_df1) # first df
tc_df2 = pd.concat(unit_df2) # Second df
return cls(tc_df1,tc_df2, *devices_params)
def __init__(self, tc1_df, tc2_df, *device_params):
self.tc1_df = tc1_df
self.tc2_df = tc2_df
self._merged_df = pd.DataFrame()
self._params = device_params
self._test_setpoints = [50, 100, 200, 300, 400, 500, 600, 700, 800, 900, 1000, 1100, 1150]
# try to get tc names
try:
self._tc_names = {unit["tc_name"] for unit in self._params}
except KeyError:
self._tc_names = set(['TC1','TC2'])
def concat_channels(self,
test_period: "int",
heat_time: "int" = 60,
col_names: "list" = ['temperature1', 'datetime1', 'temperature2', 'datetime2'],
date_sort: "bool" = False,
**pdkwargs
):
"""
PURPOSE
=======
        Concatenate both thermocouple channels by time into a new dataframe.
        Additionally, add cumulative time-sum columns in seconds.
New dataframe is named merged_df and is a property of the ThermocoupleStatistics instance.
Args:
test_period: (*int)- the tested temperature period in minutes
heat_time: (*int)- the heating period (ramp period) for each temperature preset
col_names: (*list)- list of the new column names arranged in a format of:
['temp_tc1', 'datetime_tc1','temp_tc2', 'datetime_tc2'].
Can change column names but should maintain this kind of naming convention to avoid confusion.
date_sort: (*bool)- sort merge_df according to date and time (default -False)
Return:
None
"""
# Creating a merged dataframe
self._merged_df = pd.DataFrame(**pdkwargs)
self._merged_df = pd.concat([self.tc1_df, self.tc2_df], axis=1)
# fixing the dataframe
self._merged_df.pop('time')
self._merged_df.pop('event')
self._merged_df.reset_index(drop=True, inplace=True)
self._merged_df.columns = col_names
# sort according the first datetime column
if date_sort:
self._merged_df = self._merged_df.sort_values(col_names[1])
test_period = test_period * 60 # convert to seconds
heat_time = heat_time * 60 # convert to seconds
total_setpoint_time = heat_time + test_period
datetime1 = col_names[1]
datetime2 = col_names[3]
time_offset1 = time_offset2 = self._merged_df[datetime1].loc[0]
time_cumsum1 = []
time_cumsum2 = []
for index, row in self._merged_df.iterrows():
sec_counter1 = (row[datetime1] - time_offset1).seconds
sec_counter2 = (row[datetime2] - time_offset2).seconds
time_cumsum1.append(sec_counter1)
time_cumsum2.append(sec_counter2)
if sec_counter1 >= total_setpoint_time:
time_offset1 = row[datetime1]
if sec_counter2 >= total_setpoint_time:
time_offset2 = row[datetime2]
self._merged_df["time1_cumsum"] = time_cumsum1
self._merged_df["time2_cumsum"] = time_cumsum2
def test_statistics(self,
temp_column: "str",
temp_range: "float" = 5.0,
mean_std: "bool" = True,
max_min: "bool" =False,
add_stats: "dict" = {}) -> "list":
"""
PURPOSE
=======
Calculate simple test statistics- finding maximum, minimum values and mean/stdev.
Additional statistics could be easily added into this method using numpy and the add_stats dict.
Args:
temp_column: (*str)- name of the tested temperature column
mean_std: (*bool)- add mean and standard deviation calculations of each temperature bin (default -True)
max_min: (*bool)- add maximum and minimum measurements of each temperature bin (default -False)
temp_range: (*float)- temperature range for the bins used to evaluate the statistics (default- 5.0)
add_stats: (*dict)- additional statistics dictionary. The dict format has to be {"function_name": function object}.
            For instance- {"median": np.median} (pass the function object itself, without parentheses).
Calling it has to be in the format: add_stats["median"](array_like object).
Returns:
test_stat: (*dict)- dict in the {'mean': [], 'std': [], 'min': [], 'max': []} format
"""
_iterindex = 0
temp_bins = [[]]
test_stats = {'mean': [], 'std': [], 'min': [], 'max': []}
temp0 = self._merged_df[temp_column].iloc[0] # get first temp meas in the dataframe
for index, row in self._merged_df.iterrows():
if (row[temp_column] <= (temp0 + temp_range)) and (row[temp_column] > (temp0 - temp_range)):
temp_bins[_iterindex].append(row[temp_column])
else:
temp0 = row[temp_column]
_iterindex += 1
temp_bins.append([])
if mean_std:
for tempRange in temp_bins:
tempRange = np.array(tempRange)
if mean_std:
temp_mean = tempRange.mean()
temp_std = tempRange.std()
# fill dictionary
test_stats['mean'].append(temp_mean)
test_stats['std'].append(temp_std)
if max_min:
temp_min = tempRange.min()
temp_max = tempRange.max()
# fill dictionary
test_stats['min'].append(temp_min)
test_stats['max'].append(temp_max)
try:
for key in add_stats.keys():
res = add_stats[key](tempRange)
try:
test_stats[key].append(res)
except Exception:
test_stats[key] = []
test_stats[key].append(res)
except AttributeError:
pass
except Exception:
pass
pass
return test_stats
def filterTest_data_only(self, test_period: "int",
inplace: "bool" = False,
time_col:"str" = "time1_cumsum"):
"""
PURPOSE
=======
Filter the TC generated data, stored in the merged dataframe to only relevant test data.
EXPLANATION
===========
        For the TC calibration lab, the current (30/09/2020) setup is a temperature ramp to
        a setpoint and then a dwell period of a fixed time (test_period).
Args:
test_period: (*int)- the tested temperature period in minutes
inplace: (*bool)- replace the current merged_df dataframe with the filtered one
time_col: (*str)- name of the time column
Return:
fil_df- (*DataFrame)- the filtered data
"""
test_period *= 60 # convert into seconds
fil_df = self._merged_df[(self._merged_df[time_col] <= test_period) & (self._merged_df[time_col] > 0)]
if inplace:
try:
self._merged_df = fil_df
except Exception as e:
print(f"{type(e)}:{e}\nDataFrame was not updated")
return fil_df
def filterTest_by_date(self,
start_date: "str",
end_date: "str",
time_column: "str" = "datetime1",
inplace: "bool"= False
) -> "pandas.core.frame.DataFrame":
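        """
        Filter merged_df down to rows whose `time_column` value lies strictly between
        start_date and end_date (YYYY-MM-DD HH:MM:ss strings).
        If inplace is True, merged_df is replaced by the filtered frame; the filtered
        frame is returned either way.
        """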
try:
df = self._merged_df[(self._merged_df[time_column] < end_date) & (self._merged_df[time_column] > start_date)]
except Exception as e:
print(e.args)
            # the comparison can fail if the column is not datetime yet: coerce it and retry
            self._merged_df[time_column] = pd.to_datetime(self._merged_df[time_column])
df = self._merged_df[(self._merged_df[time_column] < end_date) & (self._merged_df[time_column] > start_date)]
if inplace:
self._merged_df = df
return df
@property
def merged_df(self):
return self._merged_df
@property
def unit_names(self):
self._units = {unit["ck_unit"] for unit in self._params}
return self._units
@property
def tc_names(self):
return self._tc_names
@tc_names.setter
def tc_names(self, names):
self._tc_names = set(f"_{name}" for name in names)
@property
def test_setpoints(self):
"""
test_setpoints: (*array_type)- the calibration temperature presets in a list
"""
return self._test_setpoints
@test_setpoints.setter
def test_setpoints(self, setpoints):
if isinstance(setpoints, (list, tuple)):
if all(isinstance(x, (Number)) for x in setpoints):
self._test_setpoints = setpoints
else:
raise ValueError("The Test Presets file contains none numeric values.\n"
"Please change the values or the default set points will be used")
else:
raise AttributeError("Not an array type object ")
def cal_summary(self,
temp1_col: "int" = 0,
temp2_col: "int" = 2,
temp_range: "float" = 20.0,
suffixes: "tuple" = ('_TC1', '_TC2'),
add_statistics: "dict" = {},
to_csv: "bool" = False,
mean_std = True,
max_min = True,
save_dir: "str" = os.getcwd(),
file_name: "str" = "test_statistics.csv",
time_strf_format = "%Y-%m-%d %H-%M"
)->"DataFrame":
"""
PURPOSE
=======
Get a summary of the test statistics for both thermocouples
Optional- save data to a csv file.
Args:
temp1_col: (*int) - index of the columns.values list for the first TC temperature measurements
temp2_col: (*int) - index of the columns.values list for the second TC temperature measurements
temp_range: (*float)- temperature range for the bins used to evaluate the statistics (default- 20.0)
suffixes: (*tuple)- suffixes of the merged dataframe for each thermocouple.
add_statistics: (*dict)- additional statistics dictionary. The dict format has to be {"function_name": function object}.
For instance- {"median": np.median} (no closing braces)
to_csv: (*bool)- save calibration summary into csv
Returns:
summary dataframe
"""
tc_stats = []
col_names = self._merged_df.columns.values
tc_temps = [col_names[temp1_col],col_names[temp2_col]]
for temp in tc_temps:
tc_stat = self.test_statistics(temp_column=temp,
temp_range=temp_range,
add_stats=add_statistics,
mean_std = mean_std,
max_min=max_min
)
tc_stat_df = pd.DataFrame(tc_stat, index=self.test_setpoints)
tc_stats.append(tc_stat_df)
summary = pd.merge(*tc_stats, left_index=True, right_index=True, suffixes = suffixes)
if to_csv:
today = datetime.now()
today = today.strftime(time_strf_format)
tc_names = list(self.tc_names)
prefix_file_name = f"{tc_names[0]}-{tc_names[1]}-{today}"
file_name = "-".join([prefix_file_name, file_name])
_saved_path = os.path.join(save_dir, file_name)
summary.to_csv(_saved_path)
return summary
def quick_filter(self, test_period: "int",
start_date: "str",
end_date: "str",
date_sort: "bool" = True,
inplace: "bool" = True)->"dict":
"""
PURPOSE
=======
        Allow quick and dirty filtering of the data down to a specific date range and test period.
        Note- This method may not work under some conditions, so it's better to use each method independently.
Args:
test_period: (*int)- the tested temperature period in minutes
start_date: (*str)- start date for analysis and time in a YYYY-MM-DD HH:MM:ss format
end_date: (*str)- end date for analysis and time in a YYYY-MM-DD HH:MM:ss format
date_sort: (*bool)- sort merge_df according to date and time (default -True)
inplace: (*bool)- apply all filters on merged_df (default -True)
Return:
None
"""
self.concat_channels(test_period=test_period, date_sort = date_sort)
self.filterTest_by_date(start_date=start_date, end_date = end_date, inplace=inplace)
self.filterTest_data_only(test_period= test_period, inplace=inplace)
| [
"os.listdir",
"pandas.read_csv",
"pandas.merge",
"os.path.join",
"os.getcwd",
"datetime.datetime.now",
"numpy.array",
"pandas.DataFrame",
"pandas.concat",
"pandas.to_datetime"
] | [((1852, 1868), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (1862, 1868), False, 'import os\n'), ((3247, 3266), 'pandas.concat', 'pd.concat', (['unit_df1'], {}), '(unit_df1)\n', (3256, 3266), True, 'import pandas as pd\n'), ((3296, 3315), 'pandas.concat', 'pd.concat', (['unit_df2'], {}), '(unit_df2)\n', (3305, 3315), True, 'import pandas as pd\n'), ((3523, 3537), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (3535, 3537), True, 'import pandas as pd\n'), ((5162, 5186), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '(**pdkwargs)\n', (5174, 5186), True, 'import pandas as pd\n'), ((5213, 5258), 'pandas.concat', 'pd.concat', (['[self.tc1_df, self.tc2_df]'], {'axis': '(1)'}), '([self.tc1_df, self.tc2_df], axis=1)\n', (5222, 5258), True, 'import pandas as pd\n'), ((13214, 13225), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (13223, 13225), False, 'import os\n'), ((14976, 15049), 'pandas.merge', 'pd.merge', (['*tc_stats'], {'left_index': '(True)', 'right_index': '(True)', 'suffixes': 'suffixes'}), '(*tc_stats, left_index=True, right_index=True, suffixes=suffixes)\n', (14984, 15049), True, 'import pandas as pd\n'), ((14868, 14916), 'pandas.DataFrame', 'pd.DataFrame', (['tc_stat'], {'index': 'self.test_setpoints'}), '(tc_stat, index=self.test_setpoints)\n', (14880, 14916), True, 'import pandas as pd\n'), ((15092, 15106), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (15104, 15106), False, 'from datetime import datetime\n'), ((15363, 15396), 'os.path.join', 'os.path.join', (['save_dir', 'file_name'], {}), '(save_dir, file_name)\n', (15375, 15396), False, 'import os\n'), ((8626, 8645), 'numpy.array', 'np.array', (['tempRange'], {}), '(tempRange)\n', (8634, 8645), True, 'import numpy as np\n'), ((11463, 11507), 'pandas.to_datetime', 'pd.to_datetime', (['self._merged_df[time_column]'], {}), '(self._merged_df[time_column])\n', (11477, 11507), True, 'import pandas as pd\n'), ((2595, 2619), 'os.path.join', 'os.path.join', (['path', 'file'], {}), '(path, file)\n', (2607, 2619), False, 'import os\n'), ((2646, 2676), 'pandas.read_csv', 'pd.read_csv', (['_path'], {'engine': '"""c"""'}), "(_path, engine='c')\n", (2657, 2676), True, 'import pandas as pd\n'), ((2806, 2835), 'pandas.to_datetime', 'pd.to_datetime', (['df1[time_col]'], {}), '(df1[time_col])\n', (2820, 2835), True, 'import pandas as pd\n'), ((2947, 2971), 'os.path.join', 'os.path.join', (['path', 'file'], {}), '(path, file)\n', (2959, 2971), False, 'import os\n'), ((2998, 3028), 'pandas.read_csv', 'pd.read_csv', (['_path'], {'engine': '"""c"""'}), "(_path, engine='c')\n", (3009, 3028), True, 'import pandas as pd\n'), ((3158, 3187), 'pandas.to_datetime', 'pd.to_datetime', (['df2[time_col]'], {}), '(df2[time_col])\n', (3172, 3187), True, 'import pandas as pd\n')] |
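A hypothetical end-to-end use of the `ThermocoupleStatistics` class defined above. The folder path, dates and test period are placeholder values rather than anything from the original code, and the csv files are assumed to follow the TCReader naming convention expected by `from_folders`.

```python
stats = ThermocoupleStatistics.from_folders(
    "measurements/2020-09-30",         # folder with TCReader csv files (placeholder path)
    unit1_name="unit 1",
    unit2_name="unit 2",
)
stats.quick_filter(
    test_period=30,                     # dwell time at each setpoint, in minutes (placeholder)
    start_date="2020-09-30 08:00:00",
    end_date="2020-09-30 18:00:00",
)
summary = stats.cal_summary(temp_range=20.0, to_csv=False)
print(summary)
```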
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
__all__ = [
'GetBackendHealthResult',
'AwaitableGetBackendHealthResult',
'get_backend_health',
]
@pulumi.output_type
class GetBackendHealthResult:
"""
A collection of values returned by getBackendHealth.
"""
def __init__(__self__, backend_name=None, backend_set_name=None, health_check_results=None, id=None, load_balancer_id=None, status=None):
if backend_name and not isinstance(backend_name, str):
raise TypeError("Expected argument 'backend_name' to be a str")
pulumi.set(__self__, "backend_name", backend_name)
if backend_set_name and not isinstance(backend_set_name, str):
raise TypeError("Expected argument 'backend_set_name' to be a str")
pulumi.set(__self__, "backend_set_name", backend_set_name)
if health_check_results and not isinstance(health_check_results, list):
raise TypeError("Expected argument 'health_check_results' to be a list")
pulumi.set(__self__, "health_check_results", health_check_results)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if load_balancer_id and not isinstance(load_balancer_id, str):
raise TypeError("Expected argument 'load_balancer_id' to be a str")
pulumi.set(__self__, "load_balancer_id", load_balancer_id)
if status and not isinstance(status, str):
raise TypeError("Expected argument 'status' to be a str")
pulumi.set(__self__, "status", status)
@property
@pulumi.getter(name="backendName")
def backend_name(self) -> str:
return pulumi.get(self, "backend_name")
@property
@pulumi.getter(name="backendSetName")
def backend_set_name(self) -> str:
return pulumi.get(self, "backend_set_name")
@property
@pulumi.getter(name="healthCheckResults")
def health_check_results(self) -> Sequence['outputs.GetBackendHealthHealthCheckResultResult']:
"""
A list of the most recent health check results returned for the specified backend server.
"""
return pulumi.get(self, "health_check_results")
@property
@pulumi.getter
def id(self) -> str:
"""
The provider-assigned unique ID for this managed resource.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter(name="loadBalancerId")
def load_balancer_id(self) -> str:
return pulumi.get(self, "load_balancer_id")
@property
@pulumi.getter
def status(self) -> str:
"""
The general health status of the specified backend server as reported by the primary and standby load balancers.
* **OK:** Both health checks returned `OK`.
* **WARNING:** One health check returned `OK` and one did not.
* **CRITICAL:** Neither health check returned `OK`.
* **UNKNOWN:** One or both health checks returned `UNKNOWN`, or the system was unable to retrieve metrics at this time.
"""
return pulumi.get(self, "status")
class AwaitableGetBackendHealthResult(GetBackendHealthResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetBackendHealthResult(
backend_name=self.backend_name,
backend_set_name=self.backend_set_name,
health_check_results=self.health_check_results,
id=self.id,
load_balancer_id=self.load_balancer_id,
status=self.status)
def get_backend_health(backend_name: Optional[str] = None,
backend_set_name: Optional[str] = None,
load_balancer_id: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetBackendHealthResult:
"""
This data source provides details about a specific Backend Health resource in Oracle Cloud Infrastructure Load Balancer service.
Gets the current health status of the specified backend server.
## Example Usage
```python
import pulumi
import pulumi_oci as oci
test_backend_health = oci.loadbalancer.get_backend_health(backend_name=oci_load_balancer_backend["test_backend"]["name"],
backend_set_name=oci_load_balancer_backend_set["test_backend_set"]["name"],
load_balancer_id=oci_load_balancer_load_balancer["test_load_balancer"]["id"])
```
:param str backend_name: The IP address and port of the backend server to retrieve the health status for. Example: `10.0.0.3:8080`
:param str backend_set_name: The name of the backend set associated with the backend server to retrieve the health status for. Example: `example_backend_set`
:param str load_balancer_id: The [OCID](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the load balancer associated with the backend server health status to be retrieved.
"""
__args__ = dict()
__args__['backendName'] = backend_name
__args__['backendSetName'] = backend_set_name
__args__['loadBalancerId'] = load_balancer_id
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('oci:loadbalancer/getBackendHealth:getBackendHealth', __args__, opts=opts, typ=GetBackendHealthResult).value
return AwaitableGetBackendHealthResult(
backend_name=__ret__.backend_name,
backend_set_name=__ret__.backend_set_name,
health_check_results=__ret__.health_check_results,
id=__ret__.id,
load_balancer_id=__ret__.load_balancer_id,
status=__ret__.status)
| [
"pulumi.get",
"pulumi.getter",
"pulumi.set",
"pulumi.InvokeOptions",
"pulumi.runtime.invoke"
] | [((1940, 1973), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""backendName"""'}), "(name='backendName')\n", (1953, 1973), False, 'import pulumi\n'), ((2077, 2113), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""backendSetName"""'}), "(name='backendSetName')\n", (2090, 2113), False, 'import pulumi\n'), ((2225, 2265), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""healthCheckResults"""'}), "(name='healthCheckResults')\n", (2238, 2265), False, 'import pulumi\n'), ((2751, 2787), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""loadBalancerId"""'}), "(name='loadBalancerId')\n", (2764, 2787), False, 'import pulumi\n'), ((877, 927), 'pulumi.set', 'pulumi.set', (['__self__', '"""backend_name"""', 'backend_name'], {}), "(__self__, 'backend_name', backend_name)\n", (887, 927), False, 'import pulumi\n'), ((1087, 1145), 'pulumi.set', 'pulumi.set', (['__self__', '"""backend_set_name"""', 'backend_set_name'], {}), "(__self__, 'backend_set_name', backend_set_name)\n", (1097, 1145), False, 'import pulumi\n'), ((1319, 1385), 'pulumi.set', 'pulumi.set', (['__self__', '"""health_check_results"""', 'health_check_results'], {}), "(__self__, 'health_check_results', health_check_results)\n", (1329, 1385), False, 'import pulumi\n'), ((1503, 1533), 'pulumi.set', 'pulumi.set', (['__self__', '"""id"""', 'id'], {}), "(__self__, 'id', id)\n", (1513, 1533), False, 'import pulumi\n'), ((1693, 1751), 'pulumi.set', 'pulumi.set', (['__self__', '"""load_balancer_id"""', 'load_balancer_id'], {}), "(__self__, 'load_balancer_id', load_balancer_id)\n", (1703, 1751), False, 'import pulumi\n'), ((1881, 1919), 'pulumi.set', 'pulumi.set', (['__self__', '"""status"""', 'status'], {}), "(__self__, 'status', status)\n", (1891, 1919), False, 'import pulumi\n'), ((2024, 2056), 'pulumi.get', 'pulumi.get', (['self', '"""backend_name"""'], {}), "(self, 'backend_name')\n", (2034, 2056), False, 'import pulumi\n'), ((2168, 2204), 'pulumi.get', 'pulumi.get', (['self', '"""backend_set_name"""'], {}), "(self, 'backend_set_name')\n", (2178, 2204), False, 'import pulumi\n'), ((2502, 2542), 'pulumi.get', 'pulumi.get', (['self', '"""health_check_results"""'], {}), "(self, 'health_check_results')\n", (2512, 2542), False, 'import pulumi\n'), ((2708, 2730), 'pulumi.get', 'pulumi.get', (['self', '"""id"""'], {}), "(self, 'id')\n", (2718, 2730), False, 'import pulumi\n'), ((2842, 2878), 'pulumi.get', 'pulumi.get', (['self', '"""load_balancer_id"""'], {}), "(self, 'load_balancer_id')\n", (2852, 2878), False, 'import pulumi\n'), ((3421, 3447), 'pulumi.get', 'pulumi.get', (['self', '"""status"""'], {}), "(self, 'status')\n", (3431, 3447), False, 'import pulumi\n'), ((5532, 5554), 'pulumi.InvokeOptions', 'pulumi.InvokeOptions', ([], {}), '()\n', (5552, 5554), False, 'import pulumi\n'), ((5646, 5774), 'pulumi.runtime.invoke', 'pulumi.runtime.invoke', (['"""oci:loadbalancer/getBackendHealth:getBackendHealth"""', '__args__'], {'opts': 'opts', 'typ': 'GetBackendHealthResult'}), "('oci:loadbalancer/getBackendHealth:getBackendHealth',\n __args__, opts=opts, typ=GetBackendHealthResult)\n", (5667, 5774), False, 'import pulumi\n')] |
import os
from flask import Flask
import sample_upload
import auth
app = Flask(__name__)
@app.route('/')
def hello():
return 'hello world!'
@app.route('/auth')
def a():
auth()
return 'a'
| [
"auth",
"flask.Flask"
] | [((73, 88), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (78, 88), False, 'from flask import Flask\n'), ((179, 185), 'auth', 'auth', ([], {}), '()\n', (183, 185), False, 'import auth\n')] |
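A conventional way to run the Flask app above during development; the `__main__` guard and `debug=True` are ordinary Flask usage rather than part of the original snippet.

```python
if __name__ == '__main__':
    # Serves on http://127.0.0.1:5000/ by default.
    app.run(debug=True)
```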
import FWCore.ParameterSet.Config as cms
process = cms.Process("TEST")
process.options = cms.untracked.PSet(
numberOfStreams = cms.untracked.uint32(1),
numberOfThreads = cms.untracked.uint32(1),
numberOfConcurrentRuns = cms.untracked.uint32(1),
numberOfConcurrentLuminosityBlocks = cms.untracked.uint32(1)
)
process.source = cms.Source("PoolSource",
fileNames = cms.untracked.vstring(
'file:testProcessBlockMergeOfMergedFiles.root'
)
)
process.intProducerBeginProcessBlockT = cms.EDProducer("IntProducerBeginProcessBlock", ivalue = cms.int32(4000))
process.intProducerEndProcessBlockT = cms.EDProducer("IntProducerEndProcessBlock", ivalue = cms.int32(40000))
process.out = cms.OutputModule("PoolOutputModule",
fileName = cms.untracked.string('testProcessBlockSubProcessTest.root'),
outputCommands = cms.untracked.vstring(
"keep *",
"drop *_*_beginProcessBlock_*",
"drop *_*_endProcessBlock_*"
)
)
process.testOneOutput = cms.OutputModule("TestOneOutput",
verbose = cms.untracked.bool(False),
expectedProcessesWithProcessBlockProducts = cms.untracked.vstring('PROD1', 'MERGE', 'MERGEOFMERGED', 'TEST'),
expectedWriteProcessBlockTransitions = cms.untracked.int32(8),
outputCommands = cms.untracked.vstring(
"keep *",
"drop *_*_beginProcessBlock_*",
"drop *_*_endProcessBlock_*"
)
)
process.eventIntProducer = cms.EDProducer("IntProducer", ivalue = cms.int32(1))
process.transientIntProducerEndProcessBlock = cms.EDProducer("TransientIntProducerEndProcessBlock",
ivalue = cms.int32(90)
)
process.nonEventIntProducer = cms.EDProducer("NonEventIntProducer",
ivalue = cms.int32(1)
)
process.p = cms.Path(
process.eventIntProducer *
process.transientIntProducerEndProcessBlock *
process.nonEventIntProducer *
process.intProducerBeginProcessBlockT *
process.intProducerEndProcessBlockT
)
process.e = cms.EndPath(
process.out *
process.testOneOutput
)
readProcess = cms.Process("READ")
process.addSubProcess(cms.SubProcess(readProcess,
outputCommands = cms.untracked.vstring(
"keep *"
)
))
readProcess.intProducerBeginProcessBlockR = cms.EDProducer("IntProducerBeginProcessBlock", ivalue = cms.int32(5000))
readProcess.intProducerEndProcessBlockR = cms.EDProducer("IntProducerEndProcessBlock", ivalue = cms.int32(50000))
readProcess.out = cms.OutputModule("PoolOutputModule",
fileName = cms.untracked.string('testProcessBlockSubProcessRead.root'),
outputCommands = cms.untracked.vstring(
"keep *",
"drop *_*_beginProcessBlock_*",
"drop *_*_endProcessBlock_*"
)
)
readProcess.testOneOutput = cms.OutputModule("TestOneOutput",
verbose = cms.untracked.bool(False),
expectedProcessesWithProcessBlockProducts = cms.untracked.vstring('PROD1', 'MERGE', 'MERGEOFMERGED', 'TEST', 'READ'),
expectedWriteProcessBlockTransitions = cms.untracked.int32(9),
outputCommands = cms.untracked.vstring(
"keep *",
"drop *_*_beginProcessBlock_*",
"drop *_*_endProcessBlock_*"
)
)
readProcess.p = cms.Path(
readProcess.intProducerBeginProcessBlockR *
readProcess.intProducerEndProcessBlockR
)
readProcess.e = cms.EndPath(
readProcess.out *
readProcess.testOneOutput
)
readAgainProcess = cms.Process("READAGAIN")
readProcess.addSubProcess(cms.SubProcess(readAgainProcess,
outputCommands = cms.untracked.vstring(
"keep *"
)
))
# transitions = 12 events + 9 access input ProcessBlock transitions + 12 fill cache functor calls
# sum = 11 + 22 + 3300 + 4400 + 44 + 444
readAgainProcess.readProcessBlocksOneAnalyzer1 = cms.EDAnalyzer("edmtest::one::InputProcessBlockIntAnalyzer",
transitions = cms.int32(33),
consumesBeginProcessBlock = cms.InputTag("intProducerBeginProcessBlock", ""),
consumesEndProcessBlock = cms.InputTag("intProducerEndProcessBlock", ""),
consumesBeginProcessBlockM = cms.InputTag("intProducerBeginProcessBlockM", ""),
consumesEndProcessBlockM = cms.InputTag("intProducerEndProcessBlockM", ""),
expectedByRun = cms.vint32(11, 22, 3300, 4400),
expectedSum = cms.int32(8221)
)
# transitions = 12 events + 9 access input ProcessBlock transitions + 6 fill cache functor calls
# sum = 44 + 444
readAgainProcess.readProcessBlocksOneAnalyzer2 = cms.EDAnalyzer("edmtest::one::InputProcessBlockIntAnalyzer",
transitions = cms.int32(27),
consumesBeginProcessBlock = cms.InputTag("intProducerBeginProcessBlockM", ""),
consumesEndProcessBlock = cms.InputTag("intProducerEndProcessBlockM", ""),
consumesBeginProcessBlockM = cms.InputTag("intProducerBeginProcessBlockM", ""),
consumesEndProcessBlockM = cms.InputTag("intProducerEndProcessBlockM", ""),
expectedByRun = cms.vint32(44, 44, 444, 444),
expectedSum = cms.int32(488)
)
# transitions = 12 events + 9 access input ProcessBlock transitions + 3 fill cache functor calls
# sum = 44 + 444
readAgainProcess.readProcessBlocksOneAnalyzer3 = cms.EDAnalyzer("edmtest::one::InputProcessBlockIntAnalyzer",
transitions = cms.int32(24),
consumesBeginProcessBlock = cms.InputTag("intProducerBeginProcessBlockMM", ""),
consumesEndProcessBlock = cms.InputTag("intProducerEndProcessBlockMM", ""),
consumesBeginProcessBlockM = cms.InputTag("intProducerBeginProcessBlockM", ""),
consumesEndProcessBlockM = cms.InputTag("intProducerEndProcessBlockM", ""),
expectedByRun = cms.vint32(644, 644, 644, 644),
expectedSum = cms.int32(488)
)
# transitions = 12 events + 9 access input ProcessBlock transitions + 3 fill cache functor calls
# sum = 44 + 444
# filler sum = 3 x 44000
readAgainProcess.readProcessBlocksOneAnalyzer4 = cms.EDAnalyzer("edmtest::one::InputProcessBlockIntAnalyzer",
transitions = cms.int32(24),
consumesBeginProcessBlock = cms.InputTag("intProducerBeginProcessBlockT", ""),
consumesEndProcessBlock = cms.InputTag("intProducerEndProcessBlockT", ""),
consumesBeginProcessBlockM = cms.InputTag("intProducerBeginProcessBlockM", ""),
consumesEndProcessBlockM = cms.InputTag("intProducerEndProcessBlockM", ""),
# The expectedByRun test cannot work because the data is from an earlier SubProcess
expectedByRun = cms.vint32(),
expectedFillerSum = cms.untracked.int32(132000),
expectedSum = cms.int32(488)
)
# transitions = 12 events + 9 access input ProcessBlock transitions + 3 fill cache functor calls
# sum = 44 + 444
# filler sum = 3 x 55000
readAgainProcess.readProcessBlocksOneAnalyzer5 = cms.EDAnalyzer("edmtest::one::InputProcessBlockIntAnalyzer",
transitions = cms.int32(24),
consumesBeginProcessBlock = cms.InputTag("intProducerBeginProcessBlockR", ""),
consumesEndProcessBlock = cms.InputTag("intProducerEndProcessBlockR", ""),
consumesBeginProcessBlockM = cms.InputTag("intProducerBeginProcessBlockM", ""),
consumesEndProcessBlockM = cms.InputTag("intProducerEndProcessBlockM", ""),
# The expectedByRun test cannot work because the data is from an earlier SubProcess
expectedByRun = cms.vint32(),
expectedFillerSum = cms.untracked.int32(165000),
expectedSum = cms.int32(488),
consumesBeginProcessBlockNotFound = cms.InputTag("intProducerBeginProcessBlockT"),
consumesEndProcessBlockNotFound = cms.InputTag("intProducerEndProcessBlockT")
)
readAgainProcess.intProducerBeginProcessBlockRA = cms.EDProducer("IntProducerBeginProcessBlock", ivalue = cms.int32(100000))
readAgainProcess.intProducerEndProcessBlockRA = cms.EDProducer("IntProducerEndProcessBlock", ivalue = cms.int32(1000000))
readAgainProcess.out = cms.OutputModule("PoolOutputModule",
fileName = cms.untracked.string('testProcessBlockSubProcessReadAgain.root'),
outputCommands = cms.untracked.vstring(
"keep *",
"drop *_*_beginProcessBlock_*",
"drop *_*_endProcessBlock_*"
)
)
readAgainProcess.testOneOutput = cms.OutputModule("TestOneOutput",
verbose = cms.untracked.bool(False),
expectedProcessesWithProcessBlockProducts = cms.untracked.vstring('PROD1', 'MERGE', 'MERGEOFMERGED', 'TEST', 'READ', 'READAGAIN'),
expectedTopProcessesWithProcessBlockProducts = cms.untracked.vstring('PROD1', 'MERGE', 'MERGEOFMERGED', 'TEST'),
expectedProcessesInFirstFile = cms.untracked.uint32(3),
expectedAddedProcesses = cms.untracked.vstring('TEST', 'READ', 'READAGAIN'),
expectedTopAddedProcesses = cms.untracked.vstring('TEST'),
expectedTopCacheIndices0 = cms.untracked.vuint32(0, 4, 6, 1, 4, 6, 2, 5, 6, 3, 5, 6),
expectedWriteProcessBlockTransitions = cms.untracked.int32(10),
expectedNEntries0 = cms.untracked.vuint32(4, 2, 1),
expectedCacheIndexVectorsPerFile = cms.untracked.vuint32(4),
expectedCacheEntriesPerFile0 = cms.untracked.vuint32(7),
expectedOuterOffset = cms.untracked.vuint32(0),
outputCommands = cms.untracked.vstring(
"keep *",
"drop *_*_beginProcessBlock_*",
"drop *_*_endProcessBlock_*"
)
)
readAgainProcess.p = cms.Path(
readAgainProcess.intProducerBeginProcessBlockRA *
readAgainProcess.intProducerEndProcessBlockRA *
readAgainProcess.readProcessBlocksOneAnalyzer1 *
readAgainProcess.readProcessBlocksOneAnalyzer2 *
readAgainProcess.readProcessBlocksOneAnalyzer3 *
readAgainProcess.readProcessBlocksOneAnalyzer4 *
readAgainProcess.readProcessBlocksOneAnalyzer5
)
readAgainProcess.e = cms.EndPath(
readAgainProcess.out *
readAgainProcess.testOneOutput
)
| [
"FWCore.ParameterSet.Config.untracked.string",
"FWCore.ParameterSet.Config.EndPath",
"FWCore.ParameterSet.Config.InputTag",
"FWCore.ParameterSet.Config.vint32",
"FWCore.ParameterSet.Config.untracked.vuint32",
"FWCore.ParameterSet.Config.int32",
"FWCore.ParameterSet.Config.untracked.int32",
"FWCore.ParameterSet.Config.untracked.uint32",
"FWCore.ParameterSet.Config.Process",
"FWCore.ParameterSet.Config.untracked.vstring",
"FWCore.ParameterSet.Config.untracked.bool",
"FWCore.ParameterSet.Config.Path"
] | [((52, 71), 'FWCore.ParameterSet.Config.Process', 'cms.Process', (['"""TEST"""'], {}), "('TEST')\n", (63, 71), True, 'import FWCore.ParameterSet.Config as cms\n'), ((1720, 1922), 'FWCore.ParameterSet.Config.Path', 'cms.Path', (['(process.eventIntProducer * process.transientIntProducerEndProcessBlock *\n process.nonEventIntProducer * process.intProducerBeginProcessBlockT *\n process.intProducerEndProcessBlockT)'], {}), '(process.eventIntProducer * process.\n transientIntProducerEndProcessBlock * process.nonEventIntProducer *\n process.intProducerBeginProcessBlockT * process.intProducerEndProcessBlockT\n )\n', (1728, 1922), True, 'import FWCore.ParameterSet.Config as cms\n'), ((1944, 1992), 'FWCore.ParameterSet.Config.EndPath', 'cms.EndPath', (['(process.out * process.testOneOutput)'], {}), '(process.out * process.testOneOutput)\n', (1955, 1992), True, 'import FWCore.ParameterSet.Config as cms\n'), ((2018, 2037), 'FWCore.ParameterSet.Config.Process', 'cms.Process', (['"""READ"""'], {}), "('READ')\n", (2029, 2037), True, 'import FWCore.ParameterSet.Config as cms\n'), ((3128, 3226), 'FWCore.ParameterSet.Config.Path', 'cms.Path', (['(readProcess.intProducerBeginProcessBlockR * readProcess.\n intProducerEndProcessBlockR)'], {}), '(readProcess.intProducerBeginProcessBlockR * readProcess.\n intProducerEndProcessBlockR)\n', (3136, 3226), True, 'import FWCore.ParameterSet.Config as cms\n'), ((3249, 3305), 'FWCore.ParameterSet.Config.EndPath', 'cms.EndPath', (['(readProcess.out * readProcess.testOneOutput)'], {}), '(readProcess.out * readProcess.testOneOutput)\n', (3260, 3305), True, 'import FWCore.ParameterSet.Config as cms\n'), ((3336, 3360), 'FWCore.ParameterSet.Config.Process', 'cms.Process', (['"""READAGAIN"""'], {}), "('READAGAIN')\n", (3347, 3360), True, 'import FWCore.ParameterSet.Config as cms\n'), ((10642, 11022), 'FWCore.ParameterSet.Config.Path', 'cms.Path', (['(readAgainProcess.intProducerBeginProcessBlockRA * readAgainProcess.\n intProducerEndProcessBlockRA * readAgainProcess.\n readProcessBlocksOneAnalyzer1 * readAgainProcess.\n readProcessBlocksOneAnalyzer2 * readAgainProcess.\n readProcessBlocksOneAnalyzer3 * readAgainProcess.\n readProcessBlocksOneAnalyzer4 * readAgainProcess.\n readProcessBlocksOneAnalyzer5)'], {}), '(readAgainProcess.intProducerBeginProcessBlockRA * readAgainProcess\n .intProducerEndProcessBlockRA * readAgainProcess.\n readProcessBlocksOneAnalyzer1 * readAgainProcess.\n readProcessBlocksOneAnalyzer2 * readAgainProcess.\n readProcessBlocksOneAnalyzer3 * readAgainProcess.\n readProcessBlocksOneAnalyzer4 * readAgainProcess.\n readProcessBlocksOneAnalyzer5)\n', (10650, 11022), True, 'import FWCore.ParameterSet.Config as cms\n'), ((11045, 11111), 'FWCore.ParameterSet.Config.EndPath', 'cms.EndPath', (['(readAgainProcess.out * readAgainProcess.testOneOutput)'], {}), '(readAgainProcess.out * readAgainProcess.testOneOutput)\n', (11056, 11111), True, 'import FWCore.ParameterSet.Config as cms\n'), ((133, 156), 'FWCore.ParameterSet.Config.untracked.uint32', 'cms.untracked.uint32', (['(1)'], {}), '(1)\n', (153, 156), True, 'import FWCore.ParameterSet.Config as cms\n'), ((180, 203), 'FWCore.ParameterSet.Config.untracked.uint32', 'cms.untracked.uint32', (['(1)'], {}), '(1)\n', (200, 203), True, 'import FWCore.ParameterSet.Config as cms\n'), ((234, 257), 'FWCore.ParameterSet.Config.untracked.uint32', 'cms.untracked.uint32', (['(1)'], {}), '(1)\n', (254, 257), True, 'import FWCore.ParameterSet.Config as cms\n'), ((300, 323), 'FWCore.ParameterSet.Config.untracked.uint32', 
'cms.untracked.uint32', (['(1)'], {}), '(1)\n', (320, 323), True, 'import FWCore.ParameterSet.Config as cms\n'), ((385, 454), 'FWCore.ParameterSet.Config.untracked.vstring', 'cms.untracked.vstring', (['"""file:testProcessBlockMergeOfMergedFiles.root"""'], {}), "('file:testProcessBlockMergeOfMergedFiles.root')\n", (406, 454), True, 'import FWCore.ParameterSet.Config as cms\n'), ((568, 583), 'FWCore.ParameterSet.Config.int32', 'cms.int32', (['(4000)'], {}), '(4000)\n', (577, 583), True, 'import FWCore.ParameterSet.Config as cms\n'), ((678, 694), 'FWCore.ParameterSet.Config.int32', 'cms.int32', (['(40000)'], {}), '(40000)\n', (687, 694), True, 'import FWCore.ParameterSet.Config as cms\n'), ((763, 822), 'FWCore.ParameterSet.Config.untracked.string', 'cms.untracked.string', (['"""testProcessBlockSubProcessTest.root"""'], {}), "('testProcessBlockSubProcessTest.root')\n", (783, 822), True, 'import FWCore.ParameterSet.Config as cms\n'), ((845, 942), 'FWCore.ParameterSet.Config.untracked.vstring', 'cms.untracked.vstring', (['"""keep *"""', '"""drop *_*_beginProcessBlock_*"""', '"""drop *_*_endProcessBlock_*"""'], {}), "('keep *', 'drop *_*_beginProcessBlock_*',\n 'drop *_*_endProcessBlock_*')\n", (866, 942), True, 'import FWCore.ParameterSet.Config as cms\n'), ((1044, 1069), 'FWCore.ParameterSet.Config.untracked.bool', 'cms.untracked.bool', (['(False)'], {}), '(False)\n', (1062, 1069), True, 'import FWCore.ParameterSet.Config as cms\n'), ((1119, 1183), 'FWCore.ParameterSet.Config.untracked.vstring', 'cms.untracked.vstring', (['"""PROD1"""', '"""MERGE"""', '"""MERGEOFMERGED"""', '"""TEST"""'], {}), "('PROD1', 'MERGE', 'MERGEOFMERGED', 'TEST')\n", (1140, 1183), True, 'import FWCore.ParameterSet.Config as cms\n'), ((1228, 1250), 'FWCore.ParameterSet.Config.untracked.int32', 'cms.untracked.int32', (['(8)'], {}), '(8)\n', (1247, 1250), True, 'import FWCore.ParameterSet.Config as cms\n'), ((1273, 1370), 'FWCore.ParameterSet.Config.untracked.vstring', 'cms.untracked.vstring', (['"""keep *"""', '"""drop *_*_beginProcessBlock_*"""', '"""drop *_*_endProcessBlock_*"""'], {}), "('keep *', 'drop *_*_beginProcessBlock_*',\n 'drop *_*_endProcessBlock_*')\n", (1294, 1370), True, 'import FWCore.ParameterSet.Config as cms\n'), ((1466, 1478), 'FWCore.ParameterSet.Config.int32', 'cms.int32', (['(1)'], {}), '(1)\n', (1475, 1478), True, 'import FWCore.ParameterSet.Config as cms\n'), ((1594, 1607), 'FWCore.ParameterSet.Config.int32', 'cms.int32', (['(90)'], {}), '(90)\n', (1603, 1607), True, 'import FWCore.ParameterSet.Config as cms\n'), ((1692, 1704), 'FWCore.ParameterSet.Config.int32', 'cms.int32', (['(1)'], {}), '(1)\n', (1701, 1704), True, 'import FWCore.ParameterSet.Config as cms\n'), ((2259, 2274), 'FWCore.ParameterSet.Config.int32', 'cms.int32', (['(5000)'], {}), '(5000)\n', (2268, 2274), True, 'import FWCore.ParameterSet.Config as cms\n'), ((2373, 2389), 'FWCore.ParameterSet.Config.int32', 'cms.int32', (['(50000)'], {}), '(50000)\n', (2382, 2389), True, 'import FWCore.ParameterSet.Config as cms\n'), ((2462, 2521), 'FWCore.ParameterSet.Config.untracked.string', 'cms.untracked.string', (['"""testProcessBlockSubProcessRead.root"""'], {}), "('testProcessBlockSubProcessRead.root')\n", (2482, 2521), True, 'import FWCore.ParameterSet.Config as cms\n'), ((2544, 2641), 'FWCore.ParameterSet.Config.untracked.vstring', 'cms.untracked.vstring', (['"""keep *"""', '"""drop *_*_beginProcessBlock_*"""', '"""drop *_*_endProcessBlock_*"""'], {}), "('keep *', 'drop *_*_beginProcessBlock_*',\n 'drop *_*_endProcessBlock_*')\n", (2565, 
2641), True, 'import FWCore.ParameterSet.Config as cms\n'), ((2747, 2772), 'FWCore.ParameterSet.Config.untracked.bool', 'cms.untracked.bool', (['(False)'], {}), '(False)\n', (2765, 2772), True, 'import FWCore.ParameterSet.Config as cms\n'), ((2822, 2894), 'FWCore.ParameterSet.Config.untracked.vstring', 'cms.untracked.vstring', (['"""PROD1"""', '"""MERGE"""', '"""MERGEOFMERGED"""', '"""TEST"""', '"""READ"""'], {}), "('PROD1', 'MERGE', 'MERGEOFMERGED', 'TEST', 'READ')\n", (2843, 2894), True, 'import FWCore.ParameterSet.Config as cms\n'), ((2940, 2962), 'FWCore.ParameterSet.Config.untracked.int32', 'cms.untracked.int32', (['(9)'], {}), '(9)\n', (2959, 2962), True, 'import FWCore.ParameterSet.Config as cms\n'), ((2985, 3082), 'FWCore.ParameterSet.Config.untracked.vstring', 'cms.untracked.vstring', (['"""keep *"""', '"""drop *_*_beginProcessBlock_*"""', '"""drop *_*_endProcessBlock_*"""'], {}), "('keep *', 'drop *_*_beginProcessBlock_*',\n 'drop *_*_endProcessBlock_*')\n", (3006, 3082), True, 'import FWCore.ParameterSet.Config as cms\n'), ((3798, 3811), 'FWCore.ParameterSet.Config.int32', 'cms.int32', (['(33)'], {}), '(33)\n', (3807, 3811), True, 'import FWCore.ParameterSet.Config as cms\n'), ((3885, 3933), 'FWCore.ParameterSet.Config.InputTag', 'cms.InputTag', (['"""intProducerBeginProcessBlock"""', '""""""'], {}), "('intProducerBeginProcessBlock', '')\n", (3897, 3933), True, 'import FWCore.ParameterSet.Config as cms\n'), ((4005, 4051), 'FWCore.ParameterSet.Config.InputTag', 'cms.InputTag', (['"""intProducerEndProcessBlock"""', '""""""'], {}), "('intProducerEndProcessBlock', '')\n", (4017, 4051), True, 'import FWCore.ParameterSet.Config as cms\n'), ((4126, 4175), 'FWCore.ParameterSet.Config.InputTag', 'cms.InputTag', (['"""intProducerBeginProcessBlockM"""', '""""""'], {}), "('intProducerBeginProcessBlockM', '')\n", (4138, 4175), True, 'import FWCore.ParameterSet.Config as cms\n'), ((4248, 4295), 'FWCore.ParameterSet.Config.InputTag', 'cms.InputTag', (['"""intProducerEndProcessBlockM"""', '""""""'], {}), "('intProducerEndProcessBlockM', '')\n", (4260, 4295), True, 'import FWCore.ParameterSet.Config as cms\n'), ((4357, 4387), 'FWCore.ParameterSet.Config.vint32', 'cms.vint32', (['(11)', '(22)', '(3300)', '(4400)'], {}), '(11, 22, 3300, 4400)\n', (4367, 4387), True, 'import FWCore.ParameterSet.Config as cms\n'), ((4447, 4462), 'FWCore.ParameterSet.Config.int32', 'cms.int32', (['(8221)'], {}), '(8221)\n', (4456, 4462), True, 'import FWCore.ParameterSet.Config as cms\n'), ((4748, 4761), 'FWCore.ParameterSet.Config.int32', 'cms.int32', (['(27)'], {}), '(27)\n', (4757, 4761), True, 'import FWCore.ParameterSet.Config as cms\n'), ((4835, 4884), 'FWCore.ParameterSet.Config.InputTag', 'cms.InputTag', (['"""intProducerBeginProcessBlockM"""', '""""""'], {}), "('intProducerBeginProcessBlockM', '')\n", (4847, 4884), True, 'import FWCore.ParameterSet.Config as cms\n'), ((4956, 5003), 'FWCore.ParameterSet.Config.InputTag', 'cms.InputTag', (['"""intProducerEndProcessBlockM"""', '""""""'], {}), "('intProducerEndProcessBlockM', '')\n", (4968, 5003), True, 'import FWCore.ParameterSet.Config as cms\n'), ((5078, 5127), 'FWCore.ParameterSet.Config.InputTag', 'cms.InputTag', (['"""intProducerBeginProcessBlockM"""', '""""""'], {}), "('intProducerBeginProcessBlockM', '')\n", (5090, 5127), True, 'import FWCore.ParameterSet.Config as cms\n'), ((5200, 5247), 'FWCore.ParameterSet.Config.InputTag', 'cms.InputTag', (['"""intProducerEndProcessBlockM"""', '""""""'], {}), "('intProducerEndProcessBlockM', '')\n", (5212, 5247), 
True, 'import FWCore.ParameterSet.Config as cms\n'), ((5309, 5337), 'FWCore.ParameterSet.Config.vint32', 'cms.vint32', (['(44)', '(44)', '(444)', '(444)'], {}), '(44, 44, 444, 444)\n', (5319, 5337), True, 'import FWCore.ParameterSet.Config as cms\n'), ((5397, 5411), 'FWCore.ParameterSet.Config.int32', 'cms.int32', (['(488)'], {}), '(488)\n', (5406, 5411), True, 'import FWCore.ParameterSet.Config as cms\n'), ((5697, 5710), 'FWCore.ParameterSet.Config.int32', 'cms.int32', (['(24)'], {}), '(24)\n', (5706, 5710), True, 'import FWCore.ParameterSet.Config as cms\n'), ((5784, 5834), 'FWCore.ParameterSet.Config.InputTag', 'cms.InputTag', (['"""intProducerBeginProcessBlockMM"""', '""""""'], {}), "('intProducerBeginProcessBlockMM', '')\n", (5796, 5834), True, 'import FWCore.ParameterSet.Config as cms\n'), ((5906, 5954), 'FWCore.ParameterSet.Config.InputTag', 'cms.InputTag', (['"""intProducerEndProcessBlockMM"""', '""""""'], {}), "('intProducerEndProcessBlockMM', '')\n", (5918, 5954), True, 'import FWCore.ParameterSet.Config as cms\n'), ((6029, 6078), 'FWCore.ParameterSet.Config.InputTag', 'cms.InputTag', (['"""intProducerBeginProcessBlockM"""', '""""""'], {}), "('intProducerBeginProcessBlockM', '')\n", (6041, 6078), True, 'import FWCore.ParameterSet.Config as cms\n'), ((6151, 6198), 'FWCore.ParameterSet.Config.InputTag', 'cms.InputTag', (['"""intProducerEndProcessBlockM"""', '""""""'], {}), "('intProducerEndProcessBlockM', '')\n", (6163, 6198), True, 'import FWCore.ParameterSet.Config as cms\n'), ((6260, 6290), 'FWCore.ParameterSet.Config.vint32', 'cms.vint32', (['(644)', '(644)', '(644)', '(644)'], {}), '(644, 644, 644, 644)\n', (6270, 6290), True, 'import FWCore.ParameterSet.Config as cms\n'), ((6350, 6364), 'FWCore.ParameterSet.Config.int32', 'cms.int32', (['(488)'], {}), '(488)\n', (6359, 6364), True, 'import FWCore.ParameterSet.Config as cms\n'), ((6675, 6688), 'FWCore.ParameterSet.Config.int32', 'cms.int32', (['(24)'], {}), '(24)\n', (6684, 6688), True, 'import FWCore.ParameterSet.Config as cms\n'), ((6762, 6811), 'FWCore.ParameterSet.Config.InputTag', 'cms.InputTag', (['"""intProducerBeginProcessBlockT"""', '""""""'], {}), "('intProducerBeginProcessBlockT', '')\n", (6774, 6811), True, 'import FWCore.ParameterSet.Config as cms\n'), ((6883, 6930), 'FWCore.ParameterSet.Config.InputTag', 'cms.InputTag', (['"""intProducerEndProcessBlockT"""', '""""""'], {}), "('intProducerEndProcessBlockT', '')\n", (6895, 6930), True, 'import FWCore.ParameterSet.Config as cms\n'), ((7005, 7054), 'FWCore.ParameterSet.Config.InputTag', 'cms.InputTag', (['"""intProducerBeginProcessBlockM"""', '""""""'], {}), "('intProducerBeginProcessBlockM', '')\n", (7017, 7054), True, 'import FWCore.ParameterSet.Config as cms\n'), ((7127, 7174), 'FWCore.ParameterSet.Config.InputTag', 'cms.InputTag', (['"""intProducerEndProcessBlockM"""', '""""""'], {}), "('intProducerEndProcessBlockM', '')\n", (7139, 7174), True, 'import FWCore.ParameterSet.Config as cms\n'), ((7364, 7376), 'FWCore.ParameterSet.Config.vint32', 'cms.vint32', ([], {}), '()\n', (7374, 7376), True, 'import FWCore.ParameterSet.Config as cms\n'), ((7442, 7469), 'FWCore.ParameterSet.Config.untracked.int32', 'cms.untracked.int32', (['(132000)'], {}), '(132000)\n', (7461, 7469), True, 'import FWCore.ParameterSet.Config as cms\n'), ((7529, 7543), 'FWCore.ParameterSet.Config.int32', 'cms.int32', (['(488)'], {}), '(488)\n', (7538, 7543), True, 'import FWCore.ParameterSet.Config as cms\n'), ((7854, 7867), 'FWCore.ParameterSet.Config.int32', 'cms.int32', (['(24)'], {}), 
'(24)\n', (7863, 7867), True, 'import FWCore.ParameterSet.Config as cms\n'), ((7941, 7990), 'FWCore.ParameterSet.Config.InputTag', 'cms.InputTag', (['"""intProducerBeginProcessBlockR"""', '""""""'], {}), "('intProducerBeginProcessBlockR', '')\n", (7953, 7990), True, 'import FWCore.ParameterSet.Config as cms\n'), ((8062, 8109), 'FWCore.ParameterSet.Config.InputTag', 'cms.InputTag', (['"""intProducerEndProcessBlockR"""', '""""""'], {}), "('intProducerEndProcessBlockR', '')\n", (8074, 8109), True, 'import FWCore.ParameterSet.Config as cms\n'), ((8184, 8233), 'FWCore.ParameterSet.Config.InputTag', 'cms.InputTag', (['"""intProducerBeginProcessBlockM"""', '""""""'], {}), "('intProducerBeginProcessBlockM', '')\n", (8196, 8233), True, 'import FWCore.ParameterSet.Config as cms\n'), ((8306, 8353), 'FWCore.ParameterSet.Config.InputTag', 'cms.InputTag', (['"""intProducerEndProcessBlockM"""', '""""""'], {}), "('intProducerEndProcessBlockM', '')\n", (8318, 8353), True, 'import FWCore.ParameterSet.Config as cms\n'), ((8543, 8555), 'FWCore.ParameterSet.Config.vint32', 'cms.vint32', ([], {}), '()\n', (8553, 8555), True, 'import FWCore.ParameterSet.Config as cms\n'), ((8621, 8648), 'FWCore.ParameterSet.Config.untracked.int32', 'cms.untracked.int32', (['(165000)'], {}), '(165000)\n', (8640, 8648), True, 'import FWCore.ParameterSet.Config as cms\n'), ((8708, 8722), 'FWCore.ParameterSet.Config.int32', 'cms.int32', (['(488)'], {}), '(488)\n', (8717, 8722), True, 'import FWCore.ParameterSet.Config as cms\n'), ((8804, 8849), 'FWCore.ParameterSet.Config.InputTag', 'cms.InputTag', (['"""intProducerBeginProcessBlockT"""'], {}), "('intProducerBeginProcessBlockT')\n", (8816, 8849), True, 'import FWCore.ParameterSet.Config as cms\n'), ((8929, 8972), 'FWCore.ParameterSet.Config.InputTag', 'cms.InputTag', (['"""intProducerEndProcessBlockT"""'], {}), "('intProducerEndProcessBlockT')\n", (8941, 8972), True, 'import FWCore.ParameterSet.Config as cms\n'), ((9082, 9099), 'FWCore.ParameterSet.Config.int32', 'cms.int32', (['(100000)'], {}), '(100000)\n', (9091, 9099), True, 'import FWCore.ParameterSet.Config as cms\n'), ((9204, 9222), 'FWCore.ParameterSet.Config.int32', 'cms.int32', (['(1000000)'], {}), '(1000000)\n', (9213, 9222), True, 'import FWCore.ParameterSet.Config as cms\n'), ((9300, 9364), 'FWCore.ParameterSet.Config.untracked.string', 'cms.untracked.string', (['"""testProcessBlockSubProcessReadAgain.root"""'], {}), "('testProcessBlockSubProcessReadAgain.root')\n", (9320, 9364), True, 'import FWCore.ParameterSet.Config as cms\n'), ((9387, 9484), 'FWCore.ParameterSet.Config.untracked.vstring', 'cms.untracked.vstring', (['"""keep *"""', '"""drop *_*_beginProcessBlock_*"""', '"""drop *_*_endProcessBlock_*"""'], {}), "('keep *', 'drop *_*_beginProcessBlock_*',\n 'drop *_*_endProcessBlock_*')\n", (9408, 9484), True, 'import FWCore.ParameterSet.Config as cms\n'), ((9595, 9620), 'FWCore.ParameterSet.Config.untracked.bool', 'cms.untracked.bool', (['(False)'], {}), '(False)\n', (9613, 9620), True, 'import FWCore.ParameterSet.Config as cms\n'), ((9670, 9759), 'FWCore.ParameterSet.Config.untracked.vstring', 'cms.untracked.vstring', (['"""PROD1"""', '"""MERGE"""', '"""MERGEOFMERGED"""', '"""TEST"""', '"""READ"""', '"""READAGAIN"""'], {}), "('PROD1', 'MERGE', 'MERGEOFMERGED', 'TEST', 'READ',\n 'READAGAIN')\n", (9691, 9759), True, 'import FWCore.ParameterSet.Config as cms\n'), ((9809, 9873), 'FWCore.ParameterSet.Config.untracked.vstring', 'cms.untracked.vstring', (['"""PROD1"""', '"""MERGE"""', '"""MERGEOFMERGED"""', '"""TEST"""'], 
{}), "('PROD1', 'MERGE', 'MERGEOFMERGED', 'TEST')\n", (9830, 9873), True, 'import FWCore.ParameterSet.Config as cms\n'), ((9910, 9933), 'FWCore.ParameterSet.Config.untracked.uint32', 'cms.untracked.uint32', (['(3)'], {}), '(3)\n', (9930, 9933), True, 'import FWCore.ParameterSet.Config as cms\n'), ((9964, 10014), 'FWCore.ParameterSet.Config.untracked.vstring', 'cms.untracked.vstring', (['"""TEST"""', '"""READ"""', '"""READAGAIN"""'], {}), "('TEST', 'READ', 'READAGAIN')\n", (9985, 10014), True, 'import FWCore.ParameterSet.Config as cms\n'), ((10049, 10078), 'FWCore.ParameterSet.Config.untracked.vstring', 'cms.untracked.vstring', (['"""TEST"""'], {}), "('TEST')\n", (10070, 10078), True, 'import FWCore.ParameterSet.Config as cms\n'), ((10111, 10168), 'FWCore.ParameterSet.Config.untracked.vuint32', 'cms.untracked.vuint32', (['(0)', '(4)', '(6)', '(1)', '(4)', '(6)', '(2)', '(5)', '(6)', '(3)', '(5)', '(6)'], {}), '(0, 4, 6, 1, 4, 6, 2, 5, 6, 3, 5, 6)\n', (10132, 10168), True, 'import FWCore.ParameterSet.Config as cms\n'), ((10213, 10236), 'FWCore.ParameterSet.Config.untracked.int32', 'cms.untracked.int32', (['(10)'], {}), '(10)\n', (10232, 10236), True, 'import FWCore.ParameterSet.Config as cms\n'), ((10262, 10292), 'FWCore.ParameterSet.Config.untracked.vuint32', 'cms.untracked.vuint32', (['(4)', '(2)', '(1)'], {}), '(4, 2, 1)\n', (10283, 10292), True, 'import FWCore.ParameterSet.Config as cms\n'), ((10333, 10357), 'FWCore.ParameterSet.Config.untracked.vuint32', 'cms.untracked.vuint32', (['(4)'], {}), '(4)\n', (10354, 10357), True, 'import FWCore.ParameterSet.Config as cms\n'), ((10395, 10419), 'FWCore.ParameterSet.Config.untracked.vuint32', 'cms.untracked.vuint32', (['(7)'], {}), '(7)\n', (10416, 10419), True, 'import FWCore.ParameterSet.Config as cms\n'), ((10447, 10471), 'FWCore.ParameterSet.Config.untracked.vuint32', 'cms.untracked.vuint32', (['(0)'], {}), '(0)\n', (10468, 10471), True, 'import FWCore.ParameterSet.Config as cms\n'), ((10494, 10591), 'FWCore.ParameterSet.Config.untracked.vstring', 'cms.untracked.vstring', (['"""keep *"""', '"""drop *_*_beginProcessBlock_*"""', '"""drop *_*_endProcessBlock_*"""'], {}), "('keep *', 'drop *_*_beginProcessBlock_*',\n 'drop *_*_endProcessBlock_*')\n", (10515, 10591), True, 'import FWCore.ParameterSet.Config as cms\n'), ((2109, 2140), 'FWCore.ParameterSet.Config.untracked.vstring', 'cms.untracked.vstring', (['"""keep *"""'], {}), "('keep *')\n", (2130, 2140), True, 'import FWCore.ParameterSet.Config as cms\n'), ((3441, 3472), 'FWCore.ParameterSet.Config.untracked.vstring', 'cms.untracked.vstring', (['"""keep *"""'], {}), "('keep *')\n", (3462, 3472), True, 'import FWCore.ParameterSet.Config as cms\n')] |
import tempfile
import pytest
from flaskr import flaskr
@pytest.fixture
def client():
flaskr.app.config['TESTING'] = True
with flaskr.app.test_client() as client:
with flaskr.app.app_context():
yield client
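# Minimal sanity test added for illustration (not part of the original
# snippet); it only checks that the fixture wiring above yields a usable
# test client, without assuming anything about flaskr's routes or database.
def test_client_fixture_yields_test_client(client):
    # pytest injects the `client` fixture defined above.
    assert client is not None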
| [
"flaskr.flaskr.app.app_context",
"flaskr.flaskr.app.test_client"
] | [((140, 164), 'flaskr.flaskr.app.test_client', 'flaskr.app.test_client', ([], {}), '()\n', (162, 164), False, 'from flaskr import flaskr\n'), ((189, 213), 'flaskr.flaskr.app.app_context', 'flaskr.app.app_context', ([], {}), '()\n', (211, 213), False, 'from flaskr import flaskr\n')] |
from PySide2.QtWidgets import QListView
from PySide2.QtCore import QObject, Signal
class DeselectableListView(QListView):
selectionCleared = Signal()
def mousePressEvent(self, event):
self.clearSelection()
QListView.mousePressEvent(self, event)
self.selectionCleared.emit()
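# Hedged usage sketch (illustrative only; assumes a QApplication is already
# running elsewhere in the program):
#
#   view = DeselectableListView()
#   view.selectionCleared.connect(lambda: print("selection cleared"))
#
# Every mouse press first clears the current selection, then falls through to
# the normal QListView press handling, and finally emits selectionCleared.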
| [
"PySide2.QtWidgets.QListView.mousePressEvent",
"PySide2.QtCore.Signal"
] | [((156, 164), 'PySide2.QtCore.Signal', 'Signal', ([], {}), '()\n', (162, 164), False, 'from PySide2.QtCore import QObject, Signal\n'), ((242, 280), 'PySide2.QtWidgets.QListView.mousePressEvent', 'QListView.mousePressEvent', (['self', 'event'], {}), '(self, event)\n', (267, 280), False, 'from PySide2.QtWidgets import QListView\n')] |
import torch
import torchio as tio
from ...utils import TorchioTestCase
class TestClamp(TorchioTestCase):
"""Tests for :class:`tio.Clamp` class."""
def test_out_min_max(self):
transform = tio.Clamp(out_min=0, out_max=1)
transformed = transform(self.sample_subject)
self.assertEqual(transformed.t1.data.min(), 0)
self.assertEqual(transformed.t1.data.max(), 1)
def test_ct(self):
ct_max = 1500
ct_min = -2000
ct_range = ct_max - ct_min
tensor = torch.rand(1, 30, 30, 30) * ct_range + ct_min
ct = tio.ScalarImage(tensor=tensor)
ct_air = -1000
ct_bone = 1000
clamp = tio.Clamp(ct_air, ct_bone)
clamped = clamp(ct)
assert clamped.data.min() == ct_air
assert clamped.data.max() == ct_bone
def test_too_many_values_for_out_min(self):
with self.assertRaises(TypeError):
clamp = tio.Clamp(out_min=(1, 2))
clamp(self.sample_subject)
def test_too_many_values_for_out_max(self):
with self.assertRaises(TypeError):
clamp = tio.Clamp(out_max=(1, 2))
clamp(self.sample_subject)
def test_wrong_out_min_type(self):
with self.assertRaises(TypeError):
clamp = tio.Clamp(out_min='foo')
clamp(self.sample_subject)
def test_wrong_out_max_type(self):
with self.assertRaises(TypeError):
clamp = tio.Clamp(out_max='foo')
clamp(self.sample_subject)
| [
"torchio.Clamp",
"torchio.ScalarImage",
"torch.rand"
] | [((207, 238), 'torchio.Clamp', 'tio.Clamp', ([], {'out_min': '(0)', 'out_max': '(1)'}), '(out_min=0, out_max=1)\n', (216, 238), True, 'import torchio as tio\n'), ((582, 612), 'torchio.ScalarImage', 'tio.ScalarImage', ([], {'tensor': 'tensor'}), '(tensor=tensor)\n', (597, 612), True, 'import torchio as tio\n'), ((675, 701), 'torchio.Clamp', 'tio.Clamp', (['ct_air', 'ct_bone'], {}), '(ct_air, ct_bone)\n', (684, 701), True, 'import torchio as tio\n'), ((931, 956), 'torchio.Clamp', 'tio.Clamp', ([], {'out_min': '(1, 2)'}), '(out_min=(1, 2))\n', (940, 956), True, 'import torchio as tio\n'), ((1108, 1133), 'torchio.Clamp', 'tio.Clamp', ([], {'out_max': '(1, 2)'}), '(out_max=(1, 2))\n', (1117, 1133), True, 'import torchio as tio\n'), ((1276, 1300), 'torchio.Clamp', 'tio.Clamp', ([], {'out_min': '"""foo"""'}), "(out_min='foo')\n", (1285, 1300), True, 'import torchio as tio\n'), ((1443, 1467), 'torchio.Clamp', 'tio.Clamp', ([], {'out_max': '"""foo"""'}), "(out_max='foo')\n", (1452, 1467), True, 'import torchio as tio\n'), ((523, 548), 'torch.rand', 'torch.rand', (['(1)', '(30)', '(30)', '(30)'], {}), '(1, 30, 30, 30)\n', (533, 548), False, 'import torch\n')] |
from abc import abstractmethod, ABC
from datetime import datetime
from importlib import import_module
from typing import IO
import requests
from django.conf import settings
from . import converters
def get_repository():
repository = settings.REPOSITORIES['CSV_REPOSITORY']
try:
module_path, class_name = repository.rsplit('.', 1)
module = import_module(module_path)
return getattr(module, class_name)()
except (ImportError, AttributeError) as e:
raise ImportError(repository)
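# Illustrative Django settings entry (the dotted path below is an assumption,
# not taken from the original project):
#
#   REPOSITORIES = {
#       'CSV_REPOSITORY': 'ppd.repositories.FileSystemCachedCsvPpdRepository',
#   }
#
# get_repository() splits the dotted path, imports the module and returns an
# instance of the named repository class.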
class CsvPpdRepository(ABC):
def find_record_by_id(self, transaction_id):
filter = lambda row: row[0] == '{' + transaction_id + '}'
return self.__get_records(filter, 0, 1)
def find_all_records(self, offset, limit):
filter = lambda row: True
return self.__get_records(filter, offset, limit)
def find_all_records_between(self, from_period, until_period, offset, limit):
filter = lambda row: datetime.strptime(row[2],settings.DATE_FORMAT) >= from_period and datetime.strptime(row[2],settings.DATE_FORMAT) <= until_period
return self.__get_records(filter, offset, limit)
def __get_records(self, filter, offset, limit):
records = []
converter = converters.PpdCsvRowConverter()
        with self.get_csv_data() as file:
i = 0
matches = 0
while matches < limit :
line = file.readline()
if not line:
break
if i >= offset and filter(line.replace('"','').split(sep=',')):
records.append(converter.covertCsvRow(line))
matches += 1
i += 1
self.cleanup()
return records
@abstractmethod
def get_csv_data(self) -> IO:
pass
def cleanup(self):
pass
class FileSystemCachedCsvPpdRepository(CsvPpdRepository):
fileUrl = settings.CSV_DATA_LOCATION
def __init__(self) -> None:
pass
def get_csv_data(self) -> IO:
return open(self.fileUrl,"r")
class RealTimeLatestCsvPpdRepository(CsvPpdRepository):
fileUrl = settings.CSV_DATA_LOCATION
def __init__(self) -> None:
pass
def get_csv_data(self) -> IO:
response = requests.get(self.fileUrl)
self.temp_file_path = "latest_ppd_cache.csv"
file = open(self.temp_file_path, 'w+')
file.write(response.text)
file.seek(0)
return file
def cleanup(self):
pass
| [
"datetime.datetime.strptime",
"importlib.import_module",
"requests.get"
] | [((367, 393), 'importlib.import_module', 'import_module', (['module_path'], {}), '(module_path)\n', (380, 393), False, 'from importlib import import_module\n'), ((2273, 2299), 'requests.get', 'requests.get', (['self.fileUrl'], {}), '(self.fileUrl)\n', (2285, 2299), False, 'import requests\n'), ((970, 1017), 'datetime.datetime.strptime', 'datetime.strptime', (['row[2]', 'settings.DATE_FORMAT'], {}), '(row[2], settings.DATE_FORMAT)\n', (987, 1017), False, 'from datetime import datetime\n'), ((1036, 1083), 'datetime.datetime.strptime', 'datetime.strptime', (['row[2]', 'settings.DATE_FORMAT'], {}), '(row[2], settings.DATE_FORMAT)\n', (1053, 1083), False, 'from datetime import datetime\n')] |
from pydantic import BaseModel, Field
import strawberry
def test_use_alias_as_gql_name():
class UserModel(BaseModel):
age_: int = Field(..., alias="age_alias")
@strawberry.experimental.pydantic.type(
UserModel, all_fields=True, use_pydantic_alias=True
)
class User:
...
@strawberry.type
class Query:
user: User = User(age_=5)
schema = strawberry.Schema(query=Query)
query = """{
user {
__typename,
... on User {
age_alias
}
}
}"""
result = schema.execute_sync(query, root_value=Query())
assert not result.errors
assert result.data["user"] == {"__typename": "User", "age_alias": 5}
def test_do_not_use_alias_as_gql_name():
class UserModel(BaseModel):
age_: int = Field(..., alias="age_alias")
@strawberry.experimental.pydantic.type(
UserModel, all_fields=True, use_pydantic_alias=False
)
class User:
...
@strawberry.type
class Query:
user: User = User(age_=5)
schema = strawberry.Schema(query=Query)
query = """{
user {
__typename,
... on User {
age_
}
}
}"""
result = schema.execute_sync(query, root_value=Query())
assert not result.errors
assert result.data["user"] == {"__typename": "User", "age_": 5}
| [
"strawberry.experimental.pydantic.type",
"pydantic.Field",
"strawberry.Schema"
] | [((181, 275), 'strawberry.experimental.pydantic.type', 'strawberry.experimental.pydantic.type', (['UserModel'], {'all_fields': '(True)', 'use_pydantic_alias': '(True)'}), '(UserModel, all_fields=True,\n use_pydantic_alias=True)\n', (218, 275), False, 'import strawberry\n'), ((401, 431), 'strawberry.Schema', 'strawberry.Schema', ([], {'query': 'Query'}), '(query=Query)\n', (418, 431), False, 'import strawberry\n'), ((869, 964), 'strawberry.experimental.pydantic.type', 'strawberry.experimental.pydantic.type', (['UserModel'], {'all_fields': '(True)', 'use_pydantic_alias': '(False)'}), '(UserModel, all_fields=True,\n use_pydantic_alias=False)\n', (906, 964), False, 'import strawberry\n'), ((1090, 1120), 'strawberry.Schema', 'strawberry.Schema', ([], {'query': 'Query'}), '(query=Query)\n', (1107, 1120), False, 'import strawberry\n'), ((145, 174), 'pydantic.Field', 'Field', (['...'], {'alias': '"""age_alias"""'}), "(..., alias='age_alias')\n", (150, 174), False, 'from pydantic import BaseModel, Field\n'), ((833, 862), 'pydantic.Field', 'Field', (['...'], {'alias': '"""age_alias"""'}), "(..., alias='age_alias')\n", (838, 862), False, 'from pydantic import BaseModel, Field\n')] |
import requests
from time import sleep
HEADERS = {'User-Agent': 'AVITO 52.0 (iPad5,1; 11.3.1; en_RU)'}
def get_json(url: str):
return requests.get(url, headers=HEADERS).json()
def get_locs() -> dict:
locations = {621540: 'Вся Россия'}
url = 'https://www.avito.ru/api/2/locations/top/children?includeRefs=1&key=<KEY>'
j = get_json(url)
for e in j:
loc_id = int(e['id'])
loc_name = e['names']['1']
locations[loc_id] = loc_name
return locations
def get_vacancies_count(by_loc_id: int) -> int:
url = 'https://www.avito.ru/api/9/items?categoryId=110&countOnly=1&locationId={0}&sort=default&key=ZaeC8aidairahqu2Eeb1quee9einaeFieboocohX' \
.format(by_loc_id)
return int(get_json(url)['result']['count'])
def parse():
locs = get_locs()
for loc_id in locs:
loc_name = locs[loc_id]
c = get_vacancies_count(loc_id)
print('{0} in {1}'.format(c, loc_name))
sleep(1)
parse()
| [
"requests.get",
"time.sleep"
] | [((962, 970), 'time.sleep', 'sleep', (['(1)'], {}), '(1)\n', (967, 970), False, 'from time import sleep\n'), ((141, 175), 'requests.get', 'requests.get', (['url'], {'headers': 'HEADERS'}), '(url, headers=HEADERS)\n', (153, 175), False, 'import requests\n')] |
import celery
import json
@celery.task()
def cache_standings(load_oauth_from_redis=True, write_oauth_to_redis=True):
from deependeliminator.standings import build_standings_list
from application import redis_store
from deependeliminator.standings import get_week
redis_store.setex(
'standings',
7200,
json.dumps(build_standings_list(
week=get_week(),
load_oauth_from_redis=load_oauth_from_redis,
write_oauth_to_redis=write_oauth_to_redis
))
)
| [
"deependeliminator.standings.get_week",
"celery.task"
] | [((29, 42), 'celery.task', 'celery.task', ([], {}), '()\n', (40, 42), False, 'import celery\n'), ((394, 404), 'deependeliminator.standings.get_week', 'get_week', ([], {}), '()\n', (402, 404), False, 'from deependeliminator.standings import get_week\n')] |
"""
Get Certificate Chain Info
==========================
This module contains the following parsers:
SatelliteCustomCaChain - command ``awk 'BEGIN { pipe="openssl x509 -noout -subject -enddate"} /^-+BEGIN CERT/,/^-+END CERT/ { print | pipe } /^-+END CERT/ { close(pipe); printf("\\n")}' /etc/pki/katello/certs/katello-server-ca.crt``
========================================================================================================================================================================================================================================
"""
from insights import parser, CommandParser
from datetime import datetime
from insights.parsers import ParseException, SkipException
from insights.specs import Specs
from insights.parsers.certificates_enddate import CertificatesEnddate
class CertificateChain(CommandParser, list):
"""
Class to parse the output of "openssl -in <certificate_chain_file> -xxx -xxx".
    A blank line is added to distinguish different certs in the chain.
    Currently it only supports attributes whose output is in
    key=value pairs.
Sample Output::
issuer= /C=US/ST=North Carolina/L=Raleigh/O=Katello/OU=SomeOrgUnit/CN=test.a.com
subject= /C=US/ST=North Carolina/L=Raleigh/O=Katello/OU=SomeOrgUnit/CN=test.b.com
notBefore=Dec 7 07:02:33 2020 GMT
notAfter=Jan 18 07:02:33 2038 GMT
issuer= /C=US/ST=North Carolina/L=Raleigh/O=Katello/OU=SomeOrgUnit/CN=test.c.com
subject= /C=US/ST=North Carolina/O=Katello/OU=SomeOrgUnit/CN=test.d.com
notBefore=Nov 30 07:02:42 2020 GMT
notAfter=Jan 18 07:02:43 2018 GMT
Examples:
>>> type(certs)
<class 'insights.parsers.certificate_chain.CertificateChain'>
>>> len(certs)
2
>>> certs.earliest_expiry_date.str
'Jan 18 07:02:43 2018'
"""
expire_date_format = '%b %d %H:%M:%S %Y'
def parse_content(self, content):
"""
        Parse the content of the cert chain file and save the expiration
        info of each cert in a list of dicts. The values of notBefore and
        notAfter are stored as ExpirationDate instances, which hold the
        date in both string and datetime formats.
Attributes:
earliest_expiry_date(ExpirationDate):
The earliest expiry datetime of the certs in the chain.
                None when none of the certs in the chain provides a
                "notAfter" value.
Raises:
ParseException: when the output isn't in key=value format or
the notAfter or notBefore isn't expected format.
"""
if len(content) < 1:
raise SkipException("No cert in the output")
data = {}
self.append(data)
self.earliest_expiry_date = None
for index, line in enumerate(content):
if not line.strip():
# a new cert starts
if data:
data = {}
self.append(data)
continue
if '=' not in line:
raise ParseException('The line %s is not in key=value format' % line)
key, value = [item.strip() for item in line.split('=', 1)]
value_without_tz = value.rsplit(" ", 1)[0]
if key in ['notBefore', 'notAfter']:
try:
date_time = datetime.strptime(value_without_tz, self.expire_date_format)
except Exception:
raise ParseException('The %s is not in %s format.' % (key, self.expire_date_format))
value = CertificatesEnddate.ExpirationDate(value_without_tz, date_time)
data[key] = value
for one_cert in self:
expire_date = one_cert.get('notAfter')
if expire_date and (self.earliest_expiry_date is None or expire_date.datetime < self.earliest_expiry_date.datetime):
self.earliest_expiry_date = expire_date
@parser(Specs.satellite_custom_ca_chain)
class SatelliteCustomCaChain(CertificateChain):
"""
.. note::
Please refer to its super-class :class:`insights.parsers.certificate_chain.CertificateChain` for more
details.
Sample Output::
subject= /C=US/ST=North Carolina/L=Raleigh/O=Katello/OU=SomeOrgUnit/CN=test.a.com
notAfter=Jan 18 07:02:33 2038 GMT
subject= /C=US/ST=North Carolina/O=Katello/OU=SomeOrgUnit/CN=test.b.com
notAfter=Jan 18 07:02:43 2028 GMT
Examples:
>>> type(satellite_ca_certs)
<class 'insights.parsers.certificate_chain.SatelliteCustomCaChain'>
>>> len(satellite_ca_certs)
2
>>> satellite_ca_certs.earliest_expiry_date.str
'Jan 18 07:02:43 2028'
"""
pass
| [
"datetime.datetime.strptime",
"insights.parsers.ParseException",
"insights.parser",
"insights.parsers.SkipException",
"insights.parsers.certificates_enddate.CertificatesEnddate.ExpirationDate"
] | [((3992, 4031), 'insights.parser', 'parser', (['Specs.satellite_custom_ca_chain'], {}), '(Specs.satellite_custom_ca_chain)\n', (3998, 4031), False, 'from insights import parser, CommandParser\n'), ((2700, 2738), 'insights.parsers.SkipException', 'SkipException', (['"""No cert in the output"""'], {}), "('No cert in the output')\n", (2713, 2738), False, 'from insights.parsers import ParseException, SkipException\n'), ((3112, 3175), 'insights.parsers.ParseException', 'ParseException', (["('The line %s is not in key=value format' % line)"], {}), "('The line %s is not in key=value format' % line)\n", (3126, 3175), False, 'from insights.parsers import ParseException, SkipException\n'), ((3628, 3691), 'insights.parsers.certificates_enddate.CertificatesEnddate.ExpirationDate', 'CertificatesEnddate.ExpirationDate', (['value_without_tz', 'date_time'], {}), '(value_without_tz, date_time)\n', (3662, 3691), False, 'from insights.parsers.certificates_enddate import CertificatesEnddate\n'), ((3404, 3464), 'datetime.datetime.strptime', 'datetime.strptime', (['value_without_tz', 'self.expire_date_format'], {}), '(value_without_tz, self.expire_date_format)\n', (3421, 3464), False, 'from datetime import datetime\n'), ((3525, 3603), 'insights.parsers.ParseException', 'ParseException', (["('The %s is not in %s format.' % (key, self.expire_date_format))"], {}), "('The %s is not in %s format.' % (key, self.expire_date_format))\n", (3539, 3603), False, 'from insights.parsers import ParseException, SkipException\n')] |
from textwrap import dedent
from sphinxnotes.any import Schema, Field
confval = Schema(
'confval',
name=Field(unique=True, referenceable=True),
attrs={
'type': Field(),
'default': Field(),
},
description_template=dedent("""
:Type: ``{{ type }}``
:Default: ``{{ default}}``
{{ content }}"""),
reference_template='⚙️{{ title }}',
missing_reference_template='⚙️{{ title }}')
| [
"textwrap.dedent",
"sphinxnotes.any.Field"
] | [((113, 151), 'sphinxnotes.any.Field', 'Field', ([], {'unique': '(True)', 'referenceable': '(True)'}), '(unique=True, referenceable=True)\n', (118, 151), False, 'from sphinxnotes.any import Schema, Field\n'), ((250, 362), 'textwrap.dedent', 'dedent', (['"""\n :Type: ``{{ type }}``\n :Default: ``{{ default}}``\n\n {{ content }}"""'], {}), '(\n """\n :Type: ``{{ type }}``\n :Default: ``{{ default}}``\n\n {{ content }}"""\n )\n', (256, 362), False, 'from textwrap import dedent\n'), ((181, 188), 'sphinxnotes.any.Field', 'Field', ([], {}), '()\n', (186, 188), False, 'from sphinxnotes.any import Schema, Field\n'), ((209, 216), 'sphinxnotes.any.Field', 'Field', ([], {}), '()\n', (214, 216), False, 'from sphinxnotes.any import Schema, Field\n')] |
from flask import Flask, render_template, request, jsonify
import pandas as pd
import numpy as np
from pymongo import MongoClient
from ..ALS_recommender import ALSRecommender
import pickle
we_eat_client = MongoClient()
we_eat_database = we_eat_client['we_eat']
survey_collection = we_eat_database['surveys']
partner_collection = we_eat_database['partners']
app = Flask(__name__, static_url_path='')
with open('data/item_factors_df.pkl', 'rb') as f:
item_factors = pickle.load(f)
with open('data/inv_alias_dict.pickle', 'rb') as g:
inv_alias_dict = pickle.load(g)
@app.route('/')
def home():
return render_template('stylish.html')
@app.route('/survey', methods=['POST'])
def survey():
data = request.get_json()
survey_collection.insert_one(data)
print(data)
return f"<h2>Thanks, {data['user']}! We've received your survey.</h2>"
@app.route('/recommend/<user1>/<user2>', methods=['GET'])
def recommend_for_two_users(user1, user2):
"""Return a recommendation, given two usernames."""
user1_survey = survey_collection.find_one({'user': user1})
user2_survey = survey_collection.find_one({'user': user2})
recommender = ALSRecommender(item_factors, inv_alias_dict)
user1_df = recommender.user_preds_from_survey(user1_survey)
user2_df = recommender.user_preds_from_survey(user2_survey)
compiled_df = recommender.compile_df(user1_df, user2_df)
top_three = recommender.top_recs(user1, user2, compiled_df)
top_three_rest = list(top_three.index)
random_rec = recommender.get_a_rec(user1, user2, compiled_df)
return f'<h2> Try this place out! <a href="https://www.yelp.com/biz/{random_rec.index[0]}">{random_rec.index[0]}</a></h2>'
#The top 3 matches for {user1} and {user2} are: {top_three_rest[0]}, {top_three_rest[1]}, {top_three_rest[2]}, PLUS a random rec out of your top 30
if __name__ == '__main__':
app.run(debug=True) | [
"flask.render_template",
"flask.Flask",
"pickle.load",
"flask.request.get_json",
"pymongo.MongoClient"
] | [((208, 221), 'pymongo.MongoClient', 'MongoClient', ([], {}), '()\n', (219, 221), False, 'from pymongo import MongoClient\n'), ((368, 403), 'flask.Flask', 'Flask', (['__name__'], {'static_url_path': '""""""'}), "(__name__, static_url_path='')\n", (373, 403), False, 'from flask import Flask, render_template, request, jsonify\n'), ((474, 488), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (485, 488), False, 'import pickle\n'), ((563, 577), 'pickle.load', 'pickle.load', (['g'], {}), '(g)\n', (574, 577), False, 'import pickle\n'), ((618, 649), 'flask.render_template', 'render_template', (['"""stylish.html"""'], {}), "('stylish.html')\n", (633, 649), False, 'from flask import Flask, render_template, request, jsonify\n'), ((716, 734), 'flask.request.get_json', 'request.get_json', ([], {}), '()\n', (732, 734), False, 'from flask import Flask, render_template, request, jsonify\n')] |
import pandas as pd
# Series
number_list = pd.Series([1,2,3,4,5,6])
# DataFrame
matrix_list = pd.DataFrame([[1,2,3],
['a','b','c'],
[3,4,5],
['d',4,6]])
# [1] attribute .info()
print("[1] attribute .info()")
print(matrix_list.info())
# [2] attribute .shape
print("\n[2] attribute .shape")
print(" Shape dari number_list:", number_list.shape)
print(" Shape dari matrix_list:", matrix_list.shape)
# [3] attribute .dtypes
print("\n[3] attribute .dtypes")
print(" Tipe data number_list:", number_list.dtypes)
print(" Tipe data matrix_list:", matrix_list.dtypes)
# [4] attribute .astype()
print("\n[4] attribute .astype()")
print(" Konversi number_list ke str:", number_list.astype("str"))
print(" Konversi matrix_list ke str:", matrix_list.astype("str")) | [
"pandas.Series",
"pandas.DataFrame"
] | [((43, 72), 'pandas.Series', 'pd.Series', (['[1, 2, 3, 4, 5, 6]'], {}), '([1, 2, 3, 4, 5, 6])\n', (52, 72), True, 'import pandas as pd\n'), ((94, 160), 'pandas.DataFrame', 'pd.DataFrame', (["[[1, 2, 3], ['a', 'b', 'c'], [3, 4, 5], ['d', 4, 6]]"], {}), "([[1, 2, 3], ['a', 'b', 'c'], [3, 4, 5], ['d', 4, 6]])\n", (106, 160), True, 'import pandas as pd\n')] |
import sopel
@sopel.module.commands('winky')
def dick(bot, trigger):
bot.say('8===D')
| [
"sopel.module.commands"
] | [((15, 45), 'sopel.module.commands', 'sopel.module.commands', (['"""winky"""'], {}), "('winky')\n", (36, 45), False, 'import sopel\n')] |
from keras.utils import to_categorical, plot_model
from keras.optimizers import SGD, Adam
from keras import backend
from sklearn.metrics import classification_report
import matplotlib.pyplot as plt
import numpy as np
from keras.preprocessing.image import ImageDataGenerator
from keras.callbacks import EarlyStopping
from cnn import Convolucao
import datetime
import h5py
import time
EPOCHS = 30
CLASS = 21
FILE_NAME = 'cnn_model_LIBRAS_'
def getDateStr():
return str('{date:%Y%m%d_%H%M}').format(date=datetime.datetime.now())
def getTimeMin(start, end):
return (end - start) / 60
print('[INFO] [START]: ' + getDateStr() + '\n')
print('[INFO] Downloading the dataset using keras.preprocessing.image.ImageDataGenerator')
train_dataGen = ImageDataGenerator(
rescale=1. / 255,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True,
validation_split=0.25)
test_dataGen = ImageDataGenerator(rescale=1. / 255, validation_split=0.25)
training_set = train_dataGen.flow_from_directory(
'../dataset/training',
target_size=(64, 64),
color_mode='rgb',
batch_size=32,
shuffle=False,
class_mode='categorical')
test_set = test_dataGen.flow_from_directory(
'../dataset/test',
target_size=(64, 64),
color_mode='rgb',
batch_size=32,
shuffle=False,
class_mode='categorical')
# initialize and optimize the model
print("[INFO] Initializing and optimizing the CNN...")
start = time.time()
early_stopping_monitor = EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=15)
model = Convolucao.build(64, 64, 3, CLASS)
model.compile(optimizer=SGD(0.01), loss="categorical_crossentropy",
metrics=["acc"])
# train the CNN
print("[INFO] Training the CNN...")
classifier = model.fit_generator(
training_set,
steps_per_epoch=(training_set.n // training_set.batch_size),
epochs=EPOCHS,
validation_data=test_set,
validation_steps=(test_set.n // test_set.batch_size),
shuffle=False,
verbose=2,
callbacks=[early_stopping_monitor]
)
# update the epoch count in case training stopped before reaching the configured number of epochs
EPOCHS = len(classifier.history["loss"])
print("[INFO] Salvando modelo treinado ...")
# para todos arquivos ficarem com a mesma data e hora. Armazeno na variavel
file_date = getDateStr()
model.save('../models/' + FILE_NAME + file_date + '.h5')
print('[INFO] model: ../models/' + FILE_NAME + file_date + '.h5 saved!')
end = time.time()
print("[INFO] Tempo de execução da CNN: %.1f min" % (getTimeMin(start, end)))
print('[INFO] Summary: ')
model.summary()
print("\n[INFO] Avaliando a CNN...")
score = model.evaluate_generator(generator=test_set, steps=(test_set.n // test_set.batch_size), verbose=1)
print('[INFO] Accuracy: %.2f%%' % (score[1] * 100), '| Loss: %.5f' % (score[0]))
print("[INFO] Sumarizando loss e accuracy para os datasets 'train' e 'test'")
plt.style.use("ggplot")
plt.figure()
plt.plot(np.arange(0, EPOCHS), classifier.history["loss"], label="train_loss")
plt.plot(np.arange(0, EPOCHS), classifier.history["val_loss"], label="val_loss")
plt.plot(np.arange(0, EPOCHS), classifier.history["acc"], label="train_acc")
plt.plot(np.arange(0, EPOCHS), classifier.history["val_acc"], label="val_acc")
plt.title("Training Loss and Accuracy")
plt.xlabel("Epoch #")
plt.ylabel("Loss/Accuracy")
plt.legend()
plt.savefig('../models/graphics/' + FILE_NAME + file_date + '.png', bbox_inches='tight')
print('[INFO] Generating an image of the CNN layer model')
plot_model(model, to_file='../models/image/' + FILE_NAME + file_date + '.png', show_shapes=True)
print('\n[INFO] [END]: ' + getDateStr())
print('\n\n')
| [
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.ylabel",
"numpy.arange",
"matplotlib.pyplot.xlabel",
"keras.utils.plot_model",
"cnn.Convolucao.build",
"matplotlib.pyplot.style.use",
"keras.preprocessing.image.ImageDataGenerator",
"datetime.datetime.now",
"matplotlib.pyplot.figure",
"keras.optimizers.SGD",
"keras.callbacks.EarlyStopping",
"matplotlib.pyplot.title",
"time.time",
"matplotlib.pyplot.legend"
] | [((748, 867), 'keras.preprocessing.image.ImageDataGenerator', 'ImageDataGenerator', ([], {'rescale': '(1.0 / 255)', 'shear_range': '(0.2)', 'zoom_range': '(0.2)', 'horizontal_flip': '(True)', 'validation_split': '(0.25)'}), '(rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2,\n horizontal_flip=True, validation_split=0.25)\n', (766, 867), False, 'from keras.preprocessing.image import ImageDataGenerator\n'), ((900, 960), 'keras.preprocessing.image.ImageDataGenerator', 'ImageDataGenerator', ([], {'rescale': '(1.0 / 255)', 'validation_split': '(0.25)'}), '(rescale=1.0 / 255, validation_split=0.25)\n', (918, 960), False, 'from keras.preprocessing.image import ImageDataGenerator\n'), ((1432, 1443), 'time.time', 'time.time', ([], {}), '()\n', (1441, 1443), False, 'import time\n'), ((1470, 1539), 'keras.callbacks.EarlyStopping', 'EarlyStopping', ([], {'monitor': '"""val_loss"""', 'mode': '"""min"""', 'verbose': '(1)', 'patience': '(15)'}), "(monitor='val_loss', mode='min', verbose=1, patience=15)\n", (1483, 1539), False, 'from keras.callbacks import EarlyStopping\n'), ((1549, 1583), 'cnn.Convolucao.build', 'Convolucao.build', (['(64)', '(64)', '(3)', 'CLASS'], {}), '(64, 64, 3, CLASS)\n', (1565, 1583), False, 'from cnn import Convolucao\n'), ((2465, 2476), 'time.time', 'time.time', ([], {}), '()\n', (2474, 2476), False, 'import time\n'), ((2905, 2928), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""ggplot"""'], {}), "('ggplot')\n", (2918, 2928), True, 'import matplotlib.pyplot as plt\n'), ((2929, 2941), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2939, 2941), True, 'import matplotlib.pyplot as plt\n'), ((3258, 3297), 'matplotlib.pyplot.title', 'plt.title', (['"""Training Loss and Accuracy"""'], {}), "('Training Loss and Accuracy')\n", (3267, 3297), True, 'import matplotlib.pyplot as plt\n'), ((3298, 3319), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Epoch #"""'], {}), "('Epoch #')\n", (3308, 3319), True, 'import matplotlib.pyplot as plt\n'), ((3320, 3347), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Loss/Accuracy"""'], {}), "('Loss/Accuracy')\n", (3330, 3347), True, 'import matplotlib.pyplot as plt\n'), ((3348, 3360), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (3358, 3360), True, 'import matplotlib.pyplot as plt\n'), ((3361, 3453), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('../models/graphics/' + FILE_NAME + file_date + '.png')"], {'bbox_inches': '"""tight"""'}), "('../models/graphics/' + FILE_NAME + file_date + '.png',\n bbox_inches='tight')\n", (3372, 3453), True, 'import matplotlib.pyplot as plt\n'), ((3510, 3610), 'keras.utils.plot_model', 'plot_model', (['model'], {'to_file': "('../models/image/' + FILE_NAME + file_date + '.png')", 'show_shapes': '(True)'}), "(model, to_file='../models/image/' + FILE_NAME + file_date +\n '.png', show_shapes=True)\n", (3520, 3610), False, 'from keras.utils import to_categorical, plot_model\n'), ((2951, 2971), 'numpy.arange', 'np.arange', (['(0)', 'EPOCHS'], {}), '(0, EPOCHS)\n', (2960, 2971), True, 'import numpy as np\n'), ((3030, 3050), 'numpy.arange', 'np.arange', (['(0)', 'EPOCHS'], {}), '(0, EPOCHS)\n', (3039, 3050), True, 'import numpy as np\n'), ((3111, 3131), 'numpy.arange', 'np.arange', (['(0)', 'EPOCHS'], {}), '(0, EPOCHS)\n', (3120, 3131), True, 'import numpy as np\n'), ((3188, 3208), 'numpy.arange', 'np.arange', (['(0)', 'EPOCHS'], {}), '(0, EPOCHS)\n', (3197, 3208), True, 'import numpy as np\n'), ((1608, 1617), 'keras.optimizers.SGD', 'SGD', (['(0.01)'], {}), '(0.01)\n', (1611, 1617), 
False, 'from keras.optimizers import SGD, Adam\n'), ((509, 532), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (530, 532), False, 'import datetime\n')] |
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from lib.vcd import VCDBaseActions
from xml.etree.ElementTree import Element, SubElement
class createVDCNetwork(VCDBaseActions):
def run(self, vcloud="default", data=None):
contenttype = "application/vnd.vmware.vcloud.orgVdcNetwork+xml"
self.set_connection(vcloud)
self.get_sessionid()
post = {}
all_orgs = self.get_orgs()
all_pvdcs = self.get_pvdcs("false")
for org in data:
for vdc in data[org]['vdcs']:
if org not in all_orgs.keys():
post["%s" % (org.lower())] = "Org does not exist"
continue
org_details = self.get_org(all_orgs[org]['id'])
if vdc not in org_details['vdcs'].keys():
post["%s (%s)" % (vdc.lower(), org.lower())] =\
"VDC does not exist"
continue
endpoint = "admin/vdc/%s/networks" % org_details['vdcs'][
vdc]['id']
pvdc = data[org]['vdcs'][vdc]['PVDC']
pvdc_details = self.get_pvdc_details(all_pvdcs[pvdc]['id'])
if 'org_network' not in data[org]['vdcs'][vdc]:
post["%s (%s)" % (vdc.lower(), org.lower())] =\
"No networks defined"
continue
for network in data[org]['vdcs'][vdc]['org_network']:
if network in org_details['vdcs'][vdc][
'availablenetworks'].keys():
post["%s (%s)" % (network.lower(), vdc.lower())] =\
"Network Already Exists"
continue
netdets = data[org]['vdcs'][vdc]['org_network'][network]
orgvdcnetwork = Element('OrgVdcNetwork')
orgvdcnetwork.set('xmlns',
'http://www.vmware.com/vcloud/v1.5')
orgvdcnetwork.set('name', network)
configuration = SubElement(orgvdcnetwork, 'Configuration')
orgvdcnetwork.extend(configuration)
if netdets['type'] == "bridged":
parent = netdets['parent']
if parent not in pvdc_details[
'external_networks'].keys():
post["%s (%s)" % (network.lower(), vdc.lower())] =\
"Parent Network Not Found"
continue
networkref = pvdc_details['external_networks'][
parent]['href']
networkref = networkref.replace(
"extension/externalnet", "network")
parentnetwork = SubElement(configuration,
'ParentNetwork')
parentnetwork.set('href', networkref)
fencemode = SubElement(configuration, 'FenceMode')
fencemode.text = netdets['type']
else:
post["%s (%s)" % (network.lower(), vdc.lower())] =\
"Unsupported Network Type"
post["%s (%s)" % (network.lower(), vdc.lower())] =\
self.vcd_post(endpoint,
orgvdcnetwork,
contenttype)
return post
| [
"xml.etree.ElementTree.Element",
"xml.etree.ElementTree.SubElement"
] | [((2580, 2604), 'xml.etree.ElementTree.Element', 'Element', (['"""OrgVdcNetwork"""'], {}), "('OrgVdcNetwork')\n", (2587, 2604), False, 'from xml.etree.ElementTree import Element, SubElement\n'), ((2819, 2861), 'xml.etree.ElementTree.SubElement', 'SubElement', (['orgvdcnetwork', '"""Configuration"""'], {}), "(orgvdcnetwork, 'Configuration')\n", (2829, 2861), False, 'from xml.etree.ElementTree import Element, SubElement\n'), ((3594, 3636), 'xml.etree.ElementTree.SubElement', 'SubElement', (['configuration', '"""ParentNetwork"""'], {}), "(configuration, 'ParentNetwork')\n", (3604, 3636), False, 'from xml.etree.ElementTree import Element, SubElement\n'), ((3787, 3825), 'xml.etree.ElementTree.SubElement', 'SubElement', (['configuration', '"""FenceMode"""'], {}), "(configuration, 'FenceMode')\n", (3797, 3825), False, 'from xml.etree.ElementTree import Element, SubElement\n')] |
"""hub URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.urls import path, include, reverse_lazy
from django.views.generic import RedirectView
from hub_app.admin import admin_site
urlpatterns = [ # pylint: disable=invalid-name
path('admin/', admin_site.urls, name='admin'),
path('client-configuration/i18n/', include('django.conf.urls.i18n')),
path('hub/', include(('hub_app.urls', 'hub_app'), namespace='ha')),
path('schema/', include(('hub_json_schema.urls', 'hub_json_schema'), namespace='json')),
url('^$', RedirectView.as_view(url=reverse_lazy('ha:home'), permanent=False), name='index'),
]
| [
"django.urls.reverse_lazy",
"django.urls.path",
"django.urls.include"
] | [((849, 894), 'django.urls.path', 'path', (['"""admin/"""', 'admin_site.urls'], {'name': '"""admin"""'}), "('admin/', admin_site.urls, name='admin')\n", (853, 894), False, 'from django.urls import path, include, reverse_lazy\n'), ((935, 967), 'django.urls.include', 'include', (['"""django.conf.urls.i18n"""'], {}), "('django.conf.urls.i18n')\n", (942, 967), False, 'from django.urls import path, include, reverse_lazy\n'), ((987, 1039), 'django.urls.include', 'include', (["('hub_app.urls', 'hub_app')"], {'namespace': '"""ha"""'}), "(('hub_app.urls', 'hub_app'), namespace='ha')\n", (994, 1039), False, 'from django.urls import path, include, reverse_lazy\n'), ((1062, 1132), 'django.urls.include', 'include', (["('hub_json_schema.urls', 'hub_json_schema')"], {'namespace': '"""json"""'}), "(('hub_json_schema.urls', 'hub_json_schema'), namespace='json')\n", (1069, 1132), False, 'from django.urls import path, include, reverse_lazy\n'), ((1174, 1197), 'django.urls.reverse_lazy', 'reverse_lazy', (['"""ha:home"""'], {}), "('ha:home')\n", (1186, 1197), False, 'from django.urls import path, include, reverse_lazy\n')] |
import numpy as np
import pandas as pd
import itertools
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.metrics import accuracy_score, confusion_matrix
# Read data
df = pd.read_csv("news.csv")
# Get shape and head
print(df.shape)
print(df.head())
# DataFlair - get labels
labels = df.label
print(labels.head())
# DataFlair - split dataset
x_train, x_test, y_train, y_test = train_test_split(df['text'], labels, test_size=0.2, random_state=7)
# DataFlair - initialize a TfidfVectorizer
tfidf_vectorizer = TfidfVectorizer(stop_words='english', max_df=0.7)
# DataFlair - fit and transform train set, transform test set
tfidf_train = tfidf_vectorizer.fit_transform(x_train)
tfidf_test = tfidf_vectorizer.transform(x_test)
# DataFlair - initialize a PassiveAggressiveClassifier
pac = PassiveAggressiveClassifier(max_iter=50)
pac.fit(tfidf_train, y_train)
# DataFlair - predict on test set and calculate accuracy
y_pred = pac.predict(tfidf_test)
score = accuracy_score(y_test, y_pred)
print(f'Accuracy: {round(score*100, 2)}%')
# DataFlair - build confusion matrix
print(confusion_matrix(y_test, y_pred, labels=['FAKE', 'REAL']))
| [
"pandas.read_csv",
"sklearn.model_selection.train_test_split",
"sklearn.linear_model.PassiveAggressiveClassifier",
"sklearn.feature_extraction.text.TfidfVectorizer",
"sklearn.metrics.accuracy_score",
"sklearn.metrics.confusion_matrix"
] | [((309, 332), 'pandas.read_csv', 'pd.read_csv', (['"""news.csv"""'], {}), "('news.csv')\n", (320, 332), True, 'import pandas as pd\n'), ((517, 584), 'sklearn.model_selection.train_test_split', 'train_test_split', (["df['text']", 'labels'], {'test_size': '(0.2)', 'random_state': '(7)'}), "(df['text'], labels, test_size=0.2, random_state=7)\n", (533, 584), False, 'from sklearn.model_selection import train_test_split\n'), ((648, 697), 'sklearn.feature_extraction.text.TfidfVectorizer', 'TfidfVectorizer', ([], {'stop_words': '"""english"""', 'max_df': '(0.7)'}), "(stop_words='english', max_df=0.7)\n", (663, 697), False, 'from sklearn.feature_extraction.text import TfidfVectorizer\n'), ((925, 965), 'sklearn.linear_model.PassiveAggressiveClassifier', 'PassiveAggressiveClassifier', ([], {'max_iter': '(50)'}), '(max_iter=50)\n', (952, 965), False, 'from sklearn.linear_model import PassiveAggressiveClassifier\n'), ((1095, 1125), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (1109, 1125), False, 'from sklearn.metrics import accuracy_score, confusion_matrix\n'), ((1213, 1270), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['y_test', 'y_pred'], {'labels': "['FAKE', 'REAL']"}), "(y_test, y_pred, labels=['FAKE', 'REAL'])\n", (1229, 1270), False, 'from sklearn.metrics import accuracy_score, confusion_matrix\n')] |
# -*- coding: utf-8 -*-
"""Utils for handling models.
- Author: Curt-Park
- Email: <EMAIL>
"""
from collections import OrderedDict
import hashlib
import os
import re
import tarfile
from typing import Any, Dict, List, Set, Tuple
import gdown
import numpy as np
import torch
import torch.nn as nn
import torch.nn.utils.prune as prune
import wandb
import yaml
def get_model(model_name: str, model_config: Dict[str, Any]) -> nn.Module:
"""Get PyTorch model."""
# get model constructor
return __import__("src.models." + model_name, fromlist=[model_name]).get_model(
**model_config
)
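# Illustrative call (the model name and config below are assumptions, not part
# of this utility):
#
#   model = get_model("densenet", {"num_classes": 10})
#
# which resolves to src.models.densenet.get_model(num_classes=10).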
def initialize_params(model: Any, state_dict: Dict[str, Any], with_mask=True) -> None:
"""Initialize weights and masks."""
model_dict = model.state_dict()
# 1. filter out unnecessary keys
pretrained_dict = OrderedDict()
for key_ori, key_pre in zip(model_dict.keys(), state_dict.keys()):
if with_mask or ("weight_mask" not in key_ori and "bias_mask" not in key_ori):
pretrained_dict[key_ori] = state_dict[key_pre]
    # 2. update the existing state dict and load it into the model
model_dict.update(pretrained_dict)
model.load_state_dict(model_dict)
def get_model_hash(model: nn.Module) -> str:
"""Get model info as hash."""
return hashlib.sha224(str(model).encode("UTF-8")).hexdigest()
def get_pretrained_model_info(model: nn.Module) -> Dict[str, str]:
"""Read yaml file, get pretrained model information(model_dir, gdrive_link) \
given hash."""
model_hash = str(get_model_hash(model))
with open("config/pretrained_model_url.yaml", mode="r") as f:
model_info = yaml.load(f, Loader=yaml.FullLoader)[model_hash]
return model_info
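# Illustrative layout of config/pretrained_model_url.yaml (assumed from the
# docstring above; keys and values are placeholders):
#
#   <sha224 hash of str(model)>:
#       model_dir: path/to/pretrained_checkpoint
#       gdrive_link: https://drive.google.com/uc?id=<file-id>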
def get_model_tensor_datatype(model: nn.Module) -> List[Tuple[str, torch.dtype]]:
"""Print all tensors data types."""
return [
(name, tensor.dtype)
for name, tensor in model.state_dict().items()
if hasattr(tensor, "dtype")
]
def get_params(
model: nn.Module, extract_conditions: Tuple[Tuple[Any, str], ...]
) -> Tuple[Tuple[nn.Module, str], ...]:
"""Get parameters(weight and bias) tuples for pruning."""
t = []
for module in model.modules():
for module_type, param_name in extract_conditions:
# it returns true when we try hasattr(even though it returns None)
if (
isinstance(module, module_type)
and getattr(module, param_name) is not None
):
t += [(module, param_name)]
return tuple(t)
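# Illustrative call (the module/parameter pairs below are an example, not
# mandated by the API):
#
#   params = get_params(model, ((nn.Conv2d, "weight"), (nn.Linear, "bias")))
#
# Only modules of the listed types whose named parameter is not None are
# returned, ready to be passed to dummy_pruning or prune.global_unstructured.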
def get_layernames(model: nn.Module) -> Set[str]:
"""Get parameters(weight and bias) layer name.
Notes:
No usage now, can be deprecated.
"""
t = set()
for name, param in model.named_parameters():
if not param.requires_grad:
continue
layer_name = name.rsplit(".", 1)[0]
t.add(layer_name)
return t
def get_model_size_mb(model: nn.Module) -> float:
"""Get the model file size."""
torch.save(model.state_dict(), "temp.p")
size = os.path.getsize("temp.p") / 1e6
os.remove("temp.p")
return size
def remove_pruning_reparameterization(
params_to_prune: Tuple[Tuple[nn.Module, str], ...]
) -> None:
"""Combine (weight_orig, weight_mask) and reduce the model size."""
for module, weight_type in params_to_prune:
prune.remove(module, weight_type)
def get_masks(model: nn.Module) -> Dict[str, torch.Tensor]:
"""Get masks from the model."""
mask = dict()
for k, v in model.state_dict().items():
if "mask" in k:
mask[k] = v.detach().cpu().clone()
return mask
def dummy_pruning(params_all: Tuple[Tuple[nn.Module, str], ...]) -> None:
"""Conduct fake pruning."""
prune.global_unstructured(
params_all, pruning_method=prune.L1Unstructured, amount=0.0,
)
def sparsity(
params_all: Tuple[Tuple[nn.Module, str], ...],
module_types: Tuple[Any, ...] = (
nn.Conv2d,
nn.Linear,
nn.BatchNorm1d,
nn.BatchNorm2d,
),
) -> float:
"""Get the proportion of zeros in weights (default: model's sparsity)."""
n_zero = n_total = 0
for module, param_name in params_all:
match = next((m for m in module_types if type(module) is m), None)
if not match:
continue
n_zero += int(torch.sum(getattr(module, param_name) == 0.0).item())
n_total += getattr(module, param_name).nelement()
return (100.0 * n_zero / n_total) if n_total != 0 else 0.0
def mask_sparsity(
params_all: Tuple[Tuple[nn.Module, str], ...],
module_types: Tuple[Any, ...] = (
nn.Conv2d,
nn.Linear,
nn.BatchNorm1d,
nn.BatchNorm2d,
),
) -> float:
"""Get the ratio of zeros in weight masks."""
n_zero = n_total = 0
for module, param_name in params_all:
match = next((m for m in module_types if type(module) is m), None)
if not match:
continue
param_mask_name = param_name + "_mask"
if hasattr(module, param_mask_name):
param = getattr(module, param_mask_name)
n_zero += int(torch.sum(param == 0.0).item())
n_total += param.nelement()
return (100.0 * n_zero / n_total) if n_total != 0 else 0.0
def download_pretrained_model(file_path: str, download_link: str) -> None:
"""Get pretrained model from google drive."""
model_folder, model_name, file_name = file_path.rsplit(os.path.sep, 2)
if not os.path.exists(model_folder):
os.makedirs(model_folder)
# Download, unzip
zip_file_path = os.path.join(model_folder, model_name + ".tar.xz")
gdown.download(download_link, zip_file_path)
with tarfile.open(zip_file_path, "r:*") as f:
f.extractall(model_folder)
def dot2bracket(s: str) -> str:
"""Replace layer names with valid names for pruning.
Test:
>>> dot2bracket("dense2.1.bn1.bias")
'dense2[1].bn1.bias'
>>> dot2bracket("dense2.13.bn1.bias")
'dense2[13].bn1.bias'
>>> dot2bracket("conv2.123.bn1.bias")
'conv2[123].bn1.bias'
>>> dot2bracket("dense2.6.conv2.5.bn1.bias")
'dense2[6].conv2[5].bn1.bias'
>>> dot2bracket("model.6")
'model[6]'
>>> dot2bracket("vgg.2.conv2.bn.2")
'vgg[2].conv2.bn[2]'
>>> dot2bracket("features.11")
'features[11]'
>>> dot2bracket("dense_blocks.0.0.conv1")
'dense_blocks[0][0].conv1'
"""
pattern = r"\.[0-9]+"
s_list = list(s)
for m in re.finditer(pattern, s):
start, end = m.span()
# e.g s_list == [..., ".", "0", ".", "0", ".", ...]
# step1: [..., "[", "0", "].", "0", ".", ...]
# step2: [..., "[", "0", "][", "0", "].", ...]
s_list[start] = s_list[start][:-1] + "["
if end < len(s) and s_list[end] == ".":
s_list[end] = "]."
else:
s_list.insert(end, "]")
return "".join(s_list)
def wlog_weight(model: nn.Module) -> None:
"""Log weights on wandb."""
wlog = dict()
for name, param in model.named_parameters():
if not param.requires_grad:
continue
layer_name, weight_type = name.rsplit(".", 1)
# get params(weight, bias, weight_orig)
if weight_type in ("weight", "bias", "weight_orig"):
w_name = "params/" + layer_name + "." + weight_type
weight = eval("model." + dot2bracket(layer_name) + "." + weight_type)
weight = weight.cpu().data.numpy()
wlog.update({w_name: wandb.Histogram(weight)})
else:
continue
# get masked weights
if weight_type == "weight_orig":
w_name = "params/" + layer_name + ".weight"
named_buffers = eval(
"model." + dot2bracket(layer_name) + ".named_buffers()"
)
mask: Tuple[str, torch.Tensor] = next(
x for x in list(named_buffers) if x[0] == "weight_mask"
)[1].cpu().data.numpy()
masked_weight = weight[np.where(mask == 1.0)]
wlog.update({w_name: wandb.Histogram(masked_weight)})
wandb.log(wlog, commit=False)
def split_channels(n_channels: int, n_chunks: int) -> List[int]:
"""Get splitted channel numbers.
It adds up all the remainders to the first chunck.
"""
split = [n_channels // n_chunks for _ in range(n_chunks)]
split[0] += n_channels - sum(split)
return split
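# Worked example (added for illustration): split_channels(10, 3) -> [4, 3, 3];
# the remainder of the integer division is absorbed by the first chunk.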
def count_model_params(model: nn.Module) -> int:
"""Count and return the total number of model params."""
return sum(p.numel() for p in model.parameters())
if __name__ == "__main__":
import doctest
doctest.testmod()
| [
"os.path.exists",
"collections.OrderedDict",
"os.path.getsize",
"wandb.log",
"tarfile.open",
"gdown.download",
"torch.nn.utils.prune.global_unstructured",
"torch.nn.utils.prune.remove",
"os.makedirs",
"numpy.where",
"os.path.join",
"yaml.load",
"doctest.testmod",
"re.finditer",
"torch.sum",
"wandb.Histogram",
"os.remove"
] | [((831, 844), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (842, 844), False, 'from collections import OrderedDict\n'), ((3082, 3101), 'os.remove', 'os.remove', (['"""temp.p"""'], {}), "('temp.p')\n", (3091, 3101), False, 'import os\n'), ((3746, 3836), 'torch.nn.utils.prune.global_unstructured', 'prune.global_unstructured', (['params_all'], {'pruning_method': 'prune.L1Unstructured', 'amount': '(0.0)'}), '(params_all, pruning_method=prune.L1Unstructured,\n amount=0.0)\n', (3771, 3836), True, 'import torch.nn.utils.prune as prune\n'), ((5596, 5646), 'os.path.join', 'os.path.join', (['model_folder', "(model_name + '.tar.xz')"], {}), "(model_folder, model_name + '.tar.xz')\n", (5608, 5646), False, 'import os\n'), ((5651, 5695), 'gdown.download', 'gdown.download', (['download_link', 'zip_file_path'], {}), '(download_link, zip_file_path)\n', (5665, 5695), False, 'import gdown\n'), ((6526, 6549), 're.finditer', 're.finditer', (['pattern', 's'], {}), '(pattern, s)\n', (6537, 6549), False, 'import re\n'), ((8141, 8170), 'wandb.log', 'wandb.log', (['wlog'], {'commit': '(False)'}), '(wlog, commit=False)\n', (8150, 8170), False, 'import wandb\n'), ((8677, 8694), 'doctest.testmod', 'doctest.testmod', ([], {}), '()\n', (8692, 8694), False, 'import doctest\n'), ((3046, 3071), 'os.path.getsize', 'os.path.getsize', (['"""temp.p"""'], {}), "('temp.p')\n", (3061, 3071), False, 'import os\n'), ((3353, 3386), 'torch.nn.utils.prune.remove', 'prune.remove', (['module', 'weight_type'], {}), '(module, weight_type)\n', (3365, 3386), True, 'import torch.nn.utils.prune as prune\n'), ((5490, 5518), 'os.path.exists', 'os.path.exists', (['model_folder'], {}), '(model_folder)\n', (5504, 5518), False, 'import os\n'), ((5528, 5553), 'os.makedirs', 'os.makedirs', (['model_folder'], {}), '(model_folder)\n', (5539, 5553), False, 'import os\n'), ((5705, 5739), 'tarfile.open', 'tarfile.open', (['zip_file_path', '"""r:*"""'], {}), "(zip_file_path, 'r:*')\n", (5717, 5739), False, 'import tarfile\n'), ((1624, 1660), 'yaml.load', 'yaml.load', (['f'], {'Loader': 'yaml.FullLoader'}), '(f, Loader=yaml.FullLoader)\n', (1633, 1660), False, 'import yaml\n'), ((8048, 8069), 'numpy.where', 'np.where', (['(mask == 1.0)'], {}), '(mask == 1.0)\n', (8056, 8069), True, 'import numpy as np\n'), ((7546, 7569), 'wandb.Histogram', 'wandb.Histogram', (['weight'], {}), '(weight)\n', (7561, 7569), False, 'import wandb\n'), ((8104, 8134), 'wandb.Histogram', 'wandb.Histogram', (['masked_weight'], {}), '(masked_weight)\n', (8119, 8134), False, 'import wandb\n'), ((5141, 5164), 'torch.sum', 'torch.sum', (['(param == 0.0)'], {}), '(param == 0.0)\n', (5150, 5164), False, 'import torch\n')] |
from pathlib import Path
from pprint import pprint
import itertools as it
import pyautogui
import inspect
import shutil
import glob
import json
import time
import os
def general(title, description=""):
def decorator(func):
func.title = title # Readable name
func.name = func.__name__ # Callable name
func.description = description
if not hasattr(func, 'is_google'):
func.is_google = False
if not hasattr(func, 'is_deck'):
func.is_deck = False
module_name = inspect.getmodule(func).__name__
if func.is_google:
func.push2run_dict = {
"Descrption": func.title, # "Descrption" is a typo in Push2Run so it is kept as is.
"ListenFor": '\r'.join(func.voice),
"Open": "python",
"Parameters": f'-c "import {module_name}; {module_name}.{func.name}()"',
"StartIn": os.environ.get('PYBIOSIS_USER_PATH'),
"Admin": True,
"StartingWindowState": 1,
"KeysToSend": ""
}
if func.is_deck:
func.deck_dict = {
'Name': 'Open',
'Settings': {
'openInBrowser': True,
'path': f'cmd.exe /c "cd /d {os.environ.get("PYBIOSIS_USER_PATH")} && python -c "import {module_name}; {module_name}.{func.name}();""',
},
'State': 0,
'States': [{'FFamily': '',
'FSize': '9',
'FStyle': '',
'FUnderline': 'off',
'Image': 'state0.png',
'Title': func.title,
'TitleAlignment': 'top',
'TitleColor': '#ffffff',
'TitleShow': ''
}],
'UUID': 'com.elgato.streamdeck.system.open'
}
return func
return decorator
def google(voice):
''' Google Assistant '''
if not hasattr(google, 'functions'):
google.functions = []
def decorator(func):
func.is_google = True
func.voice = [voice] if isinstance(voice, str) else voice
google.functions.append(func)
return func
return decorator
def deck(location, image=None):
''' Stream Deck '''
if not hasattr(deck, 'functions'):
deck.functions = []
def decorator(func):
func.is_deck = True
func.location = location
func.image = image
deck.functions.append(func)
return func
return decorator
def multi_phrase(*words):
""" This function accepts a sequence of lists, where each list corresponds to multiple versions of a word.
The returned value is a list of all the possible ways to say that sentence.
This is useful since the Google assistant may mis-hear a particular phrasing,
and this makes it easier to construct multiple variants.
"""
return list(map(' '.join, it.product(*words)))
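# Illustrative example (not part of the original module):
# multi_phrase(["open", "launch"], ["chrome"]) returns
# ["open chrome", "launch chrome"], i.e. every combination of the word variants.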
class DeckCompiler:
def __init__(self):
self.DECK_EXE = R'"C:\Program Files\Elgato\StreamDeck\StreamDeck.exe"'
self.DECK_PROFILES = fR"{os.getenv('APPDATA')}\Elgato\StreamDeck\ProfilesV2"
default_identifier = os.listdir(self.DECK_PROFILES)[0].replace('.sdProfile', '')
self.DECK_PROFILE_IDENTIFIER = os.getenv('PYBIOSIS_PROFILE_ID', default_identifier)
self.DECK_PROFILE_PATH = fR"{self.DECK_PROFILES}\{self.DECK_PROFILE_IDENTIFIER}.sdProfile"
if hasattr(deck, "functions"):
print(f'Summary of ({len(deck.functions)}) Deck Functions:')
print('============================')
for i, f in enumerate(deck.functions, 1):
title = f.title.replace('\n', ' ')
print(f'({i}) {title} - {f.description}')
print('\t', 'Location:', f.location)
print('\t', 'Image:', f.image)
print('----------------------------')
print('============================')
else:
print("There are no Stream Deck functions.")
def compile(self):
def ID_to_manifest(ID):
return json.load(open(self.DECK_PROFILE_PATH + '/Profiles/' + ID + '.sdProfile/manifest.json'))
def get_manifest_folders(manifest):
folders = {}
for location, details in manifest['Actions'].items():
if details['Name'] == 'Create Folder':
title = details['States'][0]['Title'] # Assumes only 1 state
ID = details['Settings']['ProfileUUID']
folders[title] = ID
return folders
def get_subfolders(ID):
return get_manifest_folders(ID_to_manifest(ID))
def recur(IDseq):
sf = get_subfolders(IDseq[-1][1])
new_IDseqs = []
for name, ID in sf.items():
new_IDseqs.append( IDseq + [(name, ID)] )
for seq in new_IDseqs[:]:
new_IDseqs.extend(recur(seq))
return new_IDseqs
if os.system('powershell.exe -command "taskkill /IM Streamdeck.exe /T /F"') == 0: # Requires terminal with admin priv.
time.sleep(1)
# Construct nested folders. Each folder is a different profile.
manifest = json.load(open(self.DECK_PROFILE_PATH + '/manifest.json'))
main_folders = get_manifest_folders(manifest)
nested_folders = {}
for title, ID in main_folders.items():
for sequence in recur([ (title, ID) ]):
path = '/'.join([folder for folder, ID in sequence])
final_ID = sequence[-1][1]
nested_folders[path.strip('\n')] = final_ID # Ignore newlines in path - which are accidentally typed into GUI
folders = {**main_folders, **nested_folders}
pprint(folders)
# Add in Deck Functions
for deck_function in deck.functions:
folder, coords = deck_function.location.rsplit('/', 1)
folder_ID = folders[folder]
folder_path = self.DECK_PROFILE_PATH + f'/Profiles/{folder_ID}.sdProfile/'
folder_manifest = json.load(open(folder_path + 'manifest.json'))
folder_manifest['Actions'][coords] = deck_function.deck_dict
# Image Support
if deck_function.image:
image_path = Path(os.environ.get("PYBIOSIS_USER_PATH")) / 'Icons' / deck_function.image
state_path = f'{folder_path}/{coords}/CustomImages'
if deck_function.image == 'default':
try:
os.remove(f'{state_path}/state0.png')
except OSError:
pass
try:
Path(state_path).rmdir()
except FileNotFoundError:
pass
else:
Path(state_path).mkdir(parents=True, exist_ok=True)
shutil.copyfile(image_path, f'{state_path}/state0.png')
json.dump(folder_manifest, open(f'{folder_path}/manifest.json', 'w'))
# os.system(Rf'cmd.exe /c start "" /min {self.DECK_EXE}')
os.system(Rf'start "" /max {self.DECK_EXE}')
time.sleep(1.5)
# Minimize by closing. Dangerous to alt-f4, cannot find alternative.
pyautogui.hotkey('alt', 'f4')
class GoogleCompiler:
def __init__(self):
self.PUSH2RUN_PROFILE_PATH = os.path.abspath(Path(__file__).parent / 'applications' / 'Push2Run' / 'pybiosis.p2r')
self.PUSH2RUN_LOCAL_DATA = fR"{os.getenv('LOCALAPPDATA')}\Rob_Latour"
self.PUSH2RUN_DATABASES = fR"{os.getenv('APPDATA')}\Push2Run\Push2Run*.db3"
if hasattr(google, "functions"):
print(f'Summary of ({len(google.functions)}) Google Functions:')
print('============================')
for i, f in enumerate(google.functions, 1):
title = f.title.replace('\n', ' ')
print(f'({i}) {title} - {f.description}')
print('\t', 'Commands:')
for command in f.voice:
print('\t\t', command)
print('----------------------------')
print('============================')
print('\n\n')
else:
print("There are no Google functions.")
def compile(self):
if os.system('powershell.exe -command "taskkill /IM Push2Run.exe /T /F"') == 0: # Requires terminal with admin priv.
time.sleep(2)
# There is a bug that makes push2run prompt for a password, deleting these files will prevent it from happening.
# https://push2run.com/passwordpromptfix.html
# import shutil
# shutil.rmtree(self.PUSH2RUN_LOCAL_DATA)
# This doesn't work either... Creates a clean version which prompts user even more.
# It actually seems random, so re-running the compilation often fixes it.
# Reset all push2run functions. Note that this leaves the 'calculator' command, but this isn't a big deal.
# I'm thinking an empty file "may" erase it: [], but I won't deal with this yet.
for file in glob.glob(str(self.PUSH2RUN_DATABASES)):
os.remove(file)
# Programmatic uploading was added to push2run: https://www.push2run.com/phpbb/viewforum.php?f=6
# Calculator is still added, but I think that is fine.
os.makedirs(os.path.dirname(self.PUSH2RUN_PROFILE_PATH), exist_ok=True)
with open(self.PUSH2RUN_PROFILE_PATH, "w") as f:
json.dump([m.push2run_dict for m in google.functions], f, indent=4)
os.system(f'powershell.exe -command "Start-Process -window minimized {self.PUSH2RUN_PROFILE_PATH}"')
| [
"pyautogui.hotkey",
"os.listdir",
"os.getenv",
"pathlib.Path",
"inspect.getmodule",
"itertools.product",
"os.environ.get",
"time.sleep",
"os.path.dirname",
"shutil.copyfile",
"os.system",
"pprint.pprint",
"json.dump",
"os.remove"
] | [((2793, 2845), 'os.getenv', 'os.getenv', (['"""PYBIOSIS_PROFILE_ID"""', 'default_identifier'], {}), "('PYBIOSIS_PROFILE_ID', default_identifier)\n", (2802, 2845), False, 'import os\n'), ((4885, 4900), 'pprint.pprint', 'pprint', (['folders'], {}), '(folders)\n', (4891, 4900), False, 'from pprint import pprint\n'), ((5949, 5992), 'os.system', 'os.system', (['f"""start "" /max {self.DECK_EXE}"""'], {}), '(f\'start "" /max {self.DECK_EXE}\')\n', (5958, 5992), False, 'import os\n'), ((5996, 6011), 'time.sleep', 'time.sleep', (['(1.5)'], {}), '(1.5)\n', (6006, 6011), False, 'import time\n'), ((6088, 6117), 'pyautogui.hotkey', 'pyautogui.hotkey', (['"""alt"""', '"""f4"""'], {}), "('alt', 'f4')\n", (6104, 6117), False, 'import pyautogui\n'), ((8112, 8222), 'os.system', 'os.system', (['f"""powershell.exe -command "Start-Process -window minimized {self.PUSH2RUN_PROFILE_PATH}\\""""'], {}), '(\n f\'powershell.exe -command "Start-Process -window minimized {self.PUSH2RUN_PROFILE_PATH}"\'\n )\n', (8121, 8222), False, 'import os\n'), ((481, 504), 'inspect.getmodule', 'inspect.getmodule', (['func'], {}), '(func)\n', (498, 504), False, 'import inspect\n'), ((2461, 2479), 'itertools.product', 'it.product', (['*words'], {}), '(*words)\n', (2471, 2479), True, 'import itertools as it\n'), ((4206, 4278), 'os.system', 'os.system', (['"""powershell.exe -command "taskkill /IM Streamdeck.exe /T /F\\""""'], {}), '(\'powershell.exe -command "taskkill /IM Streamdeck.exe /T /F"\')\n', (4215, 4278), False, 'import os\n'), ((4326, 4339), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (4336, 4339), False, 'import time\n'), ((6969, 7039), 'os.system', 'os.system', (['"""powershell.exe -command "taskkill /IM Push2Run.exe /T /F\\""""'], {}), '(\'powershell.exe -command "taskkill /IM Push2Run.exe /T /F"\')\n', (6978, 7039), False, 'import os\n'), ((7087, 7100), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (7097, 7100), False, 'import time\n'), ((7741, 7756), 'os.remove', 'os.remove', (['file'], {}), '(file)\n', (7750, 7756), False, 'import os\n'), ((7928, 7971), 'os.path.dirname', 'os.path.dirname', (['self.PUSH2RUN_PROFILE_PATH'], {}), '(self.PUSH2RUN_PROFILE_PATH)\n', (7943, 7971), False, 'import os\n'), ((8042, 8109), 'json.dump', 'json.dump', (['[m.push2run_dict for m in google.functions]', 'f'], {'indent': '(4)'}), '([m.push2run_dict for m in google.functions], f, indent=4)\n', (8051, 8109), False, 'import json\n'), ((804, 840), 'os.environ.get', 'os.environ.get', (['"""PYBIOSIS_USER_PATH"""'], {}), "('PYBIOSIS_USER_PATH')\n", (818, 840), False, 'import os\n'), ((2625, 2645), 'os.getenv', 'os.getenv', (['"""APPDATA"""'], {}), "('APPDATA')\n", (2634, 2645), False, 'import os\n'), ((6313, 6338), 'os.getenv', 'os.getenv', (['"""LOCALAPPDATA"""'], {}), "('LOCALAPPDATA')\n", (6322, 6338), False, 'import os\n'), ((6384, 6404), 'os.getenv', 'os.getenv', (['"""APPDATA"""'], {}), "('APPDATA')\n", (6393, 6404), False, 'import os\n'), ((2700, 2730), 'os.listdir', 'os.listdir', (['self.DECK_PROFILES'], {}), '(self.DECK_PROFILES)\n', (2710, 2730), False, 'import os\n'), ((5757, 5812), 'shutil.copyfile', 'shutil.copyfile', (['image_path', 'f"""{state_path}/state0.png"""'], {}), "(image_path, f'{state_path}/state0.png')\n", (5772, 5812), False, 'import shutil\n'), ((5526, 5563), 'os.remove', 'os.remove', (['f"""{state_path}/state0.png"""'], {}), "(f'{state_path}/state0.png')\n", (5535, 5563), False, 'import os\n'), ((1059, 1095), 'os.environ.get', 'os.environ.get', (['"""PYBIOSIS_USER_PATH"""'], {}), 
"('PYBIOSIS_USER_PATH')\n", (1073, 1095), False, 'import os\n'), ((5340, 5376), 'os.environ.get', 'os.environ.get', (['"""PYBIOSIS_USER_PATH"""'], {}), "('PYBIOSIS_USER_PATH')\n", (5354, 5376), False, 'import os\n'), ((5700, 5716), 'pathlib.Path', 'Path', (['state_path'], {}), '(state_path)\n', (5704, 5716), False, 'from pathlib import Path\n'), ((6210, 6224), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (6214, 6224), False, 'from pathlib import Path\n'), ((5615, 5631), 'pathlib.Path', 'Path', (['state_path'], {}), '(state_path)\n', (5619, 5631), False, 'from pathlib import Path\n')] |
import discord
from discord.ext import commands
import requests
import json
curData_url = 'https://v1.api.covindia.com/covindia-raw-data' # CovIndia API, the best data for India! Check out covindia.com
allData = {}
class covCog(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.command('covid')
async def covidAllData(self, ctx):
await ctx.channel.send(getTotalData())
@commands.command('covidState')
async def covidState(self, ctx):
await ctx.channel.send(getStateData())
def reloadData():
apiData = requests.get(curData_url).json()
global allData
allData = {}
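    # The loop below aggregates the raw records into a nested mapping of the form
    # {state: {district: {"infected": <int>, "dead": <int>}}}.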
for entry in apiData:
if apiData[entry]['state'] in allData:
if apiData[entry]['district'] in allData[apiData[entry]['state']]:
allData[apiData[entry]['state']][apiData[entry]['district']]['infected'] += apiData[entry]['infected']
allData[apiData[entry]['state']][apiData[entry]['district']]['dead'] += apiData[entry]['death']
else:
allData[apiData[entry]['state']][apiData[entry]['district']] = {}
allData[apiData[entry]['state']][apiData[entry]['district']]['infected'] = apiData[entry]['infected']
allData[apiData[entry]['state']][apiData[entry]['district']]['dead'] = apiData[entry]['death']
else:
allData[apiData[entry]['state']] = {}
allData[apiData[entry]['state']][apiData[entry]['district']] = {}
allData[apiData[entry]['state']][apiData[entry]['district']]['infected'] = apiData[entry]['infected']
allData[apiData[entry]['state']][apiData[entry]['district']]['dead'] = apiData[entry]['death']
def getTotalData():
reloadData()
totalInfected = 0
totalDead = 0
for stateBoi in allData:
for districtBoi in allData[stateBoi]:
totalInfected += allData[stateBoi][districtBoi]['infected']
totalDead += allData[stateBoi][districtBoi]['dead']
return 'Total cases in India :\nInfected : {}\nDead : {}'.format(totalInfected, totalDead)
def getStateData():
reloadData()
returnText = 'State-wise count :\n'
for stateBoi in allData:
stateInfected = 0
stateDead = 0
for districtBoi in allData[stateBoi]:
stateInfected += allData[stateBoi][districtBoi]['infected']
stateDead += allData[stateBoi][districtBoi]['dead']
returnText += '{}:\nInfected : {}\nDead : {}\n\n'.format(stateBoi, stateInfected, stateDead)
return returnText
def setup(bot):
bot.add_cog(covCog(bot)) | [
"discord.ext.commands.command",
"requests.get"
] | [((290, 315), 'discord.ext.commands.command', 'commands.command', (['"""covid"""'], {}), "('covid')\n", (306, 315), False, 'from discord.ext import commands\n'), ((397, 427), 'discord.ext.commands.command', 'commands.command', (['"""covidState"""'], {}), "('covidState')\n", (413, 427), False, 'from discord.ext import commands\n'), ((533, 558), 'requests.get', 'requests.get', (['curData_url'], {}), '(curData_url)\n', (545, 558), False, 'import requests\n')] |
from django.contrib import admin
from django.contrib.admin.options import StackedInline
from example_admintabs_project.example_app.models import Article, Category
from admin_tabs.helpers import TabbedModelAdmin, TabbedPageConfig, Config
class ArticlePageConfig(TabbedPageConfig):
class FieldsetsConfig:
titles = Config(fields=["title", "subtitle"], name="Title & Subtitle")
miscdata = Config(fields=["modified_at", "created_at", "is_online"], name="Dates & State")
content = Config(name="Content", fields=["content"])
authors = Config(name="Authors", inline="ArticleToUserInline")
categories = Config(name="Category", inline="ArticleToCategoryInline")
class ColsConfig:
content_col = Config(name="Contenu", fieldsets=["content"], css_classes=["col1"])
titles_col = Config(name="Titles", fieldsets=["titles", "miscdata"], css_classes=["col1"])
authors_col = Config(name="Authors", fieldsets=["authors"], css_classes=["col1"])
categories_col = Config(name="Categories", fieldsets=["categories"], css_classes=["col1"])
class TabsConfig:
main_tab = Config(name="Main", cols=["content_col", "titles_col"])
secondary_tab = Config(name="Relations", cols=["authors_col", "categories_col"])
class ArticleToUserInline(StackedInline):
model = Article.authors.through
class ArticleToCategoryInline(StackedInline):
model = Article.categories.through
class ArticleAdmin(TabbedModelAdmin):
page_config_class = ArticlePageConfig
readonly_fields = ('created_at', 'modified_at')
inlines = (ArticleToUserInline, ArticleToCategoryInline)
change_form_template = 'example_app/change_form.html'
class Media:
css = {
"all": ("example_app/css/jquery-ui-1.8.22.custom.css", "example_app/css/tabs.css")
}
js = ("example_app/js/jquery-ui-1.8.22.custom.min.js",) # Note: was modified to use django.jQuery and not jQuery
admin.site.register(Article, ArticleAdmin)
admin.site.register(Category) | [
"admin_tabs.helpers.Config",
"django.contrib.admin.site.register"
] | [((1978, 2020), 'django.contrib.admin.site.register', 'admin.site.register', (['Article', 'ArticleAdmin'], {}), '(Article, ArticleAdmin)\n', (1997, 2020), False, 'from django.contrib import admin\n'), ((2021, 2050), 'django.contrib.admin.site.register', 'admin.site.register', (['Category'], {}), '(Category)\n', (2040, 2050), False, 'from django.contrib import admin\n'), ((328, 389), 'admin_tabs.helpers.Config', 'Config', ([], {'fields': "['title', 'subtitle']", 'name': '"""Title & Subtitle"""'}), "(fields=['title', 'subtitle'], name='Title & Subtitle')\n", (334, 389), False, 'from admin_tabs.helpers import TabbedModelAdmin, TabbedPageConfig, Config\n'), ((409, 488), 'admin_tabs.helpers.Config', 'Config', ([], {'fields': "['modified_at', 'created_at', 'is_online']", 'name': '"""Dates & State"""'}), "(fields=['modified_at', 'created_at', 'is_online'], name='Dates & State')\n", (415, 488), False, 'from admin_tabs.helpers import TabbedModelAdmin, TabbedPageConfig, Config\n'), ((507, 549), 'admin_tabs.helpers.Config', 'Config', ([], {'name': '"""Content"""', 'fields': "['content']"}), "(name='Content', fields=['content'])\n", (513, 549), False, 'from admin_tabs.helpers import TabbedModelAdmin, TabbedPageConfig, Config\n'), ((568, 620), 'admin_tabs.helpers.Config', 'Config', ([], {'name': '"""Authors"""', 'inline': '"""ArticleToUserInline"""'}), "(name='Authors', inline='ArticleToUserInline')\n", (574, 620), False, 'from admin_tabs.helpers import TabbedModelAdmin, TabbedPageConfig, Config\n'), ((642, 699), 'admin_tabs.helpers.Config', 'Config', ([], {'name': '"""Category"""', 'inline': '"""ArticleToCategoryInline"""'}), "(name='Category', inline='ArticleToCategoryInline')\n", (648, 699), False, 'from admin_tabs.helpers import TabbedModelAdmin, TabbedPageConfig, Config\n'), ((749, 816), 'admin_tabs.helpers.Config', 'Config', ([], {'name': '"""Contenu"""', 'fieldsets': "['content']", 'css_classes': "['col1']"}), "(name='Contenu', fieldsets=['content'], css_classes=['col1'])\n", (755, 816), False, 'from admin_tabs.helpers import TabbedModelAdmin, TabbedPageConfig, Config\n'), ((838, 915), 'admin_tabs.helpers.Config', 'Config', ([], {'name': '"""Titles"""', 'fieldsets': "['titles', 'miscdata']", 'css_classes': "['col1']"}), "(name='Titles', fieldsets=['titles', 'miscdata'], css_classes=['col1'])\n", (844, 915), False, 'from admin_tabs.helpers import TabbedModelAdmin, TabbedPageConfig, Config\n'), ((938, 1005), 'admin_tabs.helpers.Config', 'Config', ([], {'name': '"""Authors"""', 'fieldsets': "['authors']", 'css_classes': "['col1']"}), "(name='Authors', fieldsets=['authors'], css_classes=['col1'])\n", (944, 1005), False, 'from admin_tabs.helpers import TabbedModelAdmin, TabbedPageConfig, Config\n'), ((1031, 1104), 'admin_tabs.helpers.Config', 'Config', ([], {'name': '"""Categories"""', 'fieldsets': "['categories']", 'css_classes': "['col1']"}), "(name='Categories', fieldsets=['categories'], css_classes=['col1'])\n", (1037, 1104), False, 'from admin_tabs.helpers import TabbedModelAdmin, TabbedPageConfig, Config\n'), ((1151, 1206), 'admin_tabs.helpers.Config', 'Config', ([], {'name': '"""Main"""', 'cols': "['content_col', 'titles_col']"}), "(name='Main', cols=['content_col', 'titles_col'])\n", (1157, 1206), False, 'from admin_tabs.helpers import TabbedModelAdmin, TabbedPageConfig, Config\n'), ((1231, 1295), 'admin_tabs.helpers.Config', 'Config', ([], {'name': '"""Relations"""', 'cols': "['authors_col', 'categories_col']"}), "(name='Relations', cols=['authors_col', 'categories_col'])\n", (1237, 
1295), False, 'from admin_tabs.helpers import TabbedModelAdmin, TabbedPageConfig, Config\n')] |
from functools import wraps
from flask import g, abort
def user_required(f):
"""Checks whether user is logged in or raises error 401."""
def decorator(*args, **kwargs):
if "user" not in g:
abort(401)
return f(*args, **kwargs)
return decorator
def admin_required(f):
"""Checks whether user is logged in or raises error 401."""
def decorator(*args, **kwargs):
if "user" not in g:
abort(401)
if not g.user.admin:
abort(403)
return f(*args, **kwargs)
return decorator
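# Illustrative usage (route and view names are placeholders, not from this module):
#   @app.route("/admin")
#   @admin_required
#   def admin_dashboard():
#       ...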
| [
"flask.abort"
] | [((190, 200), 'flask.abort', 'abort', (['(401)'], {}), '(401)\n', (195, 200), False, 'from flask import g, abort\n'), ((420, 430), 'flask.abort', 'abort', (['(401)'], {}), '(401)\n', (425, 430), False, 'from flask import g, abort\n'), ((472, 482), 'flask.abort', 'abort', (['(403)'], {}), '(403)\n', (477, 482), False, 'from flask import g, abort\n')] |
# usage: pass first_update, last_update and one or more HDF5 filenames
# as command-line arguments
import numpy as np
import h5py
import sys
import scipy.stats as stats
from tqdm import tqdm
import os
import pandas as pd
from keyname import keyname as kn
from fileshash import fileshash as fsh
from joblib import delayed, Parallel
first_update = int(sys.argv[1])
last_update = int(sys.argv[2])
filenames = sys.argv[3:]
# check all data is from same software source
assert len({kn.unpack(filename)['_source_hash'] for filename in filenames}) == 1
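# CalcSurroundedRate computes, over the requested update range, the mean number of
# directions with an outgoing-reproduction target set (!= -1) among live cells whose
# top-level channel matches that of every neighbour; CalcNotSurroundedRate is the
# complementary statistic for cells with at least one differing neighbour.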
def CalcSurroundedRate(filename):
file = h5py.File(filename, 'r')
nlev = int(file.attrs.get('NLEV'))
return np.mean([
sum(ro[idx] != -1 for ro in ros)
for ch, pc, dirs, live, ros in [
(
np.array(
file['Channel']['lev_'+str(nlev-1)]['upd_'+str(upd)]
).flatten(),
np.array(
file['PrevChan']['upd_'+str(upd)]
).flatten(),
[np.array(
file['Index'][dir_key]
).flatten() for dir_key in file['RepOutgoing']],
np.array(
file['Live']['upd_'+str(upd)]
).flatten(),
[np.array(
file['RepOutgoing'][dir_key]['upd_'+str(upd)]
).flatten() for dir_key in file['RepOutgoing']]
)
for upd in range(first_update, last_update)
]
for idx in range(file['Index']['own'].size)
if live[idx] and all(ch[idx] == ch[dir[idx]] for dir in dirs)
])
def SafeCalcSurroundedRate(filename):
try:
return CalcSurroundedRate(filename)
except Exception as e:
print("warning: corrupt or incomplete data file... skipping")
print(" ", e)
return None
def CalcNotSurroundedRate(filename):
file = h5py.File(filename, 'r')
nlev = int(file.attrs.get('NLEV'))
return np.mean([
sum(ro[idx] != -1 for ro in ros)
for ch, pc, dirs, live, ros in [
(
np.array(
file['Channel']['lev_'+str(nlev-1)]['upd_'+str(upd)]
).flatten(),
np.array(
file['PrevChan']['upd_'+str(upd)]
).flatten(),
[np.array(
file['Index'][dir_key]
).flatten() for dir_key in file['RepOutgoing']],
np.array(
file['Live']['upd_'+str(upd)]
).flatten(),
[np.array(
file['RepOutgoing'][dir_key]['upd_'+str(upd)]
).flatten() for dir_key in file['RepOutgoing']]
)
for upd in range(first_update, last_update)
]
for idx in range(file['Index']['own'].size)
if live[idx] and any(ch[idx] != ch[dir[idx]] for dir in dirs)
])
def SafeCalcNotSurroundedRate(filename):
try:
return CalcNotSurroundedRate(filename)
except Exception as e:
print("warning: corrupt or incomplete data file... skipping")
print(" ", e)
return None
print("num files:" , len(filenames))
outfile = kn.pack({
'_data_hathash_hash' : fsh.FilesHash().hash_files(filenames),
'_script_fullcat_hash' : fsh.FilesHash(
file_parcel="full_parcel",
files_join="cat_join"
).hash_files([sys.argv[0]]),
'_source_hash' :kn.unpack(filenames[0])['_source_hash'],
'title' : 'reproductive_labor_surrounded',
'ext' : '.csv'
})
pd.DataFrame.from_dict([
{
'Treatment' : kn.unpack(filename)['treat'],
'Reproduction Rate' : res,
'Channel 1 Surrounded' : 'True',
'First Update' : first_update,
'Last Update' : last_update
}
for res, filename in zip(
Parallel(n_jobs=-1)(
delayed(SafeCalcSurroundedRate)(filename)
for filename in tqdm(filenames)
), filenames
)
] + [
{
'Treatment' : kn.unpack(filename)['treat'],
'Reproduction Rate' : res,
'Channel 1 Surrounded' : 'False',
'First Update' : first_update,
'Last Update' : last_update
}
for res, filename in zip(
Parallel(n_jobs=-1)(
delayed(SafeCalcNotSurroundedRate)(filename)
for filename in tqdm(filenames)
), filenames
)
]).to_csv(outfile, index=False)
print('Output saved to', outfile)
| [
"keyname.keyname.unpack",
"tqdm.tqdm",
"h5py.File",
"joblib.Parallel",
"numpy.array",
"fileshash.fileshash.FilesHash",
"joblib.delayed"
] | [((545, 569), 'h5py.File', 'h5py.File', (['filename', '"""r"""'], {}), "(filename, 'r')\n", (554, 569), False, 'import h5py\n'), ((1859, 1883), 'h5py.File', 'h5py.File', (['filename', '"""r"""'], {}), "(filename, 'r')\n", (1868, 1883), False, 'import h5py\n'), ((3501, 3524), 'keyname.keyname.unpack', 'kn.unpack', (['filenames[0]'], {}), '(filenames[0])\n', (3510, 3524), True, 'from keyname import keyname as kn\n'), ((430, 449), 'keyname.keyname.unpack', 'kn.unpack', (['filename'], {}), '(filename)\n', (439, 449), True, 'from keyname import keyname as kn\n'), ((3216, 3231), 'fileshash.fileshash.FilesHash', 'fsh.FilesHash', ([], {}), '()\n', (3229, 3231), True, 'from fileshash import fileshash as fsh\n'), ((3284, 3347), 'fileshash.fileshash.FilesHash', 'fsh.FilesHash', ([], {'file_parcel': '"""full_parcel"""', 'files_join': '"""cat_join"""'}), "(file_parcel='full_parcel', files_join='cat_join')\n", (3297, 3347), True, 'from fileshash import fileshash as fsh\n'), ((3665, 3684), 'keyname.keyname.unpack', 'kn.unpack', (['filename'], {}), '(filename)\n', (3674, 3684), True, 'from keyname import keyname as kn\n'), ((4070, 4089), 'keyname.keyname.unpack', 'kn.unpack', (['filename'], {}), '(filename)\n', (4079, 4089), True, 'from keyname import keyname as kn\n'), ((3890, 3909), 'joblib.Parallel', 'Parallel', ([], {'n_jobs': '(-1)'}), '(n_jobs=-1)\n', (3898, 3909), False, 'from joblib import delayed, Parallel\n'), ((4296, 4315), 'joblib.Parallel', 'Parallel', ([], {'n_jobs': '(-1)'}), '(n_jobs=-1)\n', (4304, 4315), False, 'from joblib import delayed, Parallel\n'), ((984, 1016), 'numpy.array', 'np.array', (["file['Index'][dir_key]"], {}), "(file['Index'][dir_key])\n", (992, 1016), True, 'import numpy as np\n'), ((2298, 2330), 'numpy.array', 'np.array', (["file['Index'][dir_key]"], {}), "(file['Index'][dir_key])\n", (2306, 2330), True, 'import numpy as np\n'), ((3923, 3954), 'joblib.delayed', 'delayed', (['SafeCalcSurroundedRate'], {}), '(SafeCalcSurroundedRate)\n', (3930, 3954), False, 'from joblib import delayed, Parallel\n'), ((3993, 4008), 'tqdm.tqdm', 'tqdm', (['filenames'], {}), '(filenames)\n', (3997, 4008), False, 'from tqdm import tqdm\n'), ((4329, 4363), 'joblib.delayed', 'delayed', (['SafeCalcNotSurroundedRate'], {}), '(SafeCalcNotSurroundedRate)\n', (4336, 4363), False, 'from joblib import delayed, Parallel\n'), ((4402, 4417), 'tqdm.tqdm', 'tqdm', (['filenames'], {}), '(filenames)\n', (4406, 4417), False, 'from tqdm import tqdm\n')] |
from xtbservice.ir import ir_from_smiles
from .help import clear_caches, filter_modes
import pytest
def get_ir_modes(modes, threshold=0.01):
return [mode for mode in modes if mode['intensity'] > threshold]
def get_raman_modes(modes, threshold=0.01):
return [mode for mode in modes if mode['ramanIntensity'] > threshold]
@pytest.mark.parametrize("technique", ["GFN2xTB", "GFNFF"])
def test_co2(technique):
"""3N-5 with center of inversion"""
clear_caches()
ir = ir_from_smiles('C(=O)=O', technique)
modes = filter_modes(ir.modes)
raman_intensities = get_raman_modes(modes)
assert len(raman_intensities) == 1
ir_intensities = get_ir_modes(modes)
assert len(ir_intensities) == 3
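# Background note: CO2 has a centre of symmetry, so the mutual exclusion rule applies;
# the symmetric stretch is Raman-active only, while the doubly degenerate bend and the
# asymmetric stretch account for the three IR-active modes checked above.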
@pytest.mark.parametrize("technique", ["GFN2xTB", "GFNFF"])
def test_cos(technique):
"""3N-5 no center of symmetry"""
print(technique)
clear_caches()
ir = ir_from_smiles('C(=O)=S', technique)
modes = filter_modes(ir.modes)
raman_intensities = get_raman_modes(modes)
assert len(raman_intensities) == 4
    ir_intensities = get_ir_modes(modes)
assert len(ir_intensities) == 4
@pytest.mark.parametrize("technique", ["GFN2xTB", "GFNFF"])
def test_h2o(technique):
"""3N-6 DOF"""
clear_caches()
ir = ir_from_smiles('O', technique)
modes = filter_modes(ir.modes)
raman_intensities = get_raman_modes(modes)
assert len(raman_intensities) == 3
    ir_intensities = get_ir_modes(modes)
assert len(ir_intensities) == 3
@pytest.mark.parametrize("technique", ["GFN2xTB", "GFNFF"])
def test_ch4(technique):
"""3N-6 DOF"""
clear_caches()
ir = ir_from_smiles('C', technique)
modes = filter_modes(ir.modes)
raman_intensities = get_raman_modes(modes)
assert len(raman_intensities) == 9
    ir_intensities = get_ir_modes(modes)
assert len(ir_intensities) == 6 | [
"pytest.mark.parametrize",
"xtbservice.ir.ir_from_smiles"
] | [((333, 391), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""technique"""', "['GFN2xTB', 'GFNFF']"], {}), "('technique', ['GFN2xTB', 'GFNFF'])\n", (356, 391), False, 'import pytest\n'), ((727, 785), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""technique"""', "['GFN2xTB', 'GFNFF']"], {}), "('technique', ['GFN2xTB', 'GFNFF'])\n", (750, 785), False, 'import pytest\n'), ((1137, 1195), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""technique"""', "['GFN2xTB', 'GFNFF']"], {}), "('technique', ['GFN2xTB', 'GFNFF'])\n", (1160, 1195), False, 'import pytest\n'), ((1501, 1559), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""technique"""', "['GFN2xTB', 'GFNFF']"], {}), "('technique', ['GFN2xTB', 'GFNFF'])\n", (1524, 1559), False, 'import pytest\n'), ((485, 521), 'xtbservice.ir.ir_from_smiles', 'ir_from_smiles', (['"""C(=O)=O"""', 'technique'], {}), "('C(=O)=O', technique)\n", (499, 521), False, 'from xtbservice.ir import ir_from_smiles\n'), ((897, 933), 'xtbservice.ir.ir_from_smiles', 'ir_from_smiles', (['"""C(=O)=S"""', 'technique'], {}), "('C(=O)=S', technique)\n", (911, 933), False, 'from xtbservice.ir import ir_from_smiles\n'), ((1268, 1298), 'xtbservice.ir.ir_from_smiles', 'ir_from_smiles', (['"""O"""', 'technique'], {}), "('O', technique)\n", (1282, 1298), False, 'from xtbservice.ir import ir_from_smiles\n'), ((1632, 1662), 'xtbservice.ir.ir_from_smiles', 'ir_from_smiles', (['"""C"""', 'technique'], {}), "('C', technique)\n", (1646, 1662), False, 'from xtbservice.ir import ir_from_smiles\n')] |
def run_create_and_import():
# IMPORTS FOR CHECKING IF DB IS UP TO DATE
import datetime as dt
#
# IMPORTS FOR EDGAR
from sec_api import QueryApi
# IMPORTS FOR SQL DB CONNECTION
import pymysql
import Import_Data_To_DB as import_data
#CHANGE FIELDS TO YOUR OWN PERSONAL FIELDS
db = pymysql.connect(
host = 'your database hosting site',
user = 'your username',
password = '<PASSWORD>',
db = 'Name of the database you are connecting to'
)
c = db.cursor()
print("For this demo, we're calling our table \'All_Holdings_Raw_Data\'")
#THIS IS WHAT WE CHOSE OUR TABLE NAME TO BE:
Your_Table_Name = "All_Holdings_Raw_Data"
# CREATE TABLE
sql = '''
create table %s (
filingDate text,
shares int,
value int,
cusip varchar(255),
nameOfIssuer text,
CIK int
)
''' % Your_Table_Name
c.execute(sql)
    # Choose how far back you want the data from; we chose 4 quarters' worth.
number_of_quarters = 4
import_data.run_import(number_of_quarters)
return Your_Table_Name | [
"Import_Data_To_DB.run_import",
"pymysql.connect"
] | [((323, 471), 'pymysql.connect', 'pymysql.connect', ([], {'host': '"""your database hosting site"""', 'user': '"""your username"""', 'password': '"""<PASSWORD>"""', 'db': '"""Name of the database you are connecting to"""'}), "(host='your database hosting site', user='your username',\n password='<PASSWORD>', db='Name of the database you are connecting to')\n", (338, 471), False, 'import pymysql\n'), ((1026, 1068), 'Import_Data_To_DB.run_import', 'import_data.run_import', (['number_of_quarters'], {}), '(number_of_quarters)\n', (1048, 1068), True, 'import Import_Data_To_DB as import_data\n')] |
import numpy as np
import logging
logger = logging.getLogger(__name__)
def parse_markov_matrix(matrix):
if not isinstance(matrix, np.ndarray):
raise TypeError('The matrix should be a numpy array')
return matrix.reshape(-1, 1).ravel()
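# Illustrative example (not part of the original module):
# parse_markov_matrix(np.array([[0.9, 0.1], [0.2, 0.8]])) returns
# array([0.9, 0.1, 0.2, 0.8]), i.e. the matrix flattened in row-major order.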
| [
"logging.getLogger"
] | [((47, 74), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (64, 74), False, 'import logging\n')] |
#!/usr/bin/env python
from setuptools import find_packages, setup
from przelewy24 import VERSION
setup(
name = 'django_oscar_przelewy24',
version = VERSION,
description = 'Przelewy24.pl payment gateway for django-oscar e-commerce',
long_description=open('README.md').read(),
author = '<NAME>',
author_email = '<EMAIL>',
url = 'https://github.com/kisiel/django-oscar-przelewy24',
license='BSD License',
platforms=['OS Independent'],
packages=find_packages(exclude=['sandbox*', 'tests*']),
include_package_data=True,
keywords = ['django', 'oscar', 'przelewy24.pl', 'e-commerce'],
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
],
install_requires=[
'Django>=1.6',
'django-oscar>=1.0.2',
'pycountry>=1.10',
'requests>=2.5.1'
],
) | [
"setuptools.find_packages"
] | [((464, 509), 'setuptools.find_packages', 'find_packages', ([], {'exclude': "['sandbox*', 'tests*']"}), "(exclude=['sandbox*', 'tests*'])\n", (477, 509), False, 'from setuptools import find_packages, setup\n')] |
from opendc.models.scenario import Scenario
from opendc.models.portfolio import Portfolio
from opendc.util.rest import Response
def GET(request):
"""Get this Scenario."""
request.check_required_parameters(path={'scenarioId': 'string'})
scenario = Scenario.from_id(request.params_path['scenarioId'])
scenario.check_exists()
scenario.check_user_access(request.google_id, False)
return Response(200, 'Successfully retrieved scenario.', scenario.obj)
def PUT(request):
"""Update this Scenarios name."""
request.check_required_parameters(path={'scenarioId': 'string'}, body={'scenario': {
'name': 'string',
}})
scenario = Scenario.from_id(request.params_path['scenarioId'])
scenario.check_exists()
scenario.check_user_access(request.google_id, True)
scenario.set_property('name',
request.params_body['scenario']['name'])
scenario.update()
return Response(200, 'Successfully updated scenario.', scenario.obj)
def DELETE(request):
"""Delete this Scenario."""
request.check_required_parameters(path={'scenarioId': 'string'})
scenario = Scenario.from_id(request.params_path['scenarioId'])
scenario.check_exists()
scenario.check_user_access(request.google_id, True)
scenario_id = scenario.get_id()
portfolio = Portfolio.from_id(scenario.obj['portfolioId'])
portfolio.check_exists()
if scenario_id in portfolio.obj['scenarioIds']:
portfolio.obj['scenarioIds'].remove(scenario_id)
portfolio.update()
old_object = scenario.delete()
return Response(200, 'Successfully deleted scenario.', old_object)
| [
"opendc.models.portfolio.Portfolio.from_id",
"opendc.models.scenario.Scenario.from_id",
"opendc.util.rest.Response"
] | [((263, 314), 'opendc.models.scenario.Scenario.from_id', 'Scenario.from_id', (["request.params_path['scenarioId']"], {}), "(request.params_path['scenarioId'])\n", (279, 314), False, 'from opendc.models.scenario import Scenario\n'), ((413, 476), 'opendc.util.rest.Response', 'Response', (['(200)', '"""Successfully retrieved scenario."""', 'scenario.obj'], {}), "(200, 'Successfully retrieved scenario.', scenario.obj)\n", (421, 476), False, 'from opendc.util.rest import Response\n'), ((675, 726), 'opendc.models.scenario.Scenario.from_id', 'Scenario.from_id', (["request.params_path['scenarioId']"], {}), "(request.params_path['scenarioId'])\n", (691, 726), False, 'from opendc.models.scenario import Scenario\n'), ((949, 1010), 'opendc.util.rest.Response', 'Response', (['(200)', '"""Successfully updated scenario."""', 'scenario.obj'], {}), "(200, 'Successfully updated scenario.', scenario.obj)\n", (957, 1010), False, 'from opendc.util.rest import Response\n'), ((1152, 1203), 'opendc.models.scenario.Scenario.from_id', 'Scenario.from_id', (["request.params_path['scenarioId']"], {}), "(request.params_path['scenarioId'])\n", (1168, 1203), False, 'from opendc.models.scenario import Scenario\n'), ((1343, 1389), 'opendc.models.portfolio.Portfolio.from_id', 'Portfolio.from_id', (["scenario.obj['portfolioId']"], {}), "(scenario.obj['portfolioId'])\n", (1360, 1389), False, 'from opendc.models.portfolio import Portfolio\n'), ((1599, 1658), 'opendc.util.rest.Response', 'Response', (['(200)', '"""Successfully deleted scenario."""', 'old_object'], {}), "(200, 'Successfully deleted scenario.', old_object)\n", (1607, 1658), False, 'from opendc.util.rest import Response\n')] |
import myspokenlanguagedetection as mysp
p=r"C:\Users\Shahab\Desktop\pack"
m="russian"
mysp.myspolangdet(m,p)
| [
"myspokenlanguagedetection.myspolangdet"
] | [((94, 117), 'myspokenlanguagedetection.myspolangdet', 'mysp.myspolangdet', (['m', 'p'], {}), '(m, p)\n', (111, 117), True, 'import myspokenlanguagedetection as mysp\n')] |
"""Support for the CO2signal platform."""
from datetime import timedelta
import logging
import CO2Signal
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.components.sensor import PLATFORM_SCHEMA, SensorEntity
from homeassistant.const import (
ATTR_ATTRIBUTION,
ATTR_IDENTIFIERS,
ATTR_MANUFACTURER,
ATTR_NAME,
CONF_API_KEY,
CONF_LATITUDE,
CONF_LONGITUDE,
CONF_TOKEN,
ENERGY_KILO_WATT_HOUR,
)
import homeassistant.helpers.config_validation as cv
from .const import ATTRIBUTION, CONF_COUNTRY_CODE, DOMAIN, MSG_LOCATION
from .util import get_extra_name
_LOGGER = logging.getLogger(__name__)
SCAN_INTERVAL = timedelta(minutes=3)
CO2_INTENSITY_UNIT = f"CO2eq/{ENERGY_KILO_WATT_HOUR}"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_TOKEN): cv.string,
vol.Inclusive(CONF_LATITUDE, "coords", msg=MSG_LOCATION): cv.latitude,
vol.Inclusive(CONF_LONGITUDE, "coords", msg=MSG_LOCATION): cv.longitude,
vol.Optional(CONF_COUNTRY_CODE): cv.string,
}
)
async def async_setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the CO2signal sensor."""
await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_IMPORT},
data=config,
)
async def async_setup_entry(hass, entry, async_add_entities):
"""Set up the CO2signal sensor."""
name = "CO2 intensity"
if extra_name := get_extra_name(hass, entry.data):
name += f" - {extra_name}"
async_add_entities(
[
CO2Sensor(
name,
entry.data,
entry_id=entry.entry_id,
)
],
True,
)
class CO2Sensor(SensorEntity):
"""Implementation of the CO2Signal sensor."""
_attr_icon = "mdi:molecule-co2"
_attr_unit_of_measurement = CO2_INTENSITY_UNIT
def __init__(self, name, config, entry_id):
"""Initialize the sensor."""
self._config = config
self._attr_name = name
self._attr_extra_state_attributes = {ATTR_ATTRIBUTION: ATTRIBUTION}
self._attr_device_info = {
ATTR_IDENTIFIERS: {(DOMAIN, entry_id)},
ATTR_NAME: "CO2 signal",
ATTR_MANUFACTURER: "Tmrow.com",
"entry_type": "service",
}
self._attr_unique_id = f"{entry_id}_co2intensity"
def update(self):
"""Get the latest data and updates the states."""
_LOGGER.debug("Update data for %s", self.name)
if CONF_COUNTRY_CODE in self._config:
kwargs = {"country_code": self._config[CONF_COUNTRY_CODE]}
elif CONF_LATITUDE in self._config:
kwargs = {
"latitude": self._config[CONF_LATITUDE],
"longitude": self._config[CONF_LONGITUDE],
}
else:
kwargs = {
"latitude": self.hass.config.latitude,
"longitude": self.hass.config.longitude,
}
self._attr_state = round(
CO2Signal.get_latest_carbon_intensity(self._config[CONF_API_KEY], **kwargs),
2,
)
| [
"logging.getLogger",
"voluptuous.Inclusive",
"voluptuous.Required",
"CO2Signal.get_latest_carbon_intensity",
"datetime.timedelta",
"voluptuous.Optional"
] | [((635, 662), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (652, 662), False, 'import logging\n'), ((679, 699), 'datetime.timedelta', 'timedelta', ([], {'minutes': '(3)'}), '(minutes=3)\n', (688, 699), False, 'from datetime import timedelta\n'), ((811, 835), 'voluptuous.Required', 'vol.Required', (['CONF_TOKEN'], {}), '(CONF_TOKEN)\n', (823, 835), True, 'import voluptuous as vol\n'), ((856, 912), 'voluptuous.Inclusive', 'vol.Inclusive', (['CONF_LATITUDE', '"""coords"""'], {'msg': 'MSG_LOCATION'}), "(CONF_LATITUDE, 'coords', msg=MSG_LOCATION)\n", (869, 912), True, 'import voluptuous as vol\n'), ((935, 992), 'voluptuous.Inclusive', 'vol.Inclusive', (['CONF_LONGITUDE', '"""coords"""'], {'msg': 'MSG_LOCATION'}), "(CONF_LONGITUDE, 'coords', msg=MSG_LOCATION)\n", (948, 992), True, 'import voluptuous as vol\n'), ((1016, 1047), 'voluptuous.Optional', 'vol.Optional', (['CONF_COUNTRY_CODE'], {}), '(CONF_COUNTRY_CODE)\n', (1028, 1047), True, 'import voluptuous as vol\n'), ((3080, 3155), 'CO2Signal.get_latest_carbon_intensity', 'CO2Signal.get_latest_carbon_intensity', (['self._config[CONF_API_KEY]'], {}), '(self._config[CONF_API_KEY], **kwargs)\n', (3117, 3155), False, 'import CO2Signal\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function, division
import json, os
from os import path as osp
from zipfile import ZipFile
from gluoncv.utils import download
def extract(fpath, exdir):
print("Extracting zip file")
with ZipFile(fpath) as z:
z.extractall(path=exdir)
print("Extracting Done")
def make_list(exdir):
train_dir = osp.join(exdir, "bounding_box_train")
train_list = {}
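    # Market-1501 image names look like e.g. "0002_c1s1_000451_03.jpg": the prefix
    # before the first "_" is the person id (pid) and the digit after "c" is the
    # camera id (pcam) parsed below.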
for _, _, files in os.walk(train_dir, topdown=False):
for name in files:
if '.jpg' in name:
name_split = name.split('_')
pid = name_split[0]
pcam = name_split[1][1]
if pid not in train_list:
train_list[pid] = []
train_list[pid].append({"name":name, "pid":pid, "pcam":pcam})
with open(osp.join(exdir, 'train.txt'), 'w') as f:
for i, key in enumerate(train_list):
for item in train_list[key]:
f.write(item['name']+" "+str(i)+" "+item["pcam"]+"\n")
print("Make Label List Done")
def main():
name = "Market-1501-v15.09.15"
url = "http://apache-mxnet.s3-accelerate.dualstack.amazonaws.com/gluon/dataset/"+name+".zip"
root = osp.expanduser("~/.mxnet/datasets")
if not os.path.exists(root):
os.mkdir(root)
fpath = osp.join(root, name+'.zip')
exdir = osp.join(root, name)
if os.path.exists(fpath):
if not osp.isdir(exdir):
extract(fpath, root)
make_list(exdir)
else:
download(url, fpath, False)
extract(fpath, root)
make_list(exdir)
if __name__ == '__main__':
main()
| [
"os.path.exists",
"zipfile.ZipFile",
"os.walk",
"os.path.join",
"os.path.isdir",
"os.mkdir",
"gluoncv.utils.download",
"os.path.expanduser"
] | [((394, 431), 'os.path.join', 'osp.join', (['exdir', '"""bounding_box_train"""'], {}), "(exdir, 'bounding_box_train')\n", (402, 431), True, 'from os import path as osp\n'), ((475, 508), 'os.walk', 'os.walk', (['train_dir'], {'topdown': '(False)'}), '(train_dir, topdown=False)\n', (482, 508), False, 'import json, os\n'), ((1255, 1290), 'os.path.expanduser', 'osp.expanduser', (['"""~/.mxnet/datasets"""'], {}), "('~/.mxnet/datasets')\n", (1269, 1290), True, 'from os import path as osp\n'), ((1359, 1388), 'os.path.join', 'osp.join', (['root', "(name + '.zip')"], {}), "(root, name + '.zip')\n", (1367, 1388), True, 'from os import path as osp\n'), ((1399, 1419), 'os.path.join', 'osp.join', (['root', 'name'], {}), '(root, name)\n', (1407, 1419), True, 'from os import path as osp\n'), ((1428, 1449), 'os.path.exists', 'os.path.exists', (['fpath'], {}), '(fpath)\n', (1442, 1449), False, 'import json, os\n'), ((272, 286), 'zipfile.ZipFile', 'ZipFile', (['fpath'], {}), '(fpath)\n', (279, 286), False, 'from zipfile import ZipFile\n'), ((1302, 1322), 'os.path.exists', 'os.path.exists', (['root'], {}), '(root)\n', (1316, 1322), False, 'import json, os\n'), ((1332, 1346), 'os.mkdir', 'os.mkdir', (['root'], {}), '(root)\n', (1340, 1346), False, 'import json, os\n'), ((1577, 1604), 'gluoncv.utils.download', 'download', (['url', 'fpath', '(False)'], {}), '(url, fpath, False)\n', (1585, 1604), False, 'from gluoncv.utils import download\n'), ((866, 894), 'os.path.join', 'osp.join', (['exdir', '"""train.txt"""'], {}), "(exdir, 'train.txt')\n", (874, 894), True, 'from os import path as osp\n'), ((1466, 1482), 'os.path.isdir', 'osp.isdir', (['exdir'], {}), '(exdir)\n', (1475, 1482), True, 'from os import path as osp\n')] |
import statistics
import solve
import utils
def test_parse_horizontal_positions():
line = "3,4,3,1,2"
result = utils.parse_horizontal_positions(line)
assert result == [3, 4, 3, 1, 2]
def test_parse_horizontal_positions_single_value():
line = "3"
result = utils.parse_horizontal_positions(line)
assert result == [3]
def test_parse_horizontal_positions_empty():
line = ""
result = utils.parse_horizontal_positions(line)
assert result == []
def test_distance_to_median():
values = [1, 2, 7, 14, 21, 3, 10]
expected = 39
total = utils.distance_to_median(values)
assert total == expected
def test_distance_to_mean():
values = [1, 2, 7, 14, 21, 3, 10]
expected = 40
total = utils.distance_to_mean(values)
assert total == expected
def test_part1_sample_input():
input_data = [
"16,1,2,0,4,2,7,1,2,14"
]
result = solve.part_1(input_data)
assert result == 37
def test_nth_triangular_zero():
x = 0
result = utils.nth_triangular(x)
assert result == 0
def test_nth_triangular():
x = 11
result = utils.nth_triangular(x)
assert result == 66
def test_nth_triangular_distances():
numbers = [16, 1, 2, 0, 4, 2, 7, 1, 2, 14]
value = 5
result = utils.nth_triangular_distances(numbers, value)
assert result == [66, 10, 6, 15, 1, 6, 3, 10, 6, 45]
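# Worked check: each entry is the triangular number of the distance to 5,
# e.g. |16 - 5| = 11 gives 11 * 12 / 2 = 66 and |0 - 5| = 5 gives 5 * 6 / 2 = 15.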
def test_weighted_mean_equal_weights():
numbers = [16, 1, 2, 0, 4, 2, 7, 1, 2, 14]
weights = [1] * len(numbers)
result = utils.weighted_mean(numbers, weights)
assert result == statistics.mean(numbers)
def test_weighted_mean_skewed_weights():
numbers = [5, 5, 5, 10]
weights = [1, 1, 1, 3]
expected = 7.5
result = utils.weighted_mean(numbers, weights)
assert result == expected
def test_part2_sample_input():
input_data = [
"16,1,2,0,4,2,7,1,2,14"
]
result = solve.part_2(input_data)
assert result == 168
| [
"statistics.mean",
"utils.nth_triangular",
"utils.distance_to_mean",
"solve.part_2",
"utils.nth_triangular_distances",
"utils.weighted_mean",
"solve.part_1",
"utils.distance_to_median",
"utils.parse_horizontal_positions"
] | [((122, 160), 'utils.parse_horizontal_positions', 'utils.parse_horizontal_positions', (['line'], {}), '(line)\n', (154, 160), False, 'import utils\n'), ((282, 320), 'utils.parse_horizontal_positions', 'utils.parse_horizontal_positions', (['line'], {}), '(line)\n', (314, 320), False, 'import utils\n'), ((422, 460), 'utils.parse_horizontal_positions', 'utils.parse_horizontal_positions', (['line'], {}), '(line)\n', (454, 460), False, 'import utils\n'), ((588, 620), 'utils.distance_to_median', 'utils.distance_to_median', (['values'], {}), '(values)\n', (612, 620), False, 'import utils\n'), ((751, 781), 'utils.distance_to_mean', 'utils.distance_to_mean', (['values'], {}), '(values)\n', (773, 781), False, 'import utils\n'), ((916, 940), 'solve.part_1', 'solve.part_1', (['input_data'], {}), '(input_data)\n', (928, 940), False, 'import solve\n'), ((1023, 1046), 'utils.nth_triangular', 'utils.nth_triangular', (['x'], {}), '(x)\n', (1043, 1046), False, 'import utils\n'), ((1124, 1147), 'utils.nth_triangular', 'utils.nth_triangular', (['x'], {}), '(x)\n', (1144, 1147), False, 'import utils\n'), ((1286, 1332), 'utils.nth_triangular_distances', 'utils.nth_triangular_distances', (['numbers', 'value'], {}), '(numbers, value)\n', (1316, 1332), False, 'import utils\n'), ((1527, 1564), 'utils.weighted_mean', 'utils.weighted_mean', (['numbers', 'weights'], {}), '(numbers, weights)\n', (1546, 1564), False, 'import utils\n'), ((1743, 1780), 'utils.weighted_mean', 'utils.weighted_mean', (['numbers', 'weights'], {}), '(numbers, weights)\n', (1762, 1780), False, 'import utils\n'), ((1916, 1940), 'solve.part_2', 'solve.part_2', (['input_data'], {}), '(input_data)\n', (1928, 1940), False, 'import solve\n'), ((1587, 1611), 'statistics.mean', 'statistics.mean', (['numbers'], {}), '(numbers)\n', (1602, 1611), False, 'import statistics\n')] |
from serpent.game_launcher import GameLauncher, GameLauncherException
from serpent.utilities import is_linux, is_macos, is_windows
import shlex
import subprocess
class RetroarchGameLauncher(GameLauncher):
def __init__(self, **kwargs):
super().__init__(**kwargs)
def launch(self, **kwargs):
core_path = kwargs.get("core_path")
rom_path = kwargs.get("rom_path")
if core_path is None:
raise GameLauncherException("A 'core_path' kwarg is required...")
if rom_path is None:
raise GameLauncherException("A 'rom_path' kwarg is required...")
launch_list = ["retroarch", "-L", core_path, rom_path]
if is_linux():
subprocess.Popen(launch_list)
elif is_macos():
subprocess.Popen(launch_list)
elif is_windows():
subprocess.Popen(launch_list) | [
"subprocess.Popen",
"serpent.utilities.is_macos",
"serpent.game_launcher.GameLauncherException",
"serpent.utilities.is_windows",
"serpent.utilities.is_linux"
] | [((689, 699), 'serpent.utilities.is_linux', 'is_linux', ([], {}), '()\n', (697, 699), False, 'from serpent.utilities import is_linux, is_macos, is_windows\n'), ((446, 505), 'serpent.game_launcher.GameLauncherException', 'GameLauncherException', (['"""A \'core_path\' kwarg is required..."""'], {}), '("A \'core_path\' kwarg is required...")\n', (467, 505), False, 'from serpent.game_launcher import GameLauncher, GameLauncherException\n'), ((554, 612), 'serpent.game_launcher.GameLauncherException', 'GameLauncherException', (['"""A \'rom_path\' kwarg is required..."""'], {}), '("A \'rom_path\' kwarg is required...")\n', (575, 612), False, 'from serpent.game_launcher import GameLauncher, GameLauncherException\n'), ((713, 742), 'subprocess.Popen', 'subprocess.Popen', (['launch_list'], {}), '(launch_list)\n', (729, 742), False, 'import subprocess\n'), ((756, 766), 'serpent.utilities.is_macos', 'is_macos', ([], {}), '()\n', (764, 766), False, 'from serpent.utilities import is_linux, is_macos, is_windows\n'), ((780, 809), 'subprocess.Popen', 'subprocess.Popen', (['launch_list'], {}), '(launch_list)\n', (796, 809), False, 'import subprocess\n'), ((823, 835), 'serpent.utilities.is_windows', 'is_windows', ([], {}), '()\n', (833, 835), False, 'from serpent.utilities import is_linux, is_macos, is_windows\n'), ((849, 878), 'subprocess.Popen', 'subprocess.Popen', (['launch_list'], {}), '(launch_list)\n', (865, 878), False, 'import subprocess\n')] |
import datetime
import json
from pathlib import Path
from typing import List, MutableMapping, Mapping, Any
from lemonspotter.core.test import Test
from lemonspotter.core.test import TestType
from lemonspotter.core.test import TestOutcome
from lemonspotter.core.database import Database
class TestReport():
"""
Class for generating logging statements for tests
"""
def __init__(self):
self._now = datetime.datetime.now()
self._report_id: str = "lsout_" + self._now.strftime("%Y-%m-%d_%H:%M")
self._report: MutableMapping = {}
self._tests: List[Test] = []
# Ensures that report directory exists
report_dir = Path.resolve(Path(__file__) / '../../../reports')
if not Path.exists(report_dir):
Path.mkdir(report_dir)
# Creates path to report file
self._report_file_name = self._report_id + '.log'
self._report_file_dir = Path(report_dir) / self._report_file_name
self._report_file_dir = Path.resolve(self._report_file_dir)
@property
def report_id(self) -> str:
return self._report_id
@report_id.setter
def report_id(self, report_id) -> None:
self._report_id = report_id
@property
def report_file_dir(self) -> str:
return self._report_file_dir
@report_file_dir.setter
def report_file_dir(self, report_file_dir) -> None:
self._report_file_dir = report_file_dir
@property
def tests(self) -> List[Test]:
return self._tests
@tests.setter
def tests(self, tests) -> None:
self._tests = tests
def log_test_result(self, test, msg=None) -> None:
"""
        Records the result of a single test and builds its log message
"""
if test not in self.tests:
self.tests.append(test)
log_msg = ''
if test.type == TestType.BUILD_ONLY:
log_msg += '[BUILD ONLY|'
if test.build_outcome == TestOutcome.SUCCESS:
log_msg += 'PASS|'
else:
log_msg += 'FAIL|'
elif test.type == TestType.BUILD_AND_RUN:
log_msg += '[RUN|'
if test.run_outcome == TestOutcome.SUCCESS:
log_msg += 'PASS|'
else:
log_msg += 'FAIL|'
log_msg += test.name + ']'
if msg:
log_msg += '\n\t ' + msg
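        # The assembled message has the form "[BUILD ONLY|PASS|<test name>]" or
        # "[RUN|FAIL|<test name>]", optionally followed by the extra message on a
        # new indented line.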
def generate_report(self) -> None:
"""
Generates the complete report for tests run to this point
"""
self._generate_report()
def _generate_report(self) -> None:
""""""
# Generates Presence Report #
presence_report: MutableMapping[str, Mapping[str, Any]] = {}
constants = {}
for constant in Database().get_constants():
constants[constant.name] = constant.properties
functions = {}
for function in Database().get_functions():
functions[function.name] = function.properties
presence_report['constants'] = constants
presence_report['functions'] = functions
self._report['presence_report'] = presence_report
test_report = {}
for test in self._tests:
test_report[test.name] = {'type': str(test.type),
'build_outcome': str(test.build_outcome),
'run_outcome': str(test.run_outcome)}
self._report['tests'] = test_report
def print_report(self, indent=2):
"""
Pretty prints report
"""
self._generate_report()
print(json.dumps(self._report, indent=indent))
def write_report(self):
"""
Writes generated report to file
"""
self._generate_report()
with open(self.report_file_dir, 'a+') as file_buffer:
json.dump(self._report, file_buffer)
| [
"pathlib.Path.resolve",
"pathlib.Path.exists",
"lemonspotter.core.database.Database",
"pathlib.Path",
"json.dumps",
"datetime.datetime.now",
"pathlib.Path.mkdir",
"json.dump"
] | [((424, 447), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (445, 447), False, 'import datetime\n'), ((1004, 1039), 'pathlib.Path.resolve', 'Path.resolve', (['self._report_file_dir'], {}), '(self._report_file_dir)\n', (1016, 1039), False, 'from pathlib import Path\n'), ((741, 764), 'pathlib.Path.exists', 'Path.exists', (['report_dir'], {}), '(report_dir)\n', (752, 764), False, 'from pathlib import Path\n'), ((778, 800), 'pathlib.Path.mkdir', 'Path.mkdir', (['report_dir'], {}), '(report_dir)\n', (788, 800), False, 'from pathlib import Path\n'), ((930, 946), 'pathlib.Path', 'Path', (['report_dir'], {}), '(report_dir)\n', (934, 946), False, 'from pathlib import Path\n'), ((3577, 3616), 'json.dumps', 'json.dumps', (['self._report'], {'indent': 'indent'}), '(self._report, indent=indent)\n', (3587, 3616), False, 'import json\n'), ((3818, 3854), 'json.dump', 'json.dump', (['self._report', 'file_buffer'], {}), '(self._report, file_buffer)\n', (3827, 3854), False, 'import json\n'), ((689, 703), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (693, 703), False, 'from pathlib import Path\n'), ((2736, 2746), 'lemonspotter.core.database.Database', 'Database', ([], {}), '()\n', (2744, 2746), False, 'from lemonspotter.core.database import Database\n'), ((2871, 2881), 'lemonspotter.core.database.Database', 'Database', ([], {}), '()\n', (2879, 2881), False, 'from lemonspotter.core.database import Database\n')] |
from collections import defaultdict
from typing import Dict
import torch
import numpy as np
from malib.algorithm.mappo.vtrace import compute_vtrace
from malib.utils.episode import EpisodeKey
def get_part_data_from_batch(batch_data, idx):
# (ziyu): suppose the first dimension is batch_size
res = {}
for k, v in batch_data.items():
res[k] = v[idx]
return res
def compute_return(policy, batch, mode="gae"):
cm_cfg = policy.custom_config
gamma, gae_lambda = cm_cfg["gamma"], cm_cfg["gae"]["gae_lambda"]
values, rewards, dones = (
# FIXME(ziyu): for debugging
np.zeros_like(batch[EpisodeKey.REWARD]),
# batch[EpisodeKey.STATE_VALUE],
batch[EpisodeKey.REWARD],
batch[EpisodeKey.DONE],
)
if cm_cfg["use_popart"]:
values = policy.value_normalizer.denormalize(values)
if mode == "gae":
return compute_gae(values, rewards, dones, gamma, gae_lambda)
elif mode == "vtrace":
return compute_vtrace(
policy,
batch[EpisodeKey.CUR_OBS],
rewards,
values,
dones,
# XXX(ming): why load rnn states from batch? we do not save it.
batch["rnn_state_0"],
batch[EpisodeKey.ACTION],
batch[EpisodeKey.ACTION_DIST],
gamma,
cm_cfg["vtrace"]["clip_rho_threshold"],
cm_cfg["vtrace"]["clip_pg_rho_threshold"],
)
else:
raise ValueError("Unexpected return mode: {}".format(mode))
def compute_gae(value, reward, done, gamma, gae_lambda):
assert len(reward.shape) == 4, (reward.shape, done.shape, value.shape)
B, Tp1, N, _ = reward.shape
assert list(value.shape) == [B, Tp1, N, 1] and list(done.shape) == [B, Tp1, N, 1]
value = np.transpose(value, (1, 0, 2, 3))
done = np.transpose(done, (1, 0, 2, 3))
reward = np.transpose(reward, (1, 0, 2, 3))
gae, ret = 0, np.zeros_like(reward)
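    # backward recursion over time (GAE): delta_t = r_t + gamma * (1 - done_t) * V_{t+1} - V_t,
    # gae_t = delta_t + gamma * lambda * (1 - done_t) * gae_{t+1}, and return_t = gae_t + V_t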
for t in reversed(range(Tp1 - 1)):
delta = reward[t] + gamma * (1 - done[t]) * value[t + 1] - value[t]
gae = delta + gamma * gae_lambda * (1 - done[t]) * gae
ret[t] = gae + value[t]
return {"return": ret.transpose((1, 0, 2, 3))}
def simple_data_generator(batch, num_mini_batch, device):
# XXX(ziyu): if we put all data on GPUs, mini-batch cannot work when we don't have enough GPU memory
batch_size, _ = batch[EpisodeKey.CUR_OBS].shape
mini_batch_size = batch_size // num_mini_batch
assert mini_batch_size > 0
rand = torch.randperm(batch_size).numpy()
for i in range(0, batch_size, mini_batch_size):
tmp_slice = slice(i, min(batch_size, i + mini_batch_size))
tmp_batch = get_part_data_from_batch(batch, rand[tmp_slice])
yield tmp_batch
def recurrent_generator(data, num_mini_batch, rnn_data_chunk_length, device):
batch = {k: d.copy() for k, d in data.items()}
# original shape is [fragment_length, batch_size, num_agent, ...]
def _cast(x):
return x.permute(1, 2, 0, 3).reshape(-1, *x.shape[3:])
for k in batch:
if isinstance(batch[k], np.ndarray):
batch[k] = torch.FloatTensor(batch[k]) # .to(device)
# FIXME(ziyu): the put on GPU operation here should be considered in detail
if k not in ["rnn_state_0", "rnn_state_1"]:
batch[k] = _cast(batch[k])
else:
batch[k] = batch[k].permute(1, 2, 0, 3, 4).reshape(-1, *batch[k].shape[3:])
batch_size, _ = batch[EpisodeKey.CUR_OBS].shape
data_chunks = batch_size // rnn_data_chunk_length # [C=r*T*M/L]
mini_batch_size = data_chunks // num_mini_batch
rand = torch.randperm(data_chunks).numpy()
sampler = [
rand[i * mini_batch_size : (i + 1) * mini_batch_size]
for i in range(num_mini_batch)
]
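    # each sampled index maps to a contiguous chunk of rnn_data_chunk_length steps,
    # so only the RNN state at the start of each chunk has to be kept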
for indices in sampler:
tmp_batch_list = defaultdict(list)
for index in indices:
ind = index * rnn_data_chunk_length
for k in batch:
if k not in ["rnn_state_0", "rnn_state_1"]:
tmp_batch_list[k].append(
batch[k][ind : ind + rnn_data_chunk_length]
)
else:
tmp_batch_list[k].append(batch[k][ind])
T, N = rnn_data_chunk_length, mini_batch_size
tmp_batch = {}
for k in batch:
if k not in ["rnn_state_0", "rnn_state_1"]:
tmp_batch[k] = torch.stack(tmp_batch_list[k], dim=1)
tmp_batch[k] = tmp_batch[k].reshape(N * T, *tmp_batch[k].shape[2:])
else:
tmp_batch[k] = torch.stack(tmp_batch_list[k])
yield {k: v.to(device) for k, v in tmp_batch.items()}
| [
"numpy.transpose",
"torch.randperm",
"torch.stack",
"collections.defaultdict",
"malib.algorithm.mappo.vtrace.compute_vtrace",
"numpy.zeros_like",
"torch.FloatTensor"
] | [((1795, 1828), 'numpy.transpose', 'np.transpose', (['value', '(1, 0, 2, 3)'], {}), '(value, (1, 0, 2, 3))\n', (1807, 1828), True, 'import numpy as np\n'), ((1840, 1872), 'numpy.transpose', 'np.transpose', (['done', '(1, 0, 2, 3)'], {}), '(done, (1, 0, 2, 3))\n', (1852, 1872), True, 'import numpy as np\n'), ((1886, 1920), 'numpy.transpose', 'np.transpose', (['reward', '(1, 0, 2, 3)'], {}), '(reward, (1, 0, 2, 3))\n', (1898, 1920), True, 'import numpy as np\n'), ((612, 651), 'numpy.zeros_like', 'np.zeros_like', (['batch[EpisodeKey.REWARD]'], {}), '(batch[EpisodeKey.REWARD])\n', (625, 651), True, 'import numpy as np\n'), ((1940, 1961), 'numpy.zeros_like', 'np.zeros_like', (['reward'], {}), '(reward)\n', (1953, 1961), True, 'import numpy as np\n'), ((3877, 3894), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (3888, 3894), False, 'from collections import defaultdict\n'), ((991, 1247), 'malib.algorithm.mappo.vtrace.compute_vtrace', 'compute_vtrace', (['policy', 'batch[EpisodeKey.CUR_OBS]', 'rewards', 'values', 'dones', "batch['rnn_state_0']", 'batch[EpisodeKey.ACTION]', 'batch[EpisodeKey.ACTION_DIST]', 'gamma', "cm_cfg['vtrace']['clip_rho_threshold']", "cm_cfg['vtrace']['clip_pg_rho_threshold']"], {}), "(policy, batch[EpisodeKey.CUR_OBS], rewards, values, dones,\n batch['rnn_state_0'], batch[EpisodeKey.ACTION], batch[EpisodeKey.\n ACTION_DIST], gamma, cm_cfg['vtrace']['clip_rho_threshold'], cm_cfg[\n 'vtrace']['clip_pg_rho_threshold'])\n", (1005, 1247), False, 'from malib.algorithm.mappo.vtrace import compute_vtrace\n'), ((2537, 2563), 'torch.randperm', 'torch.randperm', (['batch_size'], {}), '(batch_size)\n', (2551, 2563), False, 'import torch\n'), ((3155, 3182), 'torch.FloatTensor', 'torch.FloatTensor', (['batch[k]'], {}), '(batch[k])\n', (3172, 3182), False, 'import torch\n'), ((3665, 3692), 'torch.randperm', 'torch.randperm', (['data_chunks'], {}), '(data_chunks)\n', (3679, 3692), False, 'import torch\n'), ((4469, 4506), 'torch.stack', 'torch.stack', (['tmp_batch_list[k]'], {'dim': '(1)'}), '(tmp_batch_list[k], dim=1)\n', (4480, 4506), False, 'import torch\n'), ((4640, 4670), 'torch.stack', 'torch.stack', (['tmp_batch_list[k]'], {}), '(tmp_batch_list[k])\n', (4651, 4670), False, 'import torch\n')] |
import os
import cv2
import keyboard
import threading
import pyautogui
import numpy as np
from tkinter import *
# makes a folder or notifies that the folder exists
try:
os.mkdir('Screen Recording')
print('Folder created')
except FileExistsError:
print('Folder already exists')
# constants
BIGGEST_I = 0
RECORDING_LIST = os.listdir('Screen Recording')
SCREEN_SIZE = tuple(pyautogui.size())
FPS = 8
# this part generates the index of the last recording and allows the program to save the next recording with a unique name
for i in RECORDING_LIST:
if i == 'Thumbs.db':
pass
else:
# <-- this just gets the value after (#) or the index
index = int(i.split('#')[1].split('.')[0])
if index > BIGGEST_I:
BIGGEST_I = index
index = BIGGEST_I + 1
# records the screen until <esc> is pressed
def start_or_stop():
print("Recording has started")
fourcc = cv2.VideoWriter_fourcc(*"XVID")
out = cv2.VideoWriter(
"Screen Recording/video#{0}.avi".format(index), fourcc, FPS, (SCREEN_SIZE))
while True:
img = pyautogui.screenshot()
frame = np.array(img)
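        # pyautogui returns RGB frames; the conversion swaps channels into the BGR order expected by cv2.VideoWriter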
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
out.write(frame)
# if the user clicks esc, it exits
if keyboard.is_pressed('esc'):
print('Recording ended')
cv2.destroyAllWindows()
out.release()
root.destroy()
break
if __name__ == "__main__":
t1 = threading.Thread(target=start_or_stop)
root = Tk()
root.geometry('200x130')
root.title('Screen Recorder')
root.configure(background='#b5d1ff')
# root.iconbitmap('favicon.ico')
start_stop_button = Button(root, text="Start", command=t1.start, bg="#b5d1ff", bd=1, padx=4)
start_stop_button.place(x=20, y=20, width=160, height=40)
frm = Frame(root, bg='black')
frm.place(x=20, y=70, width=160, height=40)
lbl = Label(frm, text="To stop, press <ESC>", anchor=CENTER, bg="#b5d1ff", padx=4)
lbl.place(x=1, y=1, width=158, height=38)
root.mainloop()
| [
"os.listdir",
"pyautogui.screenshot",
"keyboard.is_pressed",
"pyautogui.size",
"numpy.array",
"cv2.destroyAllWindows",
"os.mkdir",
"cv2.VideoWriter_fourcc",
"cv2.cvtColor",
"threading.Thread"
] | [((351, 381), 'os.listdir', 'os.listdir', (['"""Screen Recording"""'], {}), "('Screen Recording')\n", (361, 381), False, 'import os\n'), ((184, 212), 'os.mkdir', 'os.mkdir', (['"""Screen Recording"""'], {}), "('Screen Recording')\n", (192, 212), False, 'import os\n'), ((403, 419), 'pyautogui.size', 'pyautogui.size', ([], {}), '()\n', (417, 419), False, 'import pyautogui\n'), ((956, 987), 'cv2.VideoWriter_fourcc', 'cv2.VideoWriter_fourcc', (["*'XVID'"], {}), "(*'XVID')\n", (978, 987), False, 'import cv2\n'), ((1546, 1584), 'threading.Thread', 'threading.Thread', ([], {'target': 'start_or_stop'}), '(target=start_or_stop)\n', (1562, 1584), False, 'import threading\n'), ((1135, 1157), 'pyautogui.screenshot', 'pyautogui.screenshot', ([], {}), '()\n', (1155, 1157), False, 'import pyautogui\n'), ((1175, 1188), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (1183, 1188), True, 'import numpy as np\n'), ((1206, 1244), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2RGB'], {}), '(frame, cv2.COLOR_BGR2RGB)\n', (1218, 1244), False, 'import cv2\n'), ((1327, 1353), 'keyboard.is_pressed', 'keyboard.is_pressed', (['"""esc"""'], {}), "('esc')\n", (1346, 1353), False, 'import keyboard\n'), ((1406, 1429), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (1427, 1429), False, 'import cv2\n')] |
from jsonrpcclient.response import JSONRPCResponse, Response, total_results
class TestTotalResults:
def test_unparsed(self):
assert total_results(None) == 0
def test_one(self):
res = {"jsonrpc": "2.0", "result": "foo", "id": 1}
jsonrpc_response = JSONRPCResponse(res)
assert total_results(jsonrpc_response) == 1
def test_list(self):
res = {"jsonrpc": "2.0", "result": "foo", "id": 1}
jsonrpc_response = JSONRPCResponse(res)
assert total_results([jsonrpc_response, jsonrpc_response]) == 2
class TestResponse:
def test(self):
response = Response("foo")
assert response.text == "foo"
def test_repr(self):
response = Response("foo")
assert repr(response) == "<Response[0]>"
def test_repr_with_results(self):
response = Response("foo")
response.data = JSONRPCResponse(
{"jsonrpc": "2.0", "error": {"message": "foo"}, "id": 1}
)
assert repr(response) == "<Response[0 ok, 1 errors]>"
| [
"jsonrpcclient.response.JSONRPCResponse",
"jsonrpcclient.response.total_results",
"jsonrpcclient.response.Response"
] | [((282, 302), 'jsonrpcclient.response.JSONRPCResponse', 'JSONRPCResponse', (['res'], {}), '(res)\n', (297, 302), False, 'from jsonrpcclient.response import JSONRPCResponse, Response, total_results\n'), ((467, 487), 'jsonrpcclient.response.JSONRPCResponse', 'JSONRPCResponse', (['res'], {}), '(res)\n', (482, 487), False, 'from jsonrpcclient.response import JSONRPCResponse, Response, total_results\n'), ((621, 636), 'jsonrpcclient.response.Response', 'Response', (['"""foo"""'], {}), "('foo')\n", (629, 636), False, 'from jsonrpcclient.response import JSONRPCResponse, Response, total_results\n'), ((720, 735), 'jsonrpcclient.response.Response', 'Response', (['"""foo"""'], {}), "('foo')\n", (728, 735), False, 'from jsonrpcclient.response import JSONRPCResponse, Response, total_results\n'), ((843, 858), 'jsonrpcclient.response.Response', 'Response', (['"""foo"""'], {}), "('foo')\n", (851, 858), False, 'from jsonrpcclient.response import JSONRPCResponse, Response, total_results\n'), ((883, 956), 'jsonrpcclient.response.JSONRPCResponse', 'JSONRPCResponse', (["{'jsonrpc': '2.0', 'error': {'message': 'foo'}, 'id': 1}"], {}), "({'jsonrpc': '2.0', 'error': {'message': 'foo'}, 'id': 1})\n", (898, 956), False, 'from jsonrpcclient.response import JSONRPCResponse, Response, total_results\n'), ((146, 165), 'jsonrpcclient.response.total_results', 'total_results', (['None'], {}), '(None)\n', (159, 165), False, 'from jsonrpcclient.response import JSONRPCResponse, Response, total_results\n'), ((318, 349), 'jsonrpcclient.response.total_results', 'total_results', (['jsonrpc_response'], {}), '(jsonrpc_response)\n', (331, 349), False, 'from jsonrpcclient.response import JSONRPCResponse, Response, total_results\n'), ((503, 554), 'jsonrpcclient.response.total_results', 'total_results', (['[jsonrpc_response, jsonrpc_response]'], {}), '([jsonrpc_response, jsonrpc_response])\n', (516, 554), False, 'from jsonrpcclient.response import JSONRPCResponse, Response, total_results\n')] |
import sys
from pathlib import Path
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
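# PIXELSIZE is the pixel size in mm (0.05 mm = 50 µm per pixel); it scales the image extent so the axes show positions in mm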
PIXELSIZE = 50*10**-3
DPI = 300
XLABEL = r"$\mathrm{Horizontal\,pos.}$ $[\mathrm{mm}]$"
YLABEL = r"$\mathrm{Vertical\,pos.}$ $[\mathrm{mm}]$"
FONTSIZE_LABELS = 20
FONTSIZE_TICKS = 14
TICKINDEX_MAJOR_X = 1
TICKINDEX_MINOR_X = TICKINDEX_MAJOR_X / 5
TICKINDEX_MAJOR_Y = 1
TICKINDEX_MINOR_Y = TICKINDEX_MAJOR_Y / 5
SHRINK = 0.935
CBAR_SCILIMIT = 10**3
CMAPS = {0:'viridis', 1:'plasma', 2:'inferno', 3:'magma', 4:'Greys',
5:'Purples', 6:'Blues', 7:'Greens', 8:'Oranges', 9:'Reds',
10: 'YlOrBr', 11:'YlOrRd', 12:'OrRd', 13:'PuRd', 14:'RdPu',
15:'BuPu', 16:'GnBu', 17:'PuBu', 18:'YlGnBu', 19:'PuBuGn',
20:'BuGn', 21:'YlGn', 22:'binary', 23:'gist_yarg', 24:'gist_gray',
25:'gray', 26:'bone', 27:'pink', 28:'spring', 29:'summer',
30:'autumn', 31:'winter', 32:'cool', 33:'Wistia', 34:'hot',
35:'afmhot', 36:'gist_heat', 37:'copper', 38:'PiYG', 39:'PRGn',
40:'BrBG', 41:'PuOr', 42:'RdGy', 43:'RdBu', 44:'RdYlBu',
45:'RdYlGn', 46:'Spectral', 47:'coolwarm', 48:'bwr', 49:'seismic',
50:'twilight', 51:'twilight_shifted', 52:'hsv', 53:'ocean', 54:'gist_earth',
55:'terrain', 56:'gist_stern', 57:'gnuplot', 58:'gnuplot2', 59:'CMRmap',
60:'cubehelix', 61:'brg', 62:'gist_rainbow', 63:'rainbow', 64:'jet',
65:'turbo', 66:'nipy_spectral', 67:'gist_ncar'}
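# read a tab-separated refinement output file, reshape each 1-D parameter column into a
# (rows x columns) map, save the map as txt, and plot it with the chosen colormap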
def one_to_two_d(file):
d = {}
print(f"File:\t{file.name}")
df = pd.read_csv(file, delimiter='\t')
cols = df.columns
for i in range(1, len(cols)):
df[cols[i]] = df[cols[i]].astype(float)
d[cols[0]] = list(df[cols[0]])
for i in range(1, len(cols)):
d[cols[i]] = df[cols[i]].to_numpy()
print(f"\n\tThe following quantities are present:")
for k in d.keys():
if not 'unnamed' in k.lower():
print(f"\t\t{k}")
col_len = len(d[list(d.keys())[0]])
nor = int(input(f"\n\tThe length of the 1D column is: {col_len}\
\n\n\tPlease provide the desired number of rows for the new matrix: "))
noc = int(col_len / nor)
print(f"\tNumber of columns: {noc}")
# pixelsize = float(input(f"\tPlease provide the pixelsize in µm: "))
pixelsize = PIXELSIZE
plot_default = input("\n\tDo you want to plot all quantities with default settings? (y/n): ")
if plot_default == "n":
print("\tNumber\tColormap")
for k,v in CMAPS.items():
print(f"\t{k}:\t{v}")
cmap_desire = CMAPS[int(input("\n\tPlease select the number of the desired colormap: "))]
print(f"\tColormap '{cmap_desire}' was selected.")
else:
cmap_desire = CMAPS[0]
print(f"\n\tWriting txt files and plotting...")
for k in d.keys():
try:
if isinstance(d[k], np.ndarray) and 'unnamed' not in k.lower():
print(f"\t\t{k}")
num_header = '1'
for i in range(1, noc):
num_header += f"\t{i+1}"
new_array = np.reshape(d[k], (nor, noc))
np.savetxt(f"{file.stem}/txt/{file.stem}_{k}.txt", new_array,
fmt='%.5e', encoding='utf-8', header=num_header,
comments='')
if plot_default == "n":
plot_desire = input(f"\t\t\tDo you want to plot '{k}'? (y/n): ")
if plot_desire == "y":
cbar_limits = input("\t\t\t\tPlease provide min, max for colorbar: ")
else:
continue
else:
cbar_limits = None
two_d_array_plot(new_array, k, file, pixelsize, cmap_desire, cbar_limits)
except ValueError:
print(f"{90*'-'}\nValueError: cannot reshape array of size {col_len} into shape ({nor},{noc})\
                  \nPlease rerun the code and provide a number of rows that divides the 1D column length evenly.\
\n{90*'-'}")
sys.exit()
print(f"\n\ttxt files containing {new_array.shape} arrays have been saved to the txt directory.\
\n\tPlots have been saved to the 'pdf' and 'png' directories.\
\n{90*'-'}")
return plot_default, cmap_desire
def delimiter_fix(file):
with open(file) as f:
lines = f.readlines()
header = lines[0].split()
noc = len(header)
new_del = []
for line in lines:
cols = line.split()
s = ''
for col in cols:
s += f"{col}\t"
new_del.append(f"{s}\n")
with open(f"delimiter_fix/{file.stem}.txt", 'w') as o:
o.writelines(new_del)
return None
def two_d_array_plot(two_d_array, column_name, file, pixelsize, cmap_desire, cbar_limits):
if "unnamed" in column_name.lower():
return None
else:
fig, ax = plt.subplots(dpi=DPI)
im = plt.imshow(two_d_array,
extent=[0, two_d_array.shape[1] * pixelsize,
two_d_array.shape[0] * pixelsize, 0],
cmap=cmap_desire
)
plt.xlabel(XLABEL, fontsize=FONTSIZE_LABELS)
plt.ylabel(YLABEL, fontsize=FONTSIZE_LABELS)
ax.tick_params(axis='both', labelsize=FONTSIZE_TICKS)
ax.xaxis.set_label_position('top')
ax.xaxis.tick_top()
ax.xaxis.set_major_locator(ticker.MultipleLocator(TICKINDEX_MAJOR_X))
ax.xaxis.set_minor_locator(ticker.MultipleLocator(TICKINDEX_MINOR_X))
ax.yaxis.set_major_locator(ticker.MultipleLocator(TICKINDEX_MAJOR_Y))
ax.yaxis.set_minor_locator(ticker.MultipleLocator(TICKINDEX_MINOR_Y))
cbar = plt.colorbar(im, ax=ax, anchor=(0,1), shrink=SHRINK)
cbar.ax.tick_params(labelsize=FONTSIZE_TICKS)
if column_name.lower() == 'scan_number':
cbar.set_label(label=r"Scan Number", size=FONTSIZE_LABELS)
elif column_name.lower() == 'r_wp':
cbar.set_label(label=r"$r_{\mathrm{wp}}$", size=FONTSIZE_LABELS)
elif column_name.lower() == 'scale_total':
cbar.set_label(label=r"Scale Total", size=FONTSIZE_LABELS)
elif column_name.lower() == 'scale_lfp':
cbar.set_label(label=r"Scale LFP", size=FONTSIZE_LABELS)
elif column_name.lower() == 'lp1_a':
cbar.set_label(label=r"$a_{1}$ $[\mathrm{\AA}]$", size=FONTSIZE_LABELS)
elif column_name.lower() == 'lp1_b':
cbar.set_label(label=r"$b_{1}$ $[\mathrm{\AA}]$", size=FONTSIZE_LABELS)
elif column_name.lower() == 'lp1_c':
cbar.set_label(label=r"$c_{1}$ $[\mathrm{\AA}]$", size=FONTSIZE_LABELS)
elif column_name.lower() == 'rbragg_1':
cbar.set_label(label=r"$r_{\mathrm{Bragg,1}}$", size=FONTSIZE_LABELS)
elif column_name.lower() == 'wp_1':
cbar.set_label(label=r"$wp_{1}$", size=FONTSIZE_LABELS)
elif column_name.lower() == 'lp2_a':
cbar.set_label(label=r"$a_{2}$ $[\mathrm{\AA}]$", size=FONTSIZE_LABELS)
elif column_name.lower() == 'lp2_b':
cbar.set_label(label=r"$b_{2}$ $[\mathrm{\AA}]$", size=FONTSIZE_LABELS)
elif column_name.lower() == 'lp2_c':
cbar.set_label(label=r"$c_{2}$ $[\mathrm{\AA}]$", size=FONTSIZE_LABELS)
elif column_name.lower() == 'rbragg_2':
cbar.set_label(label=r"$r_{\mathrm{Bragg,2}}$", size=FONTSIZE_LABELS)
elif column_name.lower() == 'wp_2':
cbar.set_label(label=r"$wp_{2}$", size=FONTSIZE_LABELS)
else:
cbar.set_label(label=column_name, size=FONTSIZE_LABELS)
if not isinstance(cbar_limits, type(None)):
cbar_min, cbar_max = float(cbar_limits.split(",")[0]), float(cbar_limits.split(",")[1])
plt.clim(cbar_min, cbar_max)
plt.savefig(f"{file.stem}/png/{file.stem}_{column_name}_{cbar_min}-{cbar_max}.png", bbox_inches='tight')
plt.savefig(f"{file.stem}/pdf/{file.stem}_{column_name}_{cbar_min}-{cbar_max}.pdf", bbox_inches='tight')
else:
cbar.ax.ticklabel_format(style="sci", scilimits=(0,0))
plt.savefig(f"{file.stem}/png/{file.stem}_{column_name}.png", bbox_inches='tight')
plt.savefig(f"{file.stem}/pdf/{file.stem}_{column_name}.pdf", bbox_inches='tight')
plt.close()
return None
def main():
if not (Path.cwd() / 'data').exists():
(Path.cwd() / 'data').mkdir()
print(f"{90*'-'}\nPlease place your data in the data folder and rerun the code.\
\n{90*'-'}")
sys.exit()
files = list((Path.cwd() / 'data').glob(f"*.*"))
if len(files) == 0:
print(f"{90*'-'}\nPlease place your {FILEEXT} files in the data and rerun the code.\
\n{90*'-'}")
sys.exit()
print(f"{90*'-'}")
if not (Path.cwd() / 'delimiter_fix').exists():
(Path.cwd() / 'delimiter_fix').mkdir()
for file in files:
if not (Path.cwd() / f"{file.stem}").exists():
(Path.cwd() / f"{file.stem}").mkdir()
folders = ['txt', 'png', 'pdf']
for folder in folders:
if not ((Path.cwd() / f"{file.stem}") / folder).exists():
((Path.cwd() / f"{file.stem}") / folder).mkdir()
delimiter_fix(file)
files = list((Path.cwd() / 'delimiter_fix').glob(f"*.*"))
for file in files:
plot_default, cmap_desire = one_to_two_d(file)
print(f"Done with all files.\nWell done! (^^,)\n{90*'-'}")
return None
if __name__ == '__main__':
main()
# End of file.
| [
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.clim",
"matplotlib.pyplot.savefig",
"numpy.reshape",
"pandas.read_csv",
"matplotlib.pyplot.ylabel",
"matplotlib.ticker.MultipleLocator",
"pathlib.Path.cwd",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.colorbar",
"matplotlib.pyplot.close",
"numpy.savetxt",
"sys.exit",
"matplotlib.pyplot.subplots"
] | [((1580, 1613), 'pandas.read_csv', 'pd.read_csv', (['file'], {'delimiter': '"""\t"""'}), "(file, delimiter='\\t')\n", (1591, 1613), True, 'import pandas as pd\n'), ((4940, 4961), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'dpi': 'DPI'}), '(dpi=DPI)\n', (4952, 4961), True, 'import matplotlib.pyplot as plt\n'), ((4975, 5104), 'matplotlib.pyplot.imshow', 'plt.imshow', (['two_d_array'], {'extent': '[0, two_d_array.shape[1] * pixelsize, two_d_array.shape[0] * pixelsize, 0]', 'cmap': 'cmap_desire'}), '(two_d_array, extent=[0, two_d_array.shape[1] * pixelsize, \n two_d_array.shape[0] * pixelsize, 0], cmap=cmap_desire)\n', (4985, 5104), True, 'import matplotlib.pyplot as plt\n'), ((5193, 5237), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['XLABEL'], {'fontsize': 'FONTSIZE_LABELS'}), '(XLABEL, fontsize=FONTSIZE_LABELS)\n', (5203, 5237), True, 'import matplotlib.pyplot as plt\n'), ((5246, 5290), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['YLABEL'], {'fontsize': 'FONTSIZE_LABELS'}), '(YLABEL, fontsize=FONTSIZE_LABELS)\n', (5256, 5290), True, 'import matplotlib.pyplot as plt\n'), ((5751, 5804), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['im'], {'ax': 'ax', 'anchor': '(0, 1)', 'shrink': 'SHRINK'}), '(im, ax=ax, anchor=(0, 1), shrink=SHRINK)\n', (5763, 5804), True, 'import matplotlib.pyplot as plt\n'), ((8385, 8396), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (8394, 8396), True, 'import matplotlib.pyplot as plt\n'), ((8635, 8645), 'sys.exit', 'sys.exit', ([], {}), '()\n', (8643, 8645), False, 'import sys\n'), ((8853, 8863), 'sys.exit', 'sys.exit', ([], {}), '()\n', (8861, 8863), False, 'import sys\n'), ((5459, 5500), 'matplotlib.ticker.MultipleLocator', 'ticker.MultipleLocator', (['TICKINDEX_MAJOR_X'], {}), '(TICKINDEX_MAJOR_X)\n', (5481, 5500), True, 'import matplotlib.ticker as ticker\n'), ((5537, 5578), 'matplotlib.ticker.MultipleLocator', 'ticker.MultipleLocator', (['TICKINDEX_MINOR_X'], {}), '(TICKINDEX_MINOR_X)\n', (5559, 5578), True, 'import matplotlib.ticker as ticker\n'), ((5615, 5656), 'matplotlib.ticker.MultipleLocator', 'ticker.MultipleLocator', (['TICKINDEX_MAJOR_Y'], {}), '(TICKINDEX_MAJOR_Y)\n', (5637, 5656), True, 'import matplotlib.ticker as ticker\n'), ((5693, 5734), 'matplotlib.ticker.MultipleLocator', 'ticker.MultipleLocator', (['TICKINDEX_MINOR_Y'], {}), '(TICKINDEX_MINOR_Y)\n', (5715, 5734), True, 'import matplotlib.ticker as ticker\n'), ((7843, 7871), 'matplotlib.pyplot.clim', 'plt.clim', (['cbar_min', 'cbar_max'], {}), '(cbar_min, cbar_max)\n', (7851, 7871), True, 'import matplotlib.pyplot as plt\n'), ((7884, 7997), 'matplotlib.pyplot.savefig', 'plt.savefig', (['f"""{file.stem}/png/{file.stem}_{column_name}_{cbar_min}-{cbar_max}.png"""'], {'bbox_inches': '"""tight"""'}), "(\n f'{file.stem}/png/{file.stem}_{column_name}_{cbar_min}-{cbar_max}.png',\n bbox_inches='tight')\n", (7895, 7997), True, 'import matplotlib.pyplot as plt\n'), ((8001, 8114), 'matplotlib.pyplot.savefig', 'plt.savefig', (['f"""{file.stem}/pdf/{file.stem}_{column_name}_{cbar_min}-{cbar_max}.pdf"""'], {'bbox_inches': '"""tight"""'}), "(\n f'{file.stem}/pdf/{file.stem}_{column_name}_{cbar_min}-{cbar_max}.pdf',\n bbox_inches='tight')\n", (8012, 8114), True, 'import matplotlib.pyplot as plt\n'), ((8199, 8286), 'matplotlib.pyplot.savefig', 'plt.savefig', (['f"""{file.stem}/png/{file.stem}_{column_name}.png"""'], {'bbox_inches': '"""tight"""'}), "(f'{file.stem}/png/{file.stem}_{column_name}.png', bbox_inches=\n 'tight')\n", (8210, 8286), True, 'import matplotlib.pyplot as plt\n'), 
((8294, 8381), 'matplotlib.pyplot.savefig', 'plt.savefig', (['f"""{file.stem}/pdf/{file.stem}_{column_name}.pdf"""'], {'bbox_inches': '"""tight"""'}), "(f'{file.stem}/pdf/{file.stem}_{column_name}.pdf', bbox_inches=\n 'tight')\n", (8305, 8381), True, 'import matplotlib.pyplot as plt\n'), ((3114, 3142), 'numpy.reshape', 'np.reshape', (['d[k]', '(nor, noc)'], {}), '(d[k], (nor, noc))\n', (3124, 3142), True, 'import numpy as np\n'), ((3159, 3286), 'numpy.savetxt', 'np.savetxt', (['f"""{file.stem}/txt/{file.stem}_{k}.txt"""', 'new_array'], {'fmt': '"""%.5e"""', 'encoding': '"""utf-8"""', 'header': 'num_header', 'comments': '""""""'}), "(f'{file.stem}/txt/{file.stem}_{k}.txt', new_array, fmt='%.5e',\n encoding='utf-8', header=num_header, comments='')\n", (3169, 3286), True, 'import numpy as np\n'), ((4090, 4100), 'sys.exit', 'sys.exit', ([], {}), '()\n', (4098, 4100), False, 'import sys\n'), ((8440, 8450), 'pathlib.Path.cwd', 'Path.cwd', ([], {}), '()\n', (8448, 8450), False, 'from pathlib import Path\n'), ((8480, 8490), 'pathlib.Path.cwd', 'Path.cwd', ([], {}), '()\n', (8488, 8490), False, 'from pathlib import Path\n'), ((8664, 8674), 'pathlib.Path.cwd', 'Path.cwd', ([], {}), '()\n', (8672, 8674), False, 'from pathlib import Path\n'), ((8899, 8909), 'pathlib.Path.cwd', 'Path.cwd', ([], {}), '()\n', (8907, 8909), False, 'from pathlib import Path\n'), ((8948, 8958), 'pathlib.Path.cwd', 'Path.cwd', ([], {}), '()\n', (8956, 8958), False, 'from pathlib import Path\n'), ((9366, 9376), 'pathlib.Path.cwd', 'Path.cwd', ([], {}), '()\n', (9374, 9376), False, 'from pathlib import Path\n'), ((9025, 9035), 'pathlib.Path.cwd', 'Path.cwd', ([], {}), '()\n', (9033, 9035), False, 'from pathlib import Path\n'), ((9077, 9087), 'pathlib.Path.cwd', 'Path.cwd', ([], {}), '()\n', (9085, 9087), False, 'from pathlib import Path\n'), ((9206, 9216), 'pathlib.Path.cwd', 'Path.cwd', ([], {}), '()\n', (9214, 9216), False, 'from pathlib import Path\n'), ((9273, 9283), 'pathlib.Path.cwd', 'Path.cwd', ([], {}), '()\n', (9281, 9283), False, 'from pathlib import Path\n')] |
import numpy as np
import tensorflow as tf
from .module import Module
from ..initializers import glorot_uniform
from ..utils import shape
class Dense(Module):
'''
Fully-connected layer
y = nonlinearity(Wx+b)
'''
def __init__(self, fan_in, fan_out,
use_bias=True, activation=None,
w_init=glorot_uniform, b_init=tf.zeros_initializer(),
trainable=True, name=None):
self._fan_in = fan_in
self._fan_out = fan_out
self._use_bias = use_bias
self._w_init = w_init
self._b_init = b_init
self.activation = activation
self._trainable = trainable
super(Dense, self).__init__(name)
def _initialize(self):
if type(self._w_init) is np.ndarray:
shape = None
self._w_init = self._w_init.astype(np.float32)
else:
shape = (self._fan_in, self._fan_out)
self.W = tf.get_variable('W', shape=shape,
initializer=self._w_init,
trainable=self._trainable)
tf.add_to_collection(tf.GraphKeys.WEIGHTS, self.W)
tf.summary.histogram('W', self.W)
if self._use_bias:
self.b = tf.get_variable('b', shape=(self._fan_out,),
initializer=self._b_init,
trainable=self._trainable)
tf.add_to_collection(tf.GraphKeys.BIASES, self.b)
tf.summary.histogram('b', self.b)
def _forward(self, x):
x_shape = shape(x)
ndims = len(x_shape)
# reshape for broadcasting
assert x_shape[-1] == self._fan_in
xr = tf.reshape(x, (-1, self._fan_in))
y = tf.matmul(xr, self.W)
if self._use_bias:
y += self.b
if self.activation:
y = self.activation(y)
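        # restore the original leading dimensions; only the last axis changes from fan_in to fan_out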
new_shape = tf.concat([tf.shape(x)[:ndims - 1], [self._fan_out]],
axis=0)
y = tf.reshape(y, new_shape)
new_dims = x_shape[:-1] + [self._fan_out]
y.set_shape(new_dims)
tf.summary.histogram('activations', y)
return y
| [
"tensorflow.shape",
"tensorflow.get_variable",
"tensorflow.zeros_initializer",
"tensorflow.summary.histogram",
"tensorflow.matmul",
"tensorflow.reshape",
"tensorflow.add_to_collection"
] | [((373, 395), 'tensorflow.zeros_initializer', 'tf.zeros_initializer', ([], {}), '()\n', (393, 395), True, 'import tensorflow as tf\n'), ((951, 1042), 'tensorflow.get_variable', 'tf.get_variable', (['"""W"""'], {'shape': 'shape', 'initializer': 'self._w_init', 'trainable': 'self._trainable'}), "('W', shape=shape, initializer=self._w_init, trainable=self.\n _trainable)\n", (966, 1042), True, 'import tensorflow as tf\n'), ((1112, 1162), 'tensorflow.add_to_collection', 'tf.add_to_collection', (['tf.GraphKeys.WEIGHTS', 'self.W'], {}), '(tf.GraphKeys.WEIGHTS, self.W)\n', (1132, 1162), True, 'import tensorflow as tf\n'), ((1171, 1204), 'tensorflow.summary.histogram', 'tf.summary.histogram', (['"""W"""', 'self.W'], {}), "('W', self.W)\n", (1191, 1204), True, 'import tensorflow as tf\n'), ((1710, 1743), 'tensorflow.reshape', 'tf.reshape', (['x', '(-1, self._fan_in)'], {}), '(x, (-1, self._fan_in))\n', (1720, 1743), True, 'import tensorflow as tf\n'), ((1756, 1777), 'tensorflow.matmul', 'tf.matmul', (['xr', 'self.W'], {}), '(xr, self.W)\n', (1765, 1777), True, 'import tensorflow as tf\n'), ((2019, 2043), 'tensorflow.reshape', 'tf.reshape', (['y', 'new_shape'], {}), '(y, new_shape)\n', (2029, 2043), True, 'import tensorflow as tf\n'), ((2132, 2170), 'tensorflow.summary.histogram', 'tf.summary.histogram', (['"""activations"""', 'y'], {}), "('activations', y)\n", (2152, 2170), True, 'import tensorflow as tf\n'), ((1254, 1355), 'tensorflow.get_variable', 'tf.get_variable', (['"""b"""'], {'shape': '(self._fan_out,)', 'initializer': 'self._b_init', 'trainable': 'self._trainable'}), "('b', shape=(self._fan_out,), initializer=self._b_init,\n trainable=self._trainable)\n", (1269, 1355), True, 'import tensorflow as tf\n'), ((1438, 1487), 'tensorflow.add_to_collection', 'tf.add_to_collection', (['tf.GraphKeys.BIASES', 'self.b'], {}), '(tf.GraphKeys.BIASES, self.b)\n', (1458, 1487), True, 'import tensorflow as tf\n'), ((1500, 1533), 'tensorflow.summary.histogram', 'tf.summary.histogram', (['"""b"""', 'self.b'], {}), "('b', self.b)\n", (1520, 1533), True, 'import tensorflow as tf\n'), ((1926, 1937), 'tensorflow.shape', 'tf.shape', (['x'], {}), '(x)\n', (1934, 1937), True, 'import tensorflow as tf\n')] |
import FWCore.ParameterSet.Config as cms
from RecoBTag.SecondaryVertex.trackSelection_cff import *
pfBoostedDoubleSVCA15TagInfos = cms.EDProducer("BoostedDoubleSVProducer",
trackSelectionBlock,
beta = cms.double(1.0),
R0 = cms.double(1.5),
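    # R0 = 1.5 corresponds to the radius of CA15 (Cambridge-Aachen, R = 1.5) jets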
maxSVDeltaRToJet = cms.double(1.0),
trackPairV0Filter = cms.PSet(k0sMassWindow = cms.double(0.03)),
svTagInfos = cms.InputTag("pfInclusiveSecondaryVertexFinderCA15TagInfos"),
weights = cms.InputTag("")
)
pfBoostedDoubleSVCA15TagInfos.trackSelection.jetDeltaRMax = cms.double(1.5)
| [
"FWCore.ParameterSet.Config.double",
"FWCore.ParameterSet.Config.InputTag"
] | [((535, 550), 'FWCore.ParameterSet.Config.double', 'cms.double', (['(1.5)'], {}), '(1.5)\n', (545, 550), True, 'import FWCore.ParameterSet.Config as cms\n'), ((211, 226), 'FWCore.ParameterSet.Config.double', 'cms.double', (['(1.0)'], {}), '(1.0)\n', (221, 226), True, 'import FWCore.ParameterSet.Config as cms\n'), ((237, 252), 'FWCore.ParameterSet.Config.double', 'cms.double', (['(1.5)'], {}), '(1.5)\n', (247, 252), True, 'import FWCore.ParameterSet.Config as cms\n'), ((277, 292), 'FWCore.ParameterSet.Config.double', 'cms.double', (['(1.0)'], {}), '(1.0)\n', (287, 292), True, 'import FWCore.ParameterSet.Config as cms\n'), ((379, 439), 'FWCore.ParameterSet.Config.InputTag', 'cms.InputTag', (['"""pfInclusiveSecondaryVertexFinderCA15TagInfos"""'], {}), "('pfInclusiveSecondaryVertexFinderCA15TagInfos')\n", (391, 439), True, 'import FWCore.ParameterSet.Config as cms\n'), ((455, 471), 'FWCore.ParameterSet.Config.InputTag', 'cms.InputTag', (['""""""'], {}), "('')\n", (467, 471), True, 'import FWCore.ParameterSet.Config as cms\n'), ((343, 359), 'FWCore.ParameterSet.Config.double', 'cms.double', (['(0.03)'], {}), '(0.03)\n', (353, 359), True, 'import FWCore.ParameterSet.Config as cms\n')] |
"""
backend/scoreboard/urls.py
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.1/topics/http/urls/
"""
from django.urls import path
from .views import scores_day, scores_week, scores_month
# pylint: disable=invalid-name
urlpatterns = [
path('day', scores_day),
path('week', scores_week),
path('month', scores_month)
]
| [
"django.urls.path"
] | [((312, 335), 'django.urls.path', 'path', (['"""day"""', 'scores_day'], {}), "('day', scores_day)\n", (316, 335), False, 'from django.urls import path\n'), ((341, 366), 'django.urls.path', 'path', (['"""week"""', 'scores_week'], {}), "('week', scores_week)\n", (345, 366), False, 'from django.urls import path\n'), ((372, 399), 'django.urls.path', 'path', (['"""month"""', 'scores_month'], {}), "('month', scores_month)\n", (376, 399), False, 'from django.urls import path\n')] |
import os
import pytest
from motifscan.config import Config, user_rc_path, user_genome_dir, \
user_motif_dir
from motifscan.exceptions import InvalidConfigFileError, GenomeNotFoundError, \
MotifSetNotFoundError
def test_invalid_config(data_dir):
with pytest.raises(InvalidConfigFileError):
Config(os.path.join(data_dir, 'invalid.motifscanrc'))
def test_config_init(config):
assert len(config._config.sections()) == 3
assert config._config.has_section('motifscan')
assert config._config.has_section('genome')
assert config._config.has_section('motif')
assert config._config.get('motifscan', 'genome_dir') == user_genome_dir
assert config._config.get('motifscan', 'motif_dir') == user_motif_dir
config = Config(path=None)
assert config.path == user_rc_path
def test_get_genome_dir(config):
assert config.get_genome_dir() == user_genome_dir
def test_set_genome_dir(config):
path = 'test_dir'
config.set_genome_dir(path)
assert config.get_genome_dir() == path
def test_get_motif_dir(config):
assert config.get_motif_dir() == user_motif_dir
def test_set_motif_dir(config):
path = 'test_dir'
config.set_motif_dir(path)
assert config.get_motif_dir() == path
def test_set_genome_path(config):
name = 'hg19'
path = 'test_dir'
config.set_genome_path(name, path)
assert config._config.get('genome', name) == path
def test_get_genome_path(config):
name = 'hg19'
path = 'test_dir'
config.set_genome_path(name, path)
assert config.get_genome_path(name) == path
with pytest.raises(GenomeNotFoundError):
assert config.get_genome_path('mm9')
def test_remove_genome_path(config):
name = 'hg19'
path = 'test_dir'
config.set_genome_path(name, path)
assert config.remove_genome_path(name)
with pytest.raises(GenomeNotFoundError):
assert config.remove_genome_path(name)
def test_list_genome_assemblies(config):
config.set_genome_path('hg19', 'test_dir1')
config.set_genome_path('hg18', 'test_dir2')
assert list(config.list_genome_assemblies()) == [('hg19', 'test_dir1'),
('hg18', 'test_dir2')]
def test_has_genome_assembly(config):
name = 'hg19'
path = 'test_dir'
config.set_genome_path(name, path)
assert config.has_genome_assembly(name)
assert not config.has_genome_assembly('mm9')
def test_set_motif_path(config):
name = 'motif_set'
path = 'test_dir'
config.set_motif_path(name, path)
assert config._config.get('motif', name) == path
def test_get_motif_path(config):
name = 'motif_set'
path = 'test_dir'
config.set_motif_path(name, path)
assert config.get_motif_path(name) == path
with pytest.raises(MotifSetNotFoundError):
assert config.get_motif_path('motif_set1')
def test_remove_motif_path(config):
name = 'motif_set'
path = 'test_dir'
config.set_motif_path(name, path)
assert config.remove_motif_path(name)
with pytest.raises(MotifSetNotFoundError):
assert config.remove_motif_path(name)
def test_list_motif_sets(config):
config.set_motif_path('motif_set1', 'test_dir1')
config.set_motif_path('motif_set2', 'test_dir2')
assert list(config.list_motif_sets()) == [('motif_set1', 'test_dir1'),
('motif_set2', 'test_dir2')]
def test_has_motif_set(config):
name = 'motif_set'
path = 'test_dir'
config.set_motif_path(name, path)
assert config.has_motif_set(name)
assert not config.has_motif_set('motif_set1')
def test_write(config):
config.set_genome_dir("genome_root_dir")
config.set_motif_dir("motif_root_dir")
config.set_genome_path('hg19', 'test_dir1')
config.set_genome_path('mm9', 'test_dir2')
config.set_motif_path('motif_set1', 'test_dir3')
config.set_motif_path('motif_set2', 'test_dir4')
config.write()
assert os.path.isfile(config.path)
fin = open(config.path, 'r')
assert fin.read() == "[motifscan]\n" \
"genome_dir = genome_root_dir\n" \
"motif_dir = motif_root_dir\n\n" \
"[genome]\n" \
"hg19 = test_dir1\n" \
"mm9 = test_dir2\n\n" \
"[motif]\n" \
"motif_set1 = test_dir3\n" \
"motif_set2 = test_dir4\n\n"
| [
"os.path.isfile",
"pytest.raises",
"os.path.join",
"motifscan.config.Config"
] | [((756, 773), 'motifscan.config.Config', 'Config', ([], {'path': 'None'}), '(path=None)\n', (762, 773), False, 'from motifscan.config import Config, user_rc_path, user_genome_dir, user_motif_dir\n'), ((3959, 3986), 'os.path.isfile', 'os.path.isfile', (['config.path'], {}), '(config.path)\n', (3973, 3986), False, 'import os\n'), ((267, 304), 'pytest.raises', 'pytest.raises', (['InvalidConfigFileError'], {}), '(InvalidConfigFileError)\n', (280, 304), False, 'import pytest\n'), ((1590, 1624), 'pytest.raises', 'pytest.raises', (['GenomeNotFoundError'], {}), '(GenomeNotFoundError)\n', (1603, 1624), False, 'import pytest\n'), ((1841, 1875), 'pytest.raises', 'pytest.raises', (['GenomeNotFoundError'], {}), '(GenomeNotFoundError)\n', (1854, 1875), False, 'import pytest\n'), ((2772, 2808), 'pytest.raises', 'pytest.raises', (['MotifSetNotFoundError'], {}), '(MotifSetNotFoundError)\n', (2785, 2808), False, 'import pytest\n'), ((3033, 3069), 'pytest.raises', 'pytest.raises', (['MotifSetNotFoundError'], {}), '(MotifSetNotFoundError)\n', (3046, 3069), False, 'import pytest\n'), ((321, 366), 'os.path.join', 'os.path.join', (['data_dir', '"""invalid.motifscanrc"""'], {}), "(data_dir, 'invalid.motifscanrc')\n", (333, 366), False, 'import os\n')] |
import mapzen.whosonfirst.spatial
import mapzen.whosonfirst.placetypes
import mapzen.whosonfirst.utils
import logging
import os
import json
import requests
# as in the wof-pip-server this is part of go-whosonfirst-pip-v2
class pip (mapzen.whosonfirst.spatial.base):
def __init__(self, **kwargs):
mapzen.whosonfirst.spatial.base.__init__(self, **kwargs)
self.scheme = kwargs.get('scheme', 'http')
self.hostname = kwargs.get('hostname', 'localhost')
self.port = kwargs.get('port', 8080)
self.data_root = kwargs.get("data_root", "https://data.whosonfirst.org")
def point_in_polygon(self, lat, lon, **kwargs):
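        # query the HTTP point-in-polygon endpoint and yield each matching Who's On First record,
        # optionally loaded as a full GeoJSON feature via row_to_feature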
filters = kwargs.get("filters", {})
params = {
"latitude": lat,
"longitude": lon
}
if filters.get("wof:placetype_id", None):
pt = mapzen.whosonfirst.placetypes.placetype(filters["wof:placetype_id"])
params["placetype"] = str(pt)
existential = (
"wof:is_supersedes",
"wof:is_deprecated",
"wof:is_ceased",
"wof:is_current",
)
for flag in existential:
if filters.get(flag, None) != None:
param = flag.replace("wof:", "")
params[param] = filters[flag]
endpoint = "%s://%s" % (self.scheme, self.hostname)
if self.port:
endpoint = "%s:%s" % (endpoint, self.port)
try:
rsp = requests.get(endpoint, params=params)
if rsp.status_code != requests.codes.ok:
logging.warning(rsp.content)
rsp.raise_for_status()
data = json.loads(rsp.content)
except Exception as e:
logging.error("failed to PIP with %s (%s) because %s" % (endpoint, params, e))
raise Exception(e)
for row in data:
if kwargs.get("as_feature", False):
row = self.row_to_feature(row)
yield row
def row_to_feature(self, row):
wofid = row["wof:id"]
repo = row["wof:repo"]
root = self.data_root
# please sort out using repo when fetching local files
root = os.path.join(root, "data")
return mapzen.whosonfirst.utils.load(root, wofid)
# as in an endpoint that implements the whosonfirst-api
class api (mapzen.whosonfirst.spatial.base):
def __init__(self, **kwargs):
mapzen.whosonfirst.spatial.base.__init__(self, **kwargs)
self.endpoint = kwargs.get('endpoint', 'https://api.whosonfirst.org/rest')
self.access_token = kwargs.get('access_token', None)
self.data_root = kwargs.get("data_root", "https://data.whosonfirst.org")
self.insecure = kwargs.get("insecure", False)
def point_in_polygon(self, lat, lon, **kwargs):
filters = kwargs.get("filters", {})
params = {
"access_token": self.access_token,
"method": "whosonfirst.places.getByLatLon",
"latitude": lat,
"longitude": lon,
}
if filters.get("wof:placetype_id", None):
pt = mapzen.whosonfirst.placetypes.placetype(filters["wof:placetype_id"])
params["placetype"] = str(pt)
existential = (
"wof:is_superseded",
"wof:is_deprecated",
"wof:is_ceased",
"wof:is_current",
)
for flag in existential:
if filters.get(flag, None) != None:
param = flag.replace("wof:", "")
params[param] = filters[flag]
if kwargs.get("extras", None):
params["extras"] = kwargs["extras"]
try:
if self.insecure:
rsp = requests.get(self.endpoint, params=params, verify=False)
else:
rsp = requests.get(self.endpoint, params=params)
if rsp.status_code != requests.codes.ok:
rsp.raise_for_status()
data = json.loads(rsp.content)
except Exception as e:
logging.error("failed to PIP with %s (%s) because %s" % (self.endpoint, params, e))
raise Exception(e)
for row in data["places"] :
if kwargs.get("as_feature", False):
row = self.row_to_feature(row)
yield row
# intersects - we could call 'whosonfirst.places.getIntersects' here but
# since that only does bounding boxes it's likely to confuse things since
# the postgis one assumes polygons... (20170502/thisisaaronland)
def row_to_feature(self, row):
wofid = row["wof:id"]
repo = row["wof:repo"]
root = self.data_root
# please sort out using repo when fetching local files
root = os.path.join(root, "data")
return mapzen.whosonfirst.utils.load(root, wofid, insecure=self.insecure)
| [
"json.loads",
"logging.warning",
"os.path.join",
"requests.get",
"logging.error"
] | [((2215, 2241), 'os.path.join', 'os.path.join', (['root', '"""data"""'], {}), "(root, 'data')\n", (2227, 2241), False, 'import os\n'), ((4776, 4802), 'os.path.join', 'os.path.join', (['root', '"""data"""'], {}), "(root, 'data')\n", (4788, 4802), False, 'import os\n'), ((1486, 1523), 'requests.get', 'requests.get', (['endpoint'], {'params': 'params'}), '(endpoint, params=params)\n', (1498, 1523), False, 'import requests\n'), ((1682, 1705), 'json.loads', 'json.loads', (['rsp.content'], {}), '(rsp.content)\n', (1692, 1705), False, 'import json\n'), ((4002, 4025), 'json.loads', 'json.loads', (['rsp.content'], {}), '(rsp.content)\n', (4012, 4025), False, 'import json\n'), ((1594, 1622), 'logging.warning', 'logging.warning', (['rsp.content'], {}), '(rsp.content)\n', (1609, 1622), False, 'import logging\n'), ((1751, 1829), 'logging.error', 'logging.error', (["('failed to PIP with %s (%s) because %s' % (endpoint, params, e))"], {}), "('failed to PIP with %s (%s) because %s' % (endpoint, params, e))\n", (1764, 1829), False, 'import logging\n'), ((3749, 3805), 'requests.get', 'requests.get', (['self.endpoint'], {'params': 'params', 'verify': '(False)'}), '(self.endpoint, params=params, verify=False)\n', (3761, 3805), False, 'import requests\n'), ((3846, 3888), 'requests.get', 'requests.get', (['self.endpoint'], {'params': 'params'}), '(self.endpoint, params=params)\n', (3858, 3888), False, 'import requests\n'), ((4071, 4158), 'logging.error', 'logging.error', (["('failed to PIP with %s (%s) because %s' % (self.endpoint, params, e))"], {}), "('failed to PIP with %s (%s) because %s' % (self.endpoint,\n params, e))\n", (4084, 4158), False, 'import logging\n')] |
import pandas as pd
# data from https://archive.ics.uci.edu/ml/datasets/Computer+Hardware
df = pd.read_csv('../data/machine.data', header=None)
df.columns = [
'VENDOR', 'MODEL', 'MYCT', 'MMIN', 'MMAX',
'CACH', 'CHMIN', 'CHMAX', 'PRP', 'ERP'
]
import numpy as np
X = df[['PRP']].values
y = df['ERP'].values
import matplotlib.pyplot as plt
def lin_regplot(X, y, model):
plt.scatter(X, y, c='blue')
plt.plot(X, model.predict(X), color='red')
from sklearn.tree import DecisionTreeRegressor
tree = DecisionTreeRegressor(max_depth=3)
tree.fit(X, y)
sort_idx = X.flatten().argsort()
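# sort the samples by PRP so the tree's piecewise-constant predictions are drawn as a connected line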
lin_regplot(X[sort_idx], y[sort_idx], tree)
plt.xlabel('[PRP]')
plt.ylabel('[ERP]')
plt.show()
# Random Forests
from sklearn.model_selection import train_test_split
X = df[['CACH', 'CHMIN', 'CHMAX', 'PRP']].values
y = df['ERP'].values
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.4, random_state=1
)
from sklearn.ensemble import RandomForestRegressor
forest = RandomForestRegressor(
n_estimators=500, criterion="mse", random_state=1, n_jobs=2
)
forest.fit(X_train, y_train)
y_train_pred = forest.predict(X_train)
y_test_pred = forest.predict(X_test)
from sklearn.metrics import mean_squared_error
print('MSE train: %.3f, test: %.3f' % (
mean_squared_error(y_train, y_train_pred),
mean_squared_error(y_test, y_test_pred)
))
from sklearn.metrics import r2_score
print(
'R^2 train: %.3f, test: %.3f' % (
r2_score(y_train, y_train_pred),
r2_score(y_test, y_test_pred)
)
)
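# residual plot: residuals should scatter randomly around the y=0 line; visible structure indicates systematic error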
plt.scatter(
y_train_pred, y_train_pred - y_train, c='black',
marker='o', s=35, alpha=0.5, label='Training data'
)
plt.scatter(
y_test_pred, y_test_pred - y_test, c='lightgreen',
marker='s', s=35, alpha=0.7, label='Test data'
)
plt.xlabel('Predicted values')
plt.ylabel('Residuals')
plt.legend(loc='upper left')
plt.hlines(y=0, xmin=0, xmax=1000, lw=2, color='red')
plt.xlim([0, 1000])
plt.show() | [
"sklearn.ensemble.RandomForestRegressor",
"sklearn.tree.DecisionTreeRegressor",
"pandas.read_csv",
"matplotlib.pyplot.ylabel",
"sklearn.model_selection.train_test_split",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.hlines",
"sklearn.metrics.mean_squared_error",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.xlim",
"sklearn.metrics.r2_score",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show"
] | [((96, 144), 'pandas.read_csv', 'pd.read_csv', (['"""../data/machine.data"""'], {'header': 'None'}), "('../data/machine.data', header=None)\n", (107, 144), True, 'import pandas as pd\n'), ((515, 549), 'sklearn.tree.DecisionTreeRegressor', 'DecisionTreeRegressor', ([], {'max_depth': '(3)'}), '(max_depth=3)\n', (536, 549), False, 'from sklearn.tree import DecisionTreeRegressor\n'), ((642, 661), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""[PRP]"""'], {}), "('[PRP]')\n", (652, 661), True, 'import matplotlib.pyplot as plt\n'), ((662, 681), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""[ERP]"""'], {}), "('[ERP]')\n", (672, 681), True, 'import matplotlib.pyplot as plt\n'), ((682, 692), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (690, 692), True, 'import matplotlib.pyplot as plt\n'), ((869, 922), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(0.4)', 'random_state': '(1)'}), '(X, y, test_size=0.4, random_state=1)\n', (885, 922), False, 'from sklearn.model_selection import train_test_split\n'), ((989, 1075), 'sklearn.ensemble.RandomForestRegressor', 'RandomForestRegressor', ([], {'n_estimators': '(500)', 'criterion': '"""mse"""', 'random_state': '(1)', 'n_jobs': '(2)'}), "(n_estimators=500, criterion='mse', random_state=1,\n n_jobs=2)\n", (1010, 1075), False, 'from sklearn.ensemble import RandomForestRegressor\n'), ((1533, 1650), 'matplotlib.pyplot.scatter', 'plt.scatter', (['y_train_pred', '(y_train_pred - y_train)'], {'c': '"""black"""', 'marker': '"""o"""', 's': '(35)', 'alpha': '(0.5)', 'label': '"""Training data"""'}), "(y_train_pred, y_train_pred - y_train, c='black', marker='o', s=\n 35, alpha=0.5, label='Training data')\n", (1544, 1650), True, 'import matplotlib.pyplot as plt\n'), ((1656, 1770), 'matplotlib.pyplot.scatter', 'plt.scatter', (['y_test_pred', '(y_test_pred - y_test)'], {'c': '"""lightgreen"""', 'marker': '"""s"""', 's': '(35)', 'alpha': '(0.7)', 'label': '"""Test data"""'}), "(y_test_pred, y_test_pred - y_test, c='lightgreen', marker='s',\n s=35, alpha=0.7, label='Test data')\n", (1667, 1770), True, 'import matplotlib.pyplot as plt\n'), ((1777, 1807), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Predicted values"""'], {}), "('Predicted values')\n", (1787, 1807), True, 'import matplotlib.pyplot as plt\n'), ((1808, 1831), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Residuals"""'], {}), "('Residuals')\n", (1818, 1831), True, 'import matplotlib.pyplot as plt\n'), ((1832, 1860), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper left"""'}), "(loc='upper left')\n", (1842, 1860), True, 'import matplotlib.pyplot as plt\n'), ((1861, 1914), 'matplotlib.pyplot.hlines', 'plt.hlines', ([], {'y': '(0)', 'xmin': '(0)', 'xmax': '(1000)', 'lw': '(2)', 'color': '"""red"""'}), "(y=0, xmin=0, xmax=1000, lw=2, color='red')\n", (1871, 1914), True, 'import matplotlib.pyplot as plt\n'), ((1915, 1934), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[0, 1000]'], {}), '([0, 1000])\n', (1923, 1934), True, 'import matplotlib.pyplot as plt\n'), ((1935, 1945), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1943, 1945), True, 'import matplotlib.pyplot as plt\n'), ((385, 412), 'matplotlib.pyplot.scatter', 'plt.scatter', (['X', 'y'], {'c': '"""blue"""'}), "(X, y, c='blue')\n", (396, 412), True, 'import matplotlib.pyplot as plt\n'), ((1274, 1315), 'sklearn.metrics.mean_squared_error', 'mean_squared_error', (['y_train', 'y_train_pred'], {}), '(y_train, y_train_pred)\n', (1292, 1315), False, 'from sklearn.metrics import 
mean_squared_error\n'), ((1321, 1360), 'sklearn.metrics.mean_squared_error', 'mean_squared_error', (['y_test', 'y_test_pred'], {}), '(y_test, y_test_pred)\n', (1339, 1360), False, 'from sklearn.metrics import mean_squared_error\n'), ((1454, 1485), 'sklearn.metrics.r2_score', 'r2_score', (['y_train', 'y_train_pred'], {}), '(y_train, y_train_pred)\n', (1462, 1485), False, 'from sklearn.metrics import r2_score\n'), ((1495, 1524), 'sklearn.metrics.r2_score', 'r2_score', (['y_test', 'y_test_pred'], {}), '(y_test, y_test_pred)\n', (1503, 1524), False, 'from sklearn.metrics import r2_score\n')] |
from typing import (
Dict,
Tuple,
Type,
TYPE_CHECKING,
)
from cytoolz import (
compose,
)
import rlp
from eth_typing import (
BlockIdentifier,
Hash32,
)
from eth_hash.auto import keccak
from eth.db.trie import make_trie_root_and_nodes
from eth.rlp.headers import BlockHeader
from eth.rlp.receipts import Receipt
from p2p.exceptions import MalformedMessage
from p2p.protocol import (
Command,
)
from trinity.protocol.common.managers import (
BaseRequestManager,
)
from trinity.rlp.block_body import BlockBody
from .commands import (
BlockBodies,
BlockHeaders,
NodeData,
Receipts,
)
from .requests import (
BlockBodiesRequest,
HeaderRequest,
NodeDataRequest,
ReceiptsRequest,
)
if TYPE_CHECKING:
from .peer import ETHPeer # noqa: F401
BaseGetBlockHeadersRequestManager = BaseRequestManager[
'ETHPeer',
HeaderRequest,
Tuple[BlockHeader, ...],
Tuple[BlockHeader, ...],
]
class GetBlockHeadersRequestManager(BaseGetBlockHeadersRequestManager):
msg_queue_maxsize = 100
_response_msg_type: Type[Command] = BlockHeaders
# All `RequestManager` classes are expected to implement the `__call__`
# method, including changing the function signature, thus the
# `# type: ignore` here is both expected and required.
async def __call__(self, # type: ignore
block_number_or_hash: BlockIdentifier,
max_headers: int = None,
skip: int = 0,
reverse: bool = True,
timeout: int = None) -> Tuple[BlockHeader, ...]:
request = HeaderRequest(
block_number_or_hash,
max_headers,
skip,
reverse,
)
return await self._request_and_wait(request, timeout)
def _send_sub_proto_request(self, request: HeaderRequest) -> None:
self._peer.sub_proto.send_get_block_headers(request)
async def _normalize_response(self,
msg: Tuple[BlockHeader, ...],
) -> Tuple[BlockHeader, ...]:
return msg
def _get_item_count(self, msg: Tuple[BlockHeader, ...]) -> int:
return len(msg)
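# each bundle pairs a trie node's keccak hash with its raw bytes: (node_hash, node_data)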
NodeDataBundles = Tuple[Tuple[Hash32, bytes], ...]
BaseGetNodeDataRequestManager = BaseRequestManager[
'ETHPeer',
NodeDataRequest,
Tuple[bytes, ...],
NodeDataBundles,
]
class GetNodeDataRequestManager(BaseGetNodeDataRequestManager):
msg_queue_maxsize = 100
_response_msg_type: Type[Command] = NodeData
async def __call__(self, # type: ignore
node_hashes: Tuple[Hash32, ...],
timeout: int = None) -> NodeDataBundles:
request = NodeDataRequest(node_hashes)
return await self._request_and_wait(request, timeout)
def _send_sub_proto_request(self, request: NodeDataRequest) -> None:
self._peer.sub_proto.send_get_node_data(request)
async def _normalize_response(self,
msg: Tuple[bytes, ...]
) -> NodeDataBundles:
if not isinstance(msg, tuple):
raise MalformedMessage("Invalid msg, must be tuple of byte strings")
elif not all(isinstance(item, bytes) for item in msg):
raise MalformedMessage("Invalid msg, must be tuple of byte strings")
node_keys = await self._run_in_executor(tuple, map(keccak, msg))
return tuple(zip(node_keys, msg))
def _get_item_count(self, msg: Tuple[bytes, ...]) -> int:
return len(msg)
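# (receipts for one block, (receipt trie root, receipt trie node data))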
ReceiptsBundles = Tuple[Tuple[Tuple[Receipt, ...], Tuple[Hash32, Dict[Hash32, bytes]]], ...]
ReceiptsByBlock = Tuple[Tuple[Receipt, ...], ...]
BaseGetReceiptsRequestManager = BaseRequestManager[
'ETHPeer',
ReceiptsRequest,
ReceiptsByBlock,
ReceiptsBundles,
]
class GetReceiptsRequestManager(BaseGetReceiptsRequestManager):
msg_queue_maxsize = 100
_response_msg_type: Type[Command] = Receipts
async def __call__(self, # type: ignore
headers: Tuple[BlockHeader, ...],
timeout: int = None) -> ReceiptsBundles:
request = ReceiptsRequest(headers)
return await self._request_and_wait(request, timeout)
def _send_sub_proto_request(self, request: ReceiptsRequest) -> None:
self._peer.sub_proto.send_get_receipts(request)
async def _normalize_response(self,
response: Tuple[Tuple[Receipt, ...], ...],
) -> ReceiptsBundles:
if not isinstance(response, tuple):
raise MalformedMessage(
"`GetReceipts` response must be a tuple. Got: {0}".format(type(response))
)
elif not all(isinstance(item, tuple) for item in response):
raise MalformedMessage("`GetReceipts` response must be a tuple of tuples")
for item in response:
if not all(isinstance(value, Receipt) for value in item):
raise MalformedMessage(
"Response must be a tuple of tuples of `BlockHeader` objects"
)
trie_roots_and_data = await self._run_in_executor(
tuple,
map(make_trie_root_and_nodes, response),
)
receipt_bundles = tuple(zip(response, trie_roots_and_data))
return receipt_bundles
def _get_item_count(self, msg: ReceiptsByBlock) -> int:
return sum(len(item) for item in msg)
# (BlockBody, (txn_root, txn_trie_data), uncles_hash)
BlockBodyBundles = Tuple[Tuple[
BlockBody,
Tuple[Hash32, Dict[Hash32, bytes]],
Hash32,
], ...]
BaseGetBlockBodiesManager = BaseRequestManager[
'ETHPeer',
BlockBodiesRequest,
Tuple[BlockBody, ...],
BlockBodyBundles,
]
class GetBlockBodiesRequestManager(BaseGetBlockBodiesManager):
msg_queue_maxsize = 100
_response_msg_type: Type[Command] = BlockBodies
async def __call__(self, # type: ignore
headers: Tuple[Hash32, ...],
timeout: int = None) -> BlockBodyBundles:
request = BlockBodiesRequest(headers)
return await self._request_and_wait(request, timeout)
def _send_sub_proto_request(self, request: BlockBodiesRequest) -> None:
self._peer.sub_proto.send_get_block_bodies(request)
async def _normalize_response(self,
response: Tuple[BlockBody, ...]) -> BlockBodyBundles:
if not isinstance(response, tuple):
raise MalformedMessage(
"`GetBlockBodies` response must be a tuple. Got: {0}".format(type(response))
)
elif not all(isinstance(item, BlockBody) for item in response):
raise MalformedMessage("`GetBlockBodies` response must be a tuple of block bodies")
uncles_hashes = await self._run_in_executor(
tuple,
map(compose(keccak, rlp.encode), tuple(body.uncles for body in response)),
)
transaction_roots_and_trie_data = await self._run_in_executor(
tuple,
map(make_trie_root_and_nodes, tuple(body.transactions for body in response)),
)
body_bundles = tuple(zip(response, transaction_roots_and_trie_data, uncles_hashes))
return body_bundles
def _get_item_count(self, msg: Tuple[BlockBody, ...]) -> int:
return len(msg)
| [
"p2p.exceptions.MalformedMessage",
"cytoolz.compose"
] | [((3200, 3262), 'p2p.exceptions.MalformedMessage', 'MalformedMessage', (['"""Invalid msg, must be tuple of byte strings"""'], {}), "('Invalid msg, must be tuple of byte strings')\n", (3216, 3262), False, 'from p2p.exceptions import MalformedMessage\n'), ((3344, 3406), 'p2p.exceptions.MalformedMessage', 'MalformedMessage', (['"""Invalid msg, must be tuple of byte strings"""'], {}), "('Invalid msg, must be tuple of byte strings')\n", (3360, 3406), False, 'from p2p.exceptions import MalformedMessage\n'), ((4877, 4945), 'p2p.exceptions.MalformedMessage', 'MalformedMessage', (['"""`GetReceipts` response must be a tuple of tuples"""'], {}), "('`GetReceipts` response must be a tuple of tuples')\n", (4893, 4945), False, 'from p2p.exceptions import MalformedMessage\n'), ((5069, 5148), 'p2p.exceptions.MalformedMessage', 'MalformedMessage', (['"""Response must be a tuple of tuples of `BlockHeader` objects"""'], {}), "('Response must be a tuple of tuples of `BlockHeader` objects')\n", (5085, 5148), False, 'from p2p.exceptions import MalformedMessage\n'), ((6796, 6873), 'p2p.exceptions.MalformedMessage', 'MalformedMessage', (['"""`GetBlockBodies` response must be a tuple of block bodies"""'], {}), "('`GetBlockBodies` response must be a tuple of block bodies')\n", (6812, 6873), False, 'from p2p.exceptions import MalformedMessage\n'), ((6963, 6990), 'cytoolz.compose', 'compose', (['keccak', 'rlp.encode'], {}), '(keccak, rlp.encode)\n', (6970, 6990), False, 'from cytoolz import compose\n')] |
# Generated by Django 3.1.7 on 2021-02-28 21:36
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('posts', '0009_post_audio'),
]
operations = [
migrations.AddField(
model_name='post',
name='audio_title',
field=models.CharField(max_length=30, null=True),
),
]
| [
"django.db.models.CharField"
] | [((327, 369), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(30)', 'null': '(True)'}), '(max_length=30, null=True)\n', (343, 369), False, 'from django.db import migrations, models\n')] |
# -*- coding:utf-8 -*-
import json
from flask import Blueprint
from flask_restful import Resource
from atp.api.comm_log import logger
from atp.api.mysql_manager import (
ApiCompanyInfoManager, ApiSystemInfoManager, ApiIntfInfoManager, ApiProjectInfoManager,
ApiProjectIntfRelationManager, ApiProjectSystemRelationManager
)
from atp.views.wrappers import timer, login_check, developer_check
from atp.utils.common import get_request_json, make_response, username_to_nickname
from atp.api.redis_api import RedisManager
from flask import request
redis = RedisManager()
api_project = Blueprint('api_project_interface', __name__)
class ApiProject(Resource):
def __init__(self):
self.data = get_request_json()
self.username = redis.get_username(request.headers.get('X-Token'))
self.acim = ApiCompanyInfoManager()
self.asim = ApiSystemInfoManager()
self.aiim = ApiIntfInfoManager()
self.apim = ApiProjectInfoManager()
self.apsrm = ApiProjectSystemRelationManager()
self.apirm = ApiProjectIntfRelationManager()
@timer
def post(self, action):
if action == 'add':
return self.add_project()
elif action == 'edit':
return self.edit_project()
elif action == 'delete':
return self.delete_project()
elif action == 'list':
return self.project_list_by_company_id()
elif action == 'includeSystem':
return self.include_system()
elif action == 'includeIntf':
return self.include_intf()
elif action == 'excludeSystem':
return self.exclude_system()
elif action == 'excludeIntf':
return self.exclude_intf()
elif action == 'getIncludeIntfList':
return self.get_include_intf_list()
elif action == 'subtree':
return self.subtree()
else:
return make_response({"code": "100", "desc": "url错误,不存在的接口动作<{action}>".format(action=action)})
@developer_check
def add_project(self):
try:
company_id = self.data.pop('companyId')
project_name = self.data.pop('projectName')
simple_desc = self.data.pop('simpleDesc', None)
except KeyError:
return make_response({"code": "100", "desc": "入参校验失败"})
project_name = str(project_name).strip()
if self.apim.get_project(project_name=project_name, api_company_id=company_id):
return make_response({"code": "201", "desc": "公司下存在相同项目名称\"{}\", 无法新增".format(project_name)})
self.apim.insert_project(project_name=project_name, simple_desc=simple_desc, api_company_id=company_id,
creator=self.username)
return make_response({"code": "000", "desc": "项目\"{}\"增加成功".format(project_name)})
@developer_check
def edit_project(self):
try:
project_id = self.data.pop('projectId')
project_name = self.data.pop('projectName')
simple_desc = self.data.pop('simpleDesc', None)
except KeyError:
return make_response({"code": "100", "desc": "入参校验失败"})
project_obj = self.apim.get_project(id=project_id)
if not project_obj:
return make_response({"code": "202", "desc": "项目id\"{}\"不存在, 请刷新后重试".format(project_id)})
exist_project_obj = self.apim.get_project(project_name=project_name, api_company_id=project_obj.api_company_id)
if exist_project_obj and exist_project_obj.id != int(project_id):
return make_response({"code": "201", "desc": "公司下存在相同工项目\"{}\", 无法修改".format(project_name)})
self.apim.update_project(project_id, project_name=project_name, simple_desc=simple_desc,
last_modifier=self.username)
return make_response({"code": "000", "desc": "公司\"{}\"修改成功".format(project_name)})
@developer_check
def delete_project(self):
try:
project_id = self.data.pop('projectId')
except KeyError:
return make_response({"code": "100", "desc": "入参校验失败"})
if not self.apim.get_project(id=project_id):
return make_response({"code": "202", "desc": "项目id\"{}\"不存在, 请刷新后重试".format(project_id)})
relation_objs = self.apirm.get_relations(api_project_id=project_id)
if relation_objs:
return make_response({"code": "300", "desc": "项目下已引入{}个接口,无法直接删除".format(len(relation_objs))})
self.apim.delete_project(project_id)
return make_response({"code": "000", "desc": "项目删除成功"})
@login_check
def project_list_by_company_id(self):
try:
company_id = int(self.data.pop('companyId', 0))
except KeyError:
return make_response({"code": "100", "desc": "入参校验失败"})
if company_id:
p_objs = self.apim.get_projects_reverse(api_company_id=company_id)
else:
p_objs = self.apim.get_projects()
        # Query the total number of testcases in each project, grouped by project id
count_res = self.apim.count_api_project_subtree_group_by_project_id(company_id)
res_list = []
for p_obj in p_objs:
testcase_num = [row[1] for row in count_res if row[0] == p_obj.id][0]
if testcase_num:
has_children = True
else:
r_obj = ApiProjectSystemRelationManager.get_relation(api_project_id=p_obj.id)
has_children = True if r_obj else False
res_list.append(
{
'projectId': p_obj.id,
'projectName': p_obj.project_name,
'simpleDesc': p_obj.simple_desc,
'creator': p_obj.creator,
'last_modifier': p_obj.last_modifier,
'hasChildren': has_children,
'testcaseNum': testcase_num,
}
)
return make_response({"code": "000", "projectList": res_list})
@developer_check
def include_system(self):
try:
project_id = self.data.pop('projectId')
system_id_list = self.data.pop('systemIdList')
except KeyError:
return make_response({"code": "100", "desc": "入参校验失败"})
if not system_id_list:
return make_response({"code": "200", "desc": "未选择工程,无法引入"})
if len(system_id_list) != len(set(system_id_list)):
return make_response({"code": "101", "desc": "入参校验失败, systemIdList包含重复元素"})
exist_relation_objs = self.apsrm.get_relations(api_project_id=project_id)
for exist_relation_obj in exist_relation_objs:
if exist_relation_obj.api_system_id in system_id_list:
system_id_list.remove(exist_relation_obj.api_system_id)
insert_list = []
for system_id in system_id_list:
insert_list.append(
{
'api_project_id': project_id,
'api_system_id': system_id
}
)
self.apsrm.batch_insert_relation(insert_list)
return make_response({"code": "000", "desc": "{}个工程引入成功".format(len(system_id_list))})
@developer_check
def include_intf(self):
try:
project_id = self.data.pop('projectId')
intf_id_list = self.data.pop('intfIdList')
except KeyError:
return make_response({"code": "100", "desc": "入参校验失败"})
if not intf_id_list:
return make_response({"code": "200", "desc": "未选择接口,无法引入"})
if len(intf_id_list) != len(set(intf_id_list)):
return make_response({"code": "101", "desc": "入参校验失败, intf_id_list包含重复元素"})
exist_relation_objs = self.apirm.get_relations(api_project_id=project_id)
for exist_relation_obj in exist_relation_objs:
if exist_relation_obj.api_intf_id in intf_id_list:
intf_id_list.remove(exist_relation_obj.api_intf_id)
insert_list = []
for intf_id in intf_id_list:
insert_list.append(
{
'api_project_id': project_id,
'api_intf_id': intf_id
}
)
self.apirm.batch_insert_relation(insert_list)
return make_response({"code": "000", "desc": "{}个接口引入成功".format(len(intf_id_list))})
@developer_check
def exclude_system(self):
try:
project_id = self.data.pop('projectId')
system_id = self.data.pop('systemId')
except KeyError:
return make_response({"code": "100", "desc": "入参校验失败"})
obj = self.apsrm.get_relation(api_project_id=project_id, api_system_id=system_id)
if not obj:
return make_response({"code": "200", "desc": "此工程未被引入,请刷新后重试"})
p_i_relation_objs = self.apirm.get_relations(api_project_id=project_id)
intf_id_list = [p_i_relation_obj.api_intf_id for p_i_relation_obj in p_i_relation_objs]
intf_objs = self.aiim.get_intfs_in_id_list(intf_id_list)
for intf_obj in intf_objs:
if intf_obj.api_system_id == int(system_id):
return make_response({"code": "300", "desc": "此工程下已引入接口,无法直接去除"})
self.apsrm.delete_relation(id_=obj.id)
return make_response({"code": "000", "desc": "工程去除成功"})
@developer_check
def exclude_intf(self):
try:
project_id = self.data.pop('projectId')
intf_id = self.data.pop('intfId')
except KeyError:
return make_response({"code": "100", "desc": "入参校验失败"})
obj = self.apirm.get_relation(api_project_id=project_id, api_intf_id=intf_id)
if not obj:
return make_response({"code": "200", "desc": "此接口未被引入,请刷新后重试"})
self.apirm.delete_relation(id_=obj.id)
return make_response({"code": "000", "desc": "接口去除成功"})
@developer_check
def get_include_intf_list(self):
"""
Input:
{"projectId":"7"}
Return:
{
"code": "000",
"includeIntfList": [
441,
457,
653,
658,
679,
737,
680,
765
]
}
"""
try:
project_id = self.data.pop('projectId')
except KeyError:
return make_response({"code": "100", "desc": "入参校验失败"})
objs = self.apirm.get_relations(api_project_id=project_id)
intf_id_list = [obj.api_intf_id for obj in objs]
return make_response({"code": "000", "includeIntfList": intf_id_list})
def subtree(self):
try:
project_id = int(self.data.pop('projectId'))
except (KeyError, ValueError):
return make_response({"code": "100", "desc": "入参校验失败"})
subtree = []
index_id = 0
result_list = self.apim.query_api_project_subtree(project_id)
patch_result_list = self.apim.query_api_project_subtree_patch(project_id)
result_dic = db_result_to_map(result_list, patch_result_list)
for p_k, p_dic in result_dic.items():
p_name = p_dic.pop('name')
index_id += 1
p_tree = {
'id': index_id,
'label': p_name,
'projectId': p_k,
'children': []
}
for s_k, s_dic in p_dic.items():
s_name = s_dic.pop('name')
index_id += 1
s_tree = {
'id': index_id,
'label': s_name,
'systemId': s_k,
'children': []
}
for i_k, i_dic in s_dic.items():
i_name = i_dic.pop('name')
index_id += 1
i_tree = {
'id': index_id,
'label': i_name,
'intfId': i_k,
'children': []
}
for t_k, t_dic in i_dic.items():
index_id += 1
t_tree = {
'id': index_id,
'label': '{0}_{1}'.format(t_k, t_dic['name']),
'testcaseId': t_k,
}
i_tree['children'].append(t_tree)
s_tree['children'].append(i_tree)
p_tree['children'].append(s_tree)
subtree.append(p_tree)
return make_response({"code": "000", "data": subtree[0]['children']})
def db_result_to_map(query_res, patch_res=None):
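    """
    Fold flat subtree query rows into a nested dict keyed as
    project_id -> system_id -> intf_id -> testcase_id, with a 'name' entry at every level.
    Rows are (project_id, project_name[, system_id, system_name[, intf_id, intf_name
    [, testcase_id, testcase_name]]]); rows from patch_res only add systems that are
    missing under projects already present in the result.
    """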
mapped_dic = {}
if not patch_res:
patch_res = []
for row in query_res:
width = len(row)
# print(row)
if row[0] not in mapped_dic:
mapped_dic[row[0]] = {'name': row[1]}
if width >= 4 and row[2]:
mapped_dic[row[0]][row[2]] = {'name': row[3]}
if width >= 6 and row[4]:
mapped_dic[row[0]][row[2]][row[4]] = {'name': row[5]}
if width >= 8 and row[6]:
mapped_dic[row[0]][row[2]][row[4]][row[6]] = {'name': row[7]}
else:
if width >= 4 and row[2] not in mapped_dic[row[0]]:
mapped_dic[row[0]][row[2]] = {'name': row[3]}
if width >= 6 and row[4]:
mapped_dic[row[0]][row[2]][row[4]] = {'name': row[5]}
if width >= 8 and row[6]:
mapped_dic[row[0]][row[2]][row[4]][row[6]] = {'name': row[7]}
else:
if width >= 6 and row[4] not in mapped_dic[row[0]][row[2]]:
mapped_dic[row[0]][row[2]][row[4]] = {'name': row[5]}
if width >= 8 and row[6]:
mapped_dic[row[0]][row[2]][row[4]][row[6]] = {'name': row[7]}
else:
if width >= 8 and row[6] not in mapped_dic[row[0]][row[2]][row[4]]:
mapped_dic[row[0]][row[2]][row[4]][row[6]] = {'name': row[7]}
for row in patch_res:
if row[0] in mapped_dic:
if row[2] and row[2] not in mapped_dic[row[0]]:
mapped_dic[row[0]][row[2]] = {'name': row[3]}
# print(json_dumps(mapped_dic))
# print(mapped_dic)
return mapped_dic
| [
"atp.api.mysql_manager.ApiProjectIntfRelationManager",
"atp.api.mysql_manager.ApiProjectSystemRelationManager",
"atp.api.mysql_manager.ApiIntfInfoManager",
"atp.api.mysql_manager.ApiCompanyInfoManager",
"atp.api.mysql_manager.ApiProjectInfoManager",
"atp.api.redis_api.RedisManager",
"atp.api.mysql_manager.ApiProjectSystemRelationManager.get_relation",
"atp.api.mysql_manager.ApiSystemInfoManager",
"atp.utils.common.get_request_json",
"flask.Blueprint",
"flask.request.headers.get",
"atp.utils.common.make_response"
] | [((562, 576), 'atp.api.redis_api.RedisManager', 'RedisManager', ([], {}), '()\n', (574, 576), False, 'from atp.api.redis_api import RedisManager\n'), ((591, 635), 'flask.Blueprint', 'Blueprint', (['"""api_project_interface"""', '__name__'], {}), "('api_project_interface', __name__)\n", (600, 635), False, 'from flask import Blueprint\n'), ((710, 728), 'atp.utils.common.get_request_json', 'get_request_json', ([], {}), '()\n', (726, 728), False, 'from atp.utils.common import get_request_json, make_response, username_to_nickname\n'), ((824, 847), 'atp.api.mysql_manager.ApiCompanyInfoManager', 'ApiCompanyInfoManager', ([], {}), '()\n', (845, 847), False, 'from atp.api.mysql_manager import ApiCompanyInfoManager, ApiSystemInfoManager, ApiIntfInfoManager, ApiProjectInfoManager, ApiProjectIntfRelationManager, ApiProjectSystemRelationManager\n'), ((868, 890), 'atp.api.mysql_manager.ApiSystemInfoManager', 'ApiSystemInfoManager', ([], {}), '()\n', (888, 890), False, 'from atp.api.mysql_manager import ApiCompanyInfoManager, ApiSystemInfoManager, ApiIntfInfoManager, ApiProjectInfoManager, ApiProjectIntfRelationManager, ApiProjectSystemRelationManager\n'), ((911, 931), 'atp.api.mysql_manager.ApiIntfInfoManager', 'ApiIntfInfoManager', ([], {}), '()\n', (929, 931), False, 'from atp.api.mysql_manager import ApiCompanyInfoManager, ApiSystemInfoManager, ApiIntfInfoManager, ApiProjectInfoManager, ApiProjectIntfRelationManager, ApiProjectSystemRelationManager\n'), ((952, 975), 'atp.api.mysql_manager.ApiProjectInfoManager', 'ApiProjectInfoManager', ([], {}), '()\n', (973, 975), False, 'from atp.api.mysql_manager import ApiCompanyInfoManager, ApiSystemInfoManager, ApiIntfInfoManager, ApiProjectInfoManager, ApiProjectIntfRelationManager, ApiProjectSystemRelationManager\n'), ((997, 1030), 'atp.api.mysql_manager.ApiProjectSystemRelationManager', 'ApiProjectSystemRelationManager', ([], {}), '()\n', (1028, 1030), False, 'from atp.api.mysql_manager import ApiCompanyInfoManager, ApiSystemInfoManager, ApiIntfInfoManager, ApiProjectInfoManager, ApiProjectIntfRelationManager, ApiProjectSystemRelationManager\n'), ((1052, 1083), 'atp.api.mysql_manager.ApiProjectIntfRelationManager', 'ApiProjectIntfRelationManager', ([], {}), '()\n', (1081, 1083), False, 'from atp.api.mysql_manager import ApiCompanyInfoManager, ApiSystemInfoManager, ApiIntfInfoManager, ApiProjectInfoManager, ApiProjectIntfRelationManager, ApiProjectSystemRelationManager\n'), ((4557, 4605), 'atp.utils.common.make_response', 'make_response', (["{'code': '000', 'desc': '项目删除成功'}"], {}), "({'code': '000', 'desc': '项目删除成功'})\n", (4570, 4605), False, 'from atp.utils.common import get_request_json, make_response, username_to_nickname\n'), ((5924, 5979), 'atp.utils.common.make_response', 'make_response', (["{'code': '000', 'projectList': res_list}"], {}), "({'code': '000', 'projectList': res_list})\n", (5937, 5979), False, 'from atp.utils.common import get_request_json, make_response, username_to_nickname\n'), ((9265, 9313), 'atp.utils.common.make_response', 'make_response', (["{'code': '000', 'desc': '工程去除成功'}"], {}), "({'code': '000', 'desc': '工程去除成功'})\n", (9278, 9313), False, 'from atp.utils.common import get_request_json, make_response, username_to_nickname\n'), ((9814, 9862), 'atp.utils.common.make_response', 'make_response', (["{'code': '000', 'desc': '接口去除成功'}"], {}), "({'code': '000', 'desc': '接口去除成功'})\n", (9827, 9862), False, 'from atp.utils.common import get_request_json, make_response, username_to_nickname\n'), ((10626, 10689), 
'atp.utils.common.make_response', 'make_response', (["{'code': '000', 'includeIntfList': intf_id_list}"], {}), "({'code': '000', 'includeIntfList': intf_id_list})\n", (10639, 10689), False, 'from atp.utils.common import get_request_json, make_response, username_to_nickname\n'), ((12617, 12679), 'atp.utils.common.make_response', 'make_response', (["{'code': '000', 'data': subtree[0]['children']}"], {}), "({'code': '000', 'data': subtree[0]['children']})\n", (12630, 12679), False, 'from atp.utils.common import get_request_json, make_response, username_to_nickname\n'), ((772, 802), 'flask.request.headers.get', 'request.headers.get', (['"""X-Token"""'], {}), "('X-Token')\n", (791, 802), False, 'from flask import request\n'), ((6300, 6352), 'atp.utils.common.make_response', 'make_response', (["{'code': '200', 'desc': '未选择工程,无法引入'}"], {}), "({'code': '200', 'desc': '未选择工程,无法引入'})\n", (6313, 6352), False, 'from atp.utils.common import get_request_json, make_response, username_to_nickname\n'), ((6433, 6501), 'atp.utils.common.make_response', 'make_response', (["{'code': '101', 'desc': '入参校验失败, systemIdList包含重复元素'}"], {}), "({'code': '101', 'desc': '入参校验失败, systemIdList包含重复元素'})\n", (6446, 6501), False, 'from atp.utils.common import get_request_json, make_response, username_to_nickname\n'), ((7486, 7538), 'atp.utils.common.make_response', 'make_response', (["{'code': '200', 'desc': '未选择接口,无法引入'}"], {}), "({'code': '200', 'desc': '未选择接口,无法引入'})\n", (7499, 7538), False, 'from atp.utils.common import get_request_json, make_response, username_to_nickname\n'), ((7615, 7683), 'atp.utils.common.make_response', 'make_response', (["{'code': '101', 'desc': '入参校验失败, intf_id_list包含重复元素'}"], {}), "({'code': '101', 'desc': '入参校验失败, intf_id_list包含重复元素'})\n", (7628, 7683), False, 'from atp.utils.common import get_request_json, make_response, username_to_nickname\n'), ((8728, 8784), 'atp.utils.common.make_response', 'make_response', (["{'code': '200', 'desc': '此工程未被引入,请刷新后重试'}"], {}), "({'code': '200', 'desc': '此工程未被引入,请刷新后重试'})\n", (8741, 8784), False, 'from atp.utils.common import get_request_json, make_response, username_to_nickname\n'), ((9694, 9750), 'atp.utils.common.make_response', 'make_response', (["{'code': '200', 'desc': '此接口未被引入,请刷新后重试'}"], {}), "({'code': '200', 'desc': '此接口未被引入,请刷新后重试'})\n", (9707, 9750), False, 'from atp.utils.common import get_request_json, make_response, username_to_nickname\n'), ((2301, 2349), 'atp.utils.common.make_response', 'make_response', (["{'code': '100', 'desc': '入参校验失败'}"], {}), "({'code': '100', 'desc': '入参校验失败'})\n", (2314, 2349), False, 'from atp.utils.common import get_request_json, make_response, username_to_nickname\n'), ((3130, 3178), 'atp.utils.common.make_response', 'make_response', (["{'code': '100', 'desc': '入参校验失败'}"], {}), "({'code': '100', 'desc': '入参校验失败'})\n", (3143, 3178), False, 'from atp.utils.common import get_request_json, make_response, username_to_nickname\n'), ((4081, 4129), 'atp.utils.common.make_response', 'make_response', (["{'code': '100', 'desc': '入参校验失败'}"], {}), "({'code': '100', 'desc': '入参校验失败'})\n", (4094, 4129), False, 'from atp.utils.common import get_request_json, make_response, username_to_nickname\n'), ((4783, 4831), 'atp.utils.common.make_response', 'make_response', (["{'code': '100', 'desc': '入参校验失败'}"], {}), "({'code': '100', 'desc': '入参校验失败'})\n", (4796, 4831), False, 'from atp.utils.common import get_request_json, make_response, username_to_nickname\n'), ((5351, 5420), 
'atp.api.mysql_manager.ApiProjectSystemRelationManager.get_relation', 'ApiProjectSystemRelationManager.get_relation', ([], {'api_project_id': 'p_obj.id'}), '(api_project_id=p_obj.id)\n', (5395, 5420), False, 'from atp.api.mysql_manager import ApiCompanyInfoManager, ApiSystemInfoManager, ApiIntfInfoManager, ApiProjectInfoManager, ApiProjectIntfRelationManager, ApiProjectSystemRelationManager\n'), ((6200, 6248), 'atp.utils.common.make_response', 'make_response', (["{'code': '100', 'desc': '入参校验失败'}"], {}), "({'code': '100', 'desc': '入参校验失败'})\n", (6213, 6248), False, 'from atp.utils.common import get_request_json, make_response, username_to_nickname\n'), ((7388, 7436), 'atp.utils.common.make_response', 'make_response', (["{'code': '100', 'desc': '入参校验失败'}"], {}), "({'code': '100', 'desc': '入参校验失败'})\n", (7401, 7436), False, 'from atp.utils.common import get_request_json, make_response, username_to_nickname\n'), ((8549, 8597), 'atp.utils.common.make_response', 'make_response', (["{'code': '100', 'desc': '入参校验失败'}"], {}), "({'code': '100', 'desc': '入参校验失败'})\n", (8562, 8597), False, 'from atp.utils.common import get_request_json, make_response, username_to_nickname\n'), ((9143, 9201), 'atp.utils.common.make_response', 'make_response', (["{'code': '300', 'desc': '此工程下已引入接口,无法直接去除'}"], {}), "({'code': '300', 'desc': '此工程下已引入接口,无法直接去除'})\n", (9156, 9201), False, 'from atp.utils.common import get_request_json, make_response, username_to_nickname\n'), ((9519, 9567), 'atp.utils.common.make_response', 'make_response', (["{'code': '100', 'desc': '入参校验失败'}"], {}), "({'code': '100', 'desc': '入参校验失败'})\n", (9532, 9567), False, 'from atp.utils.common import get_request_json, make_response, username_to_nickname\n'), ((10437, 10485), 'atp.utils.common.make_response', 'make_response', (["{'code': '100', 'desc': '入参校验失败'}"], {}), "({'code': '100', 'desc': '入参校验失败'})\n", (10450, 10485), False, 'from atp.utils.common import get_request_json, make_response, username_to_nickname\n'), ((10842, 10890), 'atp.utils.common.make_response', 'make_response', (["{'code': '100', 'desc': '入参校验失败'}"], {}), "({'code': '100', 'desc': '入参校验失败'})\n", (10855, 10890), False, 'from atp.utils.common import get_request_json, make_response, username_to_nickname\n')] |
# coding: utf-8
from __future__ import annotations
from datetime import date, datetime # noqa: F401
import re # noqa: F401
from typing import Any, Dict, List, Optional # noqa: F401
from pydantic import AnyUrl, BaseModel, EmailStr, validator # noqa: F401
class IndyPresPredSpec(BaseModel):
"""NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
Do not edit the class manually.
IndyPresPredSpec - a model defined in OpenAPI
cred_def_id: The cred_def_id of this IndyPresPredSpec [Optional].
name: The name of this IndyPresPredSpec.
predicate: The predicate of this IndyPresPredSpec.
threshold: The threshold of this IndyPresPredSpec.
"""
cred_def_id: Optional[str] = None
name: str
predicate: str
threshold: int
@validator("cred_def_id")
def cred_def_id_pattern(cls, value):
assert value is not None and re.match(
r"^([123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz]{21,22}):3:CL:(([1-9][0-9]*)|([123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz]{21,22}:2:.+:[0-9.]+)):(.+)?$",
value,
)
return value
IndyPresPredSpec.update_forward_refs()
| [
"re.match",
"pydantic.validator"
] | [((830, 854), 'pydantic.validator', 'validator', (['"""cred_def_id"""'], {}), "('cred_def_id')\n", (839, 854), False, 'from pydantic import AnyUrl, BaseModel, EmailStr, validator\n'), ((933, 1143), 're.match', 're.match', (['"""^([123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz]{21,22}):3:CL:(([1-9][0-9]*)|([123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz]{21,22}:2:.+:[0-9.]+)):(.+)?$"""', 'value'], {}), "(\n '^([123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz]{21,22}):3:CL:(([1-9][0-9]*)|([123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz]{21,22}:2:.+:[0-9.]+)):(.+)?$'\n , value)\n", (941, 1143), False, 'import re\n')] |
# -*- coding: utf-8 -*-
from django.db import models, migrations
import datetime
import rels.django
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Hero',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created_at_turn', models.IntegerField(default=0)),
('saved_at_turn', models.IntegerField(default=0)),
('last_rare_operation_at_turn', models.IntegerField(default=0)),
('saved_at', models.DateTimeField(default=datetime.datetime(1970, 1, 1, 0, 0), auto_now=True)),
('is_fast', models.BooleanField(default=True, db_index=True)),
('is_bot', models.BooleanField(default=False)),
('is_alive', models.BooleanField(default=True)),
('active_state_end_at', models.DateTimeField(default=datetime.datetime(1970, 1, 1, 0, 0), db_index=True)),
('premium_state_end_at', models.DateTimeField(default=datetime.datetime(1970, 1, 1, 0, 0), db_index=True)),
('ban_state_end_at', models.DateTimeField(default=datetime.datetime(1970, 1, 1, 0, 0), db_index=True)),
('ui_caching_started_at', models.DateTimeField(default=datetime.datetime(2000, 1, 1, 0, 0), auto_now_add=True)),
('gender', rels.django.RelationIntegerField()),
('race', rels.django.RelationIntegerField()),
('level', models.IntegerField(default=1)),
('experience', models.IntegerField(default=0)),
('health', models.IntegerField(default=0.0)),
('raw_power_magic', models.BigIntegerField(default=0)),
('raw_power_physic', models.BigIntegerField(default=0)),
('money', models.BigIntegerField(default=0)),
('data', models.TextField(default=b'{}')),
('equipment', models.TextField(default=b'{}')),
('bag', models.TextField(default=b'{}')),
('abilities', models.TextField(default=b'', blank=True)),
('places_history', models.TextField(default=b'{}')),
('cards', models.TextField(default=b'{}')),
('messages', models.TextField(default=b'[]')),
('diary', models.TextField(default=b'[]')),
('actions', models.TextField(default=b'{}')),
('quests', models.TextField(default=b'{}')),
('quest_created_time', models.DateTimeField(default=datetime.datetime(1970, 1, 1, 0, 0), db_index=True)),
('settings_approved', models.BooleanField(default=True)),
('pvp', models.TextField(default=b'{}')),
('next_spending', rels.django.RelationIntegerField()),
('energy', models.IntegerField(default=0)),
('last_energy_regeneration_at_turn', models.IntegerField(default=0)),
('energy_bonus', models.BigIntegerField(default=0)),
('might', models.FloatField(default=0.0)),
('pos_percents', models.FloatField(default=None, null=True, blank=True)),
('pos_invert_direction', models.NullBooleanField(default=False)),
('pos_from_x', models.IntegerField(default=None, null=True, blank=True)),
('pos_from_y', models.IntegerField(default=None, null=True, blank=True)),
('pos_to_x', models.IntegerField(default=None, null=True, blank=True)),
('pos_to_y', models.IntegerField(default=None, null=True, blank=True)),
('preferences', models.TextField(default=b'{}')),
('habit_honor', models.FloatField(default=0)),
('habit_peacefulness', models.FloatField(default=0)),
('stat_pve_deaths', models.BigIntegerField(default=0)),
('stat_pve_kills', models.BigIntegerField(default=0)),
('stat_money_earned_from_loot', models.BigIntegerField(default=0)),
('stat_money_earned_from_artifacts', models.BigIntegerField(default=0)),
('stat_money_earned_from_quests', models.BigIntegerField(default=0)),
('stat_money_earned_from_help', models.BigIntegerField(default=0)),
('stat_money_earned_from_habits', models.BigIntegerField(default=0)),
('stat_money_earned_from_companions', models.BigIntegerField(default=0)),
('stat_money_spend_for_heal', models.BigIntegerField(default=0)),
('stat_money_spend_for_artifacts', models.BigIntegerField(default=0)),
('stat_money_spend_for_sharpening', models.BigIntegerField(default=0)),
('stat_money_spend_for_useless', models.BigIntegerField(default=0)),
('stat_money_spend_for_impact', models.BigIntegerField(default=0)),
('stat_money_spend_for_experience', models.BigIntegerField(default=0)),
('stat_money_spend_for_repairing', models.BigIntegerField(default=0)),
('stat_money_spend_for_tax', models.BigIntegerField(default=0)),
('stat_money_spend_for_companions', models.BigIntegerField(default=0)),
('stat_artifacts_had', models.BigIntegerField(default=0)),
('stat_loot_had', models.BigIntegerField(default=0)),
('stat_help_count', models.BigIntegerField(default=0)),
('stat_quests_done', models.BigIntegerField(default=0)),
('stat_companions_count', models.BigIntegerField(default=0)),
('stat_pvp_battles_1x1_number', models.BigIntegerField(default=0)),
('stat_pvp_battles_1x1_victories', models.BigIntegerField(default=0)),
('stat_pvp_battles_1x1_draws', models.BigIntegerField(default=0)),
('stat_cards_used', models.BigIntegerField(default=0)),
('stat_cards_combined', models.BigIntegerField(default=0)),
('stat_gifts_returned', models.BigIntegerField(default=0)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='HeroPreferences',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('energy_regeneration_type', models.IntegerField(default=0, blank=True, choices=[(0, '\u043c\u043e\u043b\u0438\u0442\u0432\u0430'), (1, '\u0436\u0435\u0440\u0442\u0432\u043e\u043f\u0440\u0438\u043d\u043e\u0448\u0435\u043d\u0438\u0435'), (2, '\u0431\u043b\u0430\u0433\u043e\u0432\u043e\u043d\u0438\u044f'), (3, '\u0441\u0438\u043c\u0432\u043e\u043b\u044b'), (4, '\u043c\u0435\u0434\u0438\u0442\u0430\u0446\u0438\u044f')])),
('equipment_slot', rels.django.RelationIntegerField(default=None, null=True, blank=True)),
('risk_level', rels.django.RelationIntegerField()),
('favorite_item', rels.django.RelationIntegerField(default=None, null=True, blank=True)),
('archetype', rels.django.RelationIntegerField(default=None, null=True, blank=True)),
('companion_dedication', rels.django.RelationIntegerField(default=None, null=True, blank=True)),
('companion_empathy', rels.django.RelationIntegerField(default=None, null=True, blank=True)),
],
options={
},
bases=(models.Model,),
),
]
| [
"datetime.datetime",
"django.db.models.FloatField",
"django.db.models.TextField",
"django.db.models.IntegerField",
"django.db.models.NullBooleanField",
"django.db.models.BooleanField",
"django.db.models.AutoField",
"django.db.models.BigIntegerField"
] | [((292, 385), 'django.db.models.AutoField', 'models.AutoField', ([], {'verbose_name': '"""ID"""', 'serialize': '(False)', 'auto_created': '(True)', 'primary_key': '(True)'}), "(verbose_name='ID', serialize=False, auto_created=True,\n primary_key=True)\n", (308, 385), False, 'from django.db import models, migrations\n'), ((420, 450), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(0)'}), '(default=0)\n', (439, 450), False, 'from django.db import models, migrations\n'), ((487, 517), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(0)'}), '(default=0)\n', (506, 517), False, 'from django.db import models, migrations\n'), ((568, 598), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(0)'}), '(default=0)\n', (587, 598), False, 'from django.db import models, migrations\n'), ((741, 789), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(True)', 'db_index': '(True)'}), '(default=True, db_index=True)\n', (760, 789), False, 'from django.db import models, migrations\n'), ((819, 853), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (838, 853), False, 'from django.db import models, migrations\n'), ((885, 918), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(True)'}), '(default=True)\n', (904, 918), False, 'from django.db import models, migrations\n'), ((1569, 1599), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(1)'}), '(default=1)\n', (1588, 1599), False, 'from django.db import models, migrations\n'), ((1633, 1663), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(0)'}), '(default=0)\n', (1652, 1663), False, 'from django.db import models, migrations\n'), ((1693, 1725), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(0.0)'}), '(default=0.0)\n', (1712, 1725), False, 'from django.db import models, migrations\n'), ((1764, 1797), 'django.db.models.BigIntegerField', 'models.BigIntegerField', ([], {'default': '(0)'}), '(default=0)\n', (1786, 1797), False, 'from django.db import models, migrations\n'), ((1837, 1870), 'django.db.models.BigIntegerField', 'models.BigIntegerField', ([], {'default': '(0)'}), '(default=0)\n', (1859, 1870), False, 'from django.db import models, migrations\n'), ((1899, 1932), 'django.db.models.BigIntegerField', 'models.BigIntegerField', ([], {'default': '(0)'}), '(default=0)\n', (1921, 1932), False, 'from django.db import models, migrations\n'), ((1960, 1991), 'django.db.models.TextField', 'models.TextField', ([], {'default': "b'{}'"}), "(default=b'{}')\n", (1976, 1991), False, 'from django.db import models, migrations\n'), ((2024, 2055), 'django.db.models.TextField', 'models.TextField', ([], {'default': "b'{}'"}), "(default=b'{}')\n", (2040, 2055), False, 'from django.db import models, migrations\n'), ((2082, 2113), 'django.db.models.TextField', 'models.TextField', ([], {'default': "b'{}'"}), "(default=b'{}')\n", (2098, 2113), False, 'from django.db import models, migrations\n'), ((2146, 2187), 'django.db.models.TextField', 'models.TextField', ([], {'default': "b''", 'blank': '(True)'}), "(default=b'', blank=True)\n", (2162, 2187), False, 'from django.db import models, migrations\n'), ((2225, 2256), 'django.db.models.TextField', 'models.TextField', ([], {'default': "b'{}'"}), "(default=b'{}')\n", (2241, 2256), False, 'from django.db import models, migrations\n'), ((2285, 2316), 'django.db.models.TextField', 
'models.TextField', ([], {'default': "b'{}'"}), "(default=b'{}')\n", (2301, 2316), False, 'from django.db import models, migrations\n'), ((2348, 2379), 'django.db.models.TextField', 'models.TextField', ([], {'default': "b'[]'"}), "(default=b'[]')\n", (2364, 2379), False, 'from django.db import models, migrations\n'), ((2408, 2439), 'django.db.models.TextField', 'models.TextField', ([], {'default': "b'[]'"}), "(default=b'[]')\n", (2424, 2439), False, 'from django.db import models, migrations\n'), ((2470, 2501), 'django.db.models.TextField', 'models.TextField', ([], {'default': "b'{}'"}), "(default=b'{}')\n", (2486, 2501), False, 'from django.db import models, migrations\n'), ((2531, 2562), 'django.db.models.TextField', 'models.TextField', ([], {'default': "b'{}'"}), "(default=b'{}')\n", (2547, 2562), False, 'from django.db import models, migrations\n'), ((2725, 2758), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(True)'}), '(default=True)\n', (2744, 2758), False, 'from django.db import models, migrations\n'), ((2785, 2816), 'django.db.models.TextField', 'models.TextField', ([], {'default': "b'{}'"}), "(default=b'{}')\n", (2801, 2816), False, 'from django.db import models, migrations\n'), ((2917, 2947), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(0)'}), '(default=0)\n', (2936, 2947), False, 'from django.db import models, migrations\n'), ((3003, 3033), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(0)'}), '(default=0)\n', (3022, 3033), False, 'from django.db import models, migrations\n'), ((3069, 3102), 'django.db.models.BigIntegerField', 'models.BigIntegerField', ([], {'default': '(0)'}), '(default=0)\n', (3091, 3102), False, 'from django.db import models, migrations\n'), ((3131, 3161), 'django.db.models.FloatField', 'models.FloatField', ([], {'default': '(0.0)'}), '(default=0.0)\n', (3148, 3161), False, 'from django.db import models, migrations\n'), ((3197, 3251), 'django.db.models.FloatField', 'models.FloatField', ([], {'default': 'None', 'null': '(True)', 'blank': '(True)'}), '(default=None, null=True, blank=True)\n', (3214, 3251), False, 'from django.db import models, migrations\n'), ((3295, 3333), 'django.db.models.NullBooleanField', 'models.NullBooleanField', ([], {'default': '(False)'}), '(default=False)\n', (3318, 3333), False, 'from django.db import models, migrations\n'), ((3367, 3423), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': 'None', 'null': '(True)', 'blank': '(True)'}), '(default=None, null=True, blank=True)\n', (3386, 3423), False, 'from django.db import models, migrations\n'), ((3457, 3513), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': 'None', 'null': '(True)', 'blank': '(True)'}), '(default=None, null=True, blank=True)\n', (3476, 3513), False, 'from django.db import models, migrations\n'), ((3545, 3601), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': 'None', 'null': '(True)', 'blank': '(True)'}), '(default=None, null=True, blank=True)\n', (3564, 3601), False, 'from django.db import models, migrations\n'), ((3633, 3689), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': 'None', 'null': '(True)', 'blank': '(True)'}), '(default=None, null=True, blank=True)\n', (3652, 3689), False, 'from django.db import models, migrations\n'), ((3724, 3755), 'django.db.models.TextField', 'models.TextField', ([], {'default': "b'{}'"}), "(default=b'{}')\n", (3740, 3755), False, 'from django.db import 
models, migrations\n'), ((3790, 3818), 'django.db.models.FloatField', 'models.FloatField', ([], {'default': '(0)'}), '(default=0)\n', (3807, 3818), False, 'from django.db import models, migrations\n'), ((3860, 3888), 'django.db.models.FloatField', 'models.FloatField', ([], {'default': '(0)'}), '(default=0)\n', (3877, 3888), False, 'from django.db import models, migrations\n'), ((3927, 3960), 'django.db.models.BigIntegerField', 'models.BigIntegerField', ([], {'default': '(0)'}), '(default=0)\n', (3949, 3960), False, 'from django.db import models, migrations\n'), ((3998, 4031), 'django.db.models.BigIntegerField', 'models.BigIntegerField', ([], {'default': '(0)'}), '(default=0)\n', (4020, 4031), False, 'from django.db import models, migrations\n'), ((4082, 4115), 'django.db.models.BigIntegerField', 'models.BigIntegerField', ([], {'default': '(0)'}), '(default=0)\n', (4104, 4115), False, 'from django.db import models, migrations\n'), ((4171, 4204), 'django.db.models.BigIntegerField', 'models.BigIntegerField', ([], {'default': '(0)'}), '(default=0)\n', (4193, 4204), False, 'from django.db import models, migrations\n'), ((4257, 4290), 'django.db.models.BigIntegerField', 'models.BigIntegerField', ([], {'default': '(0)'}), '(default=0)\n', (4279, 4290), False, 'from django.db import models, migrations\n'), ((4341, 4374), 'django.db.models.BigIntegerField', 'models.BigIntegerField', ([], {'default': '(0)'}), '(default=0)\n', (4363, 4374), False, 'from django.db import models, migrations\n'), ((4427, 4460), 'django.db.models.BigIntegerField', 'models.BigIntegerField', ([], {'default': '(0)'}), '(default=0)\n', (4449, 4460), False, 'from django.db import models, migrations\n'), ((4517, 4550), 'django.db.models.BigIntegerField', 'models.BigIntegerField', ([], {'default': '(0)'}), '(default=0)\n', (4539, 4550), False, 'from django.db import models, migrations\n'), ((4599, 4632), 'django.db.models.BigIntegerField', 'models.BigIntegerField', ([], {'default': '(0)'}), '(default=0)\n', (4621, 4632), False, 'from django.db import models, migrations\n'), ((4686, 4719), 'django.db.models.BigIntegerField', 'models.BigIntegerField', ([], {'default': '(0)'}), '(default=0)\n', (4708, 4719), False, 'from django.db import models, migrations\n'), ((4774, 4807), 'django.db.models.BigIntegerField', 'models.BigIntegerField', ([], {'default': '(0)'}), '(default=0)\n', (4796, 4807), False, 'from django.db import models, migrations\n'), ((4859, 4892), 'django.db.models.BigIntegerField', 'models.BigIntegerField', ([], {'default': '(0)'}), '(default=0)\n', (4881, 4892), False, 'from django.db import models, migrations\n'), ((4943, 4976), 'django.db.models.BigIntegerField', 'models.BigIntegerField', ([], {'default': '(0)'}), '(default=0)\n', (4965, 4976), False, 'from django.db import models, migrations\n'), ((5031, 5064), 'django.db.models.BigIntegerField', 'models.BigIntegerField', ([], {'default': '(0)'}), '(default=0)\n', (5053, 5064), False, 'from django.db import models, migrations\n'), ((5118, 5151), 'django.db.models.BigIntegerField', 'models.BigIntegerField', ([], {'default': '(0)'}), '(default=0)\n', (5140, 5151), False, 'from django.db import models, migrations\n'), ((5199, 5232), 'django.db.models.BigIntegerField', 'models.BigIntegerField', ([], {'default': '(0)'}), '(default=0)\n', (5221, 5232), False, 'from django.db import models, migrations\n'), ((5287, 5320), 'django.db.models.BigIntegerField', 'models.BigIntegerField', ([], {'default': '(0)'}), '(default=0)\n', (5309, 5320), False, 'from django.db import 
models, migrations\n'), ((5362, 5395), 'django.db.models.BigIntegerField', 'models.BigIntegerField', ([], {'default': '(0)'}), '(default=0)\n', (5384, 5395), False, 'from django.db import models, migrations\n'), ((5432, 5465), 'django.db.models.BigIntegerField', 'models.BigIntegerField', ([], {'default': '(0)'}), '(default=0)\n', (5454, 5465), False, 'from django.db import models, migrations\n'), ((5504, 5537), 'django.db.models.BigIntegerField', 'models.BigIntegerField', ([], {'default': '(0)'}), '(default=0)\n', (5526, 5537), False, 'from django.db import models, migrations\n'), ((5577, 5610), 'django.db.models.BigIntegerField', 'models.BigIntegerField', ([], {'default': '(0)'}), '(default=0)\n', (5599, 5610), False, 'from django.db import models, migrations\n'), ((5655, 5688), 'django.db.models.BigIntegerField', 'models.BigIntegerField', ([], {'default': '(0)'}), '(default=0)\n', (5677, 5688), False, 'from django.db import models, migrations\n'), ((5739, 5772), 'django.db.models.BigIntegerField', 'models.BigIntegerField', ([], {'default': '(0)'}), '(default=0)\n', (5761, 5772), False, 'from django.db import models, migrations\n'), ((5826, 5859), 'django.db.models.BigIntegerField', 'models.BigIntegerField', ([], {'default': '(0)'}), '(default=0)\n', (5848, 5859), False, 'from django.db import models, migrations\n'), ((5909, 5942), 'django.db.models.BigIntegerField', 'models.BigIntegerField', ([], {'default': '(0)'}), '(default=0)\n', (5931, 5942), False, 'from django.db import models, migrations\n'), ((5981, 6014), 'django.db.models.BigIntegerField', 'models.BigIntegerField', ([], {'default': '(0)'}), '(default=0)\n', (6003, 6014), False, 'from django.db import models, migrations\n'), ((6057, 6090), 'django.db.models.BigIntegerField', 'models.BigIntegerField', ([], {'default': '(0)'}), '(default=0)\n', (6079, 6090), False, 'from django.db import models, migrations\n'), ((6133, 6166), 'django.db.models.BigIntegerField', 'models.BigIntegerField', ([], {'default': '(0)'}), '(default=0)\n', (6155, 6166), False, 'from django.db import models, migrations\n'), ((6379, 6472), 'django.db.models.AutoField', 'models.AutoField', ([], {'verbose_name': '"""ID"""', 'serialize': '(False)', 'auto_created': '(True)', 'primary_key': '(True)'}), "(verbose_name='ID', serialize=False, auto_created=True,\n primary_key=True)\n", (6395, 6472), False, 'from django.db import models, migrations\n'), ((6516, 6666), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(0)', 'blank': '(True)', 'choices': "[(0, 'молитва'), (1, 'жертвоприношение'), (2, 'благовония'), (3, 'символы'),\n (4, 'медитация')]"}), "(default=0, blank=True, choices=[(0, 'молитва'), (1,\n 'жертвоприношение'), (2, 'благовония'), (3, 'символы'), (4, 'медитация')])\n", (6535, 6666), False, 'from django.db import models, migrations\n'), ((659, 694), 'datetime.datetime', 'datetime.datetime', (['(1970)', '(1)', '(1)', '(0)', '(0)'], {}), '(1970, 1, 1, 0, 0)\n', (676, 694), False, 'import datetime\n'), ((990, 1025), 'datetime.datetime', 'datetime.datetime', (['(1970)', '(1)', '(1)', '(0)', '(0)'], {}), '(1970, 1, 1, 0, 0)\n', (1007, 1025), False, 'import datetime\n'), ((1114, 1149), 'datetime.datetime', 'datetime.datetime', (['(1970)', '(1)', '(1)', '(0)', '(0)'], {}), '(1970, 1, 1, 0, 0)\n', (1131, 1149), False, 'import datetime\n'), ((1234, 1269), 'datetime.datetime', 'datetime.datetime', (['(1970)', '(1)', '(1)', '(0)', '(0)'], {}), '(1970, 1, 1, 0, 0)\n', (1251, 1269), False, 'import datetime\n'), ((1359, 1394), 
'datetime.datetime', 'datetime.datetime', (['(2000)', '(1)', '(1)', '(0)', '(0)'], {}), '(2000, 1, 1, 0, 0)\n', (1376, 1394), False, 'import datetime\n'), ((2633, 2668), 'datetime.datetime', 'datetime.datetime', (['(1970)', '(1)', '(1)', '(0)', '(0)'], {}), '(1970, 1, 1, 0, 0)\n', (2650, 2668), False, 'import datetime\n')] |
# Copyright (C) 2016 Sony Mobile Communications Inc.
# All rights, including trade secret rights, reserved.
import apt
def get_package_version(package='ave'):
cache = apt.cache.Cache()
pkg = cache[package]
instver = pkg.installed
if instver is None:
raise Exception('The package "%s" was not installed' % package)
return instver.version
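# Illustrative: get_package_version('ave') returns the installed version string,
# e.g. '1.2.3' (placeholder value); a package that is not installed raises the
# Exception above instead.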
| [
"apt.cache.Cache"
] | [((173, 190), 'apt.cache.Cache', 'apt.cache.Cache', ([], {}), '()\n', (188, 190), False, 'import apt\n')] |
import argparse
from pathlib import Path, PurePath
from typing import Dict
import numpy as np
import src.data
import src.file_util
import src.image_util
import src.model
def _batch_update(
dataset: src.data.Dataset,
batch_index: int,
d_f: src.data.ModelFile,
d_c: src.data.ModelFile,
g_c: src.data.ModelFile,
g_f: src.data.ModelFile,
gan: src.data.ModelFile,
) -> Dict[str, float]:
batch_losses = {}
# See "data.py" docs and readme for Hungarian notation meaning
# ASSEMBLE DATA
d_f.model.trainable = False
d_c.model.trainable = False
gan.model.trainable = False
g_c.model.trainable = False
g_f.model.trainable = False
data = dataset.get_batch_data(batch_index=batch_index)
[XA_fr, XB_fr, XC_fr] = data["X_fr"]
[y1_fr, y2_fr] = data["y_fr"]
[XA_cr, XB_cr, XC_cr] = data["X_cr"]
XC_cx = data["XC_cx"]
y1_cx = data["y_cx"]
XC_fx = data["XC_fx"]
y1_fx = data["y_fx"]
weights_c_to_f = data["c_to_f"]
# UPDATE DISCRIMINATORS
d_f.model.trainable = True
d_c.model.trainable = True
gan.model.trainable = False
g_c.model.trainable = False
g_f.model.trainable = False
for _ in range(2):
losses = {
"d_fr": d_f.model.train_on_batch([XA_fr, XC_fr], y1_fr)[0],
"d_fx": d_f.model.train_on_batch([XA_fr, XC_fx], y1_fx)[0],
"d_cr": d_c.model.train_on_batch([XA_cr, XC_cr], y2_fr)[0],
"d_cx": d_c.model.train_on_batch([XA_cr, XC_cx], y1_cx)[0],
}
batch_losses.update(losses) # type: ignore
# UPDATE COARSE GENERATOR: _cr
d_f.model.trainable = False
d_c.model.trainable = False
gan.model.trainable = False
g_c.model.trainable = True
g_f.model.trainable = False
batch_losses["g_c"], _ = g_c.model.train_on_batch([XA_cr, XB_cr], [XC_cr])
# UPDATE FINE GENERATOR: _fr
d_f.model.trainable = False
d_c.model.trainable = False
gan.model.trainable = False
g_c.model.trainable = False
g_f.model.trainable = True
batch_losses["g_f"] = g_f.model.train_on_batch(
[XA_fr, XB_fr, weights_c_to_f], XC_fr
)
# UPDATE GAN
d_f.model.trainable = False
d_c.model.trainable = False
gan.model.trainable = True
g_c.model.trainable = True
g_f.model.trainable = True
(
loss_gan,
_,
_,
loss_fm_c,
loss_fm_f,
_,
_,
loss_g_c_reconstruct,
loss_g_f_reconstruct,
) = gan.model.train_on_batch(
[XA_fr, XA_cr, weights_c_to_f, XB_fr, XB_cr, XC_fr, XC_cr],
[y1_fr, y2_fr, XC_fx, XC_cx, XC_cx, XC_fx, XC_cx, XC_fx], # type: ignore
)
batch_losses.update(
{
"gan": loss_gan,
"fm1": loss_fm_c,
"fm2": loss_fm_f,
"g_c_recon": loss_g_c_reconstruct,
"g_f_recon": loss_g_f_reconstruct,
}
)
return batch_losses
def train(
dataset: src.data.Dataset,
d_f: src.data.ModelFile,
d_c: src.data.ModelFile,
g_c: src.data.ModelFile,
g_f: src.data.ModelFile,
gan: src.data.ModelFile,
statistics: src.data.Statistics,
visualizations: src.data.Visualizations,
epoch_count: int,
):
start_epoch = statistics.latest_epoch
if 0 < start_epoch:
start_epoch += 1
statistics.start_timer()
print(f"starting at epoch {start_epoch} of {epoch_count}")
print(f"epochs have {dataset.batch_count} batches of {dataset.images_per_batch}")
for epoch in range(start_epoch, epoch_count):
# BATCH LOOP
for batch in range(dataset.batch_count):
batch_losses = _batch_update(
dataset=dataset,
batch_index=batch,
d_f=d_f,
d_c=d_c,
g_c=g_c,
g_f=g_f,
gan=gan,
)
statistics.append(epoch=epoch, batch=batch, data=batch_losses)
print(statistics.latest_batch_to_string())
print(statistics.latest_epoch_to_string())
# SAVE
print("saving epoch")
statistics.save()
visualizations.save_plot(epoch=epoch)
VERSION = "latest"
d_f.save(version=VERSION)
d_c.save(version=VERSION)
g_c.save(version=VERSION)
g_f.save(version=VERSION)
gan.save(version=VERSION)
VERSION = f"eval_{epoch}"
g_c.save(version=VERSION)
g_f.save(version=VERSION)
print(f"training complete")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--npz_file", type=str, required=True, help="path/to/npz/file",
)
parser.add_argument(
"--save_folder",
type=str,
required=True,
help="path/to/save_directory",
default="RVGAN",
)
parser.add_argument("--resume_training", action="store_true")
parser.add_argument("--config_file", type=str, default="config.yaml")
args = parser.parse_args()
input_npz_file = PurePath(args.npz_file)
assert src.file_util.check_file(input_npz_file)
output_folder = PurePath(args.save_folder)
Path(output_folder).mkdir(parents=True, exist_ok=True)
config_file = PurePath(args.config_file)
assert src.file_util.check_file(config_file)
resume_training = args.resume_training
print("loading config")
config = src.file_util.read_yaml(path=config_file)
input_shape_px = np.array(config["arch"]["input_size"])
downscale_factor = config["arch"]["downscale_factor"]
inner_weight = config["arch"]["inner_weight"]
epoch_count = config["train"]["epochs"]
images_per_batch = config["train"]["batch_size"]
print("building model")
arch_factory = src.model.ArchFactory(
input_shape_px=input_shape_px, downscale_factor=downscale_factor,
)
print(" d_f")
d_f_arch = arch_factory.build_discriminator(scale_type="fine", name="D1")
d_f = src.data.ModelFile(name="d_f", folder=output_folder, arch=d_f_arch)
print(" d_c")
d_c_arch = arch_factory.build_discriminator(scale_type="coarse", name="D2")
d_c = src.data.ModelFile(name="d_c", folder=output_folder, arch=d_c_arch)
print(" g_f")
g_f_arch = arch_factory.build_generator(scale_type="fine")
g_f = src.data.ModelFile(name="g_f", folder=output_folder, arch=g_f_arch)
print(" g_c")
g_c_arch = arch_factory.build_generator(scale_type="coarse")
g_c = src.data.ModelFile(name="g_c", folder=output_folder, arch=g_c_arch)
print(" gan")
gan_arch = arch_factory.build_gan(
d_coarse=d_c_arch,
d_fine=d_f_arch,
g_coarse=g_c_arch,
g_fine=g_f_arch,
inner_weight=inner_weight,
)
gan = src.data.ModelFile(name="gan", folder=output_folder, arch=gan_arch)
print("loading dataset")
[XA_fr, XB_fr, XC_fr] = src.data.load_npz_data(path=input_npz_file)
dataset = src.data.Dataset(
XA_fr=XA_fr,
XB_fr=XB_fr,
XC_fr=XC_fr,
downscale_factor=downscale_factor,
images_per_batch=images_per_batch,
g_f_arch=g_f.model,
g_c_arch=g_c.model,
)
print("initializing statistics")
statistics = src.data.Statistics(output_folder=output_folder)
print("initializing visualizations")
visualizations = src.data.Visualizations(
output_folder=output_folder,
dataset=dataset,
downscale_factor=downscale_factor,
sample_count=5,
g_c=g_c,
g_f=g_f,
)
if args.resume_training:
print("resuming training")
VERSION = "latest"
d_f.load(version=VERSION)
d_c.load(version=VERSION)
g_c.load(version=VERSION)
g_f.load(version=VERSION)
gan.load(version=VERSION)
statistics.load()
else:
print("starting training")
train(
dataset=dataset,
d_f=d_f,
d_c=d_c,
g_c=g_c,
g_f=g_f,
gan=gan,
statistics=statistics,
visualizations=visualizations,
epoch_count=epoch_count,
)
print("Training complete")
| [
"pathlib.PurePath",
"numpy.array",
"argparse.ArgumentParser",
"pathlib.Path"
] | [((4545, 4570), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (4568, 4570), False, 'import argparse\n'), ((5028, 5051), 'pathlib.PurePath', 'PurePath', (['args.npz_file'], {}), '(args.npz_file)\n', (5036, 5051), False, 'from pathlib import Path, PurePath\n'), ((5125, 5151), 'pathlib.PurePath', 'PurePath', (['args.save_folder'], {}), '(args.save_folder)\n', (5133, 5151), False, 'from pathlib import Path, PurePath\n'), ((5230, 5256), 'pathlib.PurePath', 'PurePath', (['args.config_file'], {}), '(args.config_file)\n', (5238, 5256), False, 'from pathlib import Path, PurePath\n'), ((5455, 5493), 'numpy.array', 'np.array', (["config['arch']['input_size']"], {}), "(config['arch']['input_size'])\n", (5463, 5493), True, 'import numpy as np\n'), ((5156, 5175), 'pathlib.Path', 'Path', (['output_folder'], {}), '(output_folder)\n', (5160, 5175), False, 'from pathlib import Path, PurePath\n')] |
"""
1. https://realpython.com/async-io-python/#the-asyncawait-syntax-and-native-coroutines
todo - async with, async for.
"""
import asyncio
from asyncio.events import AbstractEventLoop
import time
from typing import Coroutine
def test1():
"""
"""
pass
if __name__ == '__main__':
# - test asyncio
s = time.perf_counter()
test1()
# - print stats
elapsed = time.perf_counter() - s
print(f"{__file__} executed in {elapsed:0.2f} seconds.")
    print('Success, done!\a')
| [
"time.perf_counter"
] | [((326, 345), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (343, 345), False, 'import time\n'), ((392, 411), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (409, 411), False, 'import time\n')] |
"""
Module with some miscellaneous useful tools.
"""
from __future__ import absolute_import
def plotbox(xy, kwargs={'color':'b', 'linewidth':2}):
"""
Add a box around a region to an existing plot.
xy can be a list of [x1, x2, y1, y2] of the corners
or a string "x1 x2 y1 y2".
"""
from pylab import plot
if type(xy)==str:
xy=xy.split()
x1 = float(xy[0])
x2 = float(xy[1])
y1 = float(xy[2])
y2 = float(xy[3])
plot([x1,x2,x2,x1,x1],[y1,y1,y2,y2,y1],**kwargs)
def pcolorcells(X, Y, Z, ax=None, **kwargs):
"""
Wraps pcolormesh in a way that works if X,Y are cell centers or edges.
X,Y can be 2d or 1d arrays.
if `ax==None` then the plot is done on a new set of axes, otherwise on ax
X,Y,Z is the data to be plotted. It is assumed to be finite volume data
where Z[i,j] is a constant value over a grid cell.
Internally x,y are defined as 1d arrays since it is assumed the
grids are Cartesian.
If the length of the 1d arrays x and y match the dimensions of Z then
these are assumed to be cell center values. In this case the arrays
are expanded by one to obtain x_edge, y_edge as edge values,
as needed for proper alignment.
If the length of x,y is already one greater than the corresponding
dimension of Z, then it is assumed that these are already edge values.
Notes:
- This should work also if x and/or y is decreasing rather than increasing.
- Should also work regardless of index order.
"""
from matplotlib import pyplot as plt
import numpy as np
transposeZ = False # default for 1d arrays X,Y, as in pcolormesh
if X.ndim == 1:
x = X
Zshapex = Z.shape[1]
assert len(x) in [Zshapex, Zshapex+1], '*** Z has wrong shape, transpose?'
# If X is 2d extract proper 1d slice:
elif X.ndim == 2:
if X[0,0] == X[0,1]:
x = X[:,0]
transposeZ = True
Zshapex = Z.shape[0]
else:
x = X[0,:]
transposeZ = False
Zshapex = Z.shape[1]
# If Y is 2d extract proper 1d slice:
if Y.ndim == 1:
y = Y
elif Y.ndim == 2:
if Y[0,0] == Y[0,1]:
y = Y[:,0]
assert not transposeZ, '*** X and Y not consistent'
else:
y = Y[0,:]
assert transposeZ, '*** X and Y not consistent'
x_at_cell_centers = (len(x) == Zshapex)
if not x_at_cell_centers:
assert (len(x) == Zshapex + 1), \
'*** X should be same shape as Z or one larger'
diffx = np.diff(x)
diffy = np.diff(y)
dx = np.mean(diffx)
dy = np.mean(diffy)
if diffx.max()-diffx.min() > 1e-3*dx:
raise ValueError("x must be equally spaced for pcolorcells")
if diffy.max()-diffy.min() > 1e-3*dy:
raise ValueError("y must be equally spaced for pcolorcells")
if x_at_cell_centers:
# cell centers, so xedge should be expanded by dx/2 on each end:
xedge = np.arange(x[0]-0.5*dx, x[-1]+dx, dx)
yedge = np.arange(y[0]-0.5*dy, y[-1]+dy, dy)
else:
# assume x already contains edge values
xedge = x
yedge = y
if transposeZ:
if ax is None:
pc = plt.pcolormesh(xedge, yedge, Z.T, **kwargs)
else:
pc = ax.pcolormesh(xedge, yedge, Z.T, **kwargs)
else:
if ax is None:
pc = plt.pcolormesh(xedge, yedge, Z, **kwargs)
else:
pc = ax.pcolormesh(xedge, yedge, Z, **kwargs)
return pc
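def _pcolorcells_example():
    """
    Usage sketch with hypothetical data, illustrating the two coordinate
    conventions accepted by pcolorcells: cell edges and cell centers.
    """
    import numpy as np
    x_edges = np.linspace(0., 1., 11)    # 11 edges -> 10 cells == Z.shape[1]
    y_edges = np.linspace(0., 2., 21)    # 21 edges -> 20 cells == Z.shape[0]
    Z = np.random.rand(20, 10)        # one constant value per grid cell
    pcolorcells(x_edges, y_edges, Z)            # edge values are used as given
    x_centers = 0.5 * (x_edges[:-1] + x_edges[1:])
    y_centers = 0.5 * (y_edges[:-1] + y_edges[1:])
    pcolorcells(x_centers, y_centers, Z)        # centers are expanded to edges internally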
| [
"numpy.mean",
"pylab.plot",
"numpy.diff",
"matplotlib.pyplot.pcolormesh",
"numpy.arange"
] | [((467, 525), 'pylab.plot', 'plot', (['[x1, x2, x2, x1, x1]', '[y1, y1, y2, y2, y1]'], {}), '([x1, x2, x2, x1, x1], [y1, y1, y2, y2, y1], **kwargs)\n', (471, 525), False, 'from pylab import plot\n'), ((2695, 2705), 'numpy.diff', 'np.diff', (['x'], {}), '(x)\n', (2702, 2705), True, 'import numpy as np\n'), ((2718, 2728), 'numpy.diff', 'np.diff', (['y'], {}), '(y)\n', (2725, 2728), True, 'import numpy as np\n'), ((2738, 2752), 'numpy.mean', 'np.mean', (['diffx'], {}), '(diffx)\n', (2745, 2752), True, 'import numpy as np\n'), ((2762, 2776), 'numpy.mean', 'np.mean', (['diffy'], {}), '(diffy)\n', (2769, 2776), True, 'import numpy as np\n'), ((3120, 3162), 'numpy.arange', 'np.arange', (['(x[0] - 0.5 * dx)', '(x[-1] + dx)', 'dx'], {}), '(x[0] - 0.5 * dx, x[-1] + dx, dx)\n', (3129, 3162), True, 'import numpy as np\n'), ((3173, 3215), 'numpy.arange', 'np.arange', (['(y[0] - 0.5 * dy)', '(y[-1] + dy)', 'dy'], {}), '(y[0] - 0.5 * dy, y[-1] + dy, dy)\n', (3182, 3215), True, 'import numpy as np\n'), ((3372, 3415), 'matplotlib.pyplot.pcolormesh', 'plt.pcolormesh', (['xedge', 'yedge', 'Z.T'], {}), '(xedge, yedge, Z.T, **kwargs)\n', (3386, 3415), True, 'from matplotlib import pyplot as plt\n'), ((3540, 3581), 'matplotlib.pyplot.pcolormesh', 'plt.pcolormesh', (['xedge', 'yedge', 'Z'], {}), '(xedge, yedge, Z, **kwargs)\n', (3554, 3581), True, 'from matplotlib import pyplot as plt\n')] |
import pickle
from threading import Lock
from flask import Flask, request
from helpers.yolo_model import YoloModel
yolo = Flask(__name__)
LOCK = Lock()
@yolo.route("/yolo/predict", methods=["POST"])
def predict_objects():
image_path = request.form.get("image_path")
pkl_path = f"{image_path}-pkl"
with LOCK:
yolo_model = YoloModel(image_path=image_path)
yolo_model.predict_objects()
with open(pkl_path, "wb") as f:
pickle.dump(yolo_model.get_detected_objects(), f)
return pkl_path
if __name__ == "__main__":
yolo.run(host="0.0.0.0", port=5001, debug=False)
| [
"helpers.yolo_model.YoloModel",
"threading.Lock",
"flask.request.form.get",
"flask.Flask"
] | [((125, 140), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (130, 140), False, 'from flask import Flask, request\n'), ((149, 155), 'threading.Lock', 'Lock', ([], {}), '()\n', (153, 155), False, 'from threading import Lock\n'), ((245, 275), 'flask.request.form.get', 'request.form.get', (['"""image_path"""'], {}), "('image_path')\n", (261, 275), False, 'from flask import Flask, request\n'), ((347, 379), 'helpers.yolo_model.YoloModel', 'YoloModel', ([], {'image_path': 'image_path'}), '(image_path=image_path)\n', (356, 379), False, 'from helpers.yolo_model import YoloModel\n')] |
import os
COUNTER='approxmc'
ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
DATA_PATH = os.path.abspath(os.path.join(ROOT_DIR, '..', 'data'))
BNN2CNF_PATH = os.path.join(ROOT_DIR, '../mlp2cnf/bin/bnn2cnf')
EXAMPLE_PATH = os.path.join(ROOT_DIR, '../mlp2cnf/bin/example')
# Tests paths
TEST_SAMPLES_DIR = os.path.join(ROOT_DIR, '..', 'test_samples')
TEST_FORMULAS_DIR = os.path.join(ROOT_DIR, '..', 'test_formulas')
TESTS_INPUT = os.path.join(ROOT_DIR, 'tests', 'tests_input')
ROOT_BNN = os.path.join(TESTS_INPUT, 'bnn_tests')
BNN_TEST_CFG = os.path.join(TESTS_INPUT, 'bnn_tests_cfg')
# XXX hack should put it as option but too many options
# UNCOMMENT THIS TO RUN ON THE SHARED STORAGE
RESULTS_DIR = os.path.join(ROOT_DIR, '..')
#RESULTS_DIR = '/home/teo/test-nn/experiments'
TRAINED_MODELS_DIR = os.path.join(RESULTS_DIR, 'models')
TRAINED_MODELS_CP_DIR = os.path.join(RESULTS_DIR, 'models_checkpoint')
#FORMULAS_DIR = os.path.join(RESULTS_DIR, 'formulas')
#FORMULAS_DIR = os.path.join(RESULTS_DIR, 'formulas_card')
FORMULAS_DIR = os.path.join(RESULTS_DIR, 'formulas_cam_ready')
COUNT_OUT_DIR = os.path.join(RESULTS_DIR, 'output_' + COUNTER)
MNIST_SAMPLES = os.path.join(RESULTS_DIR, 'mnist_samples')
CONCRETE_IN_DIR = os.path.join(RESULTS_DIR, 'concrete_inputs')
# For canaries insertion
CANARY_DATASET_DIR = os.path.join(DATA_PATH, 'canary')
# For trojan attack
# TROJAN_RETRAIN_DATASET_DIR = ''
TROJAN_RETRAIN_DATASET_DIR = os.path.abspath(os.path.join(ROOT_DIR, '..', 'trojan'))
TROJAN_ORIGIN_DATA_DIR = ''
TROJAN_DIR = os.path.abspath(os.path.join(ROOT_DIR, '..', 'trojan'))
TROJAN_VERBOSE_DIR = os.path.join(TROJAN_DIR, 'verbose')
TROJAN_PREFC1_PATH = ''
ORIGIN_TROJAN_DATA_DIR = os.path.abspath(os.path.join(os.path.join(ROOT_DIR, '..', 'data'),
'trojan_data'))
# For adversarial training
ADV_TRAIN_DIR = os.path.abspath(os.path.join(ROOT_DIR, '..', 'adv_train'))
ADV_TRAIN_DATA_DIR = os.path.join(DATA_PATH, 'adv_train_data')
# constraints for dataset
UCI_CONSTRAINTS = os.path.abspath(os.path.join(ROOT_DIR, '..',
'dataset_constraints/uci_adult-constraints.txt'))
TROJAN_IMGS = os.path.abspath(os.path.join(ROOT_DIR, '..', 'trojan_imgs'))
TROJAN_MASK = os.path.abspath(os.path.join(ROOT_DIR, '..', 'trojan_mask'))
TROJAN_TARGETS=[0,1,4,5,9]
TROJAN_EPOCHS=[1,10,30]
| [
"os.path.abspath",
"os.path.join"
] | [((168, 216), 'os.path.join', 'os.path.join', (['ROOT_DIR', '"""../mlp2cnf/bin/bnn2cnf"""'], {}), "(ROOT_DIR, '../mlp2cnf/bin/bnn2cnf')\n", (180, 216), False, 'import os\n'), ((232, 280), 'os.path.join', 'os.path.join', (['ROOT_DIR', '"""../mlp2cnf/bin/example"""'], {}), "(ROOT_DIR, '../mlp2cnf/bin/example')\n", (244, 280), False, 'import os\n'), ((315, 359), 'os.path.join', 'os.path.join', (['ROOT_DIR', '""".."""', '"""test_samples"""'], {}), "(ROOT_DIR, '..', 'test_samples')\n", (327, 359), False, 'import os\n'), ((380, 425), 'os.path.join', 'os.path.join', (['ROOT_DIR', '""".."""', '"""test_formulas"""'], {}), "(ROOT_DIR, '..', 'test_formulas')\n", (392, 425), False, 'import os\n'), ((440, 486), 'os.path.join', 'os.path.join', (['ROOT_DIR', '"""tests"""', '"""tests_input"""'], {}), "(ROOT_DIR, 'tests', 'tests_input')\n", (452, 486), False, 'import os\n'), ((498, 536), 'os.path.join', 'os.path.join', (['TESTS_INPUT', '"""bnn_tests"""'], {}), "(TESTS_INPUT, 'bnn_tests')\n", (510, 536), False, 'import os\n'), ((552, 594), 'os.path.join', 'os.path.join', (['TESTS_INPUT', '"""bnn_tests_cfg"""'], {}), "(TESTS_INPUT, 'bnn_tests_cfg')\n", (564, 594), False, 'import os\n'), ((712, 740), 'os.path.join', 'os.path.join', (['ROOT_DIR', '""".."""'], {}), "(ROOT_DIR, '..')\n", (724, 740), False, 'import os\n'), ((809, 844), 'os.path.join', 'os.path.join', (['RESULTS_DIR', '"""models"""'], {}), "(RESULTS_DIR, 'models')\n", (821, 844), False, 'import os\n'), ((869, 915), 'os.path.join', 'os.path.join', (['RESULTS_DIR', '"""models_checkpoint"""'], {}), "(RESULTS_DIR, 'models_checkpoint')\n", (881, 915), False, 'import os\n'), ((1044, 1091), 'os.path.join', 'os.path.join', (['RESULTS_DIR', '"""formulas_cam_ready"""'], {}), "(RESULTS_DIR, 'formulas_cam_ready')\n", (1056, 1091), False, 'import os\n'), ((1108, 1154), 'os.path.join', 'os.path.join', (['RESULTS_DIR', "('output_' + COUNTER)"], {}), "(RESULTS_DIR, 'output_' + COUNTER)\n", (1120, 1154), False, 'import os\n'), ((1171, 1213), 'os.path.join', 'os.path.join', (['RESULTS_DIR', '"""mnist_samples"""'], {}), "(RESULTS_DIR, 'mnist_samples')\n", (1183, 1213), False, 'import os\n'), ((1232, 1276), 'os.path.join', 'os.path.join', (['RESULTS_DIR', '"""concrete_inputs"""'], {}), "(RESULTS_DIR, 'concrete_inputs')\n", (1244, 1276), False, 'import os\n'), ((1324, 1357), 'os.path.join', 'os.path.join', (['DATA_PATH', '"""canary"""'], {}), "(DATA_PATH, 'canary')\n", (1336, 1357), False, 'import os\n'), ((1616, 1651), 'os.path.join', 'os.path.join', (['TROJAN_DIR', '"""verbose"""'], {}), "(TROJAN_DIR, 'verbose')\n", (1628, 1651), False, 'import os\n'), ((1962, 2003), 'os.path.join', 'os.path.join', (['DATA_PATH', '"""adv_train_data"""'], {}), "(DATA_PATH, 'adv_train_data')\n", (1974, 2003), False, 'import os\n'), ((58, 83), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (73, 83), False, 'import os\n'), ((114, 150), 'os.path.join', 'os.path.join', (['ROOT_DIR', '""".."""', '"""data"""'], {}), "(ROOT_DIR, '..', 'data')\n", (126, 150), False, 'import os\n'), ((1458, 1496), 'os.path.join', 'os.path.join', (['ROOT_DIR', '""".."""', '"""trojan"""'], {}), "(ROOT_DIR, '..', 'trojan')\n", (1470, 1496), False, 'import os\n'), ((1555, 1593), 'os.path.join', 'os.path.join', (['ROOT_DIR', '""".."""', '"""trojan"""'], {}), "(ROOT_DIR, '..', 'trojan')\n", (1567, 1593), False, 'import os\n'), ((1898, 1939), 'os.path.join', 'os.path.join', (['ROOT_DIR', '""".."""', '"""adv_train"""'], {}), "(ROOT_DIR, '..', 'adv_train')\n", (1910, 1939), False, 'import 
os\n'), ((2066, 2143), 'os.path.join', 'os.path.join', (['ROOT_DIR', '""".."""', '"""dataset_constraints/uci_adult-constraints.txt"""'], {}), "(ROOT_DIR, '..', 'dataset_constraints/uci_adult-constraints.txt')\n", (2078, 2143), False, 'import os\n'), ((2223, 2266), 'os.path.join', 'os.path.join', (['ROOT_DIR', '""".."""', '"""trojan_imgs"""'], {}), "(ROOT_DIR, '..', 'trojan_imgs')\n", (2235, 2266), False, 'import os\n'), ((2298, 2341), 'os.path.join', 'os.path.join', (['ROOT_DIR', '""".."""', '"""trojan_mask"""'], {}), "(ROOT_DIR, '..', 'trojan_mask')\n", (2310, 2341), False, 'import os\n'), ((1730, 1766), 'os.path.join', 'os.path.join', (['ROOT_DIR', '""".."""', '"""data"""'], {}), "(ROOT_DIR, '..', 'data')\n", (1742, 1766), False, 'import os\n')] |
#!/usr/bin/env python2
"""Class for writing position controller."""
from __future__ import division, print_function, absolute_import
# Import ROS libraries
import roslib
import rospy
import numpy as np
# from ros_interface import ROSControllerNode
# Import class that computes the desired positions
from tf.transformations import euler_from_quaternion
from geometry_msgs.msg import TransformStamped, Twist
class PositionController(object):
"""ROS interface for controlling the Parrot ARDrone in the Vicon Lab."""
# write code here for position controller
def __init__(self):
self.g = -9.8
self.Kp_yaw = 4
# natural frequencies and damping ratios
w_n_x = 3.0 # 2.2 / 1.8 # 1.8
zeta_x = 0.0 #1.0 # 0.9
w_n_y = 3.0
zeta_y = 1.0
w_n_z = 10.0 # 4.0 / 1.8 # 2
zeta_z = 1.0 #1.0 # 0.7
# gains
self.Kp_xc = w_n_x**2
self.Kv_xc = 2 * w_n_x * zeta_x
self.Kp_yc = w_n_x**2
self.Kv_yc = 2 * w_n_x * zeta_x
self.Kp_zc = w_n_z**2
self.Kv_zc = 2 * w_n_z * zeta_z
self.limit_x = 1.0 #0.7
self.limit_y = 1.0 #0.7
self.limit_z = 1.0 #0.5
self.old_time = rospy.get_time()
self._x_old = 0.0
self._y_old = 0.0
self._z_old = 0.0
self._z_oold = 0.0
self.yaw_old = 0.0
self.yaw_d = 0.0
def get_command(self, _x, _y, _z, roll, pitch, yaw, _x_d, _y_d, _z_d,
_vx_d, _vy_d, _vz_d, _yaw_d):
self.yaw_d = _yaw_d
current_time = rospy.get_time()
dt = current_time - self.old_time
if dt == 0:
dt = 1.0/100
self.old_time = current_time
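        # Estimate vertical acceleration with a second-order finite difference of the last three z samples.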
z_dd = (_z - 2 * self._z_old + self._z_oold) / (dt**2)
# commanded specific force
f = (z_dd - self.g) / (np.cos(pitch) * np.cos(roll))
_vx = (_x - self._x_old) / dt
_vy = (_y - self._y_old) / dt
_vz = (_z - self._z_old) / dt
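        # Clamp the desired position so the setpoint never sits more than limit_* away from the current position.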
if (_x_d - _x) > self.limit_x:
_x_d = _x + self.limit_x
elif (_x_d - _x) < -self.limit_x:
_x_d = _x - self.limit_x
if (_y_d - _y) > self.limit_y:
_y_d = _y + self.limit_y
elif (_y_d - _y) < -self.limit_y:
_y_d = _y - self.limit_y
if (_z_d - _z) > self.limit_z:
_z_d = _z + self.limit_z
elif (_z_d - _z) < -self.limit_z:
_z_d = _z - self.limit_z
# commanded horizontal accelerations
x_dd_c = self.Kv_xc * (_vx_d - _vx) + self.Kp_xc * (_x_d - _x)
y_dd_c = self.Kv_yc * (_vy_d - _vy) + self.Kp_yc * (_y_d - _y)
z_dd_c = self.Kv_zc * (_vz_d - _vz) + self.Kp_zc * (_z_d - _z)
print("x_dd_c: ",x_dd_c)
print("y_dd_c: ",y_dd_c)
print("z_dd_c: ",z_dd_c)
# code for tuning
#x_dd_c = 1
#y_dd_c = 0
#z_dd_c = 0
if (-y_dd_c / f > 1):
y_dd_c = -1.0*f
elif (-y_dd_c/f < -1):
y_dd_c = 1.0*f
# commanded rolls and pitch
roll_c_int = np.arcsin(-y_dd_c / f)
if (x_dd_c / f / np.cos(roll_c_int) > 1):
x_dd_c = 0.98*f *np.cos(roll_c_int) #0.98
elif (x_dd_c/f / np.cos(roll_c_int) < -1):
x_dd_c = -0.98*f*np.cos(roll_c_int) #-0.98
pitch_c_int = np.arcsin(x_dd_c / f / np.cos(roll_c_int))
roll_c = roll_c_int * np.cos(yaw) + pitch_c_int * np.sin(yaw)
pitch_c = -roll_c_int * np.sin(yaw) + pitch_c_int * np.cos(yaw)
# commanded climb and yaw rates
climb_rate_c = z_dd_c
# need to wrap angles for yaw so we dont get weird yaw rates
# first wrap both angles from pi to - pi
while self.yaw_d > np.pi:
self.yaw_d = self.yaw_d - 2*np.pi
while self.yaw_d < -np.pi:
self.yaw_d = self.yaw_d + 2*np.pi
while yaw > np.pi:
yaw = yaw - 2*np.pi
while yaw < -np.pi:
yaw = yaw + 2*np.pi
# then modify to get correct yaw rate
if (self.yaw_d - yaw) > np.pi:
yaw = yaw + 2*np.pi
elif (self.yaw_d - yaw) < -np.pi:
yaw = yaw - 2*np.pi
yaw_rate_c = self.Kp_yaw * (self.yaw_d - yaw)
#yaw_rate_c = 0.5
#print("desired yaw: ", self.yaw_d)
#print("actual yaw: ", yaw)
# update old values
self._x_old = _x
self._y_old = _y
self._z_oold = self._z_old
self._z_old = _z
# self.yaw_old = yaw
# output limiters
# roll pitch and climb were [-1,1]
# yaw rate was [-5,5]
# if (roll_c > np.pi/6):
# roll_c = np.pi/6
# elif (roll_c < -np.pi/6):
# roll_c = -np.pi/6
# if (pitch_c > np.pi/6):
# pitch_c = np.pi/6
# elif (pitch_c < -np.pi/6):
# pitch_c = -np.pi/6
# if (climb_rate_c > 8163):
# climb_rate_c = 8163
# elif (climb_rate_c < 5156):
# climb_rate_c = 5156
# if (yaw_rate_c > 1.11*np.pi):
# yaw_rate_c = 1.11*np.pi
# elif (yaw_rate_c < -1.11*np.pi):
# yaw_rate_c = -1.11*np.pi
# print(roll_c, pitch_c, climb_rate_c, yaw_rate_c)
return roll_c, pitch_c, climb_rate_c, yaw_rate_c
| [
"numpy.sin",
"rospy.get_time",
"numpy.arcsin",
"numpy.cos"
] | [((1220, 1236), 'rospy.get_time', 'rospy.get_time', ([], {}), '()\n', (1234, 1236), False, 'import rospy\n'), ((1579, 1595), 'rospy.get_time', 'rospy.get_time', ([], {}), '()\n', (1593, 1595), False, 'import rospy\n'), ((3085, 3107), 'numpy.arcsin', 'np.arcsin', (['(-y_dd_c / f)'], {}), '(-y_dd_c / f)\n', (3094, 3107), True, 'import numpy as np\n'), ((1851, 1864), 'numpy.cos', 'np.cos', (['pitch'], {}), '(pitch)\n', (1857, 1864), True, 'import numpy as np\n'), ((1867, 1879), 'numpy.cos', 'np.cos', (['roll'], {}), '(roll)\n', (1873, 1879), True, 'import numpy as np\n'), ((3134, 3152), 'numpy.cos', 'np.cos', (['roll_c_int'], {}), '(roll_c_int)\n', (3140, 3152), True, 'import numpy as np\n'), ((3188, 3206), 'numpy.cos', 'np.cos', (['roll_c_int'], {}), '(roll_c_int)\n', (3194, 3206), True, 'import numpy as np\n'), ((3366, 3384), 'numpy.cos', 'np.cos', (['roll_c_int'], {}), '(roll_c_int)\n', (3372, 3384), True, 'import numpy as np\n'), ((3416, 3427), 'numpy.cos', 'np.cos', (['yaw'], {}), '(yaw)\n', (3422, 3427), True, 'import numpy as np\n'), ((3444, 3455), 'numpy.sin', 'np.sin', (['yaw'], {}), '(yaw)\n', (3450, 3455), True, 'import numpy as np\n'), ((3488, 3499), 'numpy.sin', 'np.sin', (['yaw'], {}), '(yaw)\n', (3494, 3499), True, 'import numpy as np\n'), ((3516, 3527), 'numpy.cos', 'np.cos', (['yaw'], {}), '(yaw)\n', (3522, 3527), True, 'import numpy as np\n'), ((3238, 3256), 'numpy.cos', 'np.cos', (['roll_c_int'], {}), '(roll_c_int)\n', (3244, 3256), True, 'import numpy as np\n'), ((3293, 3311), 'numpy.cos', 'np.cos', (['roll_c_int'], {}), '(roll_c_int)\n', (3299, 3311), True, 'import numpy as np\n')] |
from rest_framework import serializers
from university_app.exceptions import NotFoundException
from university_app.models import University, Department, Faculty, Class
class UniversitySerializer(serializers.ModelSerializer):
class Meta:
model = University
fields = ('id', 'name')
class DepartmentSerializer(serializers.ModelSerializer):
class Meta:
model = Department
fields = ('id', 'name')
def create(self, validated_data):
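        # The owning faculty id is supplied through the serializer context by the caller.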
faculty_id = self.context['faculty_id']
faculty = Faculty.objects.get(id=faculty_id)
if faculty:
return Department.objects.create(**validated_data, faculty=faculty)
else:
raise NotFoundException()
def update(self, instance, validated_data):
instance.name = validated_data.get('name', instance.name)
instance.save()
return instance
class ClassSerializer(serializers.ModelSerializer):
class Meta:
model = Class
fields = ('id', 'num')
def create(self, validated_data):
department_id = self.context['department_id']
department = Department.objects.get(id=department_id)
if department:
return Class.objects.create(**validated_data, department=department)
else:
raise NotFoundException()
def update(self, instance, validated_data):
instance.num = validated_data.get('num', instance.num)
instance.save()
return instance
class FacultySerializer(serializers.ModelSerializer):
class Meta:
model = Faculty
fields = ('id', 'name', 'university')
| [
"university_app.models.Department.objects.get",
"university_app.exceptions.NotFoundException",
"university_app.models.Faculty.objects.get",
"university_app.models.Class.objects.create",
"university_app.models.Department.objects.create"
] | [((542, 576), 'university_app.models.Faculty.objects.get', 'Faculty.objects.get', ([], {'id': 'faculty_id'}), '(id=faculty_id)\n', (561, 576), False, 'from university_app.models import University, Department, Faculty, Class\n'), ((1129, 1169), 'university_app.models.Department.objects.get', 'Department.objects.get', ([], {'id': 'department_id'}), '(id=department_id)\n', (1151, 1169), False, 'from university_app.models import University, Department, Faculty, Class\n'), ((616, 676), 'university_app.models.Department.objects.create', 'Department.objects.create', ([], {'faculty': 'faculty'}), '(**validated_data, faculty=faculty)\n', (641, 676), False, 'from university_app.models import University, Department, Faculty, Class\n'), ((709, 728), 'university_app.exceptions.NotFoundException', 'NotFoundException', ([], {}), '()\n', (726, 728), False, 'from university_app.exceptions import NotFoundException\n'), ((1212, 1273), 'university_app.models.Class.objects.create', 'Class.objects.create', ([], {'department': 'department'}), '(**validated_data, department=department)\n', (1232, 1273), False, 'from university_app.models import University, Department, Faculty, Class\n'), ((1306, 1325), 'university_app.exceptions.NotFoundException', 'NotFoundException', ([], {}), '()\n', (1323, 1325), False, 'from university_app.exceptions import NotFoundException\n')] |
from dassl.engine import TRAINER_REGISTRY,TrainerMultiAdaptation
from dassl.data import DataManager
from dassl.utils import MetricMeter
from torch.utils.data import Dataset as TorchDataset
from dassl.optim import build_optimizer, build_lr_scheduler
from dassl.utils import count_num_param
import torch
import torch.nn as nn
from torch.nn import functional as F
from dassl.engine.trainer_tmp import SimpleNet
import numpy as np
from dassl.modeling import build_layer
from dassl.engine.da.heterogeneous.heterogeneous_adaptation import HeterogeneousModelAdaptation
@TRAINER_REGISTRY.register()
class HeterogeneousModelAdaptationDSBN(HeterogeneousModelAdaptation):
"""
"""
def __init__(self, cfg):
super().__init__(cfg)
self.target_dsbn_idx = len(self.dm.source_domains_label_size)
print("current target dsbn idx : ", self.target_dsbn_idx)
def build_temp_layer(self, cfg):
embedding_layer_info = cfg.MODEL.LAYER
layer_name = embedding_layer_info.NAME
layer_params = embedding_layer_info.PARAMS
total_domain = layer_params.total_domain
check_total_domain = len(self.dm.source_domains_label_size) + 1
if total_domain != check_total_domain:
print("there is problem with the provided total domain : ", total_domain)
layer_params.total_domain = check_total_domain
print("total domain for DSBN : ", layer_params.total_domain)
return [layer_name, layer_params]
def forward_backward(self, batch_x, list_batch_u,backprob = True):
parsed = self.parse_batch_train(batch_x, list_batch_u)
input_x, label_x, domain_x, list_input_u,list_label_u,domain_u = parsed
loss_u = 0
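        # Accumulate the source-domain loss: each source batch passes through its own feature extractor,
        # the shared temporal layer indexed by its domain (DSBN), and its own classifier.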
for u, y, d in zip(list_input_u, list_label_u, domain_u):
# print("check range for source data : {} - {}".format(u.max(),u.min()))
f = self.SourceFeatures[d](u)
temp_layer = self.TemporalLayer(f,d)
logits = self.SourceClassifiers[d](temp_layer)
loss_u += self.cce[d](logits, y)
# print("loss U :",loss_u)
# print("num domain : ",len(domain_u))
loss_u /= len(domain_u)
# print("check range for target data : {} - {}".format(input_x.max(), input_x.min()))
f_target = self.TargetFeature(input_x)
temp_layer_target = self.TemporalLayer(f_target,self.target_dsbn_idx)
logits_target = self.TargetClassifier(temp_layer_target)
if backprob:
loss_x = self.ce(logits_target,label_x)
else:
loss_x = self.val_ce(logits_target, label_x)
total_loss = loss_x+loss_u
loss_summary = {
'total_loss': total_loss.item(),
'loss_x': loss_x.item(),
'loss_u': loss_u.item()
}
# print("loss x :",loss_x)
if backprob:
self.model_backward_and_update(total_loss)
if (self.batch_idx + 1) == self.num_batches:
self.update_lr()
# else:
# f_target = self.TargetFeature(input_x)
# temp_layer_target = self.TemporalLayer(f_target)
# logits_target = self.TargetClassifier(temp_layer_target)
# loss_x = self.val_ce(logits_target, label_x)
# loss_summary = {
# 'loss_x': loss_x.item()
# }
return loss_summary
def model_inference(self, input,return_feature=False):
f = self.TargetFeature(input)
temp_layer = self.TemporalLayer(f, self.target_dsbn_idx)
logits = self.TargetClassifier(temp_layer)
result = F.softmax(logits, 1)
if return_feature:
return result,temp_layer
return result
| [
"dassl.engine.TRAINER_REGISTRY.register",
"torch.nn.functional.softmax"
] | [((565, 592), 'dassl.engine.TRAINER_REGISTRY.register', 'TRAINER_REGISTRY.register', ([], {}), '()\n', (590, 592), False, 'from dassl.engine import TRAINER_REGISTRY, TrainerMultiAdaptation\n'), ((3605, 3625), 'torch.nn.functional.softmax', 'F.softmax', (['logits', '(1)'], {}), '(logits, 1)\n', (3614, 3625), True, 'from torch.nn import functional as F\n')] |
# -----------------------------------------------------------------------------
# Copyright (c) 2009-2016 <NAME>. All rights reserved.
# Distributed under the (new) BSD License.
# -----------------------------------------------------------------------------
from glumpy import app, gloo, gl, transforms
from . axes import Axes
class Figure(object):
""" """
def __init__(self, figsize=(10,10), dpi=72, color=(.95,.95,.95,1)):
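        # Convert the matplotlib-style figsize (inches) and dpi into a window size in pixels.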
width = int(round(figsize[0] * dpi))
height = int(round(figsize[1] * dpi))
self.window = app.Window(width=width, height=height, color=color,
title = "Figure (matplotlib API)")
self.viewport = app.Viewport()
def on_draw(self, dt):
self.window.clear()
def show(self):
self.window.push_handlers(self.viewport)
self.window.push_handlers(self)
app.run()
def add_axes(self, rect=[0,0,1,1], facecolor=(1,1,1,1),
xscale = None, yscale = None, zscale = None,
projection = None, interface = None, aspect=None):
axes = Axes(rect=rect, facecolor=facecolor, aspect=aspect,
xscale=xscale, yscale=yscale, zscale=zscale,
projection=projection, interface=interface)
self.viewport.add(axes)
return axes
| [
"glumpy.app.Window",
"glumpy.app.run",
"glumpy.app.Viewport"
] | [((548, 637), 'glumpy.app.Window', 'app.Window', ([], {'width': 'width', 'height': 'height', 'color': 'color', 'title': '"""Figure (matplotlib API)"""'}), "(width=width, height=height, color=color, title=\n 'Figure (matplotlib API)')\n", (558, 637), False, 'from glumpy import app, gloo, gl, transforms\n'), ((692, 706), 'glumpy.app.Viewport', 'app.Viewport', ([], {}), '()\n', (704, 706), False, 'from glumpy import app, gloo, gl, transforms\n'), ((883, 892), 'glumpy.app.run', 'app.run', ([], {}), '()\n', (890, 892), False, 'from glumpy import app, gloo, gl, transforms\n')] |
import random
max_length = 20
elem_range = 5 # for generating lists
def generate(length):
l = []
    for i in range(length):
l.append(random.randrange(elem_range))
return l
def remove_duplicates(l):
l.sort()
for i in range(len(l) - 1, 0, -1):
if l[i] == l[i - 1]:
del l[i]
return l
l = generate(random.randrange(1, max_length))
print("lista: ", end = "")
print(*l, sep = ", ")
remove_duplicates(l)
print("bez duplikatow: ", end = "")
print(*l, sep = ", ")
| [
"random.randrange"
] | [((357, 388), 'random.randrange', 'random.randrange', (['(1)', 'max_length'], {}), '(1, max_length)\n', (373, 388), False, 'import random\n'), ((148, 176), 'random.randrange', 'random.randrange', (['elem_range'], {}), '(elem_range)\n', (164, 176), False, 'import random\n')] |
'''
Created on Nov 3, 2011
@author: sean
'''
import os
import sys
import struct
from time import time
py3 = sys.version_info.major >= 3
if py3:
import builtins #@UnresolvedImport
else:
import __builtin__ as builtins
import marshal
import imp
MAGIC = imp.get_magic()
def create_pyc(codestring, cfile, timestamp=None):
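    # Compile the source text and write a .pyc-style stream: magic number, timestamp, then the marshalled code object.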
if timestamp is None:
timestamp = time()
codeobject = builtins.compile(codestring, '<recompile>', 'exec')
cfile.write(MAGIC)
    cfile.write(struct.pack('i', int(timestamp)))
marshal.dump(codeobject, cfile)
cfile.flush()
def dump_pyc(code, fd, timestamp=None):
if timestamp is None:
timestamp = time()
fd.write(MAGIC)
    fd.write(struct.pack('i', int(timestamp)))
marshal.dump(code, fd)
fd.flush()
| [
"imp.get_magic",
"__builtin__.compile",
"marshal.dump",
"struct.pack",
"time.time"
] | [((268, 283), 'imp.get_magic', 'imp.get_magic', ([], {}), '()\n', (281, 283), False, 'import imp\n'), ((412, 463), '__builtin__.compile', 'builtins.compile', (['codestring', '"""<recompile>"""', '"""exec"""'], {}), "(codestring, '<recompile>', 'exec')\n", (428, 463), True, 'import __builtin__ as builtins\n'), ((545, 576), 'marshal.dump', 'marshal.dump', (['codeobject', 'cfile'], {}), '(codeobject, cfile)\n', (557, 576), False, 'import marshal\n'), ((765, 787), 'marshal.dump', 'marshal.dump', (['code', 'fd'], {}), '(code, fd)\n', (777, 787), False, 'import marshal\n'), ((383, 389), 'time.time', 'time', ([], {}), '()\n', (387, 389), False, 'from time import time\n'), ((512, 539), 'struct.pack', 'struct.pack', (['"""i"""', 'timestamp'], {}), "('i', timestamp)\n", (523, 539), False, 'import struct\n'), ((687, 693), 'time.time', 'time', ([], {}), '()\n', (691, 693), False, 'from time import time\n'), ((732, 759), 'struct.pack', 'struct.pack', (['"""i"""', 'timestamp'], {}), "('i', timestamp)\n", (743, 759), False, 'import struct\n')] |
# TODO: Implement checkpoints
import os
import argparse
import sys
import json
import datetime
import time
import numpy as np
import keras
import skimage.io as io
import skimage.transform as trans
from keras.models import *
from keras.layers import *
from keras.optimizers import *
from keras.preprocessing.image import ImageDataGenerator
import glob
class Metrics(keras.callbacks.Callback):
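    # Keras callback that dumps the running metrics to JSON files after every batch and epoch.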
def on_train_begin(self, logs={}):
self.metrics = {}
self.metrics2 = {}
with open('{}/experiment.json'.format(output_path), 'w') as file:
json.dump(self.metrics, file)
def on_batch_end(self, batch, logs={}):
try:
self.metrics2[batch] = {
'acc': float(logs.get('acc')),
'loss': float(logs.get('loss')),
}
with open('{}/experiment_batch_unet.json'.format(output_path), 'w') as file:
json.dump(self.metrics2, file)
except Exception as identifier:
print("Error encountered: ", identifier)
return None
def on_epoch_end(self, epoch, logs={}):
self.metrics[epoch] = {
'acc': logs.get('acc'),
'val_acc': logs.get('val_acc'),
'loss': logs.get('loss'),
'val_loss': logs.get('val_loss')
}
with open('{}/experiment_unet.json'.format(output_path), 'w') as file:
json.dump(self.metrics, file)
return None
def unet(height, width, loss, pretrained_weights=None):
input_size = (height, width, 1)
inputs = Input(input_size)
conv1 = Conv2D(64, 3, activation='relu', padding='same', kernel_initializer='he_normal')(inputs)
conv1 = Conv2D(64, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv1)
pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
conv2 = Conv2D(128, 3, activation='relu', padding='same', kernel_initializer='he_normal')(pool1)
conv2 = Conv2D(128, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv2)
pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
conv3 = Conv2D(256, 3, activation='relu', padding='same', kernel_initializer='he_normal')(pool2)
conv3 = Conv2D(256, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv3)
pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
conv4 = Conv2D(512, 3, activation='relu', padding='same', kernel_initializer='he_normal')(pool3)
conv4 = Conv2D(512, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv4)
drop4 = Dropout(0.5)(conv4)
pool4 = MaxPooling2D(pool_size=(2, 2))(drop4)
conv5 = Conv2D(1024, 3, activation='relu', padding='same', kernel_initializer='he_normal')(pool4)
conv5 = Conv2D(1024, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv5)
drop5 = Dropout(0.5)(conv5)
up6 = Conv2D(512, 2, activation='relu', padding='same', kernel_initializer='he_normal')(UpSampling2D(size=(2, 2))
(drop5))
merge6 = concatenate([drop4, up6], axis=3)
conv6 = Conv2D(512, 3, activation='relu', padding='same', kernel_initializer='he_normal')(merge6)
conv6 = Conv2D(512, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv6)
up7 = Conv2D(256, 2, activation='relu', padding='same', kernel_initializer='he_normal')(UpSampling2D
(size=(2, 2))(conv6))
merge7 = concatenate([conv3, up7], axis=3)
conv7 = Conv2D(256, 3, activation='relu', padding='same', kernel_initializer='he_normal')(merge7)
conv7 = Conv2D(256, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv7)
up8 = Conv2D(128, 2, activation='relu', padding='same', kernel_initializer='he_normal')(UpSampling2D(size=(2, 2))
(conv7))
merge8 = concatenate([conv2, up8], axis=3)
conv8 = Conv2D(128, 3, activation='relu', padding='same', kernel_initializer='he_normal')(merge8)
conv8 = Conv2D(128, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv8)
up9 = Conv2D(64, 2, activation='relu', padding='same', kernel_initializer='he_normal')(
UpSampling2D(size=(2, 2))(conv8))
merge9 = concatenate([conv1, up9], axis=3)
conv9 = Conv2D(64, 3, activation='relu', padding='same', kernel_initializer='he_normal')(merge9)
conv9 = Conv2D(64, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv9)
conv9 = Conv2D(2, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv9)
conv10 = Conv2D(1, 1, activation='sigmoid')(conv9)
model: Model = Model(input=inputs, output=conv10)
model.compile(optimizer=Adam(lr=1e-4), loss=loss, metrics=['accuracy'])
model.summary()
if pretrained_weights:
model.load_weights(pretrained_weights)
return model
def adjust_data(img, mask, flag_multi_class, num_class):
if flag_multi_class:
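        # Multi-class case: rescale the image and one-hot encode the mask into num_class channels.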
img = img / 255
mask = mask[:, :, :, 0] if (len(mask.shape) == 4) else mask[:, :, 0]
new_mask = np.zeros(mask.shape + (num_class,))
for i in range(num_class):
new_mask[mask == i, i] = 1
new_mask = np.reshape(new_mask, (new_mask.shape[0], new_mask.shape[1] * new_mask.shape[2], new_mask.shape[3])) \
if flag_multi_class else np.reshape(new_mask, (new_mask.shape[0] * new_mask.shape[1], new_mask.shape[2]))
mask = new_mask
elif np.max(img) > 1:
img = img / 255
mask = mask / 255
mask[mask > 0.5] = 1
mask[mask <= 0.5] = 0
return img, mask
def train_generator(batch_size, train_path, image_folder, mask_folder, height, width, aug_dict,
image_color_mode="grayscale",
mask_color_mode="grayscale", image_save_prefix="image", mask_save_prefix="mask",
flag_multi_class=False, num_class=2, save_to_dir=None, seed=1):
print("Executing 2")
print(batch_size)
target_size = (height, width)
print(target_size)
image_datagen = ImageDataGenerator(**aug_dict)
mask_datagen = ImageDataGenerator(**aug_dict)
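    # The same seed for both flows keeps each image batch aligned with its mask batch.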
image_generator = image_datagen.flow_from_directory(
train_path,
classes=[image_folder],
class_mode=None,
color_mode=image_color_mode,
target_size=target_size,
batch_size=batch_size,
save_to_dir=save_to_dir,
save_prefix=image_save_prefix,
seed=seed)
mask_generator = mask_datagen.flow_from_directory(
train_path,
classes=[mask_folder],
class_mode=None,
color_mode=mask_color_mode,
target_size=target_size,
batch_size=batch_size,
save_to_dir=save_to_dir,
save_prefix=mask_save_prefix,
seed=seed)
train_generator = zip(image_generator, mask_generator)
for (img, mask) in train_generator:
img, mask = adjust_data(img, mask, flag_multi_class, num_class)
yield (img, mask)
def test_generator(test_path, num_image=30, target_size=(256, 256), flag_multi_class=False, as_gray=True):
for i in range(num_image):
img = io.imread(os.path.join(test_path, "%d.png" % i), as_gray=as_gray)
img = img / 255
img = trans.resize(img, target_size)
img = np.reshape(img, img.shape + (1,)) if (not flag_multi_class) else img
img = np.reshape(img, (1,) + img.shape)
yield img
def gen_train_npy(image_path, mask_path, flag_multi_class=False, num_class=2, image_prefix="image", mask_prefix="mask",
image_as_gray=True, mask_as_gray=True):
image_name_arr = glob.glob(os.path.join(image_path, "%s*.png" % image_prefix))
image_arr = []
mask_arr = []
for index, item in enumerate(image_name_arr):
img = io.imread(item, as_gray=image_as_gray)
img = np.reshape(img, img.shape + (1,)) if image_as_gray else img
mask = io.imread(item.replace(image_path, mask_path).replace(image_prefix, mask_prefix), as_gray=mask_as_gray)
mask = np.reshape(mask, mask.shape + (1,)) if mask_as_gray else mask
img, mask = adjust_data(img, mask, flag_multi_class, num_class)
image_arr.append(img)
mask_arr.append(mask)
image_arr = np.array(image_arr)
mask_arr = np.array(mask_arr)
return image_arr, mask_arr
data_gen_args = dict(rotation_range=0.0, width_shift_range=0.00, height_shift_range=0.00, shear_range=0.00,
zoom_range=0.00, horizontal_flip=False, fill_mode='nearest')
def process_arguments(args):
parser = argparse.ArgumentParser(description='UNet model for semantic segmentation')
parser.add_argument('--images-path', action='store', help='path to directory of images')
parser.add_argument('--images-name', action='store', help='name of the folder of images')
    parser.add_argument('--mask-name', action='store', help='name of the folder of corresponding masks')
parser.add_argument('--output-path', action='store', default='.', help='path to output metrics')
parser.add_argument('--height', action='store', default=256, help='height of images (int)')
parser.add_argument('--width', action='store', default=256, help='width of images (int)')
parser.add_argument('--channels', action='store', default=3, help='channels of images: 1 = grayscale, 3 = RGB ,'
'4=RGBA (int)')
parser.add_argument('--use-pretrained', action='store', default=False, help='use pretrained ResNet50 weights (bool)'
)
parser.add_argument('--weights-path', action='store', help='path to pretrained weights')
parser.add_argument('--epochs', action='store', help='number of epochs for training')
parser.add_argument('--steps-per-epoch', action='store', default=2500,
help='number of steps per epochs for training'
)
parser.add_argument('--batch-size', action='store', default=4, help='batch size fed to the neural network (int)')
parser.add_argument('--class_mode', action='store', default='categorical', help='"categorical", "binary", "sparse",'
' "input", or None')
parser.add_argument('--learning-rate', action='store', default=0.0001, help='learning rate of Adam Optimizer'
' (float)')
parser.add_argument('--loss', action='store', default='binary_crossentropy', help='loss function used to '
'compile model')
params = vars(parser.parse_args(args))
return params
if __name__ == "__main__":
params = process_arguments(sys.argv[1:])
images_path = params['images_path']
images_name = params['images_name']
mask_name = params['mask_name']
output_path = params['output_path']
height = int(params['height'])
width = int(params['width'])
channels = int(params['channels'])
use_pretrained = (params['use_pretrained'])
epochs = int(params['epochs'])
steps_per_epoch = int(params['steps_per_epoch'])
batch_size = int(params['batch_size'])
class_mode = params['class_mode']
learning_rate = float(params['learning_rate'])
loss = params['loss']
    color_mode = lambda: 'rgba' if channels == 4 else (
'grayscale' if channels == 1 else 'rgb') # handle this potential error
print("Executing")
generator = train_generator(batch_size, images_path, images_name, mask_name, height, width, data_gen_args,
save_to_dir=None)
model = unet(height, width, loss)
metric_logger = Metrics()
keras_history = model.fit_generator(generator, steps_per_epoch=steps_per_epoch, epochs=epochs,
callbacks=[metric_logger])
model.save_weights("{}/model_unet_{}_epochs_{}.h5".format(output_path, datetime.datetime.fromtimestamp(time.time()).
strftime('%Y-%m-%d-%H:%M:%S'), epochs))
pass
| [
"argparse.ArgumentParser",
"os.path.join",
"keras.preprocessing.image.ImageDataGenerator",
"skimage.io.imread",
"skimage.transform.resize",
"time.time",
"json.dump"
] | [((6213, 6243), 'keras.preprocessing.image.ImageDataGenerator', 'ImageDataGenerator', ([], {}), '(**aug_dict)\n', (6231, 6243), False, 'from keras.preprocessing.image import ImageDataGenerator\n'), ((6263, 6293), 'keras.preprocessing.image.ImageDataGenerator', 'ImageDataGenerator', ([], {}), '(**aug_dict)\n', (6281, 6293), False, 'from keras.preprocessing.image import ImageDataGenerator\n'), ((8718, 8793), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""UNet model for semantic segmentation"""'}), "(description='UNet model for semantic segmentation')\n", (8741, 8793), False, 'import argparse\n'), ((7396, 7426), 'skimage.transform.resize', 'trans.resize', (['img', 'target_size'], {}), '(img, target_size)\n', (7408, 7426), True, 'import skimage.transform as trans\n'), ((7787, 7837), 'os.path.join', 'os.path.join', (['image_path', "('%s*.png' % image_prefix)"], {}), "(image_path, '%s*.png' % image_prefix)\n", (7799, 7837), False, 'import os\n'), ((7940, 7978), 'skimage.io.imread', 'io.imread', (['item'], {'as_gray': 'image_as_gray'}), '(item, as_gray=image_as_gray)\n', (7949, 7978), True, 'import skimage.io as io\n'), ((555, 584), 'json.dump', 'json.dump', (['self.metrics', 'file'], {}), '(self.metrics, file)\n', (564, 584), False, 'import json\n'), ((1380, 1409), 'json.dump', 'json.dump', (['self.metrics', 'file'], {}), '(self.metrics, file)\n', (1389, 1409), False, 'import json\n'), ((7302, 7339), 'os.path.join', 'os.path.join', (['test_path', "('%d.png' % i)"], {}), "(test_path, '%d.png' % i)\n", (7314, 7339), False, 'import os\n'), ((894, 924), 'json.dump', 'json.dump', (['self.metrics2', 'file'], {}), '(self.metrics2, file)\n', (903, 924), False, 'import json\n'), ((12201, 12212), 'time.time', 'time.time', ([], {}), '()\n', (12210, 12212), False, 'import time\n')] |
from unittest import TestCase
from unittest.mock import Mock, create_autospec
from acnportal.acnsim.models import Battery
from acnportal.acnsim.models import EV
class TestEV(TestCase):
def setUp(self):
basicBatt = create_autospec(Battery)
self.ev = EV(0, 10, 25.0, 'PS-001', '0001', basicBatt)
def test_charge_valid_rate(self):
self.ev._battery.charge = Mock(return_value=16)
rate = self.ev.charge(16, 240, 5)
self.assertEqual(rate, 16)
self.assertAlmostEqual(self.ev.energy_delivered, 0.32)
self.ev._battery.charge.assert_called_once()
def test_charge_over_max_rate(self):
self.ev._battery.charge = Mock(return_value=32)
rate = self.ev.charge(40, 240, 5)
self.assertEqual(rate, 32)
self.assertAlmostEqual(self.ev.energy_delivered, 0.64)
self.ev._battery.charge.assert_called_once()
def test_reset(self):
self.ev.reset()
self.assertEqual(self.ev.energy_delivered, 0)
self.ev._battery.reset.assert_called_once()
| [
"unittest.mock.Mock",
"acnportal.acnsim.models.EV",
"unittest.mock.create_autospec"
] | [((229, 253), 'unittest.mock.create_autospec', 'create_autospec', (['Battery'], {}), '(Battery)\n', (244, 253), False, 'from unittest.mock import Mock, create_autospec\n'), ((272, 316), 'acnportal.acnsim.models.EV', 'EV', (['(0)', '(10)', '(25.0)', '"""PS-001"""', '"""0001"""', 'basicBatt'], {}), "(0, 10, 25.0, 'PS-001', '0001', basicBatt)\n", (274, 316), False, 'from acnportal.acnsim.models import EV\n'), ((390, 411), 'unittest.mock.Mock', 'Mock', ([], {'return_value': '(16)'}), '(return_value=16)\n', (394, 411), False, 'from unittest.mock import Mock, create_autospec\n'), ((681, 702), 'unittest.mock.Mock', 'Mock', ([], {'return_value': '(32)'}), '(return_value=32)\n', (685, 702), False, 'from unittest.mock import Mock, create_autospec\n')] |
"""passbook auth oidc provider app config"""
from importlib import import_module
from django.apps import AppConfig
from django.db.utils import InternalError, OperationalError, ProgrammingError
from django.urls import include, path
from structlog import get_logger
LOGGER = get_logger()
class PassbookProviderOIDCConfig(AppConfig):
"""passbook auth oidc provider app config"""
name = "passbook.providers.oidc"
label = "passbook_providers_oidc"
verbose_name = "passbook Providers.OIDC"
def ready(self):
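        # Ensure the OIDC provider has an RSA signing key, then mount its URLs under application/oidc/.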
try:
from Cryptodome.PublicKey import RSA
from oidc_provider.models import RSAKey
if not RSAKey.objects.exists():
key = RSA.generate(2048)
rsakey = RSAKey(key=key.exportKey("PEM").decode("utf8"))
rsakey.save()
LOGGER.info("Created key")
except (OperationalError, ProgrammingError, InternalError):
pass
from passbook.root import urls
urls.urlpatterns.append(
path(
"application/oidc/",
include("oidc_provider.urls", namespace="oidc_provider"),
),
)
import_module("passbook.providers.oidc.signals")
| [
"structlog.get_logger",
"importlib.import_module",
"Cryptodome.PublicKey.RSA.generate",
"django.urls.include",
"oidc_provider.models.RSAKey.objects.exists"
] | [((275, 287), 'structlog.get_logger', 'get_logger', ([], {}), '()\n', (285, 287), False, 'from structlog import get_logger\n'), ((1194, 1242), 'importlib.import_module', 'import_module', (['"""passbook.providers.oidc.signals"""'], {}), "('passbook.providers.oidc.signals')\n", (1207, 1242), False, 'from importlib import import_module\n'), ((661, 684), 'oidc_provider.models.RSAKey.objects.exists', 'RSAKey.objects.exists', ([], {}), '()\n', (682, 684), False, 'from oidc_provider.models import RSAKey\n'), ((708, 726), 'Cryptodome.PublicKey.RSA.generate', 'RSA.generate', (['(2048)'], {}), '(2048)\n', (720, 726), False, 'from Cryptodome.PublicKey import RSA\n'), ((1102, 1158), 'django.urls.include', 'include', (['"""oidc_provider.urls"""'], {'namespace': '"""oidc_provider"""'}), "('oidc_provider.urls', namespace='oidc_provider')\n", (1109, 1158), False, 'from django.urls import include, path\n')] |
# SPDX-License-Identifier: MIT
# Copyright (c) 2016-2020 <NAME>, <NAME>, <NAME>, <NAME>
from __future__ import print_function
import sys
import os
import glob
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), "../../../../metadata/utils"))
from exp import Exp
from exp_file import ExpFile
class CreateHg38:
LISTFILES = ["dnase-list.txt", "h3k4me3-list.txt", "h3k27ac-list.txt", "ctcf-list.txt"]
@staticmethod
def _process(exp, tassembly="GRCh38"):
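        # Collect bigWig signal files and broadPeak files for the target assembly, keyed by biological replicate.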
allsignal = glob.glob("/data/projects/encode/data/%s/*.bigWig" % exp.encodeID)
signal = {}
for signalfile in allsignal:
f = ExpFile.fromJsonFile(exp.encodeID, os.path.basename(signalfile).split(".")[0], True)
if f.assembly == tassembly:
signal[f.biological_replicates[0]] = f
peaks = {x.biological_replicates[0]: x for x in filter(lambda x: x.assembly == tassembly and x.file_type == "bed broadPeak", exp.files)}
return (peaks, signal)
@staticmethod
def _writehotspots(filemap, path):
with open(path, "wb") as o:
for k, v in filemap.iteritems():
ct, acc = k
for peaks, signal in v:
o.write("%s\t%s\t%s\t%s\t%s\n" % (acc, peaks, acc, signal, ct))
@staticmethod
def _writelist(filemap, path):
with open(path, "wb") as o:
for k, v in filemap.iteritems():
ct, acc = k
peaks, signal = v
o.write("%s\t%s\t%s\n" % (acc, signal, ct))
def __init__(self, rootdir):
self.filemap = {}
for k in CreateHg38.LISTFILES:
self.filemap[k] = {}
self.filemap[k + "_all"] = {}
# for each assay
for listfile in CreateHg38.LISTFILES:
# load each exp accession from the existing list
# for each, append only the first rep to one list and all reps to the other
with open(os.path.join(rootdir, listfile), "r") as f:
for line in f:
p = line.strip().split('\t')
try:
e = Exp.fromJsonFile(p[0])
peaks, signal = CreateHg38._process(e)
k = p[4] if len(p) >= 5 else p[2]
self.filemap[listfile][(k, e.encodeID)] = (peaks[1].fileID, signal[1].fileID)
self.filemap[listfile + "_all"][(k, e.encodeID)] = [(peaks[x].fileID, signal[x].fileID) for x, _ in signal.iteritems()]
except:
print("00_create_hg38$CreateHg38::__init__: could not process %s; skipping" % p[0])
# if DNase, write all reps to Hotspot-List.txt
if listfile == "dnase-list.txt":
CreateHg38._writehotspots(self.filemap[listfile + "_all"], "/data/projects/cREs/hg38/Hotspot-List.txt")
print("wrote /data/projects/cREs/hg38/Hotspot-List.txt")
# write first reps to list file
CreateHg38._writelist(self.filemap[listfile], "/data/projects/cREs/hg38/%s" % listfile)
print("wrote /data/projects/cREs/hg38/%s" % listfile)
def main():
CreateHg38("/data/projects/screen/Version-4/ver10/hg19/raw")
return 0
if __name__ == "__main__":
sys.exit(main())
| [
"os.path.join",
"exp.Exp.fromJsonFile",
"os.path.realpath",
"os.path.basename",
"glob.glob"
] | [((511, 577), 'glob.glob', 'glob.glob', (["('/data/projects/encode/data/%s/*.bigWig' % exp.encodeID)"], {}), "('/data/projects/encode/data/%s/*.bigWig' % exp.encodeID)\n", (520, 577), False, 'import glob\n'), ((208, 234), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (224, 234), False, 'import os\n'), ((1973, 2004), 'os.path.join', 'os.path.join', (['rootdir', 'listfile'], {}), '(rootdir, listfile)\n', (1985, 2004), False, 'import os\n'), ((2150, 2172), 'exp.Exp.fromJsonFile', 'Exp.fromJsonFile', (['p[0]'], {}), '(p[0])\n', (2166, 2172), False, 'from exp import Exp\n'), ((686, 714), 'os.path.basename', 'os.path.basename', (['signalfile'], {}), '(signalfile)\n', (702, 714), False, 'import os\n')] |
import redis
from proxypooler import config
from proxypooler.errors import ProxyPoolerEmptyError
class RedisClient:
"""Underlying storage unit.
Save Item object and its expire time in redis.
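    Illustrative usage (assumes a reachable redis server; values are examples):
        client = RedisClient()
        client.put(b'proxy-item', 1620000000.0)
        item, expire = client.get()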
"""
def __init__(self, host=config.redis_host, port=config.redis_port):
self._db = redis.Redis(host=host, port=port)
def get(self):
"""Get single item from pool.
Returns:
(item, expire).
Raises:
ProxyPoolEmptyError.
"""
try:
# timeout return None, otherwise return bytes data
item = self._db.zrange(config.pool_name, 0, 0)[0]
expire = self._db.zscore(config.pool_name, item) # float
self._db.zrem(config.pool_name, item)
return item, expire
except IndexError:
raise ProxyPoolerEmptyError('proxypooler was empty') from None
def get_list(self, count=1, rev=False):
"""Get item list from pool.
Args:
the length of item list.
Returns:
(item, expire) list, like: [(item1, expire1), ..., (itemN, expireN)].
"""
if count <= 0:
return []
if not rev:
items = self._db.zrange(config.pool_name, 0, count - 1)
else:
items = self._db.zrevrange(config.pool_name, 0, count - 1) # the last
items_expires = [(item, self._db.zscore(config.pool_name, item)) for item in items]
if items:
self._db.zrem(config.pool_name, *items)
return items_expires
def put(self, item, expire):
        self._db.zadd(config.pool_name, item, expire) # name and score are in the reverse order of the official redis command
def put_list(self, items):
for item, expire in items:
self.put(item, expire)
@property
def size(self):
return self._db.zcard(config.pool_name)
| [
"proxypooler.errors.ProxyPoolerEmptyError",
"redis.Redis"
] | [((307, 340), 'redis.Redis', 'redis.Redis', ([], {'host': 'host', 'port': 'port'}), '(host=host, port=port)\n', (318, 340), False, 'import redis\n'), ((842, 888), 'proxypooler.errors.ProxyPoolerEmptyError', 'ProxyPoolerEmptyError', (['"""proxypooler was empty"""'], {}), "('proxypooler was empty')\n", (863, 888), False, 'from proxypooler.errors import ProxyPoolerEmptyError\n')] |
import os
import time
class QuykFile:
"""
QuykFile Class:\n
Allows easy file manipulation:\n
\n
qf = QuykFile('path/to/file.txt')\n
if qf:
\t qf.write('QuykFile!')\n
\t read = qf.read()\n
\t read_list = qf.read(as_list=True)\n
\t qf.rename('file_renamed.txt')\n
\t print(str(qf.file_data))
"""
def __init__(self, path, as_full_dir=False, force_create=False):
self.success = False
self.file_data = {
'full': '',
'path': '',
'name': ''
}
if force_create:
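            # force_create: build any missing directory and an empty file, then wait until the file exists on disk.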
if as_full_dir:
full_path = path
else:
full_path = os.getcwd() + '/' + path
if os.path.isfile(full_path):
_path, _file = os.path.split(full_path)
if _path:
self.file_data['path'] = _path
else:
self.file_data['path'] = _path
self.file_data['name'] = _file
self.file_data['full'] = full_path
self.success = True
else:
_path, _file = os.path.split(full_path)
self.file_data['path'] = _path
self.file_data['name'] = _file
self.file_data['full'] = path
if _path:
if os.path.isdir(_path) is False:
os.mkdir(_path)
if os.path.isfile(full_path) is False:
f = open(full_path, 'w+')
f.close()
while os.path.isfile(full_path) is False:
time.sleep(1.5)
if os.path.isfile(full_path):
self.success = True
if self.success is False:
print('QuykFile - Error could not create a valid object for ( Dir Creation Failed ) :\n' + full_path)
else:
_reason = ""
if as_full_dir:
full_path = path
else:
full_path = os.getcwd() + '/' + path
if os.path.isfile(full_path):
_path, _file = os.path.split(full_path)
if _path:
self.file_data['path'] = _path
else:
self.file_data['path'] = _path
self.file_data['name'] = _file
self.file_data['full'] = full_path
self.success = True
else:
self.success = False
_reason = "( No Such File )"
if self.success is False:
print('QuykFile - Error could not create a valid object ' + _reason + ':\n' + full_path)
def read(self, as_list=False):
if self.success:
f = open(self.file_data['full'])
r = f.read()
f.close()
if as_list:
r = r.split('\n')
return r
def write(self, text):
if self.success:
_type = str(type(text))
if 'str' in _type:
f = open(self.file_data['full'], 'w')
f.write(text)
f.close()
elif 'list' in _type:
f = open(self.file_data['full'], 'w')
t = '\n'.join(text)
f.write(t)
f.close()
def append(self, text, as_new_line=True, before=False):
if self.success:
_type = str(type(text))
if 'str' in _type:
f = open(self.file_data['full'], 'a')
if as_new_line:
text = '\n' + text
f.write(text)
f.close()
elif 'list' in _type:
f = open(self.file_data['full'], 'a')
_text = '\n'.join(text)
if as_new_line:
text = '\n' + _text
else:
text = _text
f.write(text)
f.close()
def insert(self, text, line_index: int):
if self.success:
_type = str(type(text))
if 'str' in _type:
rl = self.read(as_list=True)
rl.insert(line_index, text)
f = open(self.file_data['full'], 'w')
t = '\n'.join(rl)
f.write(t)
f.close()
elif 'list' in _type:
pass
def copy_to(self, path, as_full_dir=False):
if self.success:
c = self.read()
if c:
if as_full_dir is False:
path = os.getcwd() + '/' + path
_path, _file = os.path.split(path)
if os.path.isdir(_path):
f = open(path, 'w+')
f.write(c)
f.close()
return True
return False
def rename(self, name):
if self.success:
if self.copy_to(name):
os.remove(self.file_data['full'])
n_replace = self.file_data['name']
self.file_data['full'] = self.file_data['full'].replace(n_replace, name)
self.file_data['name'] = name
print(str(self.file_data))
return True
return False
def delete(self):
if self.success:
os.remove(self.file_data['full'])
self.file_data = {}
self.success = False | [
"time.sleep",
"os.path.split",
"os.path.isfile",
"os.getcwd",
"os.path.isdir",
"os.mkdir",
"os.remove"
] | [((730, 755), 'os.path.isfile', 'os.path.isfile', (['full_path'], {}), '(full_path)\n', (744, 755), False, 'import os\n'), ((2096, 2121), 'os.path.isfile', 'os.path.isfile', (['full_path'], {}), '(full_path)\n', (2110, 2121), False, 'import os\n'), ((5356, 5389), 'os.remove', 'os.remove', (["self.file_data['full']"], {}), "(self.file_data['full'])\n", (5365, 5389), False, 'import os\n'), ((788, 812), 'os.path.split', 'os.path.split', (['full_path'], {}), '(full_path)\n', (801, 812), False, 'import os\n'), ((1146, 1170), 'os.path.split', 'os.path.split', (['full_path'], {}), '(full_path)\n', (1159, 1170), False, 'import os\n'), ((1685, 1710), 'os.path.isfile', 'os.path.isfile', (['full_path'], {}), '(full_path)\n', (1699, 1710), False, 'import os\n'), ((2154, 2178), 'os.path.split', 'os.path.split', (['full_path'], {}), '(full_path)\n', (2167, 2178), False, 'import os\n'), ((4662, 4681), 'os.path.split', 'os.path.split', (['path'], {}), '(path)\n', (4675, 4681), False, 'import os\n'), ((4701, 4721), 'os.path.isdir', 'os.path.isdir', (['_path'], {}), '(_path)\n', (4714, 4721), False, 'import os\n'), ((4984, 5017), 'os.remove', 'os.remove', (["self.file_data['full']"], {}), "(self.file_data['full'])\n", (4993, 5017), False, 'import os\n'), ((1451, 1476), 'os.path.isfile', 'os.path.isfile', (['full_path'], {}), '(full_path)\n', (1465, 1476), False, 'import os\n'), ((689, 700), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (698, 700), False, 'import os\n'), ((1360, 1380), 'os.path.isdir', 'os.path.isdir', (['_path'], {}), '(_path)\n', (1373, 1380), False, 'import os\n'), ((1415, 1430), 'os.mkdir', 'os.mkdir', (['_path'], {}), '(_path)\n', (1423, 1430), False, 'import os\n'), ((1589, 1614), 'os.path.isfile', 'os.path.isfile', (['full_path'], {}), '(full_path)\n', (1603, 1614), False, 'import os\n'), ((1649, 1664), 'time.sleep', 'time.sleep', (['(1.5)'], {}), '(1.5)\n', (1659, 1664), False, 'import time\n'), ((2055, 2066), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (2064, 2066), False, 'import os\n'), ((4605, 4616), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (4614, 4616), False, 'import os\n')] |
import datetime as dt
from dateutil.relativedelta import relativedelta
from dateutil.parser import parse
from dateutil import rrule
from nsetools.errors import DateFormatError
def get_nearest_business_day(d):
""" takes datetime object"""
if d.isoweekday() is 7 or d.isoweekday() is 6:
d = d - relativedelta(days=1)
return get_nearest_business_day(d)
# republic day
elif d.month is 1 and d.day is 26:
d = d - relativedelta(days=1)
return get_nearest_business_day(d)
# labour day
elif d.month is 5 and d.day is 1:
d = d - relativedelta(days=1)
return get_nearest_business_day(d)
    # independence day
elif d.month is 8 and d.day is 15:
d = d - relativedelta(days=1)
return get_nearest_business_day(d)
# <NAME>
elif d.month is 10 and d.day is 2:
d = d - relativedelta(days=1)
return get_nearest_business_day(d)
# chirstmas
elif d.month is 12 and d.day is 25:
d = d - relativedelta(days=1)
return get_nearest_business_day(d)
else:
return d
def is_known_holiday(d):
"""accepts datetime/date object and returns boolean"""
if type(d) == dt.datetime:
d = d.date()
elif type(d) != dt.date:
raise DateFormatError("only date objects or datetime objects")
else:
# fine do nothing
pass
# declare the list of holidays here.
# republic day.
if d.month is 1 and d.day is 26:
return True
# labour day
    elif d.month is 5 and d.day is 1:
        return True
# independence day
elif d.month is 8 and d.day is 15:
return True
# gandhi jayanti
elif d.month is 10 and d.day is 2:
return True
# christmas
elif d.month is 12 and d.day is 25:
return True
else:
return False
def mkdate(d):
"""tries its best to return a valid date. it can accept pharse like today,
yesterday, day before yesterday etc.
"""
    # check if it is a string
return_date = ""
if type(d) is str:
if d == "today":
return_date = dt.date.today()
elif d == "yesterday":
return_date = dt.date.today() - relativedelta(days=1)
elif d == "day before yesterday":
return_date = dt.date.today() - relativedelta(days=2)
else:
return_date = parse(d, dayfirst=True).date()
elif type(d) == dt.datetime:
return_date = d.date()
elif type(d) == dt.date:
return d
else:
raise DateFormatError("wrong date format %s" % str(d))
# check if future date.
return return_date
def usable_date(d):
"""accepts fuzzy format and returns most sensible date"""
return get_nearest_business_day(mkdate(d))
def get_date_range(frm, to, skip_dates=[]):
"""accepts fuzzy format date and returns business adjusted date ranges"""
# for x in rrule.rrule(rrule.DAILY, dtstart=s, until=dt.datetime.now(), byweekday=[0, 1, 2, 3, 4]): print(x)
frm = usable_date(frm)
to = usable_date(to)
datelist = []
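    # byweekday Mon-Fri already drops weekends; known holidays are filtered out below.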
for date in rrule.rrule(rrule.DAILY, dtstart=frm, until=to, byweekday=[0, 1, 2, 3, 4]):
if not is_known_holiday(date):
datelist.append(date.date())
return datelist
| [
"dateutil.parser.parse",
"dateutil.relativedelta.relativedelta",
"nsetools.errors.DateFormatError",
"dateutil.rrule.rrule",
"datetime.date.today"
] | [((3154, 3228), 'dateutil.rrule.rrule', 'rrule.rrule', (['rrule.DAILY'], {'dtstart': 'frm', 'until': 'to', 'byweekday': '[0, 1, 2, 3, 4]'}), '(rrule.DAILY, dtstart=frm, until=to, byweekday=[0, 1, 2, 3, 4])\n', (3165, 3228), False, 'from dateutil import rrule\n'), ((311, 332), 'dateutil.relativedelta.relativedelta', 'relativedelta', ([], {'days': '(1)'}), '(days=1)\n', (324, 332), False, 'from dateutil.relativedelta import relativedelta\n'), ((1271, 1327), 'nsetools.errors.DateFormatError', 'DateFormatError', (['"""only date objects or datetime objects"""'], {}), "('only date objects or datetime objects')\n", (1286, 1327), False, 'from nsetools.errors import DateFormatError\n'), ((2176, 2191), 'datetime.date.today', 'dt.date.today', ([], {}), '()\n', (2189, 2191), True, 'import datetime as dt\n'), ((451, 472), 'dateutil.relativedelta.relativedelta', 'relativedelta', ([], {'days': '(1)'}), '(days=1)\n', (464, 472), False, 'from dateutil.relativedelta import relativedelta\n'), ((1568, 1589), 'dateutil.relativedelta.relativedelta', 'relativedelta', ([], {'days': '(1)'}), '(days=1)\n', (1581, 1589), False, 'from dateutil.relativedelta import relativedelta\n'), ((587, 608), 'dateutil.relativedelta.relativedelta', 'relativedelta', ([], {'days': '(1)'}), '(days=1)\n', (600, 608), False, 'from dateutil.relativedelta import relativedelta\n'), ((2249, 2264), 'datetime.date.today', 'dt.date.today', ([], {}), '()\n', (2262, 2264), True, 'import datetime as dt\n'), ((2267, 2288), 'dateutil.relativedelta.relativedelta', 'relativedelta', ([], {'days': '(1)'}), '(days=1)\n', (2280, 2288), False, 'from dateutil.relativedelta import relativedelta\n'), ((729, 750), 'dateutil.relativedelta.relativedelta', 'relativedelta', ([], {'days': '(1)'}), '(days=1)\n', (742, 750), False, 'from dateutil.relativedelta import relativedelta\n'), ((2357, 2372), 'datetime.date.today', 'dt.date.today', ([], {}), '()\n', (2370, 2372), True, 'import datetime as dt\n'), ((2375, 2396), 'dateutil.relativedelta.relativedelta', 'relativedelta', ([], {'days': '(2)'}), '(days=2)\n', (2388, 2396), False, 'from dateutil.relativedelta import relativedelta\n'), ((862, 883), 'dateutil.relativedelta.relativedelta', 'relativedelta', ([], {'days': '(1)'}), '(days=1)\n', (875, 883), False, 'from dateutil.relativedelta import relativedelta\n'), ((2437, 2460), 'dateutil.parser.parse', 'parse', (['d'], {'dayfirst': '(True)'}), '(d, dayfirst=True)\n', (2442, 2460), False, 'from dateutil.parser import parse\n'), ((999, 1020), 'dateutil.relativedelta.relativedelta', 'relativedelta', ([], {'days': '(1)'}), '(days=1)\n', (1012, 1020), False, 'from dateutil.relativedelta import relativedelta\n')] |
from mutagen.mp3 import MP3
import os
import math
import multiprocessing
import queue
import pickle
import json
path = "/mnt/mount_point/ML_music_norm/"
genres = ["thrash", "black", "death", "heavy"]
def mp_length_featcher(no, genre):
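    # Split the genre's MP3 files across `no` worker processes; each worker
    # pickles a {path: duration_in_seconds} dict for its chunk of files.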
def worker(genre, paths, num):
file_length_d = {}
for f in paths:
#print("working on", f)
try:
file_length_d[f] = MP3(f).info.length
print("working on: "+f)
            except Exception:
                # skip files mutagen cannot read (corrupt or non-MP3)
                pass
print("putting in dict")
with open("genre_pickles_norm/"+genres[genre]+str(num)+".pckl","wb") as f:
pickle.dump(file_length_d, f)
#with open("jsons_norm/"+genres[genre]+str(num)+".json","w") as f:
# json.dump(file_length_d, f)
#out_q.put(file_length_d)
paths = []
for filename in os.listdir(path+genres[genre]):
paths.append(path+genres[genre]+"/"+filename)
chunk_size = int(math.ceil(len(paths)/float(no)))
proc = []
out_q = queue.Queue()
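    # NOTE: out_q is never handed to the workers; each worker writes its results to a per-worker pickle file instead.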
for i in range(no):
p = multiprocessing.Process(
target=worker,
args=(genre, paths[chunk_size*i:chunk_size*(i+1)], i)
)
proc.append(p)
p.start()
for p in proc:
p.join()
#return resultdict
if __name__ == "__main__":
for i in range(len(genres)):
mp_length_featcher(8, i)
| [
"os.listdir",
"pickle.dump",
"multiprocessing.Process",
"mutagen.mp3.MP3",
"queue.Queue"
] | [((857, 889), 'os.listdir', 'os.listdir', (['(path + genres[genre])'], {}), '(path + genres[genre])\n', (867, 889), False, 'import os\n'), ((1024, 1037), 'queue.Queue', 'queue.Queue', ([], {}), '()\n', (1035, 1037), False, 'import queue\n'), ((1074, 1178), 'multiprocessing.Process', 'multiprocessing.Process', ([], {'target': 'worker', 'args': '(genre, paths[chunk_size * i:chunk_size * (i + 1)], i)'}), '(target=worker, args=(genre, paths[chunk_size * i:\n chunk_size * (i + 1)], i))\n', (1097, 1178), False, 'import multiprocessing\n'), ((641, 670), 'pickle.dump', 'pickle.dump', (['file_length_d', 'f'], {}), '(file_length_d, f)\n', (652, 670), False, 'import pickle\n'), ((413, 419), 'mutagen.mp3.MP3', 'MP3', (['f'], {}), '(f)\n', (416, 419), False, 'from mutagen.mp3 import MP3\n')] |
from django import forms
from django.conf import settings
from django.http import HttpResponse, HttpResponseRedirect, Http404
from manager import models as hmod
from account import models as amod
from . import templater
from base_app.user_util import *
from django.contrib.auth.decorators import login_required, user_passes_test
@login_required
@user_passes_test(employee_check)
def process_request(request):
'''Create a new serialized product'''
s = hmod.SerializedItem()
form = SerializedItemForm()
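    # On POST, rebind the form to the submitted data; a valid form creates the item and redirects.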
if request.method == 'POST':
form = SerializedItemForm(request.POST)
if form.is_valid():
#Set these to the same as the catalog item
s.listPrice = form.cleaned_data['listPrice']
s.cost = form.cleaned_data['cost']
s.commissionRate = form.cleaned_data['commissionRate']
s.store = form.cleaned_data['store']
s.catalogItem = form.cleaned_data['catalogItem']
s.serialNum = form.cleaned_data['serialNum']
s.shelfLocation = form.cleaned_data['shelfLocation']
s.condition = form.cleaned_data['condition']
s.conditionDetails = form.cleaned_data['conditionDetails']
s.isRental = form.cleaned_data['isRental']
s.pricePerDay = form.cleaned_data['pricePerDay']
            s.replacementFee = form.cleaned_data['replacementFee']
s.lateFee = form.cleaned_data['lateFee']
s.createdBy = amod.Employee.objects.get(user_id=request.user.id)
s.save()
return HttpResponseRedirect('/manager/searchinventory/')
tvars = {
'form':form,
}
return templater.render_to_response(request, 'newserializeditem.html', tvars)
class SerializedItemForm(forms.Form):
'''A form for new serialized item'''
store = forms.ModelChoiceField(label='Store', queryset=hmod.Store.objects.filter(isActive="TRUE").order_by('locationName').exclude(id=99999), widget=forms.Select(attrs={'class': 'form-control',}))
catalogItem = forms.ModelChoiceField(label='Catalog Item', queryset=hmod.CatalogItem.objects.filter(isSerial=True).order_by('manufacturer'), widget=forms.Select(attrs={'class': 'form-control',}))
listPrice = forms.DecimalField(label='List Price', max_digits=8, decimal_places=2, widget=forms.NumberInput(attrs={'class': 'form-control', 'placeholder': 'List Price',}))
cost = forms.DecimalField(max_digits=8, decimal_places=2, widget=forms.NumberInput(attrs={'class': 'form-control', 'placeholder': 'Cost',}))
commissionRate = forms.ChoiceField(label='Commission Rate' ,choices=([(0.05, '5%'), (0.10, '10%'), (0.12, '12%'), (0.15, '15%')]), widget=forms.Select(attrs={'class': 'form-control',}))
serialNum = forms.CharField(label='Serial Number',widget=forms.TextInput(attrs={'class': 'form-control', 'placeholder': 'Serial Number',}))
shelfLocation = forms.CharField(label='Shelf Location', widget=forms.TextInput(attrs={'class': 'form-control', 'placeholder': 'Shelf Location',}))
condition = forms.ModelChoiceField( label='Condition', queryset=hmod.Condition.objects.all(), widget=forms.Select(attrs={'class': 'form-control'}))
conditionDetails = forms.CharField(required=False, label='Condition Details', widget=forms.Textarea(attrs={'class': 'form-control', 'placeholder': 'Condition Details',}))
isRental = forms.BooleanField(label='Rentable Item?', required=False )
pricePerDay = forms.DecimalField(required=False, label='Price Per Day', max_digits=8, decimal_places=2, widget=forms.NumberInput(attrs={'class': 'form-control', 'placeholder': 'Price Per Day',}))
replacementFee = forms.DecimalField(required=False, label='Replacement Fee', max_digits=8, decimal_places=2, widget=forms.NumberInput(attrs={'class': 'form-control', 'placeholder': 'Replacement Fee',}))
lateFee = forms.DecimalField(required=False, label='Late Fee', max_digits=8, decimal_places=2, widget=forms.NumberInput(attrs={'class': 'form-control', 'placeholder': 'Late Fee',}))
| [
"django.http.HttpResponseRedirect",
"manager.models.Store.objects.filter",
"django.forms.BooleanField",
"manager.models.SerializedItem",
"django.forms.Select",
"django.forms.NumberInput",
"django.contrib.auth.decorators.user_passes_test",
"account.models.Employee.objects.get",
"django.forms.Textarea",
"django.forms.TextInput",
"manager.models.CatalogItem.objects.filter",
"manager.models.Condition.objects.all"
] | [((347, 379), 'django.contrib.auth.decorators.user_passes_test', 'user_passes_test', (['employee_check'], {}), '(employee_check)\n', (363, 379), False, 'from django.contrib.auth.decorators import login_required, user_passes_test\n'), ((455, 476), 'manager.models.SerializedItem', 'hmod.SerializedItem', ([], {}), '()\n', (474, 476), True, 'from manager import models as hmod\n'), ((3148, 3206), 'django.forms.BooleanField', 'forms.BooleanField', ([], {'label': '"""Rentable Item?"""', 'required': '(False)'}), "(label='Rentable Item?', required=False)\n", (3166, 3206), False, 'from django import forms\n'), ((1314, 1364), 'account.models.Employee.objects.get', 'amod.Employee.objects.get', ([], {'user_id': 'request.user.id'}), '(user_id=request.user.id)\n', (1339, 1364), True, 'from account import models as amod\n'), ((1388, 1437), 'django.http.HttpResponseRedirect', 'HttpResponseRedirect', (['"""/manager/searchinventory/"""'], {}), "('/manager/searchinventory/')\n", (1408, 1437), False, 'from django.http import HttpResponse, HttpResponseRedirect, Http404\n'), ((1779, 1824), 'django.forms.Select', 'forms.Select', ([], {'attrs': "{'class': 'form-control'}"}), "(attrs={'class': 'form-control'})\n", (1791, 1824), False, 'from django import forms\n'), ((1976, 2021), 'django.forms.Select', 'forms.Select', ([], {'attrs': "{'class': 'form-control'}"}), "(attrs={'class': 'form-control'})\n", (1988, 2021), False, 'from django import forms\n'), ((2115, 2194), 'django.forms.NumberInput', 'forms.NumberInput', ([], {'attrs': "{'class': 'form-control', 'placeholder': 'List Price'}"}), "(attrs={'class': 'form-control', 'placeholder': 'List Price'})\n", (2132, 2194), False, 'from django import forms\n'), ((2263, 2336), 'django.forms.NumberInput', 'forms.NumberInput', ([], {'attrs': "{'class': 'form-control', 'placeholder': 'Cost'}"}), "(attrs={'class': 'form-control', 'placeholder': 'Cost'})\n", (2280, 2336), False, 'from django import forms\n'), ((2478, 2523), 'django.forms.Select', 'forms.Select', ([], {'attrs': "{'class': 'form-control'}"}), "(attrs={'class': 'form-control'})\n", (2490, 2523), False, 'from django import forms\n'), ((2584, 2669), 'django.forms.TextInput', 'forms.TextInput', ([], {'attrs': "{'class': 'form-control', 'placeholder': 'Serial Number'}"}), "(attrs={'class': 'form-control', 'placeholder': 'Serial Number'}\n )\n", (2599, 2669), False, 'from django import forms\n'), ((2731, 2816), 'django.forms.TextInput', 'forms.TextInput', ([], {'attrs': "{'class': 'form-control', 'placeholder': 'Shelf Location'}"}), "(attrs={'class': 'form-control', 'placeholder':\n 'Shelf Location'})\n", (2746, 2816), False, 'from django import forms\n'), ((2880, 2908), 'manager.models.Condition.objects.all', 'hmod.Condition.objects.all', ([], {}), '()\n', (2906, 2908), True, 'from manager import models as hmod\n'), ((2917, 2962), 'django.forms.Select', 'forms.Select', ([], {'attrs': "{'class': 'form-control'}"}), "(attrs={'class': 'form-control'})\n", (2929, 2962), False, 'from django import forms\n'), ((3050, 3137), 'django.forms.Textarea', 'forms.Textarea', ([], {'attrs': "{'class': 'form-control', 'placeholder': 'Condition Details'}"}), "(attrs={'class': 'form-control', 'placeholder':\n 'Condition Details'})\n", (3064, 3137), False, 'from django import forms\n'), ((3320, 3406), 'django.forms.NumberInput', 'forms.NumberInput', ([], {'attrs': "{'class': 'form-control', 'placeholder': 'Price Per Day'}"}), "(attrs={'class': 'form-control', 'placeholder':\n 'Price Per Day'})\n", (3337, 3406), False, 'from django 
import forms\n'), ((3522, 3610), 'django.forms.NumberInput', 'forms.NumberInput', ([], {'attrs': "{'class': 'form-control', 'placeholder': 'Replacement Fee'}"}), "(attrs={'class': 'form-control', 'placeholder':\n 'Replacement Fee'})\n", (3539, 3610), False, 'from django import forms\n'), ((3712, 3789), 'django.forms.NumberInput', 'forms.NumberInput', ([], {'attrs': "{'class': 'form-control', 'placeholder': 'Late Fee'}"}), "(attrs={'class': 'form-control', 'placeholder': 'Late Fee'})\n", (3729, 3789), False, 'from django import forms\n'), ((1896, 1942), 'manager.models.CatalogItem.objects.filter', 'hmod.CatalogItem.objects.filter', ([], {'isSerial': '(True)'}), '(isSerial=True)\n', (1927, 1942), True, 'from manager import models as hmod\n'), ((1685, 1727), 'manager.models.Store.objects.filter', 'hmod.Store.objects.filter', ([], {'isActive': '"""TRUE"""'}), "(isActive='TRUE')\n", (1710, 1727), True, 'from manager import models as hmod\n')] |
"""Adds datasets from Stanford Open Policing Project
The Stanford Open Policing Project contains datasets from many cities.
However, it appears to no longer be updated.
This script adds all the datasets from Stanford that are not already added.
It is assumed that the ones already added come from other open data sets
and therefore have more up-to-date data.
"""
import pandas as pd
import requests
from datetime import datetime
plot_flag = False
_us_state_abbrev = {
'AL' : 'Alabama',
'AK' : 'Alaska',
'AS' : 'American Samoa',
'AZ' : 'Arizona',
'AR' : 'Arkansas',
'CA' : 'California',
'CO' : 'Colorado',
'CT' : 'Connecticut',
'DE' : 'Delaware',
'DC' : 'District of Columbia',
'FL' : 'Florida',
'GA' : 'Georgia',
'GU' : 'Guam',
'HI' : 'Hawaii',
'ID' : 'Idaho',
'IL' : 'Illinois',
'IN' : 'Indiana',
'IA' : 'Iowa',
'KS' : 'Kansas',
'KY' : 'Kentucky',
'LA' : 'Louisiana',
'ME' : 'Maine',
'MD' : 'Maryland',
'MA' : 'Massachusetts',
'MI' : 'Michigan',
'MN' : 'Minnesota',
'MS' : 'Mississippi',
'MO' : 'Missouri',
'MT' : 'Montana',
'NE' : 'Nebraska',
'NV' : 'Nevada',
'NH' : 'New Hampshire',
'NJ' : 'New Jersey',
'NM' : 'New Mexico',
'NY' : 'New York',
'NC' : 'North Carolina',
'ND' : 'North Dakota',
'MP' : 'Northern Mariana Islands',
'OH' : 'Ohio',
'OK' : 'Oklahoma',
'OR' : 'Oregon',
'PA' : 'Pennsylvania',
'PR' : 'Puerto Rico',
'RI' : 'Rhode Island',
'SC' : 'South Carolina',
'SD' : 'South Dakota',
'TN' : 'Tennessee',
'TX' : 'Texas',
'UT' : 'Utah',
'VT' : 'Vermont',
'VI' : 'Virgin Islands',
'VA' : 'Virginia',
'WA' : 'Washington',
'WV' : 'West Virginia',
'WI' : 'Wisconsin',
'WY' : 'Wyoming'
}
def find_next(r, string, last_loc):
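    # Return the absolute index of `string` in r.text after position last_loc, or -1 if it is not found.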
new_loc = r.text[last_loc+1:].find(string)
if new_loc >= 0:
new_loc += last_loc+1
return new_loc
def find_next_state(r, last_loc):
new_loc = find_next(r, '<tr class="state-title">', last_loc)
if new_loc < 0:
return new_loc, None
td_loc = find_next(r, '<td', new_loc)
start = find_next(r, '>', td_loc)+1
end = find_next(r, '<', start)
name = r.text[start:end].strip()
name = _us_state_abbrev[name]
return new_loc, name
def find_next_pd(r, last_loc):
new_loc = find_next(r, '<td class="state text-left" data-title="State">', last_loc)
if new_loc < 0:
return new_loc, None, None
span_loc = find_next(r, '<span', new_loc)
start = find_next(r, '>', span_loc)+1
end = find_next(r, '<', start)
name = r.text[start:end].strip()
local_str = "<sup>1</sup>"
is_multi = r.text[end:end+len(local_str)] == local_str
return new_loc, name, is_multi
def find_next_csv(r, start, end):
open_loc = start
while open_loc < end:
open_loc = find_next(r, '<a href', open_loc+1)
if open_loc >= end:
raise ValueError("unable to find CSV")
close_loc = find_next(r, '</a>', open_loc)
if close_loc >= end:
raise ValueError("unable to find CSV")
if "Download data as CSV" in r.text[open_loc:close_loc]:
first_quote = find_next(r, '"', open_loc)
last_quote = find_next(r, '"', first_quote+1)
return r.text[first_quote+1:last_quote]
raise ValueError("unable to find CSV")
def includes_pedestrian_stops(r, start, end):
open_loc = find_next(r, '<td class="text-right" data-title="Stops">', start)
if open_loc >= end:
raise ValueError("Unable to find # of stops")
close_loc = find_next(r, '</td>', open_loc)
if close_loc >= end:
raise ValueError("Unable to find # of stops")
return '<sup>2</sup>' in r.text[open_loc:close_loc]
def find_time_range(r, start, end):
open_loc = find_next(r, '<td class="text-right" data-title="Time range">', start)
if open_loc >= end:
raise ValueError("Unable to find time range")
close_loc = find_next(r, '</span></td>', open_loc)
if close_loc >= end:
raise ValueError("Unable to find time range end")
date_str = r.text[close_loc-10:close_loc]
return datetime.strptime(date_str, "%Y-%m-%d")
opd_csv = "opd_source_table.csv"
df = pd.read_csv(opd_csv)
stanford_desc = "Standardized stop data from the Stanford Open Policing Project"
# Remove any Stanford data prior to adding
print(len(df))
df = df[df["Description"] != stanford_desc]
print(len(df))
url = "https://openpolicing.stanford.edu/data/"
r = requests.get(url)
row_states = df["State"].to_list()
row_pds = ["Charlotte" if x == "Charlotte-Mecklenburg" else x for x in df["Jurisdiction"].to_list()]
row_types = df["TableType"].to_list()
st_loc, state = find_next_state(r, -1)
next_st_loc, next_state = find_next_state(r, st_loc)
pd_loc, pd_name, is_multi = find_next_pd(r, -1)
num_datasets = 0
end_dates = []
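# Walk the scraped HTML one police-department row at a time, keeping track of the state heading each row falls under.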
while pd_loc >= 0 and pd_loc != len(r.text):
next_pd_loc, next_pd_name, next_is_multi = find_next_pd(r, pd_loc+1)
if next_pd_loc < 0:
next_pd_loc = len(r.text)
num_datasets += 1
csv_file = find_next_csv(r, pd_loc, next_pd_loc)
if includes_pedestrian_stops(r, pd_loc, next_pd_loc):
table_type = "STOPS"
else:
table_type = "TRAFFIC STOPS"
end_dates.append(find_time_range(r, pd_loc, next_pd_loc))
already_added = False
for k in range(len(row_states)):
if pd_name == row_pds[k] and state == row_states[k] and table_type == row_types[k]:
already_added = True
break
if not already_added:
date_field = "date"
if is_multi:
source_name = state
jurisdiction = "MULTI"
jurisdiction_field = "department_name"
else:
source_name = pd_name
jurisdiction = pd_name
jurisdiction_field = ""
df_append = pd.DataFrame(
[[state,source_name,jurisdiction,table_type,"MULTI",stanford_desc,"CSV",csv_file,date_field,"",jurisdiction_field]],
columns=df.columns)
df = pd.concat([df, df_append])
pd_loc = next_pd_loc
pd_name = next_pd_name
is_multi = next_is_multi
if pd_loc > next_st_loc:
st_loc = next_st_loc
state = next_state
next_st_loc, next_state = find_next_state(r, st_loc)
if plot_flag:
import matplotlib.pyplot as plt
import pandas as pd
fig, ax = plt.subplots()
s = pd.Series(end_dates)
s.hist(ax=ax)
plt.show()
df.to_csv(opd_csv,index=False) | [
"pandas.Series",
"pandas.read_csv",
"datetime.datetime.strptime",
"requests.get",
"pandas.DataFrame",
"pandas.concat",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show"
] | [((4290, 4310), 'pandas.read_csv', 'pd.read_csv', (['opd_csv'], {}), '(opd_csv)\n', (4301, 4310), True, 'import pandas as pd\n'), ((4565, 4582), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (4577, 4582), False, 'import requests\n'), ((4210, 4249), 'datetime.datetime.strptime', 'datetime.strptime', (['date_str', '"""%Y-%m-%d"""'], {}), "(date_str, '%Y-%m-%d')\n", (4227, 4249), False, 'from datetime import datetime\n'), ((6456, 6470), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (6468, 6470), True, 'import matplotlib.pyplot as plt\n'), ((6479, 6499), 'pandas.Series', 'pd.Series', (['end_dates'], {}), '(end_dates)\n', (6488, 6499), True, 'import pandas as pd\n'), ((6522, 6532), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6530, 6532), True, 'import matplotlib.pyplot as plt\n'), ((5922, 6089), 'pandas.DataFrame', 'pd.DataFrame', (["[[state, source_name, jurisdiction, table_type, 'MULTI', stanford_desc,\n 'CSV', csv_file, date_field, '', jurisdiction_field]]"], {'columns': 'df.columns'}), "([[state, source_name, jurisdiction, table_type, 'MULTI',\n stanford_desc, 'CSV', csv_file, date_field, '', jurisdiction_field]],\n columns=df.columns)\n", (5934, 6089), True, 'import pandas as pd\n'), ((6111, 6137), 'pandas.concat', 'pd.concat', (['[df, df_append]'], {}), '([df, df_append])\n', (6120, 6137), True, 'import pandas as pd\n')] |
"""
Merges Zeptrion Lights together with Hue Lights in one new Light.
Version V.0.0.1
Package:
custom_components.light.zeptrion_hue_lights.py
configuration.yaml:
light:
- platform: zeptrion_hue_lights
mappings:
# Name: [zeptrion_entity_name, hue_entity_name, group_name]
"Licht Küche": [light.kueche_kueche_2, light.kueche_kueche, 'Kueche']
"Licht Stube": [light.stube_stube_2, light.stube_stube, 'Stube']
scan_interval: 1
ToDo:
For more details about this Class, please refer to the documentation at
https://github.com/swissglider/homeassistant_custome_components
"""
import logging
# Import the device class from the component that you want to support
from homeassistant.components.light import Light
from homeassistant.components.group import ENTITY_ID_FORMAT as GROUP_ENTITY_ID_FORMAT
import homeassistant.helpers.entity as entity_helper
REQUIREMENTS = ['zeptrionAirApi']
DEPENDENCIES = ['zeptrionairhub','hue']
_LOGGER = logging.getLogger(__name__)
def setup_platform(hass, config, add_devices, discovery_info=None):
lights = []
mappings = config.get('mappings')
for key in mappings:
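        # Each mapping entry: display name -> [zeptrion entity id, hue entity id, group name].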
name = key
zeptrion_entity = mappings[key][0]
hue_entity = mappings[key][1]
group_name = mappings[key][2]
entity_id = entity_helper.generate_entity_id(GROUP_ENTITY_ID_FORMAT, group_name, hass=hass)
lights.append(ZeptrionHueLights(name, entity_id, group_name, zeptrion_entity, hue_entity, hass))
add_devices(lights)
class ZeptrionHueLights(Light):
def __init__(self, name, group, friendly_group_name, zeptrion_entity, hue_entity, hass):
"""Initialize an AwesomeLight."""
self._zeptrion_entity = zeptrion_entity
self._hue_entity = hue_entity
self._name = name
self._state_hue = None
self._state_zeptrion = None
self.hass = hass
self._group = group
self._friendly_group_name = friendly_group_name
@property
def unique_id(self):
"""Return the ID of this Zeptrion light."""
return self._zeptrion_entity + self._hue_entity
@property
def name(self):
"""Return the display name of this light."""
return self._name
@property
def brightness(self):
"""Return the brightness of the light."""
temp_param = 'brightness'
return self._get_return_value(temp_param, self._state_hue)
@property
def xy_color(self):
"""Return the XY color value."""
temp_param = 'xy_color'
return self._get_return_value(temp_param, self._state_hue)
@property
def color_temp(self):
"""Return the CT color value."""
temp_param = 'color_temp'
return self._get_return_value(temp_param, self._state_hue)
@property
def supported_features(self):
"""Flag supported features."""
temp_param = 'supported_features'
return self._get_return_value(temp_param, self._state_hue)
@property
def effect_list(self):
"""Return the list of supported effect_list."""
temp_param = 'effect_list'
return self._get_return_value(temp_param, self._state_hue)
@property
def rgb_color(self):
"""Return the list of supported rgb_color."""
temp_param = 'rgb_color'
return self._get_return_value(temp_param, self._state_hue)
@property
def min_mireds(self):
"""Return the list of supported min_mireds."""
temp_param = 'min_mireds'
return self._get_return_value(temp_param, self._state_hue)
@property
def max_mireds(self):
"""Return the list of supported max_mireds."""
temp_param = 'max_mireds'
return self._get_return_value(temp_param, self._state_hue)
@property
def effect(self):
"""Return the list of supported effects."""
temp_param = 'effect'
return self._get_return_value(temp_param, self._state_hue)
@property
def brightness_pct(self):
"""Return the list of supported brightness_pct."""
temp_param = 'brightness_pct'
return self._get_return_value(temp_param, self._state_hue)
@property
def kelvin(self):
"""Return the list of supported kelvin."""
temp_param = 'kelvin'
return self._get_return_value(temp_param, self._state_hue)
@property
def flash(self):
"""Return the list of supported flash."""
temp_param = 'flash'
return self._get_return_value(temp_param, self._state_hue)
@property
def white_value(self):
"""Return the list of supported white_value."""
temp_param = 'white_value'
return self._get_return_value(temp_param, self._state_hue)
@property
def profile(self):
"""Return the list of supported profile."""
temp_param = 'profile'
return self._get_return_value(temp_param, self._state_hue)
@property
def transition(self):
"""Return the list of supported transition."""
temp_param = 'transition'
return self._get_return_value(temp_param, self._state_hue)
@property
def last_updated(self):
"""Return the list of supported effects."""
if self._state_hue and self._state_hue.last_updated:
return self._state_hue.last_updated
return None
@property
def last_changed(self):
"""Return the list of supported last_changed."""
        if self._state_hue and self._state_hue.last_changed:
            return self._state_hue.last_changed
return None
@property
def is_on(self):
"""Return true if light is on."""
# if zeptrion is off --> return False
if self._state_zeptrion and self._state_zeptrion.state and self._state_zeptrion.state == 'on':
return True
return False
def turn_on(self, **kwargs):
"""Instruct the light to turn on."""
self.hass.services.call("light", "turn_on", self._get_kwargs_payload(self._zeptrion_entity))
self.hass.services.call("light", "turn_on", self._get_kwargs_payload(self._hue_entity, kwargs))
self.update()
def turn_off(self, **kwargs):
"""Instruct the light to turn off."""
self.hass.services.call("light", "turn_off", self._get_kwargs_payload(self._zeptrion_entity))
self.hass.services.call("light", "turn_off", self._get_kwargs_payload(self._hue_entity, kwargs))
self.update()
    def toggle(self, **kwargs):
        """Instruct the light to toggle."""
        self.hass.services.call("light", "toggle", self._get_kwargs_payload(self._zeptrion_entity))
        self.hass.services.call("light", "toggle", self._get_kwargs_payload(self._hue_entity, kwargs))
        self.update()
def update(self):
"""Fetch new state data for this light.
This is the only method that should fetch new data for Home Assistant.
"""
self._state_hue = self.hass.states.get(self._hue_entity)
self._state_zeptrion = self.hass.states.get(self._zeptrion_entity)
@property
def device_state_attributes(self):
"""Return the device state attributes."""
# get Group Name
attributes = {}
attributes['group'] = self._friendly_group_name
attributes['zeptrion_hue_group_entity'] = self._group
attributes['is_zeptrion_hue_group'] = True
attributes['binded_devices'] = [self._zeptrion_entity, self._hue_entity]
return attributes
    @staticmethod
    def _get_kwargs_payload(entity, kwargs=None):
dictMainGroup = {}
if kwargs:
for arg in kwargs:
dictMainGroup[arg] = kwargs[arg]
dictMainGroup['entity_id'] = entity
return dictMainGroup
    @staticmethod
    def _get_return_value(arg_name, t_state):
if t_state and t_state.attributes and arg_name in t_state.attributes:
return t_state.attributes[arg_name]
return None | [
"logging.getLogger",
"homeassistant.helpers.entity.generate_entity_id"
] | [((1022, 1049), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1039, 1049), False, 'import logging\n'), ((1361, 1440), 'homeassistant.helpers.entity.generate_entity_id', 'entity_helper.generate_entity_id', (['GROUP_ENTITY_ID_FORMAT', 'group_name'], {'hass': 'hass'}), '(GROUP_ENTITY_ID_FORMAT, group_name, hass=hass)\n', (1393, 1440), True, 'import homeassistant.helpers.entity as entity_helper\n')] |
# [Kakao] Path Finding Game
import sys
sys.setrecursionlimit(100000)
class Node:
def __init__(self, idx, data) -> None:
self.idx = idx
self.data = data
self.left = None
self.right = None
class Tree:
def __init__(self) -> None:
self.root = None
def insert(self, idx, data):
self.root = self._insert(self.root, idx, data)
return self.root is not None
def _insert(self, node, idx, data):
if not node:
return Node(idx, data)
if data < node.data:
node.left = self._insert(node.left, idx, data)
else:
node.right = self._insert(node.right, idx, data)
return node
def preorder(self, node):
ret = [node.idx]
if node.left:
ret += self.preorder(node.left)
if node.right:
ret += self.preorder(node.right)
return ret
def postorder(self, node):
ret = []
if node.left:
ret += self.postorder(node.left)
if node.right:
ret += self.postorder(node.right)
ret += [node.idx]
return ret
def solution(nodeinfo):
tree = Tree()
answer = []
table = []
for i in range(len(nodeinfo)):
table.append((nodeinfo[i][0], nodeinfo[i][1], i + 1))
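    # Sort by descending y so ancestors are inserted before their descendants;
    # the BST itself is keyed on the x coordinate.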
table.sort(key=lambda x: -x[1])
for i in range(len(table)):
tree.insert(table[i][2], table[i][0])
answer.append(tree.preorder(tree.root))
answer.append(tree.postorder(tree.root))
return answer
if __name__ == "__main__":
nodeinfo = [
[5, 3],
[11, 5],
[13, 3],
[3, 5],
[6, 1],
[1, 3],
[8, 6],
[7, 2],
[2, 2],
]
print(solution(nodeinfo))
| [
"sys.setrecursionlimit"
] | [((28, 57), 'sys.setrecursionlimit', 'sys.setrecursionlimit', (['(100000)'], {}), '(100000)\n', (49, 57), False, 'import sys\n')] |
import os
from aimrecords import Storage
storage_path = os.getcwd()
storage_writer = Storage(storage_path, 'w')
storage_writer.open('test_metric')
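# Append ten training records and ten validation records, tagging each record with a metadata dict.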
for i in range(10):
storage_writer.append_record('test_metric', str(i).encode(),
                                 {'subset': 'train'})
for i in range(10):
storage_writer.append_record('test_metric', str(i * 10).encode(),
{'subset': 'val'})
| [
"aimrecords.Storage",
"os.getcwd"
] | [((59, 70), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (68, 70), False, 'import os\n'), ((89, 115), 'aimrecords.Storage', 'Storage', (['storage_path', '"""w"""'], {}), "(storage_path, 'w')\n", (96, 115), False, 'from aimrecords import Storage\n')] |
import json
import sys
from functools import partial
from textwrap import dedent
import pytest
from pkgcheck import base, reporters
from pkgcheck.checks import git, metadata, metadata_xml, pkgdir, profiles
from pkgcore.test.misc import FakePkg
from snakeoil.formatters import PlainTextFormatter
class BaseReporter:
reporter_cls = reporters.Reporter
@pytest.fixture(autouse=True)
def _setup(self):
self.log_warning = profiles.ProfileWarning(Exception('profile warning'))
self.log_error = profiles.ProfileError(Exception('profile error'))
pkg = FakePkg('dev-libs/foo-0')
self.commit_result = git.InvalidCommitMessage('no commit message', commit='8d86269bb4c7')
self.category_result = metadata_xml.CatMissingMetadataXml('metadata.xml', pkg=pkg)
self.package_result = pkgdir.InvalidPN(('bar', 'baz'), pkg=pkg)
self.versioned_result = metadata.BadFilename(('0.tar.gz', 'foo.tar.gz'), pkg=pkg)
def mk_reporter(self, **kwargs):
out = PlainTextFormatter(sys.stdout)
return self.reporter_cls(out, **kwargs)
add_report_output = None
def test_add_report(self, capsys):
with self.mk_reporter() as reporter:
reporter.report(self.commit_result)
reporter.report(self.log_warning)
reporter.report(self.category_result)
reporter.report(self.package_result)
reporter.report(self.versioned_result)
out, err = capsys.readouterr()
assert not err
assert out == self.add_report_output
class TestStrReporter(BaseReporter):
reporter_cls = reporters.StrReporter
add_report_output = dedent("""\
commit 8d86269bb4c7: no commit message
profile warning
dev-libs: category is missing metadata.xml
dev-libs/foo: invalid package names: [ bar, baz ]
dev-libs/foo-0: bad filenames: [ 0.tar.gz, foo.tar.gz ]
""")
class TestFancyReporter(BaseReporter):
reporter_cls = reporters.FancyReporter
add_report_output = dedent("""\
commit
InvalidCommitMessage: commit 8d86269bb4c7: no commit message
profiles
ProfileWarning: profile warning
dev-libs
CatMissingMetadataXml: category is missing metadata.xml
dev-libs/foo
InvalidPN: invalid package names: [ bar, baz ]
BadFilename: version 0: bad filenames: [ 0.tar.gz, foo.tar.gz ]
""")
class TestJsonReporter(BaseReporter):
reporter_cls = reporters.JsonReporter
add_report_output = dedent("""\
{"_style": {"InvalidCommitMessage": "commit 8d86269bb4c7: no commit message"}}
{"_warning": {"ProfileWarning": "profile warning"}}
{"dev-libs": {"_error": {"CatMissingMetadataXml": "category is missing metadata.xml"}}}
{"dev-libs": {"foo": {"_error": {"InvalidPN": "invalid package names: [ bar, baz ]"}}}}
{"dev-libs": {"foo": {"0": {"_warning": {"BadFilename": "bad filenames: [ 0.tar.gz, foo.tar.gz ]"}}}}}
""")
class TestXmlReporter(BaseReporter):
reporter_cls = reporters.XmlReporter
add_report_output = dedent("""\
<checks>
<result><class>InvalidCommitMessage</class><msg>commit 8d86269bb4c7: no commit message</msg></result>
<result><class>ProfileWarning</class><msg>profile warning</msg></result>
<result><category>dev-libs</category><class>CatMissingMetadataXml</class><msg>category is missing metadata.xml</msg></result>
<result><category>dev-libs</category><package>foo</package><class>InvalidPN</class><msg>invalid package names: [ bar, baz ]</msg></result>
<result><category>dev-libs</category><package>foo</package><version>0</version><class>BadFilename</class><msg>bad filenames: [ 0.tar.gz, foo.tar.gz ]</msg></result>
</checks>
""")
class TestCsvReporter(BaseReporter):
reporter_cls = reporters.CsvReporter
add_report_output = dedent("""\
,,,commit 8d86269bb4c7: no commit message
,,,profile warning
dev-libs,,,category is missing metadata.xml
dev-libs,foo,,"invalid package names: [ bar, baz ]"
dev-libs,foo,0,"bad filenames: [ 0.tar.gz, foo.tar.gz ]"
""")
class TestFormatReporter(BaseReporter):
reporter_cls = partial(reporters.FormatReporter, '')
def test_add_report(self, capsys):
for format_str, expected in (
('r', 'r\n' * 5),
('{category}', 'dev-libs\n' * 3),
('{category}/{package}', '/\n/\ndev-libs/\n' + 'dev-libs/foo\n' * 2),
('{category}/{package}-{version}', '/-\n/-\ndev-libs/-\ndev-libs/foo-\ndev-libs/foo-0\n'),
('{name}',
'InvalidCommitMessage\nProfileWarning\nCatMissingMetadataXml\nInvalidPN\nBadFilename\n'),
('{foo}', ''),
):
self.reporter_cls = partial(reporters.FormatReporter, format_str)
self.add_report_output = expected
super().test_add_report(capsys)
def test_unsupported_index(self, capsys):
self.reporter_cls = partial(reporters.FormatReporter, '{0}')
with self.mk_reporter() as reporter:
with pytest.raises(base.PkgcheckUserException) as excinfo:
reporter.report(self.versioned_result)
assert 'integer indexes are not supported' in str(excinfo.value)
class TestJsonStream(BaseReporter):
reporter_cls = reporters.JsonStream
def test_add_report(self, capsys):
with self.mk_reporter() as reporter:
for result in (
self.log_warning, self.log_error, self.commit_result,
self.category_result, self.package_result, self.versioned_result):
reporter.report(result)
out, err = capsys.readouterr()
assert not err
deserialized_result = next(reporter.from_iter([out]))
assert str(deserialized_result) == str(result)
def test_deserialize_error(self):
with self.mk_reporter() as reporter:
# deserializing non-result objects raises exception
obj = reporter.to_json(['result'])
with pytest.raises(reporters.DeserializationError, match='failed loading'):
next(reporter.from_iter([obj]))
# deserializing mangled JSON result objects raises exception
obj = reporter.to_json(self.versioned_result)
del obj['__class__']
json_obj = json.dumps(obj)
with pytest.raises(reporters.DeserializationError, match='unknown result'):
next(reporter.from_iter([json_obj]))
| [
"textwrap.dedent",
"pkgcheck.checks.metadata_xml.CatMissingMetadataXml",
"pkgcheck.checks.metadata.BadFilename",
"json.dumps",
"pkgcore.test.misc.FakePkg",
"snakeoil.formatters.PlainTextFormatter",
"functools.partial",
"pytest.raises",
"pkgcheck.checks.pkgdir.InvalidPN",
"pytest.fixture",
"pkgcheck.checks.git.InvalidCommitMessage"
] | [((363, 391), 'pytest.fixture', 'pytest.fixture', ([], {'autouse': '(True)'}), '(autouse=True)\n', (377, 391), False, 'import pytest\n'), ((1663, 1935), 'textwrap.dedent', 'dedent', (['""" commit 8d86269bb4c7: no commit message\n profile warning\n dev-libs: category is missing metadata.xml\n dev-libs/foo: invalid package names: [ bar, baz ]\n dev-libs/foo-0: bad filenames: [ 0.tar.gz, foo.tar.gz ]\n """'], {}), '(\n """ commit 8d86269bb4c7: no commit message\n profile warning\n dev-libs: category is missing metadata.xml\n dev-libs/foo: invalid package names: [ bar, baz ]\n dev-libs/foo-0: bad filenames: [ 0.tar.gz, foo.tar.gz ]\n """\n )\n', (1669, 1935), False, 'from textwrap import dedent\n'), ((2037, 2448), 'textwrap.dedent', 'dedent', (['""" commit\n InvalidCommitMessage: commit 8d86269bb4c7: no commit message\n\n profiles\n ProfileWarning: profile warning\n\n dev-libs\n CatMissingMetadataXml: category is missing metadata.xml\n\n dev-libs/foo\n InvalidPN: invalid package names: [ bar, baz ]\n BadFilename: version 0: bad filenames: [ 0.tar.gz, foo.tar.gz ]\n """'], {}), '(\n """ commit\n InvalidCommitMessage: commit 8d86269bb4c7: no commit message\n\n profiles\n ProfileWarning: profile warning\n\n dev-libs\n CatMissingMetadataXml: category is missing metadata.xml\n\n dev-libs/foo\n InvalidPN: invalid package names: [ bar, baz ]\n BadFilename: version 0: bad filenames: [ 0.tar.gz, foo.tar.gz ]\n """\n )\n', (2043, 2448), False, 'from textwrap import dedent\n'), ((2548, 3026), 'textwrap.dedent', 'dedent', (['""" {"_style": {"InvalidCommitMessage": "commit 8d86269bb4c7: no commit message"}}\n {"_warning": {"ProfileWarning": "profile warning"}}\n {"dev-libs": {"_error": {"CatMissingMetadataXml": "category is missing metadata.xml"}}}\n {"dev-libs": {"foo": {"_error": {"InvalidPN": "invalid package names: [ bar, baz ]"}}}}\n {"dev-libs": {"foo": {"0": {"_warning": {"BadFilename": "bad filenames: [ 0.tar.gz, foo.tar.gz ]"}}}}}\n """'], {}), '(\n """ {"_style": {"InvalidCommitMessage": "commit 8d86269bb4c7: no commit message"}}\n {"_warning": {"ProfileWarning": "profile warning"}}\n {"dev-libs": {"_error": {"CatMissingMetadataXml": "category is missing metadata.xml"}}}\n {"dev-libs": {"foo": {"_error": {"InvalidPN": "invalid package names: [ bar, baz ]"}}}}\n {"dev-libs": {"foo": {"0": {"_warning": {"BadFilename": "bad filenames: [ 0.tar.gz, foo.tar.gz ]"}}}}}\n """\n )\n', (2554, 3026), False, 'from textwrap import dedent\n'), ((3124, 3832), 'textwrap.dedent', 'dedent', (['""" <checks>\n <result><class>InvalidCommitMessage</class><msg>commit 8d86269bb4c7: no commit message</msg></result>\n <result><class>ProfileWarning</class><msg>profile warning</msg></result>\n <result><category>dev-libs</category><class>CatMissingMetadataXml</class><msg>category is missing metadata.xml</msg></result>\n <result><category>dev-libs</category><package>foo</package><class>InvalidPN</class><msg>invalid package names: [ bar, baz ]</msg></result>\n <result><category>dev-libs</category><package>foo</package><version>0</version><class>BadFilename</class><msg>bad filenames: [ 0.tar.gz, foo.tar.gz ]</msg></result>\n </checks>\n """'], {}), '(\n """ <checks>\n <result><class>InvalidCommitMessage</class><msg>commit 8d86269bb4c7: no commit message</msg></result>\n <result><class>ProfileWarning</class><msg>profile warning</msg></result>\n <result><category>dev-libs</category><class>CatMissingMetadataXml</class><msg>category is missing metadata.xml</msg></result>\n 
<result><category>dev-libs</category><package>foo</package><class>InvalidPN</class><msg>invalid package names: [ bar, baz ]</msg></result>\n <result><category>dev-libs</category><package>foo</package><version>0</version><class>BadFilename</class><msg>bad filenames: [ 0.tar.gz, foo.tar.gz ]</msg></result>\n </checks>\n """\n )\n', (3130, 3832), False, 'from textwrap import dedent\n'), ((3930, 4212), 'textwrap.dedent', 'dedent', (['""" ,,,commit 8d86269bb4c7: no commit message\n ,,,profile warning\n dev-libs,,,category is missing metadata.xml\n dev-libs,foo,,"invalid package names: [ bar, baz ]"\n dev-libs,foo,0,"bad filenames: [ 0.tar.gz, foo.tar.gz ]"\n """'], {}), '(\n """ ,,,commit 8d86269bb4c7: no commit message\n ,,,profile warning\n dev-libs,,,category is missing metadata.xml\n dev-libs,foo,,"invalid package names: [ bar, baz ]"\n dev-libs,foo,0,"bad filenames: [ 0.tar.gz, foo.tar.gz ]"\n """\n )\n', (3936, 4212), False, 'from textwrap import dedent\n'), ((4267, 4304), 'functools.partial', 'partial', (['reporters.FormatReporter', '""""""'], {}), "(reporters.FormatReporter, '')\n", (4274, 4304), False, 'from functools import partial\n'), ((584, 609), 'pkgcore.test.misc.FakePkg', 'FakePkg', (['"""dev-libs/foo-0"""'], {}), "('dev-libs/foo-0')\n", (591, 609), False, 'from pkgcore.test.misc import FakePkg\n'), ((639, 707), 'pkgcheck.checks.git.InvalidCommitMessage', 'git.InvalidCommitMessage', (['"""no commit message"""'], {'commit': '"""8d86269bb4c7"""'}), "('no commit message', commit='8d86269bb4c7')\n", (663, 707), False, 'from pkgcheck.checks import git, metadata, metadata_xml, pkgdir, profiles\n'), ((739, 798), 'pkgcheck.checks.metadata_xml.CatMissingMetadataXml', 'metadata_xml.CatMissingMetadataXml', (['"""metadata.xml"""'], {'pkg': 'pkg'}), "('metadata.xml', pkg=pkg)\n", (773, 798), False, 'from pkgcheck.checks import git, metadata, metadata_xml, pkgdir, profiles\n'), ((829, 870), 'pkgcheck.checks.pkgdir.InvalidPN', 'pkgdir.InvalidPN', (["('bar', 'baz')"], {'pkg': 'pkg'}), "(('bar', 'baz'), pkg=pkg)\n", (845, 870), False, 'from pkgcheck.checks import git, metadata, metadata_xml, pkgdir, profiles\n'), ((903, 960), 'pkgcheck.checks.metadata.BadFilename', 'metadata.BadFilename', (["('0.tar.gz', 'foo.tar.gz')"], {'pkg': 'pkg'}), "(('0.tar.gz', 'foo.tar.gz'), pkg=pkg)\n", (923, 960), False, 'from pkgcheck.checks import git, metadata, metadata_xml, pkgdir, profiles\n'), ((1013, 1043), 'snakeoil.formatters.PlainTextFormatter', 'PlainTextFormatter', (['sys.stdout'], {}), '(sys.stdout)\n', (1031, 1043), False, 'from snakeoil.formatters import PlainTextFormatter\n'), ((5115, 5155), 'functools.partial', 'partial', (['reporters.FormatReporter', '"""{0}"""'], {}), "(reporters.FormatReporter, '{0}')\n", (5122, 5155), False, 'from functools import partial\n'), ((4904, 4949), 'functools.partial', 'partial', (['reporters.FormatReporter', 'format_str'], {}), '(reporters.FormatReporter, format_str)\n', (4911, 4949), False, 'from functools import partial\n'), ((6527, 6542), 'json.dumps', 'json.dumps', (['obj'], {}), '(obj)\n', (6537, 6542), False, 'import json\n'), ((5218, 5259), 'pytest.raises', 'pytest.raises', (['base.PkgcheckUserException'], {}), '(base.PkgcheckUserException)\n', (5231, 5259), False, 'import pytest\n'), ((6220, 6289), 'pytest.raises', 'pytest.raises', (['reporters.DeserializationError'], {'match': '"""failed loading"""'}), "(reporters.DeserializationError, match='failed loading')\n", (6233, 6289), False, 'import pytest\n'), ((6560, 6629), 'pytest.raises', 'pytest.raises', 
(['reporters.DeserializationError'], {'match': '"""unknown result"""'}), "(reporters.DeserializationError, match='unknown result')\n", (6573, 6629), False, 'import pytest\n')] |
import logging
import os
import sys
from tambo import Transport
import chacractl
from chacractl.api import binaries, repos, exists, projects
from chacractl.decorators import catches
from chacractl import log, util
class ChacraCtl(object):
_help = """
chacractl: A utility to interact with a Chacra (binary HTTP API) service.
Version: %s
Global Options:
--log, --logging Set the level of logging. Acceptable values:
debug, warning, error, critical
Sub Commands:
%s
"""
mapper = {
'binary': binaries.Binary,
'project': projects.Project,
'repo': repos.Repo,
'exists': exists.Exists,
}
def __init__(self, argv=None, parse=True):
if argv is None:
argv = sys.argv
if parse:
self.main(argv)
def help(self):
sub_help = '\n'.join(['%-19s %s' % (
sub.__name__.lower(), getattr(sub, 'help_menu', ''))
for sub in self.mapper.values()])
return self._help % (chacractl.__version__, sub_help)
def api_credentials(self):
util.ensure_default_config()
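        # Prefer CHACRA_USER/CHACRA_KEY from the environment; otherwise fall back to the values in the config file.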
user, key = os.environ.get('CHACRA_USER'), os.environ.get('CHACRA_KEY')
if not user or not key:
# check for the config file
conf_module = util.load_config()
user, key = conf_module.user, conf_module.key
chacractl.config['credentials'] = (user, key)
chacractl.config['url'] = conf_module.url
chacractl.config['ssl_verify'] = getattr(conf_module, 'ssl_verify', True)
@catches((RuntimeError, KeyboardInterrupt))
def main(self, argv):
# Console Logger
sh = logging.StreamHandler()
sh.setFormatter(log.color_format())
sh.setLevel(logging.DEBUG)
root_logger = logging.getLogger()
root_logger.setLevel(logging.DEBUG)
root_logger.addHandler(sh)
self.api_credentials()
# TODO: Need to implement `--filename` and make it available
options = [['--log', '--logging']]
parser = Transport(argv, mapper=self.mapper,
options=options, check_help=False,
check_version=False)
parser.parse_args()
chacractl.config['verbosity'] = parser.get('--log', 'info')
parser.catch_help = self.help()
parser.catch_version = chacractl.__version__
parser.mapper = self.mapper
if len(argv) <= 1:
return parser.print_help()
parser.dispatch()
parser.catches_help()
parser.catches_version()
| [
"logging.getLogger",
"logging.StreamHandler",
"tambo.Transport",
"chacractl.decorators.catches",
"os.environ.get",
"chacractl.util.load_config",
"chacractl.util.ensure_default_config",
"chacractl.log.color_format"
] | [((1563, 1605), 'chacractl.decorators.catches', 'catches', (['(RuntimeError, KeyboardInterrupt)'], {}), '((RuntimeError, KeyboardInterrupt))\n', (1570, 1605), False, 'from chacractl.decorators import catches\n'), ((1087, 1115), 'chacractl.util.ensure_default_config', 'util.ensure_default_config', ([], {}), '()\n', (1113, 1115), False, 'from chacractl import log, util\n'), ((1670, 1693), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (1691, 1693), False, 'import logging\n'), ((1796, 1815), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (1813, 1815), False, 'import logging\n'), ((2057, 2152), 'tambo.Transport', 'Transport', (['argv'], {'mapper': 'self.mapper', 'options': 'options', 'check_help': '(False)', 'check_version': '(False)'}), '(argv, mapper=self.mapper, options=options, check_help=False,\n check_version=False)\n', (2066, 2152), False, 'from tambo import Transport\n'), ((1136, 1165), 'os.environ.get', 'os.environ.get', (['"""CHACRA_USER"""'], {}), "('CHACRA_USER')\n", (1150, 1165), False, 'import os\n'), ((1167, 1195), 'os.environ.get', 'os.environ.get', (['"""CHACRA_KEY"""'], {}), "('CHACRA_KEY')\n", (1181, 1195), False, 'import os\n'), ((1294, 1312), 'chacractl.util.load_config', 'util.load_config', ([], {}), '()\n', (1310, 1312), False, 'from chacractl import log, util\n'), ((1718, 1736), 'chacractl.log.color_format', 'log.color_format', ([], {}), '()\n', (1734, 1736), False, 'from chacractl import log, util\n')] |
from flask import Flask, render_template, request, Response
from uuid import uuid4
import json
from time import sleep
from threading import Thread
app = Flask(__name__)
import psycopg2
from jinja2 import Environment, FileSystemLoader, select_autoescape
env = Environment(
    loader=FileSystemLoader('templates'),
    autoescape=select_autoescape(['html', 'xml'])
)
def getConnection():
return psycopg2.connect("dbname='dysfunctional' user='allen' host='localhost' password='<PASSWORD>'")
directions = dict({"-1":"left", "0":"none", "1":"right"})
swapaxes= dict({"left":"up", "right":"down", "none":"none"})
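# directions maps the raw client input (-1/0/1) to a lane; swapaxes maps that lane onto the up/down/none buckets aggregated by /data.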
#read queries from file
query = dict()
queryNames = ['teamPicker', 'registerPlayer', 'input', 'data', 'deleteInactive', 'inactiveid']
for queryName in queryNames:
f = open('queries/' + queryName + '.sql', 'r')
query[queryName] = f.read()
f.close()
@app.route("/input", methods=['POST'])
def input():
direction = directions.get(request.form["direction"])
id = request.form["id"]
conn = getConnection()
cursor = conn.cursor()
cursor.execute(query['input'], (id, direction,))
conn.commit()
resp = Response("done!")
resp.headers['Access-Control-Allow-Origin'] = '*'
return resp
@app.route("/register", methods=['GET'])
def register():
team = decideTeam()
id = uuid4().hex
logUser(team,id)
data = dict({"team":team, "id":id})
resp = Response(json.dumps(data))
resp.headers['Access-Control-Allow-Origin'] = '*'
resp.headers['Content-Type'] = 'application/json'
return resp
def decideTeam():
conn = getConnection()
cursor = conn.cursor()
cursor.execute(query['teamPicker'])
zucc = cursor.fetchone()[0]
if zucc:
return "zucc"
else:
return "user"
def logUser(team, id):
conn = getConnection()
cursor = conn.cursor()
cursor.execute(query['registerPlayer'], (id, team,))
conn.commit()
@app.route("/data")
def getData():
seconds = request.args.get("seconds")
print(seconds)
userValues = dict()
teams = dict()
conn = getConnection()
cursor = conn.cursor()
cursor.execute(query['data'], (str(seconds) + " seconds",))
for row in cursor.fetchall():
userValues[row[0]] = row[2]
teams[row[0]] = row[3]
data = dict({"zucc":{"up":0, "none":0, "down":0}, "user":{"up":0, "none":0, "down":0}})
for userID, direction in userValues.items():
data[teams[userID]][swapaxes[direction]] += 1
resp = Response(json.dumps(data))
resp.headers['Access-Control-Allow-Origin'] = '*'
resp.headers['Content-Type'] = 'application/json'
return resp
def sqlFormat(list):
output = "("
for element in list:
output += "'" + element + "',"
return output[:-1] + ")"
def watchdog():
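    # Background thread: periodically purge players that have gone inactive, based on the inactiveid query.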
while True:
conn = getConnection()
cursor = conn.cursor()
cursor.execute(query['inactiveid'])
dead = list()
for row in cursor.fetchall():
dead.append(row[0])
if len(dead) > 0:
conn = getConnection()
cursor = conn.cursor()
updateQuery = query['deleteInactive'].replace("<replace>", sqlFormat(dead))
cursor.execute(updateQuery)
conn.commit()
sleep(60)
if __name__ == '__main__':
    thread = Thread(target=watchdog)
thread.start()
app.run() | [
"psycopg2.connect",
"flask.request.args.get",
"flask.Flask",
"json.dumps",
"time.sleep",
"uuid.uuid4",
"jinja2.select_autoescape",
"flask.Response",
"jinja2.FileSystemLoader",
"threading.Thread"
] | [((153, 168), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (158, 168), False, 'from flask import Flask, render_template, request, Response\n'), ((404, 508), 'psycopg2.connect', 'psycopg2.connect', (['"""dbname=\'dysfunctional\' user=\'allen\' host=\'localhost\' password=\'<PASSWORD>\'"""'], {}), '(\n "dbname=\'dysfunctional\' user=\'allen\' host=\'localhost\' password=\'<PASSWORD>\'"\n )\n', (420, 508), False, 'import psycopg2\n'), ((1156, 1173), 'flask.Response', 'Response', (['"""done!"""'], {}), "('done!')\n", (1164, 1173), False, 'from flask import Flask, render_template, request, Response\n'), ((1985, 2012), 'flask.request.args.get', 'request.args.get', (['"""seconds"""'], {}), "('seconds')\n", (2001, 2012), False, 'from flask import Flask, render_template, request, Response\n'), ((3333, 3356), 'threading.Thread', 'Thread', ([], {'target': 'watchdog'}), '(target=watchdog)\n', (3339, 3356), False, 'from threading import Thread\n'), ((287, 316), 'jinja2.FileSystemLoader', 'FileSystemLoader', (['"""templates"""'], {}), "('templates')\n", (303, 316), False, 'from jinja2 import Environment, FileSystemLoader, select_autoescape\n'), ((334, 368), 'jinja2.select_autoescape', 'select_autoescape', (["['html', 'xml']"], {}), "(['html', 'xml'])\n", (351, 368), False, 'from jinja2 import Environment, FileSystemLoader, select_autoescape\n'), ((1335, 1342), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (1340, 1342), False, 'from uuid import uuid4\n'), ((1428, 1444), 'json.dumps', 'json.dumps', (['data'], {}), '(data)\n', (1438, 1444), False, 'import json\n'), ((2509, 2525), 'json.dumps', 'json.dumps', (['data'], {}), '(data)\n', (2519, 2525), False, 'import json\n'), ((3282, 3291), 'time.sleep', 'sleep', (['(60)'], {}), '(60)\n', (3287, 3291), False, 'from time import sleep\n')] |
import pickle
from unittest import TestCase
from immutablecollections import ImmutableSet
from vistautils.range import ImmutableRangeMap, Range, immutablerangemap
class TestRangeMap(TestCase):
def test_empty(self):
self.assertFalse(0 in ImmutableRangeMap.empty())
def test_no_overlap(self):
with self.assertRaises(ValueError):
(
ImmutableRangeMap.builder()
.put(Range.closed(0, 2), "foo")
.put(Range.closed(1, 3), "bar")
.build()
)
def test_lookup(self):
range_map = (
ImmutableRangeMap.builder()
.put(Range.closed(0, 2), "foo")
.put(Range.open_closed(6, 8), "bar")
.build()
)
self.assertEqual("foo", range_map[0])
self.assertEqual("foo", range_map[1])
self.assertEqual("foo", range_map[2])
self.assertEqual(None, range_map[6])
self.assertEqual("bar", range_map[7])
self.assertEqual("bar", range_map[8])
self.assertEqual(None, range_map[9])
def test_enclosed(self):
range_map: ImmutableRangeMap[int, str] = (
ImmutableRangeMap.builder()
.put(Range.closed(0, 2), "foo")
.put(Range.open_closed(6, 8), "bar")
.put(Range.open(12, 14), "meep")
.build()
)
self.assertEqual(
ImmutableSet.of(["foo", "bar", "meep"]),
range_map.get_enclosed_by(Range.closed(-1, 15)),
)
self.assertEqual(
ImmutableSet.of(["foo"]), range_map.get_enclosed_by(Range.closed(0, 6))
)
self.assertEqual(
ImmutableSet.empty(), range_map.get_enclosed_by(Range.closed(5, 5))
)
def test_overlapping_keys_banned(self):
with self.assertRaisesRegex(
ValueError,
"Some range keys are connected or overlapping. Overlapping keys "
"will never be supported. Support for connected keys is tracked in "
"https://github.com/isi-vista/vistautils/issues/37",
):
(
ImmutableRangeMap.builder()
.put(Range.closed(0, 2), 0)
.put(Range.closed(1, 3), 1)
.build()
)
# this test should be remove after
# https://github.com/isi-vista/vistautils/issues/37 is fixed
def test_temporary_exception_on_connected_range_keys(self):
with self.assertRaisesRegex(
ValueError,
"Some range keys are connected or overlapping. Overlapping keys "
"will never be supported. Support for connected keys is tracked in "
"https://github.com/isi-vista/vistautils/issues/37",
):
(
ImmutableRangeMap.builder()
.put(Range.open(0, 2), 0)
.put(Range.closed(2, 3), 1)
.build()
)
# adapted from corresponding tests in test_range_set
def test_get_rightmost_containing_or_below(self):
range_map = immutablerangemap(
(
(Range.closed(-2, -1), 0),
(Range.closed_open(0, 2), 1),
# we don't do [0, 2), [2.1, 3] because they will coalesce
# ditto for (4, 5] and (5.1, 7)
(Range.closed(2.1, 3), 2),
(Range.open_closed(4, 5), 3),
(Range.open(5.1, 7), 4),
)
)
# probe value is in the middle of a set
# [2.1 ... *2.5* ... 3]
self.assertEqual(2, range_map.get_from_rightmost_containing_or_below(2.5))
# probe value is at a closed upper limit
# [2.1 .... *3*]
self.assertEqual(2, range_map.get_from_rightmost_containing_or_below(3.0))
# probe value is at a closed lower limit
# [*2.1* .... 3]
self.assertEqual(2, range_map.get_from_rightmost_containing_or_below(2.1))
# probe value is at an open lower limit
# [2.1 ... 3], (*4* ... 5]
self.assertEqual(2, range_map.get_from_rightmost_containing_or_below(4.0))
# probe value is at an open upper limit
# [0 ... *2.1*)
self.assertEqual(1, range_map.get_from_rightmost_containing_or_below(2.0))
# probe value falls into a gap
# [-2, -1] ... *-0.5* ... [0, 2)
self.assertEqual(0, range_map.get_from_rightmost_containing_or_below(-0.5))
# no range below
# *-3* .... [-2,-1]
self.assertIsNone(range_map.get_from_rightmost_containing_or_below(-3.0))
# empty rangeset
self.assertIsNone(
immutablerangemap(
((Range.closed(1.0, 2.0), 1),)
).get_from_rightmost_containing_or_below(0.0)
)
# lowest range has open lower bound
# (*1*,2)
self.assertIsNone(
immutablerangemap(
((Range.open(1.0, 2.0), 1),)
).get_from_rightmost_containing_or_below(1.0)
)
# adapted from corresponding tests in test_range_set
def test_get_leftmost_containing_or_above(self):
range_map = immutablerangemap(
(
(Range.closed(-2, -1), 0),
(Range.closed_open(0, 2), 1),
# we don't do [0, 2), [2.1, 3] because they will coalesce
# ditto for (4, 5] and (5.1, 7)
(Range.closed(2.1, 3), 2),
(Range.open_closed(4, 5), 3),
(Range.open(5.1, 7), 4),
)
)
# probe value is in the middle of a set
# [2.1 ... *2.5* ... 3]
self.assertEqual(2, range_map.get_from_leftmost_containing_or_above(2.5))
# probe value is at a closed upper limit
# [2.1 .... *3*]
self.assertEqual(2, range_map.get_from_leftmost_containing_or_above(3.0))
# probe value is at a closed lower limit
# [*2.1* .... 3]
self.assertEqual(2, range_map.get_from_leftmost_containing_or_above(2.1))
# probe value is at an open lower limit
# [2 ... 3], (*4* ... 5]
self.assertEqual(3, range_map.get_from_leftmost_containing_or_above(4.0))
# probe value is at an open upper limit
# [0 ... *2*) [2.1, 3.0]
self.assertEqual(2, range_map.get_from_leftmost_containing_or_above(2.0))
# probe value falls into a gap
# [-2, -1] ... *-0.5* ... [0, 2)
self.assertEqual(1, range_map.get_from_leftmost_containing_or_above(-0.5))
# no range above
# (5.1 ... 7) ... *8*
self.assertIsNone(range_map.get_from_leftmost_containing_or_above(8))
# empty rangeset
self.assertIsNone(
immutablerangemap(
((Range.closed(1.0, 2.0), 1),)
).get_from_leftmost_containing_or_above(3.0)
)
# higher range has open upper bound
# (1,*2*)
self.assertIsNone(
immutablerangemap(
((Range.open(1.0, 2.0), 1),)
).get_from_leftmost_containing_or_above(2.0)
)
def test_pickling(self):
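        # Round-trip an empty and a populated range map through pickle, and check the __reduce__ contract directly.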
empty_rangemap = immutablerangemap({})
ranges = (Range.closed(0, 2), Range.closed(5, 29), Range.closed(35, 39))
values = ("foo", "bar", "meep")
rangemap = immutablerangemap(zip(ranges, values))
self.assertEqual(empty_rangemap, pickle.loads(pickle.dumps(empty_rangemap)))
self.assertEqual(rangemap, pickle.loads(pickle.dumps(rangemap)))
self.assertEqual(empty_rangemap.__reduce__(), (immutablerangemap, ((),)))
self.assertEqual(
rangemap.__reduce__(), (immutablerangemap, (tuple(zip(ranges, values)),))
)
| [
"vistautils.range.Range.closed_open",
"vistautils.range.ImmutableRangeMap.builder",
"vistautils.range.Range.closed",
"pickle.dumps",
"vistautils.range.immutablerangemap",
"vistautils.range.Range.open_closed",
"vistautils.range.Range.open",
"immutablecollections.ImmutableSet.of",
"vistautils.range.ImmutableRangeMap.empty",
"immutablecollections.ImmutableSet.empty"
] | [((7105, 7126), 'vistautils.range.immutablerangemap', 'immutablerangemap', (['{}'], {}), '({})\n', (7122, 7126), False, 'from vistautils.range import ImmutableRangeMap, Range, immutablerangemap\n'), ((1412, 1451), 'immutablecollections.ImmutableSet.of', 'ImmutableSet.of', (["['foo', 'bar', 'meep']"], {}), "(['foo', 'bar', 'meep'])\n", (1427, 1451), False, 'from immutablecollections import ImmutableSet\n'), ((1562, 1586), 'immutablecollections.ImmutableSet.of', 'ImmutableSet.of', (["['foo']"], {}), "(['foo'])\n", (1577, 1586), False, 'from immutablecollections import ImmutableSet\n'), ((1682, 1702), 'immutablecollections.ImmutableSet.empty', 'ImmutableSet.empty', ([], {}), '()\n', (1700, 1702), False, 'from immutablecollections import ImmutableSet\n'), ((7145, 7163), 'vistautils.range.Range.closed', 'Range.closed', (['(0)', '(2)'], {}), '(0, 2)\n', (7157, 7163), False, 'from vistautils.range import ImmutableRangeMap, Range, immutablerangemap\n'), ((7165, 7184), 'vistautils.range.Range.closed', 'Range.closed', (['(5)', '(29)'], {}), '(5, 29)\n', (7177, 7184), False, 'from vistautils.range import ImmutableRangeMap, Range, immutablerangemap\n'), ((7186, 7206), 'vistautils.range.Range.closed', 'Range.closed', (['(35)', '(39)'], {}), '(35, 39)\n', (7198, 7206), False, 'from vistautils.range import ImmutableRangeMap, Range, immutablerangemap\n'), ((253, 278), 'vistautils.range.ImmutableRangeMap.empty', 'ImmutableRangeMap.empty', ([], {}), '()\n', (276, 278), False, 'from vistautils.range import ImmutableRangeMap, Range, immutablerangemap\n'), ((1491, 1511), 'vistautils.range.Range.closed', 'Range.closed', (['(-1)', '(15)'], {}), '(-1, 15)\n', (1503, 1511), False, 'from vistautils.range import ImmutableRangeMap, Range, immutablerangemap\n'), ((1614, 1632), 'vistautils.range.Range.closed', 'Range.closed', (['(0)', '(6)'], {}), '(0, 6)\n', (1626, 1632), False, 'from vistautils.range import ImmutableRangeMap, Range, immutablerangemap\n'), ((1730, 1748), 'vistautils.range.Range.closed', 'Range.closed', (['(5)', '(5)'], {}), '(5, 5)\n', (1742, 1748), False, 'from vistautils.range import ImmutableRangeMap, Range, immutablerangemap\n'), ((7361, 7389), 'pickle.dumps', 'pickle.dumps', (['empty_rangemap'], {}), '(empty_rangemap)\n', (7373, 7389), False, 'import pickle\n'), ((7440, 7462), 'pickle.dumps', 'pickle.dumps', (['rangemap'], {}), '(rangemap)\n', (7452, 7462), False, 'import pickle\n'), ((700, 723), 'vistautils.range.Range.open_closed', 'Range.open_closed', (['(6)', '(8)'], {}), '(6, 8)\n', (717, 723), False, 'from vistautils.range import ImmutableRangeMap, Range, immutablerangemap\n'), ((1314, 1332), 'vistautils.range.Range.open', 'Range.open', (['(12)', '(14)'], {}), '(12, 14)\n', (1324, 1332), False, 'from vistautils.range import ImmutableRangeMap, Range, immutablerangemap\n'), ((3116, 3136), 'vistautils.range.Range.closed', 'Range.closed', (['(-2)', '(-1)'], {}), '(-2, -1)\n', (3128, 3136), False, 'from vistautils.range import ImmutableRangeMap, Range, immutablerangemap\n'), ((3159, 3182), 'vistautils.range.Range.closed_open', 'Range.closed_open', (['(0)', '(2)'], {}), '(0, 2)\n', (3176, 3182), False, 'from vistautils.range import ImmutableRangeMap, Range, immutablerangemap\n'), ((3327, 3347), 'vistautils.range.Range.closed', 'Range.closed', (['(2.1)', '(3)'], {}), '(2.1, 3)\n', (3339, 3347), False, 'from vistautils.range import ImmutableRangeMap, Range, immutablerangemap\n'), ((3370, 3393), 'vistautils.range.Range.open_closed', 'Range.open_closed', (['(4)', '(5)'], {}), '(4, 5)\n', (3387, 
3393), False, 'from vistautils.range import ImmutableRangeMap, Range, immutablerangemap\n'), ((3416, 3434), 'vistautils.range.Range.open', 'Range.open', (['(5.1)', '(7)'], {}), '(5.1, 7)\n', (3426, 3434), False, 'from vistautils.range import ImmutableRangeMap, Range, immutablerangemap\n'), ((5175, 5195), 'vistautils.range.Range.closed', 'Range.closed', (['(-2)', '(-1)'], {}), '(-2, -1)\n', (5187, 5195), False, 'from vistautils.range import ImmutableRangeMap, Range, immutablerangemap\n'), ((5218, 5241), 'vistautils.range.Range.closed_open', 'Range.closed_open', (['(0)', '(2)'], {}), '(0, 2)\n', (5235, 5241), False, 'from vistautils.range import ImmutableRangeMap, Range, immutablerangemap\n'), ((5386, 5406), 'vistautils.range.Range.closed', 'Range.closed', (['(2.1)', '(3)'], {}), '(2.1, 3)\n', (5398, 5406), False, 'from vistautils.range import ImmutableRangeMap, Range, immutablerangemap\n'), ((5429, 5452), 'vistautils.range.Range.open_closed', 'Range.open_closed', (['(4)', '(5)'], {}), '(4, 5)\n', (5446, 5452), False, 'from vistautils.range import ImmutableRangeMap, Range, immutablerangemap\n'), ((5475, 5493), 'vistautils.range.Range.open', 'Range.open', (['(5.1)', '(7)'], {}), '(5.1, 7)\n', (5485, 5493), False, 'from vistautils.range import ImmutableRangeMap, Range, immutablerangemap\n'), ((483, 501), 'vistautils.range.Range.closed', 'Range.closed', (['(1)', '(3)'], {}), '(1, 3)\n', (495, 501), False, 'from vistautils.range import ImmutableRangeMap, Range, immutablerangemap\n'), ((2224, 2242), 'vistautils.range.Range.closed', 'Range.closed', (['(1)', '(3)'], {}), '(1, 3)\n', (2236, 2242), False, 'from vistautils.range import ImmutableRangeMap, Range, immutablerangemap\n'), ((2872, 2890), 'vistautils.range.Range.closed', 'Range.closed', (['(2)', '(3)'], {}), '(2, 3)\n', (2884, 2890), False, 'from vistautils.range import ImmutableRangeMap, Range, immutablerangemap\n'), ((656, 674), 'vistautils.range.Range.closed', 'Range.closed', (['(0)', '(2)'], {}), '(0, 2)\n', (668, 674), False, 'from vistautils.range import ImmutableRangeMap, Range, immutablerangemap\n'), ((1265, 1288), 'vistautils.range.Range.open_closed', 'Range.open_closed', (['(6)', '(8)'], {}), '(6, 8)\n', (1282, 1288), False, 'from vistautils.range import ImmutableRangeMap, Range, immutablerangemap\n'), ((435, 453), 'vistautils.range.Range.closed', 'Range.closed', (['(0)', '(2)'], {}), '(0, 2)\n', (447, 453), False, 'from vistautils.range import ImmutableRangeMap, Range, immutablerangemap\n'), ((611, 638), 'vistautils.range.ImmutableRangeMap.builder', 'ImmutableRangeMap.builder', ([], {}), '()\n', (636, 638), False, 'from vistautils.range import ImmutableRangeMap, Range, immutablerangemap\n'), ((2180, 2198), 'vistautils.range.Range.closed', 'Range.closed', (['(0)', '(2)'], {}), '(0, 2)\n', (2192, 2198), False, 'from vistautils.range import ImmutableRangeMap, Range, immutablerangemap\n'), ((2830, 2846), 'vistautils.range.Range.open', 'Range.open', (['(0)', '(2)'], {}), '(0, 2)\n', (2840, 2846), False, 'from vistautils.range import ImmutableRangeMap, Range, immutablerangemap\n'), ((4664, 4686), 'vistautils.range.Range.closed', 'Range.closed', (['(1.0)', '(2.0)'], {}), '(1.0, 2.0)\n', (4676, 4686), False, 'from vistautils.range import ImmutableRangeMap, Range, immutablerangemap\n'), ((4899, 4919), 'vistautils.range.Range.open', 'Range.open', (['(1.0)', '(2.0)'], {}), '(1.0, 2.0)\n', (4909, 4919), False, 'from vistautils.range import ImmutableRangeMap, Range, immutablerangemap\n'), ((6722, 6744), 'vistautils.range.Range.closed', 
'Range.closed', (['(1.0)', '(2.0)'], {}), '(1.0, 2.0)\n', (6734, 6744), False, 'from vistautils.range import ImmutableRangeMap, Range, immutablerangemap\n'), ((6956, 6976), 'vistautils.range.Range.open', 'Range.open', (['(1.0)', '(2.0)'], {}), '(1.0, 2.0)\n', (6966, 6976), False, 'from vistautils.range import ImmutableRangeMap, Range, immutablerangemap\n'), ((386, 413), 'vistautils.range.ImmutableRangeMap.builder', 'ImmutableRangeMap.builder', ([], {}), '()\n', (411, 413), False, 'from vistautils.range import ImmutableRangeMap, Range, immutablerangemap\n'), ((1221, 1239), 'vistautils.range.Range.closed', 'Range.closed', (['(0)', '(2)'], {}), '(0, 2)\n', (1233, 1239), False, 'from vistautils.range import ImmutableRangeMap, Range, immutablerangemap\n'), ((2131, 2158), 'vistautils.range.ImmutableRangeMap.builder', 'ImmutableRangeMap.builder', ([], {}), '()\n', (2156, 2158), False, 'from vistautils.range import ImmutableRangeMap, Range, immutablerangemap\n'), ((2781, 2808), 'vistautils.range.ImmutableRangeMap.builder', 'ImmutableRangeMap.builder', ([], {}), '()\n', (2806, 2808), False, 'from vistautils.range import ImmutableRangeMap, Range, immutablerangemap\n'), ((1176, 1203), 'vistautils.range.ImmutableRangeMap.builder', 'ImmutableRangeMap.builder', ([], {}), '()\n', (1201, 1203), False, 'from vistautils.range import ImmutableRangeMap, Range, immutablerangemap\n')] |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-06-07 19:52
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('carts', '0004_auto_20170607_1354'),
]
operations = [
migrations.RemoveField(
model_name='cart',
name='items',
),
migrations.RemoveField(
model_name='cart',
name='products',
),
migrations.RemoveField(
model_name='cart',
name='total',
),
migrations.AddField(
model_name='cartitem',
name='cart',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='carts.Cart'),
),
]
| [
"django.db.migrations.RemoveField",
"django.db.models.ForeignKey"
] | [((331, 386), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""cart"""', 'name': '"""items"""'}), "(model_name='cart', name='items')\n", (353, 386), False, 'from django.db import migrations, models\n'), ((431, 489), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""cart"""', 'name': '"""products"""'}), "(model_name='cart', name='products')\n", (453, 489), False, 'from django.db import migrations, models\n'), ((534, 589), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""cart"""', 'name': '"""total"""'}), "(model_name='cart', name='total')\n", (556, 589), False, 'from django.db import migrations, models\n'), ((733, 840), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'blank': '(True)', 'null': '(True)', 'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""carts.Cart"""'}), "(blank=True, null=True, on_delete=django.db.models.\n deletion.CASCADE, to='carts.Cart')\n", (750, 840), False, 'from django.db import migrations, models\n')] |
#!/usr/bin/env python2.7
import subprocess
import os
import json
import datetime
import syslog
import traceback
import sys
from Foundation import NSUserDefaults, CFPreferencesSetAppValue, CFPreferencesAppSynchronize, NSDate
from pwd import getpwnam
from optparse import OptionParser
def load_config(config_files):
""" loads json configuration files
the latter configs overwrite the previous configs
"""
config = dict()
for f in config_files:
try:
with open(f, 'rt') as cfg:
config.update(json.load(cfg))
except ValueError as e:
exit_with_error("The json config file {configfile} is not correctly formatted." \
"The following exception was raised:\n{exc}".format(configfile=f, exc=e))
return config
def run(config):
""" starts self-control with custom parameters, depending on the weekday and the config """
if check_if_running(config["username"]):
syslog.syslog(syslog.LOG_ALERT, "SelfControl is already running, ignore current execution of Auto-SelfControl.")
exit(2)
try:
schedule = next(s for s in config["block-schedules"] if is_schedule_active(s))
except StopIteration:
syslog.syslog(syslog.LOG_ALERT, "No schedule is active at the moment. Shutting down.")
exit(0)
duration = get_duration_minutes(schedule["end-hour"], schedule["end-minute"])
set_selfcontrol_setting("BlockDuration", duration, config["username"])
set_selfcontrol_setting("BlockAsWhitelist", 1 if schedule.get("block-as-whitelist", False) else 0,
config["username"])
if schedule.get("host-blacklist", None) is not None:
set_selfcontrol_setting("HostBlacklist", schedule["host-blacklist"], config["username"])
elif config.get("host-blacklist", None) is not None:
set_selfcontrol_setting("HostBlacklist", config["host-blacklist"], config["username"])
# In legacy mode manually set the BlockStartedDate, this should not be required anymore in future versions
# of SelfControl.
if config.get("legacy-mode", True):
set_selfcontrol_setting("BlockStartedDate", NSDate.date(), config["username"])
# Start SelfControl
os.system("{path}/Contents/MacOS/org.eyebeam.SelfControl {userId} --install".format(path=config["selfcontrol-path"], userId=str(getpwnam(config["username"]).pw_uid)))
syslog.syslog(syslog.LOG_ALERT, "SelfControl started for {min} minute(s).".format(min=duration))
def check_if_running(username):
""" checks if self-control is already running. """
defaults = get_selfcontrol_settings(username)
return defaults.has_key("BlockStartedDate") and not NSDate.distantFuture().isEqualToDate_(defaults["BlockStartedDate"])
def is_schedule_active(schedule):
""" checks if we are right now in the provided schedule or not """
currenttime = datetime.datetime.today()
starttime = datetime.datetime(currenttime.year, currenttime.month, currenttime.day, schedule["start-hour"],
schedule["start-minute"])
endtime = datetime.datetime(currenttime.year, currenttime.month, currenttime.day, schedule["end-hour"],
schedule["end-minute"])
d = endtime - starttime
for weekday in get_schedule_weekdays(schedule):
weekday_diff = currenttime.isoweekday() % 7 - weekday % 7
if weekday_diff == 0:
# schedule's weekday is today
result = starttime <= currenttime and endtime >= currenttime if d.days == 0 else starttime <= currenttime
elif weekday_diff == 1 or weekday_diff == -6:
# schedule's weekday was yesterday
result = d.days != 0 and currenttime <= endtime
else:
# schedule's weekday was on any other day.
result = False
if result:
return result
return False
def get_duration_minutes(endhour, endminute):
""" returns the minutes left until the schedule's end-hour and end-minute are reached """
currenttime = datetime.datetime.today()
endtime = datetime.datetime(currenttime.year, currenttime.month, currenttime.day, endhour, endminute)
d = endtime - currenttime
return int(round(d.seconds / 60.0))
def get_schedule_weekdays(schedule):
""" returns a list of weekdays the specified schedule is active """
return [schedule["weekday"]] if schedule.get("weekday", None) is not None else range(1, 8)
def set_selfcontrol_setting(key, value, username):
""" sets a single default setting of SelfControl for the provied username """
NSUserDefaults.resetStandardUserDefaults()
originalUID = os.geteuid()
os.seteuid(getpwnam(username).pw_uid)
CFPreferencesSetAppValue(key, value, "org.eyebeam.SelfControl")
CFPreferencesAppSynchronize("org.eyebeam.SelfControl")
NSUserDefaults.resetStandardUserDefaults()
os.seteuid(originalUID)
def get_selfcontrol_settings(username):
""" returns all default settings of SelfControl for the provided username """
NSUserDefaults.resetStandardUserDefaults()
originalUID = os.geteuid()
os.seteuid(getpwnam(username).pw_uid)
defaults = NSUserDefaults.standardUserDefaults()
defaults.addSuiteNamed_("org.eyebeam.SelfControl")
defaults.synchronize()
result = defaults.dictionaryRepresentation()
NSUserDefaults.resetStandardUserDefaults()
os.seteuid(originalUID)
return result
def get_launchscript(config):
""" returns the string of the launchscript """
return '''<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
<key>Label</key>
<string>com.parrot-bytes.auto-selfcontrol</string>
<key>ProgramArguments</key>
<array>
<string>/usr/bin/python</string>
<string>{path}</string>
<string>-r</string>
</array>
<key>StartCalendarInterval</key>
<array>
{startintervals}</array>
<key>RunAtLoad</key>
<true/>
</dict>
</plist>'''.format(path=os.path.realpath(__file__), startintervals="".join(get_launchscript_startintervals(config)))
def get_launchscript_startintervals(config):
""" returns the string of the launchscript start intervals """
entries = list()
for schedule in config["block-schedules"]:
for weekday in get_schedule_weekdays(schedule):
yield ('''<dict>
<key>Weekday</key>
<integer>{weekday}</integer>
<key>Minute</key>
<integer>{startminute}</integer>
<key>Hour</key>
<integer>{starthour}</integer>
</dict>
'''.format(weekday=weekday, startminute=schedule['start-minute'], starthour=schedule['start-hour']))
def install(config):
""" installs auto-selfcontrol """
print("> Start installation of Auto-SelfControl")
launchplist_path = "/Library/LaunchDaemons/com.parrot-bytes.auto-selfcontrol.plist"
# Check for existing plist
if os.path.exists(launchplist_path):
print("> Removed previous installation files")
subprocess.call(["launchctl", "unload", "-w", launchplist_path])
os.unlink(launchplist_path)
launchplist_script = get_launchscript(config)
with open(launchplist_path, 'w') as myfile:
myfile.write(launchplist_script)
subprocess.call(["launchctl", "load", "-w", launchplist_path])
print("> Installed\n")
def check_config(config):
""" checks whether the config file is correct """
if not config.has_key("username"):
exit_with_error("No username specified in config.")
if config["username"] not in get_osx_usernames():
exit_with_error(
"Username '{username}' unknown.\nPlease use your OSX username instead.\n" \
"If you have trouble finding it, just enter the command 'whoami'\n" \
"in your terminal.".format(
username=config["username"]))
if not config.has_key("selfcontrol-path"):
exit_with_error("The setting 'selfcontrol-path' is required and must point to the location of SelfControl.")
if not os.path.exists(config["selfcontrol-path"]):
exit_with_error(
"The setting 'selfcontrol-path' does not point to the correct location of SelfControl. " \
"Please make sure to use an absolute path and include the '.app' extension, " \
"e.g. /Applications/SelfControl.app")
if not config.has_key("block-schedules"):
exit_with_error("The setting 'block-schedules' is required.")
if len(config["block-schedules"]) == 0:
exit_with_error("You need at least one schedule in 'block-schedules'.")
if config.get("host-blacklist", None) is None:
print("WARNING:")
msg = "It is not recommended to directly use SelfControl's blacklist. Please use the 'host-blacklist' " \
"setting instead."
print(msg)
syslog.syslog(syslog.LOG_WARNING, msg)
def get_osx_usernames():
output = subprocess.check_output(["dscl", ".", "list", "/users"])
return [s.strip() for s in output.splitlines()]
def excepthook(excType, excValue, tb):
""" this function is called whenever an exception is not caught """
err = "Uncaught exception:\n{}\n{}\n{}".format(str(excType), excValue,
"".join(traceback.format_exception(excType, excValue, tb)))
syslog.syslog(syslog.LOG_CRIT, err)
print(err)
def exit_with_error(message):
syslog.syslog(syslog.LOG_CRIT, message)
print("ERROR:")
print(message)
exit(1)
if __name__ == "__main__":
__location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
sys.excepthook = excepthook
syslog.openlog("Auto-SelfControl")
if os.geteuid() != 0:
exit_with_error("Please make sure to run the script with elevated rights, such as:\nsudo python {file}".format(
file=os.path.realpath(__file__)))
parser = OptionParser()
parser.add_option("-r", "--run", action="store_true",
dest="run", default=False)
(opts, args) = parser.parse_args()
config = load_config([os.path.join(__location__, "config.json")])
if opts.run:
run(config)
else:
check_config(config)
install(config)
if not check_if_running(config["username"]) and any(s for s in config["block-schedules"] if is_schedule_active(s)):
print("> Active schedule found for SelfControl!")
print("> Start SelfControl (this could take a few minutes)\n")
run(config)
print("\n> SelfControl was started.\n")
| [
"Foundation.NSUserDefaults.standardUserDefaults",
"datetime.datetime.today",
"datetime.datetime",
"os.path.exists",
"Foundation.NSDate.date",
"pwd.getpwnam",
"Foundation.CFPreferencesAppSynchronize",
"subprocess.call",
"os.unlink",
"Foundation.NSUserDefaults.resetStandardUserDefaults",
"subprocess.check_output",
"os.seteuid",
"traceback.format_exception",
"syslog.syslog",
"os.path.dirname",
"syslog.openlog",
"Foundation.NSDate.distantFuture",
"os.path.join",
"os.geteuid",
"optparse.OptionParser",
"os.getcwd",
"os.path.realpath",
"json.load",
"Foundation.CFPreferencesSetAppValue"
] | [((2908, 2933), 'datetime.datetime.today', 'datetime.datetime.today', ([], {}), '()\n', (2931, 2933), False, 'import datetime\n'), ((2950, 3075), 'datetime.datetime', 'datetime.datetime', (['currenttime.year', 'currenttime.month', 'currenttime.day', "schedule['start-hour']", "schedule['start-minute']"], {}), "(currenttime.year, currenttime.month, currenttime.day,\n schedule['start-hour'], schedule['start-minute'])\n", (2967, 3075), False, 'import datetime\n'), ((3120, 3241), 'datetime.datetime', 'datetime.datetime', (['currenttime.year', 'currenttime.month', 'currenttime.day', "schedule['end-hour']", "schedule['end-minute']"], {}), "(currenttime.year, currenttime.month, currenttime.day,\n schedule['end-hour'], schedule['end-minute'])\n", (3137, 3241), False, 'import datetime\n'), ((4089, 4114), 'datetime.datetime.today', 'datetime.datetime.today', ([], {}), '()\n', (4112, 4114), False, 'import datetime\n'), ((4129, 4224), 'datetime.datetime', 'datetime.datetime', (['currenttime.year', 'currenttime.month', 'currenttime.day', 'endhour', 'endminute'], {}), '(currenttime.year, currenttime.month, currenttime.day,\n endhour, endminute)\n', (4146, 4224), False, 'import datetime\n'), ((4636, 4678), 'Foundation.NSUserDefaults.resetStandardUserDefaults', 'NSUserDefaults.resetStandardUserDefaults', ([], {}), '()\n', (4676, 4678), False, 'from Foundation import NSUserDefaults, CFPreferencesSetAppValue, CFPreferencesAppSynchronize, NSDate\n'), ((4697, 4709), 'os.geteuid', 'os.geteuid', ([], {}), '()\n', (4707, 4709), False, 'import os\n'), ((4756, 4819), 'Foundation.CFPreferencesSetAppValue', 'CFPreferencesSetAppValue', (['key', 'value', '"""org.eyebeam.SelfControl"""'], {}), "(key, value, 'org.eyebeam.SelfControl')\n", (4780, 4819), False, 'from Foundation import NSUserDefaults, CFPreferencesSetAppValue, CFPreferencesAppSynchronize, NSDate\n'), ((4824, 4878), 'Foundation.CFPreferencesAppSynchronize', 'CFPreferencesAppSynchronize', (['"""org.eyebeam.SelfControl"""'], {}), "('org.eyebeam.SelfControl')\n", (4851, 4878), False, 'from Foundation import NSUserDefaults, CFPreferencesSetAppValue, CFPreferencesAppSynchronize, NSDate\n'), ((4883, 4925), 'Foundation.NSUserDefaults.resetStandardUserDefaults', 'NSUserDefaults.resetStandardUserDefaults', ([], {}), '()\n', (4923, 4925), False, 'from Foundation import NSUserDefaults, CFPreferencesSetAppValue, CFPreferencesAppSynchronize, NSDate\n'), ((4930, 4953), 'os.seteuid', 'os.seteuid', (['originalUID'], {}), '(originalUID)\n', (4940, 4953), False, 'import os\n'), ((5082, 5124), 'Foundation.NSUserDefaults.resetStandardUserDefaults', 'NSUserDefaults.resetStandardUserDefaults', ([], {}), '()\n', (5122, 5124), False, 'from Foundation import NSUserDefaults, CFPreferencesSetAppValue, CFPreferencesAppSynchronize, NSDate\n'), ((5143, 5155), 'os.geteuid', 'os.geteuid', ([], {}), '()\n', (5153, 5155), False, 'import os\n'), ((5213, 5250), 'Foundation.NSUserDefaults.standardUserDefaults', 'NSUserDefaults.standardUserDefaults', ([], {}), '()\n', (5248, 5250), False, 'from Foundation import NSUserDefaults, CFPreferencesSetAppValue, CFPreferencesAppSynchronize, NSDate\n'), ((5386, 5428), 'Foundation.NSUserDefaults.resetStandardUserDefaults', 'NSUserDefaults.resetStandardUserDefaults', ([], {}), '()\n', (5426, 5428), False, 'from Foundation import NSUserDefaults, CFPreferencesSetAppValue, CFPreferencesAppSynchronize, NSDate\n'), ((5433, 5456), 'os.seteuid', 'os.seteuid', (['originalUID'], {}), '(originalUID)\n', (5443, 5456), False, 'import os\n'), ((7210, 7242), 
'os.path.exists', 'os.path.exists', (['launchplist_path'], {}), '(launchplist_path)\n', (7224, 7242), False, 'import os\n'), ((7554, 7616), 'subprocess.call', 'subprocess.call', (["['launchctl', 'load', '-w', launchplist_path]"], {}), "(['launchctl', 'load', '-w', launchplist_path])\n", (7569, 7616), False, 'import subprocess\n'), ((9252, 9308), 'subprocess.check_output', 'subprocess.check_output', (["['dscl', '.', 'list', '/users']"], {}), "(['dscl', '.', 'list', '/users'])\n", (9275, 9308), False, 'import subprocess\n'), ((9664, 9699), 'syslog.syslog', 'syslog.syslog', (['syslog.LOG_CRIT', 'err'], {}), '(syslog.LOG_CRIT, err)\n', (9677, 9699), False, 'import syslog\n'), ((9751, 9790), 'syslog.syslog', 'syslog.syslog', (['syslog.LOG_CRIT', 'message'], {}), '(syslog.LOG_CRIT, message)\n', (9764, 9790), False, 'import syslog\n'), ((9998, 10032), 'syslog.openlog', 'syslog.openlog', (['"""Auto-SelfControl"""'], {}), "('Auto-SelfControl')\n", (10012, 10032), False, 'import syslog\n'), ((10244, 10258), 'optparse.OptionParser', 'OptionParser', ([], {}), '()\n', (10256, 10258), False, 'from optparse import OptionParser\n'), ((981, 1102), 'syslog.syslog', 'syslog.syslog', (['syslog.LOG_ALERT', '"""SelfControl is already running, ignore current execution of Auto-SelfControl."""'], {}), "(syslog.LOG_ALERT,\n 'SelfControl is already running, ignore current execution of Auto-SelfControl.'\n )\n", (994, 1102), False, 'import syslog\n'), ((7307, 7371), 'subprocess.call', 'subprocess.call', (["['launchctl', 'unload', '-w', launchplist_path]"], {}), "(['launchctl', 'unload', '-w', launchplist_path])\n", (7322, 7371), False, 'import subprocess\n'), ((7380, 7407), 'os.unlink', 'os.unlink', (['launchplist_path'], {}), '(launchplist_path)\n', (7389, 7407), False, 'import os\n'), ((8356, 8398), 'os.path.exists', 'os.path.exists', (["config['selfcontrol-path']"], {}), "(config['selfcontrol-path'])\n", (8370, 8398), False, 'import os\n'), ((9173, 9211), 'syslog.syslog', 'syslog.syslog', (['syslog.LOG_WARNING', 'msg'], {}), '(syslog.LOG_WARNING, msg)\n', (9186, 9211), False, 'import syslog\n'), ((10041, 10053), 'os.geteuid', 'os.geteuid', ([], {}), '()\n', (10051, 10053), False, 'import os\n'), ((1241, 1331), 'syslog.syslog', 'syslog.syslog', (['syslog.LOG_ALERT', '"""No schedule is active at the moment. Shutting down."""'], {}), "(syslog.LOG_ALERT,\n 'No schedule is active at the moment. 
Shutting down.')\n", (1254, 1331), False, 'import syslog\n'), ((2187, 2200), 'Foundation.NSDate.date', 'NSDate.date', ([], {}), '()\n', (2198, 2200), False, 'from Foundation import NSUserDefaults, CFPreferencesSetAppValue, CFPreferencesAppSynchronize, NSDate\n'), ((4725, 4743), 'pwd.getpwnam', 'getpwnam', (['username'], {}), '(username)\n', (4733, 4743), False, 'from pwd import getpwnam\n'), ((5171, 5189), 'pwd.getpwnam', 'getpwnam', (['username'], {}), '(username)\n', (5179, 5189), False, 'from pwd import getpwnam\n'), ((6200, 6226), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (6216, 6226), False, 'import os\n'), ((9608, 9657), 'traceback.format_exception', 'traceback.format_exception', (['excType', 'excValue', 'tb'], {}), '(excType, excValue, tb)\n', (9634, 9657), False, 'import traceback\n'), ((9920, 9931), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (9929, 9931), False, 'import os\n'), ((9933, 9958), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (9948, 9958), False, 'import os\n'), ((10431, 10472), 'os.path.join', 'os.path.join', (['__location__', '"""config.json"""'], {}), "(__location__, 'config.json')\n", (10443, 10472), False, 'import os\n'), ((549, 563), 'json.load', 'json.load', (['cfg'], {}), '(cfg)\n', (558, 563), False, 'import json\n'), ((2715, 2737), 'Foundation.NSDate.distantFuture', 'NSDate.distantFuture', ([], {}), '()\n', (2735, 2737), False, 'from Foundation import NSUserDefaults, CFPreferencesSetAppValue, CFPreferencesAppSynchronize, NSDate\n'), ((10201, 10227), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (10217, 10227), False, 'import os\n'), ((2379, 2407), 'pwd.getpwnam', 'getpwnam', (["config['username']"], {}), "(config['username'])\n", (2387, 2407), False, 'from pwd import getpwnam\n')] |
"""
This file provides support for initializing projects for Java.
"""
from pathlib import Path
from builder.java.java import JavaConfiguration, java_version_number
from builder.jetbrains.intellij import IJProject, IJImlFile
from builder.project import Project
from builder.utils import global_options
def init_java_project(language_config: JavaConfiguration):
project = global_options.project()
ij_project = IJProject(project.directory)
_create_project_yaml(project)
create_ij_project_file(language_config, ij_project)
_create_directory_tree(language_config)
_create_misc_xml(ij_project)
_create_modules_xml(ij_project, project.name)
ij_project.save()
def _create_project_yaml(project: Project):
title = global_options.var('title')
version = global_options.var('version') or '1.0.0'
lines = [
        f'# This is the project file for the {project.name} project.',
'',
'info:',
f' name: {project.name}']
if title:
lines.append(f' title: {title}')
lines.append(f' version: {version}')
lines.append(f' languages: java')
lines.append('')
path = project.directory / 'project.yaml'
path.write_text('\n'.join(lines), encoding='utf-8')
def create_ij_project_file(config: JavaConfiguration, ij_project: IJProject):
_ = ij_project.iml_file(
source=f'{config.source}/{config.code_source}',
resources=f'{config.source}/{config.code_resources}',
tests=f'{config.source}/{config.tests_source}',
test_resources=f'{config.source}/{config.test_resources}'
)
def _create_directory_tree(language_config: JavaConfiguration):
package = global_options.var('package')
code_dir = language_config.code_dir(ensure=True)
tests_dir = language_config.tests_dir(ensure=True)
if package:
sub_path = Path(package.replace('.', '/'))
path = code_dir / sub_path
path.mkdir(parents=True)
path = tests_dir / sub_path
path.mkdir(parents=True)
language_config.resources_dir(ensure=True)
language_config.test_resources_dir(ensure=True)
def _create_misc_xml(ij_project: IJProject):
_ = ij_project.misc_file(
java_version_number=java_version_number
)
def _create_modules_xml(ij_project: IJProject, project_name: str):
_ = ij_project.modules_file(
project_name=project_name
)
| [
"builder.utils.global_options.project",
"builder.utils.global_options.var",
"builder.jetbrains.intellij.IJProject"
] | [((378, 402), 'builder.utils.global_options.project', 'global_options.project', ([], {}), '()\n', (400, 402), False, 'from builder.utils import global_options\n'), ((420, 448), 'builder.jetbrains.intellij.IJProject', 'IJProject', (['project.directory'], {}), '(project.directory)\n', (429, 448), False, 'from builder.jetbrains.intellij import IJProject, IJImlFile\n'), ((748, 775), 'builder.utils.global_options.var', 'global_options.var', (['"""title"""'], {}), "('title')\n", (766, 775), False, 'from builder.utils import global_options\n'), ((1688, 1717), 'builder.utils.global_options.var', 'global_options.var', (['"""package"""'], {}), "('package')\n", (1706, 1717), False, 'from builder.utils import global_options\n'), ((790, 819), 'builder.utils.global_options.var', 'global_options.var', (['"""version"""'], {}), "('version')\n", (808, 819), False, 'from builder.utils import global_options\n')] |
import unittest
from jute import (
Attribute, Opaque, DynamicInterface, implements,
InterfaceConformanceError
)
class Result:
OK = 'ok'
ERROR = 'exc'
def __init__(self, state=None, result=None):
self.state = state
self.result = result
@classmethod
def ok(cls, result):
return cls(cls.OK, result)
@classmethod
def exception(cls, exc):
exc = (exc.__class__, exc.args)
return cls(cls.ERROR, exc)
def __repr__(self):
return '<Result {}: {!r}>'.format(self.state, self.result)
def __eq__(self, other):
return self.state == other.state and self.result == other.result
def result(f, *args, **kw):
try:
return Result.ok(f(*args, **kw))
except Exception as e:
return Result.exception(e)
class IFoo(Opaque):
foo = Attribute()
@implements(IFoo)
class FooImplements:
foo = 5
bar = 6
class FooImplementsRegistered:
foo = 5
bar = 6
IFoo.register_implementation(FooImplementsRegistered)
@implements(IFoo)
class FooProvider:
def __init__(self):
self.foo = 5
self.bar = 6
class FooProviderRegistered:
def __init__(self):
self.foo = 5
self.bar = 6
IFoo.register_implementation(FooProviderRegistered)
@implements(DynamicInterface)
class FooDynamic:
def __getattr__(self, name):
if name == 'foo':
return 5
elif name == 'bar':
return 6
return super().__getattr__(name)
def provides_interface(self, interface):
return interface.implemented_by(IFoo)
class WhenInterfaceHasAttribute:
def test_get_internal_attribute_fails(self):
"""Caller cannot see the interface's hidden attributes."""
# Interface does have a provider attribute
object.__getattribute__(self.inf, 'provider')
# but it is hidden from normal attribute access
with self.assertRaises(AttributeError):
self.inf.provider
def test_get_attribute_in_interface(self):
self.assertEqual(
result(lambda: self.obj.foo),
result(lambda: self.inf.foo)
)
def test_set_attribute_in_interface(self):
self.inf.foo = 9
self.assertEqual(self.obj.foo, 9)
def test_del_attribute_in_interface(self):
with self.assertRaises(InterfaceConformanceError):
del self.inf.foo
class WhenInterfaceDoesNotHaveAttribute:
def test_get_attribute_not_in_interface(self):
with self.assertRaises(AttributeError):
self.inf.bar
def test_set_attribute_not_in_interface(self):
with self.assertRaises(AttributeError):
self.inf.bar = 9
def test_del_attribute_not_in_interface(self):
with self.assertRaises(AttributeError):
del self.inf.bar
def mktest(cls):
class TestClass(
unittest.TestCase, WhenInterfaceHasAttribute,
WhenInterfaceDoesNotHaveAttribute
):
def setUp(self):
self.obj = cls()
self.inf = IFoo(self.obj)
return TestClass
class FooImplementsTests(mktest(FooImplements)):
pass
class FooImplementsRegisteredTests(mktest(FooImplementsRegistered)):
pass
class FooProviderTests(mktest(FooProvider)):
pass
class FooProviderRegisteredTests(mktest(FooProviderRegistered)):
pass
class FooDynamicTests(mktest(FooDynamic)):
pass
| [
"jute.implements",
"jute.Attribute"
] | [((861, 877), 'jute.implements', 'implements', (['IFoo'], {}), '(IFoo)\n', (871, 877), False, 'from jute import Attribute, Opaque, DynamicInterface, implements, InterfaceConformanceError\n'), ((1040, 1056), 'jute.implements', 'implements', (['IFoo'], {}), '(IFoo)\n', (1050, 1056), False, 'from jute import Attribute, Opaque, DynamicInterface, implements, InterfaceConformanceError\n'), ((1297, 1325), 'jute.implements', 'implements', (['DynamicInterface'], {}), '(DynamicInterface)\n', (1307, 1325), False, 'from jute import Attribute, Opaque, DynamicInterface, implements, InterfaceConformanceError\n'), ((846, 857), 'jute.Attribute', 'Attribute', ([], {}), '()\n', (855, 857), False, 'from jute import Attribute, Opaque, DynamicInterface, implements, InterfaceConformanceError\n')] |
"""
Django settings for api project.
Generated by 'django-admin startproject' using Django 1.8.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
import os
from website import settings as osf_settings
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = osf_settings.SECRET_KEY
AUTHENTICATION_BACKENDS = (
'api.base.authentication.backends.ODMBackend',
)
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = osf_settings.DEBUG_MODE
ALLOWED_HOSTS = [
'.osf.io'
]
# Application definition
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.staticfiles',
# 3rd party
'rest_framework',
'rest_framework_swagger',
'raven.contrib.django.raven_compat',
)
# TODO: Are there more granular ways to configure reporting specifically related to the API?
RAVEN_CONFIG = {
'dsn': osf_settings.SENTRY_DSN
}
REST_FRAMEWORK = {
'PAGE_SIZE': 10,
# Order is important here because of a bug in rest_framework_swagger. For now,
# rest_framework.renderers.JSONRenderer needs to be first, at least until
# https://github.com/marcgibbons/django-rest-swagger/issues/271 is resolved.
'DEFAULT_RENDERER_CLASSES': (
'rest_framework.renderers.JSONRenderer',
'api.base.renderers.JSONAPIRenderer',
'rest_framework.renderers.BrowsableAPIRenderer',
),
'DEFAULT_VERSIONING_CLASS': 'rest_framework.versioning.AcceptHeaderVersioning',
'DEFAULT_VERSION': '2.0',
'DEFAULT_FILTER_BACKENDS': ('api.base.filters.ODMOrderingFilter',),
'DEFAULT_PAGINATION_CLASS': 'api.base.pagination.JSONAPIPagination',
'ORDERING_PARAM': 'sort',
'DEFAULT_AUTHENTICATION_CLASSES': (
# Custom auth classes
'api.base.authentication.drf.OSFBasicAuthentication',
'api.base.authentication.drf.OSFSessionAuthentication',
'api.base.authentication.drf.OSFCASAuthentication'
),
}
MIDDLEWARE_CLASSES = (
# TokuMX transaction support
# Needs to go before CommonMiddleware, so that transactions are always started,
# even in the event of a redirect. CommonMiddleware may cause other middlewares'
# process_request to be skipped, e.g. when a trailing slash is omitted
'api.base.middleware.DjangoGlobalMiddleware',
'api.base.middleware.TokuTransactionsMiddleware',
# 'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
# 'django.contrib.auth.middleware.AuthenticationMiddleware',
# 'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
# 'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True
}]
ROOT_URLCONF = 'api.base.urls'
WSGI_APPLICATION = 'api.base.wsgi.application'
LANGUAGE_CODE = 'en-us'
# Disabled to make a test work (TestNodeLog.test_formatted_date)
# TODO Try to understand what's happening to cause the test to break when that line is active.
# TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_ROOT = os.path.join(BASE_DIR, 'static/vendor')
API_BASE = 'v2/'
STATIC_URL = '/static/'
STATICFILES_DIRS = (
('rest_framework_swagger/css', os.path.join(BASE_DIR, 'static/css')),
('rest_framework_swagger/images', os.path.join(BASE_DIR, 'static/images')),
)
SWAGGER_SETTINGS = {
'info': {
'api_path': '/',
'description':
"""
        <p>Welcome to the V2 Open Science Framework API. With this API you can programmatically access users,
projects, components, and files from the <a href="https://osf.io/">Open Science Framework</a>. The Open Science
Framework is a website that
integrates with the scientist's daily workflow. OSF helps document and archive study designs, materials, and data.
OSF facilitates sharing of materials and data within a research group or between groups. OSF also facilitates
transparency of research and provides a network design that details and credits individual
contributions for all aspects of the research process.</p>
<p>NOTE: This API is currently in beta. The beta period should be fairly short, but until then, details about
the api could change. Once this notice disappears, it will be replaced with a description of how long we will
support the current api and under what circumstances it might change.</p>
<h2>General API Usage</h2>
<p>Each endpoint will have its own documentation, but there are some general things that should work pretty much everywhere.</p>
<h3>Filtering</h3>
<p>Collections can be filtered by adding a query parameter in the form:</p>
<pre>filter[<fieldname>]=<matching information></pre>
<p>For example, if you were trying to find <a href="http://en.wikipedia.org/wiki/Lise_Meitner">Lise Meitner</a>:</p>
<pre>/users?filter[fullname]=meitn</pre>
<p>You can filter on multiple fields, or the same field in different ways, by &-ing the query parameters together.</p>
<pre>/users?filter[fullname]=lise&filter[family_name]=mei</pre>
<h3>Links</h3>
<p>Responses will generally have associated links. These are helpers to keep you from having to construct
URLs in your code or by hand. If you know the route to a high-level resource, then feel free to just go to that
route. For example, going to:</p>
<pre>/nodes/<node_id></pre>
<p>is a perfectly good route to create rather than going to /nodes/ and navigating from there by filtering by id
(which would be ridiculous). However, if you are creating something that crawls the structure of a node
going to child node or gathering children, contributors, and similar related resources, then grab the link from
the object you\'re crawling rather than constructing the link yourself.
In general, links include:</p>
<ol>
<li>1. "Related" links, which will give you detail information on individual items or a collection of related resources;</li>
<li>2. "Self" links, which is what you use for general REST operations (POST, DELETE, and so on);</li>
<li>3. Pagination links such as "next", "prev", "first", and "last". These are great for navigating long lists of information.</li></ol>
<p>Some routes may have extra rules for links, especially if those links work with external services. Collections
may have counts with them to indicate how many items are in that collection.</p>""",
'title': 'OSF API Documentation',
},
'doc_expansion': 'list',
}
| [
"os.path.abspath",
"os.path.join"
] | [((3809, 3848), 'os.path.join', 'os.path.join', (['BASE_DIR', '"""static/vendor"""'], {}), "(BASE_DIR, 'static/vendor')\n", (3821, 3848), False, 'import os\n'), ((3948, 3984), 'os.path.join', 'os.path.join', (['BASE_DIR', '"""static/css"""'], {}), "(BASE_DIR, 'static/css')\n", (3960, 3984), False, 'import os\n'), ((4025, 4064), 'os.path.join', 'os.path.join', (['BASE_DIR', '"""static/images"""'], {}), "(BASE_DIR, 'static/images')\n", (4037, 4064), False, 'import os\n'), ((417, 442), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (432, 442), False, 'import os\n'), ((3326, 3361), 'os.path.join', 'os.path.join', (['BASE_DIR', '"""templates"""'], {}), "(BASE_DIR, 'templates')\n", (3338, 3361), False, 'import os\n')] |
#!python
# coding=utf-8
import logging
import pkg_resources
import simplejson as json
from datetime import datetime
import pytz
import click
import sqlalchemy as sql
from sqlalchemy.dialects.postgresql import insert
from dbsink import L, ea, log_format, utils
def get_mappings():
return {
e.name: e.resolve() for e in pkg_resources.iter_entry_points('dbsink.maps')
}
@click.command()
@click.option('--brokers', type=str, required=True, default='localhost:4001', help="Kafka broker string (comman separated).")
@click.option('--topic', type=str, required=True, default='axds-netcdf-replayer-data', help="Kafka topic to send the data to. '-value' is auto appended if using avro packing.")
@click.option('--table', type=str, required=False, default='', help="Name of the table to sink into. Defaults to the topic name.")
@click.option('--lookup', type=str, required=False, default='JsonMap', help="Lookup name to use to find the correct table format (default: JsonMap).")
@click.option('--db', type=str, required=True, default='postgresql+psycopg2://sink:sink@localhost:30300/sink', help="SQLAlchemy compatible postgres connection string.")
@click.option('--schema', type=str, required=True, default='public', help="Database schema to use (default: public).")
@click.option('--consumer', type=str, default='', help="Consumer group to listen with (default: random).")
@click.option('--offset', type=str, default='largest', help="Kafka offset to start with (default: largest).")
@click.option('--packing', type=click.Choice(['json', 'avro', 'msgpack']), default='json', help="The data unpacking algorithm to use (default: json).")
@click.option('--registry', type=str, default='http://localhost:4002', help="URL to a Schema Registry if avro packing is requested")
@click.option('--drop/--no-drop', default=False, help="Drop the table first")
@click.option('--truncate/--no-truncate', default=False, help="Truncate the table first")
@click.option('--logfile', type=str, default='', help="File to log messages to (default: stdout).")
@click.option('--listen/--no-listen', default=True, help="Whether to listen for messages.")
@click.option('--do-inserts/--no-do-inserts', default=True, help="Whether to insert data into a database.")
@click.option('--datafile', type=str, default='', help="File to pull messages from instead of listening for messages.")
@click.option('-v', '--verbose', count=True, help="Control the output verbosity, use up to 3 times (-vvv)")
# Filters
@click.option('--start_date', type=click.DateTime(), required=False, default=None, help="Start date filter passed to each mapping class (UTC)")
@click.option('--end_date', type=click.DateTime(), required=False, default=None, help="End date filter passed to each mapping class (UTC)")
def setup(brokers, topic, table, lookup, db, schema, consumer, offset, packing, registry, drop, truncate, logfile, listen, do_inserts, datafile, verbose, start_date, end_date):
if logfile:
handler = logging.FileHandler(logfile)
handler.setFormatter(log_format)
ea.addHandler(handler)
L.addHandler(handler)
if verbose == 0:
ea.setLevel(logging.INFO)
L.setLevel(logging.INFO)
elif verbose >= 1:
ea.setLevel(logging.DEBUG)
L.setLevel(logging.DEBUG)
# If no specific table was specified, use the topic name
if not table:
table = topic
    # Be sure an empty string passed in is interpreted as a None offset, meaning
# use the consumer based offsets stored by zoo/kafka
if not offset:
offset = None
# Get consumer and unpack/pack information based on packing
consume_cls, consume_kw, unpack, pack = utils.get_kafka_consumer(
brokers=brokers.split(','),
topic=topic,
offset=offset,
packing=packing,
consumer=consumer,
registry=registry
)
filters = {}
if isinstance(start_date, datetime):
filters['start_date'] = start_date.replace(tzinfo=pytz.utc)
if isinstance(end_date, datetime):
filters['end_date'] = end_date.replace(tzinfo=pytz.utc)
# Get the mapping object from the lookup parameter
mappings = get_mappings()
mapping = mappings[lookup](topic, table=table, filters=filters)
L.debug(f'Using mapping: {lookup}, topic: {topic}, table: {mapping.table}, filters: {len(filters)}')
if do_inserts is True:
""" Database connection and setup
"""
engine = sql.create_engine(
db,
pool_size=5,
max_overflow=100,
pool_recycle=3600,
pool_pre_ping=True,
client_encoding='utf8',
use_native_hstore=True,
echo=verbose >= 2
)
# Create schema
engine.execute(f"CREATE SCHEMA if not exists {schema}")
# Add HSTORE extension
engine.execute("CREATE EXTENSION if not exists hstore cascade")
if drop is True:
L.info(f'Dropping table {mapping.table}')
engine.execute(sql.text(f'DROP TABLE IF EXISTS \"{mapping.table}\"'))
# If we didn't drop the table, we should now truncate it.
# There is no need to truncate if we just dropped the table.
if drop is False and truncate is True:
L.info(f'Truncating table {mapping.table}')
try:
engine.execute(sql.text(f'TRUNCATE TABLE \"{mapping.table}\" RESTART IDENTITY'))
except BaseException as e:
L.error(f'Could not truncate table: {e}')
# Reflect to see if this table already exists. Create or update it.
meta = sql.MetaData(engine, schema=schema)
meta.reflect()
if f'{schema}.{mapping.table}' not in meta.tables:
sqltable = sql.Table(mapping.table, meta, *mapping.schema)
else:
sqltable = sql.Table(
mapping.table,
meta,
*mapping.schema,
autoload=True,
keep_existing=False,
extend_existing=True
)
meta.create_all(tables=[sqltable])
def on_recieve(k, v):
if v is not None and unpack:
try:
v = unpack(v)
except BaseException:
L.error(f'Error unpacking message using {packing}: {v}')
return
# Custom conversion function for the table
try:
newkey, newvalues = mapping.message_to_values(k, v)
except utils.MessageFiltered as e:
L.debug(e)
return
except BaseException as e:
L.error(f'Skipping {v}, message could not be converted to a row - {repr(e)}')
return
if do_inserts:
# I wonder if we can just do set_=v? Other seem to extract the
# exact columns to update but this method is currently working...
# https://gist.github.com/bhtucker/c40578a2fb3ca50b324e42ef9dce58e1
insert_cmd = insert(sqltable).values(newvalues)
if mapping.upsert_constraint_name is not None:
upsert_cmd = insert_cmd.on_conflict_do_update(
constraint=mapping.upsert_constraint_name,
set_=newvalues
)
res = engine.execute(upsert_cmd)
mode = 'inserted/updated'
else:
res = engine.execute(insert_cmd)
mode = 'inserted'
res.close()
L.debug(f'{mode} row {res.inserted_primary_key}')
if datafile:
with open(datafile) as f:
messages = json.load(f)
for m in messages:
on_recieve(None, pack(m))
elif listen is True:
c = consume_cls(**consume_kw)
c.consume(
on_recieve=on_recieve,
initial_wait=1,
timeout=10,
cleanup_every=100,
loop=True
)
def run():
setup(auto_envvar_prefix='DBSINK')
if __name__ == '__main__':
run()
| [
"click.Choice",
"sqlalchemy.Table",
"dbsink.ea.addHandler",
"dbsink.L.setLevel",
"simplejson.load",
"sqlalchemy.MetaData",
"click.DateTime",
"click.option",
"sqlalchemy.create_engine",
"pkg_resources.iter_entry_points",
"dbsink.L.error",
"logging.FileHandler",
"dbsink.L.info",
"click.command",
"sqlalchemy.dialects.postgresql.insert",
"dbsink.ea.setLevel",
"dbsink.L.addHandler",
"sqlalchemy.text",
"dbsink.L.debug"
] | [((391, 406), 'click.command', 'click.command', ([], {}), '()\n', (404, 406), False, 'import click\n'), ((408, 536), 'click.option', 'click.option', (['"""--brokers"""'], {'type': 'str', 'required': '(True)', 'default': '"""localhost:4001"""', 'help': '"""Kafka broker string (comman separated)."""'}), "('--brokers', type=str, required=True, default='localhost:4001',\n help='Kafka broker string (comman separated).')\n", (420, 536), False, 'import click\n'), ((535, 725), 'click.option', 'click.option', (['"""--topic"""'], {'type': 'str', 'required': '(True)', 'default': '"""axds-netcdf-replayer-data"""', 'help': '"""Kafka topic to send the data to. \'-value\' is auto appended if using avro packing."""'}), '(\'--topic\', type=str, required=True, default=\n \'axds-netcdf-replayer-data\', help=\n "Kafka topic to send the data to. \'-value\' is auto appended if using avro packing."\n )\n', (547, 725), False, 'import click\n'), ((715, 849), 'click.option', 'click.option', (['"""--table"""'], {'type': 'str', 'required': '(False)', 'default': '""""""', 'help': '"""Name of the table to sink into. Defaults to the topic name."""'}), "('--table', type=str, required=False, default='', help=\n 'Name of the table to sink into. Defaults to the topic name.')\n", (727, 849), False, 'import click\n'), ((849, 1015), 'click.option', 'click.option', (['"""--lookup"""'], {'type': 'str', 'required': '(False)', 'default': '"""JsonMap"""', 'help': '"""Lookup name to use to find the correct table format (default: the topic name)."""'}), "('--lookup', type=str, required=False, default='JsonMap', help=\n 'Lookup name to use to find the correct table format (default: the topic name).'\n )\n", (861, 1015), False, 'import click\n'), ((1009, 1186), 'click.option', 'click.option', (['"""--db"""'], {'type': 'str', 'required': '(True)', 'default': '"""postgresql+psycopg2://sink:sink@localhost:30300/sink"""', 'help': '"""SQLAlchemy compatible postgres connection string."""'}), "('--db', type=str, required=True, default=\n 'postgresql+psycopg2://sink:sink@localhost:30300/sink', help=\n 'SQLAlchemy compatible postgres connection string.')\n", (1021, 1186), False, 'import click\n'), ((1184, 1306), 'click.option', 'click.option', (['"""--schema"""'], {'type': 'str', 'required': '(True)', 'default': '"""public"""', 'help': '"""Database schema to use (default: public)."""'}), "('--schema', type=str, required=True, default='public', help=\n 'Database schema to use (default: public).')\n", (1196, 1306), False, 'import click\n'), ((1305, 1415), 'click.option', 'click.option', (['"""--consumer"""'], {'type': 'str', 'default': '""""""', 'help': '"""Consumer group to listen with (default: random)."""'}), "('--consumer', type=str, default='', help=\n 'Consumer group to listen with (default: random).')\n", (1317, 1415), False, 'import click\n'), ((1412, 1525), 'click.option', 'click.option', (['"""--offset"""'], {'type': 'str', 'default': '"""largest"""', 'help': '"""Kafka offset to start with (default: largest)."""'}), "('--offset', type=str, default='largest', help=\n 'Kafka offset to start with (default: largest).')\n", (1424, 1525), False, 'import click\n'), ((1677, 1813), 'click.option', 'click.option', (['"""--registry"""'], {'type': 'str', 'default': '"""http://localhost:4002"""', 'help': '"""URL to a Schema Registry if avro packing is requested"""'}), "('--registry', type=str, default='http://localhost:4002', help=\n 'URL to a Schema Registry if avro packing is requested')\n", (1689, 1813), False, 'import click\n'), ((1810, 
1886), 'click.option', 'click.option', (['"""--drop/--no-drop"""'], {'default': '(False)', 'help': '"""Drop the table first"""'}), "('--drop/--no-drop', default=False, help='Drop the table first')\n", (1822, 1886), False, 'import click\n'), ((1888, 1981), 'click.option', 'click.option', (['"""--truncate/--no-truncate"""'], {'default': '(False)', 'help': '"""Truncate the table first"""'}), "('--truncate/--no-truncate', default=False, help=\n 'Truncate the table first')\n", (1900, 1981), False, 'import click\n'), ((1978, 2081), 'click.option', 'click.option', (['"""--logfile"""'], {'type': 'str', 'default': '""""""', 'help': '"""File to log messages to (default: stdout)."""'}), "('--logfile', type=str, default='', help=\n 'File to log messages to (default: stdout).')\n", (1990, 2081), False, 'import click\n'), ((2079, 2174), 'click.option', 'click.option', (['"""--listen/--no-listen"""'], {'default': '(True)', 'help': '"""Whether to listen for messages."""'}), "('--listen/--no-listen', default=True, help=\n 'Whether to listen for messages.')\n", (2091, 2174), False, 'import click\n'), ((2171, 2282), 'click.option', 'click.option', (['"""--do-inserts/--no-do-inserts"""'], {'default': '(True)', 'help': '"""Whether to insert data into a database."""'}), "('--do-inserts/--no-do-inserts', default=True, help=\n 'Whether to insert data into a database.')\n", (2183, 2282), False, 'import click\n'), ((2279, 2402), 'click.option', 'click.option', (['"""--datafile"""'], {'type': 'str', 'default': '""""""', 'help': '"""File to pull messages from instead of listening for messages."""'}), "('--datafile', type=str, default='', help=\n 'File to pull messages from instead of listening for messages.')\n", (2291, 2402), False, 'import click\n'), ((2399, 2510), 'click.option', 'click.option', (['"""-v"""', '"""--verbose"""'], {'count': '(True)', 'help': '"""Control the output verbosity, use up to 3 times (-vvv)"""'}), "('-v', '--verbose', count=True, help=\n 'Control the output verbosity, use up to 3 times (-vvv)')\n", (2411, 2510), False, 'import click\n'), ((3014, 3042), 'logging.FileHandler', 'logging.FileHandler', (['logfile'], {}), '(logfile)\n', (3033, 3042), False, 'import logging\n'), ((3092, 3114), 'dbsink.ea.addHandler', 'ea.addHandler', (['handler'], {}), '(handler)\n', (3105, 3114), False, 'from dbsink import L, ea, log_format, utils\n'), ((3123, 3144), 'dbsink.L.addHandler', 'L.addHandler', (['handler'], {}), '(handler)\n', (3135, 3144), False, 'from dbsink import L, ea, log_format, utils\n'), ((3175, 3200), 'dbsink.ea.setLevel', 'ea.setLevel', (['logging.INFO'], {}), '(logging.INFO)\n', (3186, 3200), False, 'from dbsink import L, ea, log_format, utils\n'), ((3209, 3233), 'dbsink.L.setLevel', 'L.setLevel', (['logging.INFO'], {}), '(logging.INFO)\n', (3219, 3233), False, 'from dbsink import L, ea, log_format, utils\n'), ((4496, 4662), 'sqlalchemy.create_engine', 'sql.create_engine', (['db'], {'pool_size': '(5)', 'max_overflow': '(100)', 'pool_recycle': '(3600)', 'pool_pre_ping': '(True)', 'client_encoding': '"""utf8"""', 'use_native_hstore': '(True)', 'echo': '(verbose >= 2)'}), "(db, pool_size=5, max_overflow=100, pool_recycle=3600,\n pool_pre_ping=True, client_encoding='utf8', use_native_hstore=True,\n echo=verbose >= 2)\n", (4513, 4662), True, 'import sqlalchemy as sql\n'), ((5657, 5692), 'sqlalchemy.MetaData', 'sql.MetaData', (['engine'], {'schema': 'schema'}), '(engine, schema=schema)\n', (5669, 5692), True, 'import sqlalchemy as sql\n'), ((1556, 1597), 'click.Choice', 'click.Choice', 
(["['json', 'avro', 'msgpack']"], {}), "(['json', 'avro', 'msgpack'])\n", (1568, 1597), False, 'import click\n'), ((2551, 2567), 'click.DateTime', 'click.DateTime', ([], {}), '()\n', (2565, 2567), False, 'import click\n'), ((2695, 2711), 'click.DateTime', 'click.DateTime', ([], {}), '()\n', (2709, 2711), False, 'import click\n'), ((335, 381), 'pkg_resources.iter_entry_points', 'pkg_resources.iter_entry_points', (['"""dbsink.maps"""'], {}), "('dbsink.maps')\n", (366, 381), False, 'import pkg_resources\n'), ((3265, 3291), 'dbsink.ea.setLevel', 'ea.setLevel', (['logging.DEBUG'], {}), '(logging.DEBUG)\n', (3276, 3291), False, 'from dbsink import L, ea, log_format, utils\n'), ((3300, 3325), 'dbsink.L.setLevel', 'L.setLevel', (['logging.DEBUG'], {}), '(logging.DEBUG)\n', (3310, 3325), False, 'from dbsink import L, ea, log_format, utils\n'), ((4991, 5032), 'dbsink.L.info', 'L.info', (['f"""Dropping table {mapping.table}"""'], {}), "(f'Dropping table {mapping.table}')\n", (4997, 5032), False, 'from dbsink import L, ea, log_format, utils\n'), ((5310, 5353), 'dbsink.L.info', 'L.info', (['f"""Truncating table {mapping.table}"""'], {}), "(f'Truncating table {mapping.table}')\n", (5316, 5353), False, 'from dbsink import L, ea, log_format, utils\n'), ((5798, 5845), 'sqlalchemy.Table', 'sql.Table', (['mapping.table', 'meta', '*mapping.schema'], {}), '(mapping.table, meta, *mapping.schema)\n', (5807, 5845), True, 'import sqlalchemy as sql\n'), ((5883, 5992), 'sqlalchemy.Table', 'sql.Table', (['mapping.table', 'meta', '*mapping.schema'], {'autoload': '(True)', 'keep_existing': '(False)', 'extend_existing': '(True)'}), '(mapping.table, meta, *mapping.schema, autoload=True,\n keep_existing=False, extend_existing=True)\n', (5892, 5992), True, 'import sqlalchemy as sql\n'), ((7524, 7573), 'dbsink.L.debug', 'L.debug', (['f"""{mode} row {res.inserted_primary_key}"""'], {}), "(f'{mode} row {res.inserted_primary_key}')\n", (7531, 7573), False, 'from dbsink import L, ea, log_format, utils\n'), ((7649, 7661), 'simplejson.load', 'json.load', (['f'], {}), '(f)\n', (7658, 7661), True, 'import simplejson as json\n'), ((5060, 5111), 'sqlalchemy.text', 'sql.text', (['f"""DROP TABLE IF EXISTS "{mapping.table}\\""""'], {}), '(f\'DROP TABLE IF EXISTS "{mapping.table}"\')\n', (5068, 5111), True, 'import sqlalchemy as sql\n'), ((6567, 6577), 'dbsink.L.debug', 'L.debug', (['e'], {}), '(e)\n', (6574, 6577), False, 'from dbsink import L, ea, log_format, utils\n'), ((5402, 5464), 'sqlalchemy.text', 'sql.text', (['f"""TRUNCATE TABLE "{mapping.table}" RESTART IDENTITY"""'], {}), '(f\'TRUNCATE TABLE "{mapping.table}" RESTART IDENTITY\')\n', (5410, 5464), True, 'import sqlalchemy as sql\n'), ((5523, 5564), 'dbsink.L.error', 'L.error', (['f"""Could not truncate table: {e}"""'], {}), "(f'Could not truncate table: {e}')\n", (5530, 5564), False, 'from dbsink import L, ea, log_format, utils\n'), ((6303, 6359), 'dbsink.L.error', 'L.error', (['f"""Error unpacking message using {packing}: {v}"""'], {}), "(f'Error unpacking message using {packing}: {v}')\n", (6310, 6359), False, 'from dbsink import L, ea, log_format, utils\n'), ((7023, 7039), 'sqlalchemy.dialects.postgresql.insert', 'insert', (['sqltable'], {}), '(sqltable)\n', (7029, 7039), False, 'from sqlalchemy.dialects.postgresql import insert\n')] |
import worek.dialects.postgres as pgdialect
from worek.exc import WorekException
class WorekOperationException(WorekException):
pass
def backup(backup_file, backup_type='full', **params):
"""Create backup of the database to the backup file
    :param backup_file: The file to send the backup to; this can be any file-like object, including
        a stream; sys.stdin and sys.stdout should work no problem.
:param backup_type: The type of database backup requested. The only option is 'full'.
:param driver: the driver to use for connecting to the database
:param host: the host of the database server
:param port: the port of the database server
:param user: the user of the database server
    :param password: the password of the database server
:param dbname: the database name to backup
:param saengine: an optional sqlalchemy engine, if this is passed, this will be used for the
backup and other connection type parameters (e.g. driver, host, port) will be ignored
:param version: version of PG client executables to use
"""
PG = pgdialect.Postgres(
engine=params.get('saengine') or pgdialect.Postgres.construct_engine_from_params(**params),
schemas=params.get('schemas'),
version=params.get('version'),
)
if not PG.engine_can_connect:
raise WorekOperationException('Can\'t connect to the database.')
if backup_type == 'full':
PG.backup_binary(backup_file)
else:
raise NotImplementedError('Only full backups are available at this time.')
def restore(restore_file, file_format=None, clean_existing_database=True, **params):
"""Restore a backup file to the specified database
    :param restore_file: The file to pull the backup from, this can be any file-like object,
        including a stream; sys.stdin and sys.stdout should work no problem.
:param file_format: an optional file format. By default we try to be smart about this and detect
the type of file, but sometimes we can't and this allows hard setting it.
:param clean_existing_database: clean an existing database before restore
:param driver: the driver to use for connecting to the database
:param host: the host of the database server
:param port: the port of the database server
:param user: the user of the database server
:param password: the password of the database server
:param dbname: the database name to backup
    :param saengine: an optional sqlalchemy engine, if this is passed, this will be used for the
        restore and other connection type parameters (e.g. driver, host, port) will be ignored
:param version: version of PG client executables to use
"""
PG = pgdialect.Postgres(
engine=params.get('saengine') or pgdialect.Postgres.construct_engine_from_params(**params),
schemas=params.get('schemas'),
version=params.get('version'),
)
if not PG.engine_can_connect:
raise WorekOperationException('Can\'t connect to the database.')
if clean_existing_database:
PG.clean_existing_database()
# perform the restore
if file_format == 'c':
return PG.restore_binary(restore_file)
elif file_format == 't':
return PG.restore_text(restore_file)
elif file_format is None:
return PG.restore(restore_file)
else:
raise NotImplementedError(
'Got an unexpected file_format. {} is not a valid type, expecting'
' "c", "t", or nothing.'.format(file_format)
)
| [
"worek.dialects.postgres.Postgres.construct_engine_from_params"
] | [((1155, 1212), 'worek.dialects.postgres.Postgres.construct_engine_from_params', 'pgdialect.Postgres.construct_engine_from_params', ([], {}), '(**params)\n', (1202, 1212), True, 'import worek.dialects.postgres as pgdialect\n'), ((2797, 2854), 'worek.dialects.postgres.Postgres.construct_engine_from_params', 'pgdialect.Postgres.construct_engine_from_params', ([], {}), '(**params)\n', (2844, 2854), True, 'import worek.dialects.postgres as pgdialect\n')] |
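A minimal usage sketch for the `backup`/`restore` helpers above, assuming the keyword names documented in their docstrings; the import path and connection values are illustrative placeholders, not taken from the package.
# Hypothetical driver for the worek backup()/restore() API above.
# The import path and connection details are assumptions for illustration only.
from worek.operations import backup, restore  # assumed module path

conn = dict(host="localhost", port=5432, user="postgres",
            password="secret", dbname="appdb")

# Full binary backup streamed to a file on disk.
with open("appdb.dump", "wb") as fp:
    backup(fp, backup_type="full", **conn)

# Restore it later, letting the dialect sniff the dump format (file_format=None),
# after cleaning out whatever is currently in the target database.
with open("appdb.dump", "rb") as fp:
    restore(fp, file_format=None, clean_existing_database=True, **conn)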
import logging
from os import environ
from typing import TYPE_CHECKING, Literal, NamedTuple, cast
import disnake
if TYPE_CHECKING:
from monty.log import MontyLogger
__all__ = (
"Client",
"Colours",
"Emojis",
"Icons",
"Stats",
"Tokens",
"RedisConfig",
"ERROR_REPLIES",
"NEGATIVE_REPLIES",
"POSITIVE_REPLIES",
)
# due to recursive imports, we have to use this
log = cast("MontyLogger", logging.getLogger(__name__))
class Client:
name = "Monty Python"
redis_prefix = config_prefix = "monty-python"
version = environ.get("GIT_SHA", "main")
default_command_prefix = environ.get("PREFIX", "-")
token = environ.get("BOT_TOKEN")
debug = environ.get("BOT_DEBUG", "true").lower() == "true"
github_bot_repo = "https://github.com/onerandomusername/monty-python"
trace_loggers = environ.get("BOT_TRACE_LOGGERS")
log_mode: Literal["daily", "dev"] = "daily" if "daily" == environ.get("BOT_LOG_MODE", "dev").lower() else "dev"
extensions = environ.get("BOT_EXTENSIONS", None) and {
ext.strip() for ext in environ.get("BOT_EXTENSIONS").split(",") # type: ignore reportOptionalMemberAccess
}
support_server = "mPscM4FjWB"
invite_permissions = disnake.Permissions(
view_channel=True,
send_messages=True,
send_messages_in_threads=True,
manage_messages=True,
manage_threads=True,
embed_links=True,
attach_files=True,
read_message_history=True,
add_reactions=True,
use_external_emojis=True,
# these are enabled for future features, but not currently used
change_nickname=True,
create_public_threads=True,
create_private_threads=True,
view_audit_log=True,
)
DEBUG_MODE = Client.debug
class Database:
postgres_bind: str = environ.get("DB_BIND", "")
run_migrations: bool = not (environ.get("DB_RUN_MIGRATIONS", "true").lower() == "false")
migration_target: str = environ.get("DB_MIGRATION_TARGET", "head")
class CodeBlock:
channel_whitelist: list[int] = []
cooldown_channels: list[int] = []
cooldown_seconds: int = 300
minimum_lines: int = 4
class Colours:
white = 0xFFFFFF
blue = 0x0279FD
bright_green = 0x01D277
dark_green = 0x1F8B4C
orange = 0xE67E22
pink = 0xCF84E0
purple = 0xB734EB
soft_green = 0x68C290
soft_orange = 0xF9CB54
soft_red = 0xCD6D6D
yellow = 0xF9F586
python_blue = 0x4B8BBE
python_yellow = 0xFFD43B
grass_green = 0x66FF00
gold = 0xE6C200
class Emojis:
cross_mark = "\u274C"
star = "\u2B50"
christmas_tree = "\U0001F384"
check = "\u2611"
envelope = "\U0001F4E8"
trashcan = environ.get("TRASHCAN_EMOJI", "<:trashcan:637136429717389331>")
trashcan_on_red = environ.get("TRASHCAN_ON_RED_EMOJI", "<:trashcan:976669056587415592>")
trashcat_special = environ.get("TRASHCAT_SPECIAL_EMOJI", "<:catborked:976598820651679794>")
ok_hand = ":ok_hand:"
hand_raised = "\U0001F64B"
black = "<:black_format:928530654143066143>"
upload = "\U0001f4dd"
snekbox = "\U0001f40d"
# These icons are from Github's repo https://github.com/primer/octicons/
discussion_answered = "<:discussion_answered:979267343710584894>"
issue_open = "<:issue_open:882464248951877682>"
issue_closed = "<:issue_closed:882464248972865536>"
issue_closed_completed = "<:issue_closed_completed:979047130847117343>"
issue_closed_unplanned = "<:issue_closed_unplanned:979052245507276840>"
issue_draft = "<:issue_draft:882464249337774130>" # Not currently used by Github, but here for future.
pull_request_open = "<:pull_open:882464248721182842>"
pull_request_closed = "<:pull_closed:882464248989638676>"
pull_request_draft = "<:pull_draft:882464249065136138>"
pull_request_merged = "<:pull_merged:882464249119645787>"
number_emojis = {
1: "\u0031\ufe0f\u20e3",
2: "\u0032\ufe0f\u20e3",
3: "\u0033\ufe0f\u20e3",
4: "\u0034\ufe0f\u20e3",
5: "\u0035\ufe0f\u20e3",
6: "\u0036\ufe0f\u20e3",
7: "\u0037\ufe0f\u20e3",
8: "\u0038\ufe0f\u20e3",
9: "\u0039\ufe0f\u20e3",
}
confirmation = "\u2705"
decline = "\u274c"
x = "\U0001f1fd"
o = "\U0001f1f4"
stackoverflow_tag = "<:stackoverflow_tag:882722838161797181>"
stackoverflow_views = "<:stackoverflow_views:882722838006607922>"
reddit_upvote = "<:reddit_upvote:882722837868195901>"
reddit_comments = "<:reddit_comments:882722838153416705>"
class Endpoints:
app_info = environ.get("APPLICATION_INFO_ENDPOINT")
class Guilds:
disnake = 808030843078836254
nextcord = 881118111967883295
testing = 789603028382122014
class Icons:
questionmark = "https://cdn.discordapp.com/emojis/512367613339369475.png"
bookmark = (
"https://images-ext-2.discordapp.net/external/zl4oDwcmxUILY7sD9ZWE2fU5R7n6QcxEmPYSE5eddbg/"
"%3Fv%3D1/https/cdn.discordapp.com/emojis/654080405988966419.png?width=20&height=20"
)
class URLs:
paste_service = environ.get("PASTE_SERVICE", "")
snekbox_api = environ.get("SNEKBOX_URL")
snekbox_auth = environ.get("SNEKBOX_AUTH")
black_formatter = environ.get("BLACK_API")
black_playground = environ.get("BLACK_PLAYGROUND", "https://black.vercel.app/")
class Paste:
raw_paste_endpoint: str = environ.get("PASTE_SERVICE_RAW", "")
class Stats(NamedTuple):
host = environ.get("STATS_HOST", "localhost")
port = int(environ.get("STATS_PORT", 8125))
prefix = Client.config_prefix
class Tokens(NamedTuple):
github = environ.get("GITHUB_TOKEN")
class RedisConfig(NamedTuple):
uri = environ.get("REDIS_URI", "redis://redis:6379")
use_fakeredis = environ.get("USE_FAKEREDIS", "false").lower() == "true"
prefix = Client.redis_prefix + ":"
class Source:
github = Client.github_bot_repo
github_avatar_url = "https://avatars1.githubusercontent.com/u/9919"
# Bot replies
ERROR_REPLIES = [
"Please don't do that.",
"You have to stop.",
"Do you mind?",
"In the future, don't do that.",
"That was a mistake.",
"You blew it.",
"You're bad at computers.",
"Are you trying to kill me?",
"Noooooo!!",
"I can't believe you've done this",
]
NEGATIVE_REPLIES = [
"Noooooo!!",
"Nope.",
"I'm sorry Dave, I'm afraid I can't do that.",
"I don't think so.",
"Not gonna happen.",
"Out of the question.",
"Huh? No.",
"Nah.",
"Naw.",
"Not likely.",
"No way, José.",
"Not in a million years.",
"Fat chance.",
"Certainly not.",
"NEGATORY.",
"Nuh-uh.",
"Not in my house!",
]
POSITIVE_REPLIES = [
"Yep.",
"Absolutely!",
"Can do!",
"Affirmative!",
"Yeah okay.",
"Sure.",
"Sure thing!",
"You're the boss!",
"Okay.",
"No problem.",
"I got you.",
"Alright.",
"You got it!",
"ROGER THAT",
"Of course!",
"Aye aye, cap'n!",
"I'll allow it.",
]
| [
"logging.getLogger",
"disnake.Permissions",
"os.environ.get"
] | [((432, 459), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (449, 459), False, 'import logging\n'), ((568, 598), 'os.environ.get', 'environ.get', (['"""GIT_SHA"""', '"""main"""'], {}), "('GIT_SHA', 'main')\n", (579, 598), False, 'from os import environ\n'), ((628, 654), 'os.environ.get', 'environ.get', (['"""PREFIX"""', '"""-"""'], {}), "('PREFIX', '-')\n", (639, 654), False, 'from os import environ\n'), ((667, 691), 'os.environ.get', 'environ.get', (['"""BOT_TOKEN"""'], {}), "('BOT_TOKEN')\n", (678, 691), False, 'from os import environ\n'), ((849, 881), 'os.environ.get', 'environ.get', (['"""BOT_TRACE_LOGGERS"""'], {}), "('BOT_TRACE_LOGGERS')\n", (860, 881), False, 'from os import environ\n'), ((1237, 1601), 'disnake.Permissions', 'disnake.Permissions', ([], {'view_channel': '(True)', 'send_messages': '(True)', 'send_messages_in_threads': '(True)', 'manage_messages': '(True)', 'manage_threads': '(True)', 'embed_links': '(True)', 'attach_files': '(True)', 'read_message_history': '(True)', 'add_reactions': '(True)', 'use_external_emojis': '(True)', 'change_nickname': '(True)', 'create_public_threads': '(True)', 'create_private_threads': '(True)', 'view_audit_log': '(True)'}), '(view_channel=True, send_messages=True,\n send_messages_in_threads=True, manage_messages=True, manage_threads=\n True, embed_links=True, attach_files=True, read_message_history=True,\n add_reactions=True, use_external_emojis=True, change_nickname=True,\n create_public_threads=True, create_private_threads=True, view_audit_log\n =True)\n', (1256, 1601), False, 'import disnake\n'), ((1842, 1868), 'os.environ.get', 'environ.get', (['"""DB_BIND"""', '""""""'], {}), "('DB_BIND', '')\n", (1853, 1868), False, 'from os import environ\n'), ((1990, 2032), 'os.environ.get', 'environ.get', (['"""DB_MIGRATION_TARGET"""', '"""head"""'], {}), "('DB_MIGRATION_TARGET', 'head')\n", (2001, 2032), False, 'from os import environ\n'), ((2725, 2788), 'os.environ.get', 'environ.get', (['"""TRASHCAN_EMOJI"""', '"""<:trashcan:637136429717389331>"""'], {}), "('TRASHCAN_EMOJI', '<:trashcan:637136429717389331>')\n", (2736, 2788), False, 'from os import environ\n'), ((2811, 2881), 'os.environ.get', 'environ.get', (['"""TRASHCAN_ON_RED_EMOJI"""', '"""<:trashcan:976669056587415592>"""'], {}), "('TRASHCAN_ON_RED_EMOJI', '<:trashcan:976669056587415592>')\n", (2822, 2881), False, 'from os import environ\n'), ((2905, 2977), 'os.environ.get', 'environ.get', (['"""TRASHCAT_SPECIAL_EMOJI"""', '"""<:catborked:976598820651679794>"""'], {}), "('TRASHCAT_SPECIAL_EMOJI', '<:catborked:976598820651679794>')\n", (2916, 2977), False, 'from os import environ\n'), ((4608, 4648), 'os.environ.get', 'environ.get', (['"""APPLICATION_INFO_ENDPOINT"""'], {}), "('APPLICATION_INFO_ENDPOINT')\n", (4619, 4648), False, 'from os import environ\n'), ((5108, 5140), 'os.environ.get', 'environ.get', (['"""PASTE_SERVICE"""', '""""""'], {}), "('PASTE_SERVICE', '')\n", (5119, 5140), False, 'from os import environ\n'), ((5159, 5185), 'os.environ.get', 'environ.get', (['"""SNEKBOX_URL"""'], {}), "('SNEKBOX_URL')\n", (5170, 5185), False, 'from os import environ\n'), ((5205, 5232), 'os.environ.get', 'environ.get', (['"""SNEKBOX_AUTH"""'], {}), "('SNEKBOX_AUTH')\n", (5216, 5232), False, 'from os import environ\n'), ((5255, 5279), 'os.environ.get', 'environ.get', (['"""BLACK_API"""'], {}), "('BLACK_API')\n", (5266, 5279), False, 'from os import environ\n'), ((5303, 5363), 'os.environ.get', 'environ.get', (['"""BLACK_PLAYGROUND"""', 
'"""https://black.vercel.app/"""'], {}), "('BLACK_PLAYGROUND', 'https://black.vercel.app/')\n", (5314, 5363), False, 'from os import environ\n'), ((5409, 5445), 'os.environ.get', 'environ.get', (['"""PASTE_SERVICE_RAW"""', '""""""'], {}), "('PASTE_SERVICE_RAW', '')\n", (5420, 5445), False, 'from os import environ\n'), ((5484, 5522), 'os.environ.get', 'environ.get', (['"""STATS_HOST"""', '"""localhost"""'], {}), "('STATS_HOST', 'localhost')\n", (5495, 5522), False, 'from os import environ\n'), ((5646, 5673), 'os.environ.get', 'environ.get', (['"""GITHUB_TOKEN"""'], {}), "('GITHUB_TOKEN')\n", (5657, 5673), False, 'from os import environ\n'), ((5717, 5763), 'os.environ.get', 'environ.get', (['"""REDIS_URI"""', '"""redis://redis:6379"""'], {}), "('REDIS_URI', 'redis://redis:6379')\n", (5728, 5763), False, 'from os import environ\n'), ((1015, 1050), 'os.environ.get', 'environ.get', (['"""BOT_EXTENSIONS"""', 'None'], {}), "('BOT_EXTENSIONS', None)\n", (1026, 1050), False, 'from os import environ\n'), ((5538, 5569), 'os.environ.get', 'environ.get', (['"""STATS_PORT"""', '(8125)'], {}), "('STATS_PORT', 8125)\n", (5549, 5569), False, 'from os import environ\n'), ((704, 736), 'os.environ.get', 'environ.get', (['"""BOT_DEBUG"""', '"""true"""'], {}), "('BOT_DEBUG', 'true')\n", (715, 736), False, 'from os import environ\n'), ((5784, 5821), 'os.environ.get', 'environ.get', (['"""USE_FAKEREDIS"""', '"""false"""'], {}), "('USE_FAKEREDIS', 'false')\n", (5795, 5821), False, 'from os import environ\n'), ((944, 978), 'os.environ.get', 'environ.get', (['"""BOT_LOG_MODE"""', '"""dev"""'], {}), "('BOT_LOG_MODE', 'dev')\n", (955, 978), False, 'from os import environ\n'), ((1901, 1941), 'os.environ.get', 'environ.get', (['"""DB_RUN_MIGRATIONS"""', '"""true"""'], {}), "('DB_RUN_MIGRATIONS', 'true')\n", (1912, 1941), False, 'from os import environ\n'), ((1088, 1117), 'os.environ.get', 'environ.get', (['"""BOT_EXTENSIONS"""'], {}), "('BOT_EXTENSIONS')\n", (1099, 1117), False, 'from os import environ\n')] |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.10 on 2018-02-18 21:34
from __future__ import unicode_literals
import django.contrib.gis.db.models.fields
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('smartpanel', '0002_layout_configuration'),
]
operations = [
migrations.AddField(
model_name='screen',
name='layout',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='smartpanel.Layout'),
),
migrations.AddField(
model_name='screen',
name='location',
field=django.contrib.gis.db.models.fields.PointField(null=True, srid=4326),
),
]
| [
"django.db.models.ForeignKey"
] | [((481, 582), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'null': '(True)', 'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""smartpanel.Layout"""'}), "(null=True, on_delete=django.db.models.deletion.CASCADE,\n to='smartpanel.Layout')\n", (498, 582), False, 'from django.db import migrations, models\n')] |
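The migration above only records the schema change; a model definition consistent with its two AddField operations might look like the following sketch (field and model shapes are inferred from the migration, not taken from the smartpanel source).
# Sketch of smartpanel model fields implied by the migration above.
# Inferred for illustration; the real models module may differ.
from django.contrib.gis.db import models as gis_models
from django.db import models


class Screen(models.Model):
    # Nullable link to a Layout, removed together with it (CASCADE).
    layout = models.ForeignKey('smartpanel.Layout', null=True,
                               on_delete=models.CASCADE)
    # Optional WGS84 point giving the screen's physical location.
    location = gis_models.PointField(null=True, srid=4326)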
#!/usr/bin/python
# coding: utf-8
import click
from ..core.helper import import_config_file
from ..settings import default as _default_config
from ..directclient import CDBClient
@click.group()
@click.option('--configfile', '-c', type=click.Path(exists=True))
@click.option('--configclass', type=str)
@click.pass_context
def cli(ctx, configfile, configclass):
"""CattleDB Command Line Tool"""
ctx.ensure_object(dict)
if configfile:
_imported = import_config_file(configfile)
if configclass:
config = getattr(_imported, configclass)
else:
config = _imported
click.echo("Using Config: {}".format(configfile))
else:
config = _default_config
click.echo("Using Default Config")
con = CDBClient.from_config(config)
ctx.obj["client"] = con
ctx.obj["db"] = con.db
ctx.obj["config"] = config
from .base import initdb, dbinfo, newmetric, newevent, runserver, download_timeseries
cli.add_command(initdb)
cli.add_command(dbinfo)
cli.add_command(newmetric)
cli.add_command(newevent)
cli.add_command(runserver)
cli.add_command(download_timeseries)
| [
"click.group",
"click.echo",
"click.option",
"click.Path"
] | [((184, 197), 'click.group', 'click.group', ([], {}), '()\n', (195, 197), False, 'import click\n'), ((265, 304), 'click.option', 'click.option', (['"""--configclass"""'], {'type': 'str'}), "('--configclass', type=str)\n", (277, 304), False, 'import click\n'), ((730, 764), 'click.echo', 'click.echo', (['"""Using Default Config"""'], {}), "('Using Default Config')\n", (740, 764), False, 'import click\n'), ((239, 262), 'click.Path', 'click.Path', ([], {'exists': '(True)'}), '(exists=True)\n', (249, 262), False, 'import click\n')] |
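A sketch of driving the CLI group above with click's test runner, showing how `--configfile`/`--configclass` select the configuration object before the `CDBClient` is built; the config file name, class name and import path are invented for illustration.
# Sketch: invoking the cattledb CLI group above via click's CliRunner.
# "mysettings.py", ProductionConfig and the import path are invented examples.
from click.testing import CliRunner
from cattledb.cli.main import cli  # assumed location of the group above

runner = CliRunner()

# No --configfile: the group falls back to the bundled default settings.
result = runner.invoke(cli, ["dbinfo"])
print(result.output)

# Explicit config module plus the class inside it to use as the config object.
result = runner.invoke(
    cli,
    ["--configfile", "mysettings.py", "--configclass", "ProductionConfig", "dbinfo"],
)
print(result.output)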
# Copyright 2021 The Distla Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Tests for distla_core.io.sparse."""
import os
import multiprocessing
import pathlib
import tempfile
import jax
from jax import numpy as jnp
import numpy as np
import pytest
import scipy as sp
from distla_core.blas.summa import summa
from distla_core.io import sparse
from distla_core.linalg.utils import testutils
from distla_core.utils import pops
# These matrices were written by STRUC_PACK's own I/O module.
THIS_DIR = pathlib.Path(__file__).parent
TEST_MATRIX_PATH = THIS_DIR / "test_matrices/test_matrix01.csc"
TEST_MATRIX_SQUARED_PATH = THIS_DIR / "test_matrices/test_matrix01_squared.csc"
DIMS = (32, 256)
SEEDS = (0,)
@pytest.mark.parametrize("seed", SEEDS)
@pytest.mark.parametrize("length", (2, 100, 1000))
@pytest.mark.parametrize("dtype", (np.int32, np.int64, np.float64))
@pytest.mark.parametrize("offset", (0, 500, 1200))
@pytest.mark.parametrize("buffer_bytes", (8, 100, 2**30))
def test_read_write_elements(seed, length, dtype, offset, buffer_bytes):
element_bytes = np.dtype(dtype).itemsize
rbg = np.random.PCG64(seed) # Random bit generator
length_bytes = element_bytes * length
# Division by 8 because random_raw generates uint64.
array = np.frombuffer(rbg.random_raw(length_bytes // 8), dtype=dtype)
assert array.size == length
with tempfile.TemporaryDirectory() as tempdir:
path = pathlib.Path(f"{tempdir}/tmpfile")
f = os.open(path, os.O_RDWR | os.O_SYNC | os.O_CREAT)
try:
# Set the full file size.
os.lseek(f, offset + length_bytes - 1, 0)
os.write(f, b"\0")
sparse._write_elements(f, array, offset, buffer_bytes=buffer_bytes)
array_read = sparse._read_elements(
f,
dtype,
length,
offset,
buffer_bytes=buffer_bytes,
)
finally:
os.close(f)
np.testing.assert_array_equal(array_read, array)
def _host_block_write(args):
(
matrix,
host_index,
path,
all_nnzs,
num_hosts,
padded_dim,
unpadded_dim,
n_electrons,
) = args
total_nnz = sum(all_nnzs)
csc_file = sparse.StructPackCscFile(
path,
dim=unpadded_dim,
nnz=total_nnz,
n_electrons=n_electrons,
)
csc_file.write_struc_pack_csc_host_block(
matrix,
host_index,
num_hosts,
padded_dim,
all_nnzs,
)
return None
def _host_block_read(args):
(
matrix,
host_index,
path,
all_nnzs,
num_hosts,
padded_dim,
unpadded_dim,
n_electrons,
) = args
total_nnz = sum(all_nnzs)
csc_file = sparse.StructPackCscFile(
path,
dim=unpadded_dim,
nnz=total_nnz,
n_electrons=n_electrons,
)
matrix_undistributed, _ = csc_file.read_struc_pack_csc_host_block(
host_index,
num_hosts,
num_hosts,
)
matrix_undistributed = matrix_undistributed.toarray()
return matrix_undistributed
@pytest.mark.parametrize("num_hosts", (1, 4, 8))
@pytest.mark.parametrize("dim", DIMS)
@pytest.mark.parametrize("density", (0.1, 0.95))
@pytest.mark.parametrize("n_electrons", (10,))
@pytest.mark.parametrize("pad", (0, 3))
@pytest.mark.parametrize("seed", SEEDS)
def test_struc_pack_write_read_host_block(
num_hosts,
dim,
density,
n_electrons,
pad,
seed,
):
"""Test reading and writing STRUC_PACK CSC files emulating a multihost setup.
A multihost setup is emulated using multiprocessing.
Unlike test_struc_pack_write_read, this one does not include the part about changing
matrix distributions on the ASICs, since that is hard to emulate without an
actual asic_cluster slice. Instead, this just writes and reads a matrix between host
memory and the disk.
"""
np.random.seed(seed)
unpadded_dim = dim - pad
matrix = sp.sparse.random(
unpadded_dim,
unpadded_dim,
density=density,
dtype=np.float64,
).toarray()
matrix_padded = np.zeros((dim, dim), dtype=np.float64)
matrix_padded[:unpadded_dim, :unpadded_dim] = matrix
host_indices = tuple(range(num_hosts))
host_block_width = dim // num_hosts
blocks = [
matrix_padded[:, i * host_block_width:(i + 1) * host_block_width]
for i in host_indices
]
all_nnzs = [np.count_nonzero(b) for b in blocks]
with tempfile.TemporaryDirectory() as tempdir:
path = pathlib.Path(f"{tempdir}/tmp_test_matrix.csc")
# Arguments to be passed to the multiprocessing calls.
args = [[b, i, path, all_nnzs, num_hosts, dim, unpadded_dim, n_electrons]
for b, i in zip(blocks, host_indices)]
with multiprocessing.Pool(processes=num_hosts) as pool:
# Wait for writing to be done before reading.
# TODO The _barriers in sparse.py don't work when using emulated hosts,
# which may cause this test to fail intermittently. Figure a way around
# this.
pool.map_async(_host_block_write, args).wait()
blocks = pool.map_async(_host_block_read, args)
blocks.wait()
blocks = blocks.get()
matrix_reconstructed = np.hstack(blocks)
matrix_reconstructed = matrix_reconstructed[:unpadded_dim, :unpadded_dim]
eps = testutils.eps(jax.lax.Precision.HIGHEST, dtype=jnp.float32)
np.testing.assert_allclose(matrix_reconstructed, matrix, rtol=10 * eps)
@pytest.mark.parametrize("dim", DIMS)
@pytest.mark.parametrize("density", (0.1, 0.95))
@pytest.mark.parametrize("n_electrons", (10,))
@pytest.mark.parametrize("unpadded_dim", (11, 32))
@pytest.mark.parametrize("seed", SEEDS)
def test_struc_pack_write_read_full(dim, density, n_electrons, unpadded_dim, seed):
"""Create a random matrix, write it in STRUC_PACK CSC format, and read it back."""
np.random.seed(seed)
matrix = sp.sparse.random(
unpadded_dim,
unpadded_dim,
density=density,
dtype=np.float64,
).toarray()
matrix_padded = np.zeros((dim, dim), dtype=np.float64)
matrix_padded[:unpadded_dim, :unpadded_dim] = matrix
matrix_distributed = pops.distribute_global(matrix_padded)
with tempfile.TemporaryDirectory() as tempdir:
path = pathlib.Path(f"{tempdir}/tmp_test_matrix.csc")
sparse.write_struc_pack_csc(
path,
matrix_distributed,
n_electrons=n_electrons,
unpadded_dim=unpadded_dim,
)
matrix_read, unpadded_dim_read, n_electrons_read = sparse.read_struc_pack_csc(
path)
assert n_electrons_read == n_electrons
assert unpadded_dim_read == unpadded_dim
matrix_undistributed = pops.undistribute_global(matrix_read)
matrix_undistributed = matrix_undistributed[:unpadded_dim, :unpadded_dim]
eps = testutils.eps(jax.lax.Precision.HIGHEST, dtype=jnp.float32)
np.testing.assert_allclose(matrix_undistributed, matrix, rtol=10 * eps)
def test_struc_pack_read_and_square():
"""Read a fixed STRUC_PACK CSC matrix and its square from disk. Check that computing
the square comes out correct.
"""
matrix = sparse.read_struc_pack_csc(TEST_MATRIX_PATH)[0]
matrix_squared_expected = sparse.read_struc_pack_csc(TEST_MATRIX_SQUARED_PATH)[0]
matrix_squared = pops.pmap(lambda x, y: summa.summa(
x,
y,
p_sz=128,
transpose_A=False,
transpose_B=False,
precision=jax.lax.Precision.HIGHEST,
))(matrix, matrix)
eps = testutils.eps(jax.lax.Precision.HIGHEST, dtype=jnp.float32)
# These test matrices are 77 x 77, so handling them with numpy is safe.
norm = np.linalg.norm(matrix_squared_expected)
np.testing.assert_allclose(
matrix_squared,
matrix_squared_expected,
atol=10 * norm * eps,
)
| [
"distla_core.utils.pops.distribute_global",
"numpy.hstack",
"distla_core.io.sparse._write_elements",
"os.lseek",
"distla_core.blas.summa.summa.summa",
"os.open",
"numpy.random.PCG64",
"numpy.count_nonzero",
"numpy.linalg.norm",
"distla_core.linalg.utils.testutils.eps",
"pathlib.Path",
"distla_core.io.sparse.StructPackCscFile",
"numpy.testing.assert_allclose",
"scipy.sparse.random",
"distla_core.utils.pops.undistribute_global",
"numpy.random.seed",
"numpy.dtype",
"numpy.testing.assert_array_equal",
"os.close",
"os.write",
"distla_core.io.sparse.read_struc_pack_csc",
"tempfile.TemporaryDirectory",
"pytest.mark.parametrize",
"numpy.zeros",
"multiprocessing.Pool",
"distla_core.io.sparse.write_struc_pack_csc",
"distla_core.io.sparse._read_elements"
] | [((1320, 1358), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""seed"""', 'SEEDS'], {}), "('seed', SEEDS)\n", (1343, 1358), False, 'import pytest\n'), ((1360, 1409), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""length"""', '(2, 100, 1000)'], {}), "('length', (2, 100, 1000))\n", (1383, 1409), False, 'import pytest\n'), ((1411, 1477), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""dtype"""', '(np.int32, np.int64, np.float64)'], {}), "('dtype', (np.int32, np.int64, np.float64))\n", (1434, 1477), False, 'import pytest\n'), ((1479, 1528), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""offset"""', '(0, 500, 1200)'], {}), "('offset', (0, 500, 1200))\n", (1502, 1528), False, 'import pytest\n'), ((1530, 1588), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""buffer_bytes"""', '(8, 100, 2 ** 30)'], {}), "('buffer_bytes', (8, 100, 2 ** 30))\n", (1553, 1588), False, 'import pytest\n'), ((3566, 3613), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""num_hosts"""', '(1, 4, 8)'], {}), "('num_hosts', (1, 4, 8))\n", (3589, 3613), False, 'import pytest\n'), ((3615, 3651), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""dim"""', 'DIMS'], {}), "('dim', DIMS)\n", (3638, 3651), False, 'import pytest\n'), ((3653, 3700), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""density"""', '(0.1, 0.95)'], {}), "('density', (0.1, 0.95))\n", (3676, 3700), False, 'import pytest\n'), ((3702, 3747), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""n_electrons"""', '(10,)'], {}), "('n_electrons', (10,))\n", (3725, 3747), False, 'import pytest\n'), ((3749, 3787), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""pad"""', '(0, 3)'], {}), "('pad', (0, 3))\n", (3772, 3787), False, 'import pytest\n'), ((3789, 3827), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""seed"""', 'SEEDS'], {}), "('seed', SEEDS)\n", (3812, 3827), False, 'import pytest\n'), ((5896, 5932), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""dim"""', 'DIMS'], {}), "('dim', DIMS)\n", (5919, 5932), False, 'import pytest\n'), ((5934, 5981), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""density"""', '(0.1, 0.95)'], {}), "('density', (0.1, 0.95))\n", (5957, 5981), False, 'import pytest\n'), ((5983, 6028), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""n_electrons"""', '(10,)'], {}), "('n_electrons', (10,))\n", (6006, 6028), False, 'import pytest\n'), ((6030, 6079), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""unpadded_dim"""', '(11, 32)'], {}), "('unpadded_dim', (11, 32))\n", (6053, 6079), False, 'import pytest\n'), ((6081, 6119), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""seed"""', 'SEEDS'], {}), "('seed', SEEDS)\n", (6104, 6119), False, 'import pytest\n'), ((1112, 1134), 'pathlib.Path', 'pathlib.Path', (['__file__'], {}), '(__file__)\n', (1124, 1134), False, 'import pathlib\n'), ((1711, 1732), 'numpy.random.PCG64', 'np.random.PCG64', (['seed'], {}), '(seed)\n', (1726, 1732), True, 'import numpy as np\n'), ((2481, 2529), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['array_read', 'array'], {}), '(array_read, array)\n', (2510, 2529), True, 'import numpy as np\n'), ((2751, 2844), 'distla_core.io.sparse.StructPackCscFile', 'sparse.StructPackCscFile', (['path'], {'dim': 'unpadded_dim', 'nnz': 'total_nnz', 'n_electrons': 'n_electrons'}), '(path, dim=unpadded_dim, nnz=total_nnz, n_electrons\n =n_electrons)\n', (2775, 2844), False, 'from 
distla_core.io import sparse\n'), ((3234, 3327), 'distla_core.io.sparse.StructPackCscFile', 'sparse.StructPackCscFile', (['path'], {'dim': 'unpadded_dim', 'nnz': 'total_nnz', 'n_electrons': 'n_electrons'}), '(path, dim=unpadded_dim, nnz=total_nnz, n_electrons\n =n_electrons)\n', (3258, 3327), False, 'from distla_core.io import sparse\n'), ((4367, 4387), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (4381, 4387), True, 'import numpy as np\n'), ((4563, 4601), 'numpy.zeros', 'np.zeros', (['(dim, dim)'], {'dtype': 'np.float64'}), '((dim, dim), dtype=np.float64)\n', (4571, 4601), True, 'import numpy as np\n'), ((5657, 5674), 'numpy.hstack', 'np.hstack', (['blocks'], {}), '(blocks)\n', (5666, 5674), True, 'import numpy as np\n'), ((5759, 5818), 'distla_core.linalg.utils.testutils.eps', 'testutils.eps', (['jax.lax.Precision.HIGHEST'], {'dtype': 'jnp.float32'}), '(jax.lax.Precision.HIGHEST, dtype=jnp.float32)\n', (5772, 5818), False, 'from distla_core.linalg.utils import testutils\n'), ((5821, 5892), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['matrix_reconstructed', 'matrix'], {'rtol': '(10 * eps)'}), '(matrix_reconstructed, matrix, rtol=10 * eps)\n', (5847, 5892), True, 'import numpy as np\n'), ((6291, 6311), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (6305, 6311), True, 'import numpy as np\n'), ((6460, 6498), 'numpy.zeros', 'np.zeros', (['(dim, dim)'], {'dtype': 'np.float64'}), '((dim, dim), dtype=np.float64)\n', (6468, 6498), True, 'import numpy as np\n'), ((6577, 6614), 'distla_core.utils.pops.distribute_global', 'pops.distribute_global', (['matrix_padded'], {}), '(matrix_padded)\n', (6599, 6614), False, 'from distla_core.utils import pops\n'), ((7077, 7114), 'distla_core.utils.pops.undistribute_global', 'pops.undistribute_global', (['matrix_read'], {}), '(matrix_read)\n', (7101, 7114), False, 'from distla_core.utils import pops\n'), ((7199, 7258), 'distla_core.linalg.utils.testutils.eps', 'testutils.eps', (['jax.lax.Precision.HIGHEST'], {'dtype': 'jnp.float32'}), '(jax.lax.Precision.HIGHEST, dtype=jnp.float32)\n', (7212, 7258), False, 'from distla_core.linalg.utils import testutils\n'), ((7261, 7332), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['matrix_undistributed', 'matrix'], {'rtol': '(10 * eps)'}), '(matrix_undistributed, matrix, rtol=10 * eps)\n', (7287, 7332), True, 'import numpy as np\n'), ((7853, 7912), 'distla_core.linalg.utils.testutils.eps', 'testutils.eps', (['jax.lax.Precision.HIGHEST'], {'dtype': 'jnp.float32'}), '(jax.lax.Precision.HIGHEST, dtype=jnp.float32)\n', (7866, 7912), False, 'from distla_core.linalg.utils import testutils\n'), ((7996, 8035), 'numpy.linalg.norm', 'np.linalg.norm', (['matrix_squared_expected'], {}), '(matrix_squared_expected)\n', (8010, 8035), True, 'import numpy as np\n'), ((8038, 8131), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['matrix_squared', 'matrix_squared_expected'], {'atol': '(10 * norm * eps)'}), '(matrix_squared, matrix_squared_expected, atol=10 *\n norm * eps)\n', (8064, 8131), True, 'import numpy as np\n'), ((1678, 1693), 'numpy.dtype', 'np.dtype', (['dtype'], {}), '(dtype)\n', (1686, 1693), True, 'import numpy as np\n'), ((1961, 1990), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (1988, 1990), False, 'import tempfile\n'), ((2014, 2048), 'pathlib.Path', 'pathlib.Path', (['f"""{tempdir}/tmpfile"""'], {}), "(f'{tempdir}/tmpfile')\n", (2026, 2048), False, 'import pathlib\n'), ((2057, 2106), 
'os.open', 'os.open', (['path', '(os.O_RDWR | os.O_SYNC | os.O_CREAT)'], {}), '(path, os.O_RDWR | os.O_SYNC | os.O_CREAT)\n', (2064, 2106), False, 'import os\n'), ((4867, 4886), 'numpy.count_nonzero', 'np.count_nonzero', (['b'], {}), '(b)\n', (4883, 4886), True, 'import numpy as np\n'), ((4911, 4940), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (4938, 4940), False, 'import tempfile\n'), ((4964, 5010), 'pathlib.Path', 'pathlib.Path', (['f"""{tempdir}/tmp_test_matrix.csc"""'], {}), "(f'{tempdir}/tmp_test_matrix.csc')\n", (4976, 5010), False, 'import pathlib\n'), ((6622, 6651), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (6649, 6651), False, 'import tempfile\n'), ((6675, 6721), 'pathlib.Path', 'pathlib.Path', (['f"""{tempdir}/tmp_test_matrix.csc"""'], {}), "(f'{tempdir}/tmp_test_matrix.csc')\n", (6687, 6721), False, 'import pathlib\n'), ((6726, 6836), 'distla_core.io.sparse.write_struc_pack_csc', 'sparse.write_struc_pack_csc', (['path', 'matrix_distributed'], {'n_electrons': 'n_electrons', 'unpadded_dim': 'unpadded_dim'}), '(path, matrix_distributed, n_electrons=\n n_electrons, unpadded_dim=unpadded_dim)\n', (6753, 6836), False, 'from distla_core.io import sparse\n'), ((6926, 6958), 'distla_core.io.sparse.read_struc_pack_csc', 'sparse.read_struc_pack_csc', (['path'], {}), '(path)\n', (6952, 6958), False, 'from distla_core.io import sparse\n'), ((7510, 7554), 'distla_core.io.sparse.read_struc_pack_csc', 'sparse.read_struc_pack_csc', (['TEST_MATRIX_PATH'], {}), '(TEST_MATRIX_PATH)\n', (7536, 7554), False, 'from distla_core.io import sparse\n'), ((7586, 7638), 'distla_core.io.sparse.read_struc_pack_csc', 'sparse.read_struc_pack_csc', (['TEST_MATRIX_SQUARED_PATH'], {}), '(TEST_MATRIX_SQUARED_PATH)\n', (7612, 7638), False, 'from distla_core.io import sparse\n'), ((2154, 2195), 'os.lseek', 'os.lseek', (['f', '(offset + length_bytes - 1)', '(0)'], {}), '(f, offset + length_bytes - 1, 0)\n', (2162, 2195), False, 'import os\n'), ((2202, 2222), 'os.write', 'os.write', (['f', "b'\\x00'"], {}), "(f, b'\\x00')\n", (2210, 2222), False, 'import os\n'), ((2227, 2294), 'distla_core.io.sparse._write_elements', 'sparse._write_elements', (['f', 'array', 'offset'], {'buffer_bytes': 'buffer_bytes'}), '(f, array, offset, buffer_bytes=buffer_bytes)\n', (2249, 2294), False, 'from distla_core.io import sparse\n'), ((2314, 2388), 'distla_core.io.sparse._read_elements', 'sparse._read_elements', (['f', 'dtype', 'length', 'offset'], {'buffer_bytes': 'buffer_bytes'}), '(f, dtype, length, offset, buffer_bytes=buffer_bytes)\n', (2335, 2388), False, 'from distla_core.io import sparse\n'), ((2467, 2478), 'os.close', 'os.close', (['f'], {}), '(f)\n', (2475, 2478), False, 'import os\n'), ((4426, 4505), 'scipy.sparse.random', 'sp.sparse.random', (['unpadded_dim', 'unpadded_dim'], {'density': 'density', 'dtype': 'np.float64'}), '(unpadded_dim, unpadded_dim, density=density, dtype=np.float64)\n', (4442, 4505), True, 'import scipy as sp\n'), ((5208, 5249), 'multiprocessing.Pool', 'multiprocessing.Pool', ([], {'processes': 'num_hosts'}), '(processes=num_hosts)\n', (5228, 5249), False, 'import multiprocessing\n'), ((6323, 6402), 'scipy.sparse.random', 'sp.sparse.random', (['unpadded_dim', 'unpadded_dim'], {'density': 'density', 'dtype': 'np.float64'}), '(unpadded_dim, unpadded_dim, density=density, dtype=np.float64)\n', (6339, 6402), True, 'import scipy as sp\n'), ((7684, 7791), 'distla_core.blas.summa.summa.summa', 'summa.summa', (['x', 'y'], {'p_sz': '(128)', 
'transpose_A': '(False)', 'transpose_B': '(False)', 'precision': 'jax.lax.Precision.HIGHEST'}), '(x, y, p_sz=128, transpose_A=False, transpose_B=False, precision\n =jax.lax.Precision.HIGHEST)\n', (7695, 7791), False, 'from distla_core.blas.summa import summa\n')] |
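A condensed sketch of the raw element-I/O roundtrip that `test_read_write_elements` above exercises, reusing the call signatures of the private `sparse._write_elements`/`sparse._read_elements` helpers as they appear in the test; the array contents, offset and buffer size are arbitrary.
# Roundtrip through the low-level element I/O helpers, following the call
# pattern of test_read_write_elements above. Sizes and offsets are arbitrary.
import os
import tempfile

import numpy as np

from distla_core.io import sparse

array = np.arange(1000, dtype=np.float64)
offset = 512  # byte offset into the file at which the elements start

with tempfile.TemporaryDirectory() as tempdir:
  path = os.path.join(tempdir, "elements.bin")
  f = os.open(path, os.O_RDWR | os.O_SYNC | os.O_CREAT)
  try:
    # Pre-size the file so a write at an offset lands inside it.
    os.lseek(f, offset + array.nbytes - 1, 0)
    os.write(f, b"\0")
    sparse._write_elements(f, array, offset, buffer_bytes=4096)
    array_read = sparse._read_elements(f, np.float64, array.size, offset,
                                        buffer_bytes=4096)
  finally:
    os.close(f)

np.testing.assert_array_equal(array_read, array)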
'''
Created on Dec 4, 2017
@author: thomasriddick
'''
from os.path import join
import os.path as path
import numpy as np
import time
from Dynamic_HD_Scripts.base import iodriver
from Dynamic_HD_Scripts.base import field
from Dynamic_HD_Scripts.tools import determine_river_directions
from Dynamic_HD_Scripts.tools import extract_lake_volumes
from Dynamic_HD_Scripts.tools import compute_catchments as cc
from Dynamic_HD_Scripts.tools import flow_to_grid_cell as ftgc
from Dynamic_HD_Scripts.tools import connect_coarse_lake_catchments as cclc
from Dynamic_HD_Scripts.tools import dynamic_lake_operators
from Dynamic_HD_Scripts.tools import fill_sinks_driver
from Dynamic_HD_Scripts.tools import river_mouth_marking_driver
from Dynamic_HD_Scripts.utilities import utilities
from Dynamic_HD_Scripts.dynamic_hd_and_dynamic_lake_drivers import dynamic_hd_driver
class Dynamic_Lake_Drivers(dynamic_hd_driver.Dynamic_HD_Drivers):
    '''
    Drivers for preparing orographies and related fields (flow directions,
    minima, lake masks) for dynamic lake model runs.
    '''
def __init__(self):
'''
Constructor
'''
super(Dynamic_Lake_Drivers,self).__init__()
def prepare_orography_ICE5G_0k_uncorrected(self):
file_label = self._generate_file_label()
ICE5G_0k_file = join(self.orography_path,"ice5g_v1_2_00_0k_10min.nc")
ICE5G_0k_orography_fieldname = "orog"
ICE5G_minima_filename = self.generated_minima_filepath+file_label+".nc"
ICE5G_minima_reduced_filename = self.generated_minima_filepath+file_label+"_reduced.nc"
ICE5G_minima_fieldname = "minima"
ICE5G_flowdirs_filename = self.generated_rdir_filepath+file_label+'.nc'
ICE5G_flowdirs_fieldname = "FDIR"
ICE5G_output_orog_filename= self.generated_orography_filepath + file_label + '.nc'
ICE5G_lakemask_filename= join(self.lakemask_filepath,"empty_lakemask.nc")
ICE5G_lakemask_fieldname="lakemask"
ICE5G_landsea_mask_filename=join(self.orography_path,"Ice6g_c_VM5a_10min_0k.nc")
ICE5G_landsea_mask_fieldname="sftlf"
lakemask = field.makeEmptyField(field_type='Generic',dtype=np.int32,grid_type="LatLong10min")
orog = iodriver.advanced_field_loader(ICE5G_0k_file,fieldname=ICE5G_0k_orography_fieldname)
lakemask.grid = orog.get_grid()
iodriver.advanced_field_writer(ICE5G_lakemask_filename,lakemask,
ICE5G_lakemask_fieldname,clobber=True)
dynamic_lake_operators.advanced_local_minima_finding_driver(ICE5G_0k_file,
ICE5G_0k_orography_fieldname,
ICE5G_minima_filename,
ICE5G_minima_fieldname)
dynamic_lake_operators.reduce_connected_areas_to_points(ICE5G_minima_filename,
ICE5G_minima_fieldname,
ICE5G_minima_reduced_filename,
ICE5G_minima_fieldname)
fill_sinks_driver.advanced_sinkless_flow_directions_generator(filename=ICE5G_0k_file,
output_filename=ICE5G_flowdirs_filename,
ls_mask_filename=
ICE5G_landsea_mask_filename,
fieldname=ICE5G_0k_orography_fieldname,
output_fieldname=
ICE5G_flowdirs_fieldname,
ls_mask_fieldname=
ICE5G_landsea_mask_fieldname)
dynamic_lake_operators.advanced_burn_carved_rivers_driver(input_orography_file=
ICE5G_0k_file,
input_orography_fieldname=
ICE5G_0k_orography_fieldname,
input_rdirs_file=
ICE5G_flowdirs_filename,
input_rdirs_fieldname=
ICE5G_flowdirs_fieldname,
input_minima_file=
ICE5G_minima_reduced_filename,
input_minima_fieldname=
ICE5G_minima_fieldname,
input_lakemask_file=
ICE5G_lakemask_filename,
input_lakemask_fieldname=
ICE5G_lakemask_fieldname,
output_orography_file=
ICE5G_output_orog_filename,
output_orography_fieldname=
ICE5G_0k_orography_fieldname)
def prepare_orography_ICE5G_0k_corrected(self):
file_label = self._generate_file_label()
original_orography_filename = join(self.orography_path,
"ice5g_v1_2_00_0k_10min.nc")
orog_corrections_filename = join(self.orography_corrections_fields_path,
"orog_corrs_field_ICE5G_and_tarasov_upscaled_"
"srtm30plus_north_america_only_data_ALG4_sinkless"
"_glcc_olson_lsmask_0k_20170517_003802.nc")
intermediary_orography_filename = self.generated_orography_filepath +\
"intermediary_" + file_label + '.nc'
second_intermediary_orography_filename = self.generated_orography_filepath +\
"second_intermediary_" + file_label + '.nc'
orography_filename = self.generated_orography_filepath + file_label + '.nc'
output_orog_filename = self.generated_orography_filepath + "lake_" + file_label + '.nc'
rdirs_filename = self.generated_rdir_filepath + file_label + '.nc'
original_ls_mask_filename=join(self.orography_path,"Ice6g_c_VM5a_10min_0k.nc")
original_landsea_mask_fieldname="sftlf"
original_ls_mask_with_new_dtype_filename = (self.generated_ls_mask_filepath +
file_label + '_orig' + '.nc')
original_ls_mask_with_grid_filename= (self.generated_ls_mask_filepath +
file_label + '_grid' + '.nc')
minima_filename = self.generated_minima_filepath+file_label+".nc"
minima_reduced_filename = self.generated_minima_filepath+file_label+"_reduced.nc"
minima_fieldname = "minima"
        lakemask_filename= join(self.lakemask_filepath,"empty_lakemask.nc")
lakemask_fieldname="lakemask"
glacial_mask_file = join(self.orography_path,"ice5g_v1_2_21_0k_10min.nc")
utilities.change_dtype(input_filename=original_ls_mask_filename,
output_filename=original_ls_mask_with_new_dtype_filename,
input_fieldname=original_landsea_mask_fieldname,
output_fieldname="lsmask",
new_dtype=np.int32,grid_type='LatLong10min')
utilities.apply_orog_correction_field(original_orography_filename=original_orography_filename,
orography_corrections_filename=orog_corrections_filename,
corrected_orography_filename=
intermediary_orography_filename,
original_orography_fieldname=\
"orog",
grid_type="LatLong10min")
utilities.replace_corrected_orography_with_original_for_glaciated_grid_points_drivers(
input_corrected_orography_file=intermediary_orography_filename,
input_original_orography_file=original_orography_filename,
input_glacier_mask_file=glacial_mask_file,
out_orography_file=second_intermediary_orography_filename,
grid_type="LatLong10min")
iodriver.add_grid_information_to_field(target_filename=
orography_filename,
original_filename=
second_intermediary_orography_filename,
target_fieldname="field_value",
original_fieldname="field_value",
flip_ud_raw=True,rotate180lr_raw=True,
grid_desc_file=self.ten_minute_grid_filepath)
iodriver.add_grid_information_to_field(target_filename=
original_ls_mask_with_grid_filename,
original_filename=
original_ls_mask_with_new_dtype_filename,
target_fieldname="lsmask",
original_fieldname="lsmask",
flip_ud_raw=True,rotate180lr_raw=True,
grid_desc_file=self.ten_minute_grid_filepath)
dynamic_lake_operators.advanced_local_minima_finding_driver(orography_filename,
"field_value",
minima_filename,
minima_fieldname)
dynamic_lake_operators.reduce_connected_areas_to_points(minima_filename,
minima_fieldname,
minima_reduced_filename,
minima_fieldname)
fill_sinks_driver.advanced_sinkless_flow_directions_generator(filename=orography_filename,
output_filename=rdirs_filename,
ls_mask_filename=
original_ls_mask_with_grid_filename,
fieldname="field_value",
output_fieldname=
"rdir",
ls_mask_fieldname=
"lsmask")
dynamic_lake_operators.advanced_burn_carved_rivers_driver(input_orography_file=
orography_filename,
input_orography_fieldname=
"field_value",
input_rdirs_file=
rdirs_filename,
input_rdirs_fieldname=
"rdir",
input_minima_file=
minima_filename,
input_minima_fieldname=
minima_fieldname,
input_lakemask_file=
lakemask_filename,
input_lakemask_fieldname=
lakemask_fieldname,
output_orography_file=
output_orog_filename,
output_orography_fieldname=
"field_value")
def prepare_orography_ICE6G_21k_corrected(self):
file_label = self._generate_file_label()
original_orography_filename = join(self.orography_path,
"ice5g_v1_2_00_0k_10min.nc")
ice6g_0k_filename = join(self.orography_path,
"Ice6g_c_VM5a_10min_0k.nc")
ice6g_21k_filename = join(self.orography_path,
"Ice6g_c_VM5a_10min_21k.nc")
orog_corrections_filename = join(self.orography_corrections_fields_path,
"orog_corrs_field_ICE5G_and_tarasov_upscaled_"
"srtm30plus_north_america_only_data_ALG4_sinkless"
"_glcc_olson_lsmask_0k_20170517_003802.nc")
intermediary_orography_filename = self.generated_orography_filepath +\
"intermediary_" + file_label + '.nc'
second_intermediary_orography_filename = self.generated_orography_filepath +\
"second_intermediary_" + file_label + '.nc'
orography_filename = self.generated_orography_filepath + file_label + '.nc'
output_0k_ice5g_orog_filename = self.generated_orography_filepath + "0k_ice5g_lake_" + file_label + '.nc'
output_21k_ice6g_orog_filename = self.generated_orography_filepath + "21k_ice6g_lake_" + file_label + '.nc'
output_21k_ice6g_orog_sinkless_filename = (self.generated_orography_filepath +
"21k_ice6g_lake_sinkless_" + file_label + '.nc')
output_21k_ice6g_orog_sinkless_improved_filename = (self.generated_orography_filepath +
"21k_ice6g_lake_sinkless_improved_" + file_label + '.nc')
rdirs_filename = self.generated_rdir_filepath + file_label + '.nc'
original_ls_mask_filename=join(self.orography_path,"Ice6g_c_VM5a_10min_0k.nc")
original_landsea_mask_fieldname="sftlf"
original_ls_mask_with_new_dtype_filename = (self.generated_ls_mask_filepath +
file_label + '_orig' + '.nc')
original_ls_mask_with_grid_filename= (self.generated_ls_mask_filepath +
file_label + '_grid' + '.nc')
minima_filename = self.generated_minima_filepath+file_label+".nc"
minima_filename_21k = self.generated_minima_filepath+file_label+"_21k.nc"
minima_reduced_filename = self.generated_minima_filepath+file_label+"_reduced.nc"
minima_reduced_filename_21k = self.generated_minima_filepath+file_label+"_reduced_21k.nc"
minima_fieldname = "minima"
lakemask_filename= self.lakemask_filepath+"/empty_lakemask.nc"
lakemask_fieldname="lakemask"
glacial_mask_file = join(self.orography_path,"ice5g_v1_2_21_0k_10min.nc")
utilities.change_dtype(input_filename=original_ls_mask_filename,
output_filename=original_ls_mask_with_new_dtype_filename,
input_fieldname=original_landsea_mask_fieldname,
output_fieldname="lsmask",
new_dtype=np.int32,grid_type='LatLong10min')
utilities.apply_orog_correction_field(original_orography_filename=original_orography_filename,
orography_corrections_filename=orog_corrections_filename,
corrected_orography_filename=
intermediary_orography_filename,
original_orography_fieldname=\
"orog",
grid_type="LatLong10min")
utilities.replace_corrected_orography_with_original_for_glaciated_grid_points_drivers(
input_corrected_orography_file=intermediary_orography_filename,
input_original_orography_file=original_orography_filename,
input_glacier_mask_file=glacial_mask_file,
out_orography_file=second_intermediary_orography_filename,
grid_type="LatLong10min")
iodriver.add_grid_information_to_field(target_filename=
orography_filename,
original_filename=
second_intermediary_orography_filename,
target_fieldname="field_value",
original_fieldname="field_value",
flip_ud_raw=True,rotate180lr_raw=True,
grid_desc_file=self.ten_minute_grid_filepath)
iodriver.add_grid_information_to_field(target_filename=
original_ls_mask_with_grid_filename,
original_filename=
original_ls_mask_with_new_dtype_filename,
target_fieldname="lsmask",
original_fieldname="lsmask",
flip_ud_raw=True,rotate180lr_raw=True,
grid_desc_file=self.ten_minute_grid_filepath)
dynamic_lake_operators.advanced_local_minima_finding_driver(orography_filename,
"field_value",
minima_filename,
minima_fieldname)
dynamic_lake_operators.reduce_connected_areas_to_points(minima_filename,
minima_fieldname,
minima_reduced_filename,
minima_fieldname)
fill_sinks_driver.advanced_sinkless_flow_directions_generator(filename=orography_filename,
output_filename=rdirs_filename,
ls_mask_filename=
original_ls_mask_with_grid_filename,
fieldname="field_value",
output_fieldname=
"rdir",
ls_mask_fieldname=
"lsmask")
dynamic_lake_operators.advanced_burn_carved_rivers_driver(input_orography_file=
orography_filename,
input_orography_fieldname=
"field_value",
input_rdirs_file=
rdirs_filename,
input_rdirs_fieldname=
"rdir",
input_minima_file=
minima_filename,
input_minima_fieldname=
minima_fieldname,
input_lakemask_file=
lakemask_filename,
input_lakemask_fieldname=
lakemask_fieldname,
output_orography_file=
output_0k_ice5g_orog_filename,
output_orography_fieldname=
"Topo")
utilities.advanced_rebase_orography_driver(orography_filename=
ice6g_21k_filename,
present_day_base_orography_filename=
ice6g_0k_filename,
present_day_reference_orography_filename=
output_0k_ice5g_orog_filename,
rebased_orography_filename=
output_21k_ice6g_orog_filename,
orography_fieldname="Topo",
present_day_base_orography_fieldname="Topo",
present_day_reference_orography_fieldname="Topo",
rebased_orography_fieldname="Topo")
fill_sinks_driver.\
generate_orography_with_sinks_filled_advanced_driver(output_21k_ice6g_orog_filename,
output_21k_ice6g_orog_sinkless_filename,
"Topo",
"Topo",
ls_mask_filename=None,
truesinks_filename=None,
ls_mask_fieldname=None,
truesinks_fieldname=None,
add_slight_slope_when_filling_sinks=False,
slope_param=0.1)
ice6g_sinkless_field = iodriver.advanced_field_loader(output_21k_ice6g_orog_sinkless_filename,
fieldname="Topo",
adjust_orientation=True)
ice6g_field = iodriver.advanced_field_loader(output_21k_ice6g_orog_filename,fieldname="Topo",
adjust_orientation=True)
ice6g_21k_icemask = iodriver.advanced_field_loader(ice6g_21k_filename,
fieldname="sftgif",
adjust_orientation=True)
ice6g_21k_lsmask = iodriver.advanced_field_loader(ice6g_21k_filename,
fieldname="sftlf",
adjust_orientation=True)
ice6g_21k_icemask.invert_data()
ice6g_field.mask_field_with_external_mask(ice6g_21k_icemask.get_data())
ice6g_sinkless_field.update_field_with_partially_masked_data(ice6g_field)
ice6g_field.mask_field_with_external_mask(ice6g_21k_lsmask.get_data())
ice6g_sinkless_field.update_field_with_partially_masked_data(ice6g_field)
iodriver.advanced_field_writer(output_21k_ice6g_orog_sinkless_improved_filename,
ice6g_sinkless_field,fieldname="Topo",clobber=True)
dynamic_lake_operators.advanced_local_minima_finding_driver(output_21k_ice6g_orog_filename,
"Topo",
minima_filename_21k,
minima_fieldname)
dynamic_lake_operators.reduce_connected_areas_to_points(minima_filename_21k,
minima_fieldname,
minima_reduced_filename_21k,
minima_fieldname)
print("minima filename: " + minima_reduced_filename_21k)
print("minima fieldname" + minima_fieldname)
print("ice6g_21k_filename" + ice6g_21k_filename)
print("output21k_orog_filename" + output_21k_ice6g_orog_filename)
def prepare_orography(self,orography_filename,orography_fieldname,timestep,
orography_0k_filename,orography_0k_fieldname,timestep_0k,
glacier_mask_filename,glacier_mask_fieldname,
glacier_mask_timestep,
ls_mask_filename,ls_mask_fieldname,ls_mask_timestep,
ls_mask_0k_filename,ls_mask_0k_fieldname,ls_mask_timestep_0k,
file_label):
tstart = time.time()
flip_ls_mask_0k = False
invert_ls_mask = True
rotate_lsmask_180_lr = True
rotate_lsmask_180_lr_0k = False
original_orography_filename = join(self.orography_path,
"ice5g_v1_2_00_0k_10min.nc")
true_sinks_filename = join(self.truesinks_path,
"truesinks_ICE5G_and_tarasov_upscaled_srtm30plus_"
"north_america_only_data_ALG4_sinkless_glcc_olson"
"_lsmask_0k_20191014_173825_with_grid.nc")
if timestep_0k is not None:
orography_0k = iodriver.advanced_field_loader(orography_0k_filename,
time_slice=timestep_0k,
fieldname=orography_0k_fieldname)
working_orog_0k_filename = self.generated_orography_filepath + \
"_extracted_for_0k_" + \
file_label + ".nc"
iodriver.advanced_field_writer(working_orog_0k_filename,orography_0k,"Topo")
else:
working_orog_0k_filename = orography_filename
if timestep is not None:
orography = iodriver.advanced_field_loader(orography_filename,time_slice=timestep,
fieldname=orography_fieldname)
working_orog_filename = self.generated_orography_filepath + \
"_extracted_for_{}".format(timestep) + \
file_label + ".nc"
iodriver.advanced_field_writer(working_orog_filename,orography,"Topo")
else:
working_orog_filename = orography_filename
orog_corrections_filename = join(self.orography_corrections_fields_path,
"orog_corrs_field_ICE5G_and_tarasov_upscaled_"
"srtm30plus_north_america_only_data_ALG4_sinkless"
"_glcc_olson_lsmask_0k_20170517_003802_g.nc")
intermediary_orography_filename = self.generated_orography_filepath +\
"intermediary_" + file_label + '.nc'
second_intermediary_orography_filename = self.generated_orography_filepath +\
"second_intermediary_" + file_label + '.nc'
#orography_filename = self.generated_orography_filepath + file_label + '.nc'
output_0k_ice5g_orog_filename = self.generated_orography_filepath + "0k_ice5g_lake_" + file_label + '.nc'
output_working_orog_filename = self.generated_orography_filepath + "{}_ice6g_lake_".format(timestep) + file_label + '.nc'
output_intermediary_filtered_working_orog_filename = self.generated_orography_filepath +\
"{}_ice6g_lake_filtered_int_".format(timestep) +\
file_label + '.nc'
output_filtered_working_orog_filename = self.generated_orography_filepath +\
"{}_ice6g_lake_filtered_".format(timestep) +\
file_label + '.nc'
output_working_orog_sinkless_filename = (self.generated_orography_filepath +
"{}_ice6g_lake_sinkless_".format(timestep) + file_label + '.nc')
output_working_orog_sinkless_improved_filename = (self.generated_orography_filepath +
"{}_ice6g_lake_sinkless_improved_".format(timestep) + file_label + '.nc')
orog_diff_filename = (self.generated_orography_filepath + "{}_lake_basins_".format(timestep) +
file_label + '.nc')
rdirs_filename = self.generated_rdir_filepath + file_label + '.nc'
if ls_mask_timestep_0k is not None:
ls_mask_0k = iodriver.advanced_field_loader(ls_mask_0k_filename,
time_slice=ls_mask_timestep_0k,
fieldname=ls_mask_0k_fieldname,
grid_desc_file="/Users/thomasriddick/Documents/data/HDdata/grids/grid_10min.txt")
original_ls_mask_filename= self.generated_ls_mask_filepath + \
"_extracted_for_{}".format(timestep) + \
file_label + ".nc"
iodriver.advanced_field_writer(original_ls_mask_filename,ls_mask_0k,
fieldname=ls_mask_0k_fieldname)
else:
original_ls_mask_filename=ls_mask_0k_filename
original_landsea_mask_fieldname=ls_mask_0k_fieldname
original_ls_mask_with_new_dtype_filename = (self.generated_ls_mask_filepath +
file_label + '_orig' + '.nc')
original_ls_mask_with_grid_filename= (self.generated_ls_mask_filepath +
file_label + '_grid' + '.nc')
minima_filename = self.generated_minima_filepath+file_label+".nc"
minima_working_orog_filename = self.generated_minima_filepath+file_label+"_{}.nc".format(timestep)
minima_reduced_filename = self.generated_minima_filepath+file_label+"_reduced.nc"
minima_reduced_filename_working_orog = \
self.generated_minima_filepath+file_label+"_reduced_{}.nc".format(timestep)
minima_fieldname = "minima"
lakemask_filename= self.lakemask_filepath+"/empty_lakemask.nc"
lakemask_fieldname="lakemask"
glacial_mask_file = join(self.orography_path,"ice5g_v1_2_00_0k_10min.nc")
utilities.change_dtype(input_filename=original_ls_mask_filename,
output_filename=original_ls_mask_with_new_dtype_filename,
input_fieldname=original_landsea_mask_fieldname,
output_fieldname="lsmask",
new_dtype=np.int32,grid_type='LatLong10min')
utilities.advanced_apply_orog_correction_field(original_orography_filename=
original_orography_filename,
orography_corrections_filename=
orog_corrections_filename,
corrected_orography_filename=
intermediary_orography_filename,
original_orography_fieldname=
"orog")
utilities.advanced_replace_corrected_orog_with_orig_for_glcted_grid_points_drivers(
input_corrected_orography_file=intermediary_orography_filename,
input_original_orography_file=original_orography_filename,
input_glacier_mask_file=glacial_mask_file,
out_orography_file=second_intermediary_orography_filename,
input_corrected_orography_fieldname=None,
input_original_orography_fieldname=None,
input_glacier_mask_fieldname=None,
out_orography_fieldname=None)
orography_filename = second_intermediary_orography_filename
iodriver.add_grid_information_to_field(target_filename=
original_ls_mask_with_grid_filename,
original_filename=
original_ls_mask_with_new_dtype_filename,
target_fieldname="lsmask",
original_fieldname="lsmask",
flip_ud_raw=flip_ls_mask_0k,
rotate180lr_raw=rotate_lsmask_180_lr_0k,
grid_desc_file=self.ten_minute_grid_filepath)
dynamic_lake_operators.advanced_local_minima_finding_driver(orography_filename,
"field_value",
minima_filename,
minima_fieldname)
dynamic_lake_operators.reduce_connected_areas_to_points(minima_filename,
minima_fieldname,
minima_reduced_filename,
minima_fieldname)
fill_sinks_driver.advanced_sinkless_flow_directions_generator(filename=orography_filename,
output_filename=rdirs_filename,
ls_mask_filename=
original_ls_mask_with_grid_filename,
truesinks_filename=
true_sinks_filename,
fieldname="field_value",
output_fieldname=
"rdir",
ls_mask_fieldname=
"lsmask",
truesinks_fieldname=
"true_sinks")
dynamic_lake_operators.advanced_burn_carved_rivers_driver(input_orography_file=
orography_filename,
input_orography_fieldname=
"field_value",
input_rdirs_file=
rdirs_filename,
input_rdirs_fieldname=
"rdir",
input_minima_file=
minima_filename,
input_minima_fieldname=
minima_fieldname,
input_lakemask_file=
lakemask_filename,
input_lakemask_fieldname=
lakemask_fieldname,
output_orography_file=
output_0k_ice5g_orog_filename,
output_orography_fieldname=
"Topo",
add_slope = True,
max_exploration_range = 10,
minimum_height_change_threshold = 5.0,
short_path_threshold = 6,
short_minimum_height_change_threshold = 0.25)
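        # Generate a new orography corrections field from the original orography and
        # the lake-adjusted present-day (0k) orography produced above.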
new_orography_corrections_filename = path.join(self.orography_corrections_fields_path,
"ice5g_0k_lake_corrs_" + file_label + ".nc")
utilities.advanced_orog_correction_field_generator(original_orography_filename=
original_orography_filename,
corrected_orography_filename=
output_0k_ice5g_orog_filename,
orography_corrections_filename=
new_orography_corrections_filename,
original_orography_fieldname=
"orog",
corrected_orography_fieldname=
"Topo",
orography_corrections_fieldname=
"orog")
print("Time for initial setup: " + str(time.time() - tstart))
utilities.advanced_rebase_orography_driver(orography_filename=
working_orog_filename,
present_day_base_orography_filename=
working_orog_0k_filename,
present_day_reference_orography_filename=
output_0k_ice5g_orog_filename,
rebased_orography_filename=
output_working_orog_filename,
orography_fieldname="Topo",
present_day_base_orography_fieldname="Topo",
present_day_reference_orography_fieldname="Topo",
rebased_orography_fieldname="Topo")
fill_sinks_driver.\
generate_orography_with_sinks_filled_advanced_driver(output_working_orog_filename,
output_working_orog_sinkless_filename,
"Topo",
"Topo",
ls_mask_filename=None,
truesinks_filename=None,
ls_mask_fieldname=None,
truesinks_fieldname=None,
add_slight_slope_when_filling_sinks=False,
slope_param=0.1)
dynamic_lake_operators.\
advanced_shallow_lake_filtering_driver(input_unfilled_orography_file=
output_working_orog_filename,
input_unfilled_orography_fieldname="Topo",
input_filled_orography_file=
output_working_orog_sinkless_filename,
input_filled_orography_fieldname="Topo",
output_unfilled_orography_file=
output_intermediary_filtered_working_orog_filename,
output_unfilled_orography_fieldname="Topo",
minimum_depth_threshold=5.0)
dynamic_lake_operators.\
advanced_narrow_lake_filtering_driver(input_unfilled_orography_file=
output_intermediary_filtered_working_orog_filename,
input_unfilled_orography_fieldname=
"Topo",
input_filled_orography_file=
output_working_orog_sinkless_filename,
input_filled_orography_fieldname=
"Topo",
output_unfilled_orography_file=
output_filtered_working_orog_filename,
output_unfilled_orography_fieldname=
"Topo",
interior_cell_min_masked_neighbors=5,
edge_cell_max_masked_neighbors=4,
max_range=5,
iterations=5)
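        # Load the filled (sinkless) and filtered working orographies together with
        # the glacier and land-sea masks used in the masking steps below.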
working_orog_sinkless_field = iodriver.advanced_field_loader(output_working_orog_sinkless_filename,
fieldname="Topo",
adjust_orientation=True)
working_orog_field = iodriver.advanced_field_loader(output_filtered_working_orog_filename,fieldname="Topo",
adjust_orientation=True)
working_orog_icemask = iodriver.advanced_field_loader(glacier_mask_filename,
time_slice=glacier_mask_timestep,
fieldname=glacier_mask_fieldname,
adjust_orientation=True)
working_orog_lsmask = iodriver.advanced_field_loader(ls_mask_filename,
time_slice=ls_mask_timestep,
fieldname=ls_mask_fieldname,
adjust_orientation=False,
grid_desc_file="/Users/thomasriddick/Documents/data/HDdata/grids/grid_10min.txt")
working_orog_icemask.invert_data()
if (invert_ls_mask):
working_orog_lsmask.invert_data()
if (rotate_lsmask_180_lr):
working_orog_lsmask.rotate_field_by_a_hundred_and_eighty_degrees()
working_orog_field.mask_field_with_external_mask(working_orog_icemask.get_data())
working_orog_sinkless_field.update_field_with_partially_masked_data(working_orog_field)
working_orog_field.mask_field_with_external_mask(working_orog_lsmask.get_data())
working_orog_sinkless_field.update_field_with_partially_masked_data(working_orog_field)
iodriver.advanced_field_writer(output_working_orog_sinkless_improved_filename,
working_orog_sinkless_field,fieldname="Topo",clobber=True)
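        # Locate local minima on the filtered working orography, reduce connected
        # minima to single points and report the key output filenames.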
dynamic_lake_operators.advanced_local_minima_finding_driver(output_filtered_working_orog_filename,
"Topo",
minima_working_orog_filename,
minima_fieldname)
dynamic_lake_operators.reduce_connected_areas_to_points(minima_working_orog_filename,
minima_fieldname,
minima_reduced_filename_working_orog,
minima_fieldname)
print("minima filename: " + minima_reduced_filename_working_orog)
print("minima fieldname: " + minima_fieldname)
print("timestep{}_filename: ".format(timestep) + working_orog_filename)
print("timestep{}_orog_filename:".format(timestep) + output_working_orog_filename)
improved_sinkless_orog = iodriver.advanced_field_loader(output_working_orog_sinkless_improved_filename,
fieldname="Topo",adjust_orientation=True)
lake_orog = iodriver.advanced_field_loader(output_filtered_working_orog_filename,
fieldname="Topo",adjust_orientation=True)
improved_sinkless_orog.subtract(lake_orog)
iodriver.advanced_field_writer(orog_diff_filename,improved_sinkless_orog,
fieldname="depth",clobber=True)
def extract_lake_volumes_from_glac1D_basins(self):
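        """Extract lake volumes for a set of GLAC1D timesteps.

        For each timestep this reads previously generated lake parameter and
        basin catchment number files and writes out a lake volumes field.
        """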
overarching_file_label = self._generate_file_label()
transient_data_folder_path = "/Users/thomasriddick/Documents/data/transient_sim_data/1"
timesteps_to_use = [ 950,1000,1050,1100,1150,1200,1250,1300,1350,
1400,1450,1500,1550,1600,1650,1700,1750,1800]
for timestep in timesteps_to_use:
file_label = self._generate_file_label() + "_" + str(timestep)
lake_parameters_filepath = join(transient_data_folder_path,
"lakeparas_prepare_basins_from_glac1D_{}.nc".format(timestep))
basin_catchment_numbers_filepath = join(transient_data_folder_path,
"basin_catchment_numbers_prepare_"
"basins_from_glac1D_{}.nc".format(timestep))
lake_volumes_out_filepath = ("/Users/thomasriddick/Documents/data/temp/"
"lake_volumes_out_{}.nc".format(timestep))
extract_lake_volumes.lake_volume_extraction_driver(lake_parameters_filepath,
basin_catchment_numbers_filepath,
lake_volumes_out_filepath)
def prepare_basins_from_glac1D(self):
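        """Prepare lake basin parameters from GLAC1D input data.

        For each selected timestep this prepares the orography and land-sea mask,
        generates river directions that retain depressions, extracts true sinks
        from those river directions and then runs the basin evaluation driver,
        logging the generated HD and lake parameter file paths.
        """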
overarching_file_label = self._generate_file_label()
# timesteps_to_use = [ 950,1000,1050,1100,1150,1200,1250,1300,1350,
# 1400,1450,1500,1550,1600,1650,1700,1750,1800]
timesteps_to_use = [1250]
timestep_for_0k = 2600
glac_1d_topo_filename = join(self.orography_path,
"GLAC1D_Top01_surf.nc")
ls_mask_filename = join(self.ls_masks_path,
"10min_lsmask_pmu0178_merged.nc")
ls_mask_0k_filename = join(self.ls_masks_path,"generated",
"ls_mask_make_1000m_depth_contour_mask_from_ICE6G_20200721_144332.nc")
glacier_mask_filename = join(self.orography_path,
"GLAC1D_ICEM_10min.nc")
cell_areas_filename_10min = join(self.grid_areas_and_spacings_filepath,
"10min_grid_area_default_R.nc")
for timestep in timesteps_to_use:
file_label = self._generate_file_label() + "_" + str(timestep)
self.prepare_orography(orography_filename=glac_1d_topo_filename,
orography_fieldname="HDCB",
timestep=timestep,
orography_0k_filename=glac_1d_topo_filename,
orography_0k_fieldname="HDCB",
timestep_0k=timestep_for_0k,
glacier_mask_filename=glacier_mask_filename,
glacier_mask_fieldname="ICEM",
glacier_mask_timestep=timestep,
ls_mask_filename=ls_mask_filename,
ls_mask_fieldname="field_value",
ls_mask_timestep=timestep,
#ls_mask_0k_filename=ls_mask_filename,
ls_mask_0k_filename=ls_mask_0k_filename,
#ls_mask_0kfieldname="field_value",
ls_mask_0k_fieldname="lsm",
#ls_mask_timestep_0k=timestep_for_0k,
ls_mask_timestep_0k=None,
file_label=file_label)
working_orography_filename = "/Users/thomasriddick/Documents/data/HDdata/orographys/generated/updated_orog_" + str(timestep) + \
"_ice6g_lake_filtered_" + file_label + ".nc"
lsmask_filename = "/Users/thomasriddick/Documents/data/HDdata/lsmasks/generated/ls_mask_" + file_label + "_grid.nc"
self.prepare_river_directions_with_depressions(working_orography_filename=
working_orography_filename,
lsmask_filename=
lsmask_filename,
orography_fieldname="Topo",
lsmask_fieldname="lsmask",
file_label=file_label)
minima_from_rdirs_filename = ("/Users/thomasriddick/Documents/data/HDdata/minima/"
"minima_" + file_label + "_reduced"
"_" + str(timestep) + "_landonly_from_rdirs.nc")
utilities.advanced_extract_true_sinks_from_rdirs(rdirs_filename=
"/Users/thomasriddick/Documents/data/HDdata/rdirs/generated/"
"updated_RFDs_" + file_label + "_10min_with_depressions.nc",
truesinks_filename=
minima_from_rdirs_filename,
rdirs_fieldname="FDIR",
truesinks_fieldname="minima")
dynamic_lake_operators.\
advanced_basin_evaluation_driver(input_minima_file=
minima_from_rdirs_filename,
input_minima_fieldname="minima",
input_raw_orography_file=
"/Users/thomasriddick/Documents/data/HDdata/orographys/"
"generated/updated_orog_" + str(timestep) +
"_ice6g_lake_filtered_" + file_label + ".nc",
input_raw_orography_fieldname="Topo",
input_corrected_orography_file=
"/Users/thomasriddick/Documents/data/HDdata/orographys/"
"generated/updated_orog_" + str(timestep) +
"_ice6g_lake_filtered_" + file_label + ".nc",
input_corrected_orography_fieldname="Topo",
input_cell_areas_file= cell_areas_filename_10min,
input_cell_areas_fieldname="cell_area",
input_prior_fine_rdirs_file=
"/Users/thomasriddick/Documents/data/HDdata/rdirs/generated/"
"updated_RFDs_" + file_label + "_10min_with_depressions.nc",
input_prior_fine_rdirs_fieldname="FDIR",
input_prior_fine_catchments_file=
"/Users/thomasriddick/Documents/data/HDdata/catchmentmaps/"
"catchmentmap_" + file_label + "_10mins.nc",
input_prior_fine_catchments_fieldname="catchments",
input_coarse_catchment_nums_file=
"/Users/thomasriddick/Documents/data/HDdata/catchmentmaps/"
"catchmentmap_" + file_label + "_30mins.nc",
input_coarse_catchment_nums_fieldname="catchments",
input_coarse_rdirs_file=
"/Users/thomasriddick/Documents/data/HDdata/rdirs/generated/"
"updated_RFDs_" + file_label + "_30min_with_depressions.nc",
input_coarse_rdirs_fieldname="FDIR",
combined_output_filename=
join(self.lake_parameter_file_path,
"lakeparas_" + file_label + ".nc"),
output_filepath=self.lake_parameter_file_path,
output_filelabel=file_label,
output_basin_catchment_nums_filepath=
join(self.basin_catchment_numbers_path,
"basin_catchment_numbers_" + file_label + ".nc"))
with open(self.generated_lake_and_hd_params_log_path +
overarching_file_label + ".log",'a') as f:
f.write("Timestep=" + str(timestep) + '\n')
f.write(self.generated_hd_file_path + file_label + ".nc\n")
f.write(join(self.lake_parameter_file_path,
"lakeparas_" + file_label + ".nc\n"))
f.write(join(self.basin_catchment_numbers_path,
"basin_catchment_numbers_" + file_label + ".nc\n"))
def prepare_river_directions_with_depressions(self,
working_orography_filename,
lsmask_filename,
orography_fieldname,
lsmask_fieldname,
file_label):
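        """Generate 10 and 30 minute river directions that retain depressions.

        River directions are determined directly from the working orography
        (marking pits as true sinks), catchments and cumulative flow are computed,
        the directions are upscaled to 30 minutes with the COTAT+ algorithm and
        matching coarse orography and land-sea mask fields are prepared.
        """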
rdirs_filename_10min = \
self.generated_rdir_filepath + file_label + "_10min_with_depressions.nc"
rdirs_filename_30min = \
self.generated_rdir_filepath + file_label + "_30min_with_depressions.nc"
determine_river_directions.\
advanced_river_direction_determination_driver(rdirs_filename_10min,
working_orography_filename,
lsmask_filename,
truesinks_filename=None,
rdirs_fieldname="FDIR",
orography_fieldname=orography_fieldname,
lsmask_fieldname=lsmask_fieldname,
truesinks_fieldname=None,
always_flow_to_sea=True,
use_diagonal_nbrs=True,
mark_pits_as_true_sinks=True)
fine_cumulative_flow_filename = (self.generated_flowmaps_filepath + file_label
+ '_10mins.nc')
fine_catchments_filename = (self.generated_catchments_path + file_label
+ '_10mins.nc')
cc.advanced_main(rdirs_filename_10min,"FDIR",
fine_catchments_filename,"catchments",
loop_logfile='/Users/thomasriddick/Documents/data/temp/loop_log.txt',
use_cpp_alg=True)
ftgc.advanced_main(rdirs_filename=rdirs_filename_10min,
output_filename=fine_cumulative_flow_filename,
rdirs_fieldname='FDIR',
output_fieldname='cflow')
cotat_plus_parameters_filename = join(self.cotat_plus_parameters_path,'cotat_plus_standard_params.nl')
self._run_advanced_cotat_plus_upscaling(input_fine_rdirs_filename=
rdirs_filename_10min,
input_fine_cumulative_flow_filename=
fine_cumulative_flow_filename,
output_course_rdirs_filename=
rdirs_filename_30min,
input_fine_rdirs_fieldname="FDIR",
input_fine_cumulative_flow_fieldname="cflow",
output_course_rdirs_fieldname="FDIR",
cotat_plus_parameters_filename=
cotat_plus_parameters_filename,
output_file_label=file_label,
scaling_factor=3)
coarse_cumulative_flow_filename = (self.generated_flowmaps_filepath + file_label
+ '_30mins.nc')
coarse_catchments_filename = (self.generated_catchments_path + file_label
+ '_30mins.nc')
cc.advanced_main(rdirs_filename_30min,"FDIR",
coarse_catchments_filename,"catchments",
loop_logfile='/Users/thomasriddick/Documents/data/temp/loop_log.txt',
use_cpp_alg=True)
ftgc.advanced_main(rdirs_filename=rdirs_filename_30min,
output_filename=coarse_cumulative_flow_filename,
rdirs_fieldname='FDIR',
output_fieldname='cflow')
coarse_orography_filename = (self.generated_orography_filepath + file_label + "with_depressions"
+ '_30mins.nc')
coarse_lsmask_filename = (self.generated_ls_mask_filepath + file_label + "_30mins.nc")
utilities.upscale_field_driver(input_filename=working_orography_filename,
output_filename=coarse_orography_filename,
input_grid_type='LatLong10min',
output_grid_type='HD',
method='Sum', timeslice=None,
scalenumbers=True)
utilities.upscale_field_driver(input_filename=lsmask_filename,
output_filename=coarse_lsmask_filename,
input_grid_type='LatLong10min',
output_grid_type='HD',
method='Mode', timeslice=None,
scalenumbers=True)
transformed_course_rdirs_filename = path.splitext(rdirs_filename_30min)[0] + '_transf' +\
path.splitext(rdirs_filename_30min)[1]
transformed_HD_filled_orography_filename = path.splitext(coarse_orography_filename)[0] + '_transf' +\
path.splitext(coarse_orography_filename)[1]
transformed_HD_ls_mask_filename = path.splitext(coarse_lsmask_filename)[0] + '_transf' +\
path.splitext(coarse_lsmask_filename)[1]
self._apply_transforms_to_field(input_filename=rdirs_filename_30min,
output_filename=transformed_course_rdirs_filename,
flip_ud=False, rotate180lr=True, invert_data=False,
timeslice=None, griddescfile=self.half_degree_grid_filepath,
grid_type='HD')
self._apply_transforms_to_field(input_filename=coarse_orography_filename,
output_filename=transformed_HD_filled_orography_filename,
flip_ud=True, rotate180lr=True, invert_data=False,
timeslice=None, griddescfile=self.half_degree_grid_filepath,
grid_type='HD')
self._apply_transforms_to_field(input_filename=coarse_lsmask_filename,
output_filename=transformed_HD_ls_mask_filename,
flip_ud=False, rotate180lr=False, invert_data=True,
timeslice=None, griddescfile=self.half_degree_grid_filepath,
grid_type='HD')
def prepare_river_directions_with_depressions_from_glac1D(self):
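        """Run prepare_river_directions_with_depressions on a fixed GLAC1D
        timestep 1900 orography and land-sea mask."""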
working_orography_filename = "/Users/thomasriddick/Documents/data/HDdata/orographys/generated/updated_orog_1900_ice6g_lake_prepare_orography_20190211_131605.nc"
lsmask_filename = "/Users/thomasriddick/Documents/data/HDdata/lsmasks/generated/ls_mask_prepare_orography_20190211_131605_grid.nc"
orography_fieldname = "Topo"
lsmask_fieldname = "lsmask"
file_label = self._generate_file_label()
self.prepare_river_directions_with_depressions(working_orography_filename,
lsmask_filename,
orography_fieldname,
lsmask_fieldname,
file_label)
def prepare_flow_parameters_from_rdirs(self,rdirs_filepath,orography_filepath,lsmask_filepath,
file_label):
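        """Generate HD flow parameters and an HD parameters file from the given
        river directions, orography and land-sea mask."""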
self._generate_flow_parameters(rdir_file=rdirs_filepath,
topography_file=orography_filepath,
inner_slope_file=\
path.join(self.orography_path,'bin_innerslope.dat'),
lsmask_file=lsmask_filepath,
null_file=\
path.join(self.null_fields_filepath,'null.dat'),
area_spacing_file=\
path.join(self.grid_areas_and_spacings_filepath,
'fl_dp_dl.dat'),
orography_variance_file=\
path.join(self.orography_path,'bin_toposig.dat'),
output_dir=path.join(self.flow_params_dirs_path,
'hd_flow_params' + file_label),
paragen_source_label=None,production_run=False,
grid_type="HD")
self._generate_hd_file(rdir_file=path.splitext(rdirs_filepath)[0] + ".dat",
lsmask_file=lsmask_filepath,
null_file=\
path.join(self.null_fields_filepath,'null.dat'),
area_spacing_file=\
path.join(self.grid_areas_and_spacings_filepath,
'fl_dp_dl.dat'),
hd_grid_specs_file=self.half_degree_grid_filepath,
output_file=self.generated_hd_file_path + file_label + '.nc',
paras_dir=path.join(self.flow_params_dirs_path,
'hd_flow_params' + file_label))
def evaluate_glac1D_ts1900_basins(self):
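        """Run the basin evaluation driver for the GLAC1D timestep 1900 setup
        using hard-coded input file paths."""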
file_label = self._generate_file_label()
dynamic_lake_operators.\
advanced_basin_evaluation_driver(input_minima_file=
"/Users/thomasriddick/Documents/data/HDdata/minima/"
"minima_prepare_orography_20190401_115141_reduced"
"_1900_landonly_from_rdirs.nc",
input_minima_fieldname="minima",
input_raw_orography_file=
"/Users/thomasriddick/Documents/data/HDdata/orographys/"
"generated/updated_orog_1900_ice6g_lake_prepare_orography"
"_20190211_131605.nc",
input_raw_orography_fieldname="Topo",
input_corrected_orography_file=
"/Users/thomasriddick/Documents/data/HDdata/orographys/"
"generated/updated_orog_1900_ice6g_lake_prepare_orography"
"_20190211_131605.nc",
input_corrected_orography_fieldname="Topo",
input_cell_areas_file="/Users/thomasriddick/Documents/"
"data/HDdata/10min_grid_area_default_R.nc",
input_cell_areas_fieldname="cell_area",
input_prior_fine_rdirs_file=
"/Users/thomasriddick/Documents/data/HDdata/rdirs/generated/"
"updated_RFDs_prepare_river_directions_with_depressions_"
"20190401_115141_10min_with_depressions.nc",
input_prior_fine_rdirs_fieldname="FDIR",
input_prior_fine_catchments_file=
"/Users/thomasriddick/Documents/data/HDdata/catchmentmaps/"
"catchmentmap_prepare_river_directions_with_depressions_"
"20190401_115141_10mins.nc",
input_prior_fine_catchments_fieldname="catchments",
input_coarse_catchment_nums_file=
"/Users/thomasriddick/Documents/data/HDdata/catchmentmaps/"
"catchmentmap_prepare_river_directions_with_depressions_"
"20190401_115141_30mins.nc",
input_coarse_catchment_nums_fieldname="catchments",
input_coarse_rdirs_file=
"/Users/thomasriddick/Documents/data/HDdata/rdirs/generated/"
"updated_RFDs_prepare_river_directions_with_depressions_"
"20190401_115141_30min_with_depressions.nc",
input_coarse_rdirs_fieldname="FDIR",
combined_output_filename=
join(self.lake_parameter_file_path,
"lakeparas" + file_label + ".nc"),
output_filepath=self.lake_parameter_file_path,
output_filelabel=file_label,
output_basin_catchment_nums_filepath=
join(self.basin_catchment_numbers_path,
"basin_catchment_numbers" + file_label + ".nc"))
def evaluate_ICE6G_lgm_basins(self):
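        """Run the basin evaluation driver for the ICE6G LGM (21k) setup using
        hard-coded input file paths."""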
file_label = self._generate_file_label()
dynamic_lake_operators.\
advanced_basin_evaluation_driver(input_minima_file=
"/Users/thomasriddick/Documents/data/HDdata/minima/"
"minima_prepare_orography_ICE6G_21k_corrected_20180921"
"_155937_reduced_21k_landonly.nc",
input_minima_fieldname="minima",
input_raw_orography_file=
"/Users/thomasriddick/Documents/data/HDdata/orographys/"
"generated/updated_orog_21k_ice6g_lake_prepare_orography"
"_ICE6G_21k_corrected_20180921_155937.nc",
input_raw_orography_fieldname="Topo",
input_corrected_orography_file=
"/Users/thomasriddick/Documents/data/HDdata/orographys/"
"generated/updated_orog_21k_ice6g_lake_prepare_orography"
"_ICE6G_21k_corrected_20180921_155937.nc",
input_corrected_orography_fieldname="Topo",
input_cell_areas_file="/Users/thomasriddick/Documents/"
"data/HDdata/10min_grid_area_default_R.nc",
input_cell_areas_fieldname="cell_area",
input_prior_fine_rdirs_file=
"/Users/thomasriddick/Documents/data/HDdata/rdirs/generated/"
"updated_RFDs_ICE6g_lgm_ALG4_sinkless_no_true_sinks_oceans_"
"lsmask_plus_upscale_""rdirs_tarasov_orog_corrs_20171015_031541.nc",
input_prior_fine_rdirs_fieldname="field_value",
input_prior_fine_catchments_file=
"/Users/thomasriddick/Documents/data/HDdata/catchmentmaps/"
"catchmentmap_unsorted_ICE6g_lgm_ALG4_sinkless_no_true_sinks"
"_oceans_lsmask_plus_upscale_rdirs_tarasov_orog_corrs_20171015_031541.nc",
input_prior_fine_catchments_fieldname="field_value",
input_coarse_catchment_nums_file=
"/Users/thomasriddick/Documents/data/HDdata/catchmentmaps/"
"catchmentmap_ICE6g_lgm_ALG4_sinkless_no_true_sinks"
"_oceans_lsmask_plus_upscale_rdirs_tarasov_orog_corrs"
"_20171015_031541_upscaled_updated.nc",
input_coarse_catchment_nums_fieldname="field_value",
input_coarse_rdirs_file=
"/Users/thomasriddick/Documents/data/HDdata/rdirs/generated/"
"updated_RFDs_ICE6g_lgm_ALG4_sinkless_no_true_sinks_oceans_"
"lsmask_plus_upscale_rdirs_tarasov_orog_corrs_20171015_"
"031541.nc",
input_coarse_rdirs_fieldname="FDIR",
combined_output_filename=
join(self.lake_parameter_file_path,
"lakeparas" + file_label + ".nc"),
output_filepath=self.lake_parameter_file_path,
output_filelabel=file_label)
def connect_catchments_for_glac1D(self):
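        """Connect coarse catchments across lake basins for a single GLAC1D
        timestep using hard-coded input file paths."""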
coarse_catchments_filepath = ("/Users/thomasriddick/Documents/data/HDdata/catchmentmaps/"
"catchmentmap_prepare_basins_from_glac1D_"
"20210205_151552_1250_30mins.nc")
lake_parameters_filepath = ("/Users/thomasriddick/Documents/data/HDdata/lakeparafiles/"
"lakeparas_prepare_basins_from_glac1D_20210205_151552_1250.nc")
river_directions_filepath = ("/Users/thomasriddick/Documents/data/HDdata/rdirs/generated/"
"updated_RFDs_prepare_basins_from_glac1D_20210205_151552_1250_30min_with_depressions.nc")
basin_numbers_filepath = ("/Users/thomasriddick/Documents/data/HDdata/basin_catchment_numbers/"
"basin_catchment_numbers_prepare_basins_from_glac1D"
"_20210205_151552_1250.nc")
connected_coarse_catchments_out_filename = ("/Users/thomasriddick/Documents/data/temp/"
"catchment_1250.nc")
coarse_catchments_fieldname = "catchments"
connected_coarse_catchments_out_fieldname = "catchments"
basin_catchment_numbers_fieldname = "basin_catchment_numbers"
river_directions_fieldname = "FDIR"
cclc.connect_coarse_lake_catchments_driver(coarse_catchments_filepath,
lake_parameters_filepath,
basin_numbers_filepath,
river_directions_filepath,
connected_coarse_catchments_out_filename,
coarse_catchments_fieldname,
connected_coarse_catchments_out_fieldname,
basin_catchment_numbers_fieldname,
river_directions_fieldname)
def connect_catchments_for_transient_run(self):
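        """Connect coarse catchments across lake basins for each date of a
        transient run and compute the cumulative flow to river mouths."""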
base_filepath = "/Users/thomasriddick/Documents/data/lake_analysis_runs/lake_analysis_one_21_Jun_2021/lakes/results"
#dates = range(15990,15980,-10)
#dates = range(11000,10990,-10)
dates = [0]
for date in dates:
river_directions_filepath = ("{0}/diag_version_13_date_{1}/30min_rdirs.nc".format(base_filepath,date))
coarse_catchments_filepath = ("{0}/diag_version_13_date_{1}/30min_catchments.nc".format(base_filepath,date))
coarse_catchments_fieldname = "catchments"
# cc.advanced_main(filename=river_directions_filepath,
# fieldname="rdirs",
# output_filename=coarse_catchments_filepath,
# output_fieldname="catchments",
# loop_logfile=("{0}/loops_log_{1}.txt".format(base_filepath,date)),
# use_cpp_alg=True)
lake_parameters_filepath = ("{0}/lakeparas_version_13_date_{1}.nc".format(base_filepath,date))
basin_numbers_filepath = ("{0}/diag_version_13_date_{1}/basin_catchment_numbers.nc".format(base_filepath,date))
connected_coarse_catchments_out_filename = ("{0}/diag_version_13_date_{1}/30min_connected_catchments.nc".\
format(base_filepath,date))
connected_coarse_catchments_out_fieldname = "catchments"
basin_catchment_numbers_fieldname = "basin_catchment_numbers"
river_directions_fieldname = "rdirs"
cumulative_flow_filename=("{0}/diag_version_13_date_{1}/30min_flowtocell.nc".format(base_filepath,date))
cumulative_flow_fieldname="cumulative_flow"
cumulative_flow_out_filename=("{0}/diag_version_13_date_{1}/30min"
"_flowtocell_connected.nc".format(base_filepath,date))
cumulative_flow_out_fieldname="cumulative_flow"
cumulative_river_mouth_flow_out_filename=("{0}/diag_version_13_date_{1}/"
"30min_flowtorivermouths_connected.nc".format(base_filepath,date))
cumulative_river_mouth_flow_out_fieldname="cumulative_flow_to_ocean"
cclc.connect_coarse_lake_catchments_driver(coarse_catchments_filepath,
lake_parameters_filepath,
basin_numbers_filepath,
river_directions_filepath,
connected_coarse_catchments_out_filename,
coarse_catchments_fieldname,
connected_coarse_catchments_out_fieldname,
basin_catchment_numbers_fieldname,
river_directions_fieldname,
cumulative_flow_filename,
cumulative_flow_out_filename,
cumulative_flow_fieldname,
cumulative_flow_out_fieldname)
river_mouth_marking_driver.\
advanced_flow_to_rivermouth_calculation_driver(input_river_directions_filename=
river_directions_filepath,
input_flow_to_cell_filename=
cumulative_flow_out_filename,
output_flow_to_river_mouths_filename=
cumulative_river_mouth_flow_out_filename,
input_river_directions_fieldname=
river_directions_fieldname,
input_flow_to_cell_fieldname=
cumulative_flow_out_fieldname,
output_flow_to_river_mouths_fieldname=
cumulative_river_mouth_flow_out_fieldname)
def extract_volumes_for_transient_run(self):
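        """Extract lake volumes for each date of a transient run from its lake
        parameters and basin catchment numbers files."""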
base_filepath = "/Users/thomasriddick/Documents/data/lake_analysis_runs/lake_analysis_one_21_Jun_2021/lakes/results"
#dates = range(15990,10990,-10)
#dates = range(15990,15980,-10)
dates = [0]
for date in dates:
lake_parameters_filepath = ("{0}/lakeparas_version_13_date_{1}.nc".format(base_filepath,date))
basin_catchment_numbers_filepath = ("{0}/diag_version_13_date_{1}/"
"basin_catchment_numbers.nc".format(base_filepath,date))
lake_volumes_out_filepath = ("{0}/diag_version_13_date_{1}/"
"10min_lake_volumes.nc".format(base_filepath,date))
extract_lake_volumes.\
lake_volume_extraction_driver(lake_parameters_filepath,
basin_catchment_numbers_filepath,
lake_volumes_out_filepath)
def main():
"""Select the revelant runs to make
Select runs by uncommenting them and also the revelant object instantation.
"""
lake_drivers = Dynamic_Lake_Drivers()
#lake_drivers.prepare_orography_ICE5G_0k_uncorrected()
#lake_drivers.prepare_orography_ICE5G_0k_corrected()
#lake_drivers.prepare_orography_ICE6G_21k_corrected()
#lake_drivers.prepare_river_directions_with_depressions_from_glac1D()
#lake_drivers.evaluate_glac1D_ts1900_basins()
#import time
# start = time.time()
#lake_drivers.evaluate_ICE6G_lgm_basins()
# end = time.time()
# print(end - start)
#lake_drivers.prepare_basins_from_glac1D()
#lake_drivers.extract_lake_volumes_from_glac1D_basins()
#lake_drivers.connect_catchments_for_glac1D()
lake_drivers.connect_catchments_for_transient_run()
lake_drivers.extract_volumes_for_transient_run()
if __name__ == '__main__':
main()
| [
"Dynamic_HD_Scripts.base.iodriver.advanced_field_loader",
"Dynamic_HD_Scripts.utilities.utilities.advanced_rebase_orography_driver",
"Dynamic_HD_Scripts.tools.connect_coarse_lake_catchments.connect_coarse_lake_catchments_driver",
"Dynamic_HD_Scripts.base.iodriver.advanced_field_writer",
"Dynamic_HD_Scripts.tools.fill_sinks_driver.generate_orography_with_sinks_filled_advanced_driver",
"Dynamic_HD_Scripts.utilities.utilities.change_dtype",
"Dynamic_HD_Scripts.tools.dynamic_lake_operators.reduce_connected_areas_to_points",
"Dynamic_HD_Scripts.utilities.utilities.advanced_extract_true_sinks_from_rdirs",
"Dynamic_HD_Scripts.tools.fill_sinks_driver.advanced_sinkless_flow_directions_generator",
"Dynamic_HD_Scripts.base.iodriver.add_grid_information_to_field",
"Dynamic_HD_Scripts.tools.river_mouth_marking_driver.advanced_flow_to_rivermouth_calculation_driver",
"Dynamic_HD_Scripts.tools.dynamic_lake_operators.advanced_narrow_lake_filtering_driver",
"Dynamic_HD_Scripts.utilities.utilities.advanced_apply_orog_correction_field",
"os.path.splitext",
"Dynamic_HD_Scripts.tools.dynamic_lake_operators.advanced_shallow_lake_filtering_driver",
"Dynamic_HD_Scripts.utilities.utilities.upscale_field_driver",
"time.time",
"Dynamic_HD_Scripts.utilities.utilities.apply_orog_correction_field",
"Dynamic_HD_Scripts.tools.dynamic_lake_operators.advanced_local_minima_finding_driver",
"Dynamic_HD_Scripts.tools.compute_catchments.advanced_main",
"Dynamic_HD_Scripts.utilities.utilities.advanced_replace_corrected_orog_with_orig_for_glcted_grid_points_drivers",
"Dynamic_HD_Scripts.tools.dynamic_lake_operators.advanced_burn_carved_rivers_driver",
"Dynamic_HD_Scripts.tools.flow_to_grid_cell.advanced_main",
"Dynamic_HD_Scripts.base.field.makeEmptyField",
"os.path.join",
"Dynamic_HD_Scripts.tools.extract_lake_volumes.lake_volume_extraction_driver",
"Dynamic_HD_Scripts.utilities.utilities.replace_corrected_orography_with_original_for_glaciated_grid_points_drivers",
"Dynamic_HD_Scripts.utilities.utilities.advanced_orog_correction_field_generator",
"Dynamic_HD_Scripts.tools.determine_river_directions.advanced_river_direction_determination_driver"
] | [((1205, 1259), 'os.path.join', 'join', (['self.orography_path', '"""ice5g_v1_2_00_0k_10min.nc"""'], {}), "(self.orography_path, 'ice5g_v1_2_00_0k_10min.nc')\n", (1209, 1259), False, 'from os.path import join\n'), ((1769, 1818), 'os.path.join', 'join', (['self.lakemask_filepath', '"""empty_lakemask.nc"""'], {}), "(self.lakemask_filepath, 'empty_lakemask.nc')\n", (1773, 1818), False, 'from os.path import join\n'), ((1898, 1951), 'os.path.join', 'join', (['self.orography_path', '"""Ice6g_c_VM5a_10min_0k.nc"""'], {}), "(self.orography_path, 'Ice6g_c_VM5a_10min_0k.nc')\n", (1902, 1951), False, 'from os.path import join\n'), ((2015, 2104), 'Dynamic_HD_Scripts.base.field.makeEmptyField', 'field.makeEmptyField', ([], {'field_type': '"""Generic"""', 'dtype': 'np.int32', 'grid_type': '"""LatLong10min"""'}), "(field_type='Generic', dtype=np.int32, grid_type=\n 'LatLong10min')\n", (2035, 2104), False, 'from Dynamic_HD_Scripts.base import field\n'), ((2113, 2203), 'Dynamic_HD_Scripts.base.iodriver.advanced_field_loader', 'iodriver.advanced_field_loader', (['ICE5G_0k_file'], {'fieldname': 'ICE5G_0k_orography_fieldname'}), '(ICE5G_0k_file, fieldname=\n ICE5G_0k_orography_fieldname)\n', (2143, 2203), False, 'from Dynamic_HD_Scripts.base import iodriver\n'), ((2246, 2355), 'Dynamic_HD_Scripts.base.iodriver.advanced_field_writer', 'iodriver.advanced_field_writer', (['ICE5G_lakemask_filename', 'lakemask', 'ICE5G_lakemask_fieldname'], {'clobber': '(True)'}), '(ICE5G_lakemask_filename, lakemask,\n ICE5G_lakemask_fieldname, clobber=True)\n', (2276, 2355), False, 'from Dynamic_HD_Scripts.base import iodriver\n'), ((2398, 2558), 'Dynamic_HD_Scripts.tools.dynamic_lake_operators.advanced_local_minima_finding_driver', 'dynamic_lake_operators.advanced_local_minima_finding_driver', (['ICE5G_0k_file', 'ICE5G_0k_orography_fieldname', 'ICE5G_minima_filename', 'ICE5G_minima_fieldname'], {}), '(ICE5G_0k_file,\n ICE5G_0k_orography_fieldname, ICE5G_minima_filename, ICE5G_minima_fieldname\n )\n', (2457, 2558), False, 'from Dynamic_HD_Scripts.tools import dynamic_lake_operators\n'), ((2762, 2927), 'Dynamic_HD_Scripts.tools.dynamic_lake_operators.reduce_connected_areas_to_points', 'dynamic_lake_operators.reduce_connected_areas_to_points', (['ICE5G_minima_filename', 'ICE5G_minima_fieldname', 'ICE5G_minima_reduced_filename', 'ICE5G_minima_fieldname'], {}), '(ICE5G_minima_filename,\n ICE5G_minima_fieldname, ICE5G_minima_reduced_filename,\n ICE5G_minima_fieldname)\n', (2817, 2927), False, 'from Dynamic_HD_Scripts.tools import dynamic_lake_operators\n'), ((3120, 3441), 'Dynamic_HD_Scripts.tools.fill_sinks_driver.advanced_sinkless_flow_directions_generator', 'fill_sinks_driver.advanced_sinkless_flow_directions_generator', ([], {'filename': 'ICE5G_0k_file', 'output_filename': 'ICE5G_flowdirs_filename', 'ls_mask_filename': 'ICE5G_landsea_mask_filename', 'fieldname': 'ICE5G_0k_orography_fieldname', 'output_fieldname': 'ICE5G_flowdirs_fieldname', 'ls_mask_fieldname': 'ICE5G_landsea_mask_fieldname'}), '(filename=\n ICE5G_0k_file, output_filename=ICE5G_flowdirs_filename,\n ls_mask_filename=ICE5G_landsea_mask_filename, fieldname=\n ICE5G_0k_orography_fieldname, output_fieldname=ICE5G_flowdirs_fieldname,\n ls_mask_fieldname=ICE5G_landsea_mask_fieldname)\n', (3181, 3441), False, 'from Dynamic_HD_Scripts.tools import fill_sinks_driver\n'), ((3995, 4569), 'Dynamic_HD_Scripts.tools.dynamic_lake_operators.advanced_burn_carved_rivers_driver', 'dynamic_lake_operators.advanced_burn_carved_rivers_driver', ([], {'input_orography_file': 
'ICE5G_0k_file', 'input_orography_fieldname': 'ICE5G_0k_orography_fieldname', 'input_rdirs_file': 'ICE5G_flowdirs_filename', 'input_rdirs_fieldname': 'ICE5G_flowdirs_fieldname', 'input_minima_file': 'ICE5G_minima_reduced_filename', 'input_minima_fieldname': 'ICE5G_minima_fieldname', 'input_lakemask_file': 'ICE5G_lakemask_filename', 'input_lakemask_fieldname': 'ICE5G_lakemask_fieldname', 'output_orography_file': 'ICE5G_output_orog_filename', 'output_orography_fieldname': 'ICE5G_0k_orography_fieldname'}), '(input_orography_file\n =ICE5G_0k_file, input_orography_fieldname=ICE5G_0k_orography_fieldname,\n input_rdirs_file=ICE5G_flowdirs_filename, input_rdirs_fieldname=\n ICE5G_flowdirs_fieldname, input_minima_file=\n ICE5G_minima_reduced_filename, input_minima_fieldname=\n ICE5G_minima_fieldname, input_lakemask_file=ICE5G_lakemask_filename,\n input_lakemask_fieldname=ICE5G_lakemask_fieldname,\n output_orography_file=ICE5G_output_orog_filename,\n output_orography_fieldname=ICE5G_0k_orography_fieldname)\n', (4052, 4569), False, 'from Dynamic_HD_Scripts.tools import dynamic_lake_operators\n'), ((5938, 5992), 'os.path.join', 'join', (['self.orography_path', '"""ice5g_v1_2_00_0k_10min.nc"""'], {}), "(self.orography_path, 'ice5g_v1_2_00_0k_10min.nc')\n", (5942, 5992), False, 'from os.path import join\n'), ((6077, 6266), 'os.path.join', 'join', (['self.orography_corrections_fields_path', '"""orog_corrs_field_ICE5G_and_tarasov_upscaled_srtm30plus_north_america_only_data_ALG4_sinkless_glcc_olson_lsmask_0k_20170517_003802.nc"""'], {}), "(self.orography_corrections_fields_path,\n 'orog_corrs_field_ICE5G_and_tarasov_upscaled_srtm30plus_north_america_only_data_ALG4_sinkless_glcc_olson_lsmask_0k_20170517_003802.nc'\n )\n", (6081, 6266), False, 'from os.path import join\n'), ((7045, 7098), 'os.path.join', 'join', (['self.orography_path', '"""Ice6g_c_VM5a_10min_0k.nc"""'], {}), "(self.orography_path, 'Ice6g_c_VM5a_10min_0k.nc')\n", (7049, 7098), False, 'from os.path import join\n'), ((7812, 7866), 'os.path.join', 'join', (['self.orography_path', '"""ice5g_v1_2_21_0k_10min.nc"""'], {}), "(self.orography_path, 'ice5g_v1_2_21_0k_10min.nc')\n", (7816, 7866), False, 'from os.path import join\n'), ((7874, 8131), 'Dynamic_HD_Scripts.utilities.utilities.change_dtype', 'utilities.change_dtype', ([], {'input_filename': 'original_ls_mask_filename', 'output_filename': 'original_ls_mask_with_new_dtype_filename', 'input_fieldname': 'original_landsea_mask_fieldname', 'output_fieldname': '"""lsmask"""', 'new_dtype': 'np.int32', 'grid_type': '"""LatLong10min"""'}), "(input_filename=original_ls_mask_filename,\n output_filename=original_ls_mask_with_new_dtype_filename,\n input_fieldname=original_landsea_mask_fieldname, output_fieldname=\n 'lsmask', new_dtype=np.int32, grid_type='LatLong10min')\n", (7896, 8131), False, 'from Dynamic_HD_Scripts.utilities import utilities\n'), ((8250, 8546), 'Dynamic_HD_Scripts.utilities.utilities.apply_orog_correction_field', 'utilities.apply_orog_correction_field', ([], {'original_orography_filename': 'original_orography_filename', 'orography_corrections_filename': 'orog_corrections_filename', 'corrected_orography_filename': 'intermediary_orography_filename', 'original_orography_fieldname': '"""orog"""', 'grid_type': '"""LatLong10min"""'}), "(original_orography_filename=\n original_orography_filename, orography_corrections_filename=\n orog_corrections_filename, corrected_orography_filename=\n intermediary_orography_filename, original_orography_fieldname='orog',\n grid_type='LatLong10min')\n", 
(8287, 8546), False, 'from Dynamic_HD_Scripts.utilities import utilities\n'), ((8815, 9169), 'Dynamic_HD_Scripts.utilities.utilities.replace_corrected_orography_with_original_for_glaciated_grid_points_drivers', 'utilities.replace_corrected_orography_with_original_for_glaciated_grid_points_drivers', ([], {'input_corrected_orography_file': 'intermediary_orography_filename', 'input_original_orography_file': 'original_orography_filename', 'input_glacier_mask_file': 'glacial_mask_file', 'out_orography_file': 'second_intermediary_orography_filename', 'grid_type': '"""LatLong10min"""'}), "(\n input_corrected_orography_file=intermediary_orography_filename,\n input_original_orography_file=original_orography_filename,\n input_glacier_mask_file=glacial_mask_file, out_orography_file=\n second_intermediary_orography_filename, grid_type='LatLong10min')\n", (8900, 9169), False, 'from Dynamic_HD_Scripts.utilities import utilities\n'), ((9211, 9512), 'Dynamic_HD_Scripts.base.iodriver.add_grid_information_to_field', 'iodriver.add_grid_information_to_field', ([], {'target_filename': 'orography_filename', 'original_filename': 'second_intermediary_orography_filename', 'target_fieldname': '"""field_value"""', 'original_fieldname': '"""field_value"""', 'flip_ud_raw': '(True)', 'rotate180lr_raw': '(True)', 'grid_desc_file': 'self.ten_minute_grid_filepath'}), "(target_filename=orography_filename,\n original_filename=second_intermediary_orography_filename,\n target_fieldname='field_value', original_fieldname='field_value',\n flip_ud_raw=True, rotate180lr_raw=True, grid_desc_file=self.\n ten_minute_grid_filepath)\n", (9249, 9512), False, 'from Dynamic_HD_Scripts.base import iodriver\n'), ((9834, 10145), 'Dynamic_HD_Scripts.base.iodriver.add_grid_information_to_field', 'iodriver.add_grid_information_to_field', ([], {'target_filename': 'original_ls_mask_with_grid_filename', 'original_filename': 'original_ls_mask_with_new_dtype_filename', 'target_fieldname': '"""lsmask"""', 'original_fieldname': '"""lsmask"""', 'flip_ud_raw': '(True)', 'rotate180lr_raw': '(True)', 'grid_desc_file': 'self.ten_minute_grid_filepath'}), "(target_filename=\n original_ls_mask_with_grid_filename, original_filename=\n original_ls_mask_with_new_dtype_filename, target_fieldname='lsmask',\n original_fieldname='lsmask', flip_ud_raw=True, rotate180lr_raw=True,\n grid_desc_file=self.ten_minute_grid_filepath)\n", (9872, 10145), False, 'from Dynamic_HD_Scripts.base import iodriver\n'), ((10466, 10599), 'Dynamic_HD_Scripts.tools.dynamic_lake_operators.advanced_local_minima_finding_driver', 'dynamic_lake_operators.advanced_local_minima_finding_driver', (['orography_filename', '"""field_value"""', 'minima_filename', 'minima_fieldname'], {}), "(orography_filename,\n 'field_value', minima_filename, minima_fieldname)\n", (10525, 10599), False, 'from Dynamic_HD_Scripts.tools import dynamic_lake_operators\n'), ((10808, 10945), 'Dynamic_HD_Scripts.tools.dynamic_lake_operators.reduce_connected_areas_to_points', 'dynamic_lake_operators.reduce_connected_areas_to_points', (['minima_filename', 'minima_fieldname', 'minima_reduced_filename', 'minima_fieldname'], {}), '(minima_filename,\n minima_fieldname, minima_reduced_filename, minima_fieldname)\n', (10863, 10945), False, 'from Dynamic_HD_Scripts.tools import dynamic_lake_operators\n'), ((11142, 11410), 'Dynamic_HD_Scripts.tools.fill_sinks_driver.advanced_sinkless_flow_directions_generator', 'fill_sinks_driver.advanced_sinkless_flow_directions_generator', ([], {'filename': 'orography_filename', 'output_filename': 
'rdirs_filename', 'ls_mask_filename': 'original_ls_mask_with_grid_filename', 'fieldname': '"""field_value"""', 'output_fieldname': '"""rdir"""', 'ls_mask_fieldname': '"""lsmask"""'}), "(filename=\n orography_filename, output_filename=rdirs_filename, ls_mask_filename=\n original_ls_mask_with_grid_filename, fieldname='field_value',\n output_fieldname='rdir', ls_mask_fieldname='lsmask')\n", (11203, 11410), False, 'from Dynamic_HD_Scripts.tools import fill_sinks_driver\n'), ((11968, 12443), 'Dynamic_HD_Scripts.tools.dynamic_lake_operators.advanced_burn_carved_rivers_driver', 'dynamic_lake_operators.advanced_burn_carved_rivers_driver', ([], {'input_orography_file': 'orography_filename', 'input_orography_fieldname': '"""field_value"""', 'input_rdirs_file': 'rdirs_filename', 'input_rdirs_fieldname': '"""rdir"""', 'input_minima_file': 'minima_filename', 'input_minima_fieldname': 'minima_fieldname', 'input_lakemask_file': 'lakemask_filename', 'input_lakemask_fieldname': 'lakemask_fieldname', 'output_orography_file': 'output_orog_filename', 'output_orography_fieldname': '"""field_value"""'}), "(input_orography_file\n =orography_filename, input_orography_fieldname='field_value',\n input_rdirs_file=rdirs_filename, input_rdirs_fieldname='rdir',\n input_minima_file=minima_filename, input_minima_fieldname=\n minima_fieldname, input_lakemask_file=lakemask_filename,\n input_lakemask_fieldname=lakemask_fieldname, output_orography_file=\n output_orog_filename, output_orography_fieldname='field_value')\n", (12025, 12443), False, 'from Dynamic_HD_Scripts.tools import dynamic_lake_operators\n'), ((13823, 13877), 'os.path.join', 'join', (['self.orography_path', '"""ice5g_v1_2_00_0k_10min.nc"""'], {}), "(self.orography_path, 'ice5g_v1_2_00_0k_10min.nc')\n", (13827, 13877), False, 'from os.path import join\n'), ((13954, 14007), 'os.path.join', 'join', (['self.orography_path', '"""Ice6g_c_VM5a_10min_0k.nc"""'], {}), "(self.orography_path, 'Ice6g_c_VM5a_10min_0k.nc')\n", (13958, 14007), False, 'from os.path import join\n'), ((14070, 14124), 'os.path.join', 'join', (['self.orography_path', '"""Ice6g_c_VM5a_10min_21k.nc"""'], {}), "(self.orography_path, 'Ice6g_c_VM5a_10min_21k.nc')\n", (14074, 14124), False, 'from os.path import join\n'), ((14194, 14383), 'os.path.join', 'join', (['self.orography_corrections_fields_path', '"""orog_corrs_field_ICE5G_and_tarasov_upscaled_srtm30plus_north_america_only_data_ALG4_sinkless_glcc_olson_lsmask_0k_20170517_003802.nc"""'], {}), "(self.orography_corrections_fields_path,\n 'orog_corrs_field_ICE5G_and_tarasov_upscaled_srtm30plus_north_america_only_data_ALG4_sinkless_glcc_olson_lsmask_0k_20170517_003802.nc'\n )\n", (14198, 14383), False, 'from os.path import join\n'), ((15697, 15750), 'os.path.join', 'join', (['self.orography_path', '"""Ice6g_c_VM5a_10min_0k.nc"""'], {}), "(self.orography_path, 'Ice6g_c_VM5a_10min_0k.nc')\n", (15701, 15750), False, 'from os.path import join\n'), ((16645, 16699), 'os.path.join', 'join', (['self.orography_path', '"""ice5g_v1_2_21_0k_10min.nc"""'], {}), "(self.orography_path, 'ice5g_v1_2_21_0k_10min.nc')\n", (16649, 16699), False, 'from os.path import join\n'), ((16707, 16964), 'Dynamic_HD_Scripts.utilities.utilities.change_dtype', 'utilities.change_dtype', ([], {'input_filename': 'original_ls_mask_filename', 'output_filename': 'original_ls_mask_with_new_dtype_filename', 'input_fieldname': 'original_landsea_mask_fieldname', 'output_fieldname': '"""lsmask"""', 'new_dtype': 'np.int32', 'grid_type': '"""LatLong10min"""'}), 
"(input_filename=original_ls_mask_filename,\n output_filename=original_ls_mask_with_new_dtype_filename,\n input_fieldname=original_landsea_mask_fieldname, output_fieldname=\n 'lsmask', new_dtype=np.int32, grid_type='LatLong10min')\n", (16729, 16964), False, 'from Dynamic_HD_Scripts.utilities import utilities\n'), ((17083, 17379), 'Dynamic_HD_Scripts.utilities.utilities.apply_orog_correction_field', 'utilities.apply_orog_correction_field', ([], {'original_orography_filename': 'original_orography_filename', 'orography_corrections_filename': 'orog_corrections_filename', 'corrected_orography_filename': 'intermediary_orography_filename', 'original_orography_fieldname': '"""orog"""', 'grid_type': '"""LatLong10min"""'}), "(original_orography_filename=\n original_orography_filename, orography_corrections_filename=\n orog_corrections_filename, corrected_orography_filename=\n intermediary_orography_filename, original_orography_fieldname='orog',\n grid_type='LatLong10min')\n", (17120, 17379), False, 'from Dynamic_HD_Scripts.utilities import utilities\n'), ((17648, 18002), 'Dynamic_HD_Scripts.utilities.utilities.replace_corrected_orography_with_original_for_glaciated_grid_points_drivers', 'utilities.replace_corrected_orography_with_original_for_glaciated_grid_points_drivers', ([], {'input_corrected_orography_file': 'intermediary_orography_filename', 'input_original_orography_file': 'original_orography_filename', 'input_glacier_mask_file': 'glacial_mask_file', 'out_orography_file': 'second_intermediary_orography_filename', 'grid_type': '"""LatLong10min"""'}), "(\n input_corrected_orography_file=intermediary_orography_filename,\n input_original_orography_file=original_orography_filename,\n input_glacier_mask_file=glacial_mask_file, out_orography_file=\n second_intermediary_orography_filename, grid_type='LatLong10min')\n", (17733, 18002), False, 'from Dynamic_HD_Scripts.utilities import utilities\n'), ((18044, 18345), 'Dynamic_HD_Scripts.base.iodriver.add_grid_information_to_field', 'iodriver.add_grid_information_to_field', ([], {'target_filename': 'orography_filename', 'original_filename': 'second_intermediary_orography_filename', 'target_fieldname': '"""field_value"""', 'original_fieldname': '"""field_value"""', 'flip_ud_raw': '(True)', 'rotate180lr_raw': '(True)', 'grid_desc_file': 'self.ten_minute_grid_filepath'}), "(target_filename=orography_filename,\n original_filename=second_intermediary_orography_filename,\n target_fieldname='field_value', original_fieldname='field_value',\n flip_ud_raw=True, rotate180lr_raw=True, grid_desc_file=self.\n ten_minute_grid_filepath)\n", (18082, 18345), False, 'from Dynamic_HD_Scripts.base import iodriver\n'), ((18667, 18978), 'Dynamic_HD_Scripts.base.iodriver.add_grid_information_to_field', 'iodriver.add_grid_information_to_field', ([], {'target_filename': 'original_ls_mask_with_grid_filename', 'original_filename': 'original_ls_mask_with_new_dtype_filename', 'target_fieldname': '"""lsmask"""', 'original_fieldname': '"""lsmask"""', 'flip_ud_raw': '(True)', 'rotate180lr_raw': '(True)', 'grid_desc_file': 'self.ten_minute_grid_filepath'}), "(target_filename=\n original_ls_mask_with_grid_filename, original_filename=\n original_ls_mask_with_new_dtype_filename, target_fieldname='lsmask',\n original_fieldname='lsmask', flip_ud_raw=True, rotate180lr_raw=True,\n grid_desc_file=self.ten_minute_grid_filepath)\n", (18705, 18978), False, 'from Dynamic_HD_Scripts.base import iodriver\n'), ((19299, 19432), 
'Dynamic_HD_Scripts.tools.dynamic_lake_operators.advanced_local_minima_finding_driver', 'dynamic_lake_operators.advanced_local_minima_finding_driver', (['orography_filename', '"""field_value"""', 'minima_filename', 'minima_fieldname'], {}), "(orography_filename,\n 'field_value', minima_filename, minima_fieldname)\n", (19358, 19432), False, 'from Dynamic_HD_Scripts.tools import dynamic_lake_operators\n'), ((19641, 19778), 'Dynamic_HD_Scripts.tools.dynamic_lake_operators.reduce_connected_areas_to_points', 'dynamic_lake_operators.reduce_connected_areas_to_points', (['minima_filename', 'minima_fieldname', 'minima_reduced_filename', 'minima_fieldname'], {}), '(minima_filename,\n minima_fieldname, minima_reduced_filename, minima_fieldname)\n', (19696, 19778), False, 'from Dynamic_HD_Scripts.tools import dynamic_lake_operators\n'), ((19975, 20243), 'Dynamic_HD_Scripts.tools.fill_sinks_driver.advanced_sinkless_flow_directions_generator', 'fill_sinks_driver.advanced_sinkless_flow_directions_generator', ([], {'filename': 'orography_filename', 'output_filename': 'rdirs_filename', 'ls_mask_filename': 'original_ls_mask_with_grid_filename', 'fieldname': '"""field_value"""', 'output_fieldname': '"""rdir"""', 'ls_mask_fieldname': '"""lsmask"""'}), "(filename=\n orography_filename, output_filename=rdirs_filename, ls_mask_filename=\n original_ls_mask_with_grid_filename, fieldname='field_value',\n output_fieldname='rdir', ls_mask_fieldname='lsmask')\n", (20036, 20243), False, 'from Dynamic_HD_Scripts.tools import fill_sinks_driver\n'), ((20801, 21278), 'Dynamic_HD_Scripts.tools.dynamic_lake_operators.advanced_burn_carved_rivers_driver', 'dynamic_lake_operators.advanced_burn_carved_rivers_driver', ([], {'input_orography_file': 'orography_filename', 'input_orography_fieldname': '"""field_value"""', 'input_rdirs_file': 'rdirs_filename', 'input_rdirs_fieldname': '"""rdir"""', 'input_minima_file': 'minima_filename', 'input_minima_fieldname': 'minima_fieldname', 'input_lakemask_file': 'lakemask_filename', 'input_lakemask_fieldname': 'lakemask_fieldname', 'output_orography_file': 'output_0k_ice5g_orog_filename', 'output_orography_fieldname': '"""Topo"""'}), "(input_orography_file\n =orography_filename, input_orography_fieldname='field_value',\n input_rdirs_file=rdirs_filename, input_rdirs_fieldname='rdir',\n input_minima_file=minima_filename, input_minima_fieldname=\n minima_fieldname, input_lakemask_file=lakemask_filename,\n input_lakemask_fieldname=lakemask_fieldname, output_orography_file=\n output_0k_ice5g_orog_filename, output_orography_fieldname='Topo')\n", (20858, 21278), False, 'from Dynamic_HD_Scripts.tools import dynamic_lake_operators\n'), ((22524, 22982), 'Dynamic_HD_Scripts.utilities.utilities.advanced_rebase_orography_driver', 'utilities.advanced_rebase_orography_driver', ([], {'orography_filename': 'ice6g_21k_filename', 'present_day_base_orography_filename': 'ice6g_0k_filename', 'present_day_reference_orography_filename': 'output_0k_ice5g_orog_filename', 'rebased_orography_filename': 'output_21k_ice6g_orog_filename', 'orography_fieldname': '"""Topo"""', 'present_day_base_orography_fieldname': '"""Topo"""', 'present_day_reference_orography_fieldname': '"""Topo"""', 'rebased_orography_fieldname': '"""Topo"""'}), "(orography_filename=\n ice6g_21k_filename, present_day_base_orography_filename=\n ice6g_0k_filename, present_day_reference_orography_filename=\n output_0k_ice5g_orog_filename, rebased_orography_filename=\n output_21k_ice6g_orog_filename, orography_fieldname='Topo',\n 
present_day_base_orography_fieldname='Topo',\n present_day_reference_orography_fieldname='Topo',\n rebased_orography_fieldname='Topo')\n", (22566, 22982), False, 'from Dynamic_HD_Scripts.utilities import utilities\n'), ((23524, 23858), 'Dynamic_HD_Scripts.tools.fill_sinks_driver.generate_orography_with_sinks_filled_advanced_driver', 'fill_sinks_driver.generate_orography_with_sinks_filled_advanced_driver', (['output_21k_ice6g_orog_filename', 'output_21k_ice6g_orog_sinkless_filename', '"""Topo"""', '"""Topo"""'], {'ls_mask_filename': 'None', 'truesinks_filename': 'None', 'ls_mask_fieldname': 'None', 'truesinks_fieldname': 'None', 'add_slight_slope_when_filling_sinks': '(False)', 'slope_param': '(0.1)'}), "(\n output_21k_ice6g_orog_filename, output_21k_ice6g_orog_sinkless_filename,\n 'Topo', 'Topo', ls_mask_filename=None, truesinks_filename=None,\n ls_mask_fieldname=None, truesinks_fieldname=None,\n add_slight_slope_when_filling_sinks=False, slope_param=0.1)\n", (23594, 23858), False, 'from Dynamic_HD_Scripts.tools import fill_sinks_driver\n'), ((24432, 24550), 'Dynamic_HD_Scripts.base.iodriver.advanced_field_loader', 'iodriver.advanced_field_loader', (['output_21k_ice6g_orog_sinkless_filename'], {'fieldname': '"""Topo"""', 'adjust_orientation': '(True)'}), "(output_21k_ice6g_orog_sinkless_filename,\n fieldname='Topo', adjust_orientation=True)\n", (24462, 24550), False, 'from Dynamic_HD_Scripts.base import iodriver\n'), ((24693, 24803), 'Dynamic_HD_Scripts.base.iodriver.advanced_field_loader', 'iodriver.advanced_field_loader', (['output_21k_ice6g_orog_filename'], {'fieldname': '"""Topo"""', 'adjust_orientation': '(True)'}), "(output_21k_ice6g_orog_filename, fieldname=\n 'Topo', adjust_orientation=True)\n", (24723, 24803), False, 'from Dynamic_HD_Scripts.base import iodriver\n'), ((24879, 24978), 'Dynamic_HD_Scripts.base.iodriver.advanced_field_loader', 'iodriver.advanced_field_loader', (['ice6g_21k_filename'], {'fieldname': '"""sftgif"""', 'adjust_orientation': '(True)'}), "(ice6g_21k_filename, fieldname='sftgif',\n adjust_orientation=True)\n", (24909, 24978), False, 'from Dynamic_HD_Scripts.base import iodriver\n'), ((25120, 25218), 'Dynamic_HD_Scripts.base.iodriver.advanced_field_loader', 'iodriver.advanced_field_loader', (['ice6g_21k_filename'], {'fieldname': '"""sftlf"""', 'adjust_orientation': '(True)'}), "(ice6g_21k_filename, fieldname='sftlf',\n adjust_orientation=True)\n", (25150, 25218), False, 'from Dynamic_HD_Scripts.base import iodriver\n'), ((25702, 25841), 'Dynamic_HD_Scripts.base.iodriver.advanced_field_writer', 'iodriver.advanced_field_writer', (['output_21k_ice6g_orog_sinkless_improved_filename', 'ice6g_sinkless_field'], {'fieldname': '"""Topo"""', 'clobber': '(True)'}), "(output_21k_ice6g_orog_sinkless_improved_filename\n , ice6g_sinkless_field, fieldname='Topo', clobber=True)\n", (25732, 25841), False, 'from Dynamic_HD_Scripts.base import iodriver\n'), ((25882, 26029), 'Dynamic_HD_Scripts.tools.dynamic_lake_operators.advanced_local_minima_finding_driver', 'dynamic_lake_operators.advanced_local_minima_finding_driver', (['output_21k_ice6g_orog_filename', '"""Topo"""', 'minima_filename_21k', 'minima_fieldname'], {}), "(\n output_21k_ice6g_orog_filename, 'Topo', minima_filename_21k,\n minima_fieldname)\n", (25941, 26029), False, 'from Dynamic_HD_Scripts.tools import dynamic_lake_operators\n'), ((26233, 26378), 'Dynamic_HD_Scripts.tools.dynamic_lake_operators.reduce_connected_areas_to_points', 'dynamic_lake_operators.reduce_connected_areas_to_points', (['minima_filename_21k', 
'minima_fieldname', 'minima_reduced_filename_21k', 'minima_fieldname'], {}), '(minima_filename_21k,\n minima_fieldname, minima_reduced_filename_21k, minima_fieldname)\n', (26288, 26378), False, 'from Dynamic_HD_Scripts.tools import dynamic_lake_operators\n'), ((27325, 27336), 'time.time', 'time.time', ([], {}), '()\n', (27334, 27336), False, 'import time\n'), ((27518, 27572), 'os.path.join', 'join', (['self.orography_path', '"""ice5g_v1_2_00_0k_10min.nc"""'], {}), "(self.orography_path, 'ice5g_v1_2_00_0k_10min.nc')\n", (27522, 27572), False, 'from os.path import join\n'), ((27651, 27824), 'os.path.join', 'join', (['self.truesinks_path', '"""truesinks_ICE5G_and_tarasov_upscaled_srtm30plus_north_america_only_data_ALG4_sinkless_glcc_olson_lsmask_0k_20191014_173825_with_grid.nc"""'], {}), "(self.truesinks_path,\n 'truesinks_ICE5G_and_tarasov_upscaled_srtm30plus_north_america_only_data_ALG4_sinkless_glcc_olson_lsmask_0k_20191014_173825_with_grid.nc'\n )\n", (27655, 27824), False, 'from os.path import join\n'), ((29159, 29350), 'os.path.join', 'join', (['self.orography_corrections_fields_path', '"""orog_corrs_field_ICE5G_and_tarasov_upscaled_srtm30plus_north_america_only_data_ALG4_sinkless_glcc_olson_lsmask_0k_20170517_003802_g.nc"""'], {}), "(self.orography_corrections_fields_path,\n 'orog_corrs_field_ICE5G_and_tarasov_upscaled_srtm30plus_north_america_only_data_ALG4_sinkless_glcc_olson_lsmask_0k_20170517_003802_g.nc'\n )\n", (29163, 29350), False, 'from os.path import join\n'), ((33174, 33228), 'os.path.join', 'join', (['self.orography_path', '"""ice5g_v1_2_00_0k_10min.nc"""'], {}), "(self.orography_path, 'ice5g_v1_2_00_0k_10min.nc')\n", (33178, 33228), False, 'from os.path import join\n'), ((33236, 33493), 'Dynamic_HD_Scripts.utilities.utilities.change_dtype', 'utilities.change_dtype', ([], {'input_filename': 'original_ls_mask_filename', 'output_filename': 'original_ls_mask_with_new_dtype_filename', 'input_fieldname': 'original_landsea_mask_fieldname', 'output_fieldname': '"""lsmask"""', 'new_dtype': 'np.int32', 'grid_type': '"""LatLong10min"""'}), "(input_filename=original_ls_mask_filename,\n output_filename=original_ls_mask_with_new_dtype_filename,\n input_fieldname=original_landsea_mask_fieldname, output_fieldname=\n 'lsmask', new_dtype=np.int32, grid_type='LatLong10min')\n", (33258, 33493), False, 'from Dynamic_HD_Scripts.utilities import utilities\n'), ((33612, 33887), 'Dynamic_HD_Scripts.utilities.utilities.advanced_apply_orog_correction_field', 'utilities.advanced_apply_orog_correction_field', ([], {'original_orography_filename': 'original_orography_filename', 'orography_corrections_filename': 'orog_corrections_filename', 'corrected_orography_filename': 'intermediary_orography_filename', 'original_orography_fieldname': '"""orog"""'}), "(original_orography_filename=\n original_orography_filename, orography_corrections_filename=\n orog_corrections_filename, corrected_orography_filename=\n intermediary_orography_filename, original_orography_fieldname='orog')\n", (33658, 33887), False, 'from Dynamic_HD_Scripts.utilities import utilities\n'), ((34270, 34756), 'Dynamic_HD_Scripts.utilities.utilities.advanced_replace_corrected_orog_with_orig_for_glcted_grid_points_drivers', 'utilities.advanced_replace_corrected_orog_with_orig_for_glcted_grid_points_drivers', ([], {'input_corrected_orography_file': 'intermediary_orography_filename', 'input_original_orography_file': 'original_orography_filename', 'input_glacier_mask_file': 'glacial_mask_file', 'out_orography_file': 
'second_intermediary_orography_filename', 'input_corrected_orography_fieldname': 'None', 'input_original_orography_fieldname': 'None', 'input_glacier_mask_fieldname': 'None', 'out_orography_fieldname': 'None'}), '(\n input_corrected_orography_file=intermediary_orography_filename,\n input_original_orography_file=original_orography_filename,\n input_glacier_mask_file=glacial_mask_file, out_orography_file=\n second_intermediary_orography_filename,\n input_corrected_orography_fieldname=None,\n input_original_orography_fieldname=None, input_glacier_mask_fieldname=\n None, out_orography_fieldname=None)\n', (34352, 34756), False, 'from Dynamic_HD_Scripts.utilities import utilities\n'), ((34883, 35229), 'Dynamic_HD_Scripts.base.iodriver.add_grid_information_to_field', 'iodriver.add_grid_information_to_field', ([], {'target_filename': 'original_ls_mask_with_grid_filename', 'original_filename': 'original_ls_mask_with_new_dtype_filename', 'target_fieldname': '"""lsmask"""', 'original_fieldname': '"""lsmask"""', 'flip_ud_raw': 'flip_ls_mask_0k', 'rotate180lr_raw': 'rotate_lsmask_180_lr_0k', 'grid_desc_file': 'self.ten_minute_grid_filepath'}), "(target_filename=\n original_ls_mask_with_grid_filename, original_filename=\n original_ls_mask_with_new_dtype_filename, target_fieldname='lsmask',\n original_fieldname='lsmask', flip_ud_raw=flip_ls_mask_0k,\n rotate180lr_raw=rotate_lsmask_180_lr_0k, grid_desc_file=self.\n ten_minute_grid_filepath)\n", (34921, 35229), False, 'from Dynamic_HD_Scripts.base import iodriver\n'), ((35593, 35726), 'Dynamic_HD_Scripts.tools.dynamic_lake_operators.advanced_local_minima_finding_driver', 'dynamic_lake_operators.advanced_local_minima_finding_driver', (['orography_filename', '"""field_value"""', 'minima_filename', 'minima_fieldname'], {}), "(orography_filename,\n 'field_value', minima_filename, minima_fieldname)\n", (35652, 35726), False, 'from Dynamic_HD_Scripts.tools import dynamic_lake_operators\n'), ((35935, 36072), 'Dynamic_HD_Scripts.tools.dynamic_lake_operators.reduce_connected_areas_to_points', 'dynamic_lake_operators.reduce_connected_areas_to_points', (['minima_filename', 'minima_fieldname', 'minima_reduced_filename', 'minima_fieldname'], {}), '(minima_filename,\n minima_fieldname, minima_reduced_filename, minima_fieldname)\n', (35990, 36072), False, 'from Dynamic_HD_Scripts.tools import dynamic_lake_operators\n'), ((36269, 36616), 'Dynamic_HD_Scripts.tools.fill_sinks_driver.advanced_sinkless_flow_directions_generator', 'fill_sinks_driver.advanced_sinkless_flow_directions_generator', ([], {'filename': 'orography_filename', 'output_filename': 'rdirs_filename', 'ls_mask_filename': 'original_ls_mask_with_grid_filename', 'truesinks_filename': 'true_sinks_filename', 'fieldname': '"""field_value"""', 'output_fieldname': '"""rdir"""', 'ls_mask_fieldname': '"""lsmask"""', 'truesinks_fieldname': '"""true_sinks"""'}), "(filename=\n orography_filename, output_filename=rdirs_filename, ls_mask_filename=\n original_ls_mask_with_grid_filename, truesinks_filename=\n true_sinks_filename, fieldname='field_value', output_fieldname='rdir',\n ls_mask_fieldname='lsmask', truesinks_fieldname='true_sinks')\n", (36330, 36616), False, 'from Dynamic_HD_Scripts.tools import fill_sinks_driver\n'), ((37451, 38087), 'Dynamic_HD_Scripts.tools.dynamic_lake_operators.advanced_burn_carved_rivers_driver', 'dynamic_lake_operators.advanced_burn_carved_rivers_driver', ([], {'input_orography_file': 'orography_filename', 'input_orography_fieldname': '"""field_value"""', 'input_rdirs_file': 
'rdirs_filename', 'input_rdirs_fieldname': '"""rdir"""', 'input_minima_file': 'minima_filename', 'input_minima_fieldname': 'minima_fieldname', 'input_lakemask_file': 'lakemask_filename', 'input_lakemask_fieldname': 'lakemask_fieldname', 'output_orography_file': 'output_0k_ice5g_orog_filename', 'output_orography_fieldname': '"""Topo"""', 'add_slope': '(True)', 'max_exploration_range': '(10)', 'minimum_height_change_threshold': '(5.0)', 'short_path_threshold': '(6)', 'short_minimum_height_change_threshold': '(0.25)'}), "(input_orography_file\n =orography_filename, input_orography_fieldname='field_value',\n input_rdirs_file=rdirs_filename, input_rdirs_fieldname='rdir',\n input_minima_file=minima_filename, input_minima_fieldname=\n minima_fieldname, input_lakemask_file=lakemask_filename,\n input_lakemask_fieldname=lakemask_fieldname, output_orography_file=\n output_0k_ice5g_orog_filename, output_orography_fieldname='Topo',\n add_slope=True, max_exploration_range=10,\n minimum_height_change_threshold=5.0, short_path_threshold=6,\n short_minimum_height_change_threshold=0.25)\n", (37508, 38087), False, 'from Dynamic_HD_Scripts.tools import dynamic_lake_operators\n'), ((39698, 39796), 'os.path.join', 'path.join', (['self.orography_corrections_fields_path', "('ice5g_0k_lake_corrs_' + file_label + '.nc')"], {}), "(self.orography_corrections_fields_path, 'ice5g_0k_lake_corrs_' +\n file_label + '.nc')\n", (39707, 39796), True, 'import os.path as path\n'), ((39856, 40229), 'Dynamic_HD_Scripts.utilities.utilities.advanced_orog_correction_field_generator', 'utilities.advanced_orog_correction_field_generator', ([], {'original_orography_filename': 'original_orography_filename', 'corrected_orography_filename': 'output_0k_ice5g_orog_filename', 'orography_corrections_filename': 'new_orography_corrections_filename', 'original_orography_fieldname': '"""orog"""', 'corrected_orography_fieldname': '"""Topo"""', 'orography_corrections_fieldname': '"""orog"""'}), "(original_orography_filename\n =original_orography_filename, corrected_orography_filename=\n output_0k_ice5g_orog_filename, orography_corrections_filename=\n new_orography_corrections_filename, original_orography_fieldname='orog',\n corrected_orography_fieldname='Topo', orography_corrections_fieldname=\n 'orog')\n", (39906, 40229), False, 'from Dynamic_HD_Scripts.utilities import utilities\n'), ((40939, 41405), 'Dynamic_HD_Scripts.utilities.utilities.advanced_rebase_orography_driver', 'utilities.advanced_rebase_orography_driver', ([], {'orography_filename': 'working_orog_filename', 'present_day_base_orography_filename': 'working_orog_0k_filename', 'present_day_reference_orography_filename': 'output_0k_ice5g_orog_filename', 'rebased_orography_filename': 'output_working_orog_filename', 'orography_fieldname': '"""Topo"""', 'present_day_base_orography_fieldname': '"""Topo"""', 'present_day_reference_orography_fieldname': '"""Topo"""', 'rebased_orography_fieldname': '"""Topo"""'}), "(orography_filename=\n working_orog_filename, present_day_base_orography_filename=\n working_orog_0k_filename, present_day_reference_orography_filename=\n output_0k_ice5g_orog_filename, rebased_orography_filename=\n output_working_orog_filename, orography_fieldname='Topo',\n present_day_base_orography_fieldname='Topo',\n present_day_reference_orography_fieldname='Topo',\n rebased_orography_fieldname='Topo')\n", (40981, 41405), False, 'from Dynamic_HD_Scripts.utilities import utilities\n'), ((41947, 42277), 
'Dynamic_HD_Scripts.tools.fill_sinks_driver.generate_orography_with_sinks_filled_advanced_driver', 'fill_sinks_driver.generate_orography_with_sinks_filled_advanced_driver', (['output_working_orog_filename', 'output_working_orog_sinkless_filename', '"""Topo"""', '"""Topo"""'], {'ls_mask_filename': 'None', 'truesinks_filename': 'None', 'ls_mask_fieldname': 'None', 'truesinks_fieldname': 'None', 'add_slight_slope_when_filling_sinks': '(False)', 'slope_param': '(0.1)'}), "(\n output_working_orog_filename, output_working_orog_sinkless_filename,\n 'Topo', 'Topo', ls_mask_filename=None, truesinks_filename=None,\n ls_mask_fieldname=None, truesinks_fieldname=None,\n add_slight_slope_when_filling_sinks=False, slope_param=0.1)\n", (42017, 42277), False, 'from Dynamic_HD_Scripts.tools import fill_sinks_driver\n'), ((42829, 43285), 'Dynamic_HD_Scripts.tools.dynamic_lake_operators.advanced_shallow_lake_filtering_driver', 'dynamic_lake_operators.advanced_shallow_lake_filtering_driver', ([], {'input_unfilled_orography_file': 'output_working_orog_filename', 'input_unfilled_orography_fieldname': '"""Topo"""', 'input_filled_orography_file': 'output_working_orog_sinkless_filename', 'input_filled_orography_fieldname': '"""Topo"""', 'output_unfilled_orography_file': 'output_intermediary_filtered_working_orog_filename', 'output_unfilled_orography_fieldname': '"""Topo"""', 'minimum_depth_threshold': '(5.0)'}), "(\n input_unfilled_orography_file=output_working_orog_filename,\n input_unfilled_orography_fieldname='Topo', input_filled_orography_file=\n output_working_orog_sinkless_filename, input_filled_orography_fieldname\n ='Topo', output_unfilled_orography_file=\n output_intermediary_filtered_working_orog_filename,\n output_unfilled_orography_fieldname='Topo', minimum_depth_threshold=5.0)\n", (42890, 43285), False, 'from Dynamic_HD_Scripts.tools import dynamic_lake_operators\n'), ((43702, 44249), 'Dynamic_HD_Scripts.tools.dynamic_lake_operators.advanced_narrow_lake_filtering_driver', 'dynamic_lake_operators.advanced_narrow_lake_filtering_driver', ([], {'input_unfilled_orography_file': 'output_intermediary_filtered_working_orog_filename', 'input_unfilled_orography_fieldname': '"""Topo"""', 'input_filled_orography_file': 'output_working_orog_sinkless_filename', 'input_filled_orography_fieldname': '"""Topo"""', 'output_unfilled_orography_file': 'output_filtered_working_orog_filename', 'output_unfilled_orography_fieldname': '"""Topo"""', 'interior_cell_min_masked_neighbors': '(5)', 'edge_cell_max_masked_neighbors': '(4)', 'max_range': '(5)', 'iterations': '(5)'}), "(\n input_unfilled_orography_file=\n output_intermediary_filtered_working_orog_filename,\n input_unfilled_orography_fieldname='Topo', input_filled_orography_file=\n output_working_orog_sinkless_filename, input_filled_orography_fieldname\n ='Topo', output_unfilled_orography_file=\n output_filtered_working_orog_filename,\n output_unfilled_orography_fieldname='Topo',\n interior_cell_min_masked_neighbors=5, edge_cell_max_masked_neighbors=4,\n max_range=5, iterations=5)\n", (43762, 44249), False, 'from Dynamic_HD_Scripts.tools import dynamic_lake_operators\n'), ((44953, 45069), 'Dynamic_HD_Scripts.base.iodriver.advanced_field_loader', 'iodriver.advanced_field_loader', (['output_working_orog_sinkless_filename'], {'fieldname': '"""Topo"""', 'adjust_orientation': '(True)'}), "(output_working_orog_sinkless_filename,\n fieldname='Topo', adjust_orientation=True)\n", (44983, 45069), False, 'from Dynamic_HD_Scripts.base import iodriver\n'), ((45219, 45335), 
'Dynamic_HD_Scripts.base.iodriver.advanced_field_loader', 'iodriver.advanced_field_loader', (['output_filtered_working_orog_filename'], {'fieldname': '"""Topo"""', 'adjust_orientation': '(True)'}), "(output_filtered_working_orog_filename,\n fieldname='Topo', adjust_orientation=True)\n", (45249, 45335), False, 'from Dynamic_HD_Scripts.base import iodriver\n'), ((45415, 45570), 'Dynamic_HD_Scripts.base.iodriver.advanced_field_loader', 'iodriver.advanced_field_loader', (['glacier_mask_filename'], {'time_slice': 'glacier_mask_timestep', 'fieldname': 'glacier_mask_fieldname', 'adjust_orientation': '(True)'}), '(glacier_mask_filename, time_slice=\n glacier_mask_timestep, fieldname=glacier_mask_fieldname,\n adjust_orientation=True)\n', (45445, 45570), False, 'from Dynamic_HD_Scripts.base import iodriver\n'), ((45763, 45991), 'Dynamic_HD_Scripts.base.iodriver.advanced_field_loader', 'iodriver.advanced_field_loader', (['ls_mask_filename'], {'time_slice': 'ls_mask_timestep', 'fieldname': 'ls_mask_fieldname', 'adjust_orientation': '(False)', 'grid_desc_file': '"""/Users/thomasriddick/Documents/data/HDdata/grids/grid_10min.txt"""'}), "(ls_mask_filename, time_slice=\n ls_mask_timestep, fieldname=ls_mask_fieldname, adjust_orientation=False,\n grid_desc_file=\n '/Users/thomasriddick/Documents/data/HDdata/grids/grid_10min.txt')\n", (45793, 45991), False, 'from Dynamic_HD_Scripts.base import iodriver\n'), ((46809, 46952), 'Dynamic_HD_Scripts.base.iodriver.advanced_field_writer', 'iodriver.advanced_field_writer', (['output_working_orog_sinkless_improved_filename', 'working_orog_sinkless_field'], {'fieldname': '"""Topo"""', 'clobber': '(True)'}), "(output_working_orog_sinkless_improved_filename,\n working_orog_sinkless_field, fieldname='Topo', clobber=True)\n", (46839, 46952), False, 'from Dynamic_HD_Scripts.base import iodriver\n'), ((46994, 47157), 'Dynamic_HD_Scripts.tools.dynamic_lake_operators.advanced_local_minima_finding_driver', 'dynamic_lake_operators.advanced_local_minima_finding_driver', (['output_filtered_working_orog_filename', '"""Topo"""', 'minima_working_orog_filename', 'minima_fieldname'], {}), "(\n output_filtered_working_orog_filename, 'Topo',\n minima_working_orog_filename, minima_fieldname)\n", (47053, 47157), False, 'from Dynamic_HD_Scripts.tools import dynamic_lake_operators\n'), ((47361, 47529), 'Dynamic_HD_Scripts.tools.dynamic_lake_operators.reduce_connected_areas_to_points', 'dynamic_lake_operators.reduce_connected_areas_to_points', (['minima_working_orog_filename', 'minima_fieldname', 'minima_reduced_filename_working_orog', 'minima_fieldname'], {}), '(\n minima_working_orog_filename, minima_fieldname,\n minima_reduced_filename_working_orog, minima_fieldname)\n', (47416, 47529), False, 'from Dynamic_HD_Scripts.tools import dynamic_lake_operators\n'), ((48046, 48171), 'Dynamic_HD_Scripts.base.iodriver.advanced_field_loader', 'iodriver.advanced_field_loader', (['output_working_orog_sinkless_improved_filename'], {'fieldname': '"""Topo"""', 'adjust_orientation': '(True)'}), "(output_working_orog_sinkless_improved_filename,\n fieldname='Topo', adjust_orientation=True)\n", (48076, 48171), False, 'from Dynamic_HD_Scripts.base import iodriver\n'), ((48251, 48367), 'Dynamic_HD_Scripts.base.iodriver.advanced_field_loader', 'iodriver.advanced_field_loader', (['output_filtered_working_orog_filename'], {'fieldname': '"""Topo"""', 'adjust_orientation': '(True)'}), "(output_filtered_working_orog_filename,\n fieldname='Topo', adjust_orientation=True)\n", (48281, 48367), False, 'from 
Dynamic_HD_Scripts.base import iodriver\n'), ((48473, 48584), 'Dynamic_HD_Scripts.base.iodriver.advanced_field_writer', 'iodriver.advanced_field_writer', (['orog_diff_filename', 'improved_sinkless_orog'], {'fieldname': '"""depth"""', 'clobber': '(True)'}), "(orog_diff_filename, improved_sinkless_orog,\n fieldname='depth', clobber=True)\n", (48503, 48584), False, 'from Dynamic_HD_Scripts.base import iodriver\n'), ((50271, 50320), 'os.path.join', 'join', (['self.orography_path', '"""GLAC1D_Top01_surf.nc"""'], {}), "(self.orography_path, 'GLAC1D_Top01_surf.nc')\n", (50275, 50320), False, 'from os.path import join\n'), ((50381, 50439), 'os.path.join', 'join', (['self.ls_masks_path', '"""10min_lsmask_pmu0178_merged.nc"""'], {}), "(self.ls_masks_path, '10min_lsmask_pmu0178_merged.nc')\n", (50385, 50439), False, 'from os.path import join\n'), ((50497, 50609), 'os.path.join', 'join', (['self.ls_masks_path', '"""generated"""', '"""ls_mask_make_1000m_depth_contour_mask_from_ICE6G_20200721_144332.nc"""'], {}), "(self.ls_masks_path, 'generated',\n 'ls_mask_make_1000m_depth_contour_mask_from_ICE6G_20200721_144332.nc')\n", (50501, 50609), False, 'from os.path import join\n'), ((50668, 50717), 'os.path.join', 'join', (['self.orography_path', '"""GLAC1D_ICEM_10min.nc"""'], {}), "(self.orography_path, 'GLAC1D_ICEM_10min.nc')\n", (50672, 50717), False, 'from os.path import join\n'), ((50787, 50862), 'os.path.join', 'join', (['self.grid_areas_and_spacings_filepath', '"""10min_grid_area_default_R.nc"""'], {}), "(self.grid_areas_and_spacings_filepath, '10min_grid_area_default_R.nc')\n", (50791, 50862), False, 'from os.path import join\n'), ((58536, 58928), 'Dynamic_HD_Scripts.tools.determine_river_directions.advanced_river_direction_determination_driver', 'determine_river_directions.advanced_river_direction_determination_driver', (['rdirs_filename_10min', 'working_orography_filename', 'lsmask_filename'], {'truesinks_filename': 'None', 'rdirs_fieldname': '"""FDIR"""', 'orography_fieldname': 'orography_fieldname', 'lsmask_fieldname': 'lsmask_fieldname', 'truesinks_fieldname': 'None', 'always_flow_to_sea': '(True)', 'use_diagonal_nbrs': '(True)', 'mark_pits_as_true_sinks': '(True)'}), "(\n rdirs_filename_10min, working_orography_filename, lsmask_filename,\n truesinks_filename=None, rdirs_fieldname='FDIR', orography_fieldname=\n orography_fieldname, lsmask_fieldname=lsmask_fieldname,\n truesinks_fieldname=None, always_flow_to_sea=True, use_diagonal_nbrs=\n True, mark_pits_as_true_sinks=True)\n", (58608, 58928), False, 'from Dynamic_HD_Scripts.tools import determine_river_directions\n'), ((59730, 59913), 'Dynamic_HD_Scripts.tools.compute_catchments.advanced_main', 'cc.advanced_main', (['rdirs_filename_10min', '"""FDIR"""', 'fine_catchments_filename', '"""catchments"""'], {'loop_logfile': '"""/Users/thomasriddick/Documents/data/temp/loop_log.txt"""', 'use_cpp_alg': '(True)'}), "(rdirs_filename_10min, 'FDIR', fine_catchments_filename,\n 'catchments', loop_logfile=\n '/Users/thomasriddick/Documents/data/temp/loop_log.txt', use_cpp_alg=True)\n", (59746, 59913), True, 'from Dynamic_HD_Scripts.tools import compute_catchments as cc\n'), ((59978, 60140), 'Dynamic_HD_Scripts.tools.flow_to_grid_cell.advanced_main', 'ftgc.advanced_main', ([], {'rdirs_filename': 'rdirs_filename_10min', 'output_filename': 'fine_cumulative_flow_filename', 'rdirs_fieldname': '"""FDIR"""', 'output_fieldname': '"""cflow"""'}), "(rdirs_filename=rdirs_filename_10min, output_filename=\n fine_cumulative_flow_filename, rdirs_fieldname='FDIR', 
output_fieldname\n ='cflow')\n", (59996, 60140), True, 'from Dynamic_HD_Scripts.tools import flow_to_grid_cell as ftgc\n'), ((60245, 60315), 'os.path.join', 'join', (['self.cotat_plus_parameters_path', '"""cotat_plus_standard_params.nl"""'], {}), "(self.cotat_plus_parameters_path, 'cotat_plus_standard_params.nl')\n", (60249, 60315), False, 'from os.path import join\n'), ((61591, 61776), 'Dynamic_HD_Scripts.tools.compute_catchments.advanced_main', 'cc.advanced_main', (['rdirs_filename_30min', '"""FDIR"""', 'coarse_catchments_filename', '"""catchments"""'], {'loop_logfile': '"""/Users/thomasriddick/Documents/data/temp/loop_log.txt"""', 'use_cpp_alg': '(True)'}), "(rdirs_filename_30min, 'FDIR', coarse_catchments_filename,\n 'catchments', loop_logfile=\n '/Users/thomasriddick/Documents/data/temp/loop_log.txt', use_cpp_alg=True)\n", (61607, 61776), True, 'from Dynamic_HD_Scripts.tools import compute_catchments as cc\n'), ((61841, 62004), 'Dynamic_HD_Scripts.tools.flow_to_grid_cell.advanced_main', 'ftgc.advanced_main', ([], {'rdirs_filename': 'rdirs_filename_30min', 'output_filename': 'coarse_cumulative_flow_filename', 'rdirs_fieldname': '"""FDIR"""', 'output_fieldname': '"""cflow"""'}), "(rdirs_filename=rdirs_filename_30min, output_filename=\n coarse_cumulative_flow_filename, rdirs_fieldname='FDIR',\n output_fieldname='cflow')\n", (61859, 62004), True, 'from Dynamic_HD_Scripts.tools import flow_to_grid_cell as ftgc\n'), ((62324, 62557), 'Dynamic_HD_Scripts.utilities.utilities.upscale_field_driver', 'utilities.upscale_field_driver', ([], {'input_filename': 'working_orography_filename', 'output_filename': 'coarse_orography_filename', 'input_grid_type': '"""LatLong10min"""', 'output_grid_type': '"""HD"""', 'method': '"""Sum"""', 'timeslice': 'None', 'scalenumbers': '(True)'}), "(input_filename=working_orography_filename,\n output_filename=coarse_orography_filename, input_grid_type=\n 'LatLong10min', output_grid_type='HD', method='Sum', timeslice=None,\n scalenumbers=True)\n", (62354, 62557), False, 'from Dynamic_HD_Scripts.utilities import utilities\n'), ((62736, 62951), 'Dynamic_HD_Scripts.utilities.utilities.upscale_field_driver', 'utilities.upscale_field_driver', ([], {'input_filename': 'lsmask_filename', 'output_filename': 'coarse_lsmask_filename', 'input_grid_type': '"""LatLong10min"""', 'output_grid_type': '"""HD"""', 'method': '"""Mode"""', 'timeslice': 'None', 'scalenumbers': '(True)'}), "(input_filename=lsmask_filename,\n output_filename=coarse_lsmask_filename, input_grid_type='LatLong10min',\n output_grid_type='HD', method='Mode', timeslice=None, scalenumbers=True)\n", (62766, 62951), False, 'from Dynamic_HD_Scripts.utilities import utilities\n'), ((77411, 77751), 'Dynamic_HD_Scripts.tools.connect_coarse_lake_catchments.connect_coarse_lake_catchments_driver', 'cclc.connect_coarse_lake_catchments_driver', (['coarse_catchments_filepath', 'lake_parameters_filepath', 'basin_numbers_filepath', 'river_directions_filepath', 'connected_coarse_catchments_out_filename', 'coarse_catchments_fieldname', 'connected_coarse_catchments_out_fieldname', 'basin_catchment_numbers_fieldname', 'river_directions_fieldname'], {}), '(coarse_catchments_filepath,\n lake_parameters_filepath, basin_numbers_filepath,\n river_directions_filepath, connected_coarse_catchments_out_filename,\n coarse_catchments_fieldname, connected_coarse_catchments_out_fieldname,\n basin_catchment_numbers_fieldname, river_directions_fieldname)\n', (77453, 77751), True, 'from Dynamic_HD_Scripts.tools import connect_coarse_lake_catchments 
as cclc\n'), ((27988, 28104), 'Dynamic_HD_Scripts.base.iodriver.advanced_field_loader', 'iodriver.advanced_field_loader', (['orography_0k_filename'], {'time_slice': 'timestep_0k', 'fieldname': 'orography_0k_fieldname'}), '(orography_0k_filename, time_slice=\n timestep_0k, fieldname=orography_0k_fieldname)\n', (28018, 28104), False, 'from Dynamic_HD_Scripts.base import iodriver\n'), ((28415, 28493), 'Dynamic_HD_Scripts.base.iodriver.advanced_field_writer', 'iodriver.advanced_field_writer', (['working_orog_0k_filename', 'orography_0k', '"""Topo"""'], {}), "(working_orog_0k_filename, orography_0k, 'Topo')\n", (28445, 28493), False, 'from Dynamic_HD_Scripts.base import iodriver\n'), ((28617, 28723), 'Dynamic_HD_Scripts.base.iodriver.advanced_field_loader', 'iodriver.advanced_field_loader', (['orography_filename'], {'time_slice': 'timestep', 'fieldname': 'orography_fieldname'}), '(orography_filename, time_slice=timestep,\n fieldname=orography_fieldname)\n', (28647, 28723), False, 'from Dynamic_HD_Scripts.base import iodriver\n'), ((28985, 29057), 'Dynamic_HD_Scripts.base.iodriver.advanced_field_writer', 'iodriver.advanced_field_writer', (['working_orog_filename', 'orography', '"""Topo"""'], {}), "(working_orog_filename, orography, 'Topo')\n", (29015, 29057), False, 'from Dynamic_HD_Scripts.base import iodriver\n'), ((31415, 31622), 'Dynamic_HD_Scripts.base.iodriver.advanced_field_loader', 'iodriver.advanced_field_loader', (['ls_mask_0k_filename'], {'time_slice': 'ls_mask_timestep_0k', 'fieldname': 'ls_mask_0k_fieldname', 'grid_desc_file': '"""/Users/thomasriddick/Documents/data/HDdata/grids/grid_10min.txt"""'}), "(ls_mask_0k_filename, time_slice=\n ls_mask_timestep_0k, fieldname=ls_mask_0k_fieldname, grid_desc_file=\n '/Users/thomasriddick/Documents/data/HDdata/grids/grid_10min.txt')\n", (31445, 31622), False, 'from Dynamic_HD_Scripts.base import iodriver\n'), ((31992, 32097), 'Dynamic_HD_Scripts.base.iodriver.advanced_field_writer', 'iodriver.advanced_field_writer', (['original_ls_mask_filename', 'ls_mask_0k'], {'fieldname': 'ls_mask_0k_fieldname'}), '(original_ls_mask_filename, ls_mask_0k,\n fieldname=ls_mask_0k_fieldname)\n', (32022, 32097), False, 'from Dynamic_HD_Scripts.base import iodriver\n'), ((49674, 49815), 'Dynamic_HD_Scripts.tools.extract_lake_volumes.lake_volume_extraction_driver', 'extract_lake_volumes.lake_volume_extraction_driver', (['lake_parameters_filepath', 'basin_catchment_numbers_filepath', 'lake_volumes_out_filepath'], {}), '(lake_parameters_filepath,\n basin_catchment_numbers_filepath, lake_volumes_out_filepath)\n', (49724, 49815), False, 'from Dynamic_HD_Scripts.tools import extract_lake_volumes\n'), ((53386, 53690), 'Dynamic_HD_Scripts.utilities.utilities.advanced_extract_true_sinks_from_rdirs', 'utilities.advanced_extract_true_sinks_from_rdirs', ([], {'rdirs_filename': "('/Users/thomasriddick/Documents/data/HDdata/rdirs/generated/updated_RFDs_' +\n file_label + '_10min_with_depressions.nc')", 'truesinks_filename': 'minima_from_rdirs_filename', 'rdirs_fieldname': '"""FDIR"""', 'truesinks_fieldname': '"""minima"""'}), "(rdirs_filename=\n '/Users/thomasriddick/Documents/data/HDdata/rdirs/generated/updated_RFDs_'\n + file_label + '_10min_with_depressions.nc', truesinks_filename=\n minima_from_rdirs_filename, rdirs_fieldname='FDIR', truesinks_fieldname\n ='minima')\n", (53434, 53690), False, 'from Dynamic_HD_Scripts.utilities import utilities\n'), ((80350, 80812), 'Dynamic_HD_Scripts.tools.connect_coarse_lake_catchments.connect_coarse_lake_catchments_driver', 
'cclc.connect_coarse_lake_catchments_driver', (['coarse_catchments_filepath', 'lake_parameters_filepath', 'basin_numbers_filepath', 'river_directions_filepath', 'connected_coarse_catchments_out_filename', 'coarse_catchments_fieldname', 'connected_coarse_catchments_out_fieldname', 'basin_catchment_numbers_fieldname', 'river_directions_fieldname', 'cumulative_flow_filename', 'cumulative_flow_out_filename', 'cumulative_flow_fieldname', 'cumulative_flow_out_fieldname'], {}), '(coarse_catchments_filepath,\n lake_parameters_filepath, basin_numbers_filepath,\n river_directions_filepath, connected_coarse_catchments_out_filename,\n coarse_catchments_fieldname, connected_coarse_catchments_out_fieldname,\n basin_catchment_numbers_fieldname, river_directions_fieldname,\n cumulative_flow_filename, cumulative_flow_out_filename,\n cumulative_flow_fieldname, cumulative_flow_out_fieldname)\n', (80392, 80812), True, 'from Dynamic_HD_Scripts.tools import connect_coarse_lake_catchments as cclc\n'), ((81409, 81915), 'Dynamic_HD_Scripts.tools.river_mouth_marking_driver.advanced_flow_to_rivermouth_calculation_driver', 'river_mouth_marking_driver.advanced_flow_to_rivermouth_calculation_driver', ([], {'input_river_directions_filename': 'river_directions_filepath', 'input_flow_to_cell_filename': 'cumulative_flow_out_filename', 'output_flow_to_river_mouths_filename': 'cumulative_river_mouth_flow_out_filename', 'input_river_directions_fieldname': 'river_directions_fieldname', 'input_flow_to_cell_fieldname': 'cumulative_flow_out_fieldname', 'output_flow_to_river_mouths_fieldname': 'cumulative_river_mouth_flow_out_fieldname'}), '(\n input_river_directions_filename=river_directions_filepath,\n input_flow_to_cell_filename=cumulative_flow_out_filename,\n output_flow_to_river_mouths_filename=\n cumulative_river_mouth_flow_out_filename,\n input_river_directions_fieldname=river_directions_fieldname,\n input_flow_to_cell_fieldname=cumulative_flow_out_fieldname,\n output_flow_to_river_mouths_fieldname=\n cumulative_river_mouth_flow_out_fieldname)\n', (81482, 81915), False, 'from Dynamic_HD_Scripts.tools import river_mouth_marking_driver\n'), ((83240, 83381), 'Dynamic_HD_Scripts.tools.extract_lake_volumes.lake_volume_extraction_driver', 'extract_lake_volumes.lake_volume_extraction_driver', (['lake_parameters_filepath', 'basin_catchment_numbers_filepath', 'lake_volumes_out_filepath'], {}), '(lake_parameters_filepath,\n basin_catchment_numbers_filepath, lake_volumes_out_filepath)\n', (83290, 83381), False, 'from Dynamic_HD_Scripts.tools import extract_lake_volumes\n'), ((63267, 63302), 'os.path.splitext', 'path.splitext', (['rdirs_filename_30min'], {}), '(rdirs_filename_30min)\n', (63280, 63302), True, 'import os.path as path\n'), ((63454, 63494), 'os.path.splitext', 'path.splitext', (['coarse_orography_filename'], {}), '(coarse_orography_filename)\n', (63467, 63494), True, 'import os.path as path\n'), ((63636, 63673), 'os.path.splitext', 'path.splitext', (['coarse_lsmask_filename'], {}), '(coarse_lsmask_filename)\n', (63649, 63673), True, 'import os.path as path\n'), ((66145, 66197), 'os.path.join', 'path.join', (['self.orography_path', '"""bin_innerslope.dat"""'], {}), "(self.orography_path, 'bin_innerslope.dat')\n", (66154, 66197), True, 'import os.path as path\n'), ((66350, 66398), 'os.path.join', 'path.join', (['self.null_fields_filepath', '"""null.dat"""'], {}), "(self.null_fields_filepath, 'null.dat')\n", (66359, 66398), True, 'import os.path as path\n'), ((66493, 66557), 'os.path.join', 'path.join', 
(['self.grid_areas_and_spacings_filepath', '"""fl_dp_dl.dat"""'], {}), "(self.grid_areas_and_spacings_filepath, 'fl_dp_dl.dat')\n", (66502, 66557), True, 'import os.path as path\n'), ((66706, 66755), 'os.path.join', 'path.join', (['self.orography_path', '"""bin_toposig.dat"""'], {}), "(self.orography_path, 'bin_toposig.dat')\n", (66715, 66755), True, 'import os.path as path\n'), ((66804, 66872), 'os.path.join', 'path.join', (['self.flow_params_dirs_path', "('hd_flow_params' + file_label)"], {}), "(self.flow_params_dirs_path, 'hd_flow_params' + file_label)\n", (66813, 66872), True, 'import os.path as path\n'), ((67279, 67327), 'os.path.join', 'path.join', (['self.null_fields_filepath', '"""null.dat"""'], {}), "(self.null_fields_filepath, 'null.dat')\n", (67288, 67327), True, 'import os.path as path\n'), ((67406, 67470), 'os.path.join', 'path.join', (['self.grid_areas_and_spacings_filepath', '"""fl_dp_dl.dat"""'], {}), "(self.grid_areas_and_spacings_filepath, 'fl_dp_dl.dat')\n", (67415, 67470), True, 'import os.path as path\n'), ((67721, 67789), 'os.path.join', 'path.join', (['self.flow_params_dirs_path', "('hd_flow_params' + file_label)"], {}), "(self.flow_params_dirs_path, 'hd_flow_params' + file_label)\n", (67730, 67789), True, 'import os.path as path\n'), ((71389, 71458), 'os.path.join', 'join', (['self.lake_parameter_file_path', "('lakeparas' + file_label + '.nc')"], {}), "(self.lake_parameter_file_path, 'lakeparas' + file_label + '.nc')\n", (71393, 71458), False, 'from os.path import join\n'), ((71795, 71886), 'os.path.join', 'join', (['self.basin_catchment_numbers_path', "('basin_catchment_numbers' + file_label + '.nc')"], {}), "(self.basin_catchment_numbers_path, 'basin_catchment_numbers' +\n file_label + '.nc')\n", (71799, 71886), False, 'from os.path import join\n'), ((75784, 75853), 'os.path.join', 'join', (['self.lake_parameter_file_path', "('lakeparas' + file_label + '.nc')"], {}), "(self.lake_parameter_file_path, 'lakeparas' + file_label + '.nc')\n", (75788, 75853), False, 'from os.path import join\n'), ((56861, 56931), 'os.path.join', 'join', (['self.lake_parameter_file_path', "('lakeparas_' + file_label + '.nc')"], {}), "(self.lake_parameter_file_path, 'lakeparas_' + file_label + '.nc')\n", (56865, 56931), False, 'from os.path import join\n'), ((57268, 57360), 'os.path.join', 'join', (['self.basin_catchment_numbers_path', "('basin_catchment_numbers_' + file_label + '.nc')"], {}), "(self.basin_catchment_numbers_path, 'basin_catchment_numbers_' +\n file_label + '.nc')\n", (57272, 57360), False, 'from os.path import join\n'), ((57672, 57744), 'os.path.join', 'join', (['self.lake_parameter_file_path', "('lakeparas_' + file_label + '.nc\\n')"], {}), "(self.lake_parameter_file_path, 'lakeparas_' + file_label + '.nc\\n')\n", (57676, 57744), False, 'from os.path import join\n'), ((57787, 57881), 'os.path.join', 'join', (['self.basin_catchment_numbers_path', "('basin_catchment_numbers_' + file_label + '.nc\\n')"], {}), "(self.basin_catchment_numbers_path, 'basin_catchment_numbers_' +\n file_label + '.nc\\n')\n", (57791, 57881), False, 'from os.path import join\n'), ((63171, 63206), 'os.path.splitext', 'path.splitext', (['rdirs_filename_30min'], {}), '(rdirs_filename_30min)\n', (63184, 63206), True, 'import os.path as path\n'), ((63355, 63395), 'os.path.splitext', 'path.splitext', (['coarse_orography_filename'], {}), '(coarse_orography_filename)\n', (63368, 63395), True, 'import os.path as path\n'), ((63538, 63575), 'os.path.splitext', 'path.splitext', (['coarse_lsmask_filename'], {}), 
'(coarse_lsmask_filename)\n', (63551, 63575), True, 'import os.path as path\n'), ((40908, 40919), 'time.time', 'time.time', ([], {}), '()\n', (40917, 40919), False, 'import time\n'), ((67108, 67137), 'os.path.splitext', 'path.splitext', (['rdirs_filepath'], {}), '(rdirs_filepath)\n', (67121, 67137), True, 'import os.path as path\n')] |
# $Id: testutils.py 2612 2008-08-11 20:08:49Z graham.klyne $
"""
Test upnp.core.utils
"""
import sys
import string
import unittest
import logging
from coherence.upnp.core.utils import *
# This data is joined using CRLF pairs.
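# Each chunk is preceded by its size as a hexadecimal string ('200', '223', 'c4'),
# and the stream ends with a zero-length chunk ('0'), as in HTTP chunked transfer encoding.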
testChunkedData = ['200',
'<?xml version="1.0" ?> ',
'<root xmlns="urn:schemas-upnp-org:device-1-0">',
' <specVersion>',
' <major>1</major> ',
' <minor>0</minor> ',
' </specVersion>',
' <device>',
' <deviceType>urn:schemas-upnp-org:device:MediaRenderer:1</deviceType> ',
' <friendlyName>DMA201</friendlyName> ',
' <manufacturer> </manufacturer> ',
' <manufacturerURL> </manufacturerURL> ',
' <modelDescription>DMA201</modelDescription> ',
' <modelName>DMA</modelName> ',
' <modelNumber>201</modelNumber> ',
' <modelURL> </modelURL> ',
' <serialNumber>0',
'200',
'00000000001</serialNumber> ',
' <UDN>uuid:BE1C49F2-572D-3617-8F4C-BB1DEC3954FD</UDN> ',
' <UPC /> ',
' <serviceList>',
' <service>',
' <serviceType>urn:schemas-upnp-org:service:ConnectionManager:1</serviceType>',
' <serviceId>urn:upnp-org:serviceId:ConnectionManager</serviceId>',
' <controlURL>http://10.63.1.113:4444/CMSControl</controlURL>',
' <eventSubURL>http://10.63.1.113:4445/CMSEvent</eventSubURL>',
' <SCPDURL>/upnpdev.cgi?file=/ConnectionManager.xml</SCPDURL>',
' </service>',
' <service>',
' <serv',
'223',
'iceType>urn:schemas-upnp-org:service:AVTransport:1</serviceType>',
' <serviceId>urn:upnp-org:serviceId:AVTransport</serviceId>',
' <controlURL>http://10.63.1.113:4444/AVTControl</controlURL>',
' <eventSubURL>http://10.63.1.113:4445/AVTEvent</eventSubURL>',
' <SCPDURL>/upnpdev.cgi?file=/AVTransport.xml</SCPDURL>',
' </service>',
' <service>',
' <serviceType>urn:schemas-upnp-org:service:RenderingControl:1</serviceType>',
' <serviceId>urn:upnp-org:serviceId:RenderingControl</serviceId>',
' <controlURL>http://10.63.1.113:4444/RCSControl</',
'c4',
'controlURL>',
' <eventSubURL>http://10.63.1.113:4445/RCSEvent</eventSubURL>',
' <SCPDURL>/upnpdev.cgi?file=/RenderingControl.xml</SCPDURL>',
' </service>',
' </serviceList>',
' </device>',
'</root>'
'',
'0',
'']
testChunkedDataResult = ['<?xml version="1.0" ?> ',
'<root xmlns="urn:schemas-upnp-org:device-1-0">',
' <specVersion>',
' <major>1</major> ',
' <minor>0</minor> ',
' </specVersion>',
' <device>',
' <deviceType>urn:schemas-upnp-org:device:MediaRenderer:1</deviceType> ',
' <friendlyName>DMA201</friendlyName> ',
' <manufacturer> </manufacturer> ',
' <manufacturerURL> </manufacturerURL> ',
' <modelDescription>DMA201</modelDescription> ',
' <modelName>DMA</modelName> ',
' <modelNumber>201</modelNumber> ',
' <modelURL> </modelURL> ',
' <serialNumber>000000000001</serialNumber> ',
' <UDN>uuid:BE1C49F2-572D-3617-8F4C-BB1DEC3954FD</UDN> ',
' <UPC /> ',
' <serviceList>',
' <service>',
' <serviceType>urn:schemas-upnp-org:service:ConnectionManager:1</serviceType>',
' <serviceId>urn:upnp-org:serviceId:ConnectionManager</serviceId>',
' <controlURL>http://10.63.1.113:4444/CMSControl</controlURL>',
' <eventSubURL>http://10.63.1.113:4445/CMSEvent</eventSubURL>',
' <SCPDURL>/upnpdev.cgi?file=/ConnectionManager.xml</SCPDURL>',
' </service>',
' <service>',
' <serviceType>urn:schemas-upnp-org:service:AVTransport:1</serviceType>',
' <serviceId>urn:upnp-org:serviceId:AVTransport</serviceId>',
' <controlURL>http://10.63.1.113:4444/AVTControl</controlURL>',
' <eventSubURL>http://10.63.1.113:4445/AVTEvent</eventSubURL>',
' <SCPDURL>/upnpdev.cgi?file=/AVTransport.xml</SCPDURL>',
' </service>',
' <service>',
' <serviceType>urn:schemas-upnp-org:service:RenderingControl:1</serviceType>',
' <serviceId>urn:upnp-org:serviceId:RenderingControl</serviceId>',
' <controlURL>http://10.63.1.113:4444/RCSControl</controlURL>',
' <eventSubURL>http://10.63.1.113:4445/RCSEvent</eventSubURL>',
' <SCPDURL>/upnpdev.cgi?file=/RenderingControl.xml</SCPDURL>',
' </service>',
' </serviceList>',
' </device>',
'</root>',
''
]
class TestUpnpUtils(unittest.TestCase):
def setUp(self):
self._log = logging.getLogger( "TestUpnpUtils" )
return
def tearDown(self):
return
def testChunked(self):
testData = string.join( testChunkedData, '\r\n' )
self._log.debug( testData )
newData = de_chunk_payload(testData)
self._log.debug( newData )
        # check that de-chunking reconstructs the original, un-chunked payload
self.assertEqual(newData, string.join( testChunkedDataResult, '\r\n' ))
def getTestSuite():
suite = unittest.TestSuite()
suite.addTest(TestUpnpUtils("testChunked"))
return suite
# Run unit tests directly from command line
if __name__ == "__main__":
if len(sys.argv) > 1:
logging.basicConfig(level=logging.DEBUG)
tests = TestUpnpUtils( sys.argv[1] )
else:
logging.basicConfig(level=logging.ERROR)
tests = getTestSuite()
runner = unittest.TextTestRunner(verbosity=2)
runner.run(tests)
# $Id: testutils.py 2612 2008-08-11 20:08:49Z graham.klyne $
| [
"logging.getLogger",
"unittest.TestSuite",
"string.join",
"logging.basicConfig",
"unittest.TextTestRunner"
] | [((4571, 4591), 'unittest.TestSuite', 'unittest.TestSuite', ([], {}), '()\n', (4589, 4591), False, 'import unittest\n'), ((4954, 4990), 'unittest.TextTestRunner', 'unittest.TextTestRunner', ([], {'verbosity': '(2)'}), '(verbosity=2)\n', (4977, 4990), False, 'import unittest\n'), ((4118, 4152), 'logging.getLogger', 'logging.getLogger', (['"""TestUpnpUtils"""'], {}), "('TestUpnpUtils')\n", (4135, 4152), False, 'import logging\n'), ((4257, 4293), 'string.join', 'string.join', (['testChunkedData', "'\\r\\n'"], {}), "(testChunkedData, '\\r\\n')\n", (4268, 4293), False, 'import string\n'), ((4764, 4804), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.DEBUG'}), '(level=logging.DEBUG)\n', (4783, 4804), False, 'import logging\n'), ((4868, 4908), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.ERROR'}), '(level=logging.ERROR)\n', (4887, 4908), False, 'import logging\n'), ((4492, 4534), 'string.join', 'string.join', (['testChunkedDataResult', "'\\r\\n'"], {}), "(testChunkedDataResult, '\\r\\n')\n", (4503, 4534), False, 'import string\n')] |
# Rainbow, by <NAME> <EMAIL>
# Shows a simple rainbow animation.
import time, sys
assert sys.version_info.major == 3, 'Run this program on Python 3.'
try:
import bext
except ImportError:
sys.exit('Bext is required to run this. Run `pip install bext` from the shell to install it.')
indent = 0 # How many spaces to indent.
indentIncreasing = True # Whether the indentation is increasing or not.
while True:
print(' ' * indent, end='')
bext.fg('red')
print('##', end='')
bext.fg('yellow')
print('##', end='')
bext.fg('green')
print('##', end='')
bext.fg('blue')
print('##', end='')
bext.fg('cyan')
print('##', end='')
bext.fg('purple')
print('##')
if indentIncreasing:
# Increase the number of spaces:
indent = indent + 1
if indent == 20:
# Change direction:
indentIncreasing = False
else:
# Decrease the number of spaces:
indent = indent - 1
if indent == 0:
# Change direction:
indentIncreasing = True
time.sleep(0.05) # Add a slight pause.
| [
"bext.fg",
"time.sleep",
"sys.exit"
] | [((442, 456), 'bext.fg', 'bext.fg', (['"""red"""'], {}), "('red')\n", (449, 456), False, 'import bext\n'), ((485, 502), 'bext.fg', 'bext.fg', (['"""yellow"""'], {}), "('yellow')\n", (492, 502), False, 'import bext\n'), ((531, 547), 'bext.fg', 'bext.fg', (['"""green"""'], {}), "('green')\n", (538, 547), False, 'import bext\n'), ((576, 591), 'bext.fg', 'bext.fg', (['"""blue"""'], {}), "('blue')\n", (583, 591), False, 'import bext\n'), ((620, 635), 'bext.fg', 'bext.fg', (['"""cyan"""'], {}), "('cyan')\n", (627, 635), False, 'import bext\n'), ((664, 681), 'bext.fg', 'bext.fg', (['"""purple"""'], {}), "('purple')\n", (671, 681), False, 'import bext\n'), ((1063, 1079), 'time.sleep', 'time.sleep', (['(0.05)'], {}), '(0.05)\n', (1073, 1079), False, 'import time, sys\n'), ((185, 289), 'sys.exit', 'sys.exit', (['"""Bext is required to run this. Run `pip install bext` from the shell to install it."""'], {}), "(\n 'Bext is required to run this. Run `pip install bext` from the shell to install it.'\n )\n", (193, 289), False, 'import time, sys\n')] |