max_stars_repo_path (stringlengths 4-245) | max_stars_repo_name (stringlengths 7-115) | max_stars_count (int64 101-368k) | id (stringlengths 2-8) | content (stringlengths 6-1.03M) |
---|---|---|---|---|
tests/user_test.py | jjinno/pygerduty | 144 | 12766171 | from __future__ import absolute_import
import httpretty
import pygerduty
import pygerduty.v2
###################
# Version 1 Tests #
###################
@httpretty.activate
def test_get_user_v1():
body = open('tests/fixtures/user_v1.json').read()
httpretty.register_uri(
httpretty.GET, "https://contosso.pagerduty.com/api/v1/users/PIJ90N7",
body=body, status=200)
p = pygerduty.PagerDuty("contosso", "password")
user = p.users.show("PIJ90N7")
assert user.id == "PIJ90N7"
assert user.name == "<NAME>"
assert user.role == "admin"
@httpretty.activate
def test_list_user_contact_methods_v1():
user_body = open('tests/fixtures/user_v1.json').read()
contact_body = open('tests/fixtures/contacts_v1.json').read()
httpretty.register_uri(
httpretty.GET, "https://contosso.pagerduty.com/api/v1/users/PIJ90N7",
        body=user_body, status=200)
httpretty.register_uri(
httpretty.GET, "https://contosso.pagerduty.com/api/v1/users/PIJ90N7/contact_methods",
body=contact_body, status=200)
p = pygerduty.PagerDuty("contosso", "password")
user = p.users.show("PIJ90N7")
contact_methods = [c for c in user.contact_methods.list()]
assert len(contact_methods) == 3
assert len([c for c in contact_methods if c.type == "email"]) == 1
assert len([c for c in contact_methods if c.type == "phone"]) == 1
assert len([c for c in contact_methods if c.type == "SMS"]) == 1
###################
# Version 2 Tests #
###################
@httpretty.activate
def test_get_user_v2():
body = open('tests/fixtures/user_v2.json').read()
httpretty.register_uri(
httpretty.GET, "https://api.pagerduty.com/users/PXPGF42",
body=body, status=200)
p = pygerduty.v2.PagerDuty("password")
user = p.users.show("PXPGF42")
assert user.id == "PXPGF42"
assert user.name == "<NAME>"
assert user.role == "admin"
assert user.self_ == 'https://api.pagerduty.com/users/PXPGF42'
@httpretty.activate
def test_list_user_contact_methods_v2():
user_body = open('tests/fixtures/user_v2.json').read()
contact_body = open('tests/fixtures/contacts_v2.json').read()
httpretty.register_uri(
httpretty.GET, "https://api.pagerduty.com/users/PXPGF42",
body=user_body, status=200)
httpretty.register_uri(
httpretty.GET, "https://api.pagerduty.com/users/PXPGF42/contact_methods",
body=contact_body, status=200)
p = pygerduty.v2.PagerDuty("password")
user = p.users.show("PXPGF42")
contact_methods = [c for c in user.contact_methods.list()]
assert len(contact_methods) == 3
assert len([c for c in contact_methods if c.type == "email"]) == 1
assert len([c for c in contact_methods if c.type == "phone"]) == 1
assert len([c for c in contact_methods if c.type == "SMS"]) == 1
assert user.self_ == 'https://api.pagerduty.com/users/PXPGF42'
@httpretty.activate
def test_user_notification_rules_v2():
user_body = open('tests/fixtures/user_v2.json').read()
notification_body = open('tests/fixtures/notification_v2.json').read()
httpretty.register_uri(
httpretty.GET, "https://api.pagerduty.com/users/PXPGF42",
body=user_body, status=200)
httpretty.register_uri(
httpretty.GET, "https://api.pagerduty.com/users/PXPGF42/notification_rules",
body=notification_body, status=200)
p = pygerduty.v2.PagerDuty("password")
user = p.users.show("PXPGF42")
notification_rules = [n for n in user.notification_rules.list()]
assert len(notification_rules) == 1
assert len([n for n in notification_rules if n.type == "assignment_notification_rule"]) == 1
assert user.self_ == "https://api.pagerduty.com/users/PXPGF42"
def test_clean_response():
mock_response = {
"user" : {
"id": "PHDGK84",
"type": "user",
"self": "https://api.pagerduty.com/users/PHDGK84",
"name": "Snoopy",
"contact_methods": [
{
"address": "<EMAIL>",
"id": "PZMO0JF",
"self": "https://api.pagerduty.com/users/PHDGK84/contact_method/PZMO0JF",
"label": "Default"
},
{
"address": "8928393498",
"id": "PZMN843",
"self": "https://api.pagerduty.com/users/PHDGK84/contact_method/PZMN843",
"label": "Default"
}
],
"notification_rules": [
{
"id": "P8WETWW",
"contact_method": {
"id": "PZMO0JF",
"self": "https://api.pagerduty.com/users/PHDGK84/contact_method/PZMO0JF",
}
}
]
}
}
clean_response = pygerduty.common.clean_response(mock_response)
assert clean_response == {
"user" : {
"id": "PHDGK84",
"type": "user",
"self_": "https://api.pagerduty.com/users/PHDGK84",
"name": "Snoopy",
"contact_methods": [
{
"address": "<EMAIL>",
"id": "PZMO0JF",
"self_": "https://api.pagerduty.com/users/PHDGK84/contact_method/PZMO0JF",
"label": "Default"
},
{
"address": "8928393498",
"id": "PZMN843",
"self_": "https://api.pagerduty.com/users/PHDGK84/contact_method/PZMN843",
"label": "Default"
}
],
"notification_rules": [
{
"id": "P8WETWW",
"contact_method": {
"id": "PZMO0JF",
"self_": "https://api.pagerduty.com/users/PHDGK84/contact_method/PZMO0JF",
}
}
]
}
}
|
server_python/config.py | dkvirus/py-novel | 145 | 12766193 | # config.py
# encoding:utf-8
DEBUG = True
JSON_AS_ASCII = False
|
spacy-annotator/displacy/server.py | aniruddha-adhikary/spacy-dev-resources | 132 | 12766203 | #!/usr/bin/env python
from __future__ import unicode_literals
from __future__ import print_function
import falcon
import spacy
import json
import sys
from spacy.pipeline import EntityRecognizer
import spacy.util
from spacy.tagger import Tagger
from .parse import Entities, TrainEntities
from falcon_cors import CORS
try:
unicode
except NameError:
unicode = str
_models = {}
def get_model(model_name):
if model_name not in _models:
model = spacy.load(model_name)
if model.tagger is None:
model.tagger = Tagger(model.vocab, features=Tagger.feature_templates)
if model.entity is None:
model.entity = EntityRecognizer(model.vocab, entity_types=['PERSON', 'NORP', 'FACILITY', 'ORG', 'GPE',
'LOC', 'PRODUCT', 'EVENT', 'WORK_OF_ART',
'LANGUAGE', 'DATE', 'TIME', 'PERCENT',
'MONEY', 'QUANTITY', 'ORDINAL', 'CARDINAL'])
model.pipeline = [model.tagger, model.entity, model.parser]
_models[model_name] = model
return _models[model_name]
def update_vocabulary(model, texts):
for text in texts:
doc = model.make_doc(text)
for word in doc:
_ = model.vocab[word.orth]
class EntResource(object):
"""Parse text and return displaCy ent's expected output."""
def on_post(self, req, resp):
req_body = req.stream.read()
json_data = json.loads(req_body.decode('utf8'))
paragraphs = json_data.get('paragraphs')
model_name = json_data.get('model', 'en')
try:
model = get_model(model_name)
entities = []
for p in paragraphs:
e = Entities(model, p.get('text'))
entities.append(e.to_json())
resp.body = json.dumps(entities, sort_keys=True, indent=2)
resp.content_type = 'application/json'
resp.status = falcon.HTTP_200
except Exception:
resp.status = falcon.HTTP_500
class TrainEntResource(object):
"""Parse text and use it to train the entity recognizer."""
def on_post(self, req, resp):
req_body = req.stream.read()
json_data = json.loads(req_body.decode('utf8'))
paragraphs = json_data.get('paragraphs')
model_name = json_data.get('model', 'en')
try:
model = get_model(model_name)
texts = [paragraph.get('text') for paragraph in paragraphs]
update_vocabulary(model, texts)
entities = []
for p in paragraphs:
e = TrainEntities(model, p.get('text'), p.get('tags'))
entities.append(e.to_json())
resp.body = json.dumps(entities, sort_keys=True, indent=2)
resp.content_type = 'application/json'
resp.status = falcon.HTTP_200
except Exception:
print("Unexpected error:", sys.exc_info()[0])
resp.status = falcon.HTTP_500
cors = CORS(allow_all_origins=True)
APP = falcon.API(middleware=[cors.middleware])
APP.add_route('/ent', EntResource())
APP.add_route('/train', TrainEntResource())
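# --- Hypothetical usage sketch (not part of the original module) ---
# How a client might query the /ent endpoint defined above; the server
# command, host, port and example text are assumptions for illustration.
#
#   $ gunicorn displacy.server:APP --bind 127.0.0.1:8000
#
#   import requests
#   payload = {"model": "en",
#              "paragraphs": [{"text": "Apple is looking at buying a U.K. startup."}]}
#   print(requests.post("http://127.0.0.1:8000/ent", json=payload).json())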
|
mmpose/core/optimizer/registry.py | chaowentao/mmpose | 367 | 12766204 | from mmcv.utils import Registry
OPTIMIZERS = Registry('optimizers')
|
devil/devil/android/tools/system_app_test.py | Martijnve23/catapult | 1,894 | 12766248 |
#!/usr/bin/env python
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import sys
import unittest
if __name__ == '__main__':
sys.path.append(
os.path.abspath(
os.path.join(os.path.dirname(__file__), '..', '..', '..')))
from devil import devil_env
from devil.android import device_utils
from devil.android.sdk import adb_wrapper
from devil.android.sdk import version_codes
from devil.android.tools import system_app
with devil_env.SysPath(devil_env.PYMOCK_PATH):
import mock
_PACKAGE_NAME = 'com.android'
_PACKAGE_PATH = '/path/to/com.android.apk'
_PM_LIST_PACKAGES_COMMAND = [
'pm', 'list', 'packages', '-f', '-u', _PACKAGE_NAME
]
_PM_LIST_PACKAGES_OUTPUT_WITH_PATH = [
'package:/path/to/other=' + _PACKAGE_NAME + '.other',
'package:' + _PACKAGE_PATH + '=' + _PACKAGE_NAME
]
_PM_LIST_PACKAGES_OUTPUT_WITHOUT_PATH = [
'package:/path/to/other=' + _PACKAGE_NAME + '.other'
]
class SystemAppTest(unittest.TestCase):
def testDoubleEnableModification(self):
"""Ensures that system app modification logic isn't repeated.
If EnableSystemAppModification uses are nested, inner calls should
not need to perform any of the expensive modification logic.
"""
# pylint: disable=no-self-use,protected-access
mock_device = mock.Mock(spec=device_utils.DeviceUtils)
mock_device.adb = mock.Mock(spec=adb_wrapper.AdbWrapper)
type(mock_device).build_version_sdk = mock.PropertyMock(
return_value=version_codes.LOLLIPOP)
system_props = {}
def dict_setprop(prop_name, value):
system_props[prop_name] = value
def dict_getprop(prop_name):
return system_props.get(prop_name, '')
mock_device.SetProp.side_effect = dict_setprop
mock_device.GetProp.side_effect = dict_getprop
with system_app.EnableSystemAppModification(mock_device):
mock_device.EnableRoot.assert_called_once_with()
mock_device.GetProp.assert_called_once_with(
system_app._ENABLE_MODIFICATION_PROP)
mock_device.SetProp.assert_called_once_with(
system_app._ENABLE_MODIFICATION_PROP, '1')
mock_device.reset_mock()
with system_app.EnableSystemAppModification(mock_device):
self.assertFalse(mock_device.EnableRoot.mock_calls) # assert not called
mock_device.GetProp.assert_called_once_with(
system_app._ENABLE_MODIFICATION_PROP)
self.assertFalse(mock_device.SetProp.mock_calls) # assert not called
mock_device.reset_mock()
mock_device.SetProp.assert_called_once_with(
system_app._ENABLE_MODIFICATION_PROP, '0')
def test_GetApplicationPaths_found(self):
"""Path found in output along with another package having similar name."""
# pylint: disable=protected-access
mock_device = mock.Mock(spec=device_utils.DeviceUtils)
mock_device.RunShellCommand.configure_mock(
return_value=_PM_LIST_PACKAGES_OUTPUT_WITH_PATH)
paths = system_app._GetApplicationPaths(mock_device, _PACKAGE_NAME)
self.assertEqual([_PACKAGE_PATH], paths)
mock_device.RunShellCommand.assert_called_once_with(
_PM_LIST_PACKAGES_COMMAND, check_return=True)
def test_GetApplicationPaths_notFound(self):
"""Path not found in output, only another package with similar name."""
# pylint: disable=protected-access
mock_device = mock.Mock(spec=device_utils.DeviceUtils)
mock_device.RunShellCommand.configure_mock(
return_value=_PM_LIST_PACKAGES_OUTPUT_WITHOUT_PATH)
paths = system_app._GetApplicationPaths(mock_device, _PACKAGE_NAME)
self.assertEqual([], paths)
mock_device.RunShellCommand.assert_called_once_with(
_PM_LIST_PACKAGES_COMMAND, check_return=True)
def test_GetApplicationPaths_noPaths(self):
"""Nothing containing text of package name found in output."""
# pylint: disable=protected-access
mock_device = mock.Mock(spec=device_utils.DeviceUtils)
mock_device.RunShellCommand.configure_mock(return_value=[])
paths = system_app._GetApplicationPaths(mock_device, _PACKAGE_NAME)
self.assertEqual([], paths)
mock_device.RunShellCommand.assert_called_once_with(
_PM_LIST_PACKAGES_COMMAND, check_return=True)
def test_GetApplicationPaths_emptyName(self):
"""Called with empty name, should not return any packages."""
# pylint: disable=protected-access
mock_device = mock.Mock(spec=device_utils.DeviceUtils)
mock_device.RunShellCommand.configure_mock(
return_value=_PM_LIST_PACKAGES_OUTPUT_WITH_PATH)
paths = system_app._GetApplicationPaths(mock_device, '')
self.assertEqual([], paths)
mock_device.RunShellCommand.assert_called_once_with(
_PM_LIST_PACKAGES_COMMAND[:-1] + [''], check_return=True)
if __name__ == '__main__':
unittest.main()
|
sleap/nn/data/training.py | hectorcarrion/sleap | 156 | 12766250 |
"""Transformers and utilities for training-related operations."""
import numpy as np
import tensorflow as tf
import sleap
from sleap.nn.data.providers import LabelsReader
from sleap.nn.data.utils import expand_to_rank, ensure_list
import attr
from typing import List, Text, Optional, Any, Union, Dict, Tuple, Sequence
from sklearn.model_selection import train_test_split
def split_labels_train_val(
labels: sleap.Labels, validation_fraction: float
) -> Tuple[sleap.Labels, List[int], sleap.Labels, List[int]]:
"""Make a train/validation split from a labels dataset.
Args:
labels: A `sleap.Labels` dataset with labeled frames.
validation_fraction: Fraction of frames to use for validation.
Returns:
A tuple of `(labels_train, idx_train, labels_val, idx_val)`.
        `labels_train` and `labels_val` are `sleap.Labels` objects containing the
selected frames for each split. Their `videos`, `tracks` and `provenance`
attributes are identical to `labels` even if the split does not contain
instances with a particular video or track.
`idx_train` and `idx_val` are list indices of the labeled frames within the
input labels that were assigned to each split, i.e.:
`labels[idx_train] == labels_train[:]`
If there is only one labeled frame in `labels`, both of the labels will contain
the same frame.
If `validation_fraction` would result in fewer than one label for either split,
it will be rounded to ensure there is at least one label in each.
"""
if len(labels) == 1:
return labels, [0], labels, [0]
# Split indices.
n_val = round(len(labels) * validation_fraction)
n_val = max(min(n_val, len(labels) - 1), 1)
idx_train, idx_val = train_test_split(list(range(len(labels))), test_size=n_val)
# Create labels and keep original metadata.
labels_train = sleap.Labels(labels[idx_train])
labels_train.videos = labels.videos
labels_train.tracks = labels.tracks
labels_train.provenance = labels.provenance
labels_val = sleap.Labels(labels[idx_val])
labels_val.videos = labels.videos
labels_val.tracks = labels.tracks
labels_val.provenance = labels.provenance
return labels_train, idx_train, labels_val, idx_val
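# --- Hypothetical usage sketch (not part of the original module) ---
# Holds out roughly 20% of labeled frames for validation; the loader call and
# file name are assumptions for illustration.
#
#   labels = sleap.load_file("labels.slp")
#   labels_train, idx_train, labels_val, idx_val = split_labels_train_val(labels, 0.2)
#   # labels[idx_train] corresponds to labels_train, labels[idx_val] to labels_val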
def split_labels(
labels: sleap.Labels, split_fractions: Sequence[float]
) -> Tuple[sleap.Labels]:
"""Split a `sleap.Labels` into multiple new ones with random subsets of the data.
Args:
labels: An instance of `sleap.Labels`.
split_fractions: One or more floats between 0 and 1 that specify the fraction of
examples that should be in each dataset. These should add up to <= 1.0.
            Fractions of less than 1 element will be rounded up to ensure that there is at
least 1 element in each split. One of the fractions may be -1 to indicate
that it should contain all elements left over from the other splits.
Returns:
A tuple of new `sleap.Labels` instances of the same length as `split_fractions`.
Raises:
ValueError: If more than one split fraction is specified as -1.
ValueError: If the splits add up to more than the total available examples.
Note:
Sampling is done without replacement.
"""
# Get indices for labeled frames.
labels_indices = np.arange(len(labels)).astype("int64")
# Compute split sizes.
n_examples = len(labels_indices)
n_examples_per_split = np.array(split_fractions).astype("float64")
if (n_examples_per_split == -1).sum() > 1:
raise ValueError("Only one split fraction can be specified as -1.")
n_examples_per_split[n_examples_per_split == -1] = np.NaN
n_examples_per_split = np.ceil(n_examples_per_split * n_examples)
n_examples_per_split[np.isnan(n_examples_per_split)] = np.maximum(
n_examples - np.nansum(n_examples_per_split), 1
)
n_examples_per_split = n_examples_per_split.astype("int64")
if n_examples_per_split.sum() > n_examples:
raise ValueError("Splits cannot sum to more than the total input labels.")
# Sample and create new Labels instances.
split_labels = []
for n_samples in n_examples_per_split:
# Sample.
sampled_indices = np.random.default_rng().choice(
labels_indices, size=n_samples, replace=False
)
# Create new instance.
split_labels.append(sleap.Labels([labels[int(ind)] for ind in sampled_indices]))
# Exclude the sampled indices from the available indices.
labels_indices = np.setdiff1d(labels_indices, sampled_indices)
return tuple(split_labels)
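# --- Hypothetical usage sketch (not part of the original module) ---
# A three-way split of an existing `sleap.Labels` instance, where the final
# fraction of -1 absorbs whatever is left after the first two are sampled.
#
#   labels_train, labels_val, labels_test = split_labels(labels, [0.8, 0.1, -1])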
def split_labels_reader(
labels_reader: LabelsReader, split_fractions: Sequence[float]
) -> Tuple[LabelsReader]:
"""Split a `LabelsReader` into multiple new ones with random subsets of the data.
Args:
labels_reader: An instance of `sleap.nn.data.providers.LabelsReader`. This is a
provider that generates datasets that contain elements read from a
`sleap.Labels` instance.
split_fractions: One or more floats between 0 and 1 that specify the fraction of
examples that should be in each dataset. These should add up to <= 1.0.
            Fractions of less than 1 element will be rounded up to ensure that there is at
least 1 element in each split. One of the fractions may be -1 to indicate
that it should contain all elements left over from the other splits.
Returns:
A tuple of `LabelsReader` instances of the same length as `split_fractions`. The
indices will be stored in the `example_indices` in each `LabelsReader` instance.
The actual `sleap.Labels` instance will be the same for each instance, only the
`example_indices` that are iterated over will change across splits.
If the input `labels_reader` already has `example_indices`, a subset of these
will be sampled to generate the splits.
Raises:
ValueError: If more than one split fraction is specified as -1.
ValueError: If the splits add up to more than the total available examples.
Note:
Sampling is done without replacement.
"""
# Get available indices.
labels_indices = labels_reader.example_indices
if labels_indices is None:
labels_indices = np.arange(len(labels_reader))
labels_indices = np.array(labels_indices).astype("int64")
# Compute split sizes.
n_examples = len(labels_indices)
n_examples_per_split = np.array(split_fractions).astype("float64")
if (n_examples_per_split == -1).sum() > 1:
raise ValueError("Only one split fraction can be specified as -1.")
n_examples_per_split[n_examples_per_split == -1] = np.NaN
n_examples_per_split = np.ceil(n_examples_per_split * n_examples)
n_examples_per_split[np.isnan(n_examples_per_split)] = np.maximum(
n_examples - np.nansum(n_examples_per_split), 1
)
n_examples_per_split = n_examples_per_split.astype("int64")
if n_examples_per_split.sum() > n_examples:
raise ValueError("Splits cannot sum to more than the total input labels.")
# Sample and create new LabelsReader instances.
split_readers = []
for n_samples in n_examples_per_split:
# Sample.
sampled_indices = np.random.default_rng().choice(
labels_indices, size=n_samples, replace=False
)
# Create new instance.
split_readers.append(
LabelsReader(labels_reader.labels, example_indices=sampled_indices)
)
# Exclude the sampled indices from the available indices.
labels_indices = np.setdiff1d(labels_indices, sampled_indices)
return tuple(split_readers)
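# --- Hypothetical usage sketch (not part of the original module) ---
# Both returned readers wrap the same underlying `sleap.Labels`; only their
# `example_indices` differ, so no labeled frames are copied.
#
#   reader_train, reader_val = split_labels_reader(labels_reader, [0.9, -1])
#   train_indices = reader_train.example_indices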
@attr.s(auto_attribs=True)
class KeyMapper:
"""Maps example keys to specified outputs.
This is useful for transforming examples into tuples that map onto specific layer
names for training.
Attributes:
key_maps: Dictionary or list of dictionaries with string keys and values of
the form: {input_key: output_key}. If a list, the examples will be in tuples
in the same order.
"""
key_maps: List[Dict[Text, Text]] = attr.ib(
converter=attr.converters.optional(ensure_list)
)
@property
def input_keys(self) -> List[Text]:
"""Return the keys that incoming elements are expected to have."""
input_keys = []
for key_map in self.key_maps:
input_keys.extend(list(key_map.keys()))
return input_keys
@property
def output_keys(self) -> List[Text]:
"""Return the keys that outgoing elements will have. These may be nested."""
output_keys = []
for key_map in self.key_maps:
output_keys.extend(list(key_map.values()))
return output_keys
def transform_dataset(self, ds_input: tf.data.Dataset) -> tf.data.Dataset:
"""Create a dataset with input keys mapped to new key names.
Args:
ds_input: Any `tf.data.Dataset` that generates examples as a dictionary of
tensors with the keys in `input_keys`.
Return:
A dataset that generates examples with the tensors in `input_keys` mapped to
keys in `output_keys` according to the structure in `key_maps`.
"""
def map_keys(example):
"""Local processing function for dataset mapping."""
output_keys = []
for key_map in self.key_maps:
output_keys.append(
{key_out: example[key_in] for key_in, key_out in key_map.items()}
)
return tuple(output_keys)
ds_output = ds_input.map(map_keys)
return ds_output
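# --- Hypothetical usage sketch (not part of the original module) ---
# Maps dictionary examples onto an (inputs, targets) tuple for training; the
# example keys and layer names below are assumptions for illustration.
#
#   mapper = KeyMapper([{"image": "input_image"}, {"confidence_maps": "confmap_output"}])
#   ds_train = mapper.transform_dataset(ds_train)
#   # each element is now ({"input_image": ...}, {"confmap_output": ...})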
|
paas-ce/paas/esb/components/bk/apis/cc/add_app.py | renmcc/bk-PaaS | 767 | 12766268 | # -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community Edition) available.
Copyright (C) 2017-2018 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
""" # noqa
from django import forms
from common.forms import BaseComponentForm, ListField
from common.constants import API_TYPE_OP
from components.component import Component
from .toolkit import tools, configs
class AddApp(Component):
"""
apiLabel {{ _("新建业务") }}
apiMethod POST
### {{ _("功能描述") }}
{{ _("新建业务") }}
### {{ _("请求参数") }}
{{ common_args_desc }}
#### {{ _("接口参数") }}
| {{ _("字段") }} | {{ _("类型") }} | {{ _("必选") }} | {{ _("描述") }} |
|-----------|------------|--------|------------|
| app_name | string | {{ _("是") }} | {{ _("业务名") }} |
| maintainers | string | {{ _("是") }} | {{ _("运维人员, 多个人之间用逗号分隔") }} |
| product_pm | string | {{ _("否") }} | {{ _("产品人员,多个人之间用逗号分隔") }} |
| developer | string | {{ _("否") }} | {{ _("开发人员,多个人之间用逗号分隔") }} |
| tester | string | {{ _("否") }} | {{ _("测试人员,多个人之间用逗号分隔") }} |
| operator | string | {{ _("否") }} | {{ _("操作者,多个人之间用逗号分隔") }} |
| company_name | string | {{ _("是") }} | {{ _("公司名,cmdb配置文件中定义的constants.php中的 COMPANY_NAME") }} |
| level | int | {{ _("是") }} | {{ _("业务拓扑级别,2或者3") }} |
| life_cycle | string | {{ _("是") }} | {{ _("生成周期,1: 测试中, 2: 已上线, 3: 停运其中的一个值") }} |
### {{ _("请求参数示例") }}
```python
{
"app_code": "esb_test",
"app_secret": "xxx",
"bk_token": "xxx",
"app_name": "Test",
"maintainers": "admin",
"product_pm": "admin",
"company_name": "CompanyName",
"level": 3,
"life_cycle": "1"
}
```
    ### {{ _("返回结果示例") }}
```python
{
"result": true,
"code": "00",
"message": "",
"data": {
"appId": 25
}
}
```
"""
sys_name = configs.SYSTEM_NAME
api_type = API_TYPE_OP
host = configs.host
class Form(BaseComponentForm):
app_name = forms.CharField(label='business name', required=True)
maintainers = ListField(label='OPS', required=True)
product_pm = ListField(label='PM', required=False)
developer = ListField(label='developer', required=False)
tester = ListField(label='test staff', required=False)
operator = ListField(label='operator', required=False)
company_name = forms.CharField(label='company name', required=True)
level = forms.IntegerField(label='business topology level', required=True)
life_cycle = forms.CharField(label='life cycle', required=True)
def clean(self):
data = self.cleaned_data
return {
'ApplicationName': data['app_name'],
'Maintainers': ','.join(data['maintainers']),
'ProductPm': ','.join(data['product_pm']),
'Developer': ','.join(data['developer']),
'Tester': ','.join(data['tester']),
'Operator': ','.join(data['operator']),
'CompanyName': data['company_name'],
'Level': data['level'],
'LifeCycle': data['life_cycle'],
}
def handle(self):
self.form_data['Creator'] = self.current_user.username
client = tools.CCClient(self)
self.response.payload = client.post_request(
self.host,
'/api/app/addApp',
data=self.form_data,
)
|
quora/wsgi.py | alexricheburton/gittest | 116 | 12766296 | import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "quora.settings")
from django.core.wsgi import get_wsgi_application
from dj_static import Cling
from whitenoise.django import DjangoWhiteNoise
application = Cling(get_wsgi_application())
application = DjangoWhiteNoise(application)
|
example/ner/utils.py | tipevo/webstruct | 210 | 12766301 |
# -*- coding: utf-8 -*-
from functools import partial
from tqdm import tqdm
pages_progress = partial(tqdm, unit=' pages', smoothing=False, leave=True)
|
theme/management/commands/reset_quota.py | hydroshare/hydroshare | 178 | 12766318 |
from django.core.management.base import BaseCommand
from django_irods.storage import IrodsStorage
from django.conf import settings
class Command(BaseCommand):
help = "Reset quota by forcing quota iRODS microservices to recalculate quota for all users."
def handle(self, *args, **options):
istorage = IrodsStorage()
# reset quota for data zone
root_path = '/{}/home/{}'.format(settings.IRODS_ZONE, settings.IRODS_USERNAME)
istorage.setAVU(root_path, 'resetQuotaDir', 1)
# reset quota for user zone
user_root_path = '/{}/home/{}'.format(settings.HS_USER_IRODS_ZONE, settings.HS_IRODS_PROXY_USER_IN_USER_ZONE)
istorage.setAVU(user_root_path, 'resetQuotaDir', 1)
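# --- Hypothetical usage sketch (not part of the original command) ---
# As a standard Django management command, this would typically be invoked
# from the project root:
#
#   $ python manage.py reset_quota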
|
readthedocs/rtd_tests/tests/test_version_config.py | mforbes/readthedocs.org | 4,054 | 12766331 |
from django.test import TestCase
from django_dynamic_fixture import get
from readthedocs.builds.models import Build, Version
from readthedocs.projects.models import Project
class VersionConfigTests(TestCase):
def setUp(self):
self.project = get(Project)
self.version = get(Version, project=self.project)
def test_get_correct_config(self):
build_old = Build.objects.create(
project=self.project,
version=self.version,
_config={'version': 1},
)
build_new = Build.objects.create(
project=self.project,
version=self.version,
_config={'version': 2},
)
build_new_error = Build.objects.create(
project=self.project,
version=self.version,
_config={'version': 3},
success=False,
)
build_new_unfinish = Build.objects.create(
project=self.project,
version=self.version,
_config={'version': 4},
state='building',
)
self.assertEqual(self.version.config, {'version': 2})
def test_get_correct_config_when_same_config(self):
build_old = get(
Build,
project=self.project,
version=self.version,
_config={},
)
build_old.config = {'version': 1}
build_old.save()
build_new = get(
Build,
project=self.project,
version=self.version,
_config={},
)
build_new.config = {'version': 1}
build_new.save()
build_new_error = get(
Build,
project=self.project,
version=self.version,
_config={},
success=False,
)
build_new_error.config = {'version': 3}
build_new_error.save()
build_new_unfinish = get(
Build,
project=self.project,
version=self.version,
_config={},
state='building',
)
build_new_unfinish.config = {'version': 1}
build_new_unfinish.save()
config = self.version.config
self.assertEqual(config, {'version': 1})
|
AET/imagenet/config/ImageNet_Unsupervised.py | pjwu1997/teil_project | 114 | 12766347 |
batch_size = 192*4
config = {}
# set the parameters related to the training and testing set
data_train_opt = {}
data_train_opt['batch_size'] = batch_size
data_train_opt['unsupervised'] = True
data_train_opt['epoch_size'] = None
data_train_opt['random_sized_crop'] = False
data_train_opt['dataset_name'] = 'imagenet'
data_train_opt['split'] = 'train'
data_test_opt = {}
data_test_opt['batch_size'] = batch_size
data_test_opt['unsupervised'] = True
data_test_opt['epoch_size'] = None
data_test_opt['random_sized_crop'] = False
data_test_opt['dataset_name'] = 'imagenet'
data_test_opt['split'] = 'val'
config['data_train_opt'] = data_train_opt
config['data_test_opt'] = data_test_opt
config['max_num_epochs'] = 200
net_opt = {}
net_opt['num_classes'] = 8
net_opt['num_stages'] = 4
networks = {}
net_optim_params = {'optim_type': 'sgd', 'lr': 0.01, 'momentum':0.9, 'weight_decay': 5e-4, 'nesterov': True, 'LUT_lr':[(100, 0.01),(150,0.001),(200,0.0001)]}
networks['model'] = {'def_file': 'architectures/AlexNet.py', 'pretrained': None, 'opt': net_opt, 'optim_params': net_optim_params}
config['networks'] = networks
criterions = {}
criterions['loss'] = {'ctype':'MSELoss', 'opt':True}
config['criterions'] = criterions
config['algorithm_type'] = 'UnsupervisedModel'
|
models/fpn_global_local_fmreg_ensemble.py | yinchimaoliang/GLNet | 119 | 12766349 | from .resnet import resnet50
import torch.nn as nn
import torch.nn.functional as F
import torch
import numpy as np
class fpn_module_global(nn.Module):
def __init__(self, numClass):
super(fpn_module_global, self).__init__()
self._up_kwargs = {'mode': 'bilinear'}
# Top layer
self.toplayer = nn.Conv2d(2048, 256, kernel_size=1, stride=1, padding=0) # Reduce channels
# Lateral layers
self.latlayer1 = nn.Conv2d(1024, 256, kernel_size=1, stride=1, padding=0)
self.latlayer2 = nn.Conv2d(512, 256, kernel_size=1, stride=1, padding=0)
self.latlayer3 = nn.Conv2d(256, 256, kernel_size=1, stride=1, padding=0)
# Smooth layers
self.smooth1_1 = nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1)
self.smooth2_1 = nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1)
self.smooth3_1 = nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1)
self.smooth4_1 = nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1)
self.smooth1_2 = nn.Conv2d(256, 128, kernel_size=3, stride=1, padding=1)
self.smooth2_2 = nn.Conv2d(256, 128, kernel_size=3, stride=1, padding=1)
self.smooth3_2 = nn.Conv2d(256, 128, kernel_size=3, stride=1, padding=1)
self.smooth4_2 = nn.Conv2d(256, 128, kernel_size=3, stride=1, padding=1)
# Classify layers
self.classify = nn.Conv2d(128*4, numClass, kernel_size=3, stride=1, padding=1)
# Local2Global: double #channels ####################################
# Top layer
self.toplayer_ext = nn.Conv2d(2048*2, 256, kernel_size=1, stride=1, padding=0) # Reduce channels
# Lateral layers
self.latlayer1_ext = nn.Conv2d(1024*2, 256, kernel_size=1, stride=1, padding=0)
self.latlayer2_ext = nn.Conv2d(512*2, 256, kernel_size=1, stride=1, padding=0)
self.latlayer3_ext = nn.Conv2d(256*2, 256, kernel_size=1, stride=1, padding=0)
# Smooth layers
self.smooth1_1_ext = nn.Conv2d(256*2, 256, kernel_size=3, stride=1, padding=1)
self.smooth2_1_ext = nn.Conv2d(256*2, 256, kernel_size=3, stride=1, padding=1)
self.smooth3_1_ext = nn.Conv2d(256*2, 256, kernel_size=3, stride=1, padding=1)
self.smooth4_1_ext = nn.Conv2d(256*2, 256, kernel_size=3, stride=1, padding=1)
self.smooth1_2_ext = nn.Conv2d(256*2, 128, kernel_size=3, stride=1, padding=1)
self.smooth2_2_ext = nn.Conv2d(256*2, 128, kernel_size=3, stride=1, padding=1)
self.smooth3_2_ext = nn.Conv2d(256*2, 128, kernel_size=3, stride=1, padding=1)
self.smooth4_2_ext = nn.Conv2d(256*2, 128, kernel_size=3, stride=1, padding=1)
self.smooth = nn.Conv2d(128*4*2, 128*4, kernel_size=3, stride=1, padding=1)
def _concatenate(self, p5, p4, p3, p2):
_, _, H, W = p2.size()
p5 = F.interpolate(p5, size=(H, W), **self._up_kwargs)
p4 = F.interpolate(p4, size=(H, W), **self._up_kwargs)
p3 = F.interpolate(p3, size=(H, W), **self._up_kwargs)
return torch.cat([p5, p4, p3, p2], dim=1)
def _upsample_add(self, x, y):
'''Upsample and add two feature maps.
Args:
x: (Variable) top feature map to be upsampled.
y: (Variable) lateral feature map.
Returns:
(Variable) added feature map.
Note in PyTorch, when input size is odd, the upsampled feature map
with `F.interpolate(..., scale_factor=2, mode='nearest')`
        may not be equal to the lateral feature map size.
e.g.
original input size: [N,_,15,15] ->
conv2d feature map size: [N,_,8,8] ->
upsampled feature map size: [N,_,16,16]
So we choose bilinear upsample which supports arbitrary output sizes.
'''
_, _, H, W = y.size()
return F.interpolate(x, size=(H, W), **self._up_kwargs) + y
def forward(self, c2, c3, c4, c5, c2_ext=None, c3_ext=None, c4_ext=None, c5_ext=None, ps0_ext=None, ps1_ext=None, ps2_ext=None):
# Top-down
if c5_ext is None:
p5 = self.toplayer(c5)
p4 = self._upsample_add(p5, self.latlayer1(c4))
p3 = self._upsample_add(p4, self.latlayer2(c3))
p2 = self._upsample_add(p3, self.latlayer3(c2))
else:
p5 = self.toplayer_ext(torch.cat((c5, c5_ext), dim=1))
p4 = self._upsample_add(p5, self.latlayer1_ext(torch.cat((c4, c4_ext), dim=1)))
p3 = self._upsample_add(p4, self.latlayer2_ext(torch.cat((c3, c3_ext), dim=1)))
p2 = self._upsample_add(p3, self.latlayer3_ext(torch.cat((c2, c2_ext), dim=1)))
ps0 = [p5, p4, p3, p2]
# Smooth
if ps0_ext is None:
p5 = self.smooth1_1(p5)
p4 = self.smooth2_1(p4)
p3 = self.smooth3_1(p3)
p2 = self.smooth4_1(p2)
else:
p5 = self.smooth1_1_ext(torch.cat((p5, ps0_ext[0]), dim=1))
p4 = self.smooth2_1_ext(torch.cat((p4, ps0_ext[1]), dim=1))
p3 = self.smooth3_1_ext(torch.cat((p3, ps0_ext[2]), dim=1))
p2 = self.smooth4_1_ext(torch.cat((p2, ps0_ext[3]), dim=1))
ps1 = [p5, p4, p3, p2]
if ps1_ext is None:
p5 = self.smooth1_2(p5)
p4 = self.smooth2_2(p4)
p3 = self.smooth3_2(p3)
p2 = self.smooth4_2(p2)
else:
p5 = self.smooth1_2_ext(torch.cat((p5, ps1_ext[0]), dim=1))
p4 = self.smooth2_2_ext(torch.cat((p4, ps1_ext[1]), dim=1))
p3 = self.smooth3_2_ext(torch.cat((p3, ps1_ext[2]), dim=1))
p2 = self.smooth4_2_ext(torch.cat((p2, ps1_ext[3]), dim=1))
ps2 = [p5, p4, p3, p2]
# Classify
if ps2_ext is None:
ps3 = self._concatenate(p5, p4, p3, p2)
output = self.classify(ps3)
else:
p = self._concatenate(
torch.cat((p5, ps2_ext[0]), dim=1),
torch.cat((p4, ps2_ext[1]), dim=1),
torch.cat((p3, ps2_ext[2]), dim=1),
torch.cat((p2, ps2_ext[3]), dim=1)
)
ps3 = self.smooth(p)
output = self.classify(ps3)
return output, ps0, ps1, ps2, ps3
class fpn_module_local(nn.Module):
def __init__(self, numClass):
super(fpn_module_local, self).__init__()
self._up_kwargs = {'mode': 'bilinear'}
# Top layer
fold = 2
self.toplayer = nn.Conv2d(2048 * fold, 256, kernel_size=1, stride=1, padding=0) # Reduce channels
# Lateral layers [C]
self.latlayer1 = nn.Conv2d(1024 * fold, 256, kernel_size=1, stride=1, padding=0)
self.latlayer2 = nn.Conv2d(512 * fold, 256, kernel_size=1, stride=1, padding=0)
self.latlayer3 = nn.Conv2d(256 * fold, 256, kernel_size=1, stride=1, padding=0)
# Smooth layers
# ps0
self.smooth1_1 = nn.Conv2d(256 * fold, 256, kernel_size=3, stride=1, padding=1)
self.smooth2_1 = nn.Conv2d(256 * fold, 256, kernel_size=3, stride=1, padding=1)
self.smooth3_1 = nn.Conv2d(256 * fold, 256, kernel_size=3, stride=1, padding=1)
self.smooth4_1 = nn.Conv2d(256 * fold, 256, kernel_size=3, stride=1, padding=1)
# ps1
self.smooth1_2 = nn.Conv2d(256 * fold, 128, kernel_size=3, stride=1, padding=1)
self.smooth2_2 = nn.Conv2d(256 * fold, 128, kernel_size=3, stride=1, padding=1)
self.smooth3_2 = nn.Conv2d(256 * fold, 128, kernel_size=3, stride=1, padding=1)
self.smooth4_2 = nn.Conv2d(256 * fold, 128, kernel_size=3, stride=1, padding=1)
# ps2 is concatenation
# Classify layers
self.smooth = nn.Conv2d(128*4*fold, 128*4, kernel_size=3, stride=1, padding=1)
self.classify = nn.Conv2d(128*4, numClass, kernel_size=3, stride=1, padding=1)
def _concatenate(self, p5, p4, p3, p2):
_, _, H, W = p2.size()
p5 = F.interpolate(p5, size=(H, W), **self._up_kwargs)
p4 = F.interpolate(p4, size=(H, W), **self._up_kwargs)
p3 = F.interpolate(p3, size=(H, W), **self._up_kwargs)
return torch.cat([p5, p4, p3, p2], dim=1)
def _upsample_add(self, x, y):
'''Upsample and add two feature maps.
Args:
x: (Variable) top feature map to be upsampled.
y: (Variable) lateral feature map.
Returns:
(Variable) added feature map.
Note in PyTorch, when input size is odd, the upsampled feature map
with `F.interpolate(..., scale_factor=2, mode='nearest')`
        may not be equal to the lateral feature map size.
e.g.
original input size: [N,_,15,15] ->
conv2d feature map size: [N,_,8,8] ->
upsampled feature map size: [N,_,16,16]
So we choose bilinear upsample which supports arbitrary output sizes.
'''
_, _, H, W = y.size()
return F.interpolate(x, size=(H, W), **self._up_kwargs) + y
def forward(self, c2, c3, c4, c5, c2_ext, c3_ext, c4_ext, c5_ext, ps0_ext, ps1_ext, ps2_ext):
# Top-down
p5 = self.toplayer(torch.cat([c5] + [F.interpolate(c5_ext[0], size=c5.size()[2:], **self._up_kwargs)], dim=1))
p4 = self._upsample_add(p5, self.latlayer1(torch.cat([c4] + [F.interpolate(c4_ext[0], size=c4.size()[2:], **self._up_kwargs)], dim=1)))
p3 = self._upsample_add(p4, self.latlayer2(torch.cat([c3] + [F.interpolate(c3_ext[0], size=c3.size()[2:], **self._up_kwargs)], dim=1)))
p2 = self._upsample_add(p3, self.latlayer3(torch.cat([c2] + [F.interpolate(c2_ext[0], size=c2.size()[2:], **self._up_kwargs)], dim=1)))
ps0 = [p5, p4, p3, p2]
# Smooth
p5 = self.smooth1_1(torch.cat([p5] + [F.interpolate(ps0_ext[0][0], size=p5.size()[2:], **self._up_kwargs)], dim=1))
p4 = self.smooth2_1(torch.cat([p4] + [F.interpolate(ps0_ext[1][0], size=p4.size()[2:], **self._up_kwargs)], dim=1))
p3 = self.smooth3_1(torch.cat([p3] + [F.interpolate(ps0_ext[2][0], size=p3.size()[2:], **self._up_kwargs)], dim=1))
p2 = self.smooth4_1(torch.cat([p2] + [F.interpolate(ps0_ext[3][0], size=p2.size()[2:], **self._up_kwargs)], dim=1))
ps1 = [p5, p4, p3, p2]
p5 = self.smooth1_2(torch.cat([p5] + [F.interpolate(ps1_ext[0][0], size=p5.size()[2:], **self._up_kwargs)], dim=1))
p4 = self.smooth2_2(torch.cat([p4] + [F.interpolate(ps1_ext[1][0], size=p4.size()[2:], **self._up_kwargs)], dim=1))
p3 = self.smooth3_2(torch.cat([p3] + [F.interpolate(ps1_ext[2][0], size=p3.size()[2:], **self._up_kwargs)], dim=1))
p2 = self.smooth4_2(torch.cat([p2] + [F.interpolate(ps1_ext[3][0], size=p2.size()[2:], **self._up_kwargs)], dim=1))
ps2 = [p5, p4, p3, p2]
# Classify
# use ps2_ext
ps3 = self._concatenate(
torch.cat([p5] + [F.interpolate(ps2_ext[0][0], size=p5.size()[2:], **self._up_kwargs)], dim=1),
torch.cat([p4] + [F.interpolate(ps2_ext[1][0], size=p4.size()[2:], **self._up_kwargs)], dim=1),
torch.cat([p3] + [F.interpolate(ps2_ext[2][0], size=p3.size()[2:], **self._up_kwargs)], dim=1),
torch.cat([p2] + [F.interpolate(ps2_ext[3][0], size=p2.size()[2:], **self._up_kwargs)], dim=1)
)
ps3 = self.smooth(ps3)
output = self.classify(ps3)
return output, ps0, ps1, ps2, ps3
class fpn(nn.Module):
def __init__(self, numClass):
super(fpn, self).__init__()
self._up_kwargs = {'mode': 'bilinear'}
# Res net
self.resnet_global = resnet50(True)
self.resnet_local = resnet50(True)
# fpn module
self.fpn_global = fpn_module_global(numClass)
self.fpn_local = fpn_module_local(numClass)
self.c2_g = None; self.c3_g = None; self.c4_g = None; self.c5_g = None; self.output_g = None
self.ps0_g = None; self.ps1_g = None; self.ps2_g = None; self.ps3_g = None
self.c2_l = []; self.c3_l = []; self.c4_l = []; self.c5_l = [];
self.ps00_l = []; self.ps01_l = []; self.ps02_l = []; self.ps03_l = [];
self.ps10_l = []; self.ps11_l = []; self.ps12_l = []; self.ps13_l = [];
self.ps20_l = []; self.ps21_l = []; self.ps22_l = []; self.ps23_l = [];
self.ps0_l = None; self.ps1_l = None; self.ps2_l = None
self.ps3_l = []#; self.output_l = []
self.c2_b = None; self.c3_b = None; self.c4_b = None; self.c5_b = None;
self.ps00_b = None; self.ps01_b = None; self.ps02_b = None; self.ps03_b = None;
self.ps10_b = None; self.ps11_b = None; self.ps12_b = None; self.ps13_b = None;
self.ps20_b = None; self.ps21_b = None; self.ps22_b = None; self.ps23_b = None;
self.ps3_b = []#; self.output_b = []
self.patch_n = 0
self.mse = nn.MSELoss()
self.ensemble_conv = nn.Conv2d(128*4 * 2, numClass, kernel_size=3, stride=1, padding=1)
nn.init.normal_(self.ensemble_conv.weight, mean=0, std=0.01)
# init fpn
for m in self.fpn_global.children():
if hasattr(m, 'weight'): nn.init.normal_(m.weight, mean=0, std=0.01)
if hasattr(m, 'bias'): nn.init.constant_(m.bias, 0)
for m in self.fpn_local.children():
if hasattr(m, 'weight'): nn.init.normal_(m.weight, mean=0, std=0.01)
if hasattr(m, 'bias'): nn.init.constant_(m.bias, 0)
def clear_cache(self):
self.c2_g = None; self.c3_g = None; self.c4_g = None; self.c5_g = None; self.output_g = None
self.ps0_g = None; self.ps1_g = None; self.ps2_g = None; self.ps3_g = None
self.c2_l = []; self.c3_l = []; self.c4_l = []; self.c5_l = [];
self.ps00_l = []; self.ps01_l = []; self.ps02_l = []; self.ps03_l = [];
self.ps10_l = []; self.ps11_l = []; self.ps12_l = []; self.ps13_l = [];
self.ps20_l = []; self.ps21_l = []; self.ps22_l = []; self.ps23_l = [];
self.ps0_l = None; self.ps1_l = None; self.ps2_l = None
self.ps3_l = []; self.output_l = []
self.c2_b = None; self.c3_b = None; self.c4_b = None; self.c5_b = None;
self.ps00_b = None; self.ps01_b = None; self.ps02_b = None; self.ps03_b = None;
self.ps10_b = None; self.ps11_b = None; self.ps12_b = None; self.ps13_b = None;
self.ps20_b = None; self.ps21_b = None; self.ps22_b = None; self.ps23_b = None;
self.ps3_b = []; self.output_b = []
self.patch_n = 0
def _sample_grid(self, fm, bbox, sampleSize):
"""
:param fm: tensor(b,c,h,w) the global feature map
:param bbox: list [b* nparray(x1, y1, x2, y2)] the (x1,y1) is the left_top of bbox, (x2, y2) is the right_bottom of bbox
            They are in range [0, 1]; x corresponds to the width dimension and y to the height dimension.
:param sampleSize: (oH, oW) the point to sample in height dimension and width dimension
:return: tensor(b, c, oH, oW) sampled tensor
"""
b, c, h, w = fm.shape
b_bbox = len(bbox)
bbox = [x*2 - 1 for x in bbox] # range transform
if b != b_bbox and b == 1:
fm = torch.cat([fm,]*b_bbox, dim=0)
grid = np.zeros((b_bbox,) + sampleSize + (2,), dtype=np.float32)
gridMap = np.array([[(cnt_w/(sampleSize[1]-1), cnt_h/(sampleSize[0]-1)) for cnt_w in range(sampleSize[1])] for cnt_h in range(sampleSize[0])])
for cnt_b in range(b_bbox):
grid[cnt_b, :, :, 0] = bbox[cnt_b][0] + (bbox[cnt_b][2] - bbox[cnt_b][0])*gridMap[:, :, 0]
grid[cnt_b, :, :, 1] = bbox[cnt_b][1] + (bbox[cnt_b][3] - bbox[cnt_b][1])*gridMap[:, :, 1]
grid = torch.from_numpy(grid).cuda()
return F.grid_sample(fm, grid)
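    # --- Hypothetical usage sketch (not part of the original class) ---
    # Samples a fixed-size crop from a global feature map using a normalized
    # (x1, y1, x2, y2) box; the tensor sizes below are assumptions.
    #
    #   fm = torch.randn(1, 256, 64, 64).cuda()
    #   bbox = [np.array([0.25, 0.25, 0.75, 0.75])]
    #   crop = self._sample_grid(fm, bbox, (32, 32))  # -> shape (1, 256, 32, 32)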
def _crop_global(self, f_global, top_lefts, ratio):
'''
top_lefts: [(top, left)] * b
'''
_, c, H, W = f_global.size()
b = len(top_lefts)
h, w = int(np.round(H * ratio[0])), int(np.round(W * ratio[1]))
# bbox = [ np.array([left, top, left + ratio, top + ratio]) for (top, left) in top_lefts ]
# crop = self._sample_grid(f_global, bbox, (H, W))
crop = []
for i in range(b):
top, left = int(np.round(top_lefts[i][0] * H)), int(np.round(top_lefts[i][1] * W))
# # global's sub-region & upsample
# f_global_patch = F.interpolate(f_global[0:1, :, top:top+h, left:left+w], size=(h, w), mode='bilinear')
f_global_patch = f_global[0:1, :, top:top+h, left:left+w]
crop.append(f_global_patch[0])
crop = torch.stack(crop, dim=0) # stack into mini-batch
return [crop] # return as a list for easy to torch.cat
def _merge_local(self, f_local, merge, f_global, top_lefts, oped, ratio, template):
'''
merge feature maps from local patches, and finally to a whole image's feature map (on cuda)
f_local: a sub_batch_size of patch's feature map
oped: [start, end)
'''
b, _, _, _ = f_local.size()
_, c, H, W = f_global.size() # match global feature size
if merge is None:
merge = torch.zeros((1, c, H, W)).cuda()
h, w = int(np.round(H * ratio[0])), int(np.round(W * ratio[1]))
for i in range(b):
index = oped[0] + i
top, left = int(np.round(H * top_lefts[index][0])), int(np.round(W * top_lefts[index][1]))
merge[:, :, top:top+h, left:left+w] += F.interpolate(f_local[i:i+1], size=(h, w), **self._up_kwargs)
if oped[1] >= len(top_lefts):
template = F.interpolate(template, size=(H, W), **self._up_kwargs)
template = template.expand_as(merge)
# template = Variable(template).cuda()
merge /= template
return merge
def ensemble(self, f_local, f_global):
return self.ensemble_conv(torch.cat((f_local, f_global), dim=1))
def collect_local_fm(self, image_global, patches, ratio, top_lefts, oped, batch_size, global_model=None, template=None, n_patch_all=None):
'''
patches: 1 patch
top_lefts: all top-left
oped: [start, end)
'''
with torch.no_grad():
if self.patch_n == 0:
self.c2_g, self.c3_g, self.c4_g, self.c5_g = global_model.module.resnet_global.forward(image_global)
self.output_g, self.ps0_g, self.ps1_g, self.ps2_g, self.ps3_g = global_model.module.fpn_global.forward(self.c2_g, self.c3_g, self.c4_g, self.c5_g)
# self.output_g = F.interpolate(self.output_g, image_global.size()[2:], mode='nearest')
self.patch_n += patches.size()[0]
self.patch_n %= n_patch_all
self.resnet_local.eval()
self.fpn_local.eval()
c2, c3, c4, c5 = self.resnet_local.forward(patches)
# global's 1x patch cat
output, ps0, ps1, ps2, ps3 = self.fpn_local.forward(
c2, c3, c4, c5,
self._crop_global(self.c2_g, top_lefts[oped[0]:oped[1]], ratio),
c3_ext=self._crop_global(self.c3_g, top_lefts[oped[0]:oped[1]], ratio),
c4_ext=self._crop_global(self.c4_g, top_lefts[oped[0]:oped[1]], ratio),
c5_ext=self._crop_global(self.c5_g, top_lefts[oped[0]:oped[1]], ratio),
ps0_ext=[ self._crop_global(f, top_lefts[oped[0]:oped[1]], ratio) for f in self.ps0_g ],
ps1_ext=[ self._crop_global(f, top_lefts[oped[0]:oped[1]], ratio) for f in self.ps1_g ],
ps2_ext=[ self._crop_global(f, top_lefts[oped[0]:oped[1]], ratio) for f in self.ps2_g ]
)
# output = F.interpolate(output, patches.size()[2:], mode='nearest')
self.c2_b = self._merge_local(c2, self.c2_b, self.c2_g, top_lefts, oped, ratio, template)
self.c3_b = self._merge_local(c3, self.c3_b, self.c3_g, top_lefts, oped, ratio, template)
self.c4_b = self._merge_local(c4, self.c4_b, self.c4_g, top_lefts, oped, ratio, template)
self.c5_b = self._merge_local(c5, self.c5_b, self.c5_g, top_lefts, oped, ratio, template)
self.ps00_b = self._merge_local(ps0[0], self.ps00_b, self.ps0_g[0], top_lefts, oped, ratio, template)
self.ps01_b = self._merge_local(ps0[1], self.ps01_b, self.ps0_g[1], top_lefts, oped, ratio, template)
self.ps02_b = self._merge_local(ps0[2], self.ps02_b, self.ps0_g[2], top_lefts, oped, ratio, template)
self.ps03_b = self._merge_local(ps0[3], self.ps03_b, self.ps0_g[3], top_lefts, oped, ratio, template)
self.ps10_b = self._merge_local(ps1[0], self.ps10_b, self.ps1_g[0], top_lefts, oped, ratio, template)
self.ps11_b = self._merge_local(ps1[1], self.ps11_b, self.ps1_g[1], top_lefts, oped, ratio, template)
self.ps12_b = self._merge_local(ps1[2], self.ps12_b, self.ps1_g[2], top_lefts, oped, ratio, template)
self.ps13_b = self._merge_local(ps1[3], self.ps13_b, self.ps1_g[3], top_lefts, oped, ratio, template)
self.ps20_b = self._merge_local(ps2[0], self.ps20_b, self.ps2_g[0], top_lefts, oped, ratio, template)
self.ps21_b = self._merge_local(ps2[1], self.ps21_b, self.ps2_g[1], top_lefts, oped, ratio, template)
self.ps22_b = self._merge_local(ps2[2], self.ps22_b, self.ps2_g[2], top_lefts, oped, ratio, template)
self.ps23_b = self._merge_local(ps2[3], self.ps23_b, self.ps2_g[3], top_lefts, oped, ratio, template)
self.ps3_b.append(ps3.cpu())
# self.output_b.append(output.cpu()) # each output is 1, 7, h, w
if self.patch_n == 0:
# merged all patches into an image
self.c2_l.append(self.c2_b); self.c3_l.append(self.c3_b); self.c4_l.append(self.c4_b); self.c5_l.append(self.c5_b);
self.ps00_l.append(self.ps00_b); self.ps01_l.append(self.ps01_b); self.ps02_l.append(self.ps02_b); self.ps03_l.append(self.ps03_b)
self.ps10_l.append(self.ps10_b); self.ps11_l.append(self.ps11_b); self.ps12_l.append(self.ps12_b); self.ps13_l.append(self.ps13_b)
self.ps20_l.append(self.ps20_b); self.ps21_l.append(self.ps21_b); self.ps22_l.append(self.ps22_b); self.ps23_l.append(self.ps23_b)
# collected all ps3 and output of patches as a (b) tensor, append into list
self.ps3_l.append(torch.cat(self.ps3_b, dim=0)); # a list of tensors
# self.output_l.append(torch.cat(self.output_b, dim=0)) # a list of 36, 7, h, w tensors
self.c2_b = None; self.c3_b = None; self.c4_b = None; self.c5_b = None;
self.ps00_b = None; self.ps01_b = None; self.ps02_b = None; self.ps03_b = None;
self.ps10_b = None; self.ps11_b = None; self.ps12_b = None; self.ps13_b = None;
self.ps20_b = None; self.ps21_b = None; self.ps22_b = None; self.ps23_b = None;
self.ps3_b = []# ; self.output_b = []
if len(self.c2_l) == batch_size:
self.c2_l = torch.cat(self.c2_l, dim=0)# .cuda()
self.c3_l = torch.cat(self.c3_l, dim=0)# .cuda()
self.c4_l = torch.cat(self.c4_l, dim=0)# .cuda()
self.c5_l = torch.cat(self.c5_l, dim=0)# .cuda()
self.ps00_l = torch.cat(self.ps00_l, dim=0)# .cuda()
self.ps01_l = torch.cat(self.ps01_l, dim=0)# .cuda()
self.ps02_l = torch.cat(self.ps02_l, dim=0)# .cuda()
self.ps03_l = torch.cat(self.ps03_l, dim=0)# .cuda()
self.ps10_l = torch.cat(self.ps10_l, dim=0)# .cuda()
self.ps11_l = torch.cat(self.ps11_l, dim=0)# .cuda()
self.ps12_l = torch.cat(self.ps12_l, dim=0)# .cuda()
self.ps13_l = torch.cat(self.ps13_l, dim=0)# .cuda()
self.ps20_l = torch.cat(self.ps20_l, dim=0)# .cuda()
self.ps21_l = torch.cat(self.ps21_l, dim=0)# .cuda()
self.ps22_l = torch.cat(self.ps22_l, dim=0)# .cuda()
self.ps23_l = torch.cat(self.ps23_l, dim=0)# .cuda()
self.ps0_l = [self.ps00_l, self.ps01_l, self.ps02_l, self.ps03_l]
self.ps1_l = [self.ps10_l, self.ps11_l, self.ps12_l, self.ps13_l]
self.ps2_l = [self.ps20_l, self.ps21_l, self.ps22_l, self.ps23_l]
# self.ps3_l = torch.cat(self.ps3_l, dim=0)# .cuda()
return self.ps3_l, output# self.output_l
def forward(self, image_global, patches, top_lefts, ratio, mode=1, global_model=None, n_patch=None):
if mode == 1:
# train global model
c2_g, c3_g, c4_g, c5_g = self.resnet_global.forward(image_global)
output_g, ps0_g, ps1_g, ps2_g, ps3_g = self.fpn_global.forward(c2_g, c3_g, c4_g, c5_g)
# imsize = image_global.size()[2:]
# output_g = F.interpolate(output_g, imsize, mode='nearest')
return output_g, None
elif mode == 2:
# train global2local model
with torch.no_grad():
if self.patch_n == 0:
# calculate global images only if patches belong to a new set of global images (when self.patch_n % n_patch == 0)
self.c2_g, self.c3_g, self.c4_g, self.c5_g = self.resnet_global.forward(image_global)
self.output_g, self.ps0_g, self.ps1_g, self.ps2_g, self.ps3_g = self.fpn_global.forward(self.c2_g, self.c3_g, self.c4_g, self.c5_g)
# imsize_glb = image_global.size()[2:]
# self.output_g = F.interpolate(self.output_g, imsize_glb, mode='nearest')
self.patch_n += patches.size()[0]
self.patch_n %= n_patch
# train local model #######################################
c2_l, c3_l, c4_l, c5_l = self.resnet_local.forward(patches)
# global's 1x patch cat
output_l, ps0_l, ps1_l, ps2_l, ps3_l = self.fpn_local.forward(c2_l, c3_l, c4_l, c5_l,
self._crop_global(self.c2_g, top_lefts, ratio),
self._crop_global(self.c3_g, top_lefts, ratio),
self._crop_global(self.c4_g, top_lefts, ratio),
self._crop_global(self.c5_g, top_lefts, ratio),
[ self._crop_global(f, top_lefts, ratio) for f in self.ps0_g ],
[ self._crop_global(f, top_lefts, ratio) for f in self.ps1_g ],
[ self._crop_global(f, top_lefts, ratio) for f in self.ps2_g ]
)
# imsize = patches.size()[2:]
# output_l = F.interpolate(output_l, imsize, mode='nearest')
ps3_g2l = self._crop_global(self.ps3_g, top_lefts, ratio)[0] # only calculate loss on 1x
ps3_g2l = F.interpolate(ps3_g2l, size=ps3_l.size()[2:], **self._up_kwargs)
output = self.ensemble(ps3_l, ps3_g2l)
# output = F.interpolate(output, imsize, mode='nearest')
return output, self.output_g, output_l, self.mse(ps3_l, ps3_g2l)
else:
# train local2global model
c2_g, c3_g, c4_g, c5_g = self.resnet_global.forward(image_global)
# local patch cat into global
output_g, ps0_g, ps1_g, ps2_g, ps3_g = self.fpn_global.forward(c2_g, c3_g, c4_g, c5_g, c2_ext=self.c2_l, c3_ext=self.c3_l, c4_ext=self.c4_l, c5_ext=self.c5_l, ps0_ext=self.ps0_l, ps1_ext=self.ps1_l, ps2_ext=self.ps2_l)
# imsize = image_global.size()[2:]
# output_g = F.interpolate(output_g, imsize, mode='nearest')
self.clear_cache()
            return output_g, ps3_g
|
src/opnsense/scripts/filter/pftop.py | onedr0p/core | 2,109 | 12766350 |
#!/usr/local/bin/python3
"""
Copyright (c) 2021 <NAME> <<EMAIL>>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
"""
import ujson
import argparse
from lib.states import query_top
if __name__ == '__main__':
# parse input arguments
parser = argparse.ArgumentParser()
parser.add_argument('--filter', help='filter results', default='')
parser.add_argument('--limit', help='limit number of results', default='')
parser.add_argument('--offset', help='offset results', default='')
parser.add_argument('--label', help='label / rule id', default='')
parser.add_argument('--sort_by', help='sort by (field asc|desc)', default='')
inputargs = parser.parse_args()
result = {
'details': query_top(filter_str=inputargs.filter, rule_label=inputargs.label)
}
# sort results
if inputargs.sort_by.strip() != '' and len(result['details']) > 0:
sort_key = inputargs.sort_by.split()[0]
sort_desc = inputargs.sort_by.split()[-1] == 'desc'
if sort_key in result['details'][0]:
if type(result['details'][0][sort_key]) is int:
sorter = lambda k: k[sort_key] if sort_key in k else 0
else:
sorter = lambda k: str(k[sort_key]).lower() if sort_key in k else ''
result['details'] = sorted(result['details'], key=sorter, reverse=sort_desc)
result['total_entries'] = len(result['details'])
# apply offset and limit
if inputargs.offset.isdigit():
result['details'] = result['details'][int(inputargs.offset):]
if inputargs.limit.isdigit() and len(result['details']) >= int(inputargs.limit):
result['details'] = result['details'][:int(inputargs.limit)]
result['total'] = len(result['details'])
print(ujson.dumps(result))
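# --- Hypothetical usage sketch (not part of the original script) ---
# Typical invocation; the sort field is an assumption, since valid fields
# depend on the dictionaries returned by query_top().
#
#   $ python3 pftop.py --filter 192.168.1.10 --sort_by 'label desc' --limit 25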
|
evaluate.py | wmylxmj/Anime-Super-Resolution | 120 | 12766362 | # -*- coding: utf-8 -*-
"""
Created on Tue Apr 30 21:24:36 2019
@author: wmy
"""
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from PIL import Image
from keras import backend as K
from keras.losses import mean_absolute_error, mean_squared_error
from keras.models import load_model
from keras.optimizers import Adam
import random
import os
from model import wdsr_a, wdsr_b
from utils import DataLoader
model = wdsr_b(scale=4, num_res_blocks=32)
model.load_weights('./weights/wdsr-b-32-x4.h5')
data_loader = DataLoader(scale=4)
def evaluate_test(model, setpath='datasets/train', difficulty='easy', name='evaluate'):
images = data_loader.search(setpath)
image = random.choice(images)
hr = data_loader.imread(image)
resize = (hr.size[0]//data_loader.scale, hr.size[1]//data_loader.scale)
hidden_scale = random.uniform(1, 3)
radius = random.uniform(1, 3)
if difficulty=='easy':
hidden_scale = random.uniform(1, 1.5)
radius = random.uniform(1, 1.5)
pass
elif difficulty=='normal':
hidden_scale = random.uniform(1.5, 2)
radius = random.uniform(1.5, 2)
pass
elif difficulty=='hard':
hidden_scale = random.uniform(2, 2.5)
radius = random.uniform(2, 2.5)
pass
elif difficulty=='lunatic':
hidden_scale = random.uniform(2.5, 3)
radius = random.uniform(2.5, 3)
pass
else:
raise ValueError("unknown difficulty")
hidden_resize = (int(resize[0]/hidden_scale), int(resize[1]/hidden_scale))
lr = data_loader.gaussianblur(hr, radius)
lr = lr.resize(hidden_resize)
lr = lr.resize(resize)
lr_resize = lr.resize(hr.size)
lr = np.asarray(lr)
sr = model.predict(np.array([lr]))[0]
sr = np.clip(sr, 0, 255)
sr = sr.astype('uint8')
lr = Image.fromarray(lr)
sr = Image.fromarray(sr)
lr_resize.save("images/" + name + "_lr.jpg")
sr.save("images/" + name + "_sr.jpg")
hr.save("images/" + name + "_hr.jpg")
pass
evaluate_test(model, difficulty='easy', name='easy')
evaluate_test(model, difficulty='normal', name='normal')
evaluate_test(model, difficulty='hard', name='hard')
evaluate_test(model, difficulty='lunatic', name='lunatic')
|
Gelatin/compiler/Context.py | Etherbay/Gelatin | 107 | 12766363 | # Copyright (c) 2010-2017 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
import codecs
def do_next(context):
return 0
def do_skip(context):
return 1
def do_fail(context, message='No matching statement found'):
context._error(message)
def do_say(context, message):
context._msg(message)
return 0
def do_warn(context, message):
context._warn(message)
return 0
def do_return(context, levels=1):
# print "do.return():", -levels
return -levels
def out_create(context, path, data=None):
# print "out.create():", path, data
context.builder.create(path, data)
context.builder.enter(path)
context._trigger(context.on_add, context.re_stack[-1])
context.builder.leave()
return 0
def out_replace(context, path, data=None):
# print "out.replace():", path, data
context.builder.add(path, data, replace=True)
context.builder.enter(path)
context._trigger(context.on_add, context.re_stack[-1])
context.builder.leave()
return 0
def out_add(context, path, data=None):
# print "out.add():", path, data
context.builder.add(path, data)
context.builder.enter(path)
context._trigger(context.on_add, context.re_stack[-1])
context.builder.leave()
return 0
def out_add_attribute(context, path, name, value):
# print "out.add_attribute():", path, name, value
context.builder.add_attribute(path, name, value)
context.builder.enter(path)
context._trigger(context.on_add, context.re_stack[-1])
context.builder.leave()
return 0
def out_open(context, path):
# print "out.open():", path
context.builder.open(path)
context._trigger(context.on_add, context.re_stack[-1])
context.stack[-1].on_leave.append((context.builder.leave, ()))
return 0
def out_enter(context, path):
# print "out.enter():", path
context.builder.enter(path)
context._trigger(context.on_add, context.re_stack[-1])
context.stack[-1].on_leave.append((context.builder.leave, ()))
return 0
def out_enqueue_before(context, regex, path, data=None):
# print "ENQ BEFORE", regex.pattern, path, data
context.on_match_before.append((regex, out_add, (context, path, data)))
return 0
def out_enqueue_after(context, regex, path, data=None):
# print "ENQ AFTER", regex.pattern, path, data
context.on_match_after.append((regex, out_add, (context, path, data)))
return 0
def out_enqueue_on_add(context, regex, path, data=None):
# print "ENQ ON ADD", regex.pattern, path, data
context.on_add.append((regex, out_add, (context, path, data)))
return 0
def out_clear_queue(context):
context._clear_triggers()
return 1
def out_set_root_name(context, name):
context.builder.set_root_name(name)
return 0
class Context(object):
def __init__(self):
self.functions = {'do.fail': do_fail,
'do.return': do_return,
'do.next': do_next,
'do.skip': do_skip,
'do.say': do_say,
'do.warn': do_warn,
'out.create': out_create,
'out.replace': out_replace,
'out.add': out_add,
'out.add_attribute': out_add_attribute,
'out.open': out_open,
'out.enter': out_enter,
'out.enqueue_before': out_enqueue_before,
'out.enqueue_after': out_enqueue_after,
'out.enqueue_on_add': out_enqueue_on_add,
'out.clear_queue': out_clear_queue,
'out.set_root_name': out_set_root_name}
self.lexicon = {}
self.grammars = {}
self.input = None
self.builder = None
self.end = 0
self._init()
def _init(self):
self.start = 0
self.re_stack = []
self.stack = []
self._clear_triggers()
def _clear_triggers(self):
self.on_match_before = []
self.on_match_after = []
self.on_add = []
def _trigger(self, triggers, match):
matching = []
for trigger in triggers:
regex, func, args = trigger
if regex.search(match.group(0)) is not None:
matching.append(trigger)
for trigger in matching:
triggers.remove(trigger)
for trigger in matching:
regex, func, args = trigger
func(*args)
def _match_before_notify(self, match):
self.re_stack.append(match)
self._trigger(self.on_match_before, match)
def _match_after_notify(self, match):
self._trigger(self.on_match_after, match)
self.re_stack.pop()
def _get_lineno(self):
return self.input.count('\n', 0, self.start) + 1
def _get_line(self, number=None):
if number is None:
number = self._get_lineno()
return self.input.split('\n')[number - 1]
def _get_line_position_from_char(self, char):
line_start = char
while line_start != 0:
if self.input[line_start - 1] == '\n':
break
line_start -= 1
line_end = self.input.find('\n', char)
return line_start, line_end
def _format(self, error):
start, end = self._get_line_position_from_char(self.start)
line_number = self._get_lineno()
line = self._get_line()
offset = self.start - start
token_len = 1
output = line + '\n'
if token_len <= 1:
output += (' ' * offset) + '^\n'
else:
output += (' ' * offset) + "'" + ('-' * (token_len - 2)) + "'\n"
output += '%s in line %s' % (error, line_number)
return output
def _msg(self, error):
print(self._format(error))
def _warn(self, error):
sys.stderr.write(self._format(error) + '\n')
def _error(self, error):
raise Exception(self._format(error))
def _eof(self):
return self.start >= self.end
def parse_string(self, input, builder, debug=0):
self._init()
self.input = input
self.builder = builder
self.end = len(input)
self.grammars['input'].parse(self, debug)
if self.start < self.end:
self._error('parser returned, but did not complete')
def parse(self, filename, builder, encoding='utf8', debug=0):
with codecs.open(filename, 'r', encoding=encoding) as input_file:
return self.parse_string(input_file.read(), builder, debug)
def dump(self):
for grammar in self.grammars.values():
print(grammar)
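# Usage sketch (illustrative only; assumes `context.grammars` and `context.lexicon`
# have been populated by the Gelatin compiler, and that `builder` is an output builder
# object providing the create/add/enter/leave methods used above):
#
#   context = Context()
#   ...  # compile a Gelatin syntax file into context.grammars / context.lexicon
#   context.parse("input.txt", builder)   # drives grammars['input'] over the file
#   context.dump()                        # print the compiled grammars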
|
src/python/pants/backend/terraform/lint/tffmt/tffmt.py | yoav-orca/pants | 1,806 | 12766378 | # Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import logging
import textwrap
from pants.backend.terraform.lint.fmt import TerraformFmtRequest
from pants.backend.terraform.style import StyleSetup, StyleSetupRequest
from pants.backend.terraform.tool import TerraformProcess
from pants.backend.terraform.tool import rules as tool_rules
from pants.core.goals.fmt import FmtResult
from pants.core.goals.lint import LintRequest, LintResult, LintResults
from pants.core.util_rules import external_tool
from pants.engine.fs import Digest, MergeDigests
from pants.engine.internals.selectors import Get, MultiGet
from pants.engine.process import FallibleProcessResult, ProcessResult
from pants.engine.rules import collect_rules, rule
from pants.engine.unions import UnionRule
from pants.option.subsystem import Subsystem
from pants.util.logging import LogLevel
logger = logging.getLogger(__name__)
class TfFmtSubsystem(Subsystem):
options_scope = "terraform-fmt"
help = """Terraform fmt options."""
@classmethod
def register_options(cls, register):
super().register_options(register)
register(
"--skip",
type=bool,
default=False,
help=(
f"Don't use `terraform fmt` when running `{register.bootstrap.pants_bin_name} fmt` and "
f"`{register.bootstrap.pants_bin_name} lint`."
),
)
class TffmtRequest(TerraformFmtRequest):
pass
@rule(desc="Format with `terraform fmt`")
async def tffmt_fmt(request: TffmtRequest, tffmt: TfFmtSubsystem) -> FmtResult:
if tffmt.options.skip:
return FmtResult.skip(formatter_name="tffmt")
setup = await Get(StyleSetup, StyleSetupRequest(request, ("fmt",)))
results = await MultiGet(
Get(ProcessResult, TerraformProcess, process)
for _, (process, _) in setup.directory_to_process.items()
)
def format(directory, output):
if len(output.strip()) == 0:
return ""
return textwrap.dedent(
f"""\
Output from `terraform fmt` on files in {directory}:
{output.decode("utf-8")}
"""
)
stdout_content = ""
stderr_content = ""
for directory, result in zip(setup.directory_to_process.keys(), results):
stdout_content += format(directory, result.stdout)
stderr_content += format(directory, result.stderr)
# Merge all of the outputs into a single output.
output_digest = await Get(Digest, MergeDigests(r.output_digest for r in results))
fmt_result = FmtResult(
input=setup.original_digest,
output=output_digest,
stdout=stdout_content,
stderr=stderr_content,
formatter_name="tffmt",
)
return fmt_result
@rule(desc="Lint with `terraform fmt`", level=LogLevel.DEBUG)
async def tffmt_lint(request: TffmtRequest, tffmt: TfFmtSubsystem) -> LintResults:
if tffmt.options.skip:
return LintResults([], linter_name="tffmt")
setup = await Get(StyleSetup, StyleSetupRequest(request, ("fmt", "-check")))
results = await MultiGet(
Get(FallibleProcessResult, TerraformProcess, process)
for _, (process, _) in setup.directory_to_process.items()
)
lint_results = [LintResult.from_fallible_process_result(result) for result in results]
return LintResults(lint_results, linter_name="tffmt")
def rules():
return [
*collect_rules(),
*external_tool.rules(),
*tool_rules(),
UnionRule(LintRequest, TffmtRequest),
UnionRule(TerraformFmtRequest, TffmtRequest),
]
|
third_party/weston/generate_configs.py | zealoussnow/chromium | 14,668 | 12766394 | #!/usr/bin/env python
#
# Copyright 2020 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Creates config files for building Weston."""
from __future__ import print_function
import os
import re
import shutil
import subprocess
import sys
import tempfile
BASE_DIR = os.path.abspath(os.path.dirname(__file__))
CHROMIUM_ROOT_DIR = os.path.abspath(os.path.join(BASE_DIR, '..', '..'))
sys.path.append(os.path.join(CHROMIUM_ROOT_DIR, 'build'))
import gn_helpers
MESON = ['meson']
DEFAULT_BUILD_ARGS = [
'-Dbuild_tests=false',
'--buildtype', 'release',
'-Dbackend-drm-screencast-vaapi=false',
'-Dbackend-rdp=false',
'-Dxwayland=false',
'-Dcolor-management-lcms=false',
'-Dpipewire=false',
'-Dcolor-management-colord=false',
'-Dremoting=false',
'-Dsimple-dmabuf-drm=auto',
'-Dshell-ivi=false',
'-Ddemo-clients=false',
'-Dsimple-clients=egl',
'-Dlauncher-logind=false',
'-Dweston-launch=false',
'-Dscreenshare=false',
'-Dsystemd=false',
'-Dimage-jpeg=false',
'-Dimage-webp=false',
'-Dbackend-drm=false',
'-Dbackend-default=wayland'
]
def PrintAndCheckCall(argv, *args, **kwargs):
print('\n-------------------------------------------------\nRunning %s' %
' '.join(argv))
    subprocess.check_call(argv, *args, **kwargs)
def RewriteFile(path, search_replace):
with open(path) as f:
contents = f.read()
with open(path, 'w') as f:
for search, replace in search_replace:
contents = re.sub(search, replace, contents)
# Cleanup trailing newlines.
f.write(contents.strip() + '\n')
def AddAttributeInConfig(path):
with open(path) as f:
contents = f.read()
with open(path, 'w') as f:
f.write(contents.strip() + '\n')
f.write('\n' + '__attribute__((visibility("default"))) int main(int argc, char* argv[]);' + '\n')
def CopyConfigsAndCleanup(config_dir, dest_dir):
if not os.path.exists(dest_dir):
os.makedirs(dest_dir)
shutil.copy(os.path.join(config_dir, 'config.h'), dest_dir)
shutil.rmtree(config_dir)
def RewriteGitFile(path, data):
with open(path, 'w') as f:
contents = data
# Cleanup trailing newlines.
f.write(contents.strip() + '\n')
def CopyGitConfigsAndCleanup(config_dir, dest_dir):
if not os.path.exists(dest_dir):
os.makedirs(dest_dir)
shutil.copy(os.path.join(config_dir, 'git-version.h'), dest_dir)
shutil.rmtree(config_dir)
def GenerateGitConfig(config_dir, env, special_args=[]):
temp_dir = tempfile.mkdtemp()
PrintAndCheckCall(
MESON + DEFAULT_BUILD_ARGS + special_args + [temp_dir],
cwd='src',
env=env)
label = subprocess.check_output(["git", "describe", "--always"]).strip()
label = label.decode("utf-8")
RewriteGitFile(
os.path.join(temp_dir, 'git-version.h'),
"#define BUILD_ID \"{label}\"".format(label=label))
CopyGitConfigsAndCleanup(temp_dir, config_dir)
def GenerateConfig(config_dir, env, special_args=[]):
temp_dir = tempfile.mkdtemp()
PrintAndCheckCall(
MESON + DEFAULT_BUILD_ARGS + special_args + [temp_dir],
cwd='src',
env=env)
CopyConfigsAndCleanup(temp_dir, config_dir)
def ChangeConfigPath():
configfile = os.path.join(BASE_DIR, "config/config.h")
DIRS = ["BINDIR",
"DATADIR",
"LIBEXECDIR",
"LIBWESTON_MODULEDIR",
"MODULEDIR"]
for dir in DIRS:
pattern = "#define {dir} \"/[a-zA-Z0-9\\-_/]+\"".format(dir=dir)
RewriteFile(configfile, [(pattern, "")])
# Add attribute in config.h to suppress all undefined symbol(function) warnings
AddAttributeInConfig(configfile)
def GenerateWestonVersion():
dirname = os.path.join(BASE_DIR, "version/libweston")
if not os.path.exists(dirname):
os.makedirs(dirname)
version_op_file = os.path.join(BASE_DIR, "version/libweston/version.h")
configfile = os.path.join(BASE_DIR, "config/config.h")
version_in_file = os.path.join(BASE_DIR, "src/include/libweston/version.h.in")
version_number = "0.0.0"
with open(configfile, 'r') as f:
for line in f:
if "PACKAGE_VERSION" in line:
package_version_list = (line.strip("\n")).split(" ")
version_number = package_version_list[-1]
version_number_list = (version_number.strip('"\n"')).split(".")
version_number_list.append(version_number.strip("\"\""))
VERSIONS = ["@WESTON_VERSION_MAJOR@", "@WESTON_VERSION_MINOR@",
"@WESTON_VERSION_MICRO@", "@WESTON_VERSION@"]
with open(version_in_file) as f:
contents = f.read()
for version, version_number in zip(VERSIONS, version_number_list):
pattern = version
repl_string = version_number
with open(version_op_file, 'w') as f:
contents = re.sub(pattern, repl_string, contents)
# Cleanup trailing newlines.
f.write(contents.strip() + '\n')
print("Created version.h file from version.h.in\n")
def RemoveUndesiredDefines():
configfile = os.path.join(BASE_DIR, "config/config.h")
# Weston doesn't have a meson option to avoid using memfd_create() method that was
# introduced in GLIBC 2.27. That results in weston failing to run on Xenial based bot as
# it has GLIBC 2.23, because this config might be generated on a system that has newer
# libc libraries that meson checks with has_function() method. Thus, explicitly rewrite
# the config to disable usage of that method.
RewriteFile(configfile, [("#define HAVE_MEMFD_CREATE .*", "")])
def main():
env = os.environ
env['CC'] = 'clang'
GenerateGitConfig('version', env)
GenerateConfig('config', env)
ChangeConfigPath()
RemoveUndesiredDefines()
GenerateWestonVersion()
if __name__ == '__main__':
main()
|
language/serene/claim_tfds.py | Xtuden-com/language | 1,199 | 12766416 | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""TFDS for only claims."""
import json
from language.serene import constants
from language.serene import util
import tensorflow.compat.v2 as tf
import tensorflow_datasets.public_api as tfds
class ClaimDataset(tfds.core.GeneratorBasedBuilder):
"""Claim only datasets for fever, useful for embedding only claims."""
VERSION = tfds.core.Version("0.1.0")
def __init__(
self, *,
fever_train_path = None,
fever_dev_path = None,
data_dir = None,
config=None):
super().__init__(data_dir=data_dir, config=config)
self._fever_train_path = fever_train_path
self._fever_dev_path = fever_dev_path
def _info(self):
return tfds.core.DatasetInfo(
builder=self,
features=tfds.features.FeaturesDict({
"example_id":
tf.string,
"metadata":
tf.string,
"claim_text":
tfds.features.Text(),
"evidence_text":
tfds.features.Text(),
"wikipedia_url":
tfds.features.Text(),
"sentence_id":
tfds.features.Text(),
"scrape_type":
tfds.features.Text(),
"evidence_label":
tfds.features.ClassLabel(
names=constants.EVIDENCE_MATCHING_CLASSES),
"claim_label":
tfds.features.ClassLabel(names=constants.FEVER_CLASSES)
}))
def _split_generators(self, dl_manager):
return [
tfds.core.SplitGenerator(
name=tfds.Split.TRAIN,
gen_kwargs={"filepath": self._fever_train_path}
),
tfds.core.SplitGenerator(
name=tfds.Split.VALIDATION,
gen_kwargs={"filepath": self._fever_dev_path}
)
]
def _generate_examples(self, filepath, **kwargs):
fever_claims = util.read_jsonlines(filepath)
for claim in fever_claims:
claim_id = claim["id"]
claim_text = claim["claim"]
claim_label = claim["label"]
example_id = f"{claim_id}"
yield claim_id, {
"example_id": example_id,
"claim_text": claim_text,
"evidence_text": "",
"wikipedia_url": "",
# Ordinarily, this would (possibly) be concatenated to the evidence
# but since this is claim only, I'm using a null integer value
"sentence_id": "-1",
# This label doesn't matter here since its claim only
"evidence_label": constants.NOT_MATCHING,
"claim_label": claim_label,
"scrape_type": "",
"metadata": json.dumps({
"claim_id": claim_id,
})
}
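# Usage sketch (file paths below are placeholders, not part of this module):
#
#   builder = ClaimDataset(
#       fever_train_path="/path/to/fever.train.jsonl",
#       fever_dev_path="/path/to/fever.dev.jsonl",
#       data_dir="/path/to/tfds_data")
#   builder.download_and_prepare()
#   train_claims = builder.as_dataset(split="train")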
|
kaldi/steps/dict/apply_lexicon_edits.py | ishine/asv-subtools | 370 | 12766417 | #!/usr/bin/env python
# Copyright 2016 <NAME>
# Apache 2.0.
from __future__ import print_function
import argparse
import sys
def GetArgs():
parser = argparse.ArgumentParser(description = "Apply an lexicon edits file (output from subtools/kaldi/steps/dict/select_prons_bayesian.py)to an input lexicon"
"to produce a learned lexicon.",
epilog = "See subtools/kaldi/steps/dict/learn_lexicon_greedy.sh for example")
parser.add_argument("in_lexicon", metavar='<in-lexicon>', type = str,
help = "Input lexicon. Each line must be <word> <phones>.")
parser.add_argument("lexicon_edits_file", metavar='<lexicon-edits-file>', type = str,
help = "Input lexicon edits file containing human-readable & editable"
"pronounciation info. The info for each word is like:"
"------------ an 4086.0 --------------"
"R | Y | 2401.6 | AH N"
"R | Y | 640.8 | AE N"
"P | Y | 1035.5 | IH N"
"R(ef), P(hone-decoding) represents the pronunciation source"
"Y/N means the recommended decision of including this pron or not"
"and the numbers are soft counts accumulated from lattice-align-word outputs. See subtools/kaldi/steps/dict/select_prons_bayesian.py for more details.")
parser.add_argument("out_lexicon", metavar='<out-lexicon>', type = str,
help = "Output lexicon to this file.")
print (' '.join(sys.argv), file=sys.stderr)
args = parser.parse_args()
args = CheckArgs(args)
return args
def CheckArgs(args):
if args.in_lexicon == "-":
args.in_lexicon = sys.stdin
else:
args.in_lexicon_handle = open(args.in_lexicon)
args.lexicon_edits_file_handle = open(args.lexicon_edits_file)
if args.out_lexicon == "-":
args.out_lexicon_handle = sys.stdout
else:
args.out_lexicon_handle = open(args.out_lexicon, "w")
return args
def ReadLexicon(lexicon_file_handle):
lexicon = set()
if lexicon_file_handle:
for line in lexicon_file_handle.readlines():
splits = line.strip().split()
if len(splits) == 0:
continue
if len(splits) < 2:
raise Exception('Invalid format of line ' + line
+ ' in lexicon file.')
word = splits[0]
phones = ' '.join(splits[1:])
lexicon.add((word, phones))
return lexicon
def ApplyLexiconEdits(lexicon, lexicon_edits_file_handle):
if lexicon_edits_file_handle:
for line in lexicon_edits_file_handle.readlines():
# skip all commented lines
if line.startswith('#'):
continue
# read a word from a line like "---- MICROPHONES 200.0 ----".
if line.startswith('---'):
splits = line.strip().strip('-').strip().split()
if len(splits) != 2:
print(splits, file=sys.stderr)
raise Exception('Invalid format of line ' + line
+ ' in lexicon edits file.')
word = splits[0].strip()
else:
# parse the pron and decision 'Y/N' of accepting the pron or not,
# from a line like: 'P | Y | 42.0 | M AY K R AH F OW N Z'
splits = line.split('|')
if len(splits) != 4:
raise Exception('Invalid format of line ' + line
+ ' in lexicon edits file.')
pron = splits[3].strip()
if splits[1].strip() == 'Y':
lexicon.add((word, pron))
elif splits[1].strip() == 'N':
lexicon.discard((word, pron))
else:
raise Exception('Invalid format of line ' + line
+ ' in lexicon edits file.')
return lexicon
def WriteLexicon(lexicon, out_lexicon_handle):
for word, pron in lexicon:
print('{0} {1}'.format(word, pron), file=out_lexicon_handle)
out_lexicon_handle.close()
def Main():
args = GetArgs()
lexicon = ReadLexicon(args.in_lexicon_handle)
ApplyLexiconEdits(lexicon, args.lexicon_edits_file_handle)
WriteLexicon(lexicon, args.out_lexicon_handle)
if __name__ == "__main__":
Main()
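# Example invocation (file names are illustrative):
#
#   python apply_lexicon_edits.py data/local/dict/lexicon.txt \
#       exp/learn_lexicon/lexicon_edits.txt data/local/dict_learned/lexicon.txt
#
# Passing "-" as <in-lexicon> reads from stdin; passing "-" as <out-lexicon> writes to stdout.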
|
B03898_02_Codes/B03898_02_02.py | prakharShuklaOfficial/Mastering-Python-for-Finance-source-codes | 446 | 12766424 | """
README
======
This is a Python code.
======
"""
""" Least squares regression with statsmodels """
import numpy as np
import statsmodels.api as sm
# Generate some sample data
num_periods = 9
all_values = np.array([np.random.random(8)
for i in range(num_periods)])
# Filter the data
y_values = all_values[:, 0] # First column values as Y
x_values = all_values[:, 1:] # All other values as X
x_values = sm.add_constant(x_values) # Include the intercept
results = sm.OLS(y_values, x_values).fit() # Regress and fit the model
print(results.summary())
print(results.params)
|
scripts/python/s2-c2-extract-schema.py | ashutoshsingh0223/freebase-triples | 177 | 12766471 | #!/usr/bin/env python
"""
Run with:
$ python this-script.py [path_to_input_file]
"""
import argparse
import datetime
import subprocess
import time
# Globals
# Note: path to the query file has been hardcoded here
# queries.txt file has a schema of [slice_title],[query]
queries = open('queries/queries-schema-for-domains-types-properties').readlines()
def main(input_file):
""" Run the main shell commands
:param input_file: the path to the RDF file you want sliced according to the queries """
query_count = 0
fname_input = input_file
fname_output = "slices-new/fb-rdf-schema-"
fname_rest = "fb-rdf-rest-"
for query in queries:
query = query.split(",")
query_title = query[0].strip().replace(".", "-")
query_raw = query[1].strip()
query_count += 1
fname_output += query_title # Add the 1st column from the queries data to the title
fname_rest += str(query_count) # Increment up the filename for the remainder data
t0 = subprocess.check_output(['gdate','+"%s%3N"'])
p = subprocess.Popen(['gawk',
"{ fname" + '="'+fname_output+'";' + ' fname_rest="' +fname_rest +'"; ' +
'if(' + query_raw + ')' + " { print $0 >> fname; } else { print $0 >> fname_rest; } }",
fname_input])
p.communicate()
t1 = subprocess.check_output(['gdate','+"%s%3N"'])
# Show the runtime stats: initial time, finished time
print(query_title + "\t" + t0.decode('ascii').strip() + "\t" + t1.decode('ascii').strip())
# Reset some of the file names for the next loop
fname_input = fname_rest
fname_rest = "fb-rdf-rest-"
fname_output = "slices-new/fb-rdf-schema-"
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('input_file', help='Path to the input data file')
args = parser.parse_args()
main(args.input_file)
# main()
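# The queries file read above holds one "[slice_title],[query]" pair per line, where
# the query is a gawk boolean expression over the N-Triples fields. For example
# (illustrative entries, not the actual query file):
#
#   type-object-type,$2 ~ /type\.object\.type/
#   type-object-name,$2 ~ /type\.object\.name/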
|
tools/graph_bag/scripts/test_rmse_utilities.py | limenutt/astrobee | 629 | 12766482 | #!/usr/bin/python
#
# Copyright (c) 2017, United States Government, as represented by the
# Administrator of the National Aeronautics and Space Administration.
#
# All rights reserved.
#
# The Astrobee platform is licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import math
import unittest
import numpy as np
import poses
import rmse_utilities
def make_poses(times, xs, ys, zs):
new_poses = poses.Poses("", "")
new_poses.times = times
new_poses.positions.xs = xs
new_poses.positions.ys = ys
new_poses.positions.zs = zs
return new_poses
class TestRMSESequence(unittest.TestCase):
def test_prune_missing_timestamps_beginning_set(self):
a_times = np.arange(10.0)
xs = np.arange(10.0)
ys = np.arange(10.0) + 1.0
zs = np.arange(10.0) + 2.0
b_times = np.arange(5.0)
poses_a = make_poses(a_times, xs, ys, zs)
poses_b = make_poses(b_times, xs, ys, zs)
trimmed_a, trimmed_b = rmse_utilities.get_same_timestamp_poses(poses_a, poses_b)
self.assertEqual(len(trimmed_a.times), len(trimmed_b.times))
self.assertEqual(len(trimmed_a.times), 5)
self.assertTrue(np.allclose(trimmed_a.times, b_times, rtol=0))
self.assertTrue(np.allclose(trimmed_a.positions.xs, b_times, rtol=0))
self.assertTrue(np.allclose(trimmed_a.positions.ys, b_times + 1, rtol=0))
self.assertTrue(np.allclose(trimmed_a.positions.zs, b_times + 2, rtol=0))
def test_prune_missing_timestamps_middle_set(self):
a_times = np.arange(10.0)
xs = np.arange(10.0)
ys = np.arange(10.0) + 1.0
zs = np.arange(10.0) + 2.0
b_times = np.arange(3.0, 7.0)
poses_a = make_poses(a_times, xs, ys, zs)
poses_b = make_poses(b_times, xs, ys, zs)
trimmed_a, trimmed_b = rmse_utilities.get_same_timestamp_poses(poses_a, poses_b)
self.assertEqual(len(trimmed_a.times), len(trimmed_b.times))
self.assertEqual(len(trimmed_a.times), 4)
self.assertTrue(np.allclose(trimmed_a.times, b_times, rtol=0))
self.assertTrue(np.allclose(trimmed_a.positions.xs, b_times, rtol=0))
self.assertTrue(np.allclose(trimmed_a.positions.ys, b_times + 1, rtol=0))
self.assertTrue(np.allclose(trimmed_a.positions.zs, b_times + 2, rtol=0))
def test_prune_missing_timestamps_end_set(self):
a_times = np.arange(10.0)
xs = np.arange(10.0)
ys = np.arange(10.0) + 1.0
zs = np.arange(10.0) + 2.0
b_times = np.arange(7.0, 10.0)
poses_a = make_poses(a_times, xs, ys, zs)
poses_b = make_poses(b_times, xs, ys, zs)
trimmed_a, trimmed_b = rmse_utilities.get_same_timestamp_poses(poses_a, poses_b)
self.assertEqual(len(trimmed_a.times), len(trimmed_b.times))
self.assertEqual(len(trimmed_a.times), 3)
self.assertTrue(np.allclose(trimmed_a.times, b_times, rtol=0))
self.assertTrue(np.allclose(trimmed_a.positions.xs, b_times, rtol=0))
self.assertTrue(np.allclose(trimmed_a.positions.ys, b_times + 1, rtol=0))
self.assertTrue(np.allclose(trimmed_a.positions.zs, b_times + 2, rtol=0))
def test_prune_missing_timestamps_scattered_set(self):
a_times = np.arange(10.0)
xs = np.arange(10.0)
ys = np.arange(10.0) + 1.0
zs = np.arange(10.0) + 2.0
b_times = np.array([1.0, 5.0, 6.0, 9.0])
poses_a = make_poses(a_times, xs, ys, zs)
poses_b = make_poses(b_times, xs, ys, zs)
trimmed_a, trimmed_b = rmse_utilities.get_same_timestamp_poses(poses_a, poses_b)
self.assertEqual(len(trimmed_a.times), len(trimmed_b.times))
self.assertTrue(np.allclose(trimmed_a.times, b_times, rtol=0))
self.assertTrue(np.allclose(trimmed_a.positions.xs, b_times, rtol=0))
self.assertTrue(np.allclose(trimmed_a.positions.ys, b_times + 1, rtol=0))
self.assertTrue(np.allclose(trimmed_a.positions.zs, b_times + 2, rtol=0))
def test_prune_missing_timestamps_disjoint_set(self):
a_times = np.arange(10.0)
xs = np.arange(10.0)
ys = np.arange(10.0) + 1.0
zs = np.arange(10.0) + 2.0
b_times = np.arange(11, 20)
poses_a = make_poses(a_times, xs, ys, zs)
poses_b = make_poses(b_times, xs, ys, zs)
trimmed_a, trimmed_b = rmse_utilities.get_same_timestamp_poses(poses_a, poses_b)
self.assertEqual(len(trimmed_a.times), 0)
self.assertEqual(len(trimmed_b.times), 0)
def test_prune_missing_timestamps_some_overlap(self):
a_times = np.arange(10.0)
xs = np.arange(10.0)
ys = np.arange(10.0) + 1.0
zs = np.arange(10.0) + 2.0
b_times = np.arange(8.0, 20.0)
poses_a = make_poses(a_times, xs, ys, zs)
poses_b = make_poses(b_times, xs, ys, zs)
expected_time_range = np.arange(8.0, 10.0)
trimmed_a, trimmed_b = rmse_utilities.get_same_timestamp_poses(poses_a, poses_b)
self.assertEqual(len(trimmed_a.times), len(trimmed_b.times))
self.assertTrue(np.allclose(trimmed_a.times, trimmed_b.times, rtol=0))
self.assertTrue(np.allclose(trimmed_a.times, expected_time_range, rtol=0))
self.assertTrue(
np.allclose(trimmed_a.positions.xs, expected_time_range, rtol=0)
)
self.assertTrue(
np.allclose(trimmed_a.positions.ys, expected_time_range + 1, rtol=0)
)
self.assertTrue(
np.allclose(trimmed_a.positions.zs, expected_time_range + 2, rtol=0)
)
def test_rmse_same_poses(self):
a_times = np.arange(10.0)
xs = np.arange(10.0)
ys = np.arange(10.0) + 1.0
zs = np.arange(10.0) + 2.0
poses_a = make_poses(a_times, xs, ys, zs)
poses_b = make_poses(a_times, xs, ys, zs)
rmse = rmse_utilities.rmse_timestamped_poses(poses_a, poses_b)
self.assertTrue(np.isclose(rmse, 0, rtol=0))
def test_rmse_off_by_one(self):
a_times = np.arange(10.0)
xs = np.arange(10.0)
ys = np.arange(10.0) + 1.0
zs = np.arange(10.0) + 2.0
poses_a = make_poses(a_times, xs, ys, zs)
poses_b = make_poses(a_times, xs + 1, ys, zs)
rmse = rmse_utilities.rmse_timestamped_poses(poses_a, poses_b)
self.assertTrue(np.isclose(rmse, 1.0, rtol=0))
def test_rmse_all_off_by_one(self):
a_times = np.arange(10.0)
xs = np.arange(10.0)
ys = np.arange(10.0) + 1.0
zs = np.arange(10.0) + 2.0
poses_a = make_poses(a_times, xs, ys, zs)
poses_b = make_poses(a_times, xs + 1, ys + 1, zs + 1)
rmse = rmse_utilities.rmse_timestamped_poses(poses_a, poses_b)
self.assertTrue(np.isclose(rmse, math.sqrt(3.0), rtol=0))
if __name__ == "__main__":
unittest.main()
|
source_code/1-1-urllib.py | VickyMin1994/easy-scraping-tutorial | 708 | 12766510 | from urllib.request import urlopen
# if has Chinese, apply decode()
html = urlopen("https://mofanpy.com/static/scraping/basic-structure.html").read().decode('utf-8')
print(html)
import re
res = re.findall(r"<title>(.+?)</title>", html)
print("\nPage title is: ", res[0])
# Page title is: Scraping tutorial 1 | 莫烦Python
res = re.findall(r"<p>(.*?)</p>", html, flags=re.DOTALL) # re.DOTALL if multi line
print("\nPage paragraph is: ", res[0])
# Page paragraph is:
# 这是一个在 <a href="https://mofanpy.com/">莫烦Python</a>
# <a href="https://mofanpy.com/tutorials/scraping">爬虫教程</a> 中的简单测试.
res = re.findall(r'href="(.*?)"', html)
print("\nAll links: ", res)
# All links: ['https://mofanpy.com/static/img/description/tab_icon.png', 'https://mofanpy.com/', 'https://mofanpy.com/tutorials/scraping'] |
tests/integration/workflows/nodejs_npm_esbuild/test_nodejs_npm_with_esbuild.py | awslabs/aws-lambda-builders | 180 | 12766541 | import os
import shutil
import tempfile
from unittest import TestCase
from aws_lambda_builders.builder import LambdaBuilder
from aws_lambda_builders.exceptions import WorkflowFailedError
from aws_lambda_builders.workflows.nodejs_npm.npm import SubprocessNpm
from aws_lambda_builders.workflows.nodejs_npm.utils import OSUtils
from aws_lambda_builders.workflows.nodejs_npm_esbuild.esbuild import EsbuildExecutionError
from aws_lambda_builders.workflows.nodejs_npm_esbuild.utils import EXPERIMENTAL_FLAG_ESBUILD
from parameterized import parameterized
class TestNodejsNpmWorkflowWithEsbuild(TestCase):
"""
    Verifies that the `nodejs_npm_esbuild` workflow works by building a Lambda using NPM and esbuild
"""
TEST_DATA_FOLDER = os.path.join(os.path.dirname(__file__), "testdata")
def setUp(self):
self.artifacts_dir = tempfile.mkdtemp()
self.scratch_dir = tempfile.mkdtemp()
self.dependencies_dir = tempfile.mkdtemp()
self.no_deps = os.path.join(self.TEST_DATA_FOLDER, "no-deps-esbuild")
self.builder = LambdaBuilder(language="nodejs", dependency_manager="npm-esbuild", application_framework=None)
def tearDown(self):
shutil.rmtree(self.artifacts_dir)
shutil.rmtree(self.scratch_dir)
@parameterized.expand([("nodejs12.x",), ("nodejs14.x",), ("nodejs16.x",)])
def test_doesnt_build_without_feature_flag(self, runtime):
source_dir = os.path.join(self.TEST_DATA_FOLDER, "with-deps-esbuild")
with self.assertRaises(EsbuildExecutionError) as context:
self.builder.build(
source_dir,
self.artifacts_dir,
self.scratch_dir,
os.path.join(source_dir, "package.json"),
runtime=runtime,
)
self.assertEqual(str(context.exception), "Esbuild Failed: Feature flag must be enabled to use this workflow")
@parameterized.expand([("nodejs12.x",), ("nodejs14.x",), ("nodejs16.x",)])
def test_builds_javascript_project_with_dependencies(self, runtime):
source_dir = os.path.join(self.TEST_DATA_FOLDER, "with-deps-esbuild")
options = {"entry_points": ["included.js"]}
self.builder.build(
source_dir,
self.artifacts_dir,
self.scratch_dir,
os.path.join(source_dir, "package.json"),
runtime=runtime,
options=options,
experimental_flags=[EXPERIMENTAL_FLAG_ESBUILD],
)
expected_files = {"included.js", "included.js.map"}
output_files = set(os.listdir(self.artifacts_dir))
self.assertEqual(expected_files, output_files)
@parameterized.expand([("nodejs12.x",), ("nodejs14.x",), ("nodejs16.x",)])
def test_builds_javascript_project_with_multiple_entrypoints(self, runtime):
source_dir = os.path.join(self.TEST_DATA_FOLDER, "with-deps-esbuild-multiple-entrypoints")
options = {"entry_points": ["included.js", "included2.js"]}
self.builder.build(
source_dir,
self.artifacts_dir,
self.scratch_dir,
os.path.join(source_dir, "package.json"),
runtime=runtime,
options=options,
experimental_flags=[EXPERIMENTAL_FLAG_ESBUILD],
)
expected_files = {"included.js", "included.js.map", "included2.js", "included2.js.map"}
output_files = set(os.listdir(self.artifacts_dir))
self.assertEqual(expected_files, output_files)
@parameterized.expand([("nodejs12.x",), ("nodejs14.x",), ("nodejs16.x",)])
def test_builds_typescript_projects(self, runtime):
source_dir = os.path.join(self.TEST_DATA_FOLDER, "with-deps-esbuild-typescript")
options = {"entry_points": ["included.ts"]}
self.builder.build(
source_dir,
self.artifacts_dir,
self.scratch_dir,
os.path.join(source_dir, "package.json"),
runtime=runtime,
options=options,
experimental_flags=[EXPERIMENTAL_FLAG_ESBUILD],
)
expected_files = {"included.js", "included.js.map"}
output_files = set(os.listdir(self.artifacts_dir))
self.assertEqual(expected_files, output_files)
@parameterized.expand([("nodejs12.x",), ("nodejs14.x",), ("nodejs16.x",)])
def test_builds_with_external_esbuild(self, runtime):
osutils = OSUtils()
npm = SubprocessNpm(osutils)
source_dir = os.path.join(self.TEST_DATA_FOLDER, "no-deps-esbuild")
esbuild_dir = os.path.join(self.TEST_DATA_FOLDER, "esbuild-binary")
npm.run(["ci"], cwd=esbuild_dir)
binpath = npm.run(["bin"], cwd=esbuild_dir)
options = {"entry_points": ["included.js"]}
self.builder.build(
source_dir,
self.artifacts_dir,
self.scratch_dir,
os.path.join(source_dir, "package.json"),
runtime=runtime,
options=options,
executable_search_paths=[binpath],
experimental_flags=[EXPERIMENTAL_FLAG_ESBUILD],
)
expected_files = {"included.js", "included.js.map"}
output_files = set(os.listdir(self.artifacts_dir))
self.assertEqual(expected_files, output_files)
@parameterized.expand([("nodejs12.x",), ("nodejs14.x",), ("nodejs16.x",)])
def test_no_options_passed_to_esbuild(self, runtime):
source_dir = os.path.join(self.TEST_DATA_FOLDER, "with-deps-esbuild")
with self.assertRaises(WorkflowFailedError) as context:
self.builder.build(
source_dir,
self.artifacts_dir,
self.scratch_dir,
os.path.join(source_dir, "package.json"),
runtime=runtime,
experimental_flags=[EXPERIMENTAL_FLAG_ESBUILD],
)
self.assertEqual(str(context.exception), "NodejsNpmEsbuildBuilder:EsbuildBundle - entry_points not set ({})")
@parameterized.expand([("nodejs12.x",), ("nodejs14.x",), ("nodejs16.x",)])
def test_bundle_with_implicit_file_types(self, runtime):
source_dir = os.path.join(self.TEST_DATA_FOLDER, "implicit-file-types")
options = {"entry_points": ["included", "implicit"]}
self.builder.build(
source_dir,
self.artifacts_dir,
self.scratch_dir,
os.path.join(source_dir, "package.json"),
runtime=runtime,
options=options,
experimental_flags=[EXPERIMENTAL_FLAG_ESBUILD],
)
expected_files = {"included.js.map", "implicit.js.map", "implicit.js", "included.js"}
output_files = set(os.listdir(self.artifacts_dir))
self.assertEqual(expected_files, output_files)
@parameterized.expand([("nodejs12.x",), ("nodejs14.x",), ("nodejs16.x",)])
def test_bundles_project_without_dependencies(self, runtime):
source_dir = os.path.join(self.TEST_DATA_FOLDER, "no-package-esbuild")
options = {"entry_points": ["included"]}
osutils = OSUtils()
npm = SubprocessNpm(osutils)
esbuild_dir = os.path.join(self.TEST_DATA_FOLDER, "esbuild-binary")
npm.run(["ci"], cwd=esbuild_dir)
binpath = npm.run(["bin"], cwd=esbuild_dir)
self.builder.build(
source_dir,
self.artifacts_dir,
self.scratch_dir,
os.path.join(source_dir, "package.json"),
runtime=runtime,
options=options,
experimental_flags=[EXPERIMENTAL_FLAG_ESBUILD],
executable_search_paths=[binpath],
)
expected_files = {"included.js.map", "included.js"}
output_files = set(os.listdir(self.artifacts_dir))
self.assertEqual(expected_files, output_files)
@parameterized.expand([("nodejs12.x",), ("nodejs14.x",), ("nodejs16.x",)])
def test_builds_project_with_remote_dependencies_without_download_dependencies_with_dependencies_dir(self, runtime):
source_dir = os.path.join(self.TEST_DATA_FOLDER, "with-deps-no-node_modules")
options = {"entry_points": ["included.js"]}
osutils = OSUtils()
npm = SubprocessNpm(osutils)
esbuild_dir = os.path.join(self.TEST_DATA_FOLDER, "esbuild-binary")
npm.run(["ci"], cwd=esbuild_dir)
binpath = npm.run(["bin"], cwd=esbuild_dir)
self.builder.build(
source_dir,
self.artifacts_dir,
self.scratch_dir,
os.path.join(source_dir, "package.json"),
options=options,
runtime=runtime,
dependencies_dir=self.dependencies_dir,
download_dependencies=False,
experimental_flags=[EXPERIMENTAL_FLAG_ESBUILD],
executable_search_paths=[binpath],
)
expected_files = {"included.js.map", "included.js"}
output_files = set(os.listdir(self.artifacts_dir))
self.assertEqual(expected_files, output_files)
@parameterized.expand([("nodejs12.x",), ("nodejs14.x",), ("nodejs16.x",)])
def test_builds_project_with_remote_dependencies_with_download_dependencies_and_dependencies_dir(self, runtime):
source_dir = os.path.join(self.TEST_DATA_FOLDER, "with-deps-no-node_modules")
options = {"entry_points": ["included.js"]}
self.builder.build(
source_dir,
self.artifacts_dir,
self.scratch_dir,
os.path.join(source_dir, "package.json"),
runtime=runtime,
options=options,
dependencies_dir=self.dependencies_dir,
download_dependencies=True,
experimental_flags=[EXPERIMENTAL_FLAG_ESBUILD],
)
expected_files = {"included.js.map", "included.js"}
output_files = set(os.listdir(self.artifacts_dir))
self.assertEqual(expected_files, output_files)
expected_modules = "minimal-request-promise"
output_modules = set(os.listdir(os.path.join(self.dependencies_dir, "node_modules")))
self.assertIn(expected_modules, output_modules)
expected_dependencies_files = {"node_modules"}
output_dependencies_files = set(os.listdir(os.path.join(self.dependencies_dir)))
self.assertNotIn(expected_dependencies_files, output_dependencies_files)
@parameterized.expand([("nodejs12.x",), ("nodejs14.x",), ("nodejs16.x",)])
def test_builds_project_with_remote_dependencies_without_download_dependencies_without_dependencies_dir(
self, runtime
):
source_dir = os.path.join(self.TEST_DATA_FOLDER, "with-deps-no-node_modules")
with self.assertRaises(EsbuildExecutionError) as context:
self.builder.build(
source_dir,
self.artifacts_dir,
self.scratch_dir,
os.path.join(source_dir, "package.json"),
runtime=runtime,
dependencies_dir=None,
download_dependencies=False,
experimental_flags=[EXPERIMENTAL_FLAG_ESBUILD],
)
self.assertEqual(str(context.exception), "Esbuild Failed: Lambda Builders encountered and invalid workflow")
@parameterized.expand([("nodejs12.x",), ("nodejs14.x",), ("nodejs16.x",)])
def test_builds_project_without_combine_dependencies(self, runtime):
source_dir = os.path.join(self.TEST_DATA_FOLDER, "with-deps-no-node_modules")
options = {"entry_points": ["included.js"]}
self.builder.build(
source_dir,
self.artifacts_dir,
self.scratch_dir,
os.path.join(source_dir, "package.json"),
runtime=runtime,
options=options,
dependencies_dir=self.dependencies_dir,
download_dependencies=True,
combine_dependencies=False,
experimental_flags=[EXPERIMENTAL_FLAG_ESBUILD],
)
expected_files = {"included.js.map", "included.js"}
output_files = set(os.listdir(self.artifacts_dir))
self.assertEqual(expected_files, output_files)
expected_modules = "minimal-request-promise"
output_modules = set(os.listdir(os.path.join(self.dependencies_dir, "node_modules")))
self.assertIn(expected_modules, output_modules)
expected_dependencies_files = {"node_modules"}
output_dependencies_files = set(os.listdir(os.path.join(self.dependencies_dir)))
self.assertNotIn(expected_dependencies_files, output_dependencies_files)
@parameterized.expand([("nodejs12.x",), ("nodejs14.x",), ("nodejs16.x",)])
def test_builds_javascript_project_with_external(self, runtime):
source_dir = os.path.join(self.TEST_DATA_FOLDER, "with-deps-esbuild-externals")
options = {"entry_points": ["included.js"], "external": ["minimal-request-promise"]}
self.builder.build(
source_dir,
self.artifacts_dir,
self.scratch_dir,
os.path.join(source_dir, "package.json"),
runtime=runtime,
options=options,
experimental_flags=[EXPERIMENTAL_FLAG_ESBUILD],
)
expected_files = {"included.js", "included.js.map"}
output_files = set(os.listdir(self.artifacts_dir))
self.assertEqual(expected_files, output_files)
with open(str(os.path.join(self.artifacts_dir, "included.js"))) as f:
js_file = f.read()
# Check that the module has been require() instead of bundled
self.assertIn('require("minimal-request-promise")', js_file)
@parameterized.expand([("nodejs12.x",), ("nodejs14.x",), ("nodejs16.x",)])
def test_builds_javascript_project_with_loader(self, runtime):
osutils = OSUtils()
source_dir = os.path.join(self.TEST_DATA_FOLDER, "no-deps-esbuild-loader")
options = {"entry_points": ["included.js"], "loader": [".reference=json"]}
self.builder.build(
source_dir,
self.artifacts_dir,
self.scratch_dir,
os.path.join(source_dir, "package.json"),
runtime=runtime,
options=options,
experimental_flags=[EXPERIMENTAL_FLAG_ESBUILD],
)
expected_files = {"included.js", "included.js.map"}
output_files = set(os.listdir(self.artifacts_dir))
self.assertEqual(expected_files, output_files)
included_js_path = os.path.join(self.artifacts_dir, "included.js")
# check that the .reference file is correctly bundled as code by running the result
self.assertEqual(
osutils.check_output(included_js_path),
str.encode(
"===\n"
"The Muses\n"
"===\n"
"\n"
"\tcalliope: eloquence and heroic poetry\n"
"\terato: lyric or erotic poetry\n"
"\tmelpomene: tragedy\n"
"\tpolymnia: sacred poetry\n"
"\tterpsichore: dance\n"
"\tthalia: comedy\n"
"\turania: astronomy and astrology"
),
)
|
elegantrl/envs/starcraft/smac_maps.py | tnerush71/ElegantRL | 759 | 12766557 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from pysc2.maps import lib
from smac.env.starcraft2.maps import smac_maps
map_param_registry = {
"1o_10b_vs_1r": {
"n_agents": 11,
"n_enemies": 1,
"limit": 50,
"a_race": "Z",
"b_race": "Z",
"unit_type_bits": 2,
"map_type": "overload_bane"
},
"1o_2r_vs_4r": {
"n_agents": 3,
"n_enemies": 4,
"limit": 50,
"a_race": "Z",
"b_race": "Z",
"unit_type_bits": 2,
"map_type": "overload_roach"
},
"bane_vs_hM": {
"n_agents": 3,
"n_enemies": 2,
"limit": 30,
"a_race": "Z",
"b_race": "T",
"unit_type_bits": 2,
"map_type": "bZ_hM"
}
}
smac_maps.map_param_registry.update(map_param_registry)
def get_map_params(map_name):
map_param_registry = smac_maps.get_smac_map_registry()
return map_param_registry[map_name]
for name in map_param_registry.keys():
globals()[name] = type(name, (smac_maps.SMACMap,), dict(filename=name))
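# Usage sketch: importing this module registers the extra maps with both pysc2 and
# SMAC, so their parameters can be looked up directly, e.g.
#
#   params = get_map_params("1o_10b_vs_1r")
#   print(params["n_agents"], params["n_enemies"], params["limit"])  # -> 11 1 50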
|
tests/cyclic/bar.py | topwebmaster/factory_boy | 1,932 | 12766570 | # Copyright: See the LICENSE file.
"""Helper to test circular factory dependencies."""
import factory
class Bar:
def __init__(self, foo, y):
self.foo = foo
self.y = y
class BarFactory(factory.Factory):
class Meta:
model = Bar
y = 13
foo = factory.SubFactory('cyclic.foo.FooFactory')
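# Usage sketch (illustrative): referencing 'cyclic.foo.FooFactory' by its dotted path
# defers the import until the factory is first used, which is what breaks the circular
# dependency between the foo and bar modules:
#
#   bar = BarFactory()   # lazily resolves FooFactory and builds bar.foo
#   assert bar.y == 13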
|
examples/plot_sine_wave_2d.py | ktanishqk/py-earth | 360 | 12766597 | """
==================================
Plotting two simple sine functions
==================================
A simple example plotting a fit of two sine functions.
"""
import numpy
import matplotlib.pyplot as plt
from pyearth import Earth
# Create some fake data
numpy.random.seed(2)
m = 10000
n = 10
X = 80 * numpy.random.uniform(size=(m, n)) - 40
y1 = 100 * \
numpy.abs(numpy.sin((X[:, 6]) / 10) - 4.0) + \
10 * numpy.random.normal(size=m)
y2 = 100 * \
numpy.abs(numpy.sin((X[:, 6]) / 2) - 8.0) + \
5 * numpy.random.normal(size=m)
# Fit an Earth model
model = Earth(max_degree=3, minspan_alpha=.5)
y_mix = numpy.concatenate((y1[:, numpy.newaxis], y2[:, numpy.newaxis]), axis=1)
model.fit(X, y_mix)
# Print the model
print(model.trace())
print(model.summary())
# Plot the model
y_hat = model.predict(X)
fig = plt.figure()
ax = fig.add_subplot(1, 2, 1)
ax.plot(X[:, 6], y_mix[:, 0], 'r.')
ax.plot(X[:, 6], model.predict(X)[:, 0], 'b.')
ax = fig.add_subplot(1, 2, 2)
ax.plot(X[:, 6], y_mix[:, 1], 'r.')
ax.plot(X[:, 6], model.predict(X)[:, 1], 'b.')
plt.show()
|
setup.py | walles/px | 149 | 12766599 | #!/usr/bin/env python
import os
import re
import shutil
import filecmp
import tempfile
import subprocess
from setuptools import setup
VERSIONFILE = "px/version.py"
git_version = (
subprocess.check_output(["git", "describe", "--dirty"]).decode("utf-8").strip()
)
with tempfile.NamedTemporaryFile(suffix=".py", delete=False) as tmp:
tmp.write(b"# NOTE: Auto generated by setup.py, no touchie!\n")
tmp.write(b'VERSION = "%s"\n' % bytearray(git_version, "utf_8"))
# Flushing is required for filecmp.cmp() to work (below)
tmp.flush()
if not os.path.isfile(VERSIONFILE):
# No version file found
shutil.move(tmp.name, VERSIONFILE)
elif not filecmp.cmp(tmp.name, VERSIONFILE):
# Version file needs updating
shutil.move(tmp.name, VERSIONFILE)
else:
# VERSIONFILE was already up to date. If we touch it in this
# case, it will have its file timestamp updated, which will
# force the slow px_integration_test.py tests to get rerun.
#
# Just clean up our tempfile and be merry.
os.remove(tmp.name)
requirements = None
with open("requirements.txt") as reqsfile:
requirements = reqsfile.readlines()
with open(os.path.join(os.path.dirname(__file__), "README.rst")) as fp:
LONG_DESCRIPTION = fp.read()
if not re.match(r"^[0-9]+\.[0-9]+\.[0-9]+$", git_version):
# Setuptools wants nice version numbers
git_version = "0.0.0"
setup(
name="pxpx",
version=git_version,
description="ps and top for Human Beings",
long_description=LONG_DESCRIPTION,
author="<NAME>",
author_email="<EMAIL>",
url="https://github.com/walles/px",
license="MIT",
classifiers=[
"Development Status :: 5 - Production/Stable",
"Environment :: Console",
"Intended Audience :: System Administrators",
"License :: OSI Approved :: MIT License",
"Operating System :: MacOS",
"Operating System :: POSIX :: Linux",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 3",
"Topic :: System :: Monitoring",
"Topic :: System :: Systems Administration",
"Topic :: Utilities",
],
packages=["px"],
install_requires=requirements,
# See: http://setuptools.readthedocs.io/en/latest/setuptools.html#setting-the-zip-safe-flag
zip_safe=True,
setup_requires=[
"pytest-runner",
],
tests_require=[
"pytest",
],
entry_points={
"console_scripts": ["px = px.px:main", "ptop = px.px:main"],
}
# Note that we're by design *not* installing man pages here.
# Using "data_files=" only puts the man pages in the egg file,
# and installing that egg doesn't put them on the destination
# system.
#
# After trying to figure this out for a bit, my conclusion is
# that "pip install" simply isn't meant for installing any man
# pages.
#
# /<EMAIL> 2018aug27
)
|
zproc/context.py | pycampers/zproc | 106 | 12766600 | import atexit
import multiprocessing
import pprint
import signal
import time
from contextlib import suppress
from typing import Callable, Union, Any, List, Mapping, Sequence, Tuple, cast
from . import util
from .consts import DEFAULT_NAMESPACE
from .process import Process
from .server import tools
from .state.state import State
from .task.map_plus import map_plus
from .task.swarm import Swarm
class ProcessList(list):
def __str__(self):
return ProcessList.__qualname__ + ": " + pprint.pformat(list(self))
def __repr__(self):
return "<" + self.__str__() + ">"
@staticmethod
def _wait_or_catch_exc(
process: Process, timeout: Union[int, float] = None
) -> Union[Exception, Any]:
try:
return process.wait(timeout)
except Exception as e:
return e
def wait(
self, timeout: Union[int, float] = None, safe: bool = False
) -> List[Union[Any, Exception]]:
"""
Call :py:meth:`~Process.wait()` on all the Processes in this list.
:param timeout:
Same as :py:meth:`~Process.wait()`.
This parameter controls the timeout for all the Processes combined,
not a single :py:meth:`~Process.wait()` call.
:param safe:
Suppress any errors that occur while waiting for a Process.
            The return values of failed :py:meth:`~Process.wait()` calls are substituted with the ``Exception`` that occurred.
:return:
A ``list`` containing the values returned by child Processes of this Context.
"""
if safe:
_wait = self._wait_or_catch_exc
else:
_wait = Process.wait
if timeout is None:
return [_wait(process) for process in self]
else:
final = time.time() + timeout
return [_wait(process, final - time.time()) for process in self]
def start(self):
"""
Call :py:meth:`~Process.start()` on all the child processes of this Context
Ignores if a Process is already started, unlike :py:meth:`~Process.start()`,
which throws an ``AssertionError``.
"""
with suppress(AssertionError):
for process in self:
process.start()
def stop(self):
"""
Call :py:meth:`~Process.stop()` on all the Processes in this list.
Retains the same order as ``Context.process_list``.
:return:
A ``list`` containing the exitcodes of the child Processes of this Context.
"""
return [proc.stop() for proc in self]
class Context:
#: The :py:class:`multiprocessing.Process` object for the server.
server_process: multiprocessing.Process
def __init__(
self,
server_address: str = None,
*,
start_server: bool = True,
backend: Callable = multiprocessing.Process,
wait: bool = False,
cleanup: bool = True,
namespace: str = DEFAULT_NAMESPACE,
**process_kwargs
) -> None:
r"""
Provides a high level interface to :py:class:`State` and :py:class:`Process`.
Primarily used to manage and launch processes.
All processes launched using a Context, share the same state.
Don't share a Context object between Processes / Threads.
A Context object is not thread-safe.
:param server_address:
The address of the server.
If this is set to ``None``, a random address will be generated.
:param start_server:
Whether to start the ZProc server.
It is started automatically by default.
If this is set to ``None``, then you must either -
- Start a server using a different Context object.
- Start one manually, using :py:func:`start_server`.
In both cases,
            it is the user's responsibility to make sure that the ``server_address`` argument
is satisfied.
.. note::
If the server is not started before-hand,
the Context object will block infinitely, waiting for the server to respond.
In case you want to play around,
the :py:func:`ping` function is handy,
since it let's you *detect* the presence of a server at a given address.
:param backend:
.. include:: /api/snippets/backend.rst
:param wait:
Wait for all running process to finish their work before exiting.
Alternative to manually calling :py:meth:`~Context.wait` at exit.
:param cleanup:
Whether to cleanup the process tree before exiting.
Registers a signal handler for ``SIGTERM``, and an ``atexit`` handler.
:param \*\*process_kwargs:
Keyword arguments that :py:class:`~Process` takes,
except ``server_address`` and ``target``.
If provided,
these will be used while creating processes using this Context.
"""
#: A :py:class:`ProcessList` object containing all Processes created under this Context.
self.process_list = ProcessList()
#: Passed on from the constructor. This is read-only.
self.backend = backend
#: Passed on from the constructor. This is read-only.
self.namespace = namespace
#: Passed on from the constructor.
self.process_kwargs = process_kwargs
self.process_kwargs.setdefault("namespace", self.namespace)
self.process_kwargs.setdefault("backend", self.backend)
self.server_address = cast(str, server_address)
"""The server's address.
This holds the address this Context is connected to,
not necessarily the value provided in the constructor.
This is read-only."""
if start_server:
self.start_server()
assert self.server_address is not None, (
"Couldn't determine the server address. "
"Hint: Either provide the `server_address` parameter, "
"or pass `start_server=True`."
)
# register cleanup before wait, so that wait runs before cleanup.
# (order of execution is reversed)
if cleanup:
atexit.register(util.clean_process_tree)
if util.is_main_thread():
signal.signal(signal.SIGTERM, util.clean_process_tree)
if wait:
atexit.register(self.wait)
def __str__(self):
return "%s - server: %r at %#x" % (
self.__class__.__qualname__,
self.server_address,
id(self),
)
def __repr__(self):
return util.enclose_in_brackets(self.__str__())
def create_state(self, value: dict = None, *, namespace: str = None):
"""
Creates a new :py:class:`State` object, sharing the same zproc server as this Context.
:param value:
If provided, call ``state.update(value)``.
:param namespace:
Use this as the namespace for the :py:class:`State` object,
instead of this :py:class:`Context`\ 's namespace.
:return:
A :py:class:`State` object.
"""
if namespace is None:
namespace = self.namespace
state = State(self.server_address, namespace=namespace)
if value is not None:
state.update(value)
return state
def create_swarm(self, count: int = None):
swarm = Swarm(self.server_address, namespace=self.namespace)
swarm.start(count)
return swarm
def start_server(self) -> Tuple[multiprocessing.Process, str]:
out = tools.start_server(self.server_address, backend=self.backend)
self.server_process, self.server_address = out
return out
def _process(
self, target: Callable = None, **process_kwargs
) -> Union[Process, Callable]:
r"""
Produce a child process bound to this context.
Can be used both as a function and decorator:
.. code-block:: python
:caption: Usage
@zproc.process(pass_context=True) # you may pass some arguments here
def p1(ctx):
print('hello', ctx)
@zproc.process # or not...
def p2(state):
print('hello', state)
def p3(state):
print('hello', state)
zproc.process(p3) # or just use as a good ol' function
:param target:
Passed on to the :py:class:`Process` constructor.
*Must be omitted when using this as a decorator.*
:param \*\*process_kwargs:
.. include:: /api/context/params/process_kwargs.rst
:return: The :py:class:`Process` instance produced.
"""
process = Process(
self.server_address, target, **{**self.process_kwargs, **process_kwargs}
)
self.process_list.append(process)
return process
def spawn(self, *targets: Callable, count: int = 1, **process_kwargs):
r"""
Produce one or many child process(s) bound to this context.
:param \*targets:
Passed on to the :py:class:`Process` constructor, one at a time.
:param count:
The number of processes to spawn for each item in ``targets``.
:param \*\*process_kwargs:
.. include:: /api/context/params/process_kwargs.rst
:return:
A ``ProcessList`` of the :py:class:`Process` instance(s) produced.
"""
if not targets:
def wrapper(target: Callable):
return self.spawn(target, count=count, **process_kwargs)
return wrapper
if len(targets) * count == 1:
return self._process(targets[0], **process_kwargs)
return ProcessList(
self._process(target, **process_kwargs)
for _ in range(count)
for target in targets
)
def spawn_map(
self,
target: Callable,
map_iter: Sequence[Any] = None,
*,
map_args: Sequence[Sequence[Any]] = None,
args: Sequence = None,
map_kwargs: Sequence[Mapping[str, Any]] = None,
kwargs: Mapping = None,
**process_kwargs
):
return ProcessList(
map_plus(
lambda *args, **kwargs: self._process(
target, args=args, kwargs=kwargs, **process_kwargs
),
map_iter,
map_args,
args,
map_kwargs,
kwargs,
)
)
def wait(
self, timeout: Union[int, float] = None, safe: bool = False
) -> List[Union[Any, Exception]]:
"""
alias for :py:meth:`ProcessList.wait()`
"""
return self.process_list.wait(timeout, safe)
def start_all(self):
"""
alias for :py:meth:`ProcessList.start_all()`
"""
return self.process_list.start()
def stop_all(self):
"""
alias for :py:meth:`ProcessList.stop_all()`
"""
return self.process_list.stop()
def ping(self, **kwargs):
r"""
Ping the zproc server.
:param \*\*kwargs: Keyword arguments that :py:func:`ping` takes, except ``server_address``.
:return: Same as :py:func:`ping`
"""
return tools.ping(self.server_address, **kwargs)
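# --- Illustrative usage sketch (not part of the original module) ---
# A minimal, hedged example of how the Context API above might be used. It
# assumes this class is exposed as ``zproc.Context`` and relies only on the
# methods defined above (``create_state``, ``spawn``, ``wait``); the state
# keys are made up.
#
#   import zproc
#
#   ctx = zproc.Context()
#   state = ctx.create_state({"msg": "hello"})
#
#   @ctx.spawn
#   def child(state):
#       state.update({"msg": "hello from the child"})
#
#   ctx.wait()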
|
demos/graphql/graph/chat/db_utils.py | hzlmn/aiohttp-demos | 649 | 12766605
from aiopg.sa import SAConnection as SAConn
from aiopg.sa.result import RowProxy
from graph.types import RowsProxy
from graph.constants import OBJECT_NOT_FOUND_ERROR
from graph.chat.tables import (
rooms,
messages,
)
__all__ = [
'select_rooms',
'select_messages_by_room_id',
'select_room',
'create_message',
'delete_message',
]
# selects
async def select_rooms(conn: SAConn) -> RowsProxy:
cursor = await conn.execute(
rooms.select().order_by(rooms.c.id)
)
return await cursor.fetchall()
async def select_room(conn: SAConn, id: int) -> RowProxy:
cursor = await conn.execute(
rooms.select().where(rooms.c.id == id)
)
item = await cursor.fetchone()
assert item, OBJECT_NOT_FOUND_ERROR
return item
async def select_messages_by_room_id(conn: SAConn, room_id: int) -> RowsProxy:
query = messages\
.select()\
.where(messages.c.room_id == room_id)\
.order_by(messages.c.id)
cursor = await conn.execute(query)
return await cursor.fetchall()
# create
async def create_message(
conn: SAConn,
room_id: int,
owner_id: int,
body: str,
) -> RowProxy:
query = messages\
.insert()\
.values(body=body, owner_id=owner_id, room_id=room_id)\
.returning(messages.c.id, messages.c.owner_id)
res = await conn.execute(query)
return await res.fetchone()
# delete
async def delete_message(conn: SAConn, id: int) -> None:
await conn.execute(
messages.delete().where(messages.c.id == id)
)
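# --- Illustrative usage sketch (not part of the original demo) ---
# A hedged example of calling the helpers above through an aiopg.sa engine;
# the connection parameters and ids are assumptions.
#
#   import asyncio
#   from aiopg.sa import create_engine
#
#   async def demo():
#       async with create_engine(user='postgres', database='chat',
#                                host='127.0.0.1') as engine:
#           async with engine.acquire() as conn:
#               all_rooms = await select_rooms(conn)
#               first_room = await select_room(conn, all_rooms[0].id)
#               message = await create_message(conn, first_room.id,
#                                              owner_id=1, body='hi')
#
#   asyncio.get_event_loop().run_until_complete(demo())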
|
dltools/hooks.py | geodekid/frnn | 299 | 12766622 | """Defines hooks that can run during training."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import lasagne
import numpy as np
from sklearn import metrics
class LoggingHook(object):
"""This hook writes information to a log file."""
def __init__(self, logger):
"""Initializes a new instance of the LoggingHook class.
Args:
logger: A logger instance.
"""
self._logger = logger
def update(self, **kwargs):
"""Executes the hook.
Args:
**kwargs: Optimizer state dictionary.
"""
self._logger.log(
key="status",
message="Log at iteration %d" % kwargs["update_counter"]
)
self._logger.log(
key="update_counter",
message=kwargs["update_counter"]
)
self._logger.log(
key="update_runtime",
message=kwargs["runtime"]
)
self._logger.log(
key="losses",
message=np.asarray(kwargs["losses"])
)
class SnapshotHook(object):
"""Hook for storing snapshots of the network's weights."""
def __init__(self, filename, network, interval):
"""Initializes a new instance of the SnapshotHook class.
Args:
filename: The base filename of the model.
network: The network instance to store.
interval: The snapshot interval.
"""
self._filename = filename
self._network = network
self._interval = interval
def update(self, **kwargs):
"""Executed the hook.
Args:
**kwargs: The optimizer dictionary.
"""
# Run the hook now?
if kwargs["update_counter"] % self._interval == 0:
# Yes
np.savez(
"%s_snapshot_%d.npz" % (
self._filename, kwargs["update_counter"]),
*lasagne.layers.get_all_param_values(self._network))
class SegmentationValidationHook(object):
"""Performs a validation run for semantic segmentation."""
def __init__(self, val_fn, data_provider, logger, interval=300,
num_classes=19):
"""Initializes a new instance of the SegmentationValidationHook class.
Args:
val_fn: A function that returns the predictions for each image and
a list of losses.
data_provider: A chianti data provider.
logger: A logger instance.
            interval: The validation interval.
            num_classes: The number of semantic classes.
        """
self._val_fn = val_fn
self._data_provider = data_provider
self._logger = logger
self._interval = interval
self._num_classes = num_classes
def update(self, **kwargs):
"""Runs the validation hook."""
update_now = kwargs["update_counter"] % self._interval == 0
if update_now and kwargs["update_counter"] > 0:
self._logger.log(
key="validation_checkpoint",
message=kwargs["update_counter"]
)
self._logger.log(
key="status",
message="-> Start validation run"
)
# Initialize the confusion matrix
conf_matrix = np.zeros(
(self._num_classes, self._num_classes)).astype('int64')
accumulated_loss = 0
self._data_provider.reset()
for batch_counter in range(self._data_provider.get_num_batches()):
self._logger.log(
key="status",
message="--> Validate batch %d/%d" % (
batch_counter + 1,
self._data_provider.get_num_batches()))
batch = self._data_provider.next()
images = batch[0]
targets = batch[1]
predictions, loss = self._val_fn(images, targets)
accumulated_loss += loss
# Mark the don't care predictions
# Flatten the predictions and targets
flat_predictions = predictions.flatten()
non_void_pixels = (np.max(targets, axis=1) != 0.0).flatten()
flat_targets = np.argmax(targets, axis=1).flatten()
# Select the non-don't cares
flat_targets = flat_targets[non_void_pixels]
flat_predictions = flat_predictions[non_void_pixels]
conf_matrix += metrics.confusion_matrix(
flat_targets,
flat_predictions,
labels=np.arange(self._num_classes, dtype='int64'))
accumulated_loss /= self._data_provider.get_num_batches()
self._logger.log(
key="conf_matrix",
message=conf_matrix
)
self._logger.log(
key="validation_loss",
message=accumulated_loss
)
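# --- Illustrative usage sketch (not part of the original module) ---
# A hedged example of how a training loop might drive the hooks above;
# ``logger``, ``network`` and ``run_update_step`` are hypothetical stand-ins.
#
#   hooks = [
#       LoggingHook(logger),
#       SnapshotHook("model", network, interval=1000),
#   ]
#   for update_counter in range(num_updates):
#       losses, runtime = run_update_step()
#       for hook in hooks:
#           hook.update(update_counter=update_counter,
#                       runtime=runtime,
#                       losses=losses)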
|
sahara-10.0.0/sahara/plugins/edp.py | scottwedge/OpenStack-Stein | 161 | 12766624
# Copyright (c) 2018 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from sahara.service.edp import hdfs_helper
from sahara.service.edp import job_utils
from sahara.service.edp.oozie import engine as oozie_engine
from sahara.service.edp.oozie.workflow_creator import workflow_factory
from sahara.service.edp.spark import engine as spark_engine
from sahara.service.edp.storm import engine as storm_engine
from sahara.utils import edp
JOB_TYPE_HIVE = edp.JOB_TYPE_HIVE
JOB_TYPE_SPARK = edp.JOB_TYPE_SPARK
JOB_TYPE_JAVA = edp.JOB_TYPE_JAVA
JOB_TYPE_SHELL = edp.JOB_TYPE_SHELL
JOB_TYPE_PIG = edp.JOB_TYPE_PIG
JOB_TYPE_STORM = edp.JOB_TYPE_STORM
JOB_TYPE_PYLEUS = edp.JOB_TYPE_PYLEUS
JOB_TYPE_MAPREDUCE = edp.JOB_TYPE_MAPREDUCE
JOB_TYPE_MAPREDUCE_STREAMING = edp.JOB_TYPE_MAPREDUCE_STREAMING
JOB_TYPES_ALL = edp.JOB_TYPES_ALL
JOB_STATUS_SUCCEEDED = edp.JOB_STATUS_SUCCEEDED
class PluginsStormJobEngine(storm_engine.StormJobEngine):
def __init__(self, cluster, **kwargs):
super(PluginsStormJobEngine, self).__init__(cluster)
class PluginsStormPyleusJobEngine(storm_engine.StormPyleusJobEngine):
def __init__(self, cluster, **kwargs):
super(PluginsStormPyleusJobEngine, self).__init__(cluster)
class PluginsSparkJobEngine(spark_engine.SparkJobEngine):
def __init__(self, cluster, **kwargs):
super(PluginsSparkJobEngine, self).__init__(cluster)
class PluginsSparkShellJobEngine(spark_engine.SparkShellJobEngine):
def __init__(self, cluster, **kwargs):
super(PluginsSparkShellJobEngine, self).__init__(cluster)
class PluginsOozieJobEngine(oozie_engine.OozieJobEngine):
def __init__(self, cluster, **kwargs):
super(PluginsOozieJobEngine, self).__init__(cluster)
def get_hive_shared_conf_path(hdfs_user, **kwargs):
return edp.get_hive_shared_conf_path(hdfs_user)
def compare_job_type(job_type, *args, **kwargs):
return edp.compare_job_type(job_type, *args, **kwargs)
def get_builtin_binaries(job, configs, **kwargs):
return edp.get_builtin_binaries(job, configs)
def create_dir_hadoop2(r, dir_name, hdfs_user, **kwargs):
hdfs_helper.create_dir_hadoop2(r, dir_name, hdfs_user)
def create_hbase_common_lib(r, **kwargs):
hdfs_helper.create_hbase_common_lib(r)
def get_plugin(cluster, **kwargs):
return job_utils.get_plugin(cluster)
def get_possible_job_config(job_type, **kwargs):
return workflow_factory.get_possible_job_config(job_type)
def get_possible_mapreduce_configs(**kwargs):
return workflow_factory.get_possible_mapreduce_configs()
|
examples/tensorboard/projector_demo.py | dwolfschlaeger/guildai | 694 | 12766648 | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tensorflow as tf # Requires Tensorflow >=2.1
from tensorboard.plugins import projector
import tensorflow_datasets as tfds
# This demo expands upon the word embeddings tutorial found
# here: https://www.tensorflow.org/tutorials/text/word_embeddings
# and is intended to demonstrate the use of the embedding projector.
LOG_DIR = os.getenv("LOGDIR") or "/tmp/projector_demo" # Tensorboard log dir
METADATA_FNAME = "meta.tsv" # Labels will be stored here
STEP = 0
# Load imdb reviews dataset
(train_data, test_data), info = tfds.load(
"imdb_reviews/subwords8k",
split=(tfds.Split.TRAIN, tfds.Split.TEST),
with_info=True,
as_supervised=True,
)
encoder = info.features["text"].encoder
# shuffle, pad, and train the data.
train_batches = train_data.shuffle(1000).padded_batch(10, padded_shapes=((None,), ()))
test_batches = test_data.shuffle(1000).padded_batch(10, padded_shapes=((None,), ()))
train_batch, train_labels = next(iter(train_batches))
embedding_dim = 16
# Create a basic embedding layer
embedding = tf.keras.layers.Embedding(encoder.vocab_size, embedding_dim)
model = tf.keras.Sequential(
[
embedding,
tf.keras.layers.GlobalAveragePooling1D(),
tf.keras.layers.Dense(16, activation="relu"),
tf.keras.layers.Dense(1),
]
)
# Compile model
model.compile(
optimizer="adam",
loss=tf.keras.losses.BinaryCrossentropy(from_logits=True),
metrics=["accuracy"],
)
# Train model
history = model.fit(
train_batches, epochs=1, validation_data=test_batches, validation_steps=20
)
# Fetch the embedding layer and get the weights.
# Make sure to remove the first element, as it is padding.
weights = tf.Variable(model.layers[0].get_weights()[0][1:])
def register_embedding(weights, labels, log_dir) -> None:
"""Saves a metadata file (labels) and a checkpoint (derived from weights)
and configures the Embedding Projector to read from the appropriate locations.
Args:
weights: tf.Variable with the weights of the embedding layer to be displayed.
labels: list of labels corresponding to the weights.
        log_dir: Directory into which to store the config file, as a `str`.
"""
    # Create a checkpoint from the embedding; the filename and key are
    # the name of the tensor.
    checkpoint = tf.train.Checkpoint(embedding=weights)
    checkpoint.save(os.path.join(log_dir, "embedding.ckpt"))
# Save Labels separately on a line-by-line manner.
with open(os.path.join(log_dir, METADATA_FNAME), "w") as f:
for label in labels:
f.write("{}\n".format(label))
# Set up config
config = projector.ProjectorConfig()
embedding = config.embeddings.add()
# The name of the tensor will be suffixed by `/.ATTRIBUTES/VARIABLE_VALUE`
embedding.tensor_name = "embedding/.ATTRIBUTES/VARIABLE_VALUE"
embedding.metadata_path = METADATA_FNAME
projector.visualize_embeddings(log_dir, config)
# Save Files
register_embedding(weights, encoder.subwords, LOG_DIR)
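# After running this script, the projector can be inspected by pointing
# TensorBoard at the same directory, e.g.:
#   tensorboard --logdir /tmp/projector_demo
# (the default LOG_DIR above; adjust if the LOGDIR environment variable is set).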
|
alipay/aop/api/domain/AlipayFincoreComplianceCrossborderMerchantBatchqueryModel.py | antopen/alipay-sdk-python-all | 213 | 12766670
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.BaseCrossborderMerchantInfo import BaseCrossborderMerchantInfo
class AlipayFincoreComplianceCrossborderMerchantBatchqueryModel(object):
def __init__(self):
self._biz_source = None
self._org_list = None
self._out_biz_no = None
self._total = None
@property
def biz_source(self):
return self._biz_source
@biz_source.setter
def biz_source(self, value):
self._biz_source = value
@property
def org_list(self):
return self._org_list
@org_list.setter
def org_list(self, value):
if isinstance(value, list):
self._org_list = list()
for i in value:
if isinstance(i, BaseCrossborderMerchantInfo):
self._org_list.append(i)
else:
self._org_list.append(BaseCrossborderMerchantInfo.from_alipay_dict(i))
@property
def out_biz_no(self):
return self._out_biz_no
@out_biz_no.setter
def out_biz_no(self, value):
self._out_biz_no = value
@property
def total(self):
return self._total
@total.setter
def total(self, value):
self._total = value
def to_alipay_dict(self):
params = dict()
if self.biz_source:
if hasattr(self.biz_source, 'to_alipay_dict'):
params['biz_source'] = self.biz_source.to_alipay_dict()
else:
params['biz_source'] = self.biz_source
if self.org_list:
if isinstance(self.org_list, list):
for i in range(0, len(self.org_list)):
element = self.org_list[i]
if hasattr(element, 'to_alipay_dict'):
self.org_list[i] = element.to_alipay_dict()
if hasattr(self.org_list, 'to_alipay_dict'):
params['org_list'] = self.org_list.to_alipay_dict()
else:
params['org_list'] = self.org_list
if self.out_biz_no:
if hasattr(self.out_biz_no, 'to_alipay_dict'):
params['out_biz_no'] = self.out_biz_no.to_alipay_dict()
else:
params['out_biz_no'] = self.out_biz_no
if self.total:
if hasattr(self.total, 'to_alipay_dict'):
params['total'] = self.total.to_alipay_dict()
else:
params['total'] = self.total
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AlipayFincoreComplianceCrossborderMerchantBatchqueryModel()
if 'biz_source' in d:
o.biz_source = d['biz_source']
if 'org_list' in d:
o.org_list = d['org_list']
if 'out_biz_no' in d:
o.out_biz_no = d['out_biz_no']
if 'total' in d:
o.total = d['total']
return o
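# --- Illustrative usage sketch (not part of the original SDK file) ---
# A hedged example that round-trips the model through its dict helpers;
# all field values are made up.
#
#   model = AlipayFincoreComplianceCrossborderMerchantBatchqueryModel()
#   model.biz_source = "demo_source"
#   model.out_biz_no = "20210101000001"
#   model.total = 1
#   params = model.to_alipay_dict()
#   restored = AlipayFincoreComplianceCrossborderMerchantBatchqueryModel.from_alipay_dict(params)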
|
plato/datasources/femnist.py | cuiboyuan/plato | 135 | 12766697
"""
The Federated EMNIST dataset.
The Federated EMNIST dataset originates from the EMNIST dataset, which contains
817851 images, each of which is a 28x28 greyscale image in 1 out of 62 classes.
The difference between the Federated EMNIST dataset and its original counterpart
is that this dataset is already partitioned by the client ID, using the data
provider IDs included in the original EMNIST dataset. As a result of this
partitioning, there are 3597 clients in total, each of which has 227.37 images
on average (std is 88.84). For each client, 90% data samples are used for
training, while the remaining samples are used for testing.
Reference:
<NAME>, <NAME>, <NAME>, and <NAME>, "EMNIST: Extending MNIST to
handwritten letters," in the 2017 International Joint Conference on Neural
Networks (IJCNN).
"""
import json
import logging
import os
import numpy as np
from torch.utils.data import Dataset
from torchvision import transforms
from plato.config import Config
from plato.datasources import base
class CustomDictDataset(Dataset):
""" Custom dataset from a dictionary with support of transforms. """
def __init__(self, loaded_data, transform=None):
""" Initializing the custom dataset. """
super().__init__()
self.loaded_data = loaded_data
self.transform = transform
def __getitem__(self, index):
sample = self.loaded_data['x'][index]
target = self.loaded_data['y'][index]
if self.transform:
sample = self.transform(sample)
return sample, target
def __len__(self):
return len(self.loaded_data['y'])
class ReshapeListTransform:
""" The transform that reshapes an image. """
def __init__(self, new_shape):
self.new_shape = new_shape
def __call__(self, img):
return np.array(img, dtype=np.float32).reshape(self.new_shape)
class DataSource(base.DataSource):
"""The FEMNIST dataset."""
def __init__(self, client_id=0):
super().__init__()
self.trainset = None
self.testset = None
root_path = os.path.join(Config().data.data_path, 'FEMNIST',
'packaged_data')
if client_id == 0:
# If we are on the federated learning server
data_dir = os.path.join(root_path, 'test')
data_url = "https://jiangzhifeng.s3.us-east-2.amazonaws.com/FEMNIST/test/" \
+ str(client_id) + ".zip"
else:
data_dir = os.path.join(root_path, 'train')
data_url = "https://jiangzhifeng.s3.us-east-2.amazonaws.com/FEMNIST/train/" \
+ str(client_id) + ".zip"
if not os.path.exists(os.path.join(data_dir, str(client_id))):
logging.info(
"Downloading the Federated EMNIST dataset "
"with the client datasets pre-partitioned. This may take a while.",
)
self.download(url=data_url, data_path=data_dir)
loaded_data = DataSource.read_data(
file_path=os.path.join(data_dir, str(client_id), 'data.json'))
_transform = transforms.Compose([
ReshapeListTransform((28, 28, 1)),
transforms.ToPILImage(),
transforms.RandomCrop(28,
padding=2,
padding_mode="constant",
fill=1.0),
transforms.RandomResizedCrop(28,
scale=(0.8, 1.2),
ratio=(4. / 5., 5. / 4.)),
transforms.RandomRotation(5, fill=1.0),
transforms.ToTensor(),
transforms.Normalize(0.9637, 0.1597),
])
dataset = CustomDictDataset(loaded_data=loaded_data,
transform=_transform)
if client_id == 0: # testing dataset on the server
self.testset = dataset
else: # training dataset on one of the clients
self.trainset = dataset
@staticmethod
def read_data(file_path):
""" Reading the dataset specific to a client_id. """
with open(file_path, 'r') as fin:
loaded_data = json.load(fin)
return loaded_data
def num_train_examples(self):
return len(self.trainset)
def num_test_examples(self):
return len(self.testset)
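# --- Illustrative usage sketch (not part of the original module) ---
# A hedged example of loading one client's shard; it assumes a Plato Config()
# with ``data.data_path`` has already been initialized, and the batch size is
# made up.
#
#   from torch.utils.data import DataLoader
#
#   datasource = DataSource(client_id=1)   # downloads this client's partition if needed
#   loader = DataLoader(datasource.trainset, batch_size=32, shuffle=True)
#   samples, targets = next(iter(loader))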
|
mmtbx/kinemage/__init__.py | dperl-sol/cctbx_project | 155 | 12766711
from __future__ import absolute_import, division, print_function
def kin_vec(start_key, start_xyz, end_key, end_xyz, width=None):
start_altloc = start_key[0:1]
if start_altloc == ' ':
start_altloc_txt = ""
else:
start_altloc_txt = " '%s'" % start_altloc.lower()
end_altloc = end_key[0:1]
if end_altloc == ' ':
end_altloc_txt = ""
else:
end_altloc_txt = " '%s'" % end_altloc.lower()
if width is None:
return "{%s} P%s %.3f %.3f %.3f {%s} L%s %.3f %.3f %.3f\n" % (
start_key,
start_altloc_txt,
start_xyz[0],
start_xyz[1],
start_xyz[2],
end_key,
end_altloc_txt,
end_xyz[0],
end_xyz[1],
end_xyz[2])
else:
return "{%s} P%s %.3f %.3f %.3f {%s} L%s width%d %.3f %.3f %.3f\n" % (
start_key,
start_altloc_txt,
start_xyz[0],
start_xyz[1],
start_xyz[2],
end_key,
end_altloc_txt,
width,
end_xyz[0],
end_xyz[1],
end_xyz[2])
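# Illustrative example (not part of the original module); the keys and
# coordinates below are made up. The first character of each key is treated
# as the altloc (a blank here, so no altloc text is emitted):
#
#   kin_vec(" ca A 12", (1.0, 2.0, 3.0), " cb A 12", (1.5, 2.5, 3.5))
#   # -> "{ ca A 12} P 1.000 2.000 3.000 { cb A 12} L 1.500 2.500 3.500\n"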
|
tests/tests_preprocessing/test_datetime_transformer.py | stjordanis/mljar-supervised | 1,882 | 12766723
import unittest
import tempfile
import json
import numpy as np
import pandas as pd
from supervised.preprocessing.datetime_transformer import DateTimeTransformer
class DateTimeTransformerTest(unittest.TestCase):
def test_transformer(self):
d = {
"col1": [
"2020/06/01",
"2020/06/02",
"2020/06/03",
"2021/06/01",
"2022/06/01",
]
}
df = pd.DataFrame(data=d)
df["col1"] = pd.to_datetime(df["col1"])
df_org = df.copy()
transf = DateTimeTransformer()
transf.fit(df, "col1")
df = transf.transform(df)
self.assertTrue(df.shape[0] == 5)
self.assertTrue("col1" not in df.columns)
self.assertTrue("col1_Year" in df.columns)
transf2 = DateTimeTransformer()
transf2.from_json(transf.to_json())
df2 = transf2.transform(df_org)
self.assertTrue("col1" not in df2.columns)
self.assertTrue("col1_Year" in df2.columns)
|
supersqlite/idxchk.py | plasticity-admin/supersqlite | 687 | 12766731 | #!/usr/bin/python
'''idxchk.py - pretty print indexes used in a query
Ported to Python by <NAME> (<EMAIL>).
Requires pysqlite2, sqlite3 (comes with Python 2.5+) or apsw.
Version 1.01 2008-03-07 Fix to list index method name thanks to <NAME>.
Added sqlite3 support.
Version 1.0 2006-07-18 Initial version.
Placed in the public domain. I know no Tcl, corrections welcome.
'''
import sys
try:
from pysqlite2 import dbapi2
sqlite_connect, SQLError = dbapi2.connect, dbapi2.OperationalError
except ImportError:
try:
from sqlite3 import dbapi2
sqlite_connect, SQLError = dbapi2.connect, dbapi2.OperationalError
except ImportError:
import apsw
sqlite_connect, SQLError = apsw.Connection, apsw.SQLError
debug = False # if true, displays SQL.
verbose = False
dbname = ''
sql = ''
if '-debug' in sys.argv:
debug = True
sys.argv.remove('-debug')
if '-v' in sys.argv:
verbose = True
sys.argv.remove('-v')
if len(sys.argv) <= 1:
print 'usage: %s [-v] [-debug] dbfile [sqlcmds ...]' % sys.argv[0]
print
print ' -v verbose output: opcodes, databases, tables, cursors'
print ' -debug show the internal SQL queries'
print ' dbfile a valid sqlite3 database file or ":memory:"'
print " sqlcmds one or more sql statements separated by ';'"
print
    print 'The last sqlcmd is explained, preceding ones are executed.'
print 'If sqlcmds is omitted, then read sqlcmds from stdin.'
sys.exit(1)
dbname = sys.argv[1]
# if sql parm is missing, read from stdin
if len(sys.argv) > 2:
sql = ' '.join(sys.argv[2:]) + ' \n;\n'
else:
sql = sys.stdin.read()
# Connect to database.
session = sqlite_connect(dbname)
def DO(sql, params={}, cur=session.cursor()):
'Run some SQL.'
if debug:
print '>>>', '\n...'.join(sql.split('\n'))
if params:
print ' %s' % params
try:
cur.execute(sql, params)
rows = []
for row in cur: # apsw doesn't support cur.fetchall()
rows.append(row)
return rows
except SQLError:
print >>sys.stderr, "BAD SQL:", sql
raise
# find the last sql statement, others are executed first
# eg, if temp tables are created and indexed, attach other db's, etc.
idxsql = ''
while len(idxsql) == 0:
sqlcmds = sql.split(';')
if sqlcmds:
presql = sqlcmds[:-1]
idxsql = sqlcmds[-1].strip()
sql = ';'.join(presql)
else:
print 'no sqlcmds to explain'
session.close()
sys.exit(2)
# execute any pre sql first
cnt = 1
for s in presql:
s = s.strip()
if s:
if verbose:
print 'exec sql %d' % cnt
print '----------------------------------------------------------'
print s.replace('\n', ' ')[:50], '.....'
print
try:
DO(s)
except SQLError as e:
print 'sql error while executing statement %d:' % cnt
print s + '\n\nerror message:\n' + str(e)
session.close()
sys.exit(3)
cnt += 1
try:
vcode = DO('EXPLAIN ' + idxsql)
except SQLError as e:
print 'sql error while explaining statement %d:' % cnt
print idxsql + '\n\nerror message:\n' + str(e)
session.close()
sys.exit(4)
# get database names, in case the presql attached any other dbs or temp tables
if verbose:
print 'dbnum dbname'
print '------ ---------------------------------------------------'
dbarr = {}
for dbnum, dbname, dbfile in DO('pragma database_list'):
dbarr[dbnum] = dbname
if verbose:
print '%6d %s (%s)' % (dbnum, dbname, dbfile)
prevint = -1
idxtbl = {}
nesting = []
cursors = []
# collect cursors on first pass
for addr, opcode, p1, p2, p3 in vcode:
if opcode == 'Integer':
prevint = p1
elif opcode == 'OpenRead':
if prevint == -1: # previous opcode was not Integer!
continue
dbnum = prevint
if dbnum not in dbarr:
# explained statement is probably creating a temp table
dbarr[dbnum] = 'temp'
if dbarr[dbnum] == 'temp':
temp = 'temp_'
else:
temp = ''
if dbarr[dbnum] != 'main' and dbarr[dbnum] != 'temp':
dbname = dbarr[dbnum] + '.'
else:
dbname = ''
if p2 == 1: # opening sqlite_master itself, skip
continue
schemasql = '''SELECT type, name, tbl_name, rootpage
FROM %(dbname)ssqlite_%(temp)smaster
WHERE rootpage = %(p2)s''' % locals()
type, name, tbl_name, rootpage = DO(schemasql)[0]
cursors.append((p1, type, dbname + name, name, tbl_name))
else:
        # reset int value, if preceding opcode not Integer
prevint = -1
if verbose:
print
print 'explain sql'
print '----------------------------------------------------------'
print idxsql
print ''
print 'opcodes'
print '----------------------------------------------------------'
for addr, opcode, p1, p2, p3 in vcode:
print '%s|%s|%s|%s|%s' % (addr, opcode, p1, p2, p3)
print
prevint = -1 # not present in the original Tcl - bug?
for addr, opcode, p1, p2, p3 in vcode:
if opcode == 'Integer':
prevint = p1
elif opcode == 'OpenRead':
if prevint == -1: # previous opcode was not Integer!
continue
dbnum = prevint
if dbnum not in dbarr:
# explained statement is probably creating a temp table
dbarr[dbnum] = 'temp'
if dbarr[dbnum] == 'temp':
temp = 'temp_'
else:
temp = ''
if dbarr[dbnum] != 'main' and dbarr[dbnum] != 'temp':
dbname = dbarr[dbnum] + '.'
else:
dbname = ''
schemasql = '''SELECT type, name, tbl_name, rootpage
FROM %(dbname)ssqlite_%(temp)smaster
WHERE rootpage = %(p2)s''' % locals()
type, name, tbl_name, rootpage = DO(schemasql)[0]
idxtab = dbname + tbl_name
#cursors.append((p1, type, dbnamename))
if type == 'index':
# get info for table, all indexes, and this index
pr_tbl_info = DO('pragma table_info(%s)' % tbl_name)
pr_idx_list = DO('pragma index_list(%s)' % tbl_name)
pr_idx_info = DO('pragma index_info(%s)' % name)
cols = []
pkcollist = []
# sort index column names and assemble index columns
ielems = []
for seq, cid, iname in pr_idx_info:
ielems.append((seq, cid, iname))
for seq, cid, iname in sorted(ielems):
cols.append(iname)
pkcollist.append(iname)
cols = '(%s)' % ','.join(cols)
# if index itself is unique
unique = ''
for iseq, iname, isuniq in pr_idx_list:
if name == iname and isuniq:
unique = ' UNIQUE'
break
cols += unique
# index is primary key if all pkcollist names are in table pk cols
i = -1
# for cid, cname, ctype, ispk in pr_tbl_info: # outdated.
for cid, cname, ctype, notnull, dflt_value, ispk in pr_tbl_info:
try:
ispk = int(ispk)
except ValueError:
continue
if ispk:
try:
i = pkcollist.index(cname)
except ValueError:
# didn't find a pk column in the list of index columns
break
# remove this column name from pkcollist
del pkcollist[i]
if i >= 0 and not pkcollist:
# found all of the table pk columns in the pkcollist
cols += ' PRIMARY KEY'
idxtbl[idxtab] = idxtbl.get(idxtab, [])
idxtbl[idxtab].append((name, cols))
elif type == 'table':
# if not in idxtbl array, add it with empty index info
if idxtab not in idxtbl:
idxtbl[idxtab] = []
if idxtab not in nesting:
nesting.append(idxtab)
elif opcode == 'NotExists' or opcode == 'MoveGe' or opcode == 'MoveLt':
# check for possible primary key usage
for cp1, ctype, ctab, cname, ctbl in cursors:
if p1 == cp1 and ctype == 'table':
idxtbl[ctab].append(('<pk>', '<integer primary key or rowid>'))
break
else:
        # reset int value, if preceding opcode not Integer
prevint = -1
if verbose:
print 'table open order (probable join table nesting)'
print '-----------------------------------------------------------'
lev = 0
for tab in nesting:
print '| ' * lev + tab
lev += 1
if lev > 1:
print '| ' * lev
print
print 'cursor type name'
print '------ ------ ----------------------------------------------'
for cur in cursors:
num, type, fullname, name, tbl = cur
print '%6d %-6.6s %s' % (num, type, fullname)
print
# remove any duplicate indexes per each table
for tbl, idxlist in idxtbl.items():
idxtbl[tbl] = sorted(list(set(idxlist)))
# pretty print in column format
# first, figure out column widths
len1 = 6
len2 = 10
len3 = 10
for tbl, idxlist in idxtbl.items():
if len(tbl) > len1:
len1 = len(tbl)
for idx, idxdef in idxlist:
if len(idx) > len2:
len2 = len(idx)
if len(idxdef) > len3:
len3 = len(idxdef)
fmt = '%-{len1}.{len1}s %-{len2}.{len2}s %-{len3}.{len3}s'
# Substitute in for each "{lenX}" in fmt:
fmt = fmt.replace('%', '%%').replace('{', '%(').replace('}', ')s') % locals()
print fmt % ('table', 'index(es)', 'column(s)')
print fmt % ('-' * len1, '-' * len2, '-' * len3)
# now print in order of table open nesting
for tbl in nesting:
t = tbl
idxlist = idxtbl[tbl]
if not idxlist:
print fmt % (tbl, '(none)', '')
else:
for ientry in idxlist:
idx, idxdef = ientry
print fmt % (tbl, idx, idxdef)
#tbl = ''
try:
del idxtbl[t]
except KeyError:
pass
# print any other indexes where index was opened, but not table
for tbl in idxtbl:
idxlist = idxtbl[tbl]
if not idxlist:
print fmt % (tbl, '(none)', '')
else:
for ientry in idxlist:
idx, idxdef = ientry
print fmt % (tbl, idx, idxdef)
#tbl = ''
print '\nSQLite version:', DO('SELECT sqlite_version()')[0][0]
session.close()
|
somaticseq/utilities/attach_pileupVAF.py | bioinform/somaticseq | 159 | 12766737
#!/usr/bin/env python3
# Supports Insertion/Deletion as well as SNVs
# Last updated: 8/29/2015
import math, argparse, sys, os, gzip
import re
import somaticseq.genomicFileHandler.genomic_file_handlers as genome
import somaticseq.genomicFileHandler.pileup_reader as pileup
nan = float('nan')
inf = float('inf')
parser = argparse.ArgumentParser(description='Given either a tumor-only or tumor-normal VCF file (requires SAMPLE NAME specified), and pileup file, it will attach VAF calculated from pileup file to the VCF file. The pileup file can also be streamed in.', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-myvcf', '--my-vcf-file', type=str, help='My VCF', required=True, default=None)
parser.add_argument('-normal', '--normal-sample-name', type=str, help='Normal Sample Name', required=False, default='NORMAL')
parser.add_argument('-tumor', '--tumor-sample-name', type=str, help='Tumor Sample Name', required=False, default='TUMOR')
parser.add_argument('-Npileup', '--normal-pileup-file', type=str, help='Normal VCF File', required=False, default=None)
parser.add_argument('-Tpileup', '--tumor-pileup-file', type=str, help='Tumor VCF File', required=True)
parser.add_argument('-fai', '--reference-fasta-fai', type=str, help='Use the fasta.fai file to get the valid contigs', required=False, default=None)
parser.add_argument('-dict', '--reference-fasta-dict', type=str, help='Use the reference dict file to get the valid contigs', required=False, default=None)
# From pileup:
parser.add_argument('-plVAF', '--pileup-variant-allele-frequency', action='store_true', help='Variant Allele Frequency calculated from pileup file', required=False)
parser.add_argument('-plDP4', '--pileup-DP4', action='store_true', help='DP4 from pileup file', required=False)
# output file
parser.add_argument('-outfile', '--output-file', type=str, help='Output File Name', required=True)
args = parser.parse_args()
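# Hypothetical invocation (illustrative only; the file names are made up):
#   attach_pileupVAF.py -myvcf somatic.vcf -Tpileup tumor.pileup -tumor TUMOR \
#       -fai genome.fa.fai -plVAF -plDP4 -outfile somatic.withVAF.vcf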
##
my_vcf = args.my_vcf_file
Tpileup = args.tumor_pileup_file
Npileup = args.normal_pileup_file
tumor_name = args.tumor_sample_name
normal_name = args.normal_sample_name
fai_file = args.reference_fasta_fai
dict_file = args.reference_fasta_dict
outfile = args.output_file
nan = float('nan')
#### Append headers according to user selection ####
header_append = []
format_append = []
if args.pileup_DP4:
header_append.append('##FORMAT=<ID=plDP4,Number=4,Type=Integer,Description="DP4 from pileup: ref forward, ref reverse, alt forward, alt reverse">')
format_append.append('plDP4')
if args.pileup_variant_allele_frequency:
header_append.append('##FORMAT=<ID=plVAF,Number=1,Type=Float,Description="Variant allele frequency calculated from pileup">')
format_append.append('plVAF')
# Start Working by opening files:
try:
my_vcf = genome.open_textfile(my_vcf)
Tpileup = genome.open_textfile(Tpileup)
outhandle = open(outfile, 'w')
Npileup = genome.open_textfile(Npileup)
except AttributeError:
pass
if Npileup:
npileup_line = Npileup.readline().rstrip('\n')
if Tpileup:
tpileup_line = Tpileup.readline().rstrip('\n')
# Add the extra headers:
out_vcf_headers = genome.vcf_header_modifier( my_vcf, addons=header_append )
# Find out where the tumor and normal samples are in the vcf files, i.e., which column.
# Then, Assuming there are two sample names in "my vcf," the one that appears first should have an index of 0, and the next one is 1:
main_header = out_vcf_headers[3].split('\t')
vcf_idxT = main_header.index(tumor_name)
idxT = vcf_idxT - 9
try:
vcf_idxN = main_header.index(normal_name)
idxN = vcf_idxN - 9
except ValueError:
vcf_idxN = None
idxN = None
# Write the headers to the output vcf file:
outhandle.write(out_vcf_headers[0] + '\n') ##fileformat=VCFv4.1
[ outhandle.write(out_i + '\n') for out_i in out_vcf_headers[1] ]
[ outhandle.write(out_i + '\n') for out_i in out_vcf_headers[2] ]
outhandle.write(out_vcf_headers[3] + '\n') #CHROM...
# Convert contig_sequence to chrom_seq dict:
if dict_file:
chrom_seq = genome.faiordict2contigorder(dict_file, 'dict')
elif fai_file:
chrom_seq = genome.faiordict2contigorder(fai_file, 'fai')
else:
raise Exception('I need a fai or dict file, or else I do not know the contig order.')
pattern_chrom = r'|'.join(chrom_seq)
r_chrom = r'(' + pattern_chrom + r')'
pattern_chr_position = r_chrom + r'\t[0-9]+'
# Figure out the order of NORMAL and TUMOR
if idxN != None:
if Npileup and idxN==0:
external_pileups = [ [Npileup, Tpileup], [npileup_line, tpileup_line] ]
    elif Npileup and idxN==1:
external_pileups = [ [Tpileup, Npileup], [tpileup_line, npileup_line] ]
elif not Npileup:
external_pileups = [ [Tpileup], [tpileup_line] ]
else:
external_pileups = [ [Tpileup], [tpileup_line] ]
line_i = my_vcf.readline().rstrip('\n')
while line_i:
my_coordinate = re.search( pattern_chr_position, line_i )
if my_coordinate:
my_coordinate = my_coordinate.group()
else:
print(line_i, file=sys.stderr)
raise Exception('Your VCF file has a contig that does not exist.')
# my_vcf:
vcf_i = genome.Vcf_line( line_i )
# Modify the FORMAT column:
field_items = vcf_i.get_sample_variable()
field_items.extend( format_append )
field_format_line = ':'.join( field_items )
###########################################################################################
###################### Find the same coordinate in the pileup file ########################
# Line up the order of reading the two files the same order as the sample columns in my_vcf:
samples_collect = []
for SM_idx,current_vcf in enumerate( external_pileups[0] ):
latest_pileup_run = genome.catchup(my_coordinate, external_pileups[1][SM_idx], current_vcf, chrom_seq)
latest_sample = pileup.Pileup_line(latest_pileup_run[1])
sample_append = []
# If the position exists in this samtools generated vcf file:
if latest_pileup_run[0]:
assert vcf_i.position == latest_sample.position
# Figure out alternate pattern:
first_alt_call = vcf_i.altbase.split(',')[0]
base_calls = latest_sample.base_reads()
if base_calls:
# SNV
if len(first_alt_call) == len(vcf_i.refbase):
ref_for, ref_rev, alt_for, alt_rev = base_calls[0], base_calls[1], base_calls[2].count(first_alt_call.upper()), base_calls[3].count(first_alt_call.lower())
# Insertion:
elif len(first_alt_call) > len(vcf_i.refbase):
inserted = first_alt_call[ len(vcf_i.refbase):: ]
ref_for, ref_rev, alt_for, alt_rev = base_calls[0], base_calls[1], base_calls[6].count(inserted.upper()), base_calls[7].count(inserted.lower())
# Deletion:
elif len(first_alt_call) < len(vcf_i.refbase):
deleted = vcf_i.refbase[ len(first_alt_call) :: ]
ref_for, ref_rev, alt_for, alt_rev = base_calls[0], base_calls[1], base_calls[4].count(deleted.upper()), base_calls[5].count(deleted.lower())
else:
ref_for = ref_rev = alt_for = alt_rev = 0
### Pre-defined material ###
### If user wants DP4 ###
if args.pileup_DP4:
pl_DP4 = '{},{},{},{}'.format( ref_for, ref_rev, alt_for, alt_rev )
sample_append.append( pl_DP4 )
### If user wants VAF ###
if args.pileup_variant_allele_frequency:
try:
pl_vaf = ( alt_for + alt_rev ) / ( alt_for + alt_rev + ref_for + ref_rev )
except ZeroDivisionError:
pl_vaf = 0
pl_vaf = '%.3g' % pl_vaf
sample_append.append( pl_vaf )
# Reset the current line:
sample_items = list( vcf_i.get_sample_item(idx=SM_idx, out_type='l')[1] )
sample_items.extend( sample_append )
sample_out = ':'.join( sample_items )
# Reset the current line:
external_pileups[1][SM_idx] = latest_sample.pileup_line
# New format and sample columns:
samples_collect.append( sample_out )
# If the position does not exist in pileup file:
else:
sample_items = list( vcf_i.get_sample_item(idx=SM_idx, out_type='l')[1] )
sample_append = ['.' if i!='plDP4' else '.,.,.,.' for i in format_append ]
sample_items.extend( sample_append )
sample_out = ':'.join( sample_items )
samples_collect.append( sample_out )
external_pileups[1][SM_idx] = latest_sample.pileup_line
### Write out will have a few different possible situations ###
# If NORMAL and TUMOR both exist in the designated VCF file:
if vcf_idxT and vcf_idxN:
# But the Nvcf is not supplied, modified the NORMAL column to reflect change in FORMAT column:
if not Npileup:
            normal_items = list( vcf_i.get_sample_item(idx=idxN, out_type='l')[1] )
            extra_normal_items = ['.' if i!='plDP4' else '.,.,.,.' for i in format_append ]
            normal_items.extend( extra_normal_items )
            normal_out = ':'.join( normal_items )
samples_collect.append( normal_out )
# Write out:
out_i = '\t'.join(( vcf_i.chromosome, str(vcf_i.position), vcf_i.identifier, vcf_i.refbase, vcf_i.altbase, vcf_i.qual, vcf_i.filters, vcf_i.info, field_format_line, samples_collect[0], samples_collect[1] ))
outhandle.write( out_i + '\n' )
# Only TUMOR exists in the designated VCF file:
if not vcf_idxN:
# Write out:
out_i = '\t'.join(( vcf_i.chromosome, str(vcf_i.position), vcf_i.identifier, vcf_i.refbase, vcf_i.altbase, vcf_i.qual, vcf_i.filters, vcf_i.info, field_format_line, samples_collect[0] ))
outhandle.write( out_i + '\n' )
# Read the next line in the designated VCF file:
line_i = my_vcf.readline().rstrip('\n')
# Close files:
my_vcf.close()
Tpileup.close()
outhandle.close()
if Npileup != None:
Npileup.close()
|
zinnia/urls/comments.py | Boondockers-Welcome/django-blog-zinnia | 1,522 | 12766743 | """Urls for the Zinnia comments"""
from django.conf.urls import url
from zinnia.urls import _
from zinnia.views.comments import CommentSuccess
urlpatterns = [
url(_(r'^success/$'),
CommentSuccess.as_view(),
name='comment_success'),
]
|
lib/python/treadmill/tests/keytabs_test.py | krcooke/treadmill | 133 | 12766775 | """Unit test for keytabs
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import io
import os
import shutil
import tempfile
import unittest
import mock
from treadmill import keytabs
class KeytabsTest(unittest.TestCase):
"""test keytabs function
"""
def setUp(self):
self.spool_dir = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.spool_dir)
def _touch_file(self, name):
with io.open(os.path.join(self.spool_dir, name), 'w'):
pass
@mock.patch('treadmill.subproc.check_call')
def test_add_keytabs_to_file(self, mock_check_call):
"""test add keytabs princ files into dest file
"""
self._touch_file('HTTP#foo@realm')
self._touch_file('HTTP#bar@realm')
self._touch_file('host#foo@realm')
self._touch_file('host#bar@realm')
keytabs.add_keytabs_to_file(self.spool_dir, 'host', 'krb5.keytab')
try:
mock_check_call.assert_called_once_with(
[
'kt_add', 'krb5.keytab',
os.path.join(self.spool_dir, 'host#foo@realm'),
os.path.join(self.spool_dir, 'host#bar@realm'),
]
)
except AssertionError:
# then should called with files in other order
mock_check_call.assert_called_once_with(
[
'kt_add', 'krb5.keytab',
os.path.join(self.spool_dir, 'host#bar@realm'),
os.path.join(self.spool_dir, 'host#foo@realm'),
]
)
if __name__ == '__main__':
unittest.main()
|
tests/flatpages_tests/urls.py | ni-ning/django | 61,676 | 12766781
from django.contrib.flatpages.sitemaps import FlatPageSitemap
from django.contrib.sitemaps import views
from django.urls import include, path
urlpatterns = [
path(
'flatpages/sitemap.xml', views.sitemap,
{'sitemaps': {'flatpages': FlatPageSitemap}},
name='django.contrib.sitemaps.views.sitemap'),
path('flatpage_root/', include('django.contrib.flatpages.urls')),
path('accounts/', include('django.contrib.auth.urls')),
]
|
examples/plugin_example/plugin.py | pfnet/pysen | 423 | 12766818 | import dataclasses
import pathlib
import subprocess
from typing import DefaultDict, List, Sequence
import dacite
from pysen.command import CommandBase
from pysen.component import ComponentBase, RunOptions
from pysen.path import change_dir
from pysen.plugin import PluginBase
from pysen.pyproject_model import Config, PluginConfig
from pysen.reporter import Reporter
from pysen.runner_options import PathContext
from pysen.setting import SettingFile
class ShellCommand(CommandBase):
def __init__(self, name: str, base_dir: pathlib.Path, cmd: Sequence[str]) -> None:
self._name = name
self._base_dir = base_dir
self._cmd = cmd
@property
def name(self) -> str:
return self._name
def __call__(self, reporter: Reporter) -> int:
with change_dir(self._base_dir):
try:
ret = subprocess.run(self._cmd)
reporter.logger.info(f"{self._cmd} returns {ret.returncode}")
return ret.returncode
except BaseException as e:
reporter.logger.info(
f"an error occured while executing: {self._cmd}\n{e}"
)
return 255
class ShellComponent(ComponentBase):
def __init__(self, name: str, cmd: Sequence[str], targets: Sequence[str]) -> None:
self._name = name
self._cmd = cmd
self._targets = targets
@property
def name(self) -> str:
return self._name
def export_settings(
self, paths: PathContext, files: DefaultDict[str, SettingFile],
) -> None:
print(f"Called export_settings at {self._name}: do nothing")
@property
def targets(self) -> Sequence[str]:
return self._targets
def create_command(
self, target: str, paths: PathContext, options: RunOptions
) -> CommandBase:
assert target in self._targets
return ShellCommand(self._name, paths.base_dir, self._cmd)
@dataclasses.dataclass
class ShellPluginConfig:
name: str
command: List[str]
targets: List[str]
class ShellPlugin(PluginBase):
def load(
self, file_path: pathlib.Path, config_data: PluginConfig, root: Config
) -> Sequence[ComponentBase]:
assert (
config_data.config is not None
), f"{config_data.location}.config must be not None"
config = dacite.from_dict(
ShellPluginConfig, config_data.config, dacite.Config(strict=True)
)
return [ShellComponent(config.name, config.command, config.targets)]
# NOTE(igarashi): This is the entry point of a plugin method
def plugin() -> PluginBase:
return ShellPlugin()
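# Hedged note (illustrative, not part of the original example): ``load()``
# above expects ``config_data.config`` to be a dict that dacite can parse into
# ``ShellPluginConfig``, i.e. something shaped like (values are made up):
#
#   {
#       "name": "list-sources",
#       "command": ["ls", "-l"],
#       "targets": ["lint"],
#   }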
|
xc/common/libraries/generate_verilog.py | bl0x/symbiflow-arch-defs | 183 | 12766855 | """Transforms the XML module definitions parsed from the PDF into a verilog representation"""
from lxml import etree
from datetime import datetime
def format_port(name, width, type, **kwargs):
wstr = '' if int(width) == 1 else '[%s:0]\t' % width
return '\t%s\t%s%s;\n' % (type, wstr, name)
def format_attrib(name, type, default, **kwargs):
if type == 'STRING':
default = '"%s"' % default # need to ensure strings are quoted
return '\tparameter %s = %s;\n' % (name, default)
def process(infile, outfile):
tree = etree.parse(infile)
root = tree.getroot()
with open(outfile, "w") as output:
output.write(
'// Automatically generated from %s on %s\n\n' %
(infile, datetime.now().isoformat())
)
for module in root.getchildren():
ports = module.xpath('port')
attrs = module.xpath('attribute')
output.write(
'module %s (%s);\n' % (
module.attrib['name'],
', '.join([port.attrib['name'] for port in ports])
)
)
for port in ports:
output.write(format_port(**dict(port.attrib)))
if len(attrs):
output.write('\n')
for attr in attrs:
output.write(format_attrib(**dict(attr.attrib)))
output.write('endmodule\n\n')
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--input', '-i', nargs='?', default='cells_xtra.xml')
parser.add_argument('--output', '-o', nargs='?', default='cells_xtra.v')
args = parser.parse_args()
process(args.input, args.output)
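# Illustrative example (not part of the original script); module, port and
# attribute names are made up. Given a child element of the XML root such as:
#
#   <module name="MYBUF">
#     <port name="I" width="1" type="input"/>
#     <port name="O" width="8" type="output"/>
#     <attribute name="INIT" type="STRING" default="NONE"/>
#   </module>
#
# process() writes roughly (tabs shown as spaces):
#
#   module MYBUF (I, O);
#       input   I;
#       output  [8:0]   O;
#
#       parameter INIT = "NONE";
#   endmodule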
|
dataloader.py | husnejahan/DeepAR-Pytorch | 213 | 12766862
from __future__ import division
import numpy as np
import torch
import os
import logging
from torch.utils.data import DataLoader, Dataset, Sampler
logger = logging.getLogger('DeepAR.Data')
class TrainDataset(Dataset):
def __init__(self, data_path, data_name, num_class):
self.data = np.load(os.path.join(data_path, f'train_data_{data_name}.npy'))
self.label = np.load(os.path.join(data_path, f'train_label_{data_name}.npy'))
self.train_len = self.data.shape[0]
logger.info(f'train_len: {self.train_len}')
logger.info(f'building datasets from {data_path}...')
def __len__(self):
return self.train_len
def __getitem__(self, index):
return (self.data[index,:,:-1],int(self.data[index,0,-1]), self.label[index])
class TestDataset(Dataset):
def __init__(self, data_path, data_name, num_class):
self.data = np.load(os.path.join(data_path, f'test_data_{data_name}.npy'))
self.v = np.load(os.path.join(data_path, f'test_v_{data_name}.npy'))
self.label = np.load(os.path.join(data_path, f'test_label_{data_name}.npy'))
self.test_len = self.data.shape[0]
logger.info(f'test_len: {self.test_len}')
logger.info(f'building datasets from {data_path}...')
def __len__(self):
return self.test_len
def __getitem__(self, index):
return (self.data[index,:,:-1],int(self.data[index,0,-1]),self.v[index],self.label[index])
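# --- Illustrative usage sketch (not part of the original module) ---
# A hedged example that wires the datasets above to a DataLoader, using the
# WeightedSampler defined below; the data path, dataset name and batch size
# are assumptions.
#
#   train_set = TrainDataset('data/elect', 'elect', num_class=370)
#   sampler = WeightedSampler('data/elect', 'elect')
#   train_loader = DataLoader(train_set, batch_size=64, sampler=sampler, num_workers=4)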
class WeightedSampler(Sampler):
def __init__(self, data_path, data_name, replacement=True):
v = np.load(os.path.join(data_path, f'train_v_{data_name}.npy'))
self.weights = torch.as_tensor(np.abs(v[:,0])/np.sum(np.abs(v[:,0])), dtype=torch.double)
logger.info(f'weights: {self.weights}')
self.num_samples = self.weights.shape[0]
logger.info(f'num samples: {self.num_samples}')
self.replacement = replacement
def __iter__(self):
return iter(torch.multinomial(self.weights, self.num_samples, self.replacement).tolist())
def __len__(self):
return self.num_samples |
functions/process_data.py | donglinwu6066/SDEdit | 330 | 12766898 | import torch
import os
def download_process_data(path="colab_demo"):
os.makedirs(path, exist_ok=True)
print("Downloading data")
torch.hub.download_url_to_file('https://image-editing-test-12345.s3-us-west-2.amazonaws.com/colab_examples/lsun_bedroom1.pth', os.path.join(path, 'lsun_bedroom1.pth'))
torch.hub.download_url_to_file('https://image-editing-test-12345.s3-us-west-2.amazonaws.com/colab_examples/lsun_bedroom2.pth', os.path.join(path, 'lsun_bedroom2.pth'))
torch.hub.download_url_to_file('https://image-editing-test-12345.s3-us-west-2.amazonaws.com/colab_examples/lsun_bedroom3.pth', os.path.join(path, 'lsun_bedroom3.pth'))
torch.hub.download_url_to_file('https://image-editing-test-12345.s3-us-west-2.amazonaws.com/colab_examples/lsun_edit.pth', os.path.join(path, 'lsun_edit.pth'))
torch.hub.download_url_to_file('https://image-editing-test-12345.s3-us-west-2.amazonaws.com/colab_examples/lsun_church.pth', os.path.join(path, 'lsun_church.pth'))
print("Data downloaded")
|
src/frequent_phrase_mining/frequent_pattern_mining.py | paperplanet/SegPhrase | 275 | 12766901 | from sets import Set
def frequentPatternMining(tokens, patternOutputFilename, threshold):
dict = {}
tokensNumber = len(tokens)
for i in xrange(tokensNumber):
token = tokens[i]
if token == '$':
continue
if token in dict:
dict[token].append(i)
else:
dict[token] = [i]
print "# of distinct tokens = ", len(dict)
patternOutput = open(patternOutputFilename, 'w')
frequentPatterns = []
patternLength = 1
while (len(dict) > 0):
if patternLength > 6:
break
#print "working on length = ", patternLength
patternLength += 1
newDict = {}
for pattern, positions in dict.items():
occurrence = len(positions)
if occurrence >= threshold:
frequentPatterns.append(pattern)
patternOutput.write(pattern + "," + str(occurrence) + "\n")
for i in positions:
if i + 1 < tokensNumber:
if tokens[i + 1] == '$':
continue
newPattern = pattern + " " + tokens[i + 1]
if newPattern in newDict:
newDict[newPattern].append(i + 1)
else:
newDict[newPattern] = [i + 1]
dict.clear()
dict = newDict
patternOutput.close()
return frequentPatterns
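# Illustrative example (not part of the original module); tokens and threshold
# are made up. '$' marks boundaries that patterns may not start at or extend
# across (see the checks above):
#
#   tokens = ['data', 'mining', '$', 'data', 'mining', 'is', 'fun']
#   patterns = frequentPatternMining(tokens, 'patterns.csv', 2)
#   # 'data', 'mining' and 'data mining' each occur twice, so they are
#   # written to patterns.csv and returned.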
|
web/server/codechecker_server/migrations/report/versions/f8291ab1d6be_fix_setting_analysis_info_id_seq.py | ryankurte/codechecker | 1,601 | 12766905
"""Fix setting analysis_info_id_seq
Revision ID: f8291ab1d6be
Revises: <PASSWORD>
Create Date: 2021-07-15 16:49:05.354455
"""
# revision identifiers, used by Alembic.
revision = 'f8291ab1d6be'
down_revision = '<PASSWORD>'
branch_labels = None
depends_on = None
from alembic import op
def upgrade():
ctx = op.get_context()
dialect = ctx.dialect.name
if dialect == 'postgresql':
op.execute("""
SELECT SETVAL(
'analysis_info_id_seq',
(SELECT MAX(id) + 1 FROM analysis_info)
)
""")
|
test/test_path.py | afermanian/signatory | 156 | 12766915
# Copyright 2019 <NAME>. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =========================================================================
"""Tests the Path class."""
import copy
import gc
import pytest
import random
import torch
import warnings
import weakref
from helpers import helpers as h
from helpers import validation as v
tests = ['Path']
depends = ['signature', 'logsignature']
signatory = v.validate_tests(tests, depends)
def _update_lengths_update_grads(maxlength):
update_lengths = []
update_grads = []
num = int(torch.randint(low=0, high=3, size=(1,)))
for _ in range(num):
update_lengths.append(int(torch.randint(low=1, high=maxlength, size=(1,))))
update_grads.append(random.choice([True, False]))
return update_lengths, update_grads
def test_path():
"""Tests that Path behaves correctly."""
# Test small edge cases thoroughly
for device in h.get_devices():
for batch_size in (1, 2):
for input_stream, basepoints in zip((1, 2), ((True, h.without_grad, h.with_grad),
(False, True, h.without_grad, h.with_grad))):
for input_channels in (1, 2):
for depth in (1, 2):
for scalar_term in (True, False):
for path_grad in (False, True):
basepoint = random.choice(basepoints)
update_lengths, update_grads = _update_lengths_update_grads(3)
_test_path(device, path_grad, batch_size, input_stream, input_channels, depth,
basepoint, update_lengths, update_grads, scalar_term, extrarandom=False,
which='all')
# Randomly test larger cases
for _ in range(50):
device = random.choice(h.get_devices())
batch_size = random.choice((1, 2, 5))
input_stream = random.choice([3, 6, 10])
input_channels = random.choice([1, 2, 6])
depth = random.choice([1, 2, 4, 6])
basepoint = random.choice([False, True, h.without_grad, h.with_grad])
path_grad = random.choice([False, True])
update_lengths, update_grads = _update_lengths_update_grads(10)
scalar_term = random.choice([False, True])
_test_path(device, path_grad, batch_size, input_stream, input_channels, depth,
basepoint, update_lengths, update_grads, scalar_term, extrarandom=True, which='random')
# Do at least one large test
for device in h.get_devices():
_test_path(device, path_grad=True, batch_size=5, input_stream=10, input_channels=6, depth=6,
basepoint=True, update_lengths=[5, 6], update_grads=[False, True], scalar_term=False,
extrarandom=False, which='none')
def _randint(value):
return torch.randint(low=0, high=value, size=(1,)).item()
def _test_path(device, path_grad, batch_size, input_stream, input_channels, depth, basepoint, update_lengths,
update_grads, scalar_term, extrarandom, which):
path = h.get_path(batch_size, input_stream, input_channels, device, path_grad)
basepoint = h.get_basepoint(batch_size, input_channels, device, basepoint)
path_obj = signatory.Path(path, depth, basepoint=basepoint, scalar_term=scalar_term)
if isinstance(basepoint, torch.Tensor):
full_path = torch.cat([basepoint.unsqueeze(1), path], dim=1)
elif basepoint is True:
full_path = torch.cat([torch.zeros(batch_size, 1, input_channels, device=device, dtype=torch.double), path],
dim=1)
else:
full_path = path
if not path_grad and not (isinstance(basepoint, torch.Tensor) and basepoint.requires_grad):
backup_path_obj = copy.deepcopy(path_obj)
# derived objects to test
copy_path_obj = copy.copy(path_obj)
shuffle_path_obj1, perm1 = path_obj.shuffle()
shuffle_path_obj2, perm2 = copy.deepcopy(path_obj).shuffle_()
getitem1 = _randint(batch_size)
getitem_path_obj1 = path_obj[getitem1] # integer
all_derived = [(copy_path_obj, slice(None)),
(shuffle_path_obj1, perm1),
(shuffle_path_obj2, perm2),
(getitem_path_obj1, getitem1)]
start = _randint(batch_size)
end = _randint(batch_size)
getitem2 = slice(start, end)
getitem3 = torch.randint(low=0, high=batch_size, size=(_randint(int(1.5 * batch_size)),))
getitem4 = torch.randint(low=0, high=batch_size, size=(_randint(int(1.5 * batch_size)),)).numpy()
getitem5 = torch.randint(low=0, high=batch_size, size=(_randint(int(1.5 * batch_size)),)).tolist()
try:
getitem_path_obj2 = path_obj[getitem2] # slice, perhaps a 'null' slice
except IndexError as e:
if start >= end:
pass
else:
pytest.fail(str(e))
else:
all_derived.append((getitem_path_obj2, getitem2))
try:
getitem_path_obj3 = path_obj[getitem3] # 1D tensor
except IndexError as e:
if len(getitem3) == 0:
pass
else:
pytest.fail(str(e))
else:
all_derived.append((getitem_path_obj3, getitem3))
try:
getitem_path_obj4 = path_obj[getitem4] # array
except IndexError as e:
if len(getitem4) == 0:
pass
else:
pytest.fail(str(e))
else:
all_derived.append((getitem_path_obj4, getitem4))
try:
getitem_path_obj5 = path_obj[getitem5] # list
except IndexError as e:
if len(getitem5) == 0:
pass
else:
pytest.fail(str(e))
else:
all_derived.append((getitem_path_obj5, getitem5))
if which == 'random':
all_derived = [random.choice(all_derived)]
elif which == 'none':
all_derived = []
for derived_path_obj, derived_index in all_derived:
# tests that the derived objects do what they claim to do
_test_derived(path_obj, derived_path_obj, derived_index, extrarandom)
# tests that the derived objects are consistent wrt themselves
full_path_ = full_path[derived_index]
if isinstance(derived_index, int):
full_path_ = full_path_.unsqueeze(0)
_test_path_obj(full_path_.size(0), input_channels, device, derived_path_obj, full_path_, depth,
update_lengths, update_grads, scalar_term, extrarandom)
# tests that the changes to the derived objects have not affected the original
assert path_obj == backup_path_obj
# finally test the original object
_test_path_obj(batch_size, input_channels, device, path_obj, full_path, depth, update_lengths, update_grads,
scalar_term, extrarandom)
def _test_path_obj(batch_size, input_channels, device, path_obj, full_path, depth, update_lengths, update_grads,
scalar_term, extrarandom):
# First of all test a Path with no updates
_test_signature(path_obj, full_path, depth, scalar_term, extrarandom)
_test_logsignature(path_obj, full_path, depth, extrarandom)
_test_equality(path_obj)
assert path_obj.depth == depth
if len(update_lengths) > 1:
# Then test Path with variable amounts of updates
for length, grad in zip(update_lengths, update_grads):
new_path = torch.rand(batch_size, length, input_channels, dtype=torch.double, device=device,
requires_grad=grad)
path_obj.update(new_path)
full_path = torch.cat([full_path, new_path], dim=1)
_test_signature(path_obj, full_path, depth, scalar_term, extrarandom)
_test_logsignature(path_obj, full_path, depth, extrarandom)
_test_equality(path_obj)
assert path_obj.depth == depth
def _test_signature(path_obj, full_path, depth, scalar_term, extrarandom):
def candidate(start=None, end=None):
return path_obj.signature(start, end)
def true(start, end):
return signatory.signature(full_path[:, start:end], depth, scalar_term=scalar_term)
def extra(true_signature):
assert (path_obj.signature_size(-3), path_obj.signature_size(-1)) == true_signature.shape
assert path_obj.signature_channels() == true_signature.size(-1)
assert path_obj.shape == full_path.shape
assert path_obj.channels() == full_path.size(-1)
_test_operation(path_obj, candidate, true, extra, '_BackwardShortcutBackward', extrarandom)
def _test_logsignature(path_obj, full_path, depth, extrarandom):
if extrarandom:
if random.choice([True, False, False]):
modes = h.all_modes
else:
modes = (h.expand_mode, h.words_mode)
else:
modes = h.all_modes
for mode in modes:
def candidate(start=None, end=None):
with warnings.catch_warnings():
warnings.filterwarnings('ignore', message="The logsignature with mode='brackets' has been requested on "
"the GPU.", category=UserWarning)
return path_obj.logsignature(start, end, mode=mode)
def true(start, end):
with warnings.catch_warnings():
warnings.filterwarnings('ignore', message="The logsignature with mode='brackets' has been requested on "
"the GPU.", category=UserWarning)
return signatory.logsignature(full_path[:, start:end], depth, mode=mode)
def extra(true_logsignature):
if mode != h.expand_mode:
assert (path_obj.logsignature_size(-3),
path_obj.logsignature_size(-1)) == true_logsignature.shape
assert path_obj.logsignature_channels() == true_logsignature.size(-1)
_test_operation(path_obj, candidate, true, extra, '_SignatureToLogsignatureFunctionBackward', extrarandom)
def _test_equality(path_obj):
assert path_obj == path_obj
assert not (path_obj != path_obj)
shuffled_path_obj, perm = path_obj.shuffle()
assert shuffled_path_obj == path_obj[perm]
assert not (shuffled_path_obj != path_obj[perm])
def _test_derived(path_obj, derived_path_obj, derived_index, extrarandom):
def candidate(start=None, end=None):
return torch.cat(derived_path_obj.path, dim=-2)
def true(start, end):
return torch.cat(path_obj.path, dim=-2)[derived_index]
def extra(true_path):
pass
_test_operation(path_obj, candidate, true, extra, None, extrarandom)
def candidate(start=None, end=None):
return derived_path_obj.signature(start, end)
def true(start, end):
return path_obj.signature(start, end)[derived_index]
def extra(true_signature):
pass
_test_operation(path_obj, candidate, true, extra, '_BackwardShortcutBackward', extrarandom)
if extrarandom:
if random.choice([True, False, False]):
modes = h.all_modes
else:
modes = (h.expand_mode, h.words_mode)
else:
modes = h.all_modes
for mode in modes:
def candidate(start=None, end=None):
with warnings.catch_warnings():
warnings.filterwarnings('ignore', message="The logsignature with mode='brackets' has been requested on "
"the GPU.", category=UserWarning)
return derived_path_obj.logsignature(start, end, mode=mode)
def true(start, end):
with warnings.catch_warnings():
warnings.filterwarnings('ignore', message="The logsignature with mode='brackets' has been requested on "
"the GPU.", category=UserWarning)
return path_obj.logsignature(start, end, mode=mode)[derived_index]
def extra(true_logsignature):
pass
_test_operation(path_obj, candidate, true, extra, '_SignatureToLogsignatureFunctionBackward', extrarandom)
def _boundaries(length):
yield -length - 1
yield -length
yield -1
yield 0
yield 1
yield length - 1
yield length
yield None
def _start_end(length, extrarandom):
for start in _boundaries(length):
for end in _boundaries(length):
if (not extrarandom) or random.choice([True, False]):
yield start, end
for _ in range(5):
start = int(torch.randint(low=-length, high=length, size=(1,)))
end = int(torch.randint(low=-length, high=length, size=(1,)))
yield start, end
def _test_operation(path_obj, candidate, true, extra, backward_name, extrarandom):
# We perform multiple tests here.
# Test #1: That the memory usage is consistent
# Test #2: That the backward 'ctx' is correctly garbage collected
# Test #3: The forward accuracy of a particular operation
# Test #4: The backward accuracy of the same operation
def one_iteration(start, end):
gc.collect()
if torch.cuda.is_available():
torch.cuda.synchronize()
torch.cuda.reset_max_memory_allocated()
try:
tensor = candidate(start, end)
except ValueError as e:
try:
true(start, end)
except ValueError:
return 0
else:
pytest.fail(str(e))
try:
true_tensor = true(start, end)
except ValueError as e:
pytest.fail(str(e))
h.diff(tensor, true_tensor) # Test #3
extra(true_tensor) # Any extra tests
if tensor.requires_grad:
grad = torch.rand_like(tensor)
tensor.backward(grad)
path_grads = []
for path in path_obj.path:
if path.grad is None:
path_grads.append(None)
else:
path_grads.append(path.grad.clone())
path.grad.zero_()
true_tensor.backward(grad)
for path, path_grad in zip(path_obj.path, path_grads):
if path_grad is None:
assert (path.grad is None) or (path.grad.nonzero().numel() == 0)
else:
h.diff(path.grad, path_grad) # Test #4
path.grad.zero_()
ctx = tensor.grad_fn
assert type(ctx).__name__ == backward_name
ref = weakref.ref(ctx)
del ctx
del tensor
gc.collect()
assert ref() is None # Test #2
if torch.cuda.is_available():
torch.cuda.synchronize()
return torch.cuda.max_memory_allocated()
else:
return 0
    # Computations that include the start point behave differently from those that do not, so take the max memory over both
memory_used = max(one_iteration(0, None), one_iteration(1, None))
for start, end in _start_end(path_obj.size(1), extrarandom):
# This one seems to be a bit inconsistent with how much memory is used on each run, so we give some
# leeway by doubling
assert one_iteration(start, end) <= 2 * memory_used
|
CommandRecognition/model.py | c-z-h123/https-github.com-Ryuk17-SpeechAlgorithms | 338 | 12766923 |
"""
@FileName: model.py
@Description: Implement model
@Author: Ryuk
@CreateDate: 2020/05/12
@LastEditTime: 2020/05/12
@LastEditors: Please set LastEditors
@Version: v0.1
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
class basicBlock(nn.Module):
expansion = 1
def __init__(self, in_channels, out_channels, stride=1):
super(basicBlock, self).__init__()
self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(out_channels)
self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(out_channels)
# shortcut is a convolution layer with BatchNormalization
self.shortcut = nn.Sequential()
        if stride != 1 or in_channels != self.expansion * out_channels:
self.shortcut = nn.Sequential(
nn.Conv2d(in_channels, self.expansion * out_channels, kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(self.expansion * out_channels)
)
def forward(self, input):
x = F.relu(self.bn1(self.conv1(input)))
x = self.bn2(self.conv2(x))
x += self.shortcut(input)
x = F.relu(x)
return x
class bottleneckBlock(nn.Module):
expansion = 4
def __init__(self, in_channels, out_channels, stride=1):
super(bottleneckBlock, self).__init__()
        # 1x1 reduce, 3x3 (strided) conv, 1x1 expand
        self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(out_channels)
        self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(out_channels)
        self.conv3 = nn.Conv2d(out_channels, self.expansion * out_channels, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(self.expansion * out_channels)
        # shortcut is the identity unless the spatial size or channel count changes
        self.shortcut = nn.Sequential()
        if stride != 1 or in_channels != self.expansion * out_channels:
self.shortcut = nn.Sequential(
nn.Conv2d(in_channels, self.expansion * out_channels, kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(self.expansion * out_channels)
)
def forward(self, input):
x = F.relu(self.bn1(self.conv1(input)))
x = F.relu(self.bn2(self.conv2(x)))
x = self.bn3(self.conv3(x))
x += self.shortcut(input)
x = F.relu(x)
return x
class Resnet(nn.Module):
def __init__(self, block, num_blocks, num_classes=6):
super(Resnet, self).__init__()
self.in_channels = 64
self.conv1 = nn.Conv2d(1, 64, kernel_size=3, stride=1, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
self.linear = nn.Linear(512 * block.expansion, num_classes)
def _make_layer(self, block, out_channels, num_blocks, stride):
strides = [stride] + [1] * (num_blocks - 1)
layers = []
for stride in strides:
layers.append(block(self.in_channels, out_channels, stride))
self.in_channels = out_channels * block.expansion
return nn.Sequential(*layers)
def forward(self, x):
x = F.relu(self.bn1(self.conv1(x)))
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = F.avg_pool2d(x, 4)
x = x.view(x.size(0), -1)
x = self.linear(x)
return x
def ResNet18():
return Resnet(basicBlock, [2, 2, 2, 2])
def ResNet152():
return Resnet(bottleneckBlock, [3, 8, 36, 3])
def main():
x = torch.randn(1, 1, 50, 32)
net = ResNet18()
print(net(x))
if __name__ == "__main__":
main() |
depth_upsampling/losses/gradient_loss.py | Levintsky/ARKitScenes | 237 | 12766951 | import torch
import dataset_keys
def div_by_mask_sum(loss: torch.Tensor, mask_sum: torch.Tensor):
return loss / torch.max(mask_sum, torch.ones_like(mask_sum))
class SafeTorchLog(torch.autograd.Function):
@staticmethod
def forward(ctx, input):
"""
In the forward pass we receive a Tensor containing the input and return
a Tensor containing the output. ctx is a context object that can be used
to stash information for backward computation. You can cache arbitrary
objects for use in the backward pass using the ctx.save_for_backward method.
"""
input_abs = torch.abs(input) + 1e-9
ctx.save_for_backward(input_abs)
return torch.log(input_abs)
@staticmethod
def backward(ctx, grad_output):
"""
In the backward pass we receive a Tensor containing the gradient of the loss
with respect to the output, and we need to compute the gradient of the loss
with respect to the input.
"""
(input_abs,) = ctx.saved_tensors
grad_input = grad_output.clone()
return grad_input * (1.0 / input_abs) / 2.302585093 # ln(10)
safe_torch_log = SafeTorchLog.apply
def create_gradient_log_loss(log_prediction_d, mask, log_gt):
# compute log difference
log_d_diff = log_prediction_d - log_gt
log_d_diff = torch.mul(log_d_diff, mask)
# compute vertical gradient
v_gradient = torch.abs(log_d_diff[:, :, 2:, :] - log_d_diff[:, :, :-2, :])
v_mask = torch.mul(mask[:, :, 2:, :], mask[:, :, :-2, :])
v_gradient = torch.mul(v_gradient, v_mask)
# compute horizontal gradient
h_gradient = torch.abs(log_d_diff[:, :, :, 2:] - log_d_diff[:, :, :, :-2])
h_mask = torch.mul(mask[:, :, :, 2:], mask[:, :, :, :-2])
h_gradient = torch.mul(h_gradient, h_mask)
# sum up gradients
grad_loss = torch.sum(h_gradient, dim=[1, 2, 3]) + torch.sum(v_gradient, dim=[1, 2, 3])
num_valid_pixels = torch.sum(mask, dim=[1, 2, 3])
grad_loss = div_by_mask_sum(grad_loss, num_valid_pixels)
return grad_loss
def create_gradient_log_loss_4_scales(log_prediction, log_ground_truth, mask):
log_prediction_d = log_prediction
log_gt = log_ground_truth
mask = mask
    log_prediction_d_scale_1 = log_prediction_d[:, :, ::2, ::2]
    log_prediction_d_scale_2 = log_prediction_d_scale_1[:, :, ::2, ::2]
    log_prediction_d_scale_3 = log_prediction_d_scale_2[:, :, ::2, ::2]
    mask_scale_1 = mask[:, :, ::2, ::2]
    mask_scale_2 = mask_scale_1[:, :, ::2, ::2]
    mask_scale_3 = mask_scale_2[:, :, ::2, ::2]
    log_gt_scale_1 = log_gt[:, :, ::2, ::2]
    log_gt_scale_2 = log_gt_scale_1[:, :, ::2, ::2]
    log_gt_scale_3 = log_gt_scale_2[:, :, ::2, ::2]
gradient_loss_scale_0 = create_gradient_log_loss(log_prediction_d, mask, log_gt)
gradient_loss_scale_1 = create_gradient_log_loss(
log_prediction_d_scale_1, mask_scale_1, log_gt_scale_1
)
gradient_loss_scale_2 = create_gradient_log_loss(
log_prediction_d_scale_2, mask_scale_2, log_gt_scale_2
)
gradient_loss_scale_3 = create_gradient_log_loss(
log_prediction_d_scale_3, mask_scale_3, log_gt_scale_3
)
gradient_loss_4_scales = (
gradient_loss_scale_0 + gradient_loss_scale_1 + gradient_loss_scale_2 + gradient_loss_scale_3
)
return gradient_loss_4_scales
def gradient_loss(outputs, inputs):
valid_mask = inputs[dataset_keys.VALID_MASK_IMG]
gt_depth = inputs[dataset_keys.HIGH_RES_DEPTH_IMG]
prediction = outputs[dataset_keys.PREDICTION_DEPTH_IMG]
log_prediction = safe_torch_log(prediction)
log_gt = safe_torch_log(gt_depth)
loss = create_gradient_log_loss_4_scales(log_prediction, log_gt, valid_mask)
loss = torch.mean(loss)
return loss
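# Minimal smoke-test sketch for the multi-scale gradient loss; the tensor shapes are
# illustrative assumptions, not values taken from the ARKitScenes training pipeline.
if __name__ == "__main__":
    pred = torch.rand(2, 1, 64, 64) + 1e-3   # fake positive depth prediction [B, 1, H, W]
    gt = torch.rand(2, 1, 64, 64) + 1e-3     # fake positive ground-truth depth
    mask = torch.ones(2, 1, 64, 64)          # every pixel marked valid
    per_sample = create_gradient_log_loss_4_scales(safe_torch_log(pred), safe_torch_log(gt), mask)
    print(per_sample.shape, per_sample.mean().item())  # torch.Size([2]) and a scalar loss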
|
tests/roots/test-epub-anchor-id/conf.py | samdoran/sphinx | 4,973 | 12766984 |
def setup(app):
app.add_crossref_type(directivename="setting", rolename="setting")
|
dataset/waveform_dataset.py | wimmerb/Wave-U-Net-for-Speech-Enhancement | 166 | 12766989 | import os
import librosa
from torch.utils import data
from util.utils import sample_fixed_length_data_aligned
class Dataset(data.Dataset):
def __init__(self,
dataset,
limit=None,
offset=0,
sample_length=16384,
mode="train"):
"""Construct dataset for training and validation.
Args:
dataset (str): *.txt, the path of the dataset list file. See "Notes."
limit (int): Return at most limit files in the list. If None, all files are returned.
offset (int): Return files starting at an offset within the list. Use negative values to offset from the end of the list.
sample_length(int): The model only supports fixed-length input. Use sample_length to specify the feature size of the input.
mode(str): If mode is "train", return fixed-length signals. If mode is "validation", return original-length signals.
Notes:
dataset list file:
<noisy_1_path><space><clean_1_path>
<noisy_2_path><space><clean_2_path>
...
<noisy_n_path><space><clean_n_path>
e.g.
/train/noisy/a.wav /train/clean/a.wav
/train/noisy/b.wav /train/clean/b.wav
...
Return:
(mixture signals, clean signals, filename)
"""
super(Dataset, self).__init__()
dataset_list = [line.rstrip('\n') for line in open(os.path.abspath(os.path.expanduser(dataset)), "r")]
dataset_list = dataset_list[offset:]
if limit:
dataset_list = dataset_list[:limit]
assert mode in ("train", "validation"), "Mode must be one of 'train' or 'validation'."
self.length = len(dataset_list)
self.dataset_list = dataset_list
self.sample_length = sample_length
self.mode = mode
def __len__(self):
return self.length
def __getitem__(self, item):
mixture_path, clean_path = self.dataset_list[item].split(" ")
filename = os.path.splitext(os.path.basename(mixture_path))[0]
mixture, _ = librosa.load(os.path.abspath(os.path.expanduser(mixture_path)), sr=None)
clean, _ = librosa.load(os.path.abspath(os.path.expanduser(clean_path)), sr=None)
if self.mode == "train":
# The input of model should be fixed-length in the training.
mixture, clean = sample_fixed_length_data_aligned(mixture, clean, self.sample_length)
return mixture.reshape(1, -1), clean.reshape(1, -1), filename
else:
return mixture.reshape(1, -1), clean.reshape(1, -1), filename
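# Usage sketch: wrapping the dataset in a DataLoader. "train_list.txt" is a
# hypothetical list file in the format documented in the class docstring above.
if __name__ == "__main__":
    train_set = Dataset(dataset="train_list.txt", sample_length=16384, mode="train")
    train_loader = data.DataLoader(train_set, batch_size=4, shuffle=True)
    for mixture, clean, name in train_loader:
        # mixture and clean are [batch, 1, sample_length]; name is a tuple of filenames
        print(mixture.shape, clean.shape, name)
        break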
|
paddlespeech/t2s/audio/codec.py | jerryuhoo/PaddleSpeech | 1,379 | 12767017 |
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import numpy as np
import paddle
# x: [0: 2**bit-1], return: [-1, 1]
def label_2_float(x, bits):
return 2 * x / (2**bits - 1.) - 1.
#x: [-1, 1], return: [0, 2**bits-1]
def float_2_label(x, bits):
assert abs(x).max() <= 1.0
x = (x + 1.) * (2**bits - 1) / 2
return x.clip(0, 2**bits - 1)
# y: [-1, 1], mu: 2**bits, return: [0, 2**bits-1]
# see https://en.wikipedia.org/wiki/%CE%9C-law_algorithm
# be careful the input `mu` here, which is +1 than that of the link above
def encode_mu_law(x, mu):
mu = mu - 1
fx = np.sign(x) * np.log(1 + mu * np.abs(x)) / np.log(1 + mu)
return np.floor((fx + 1) / 2 * mu + 0.5)
# from_labels = True:
# y: [0: 2**bit-1], mu: 2**bits, return: [-1,1]
# from_labels = False:
# y: [-1, 1], return: [-1, 1]
def decode_mu_law(y, mu, from_labels=True):
# TODO: get rid of log2 - makes no sense
if from_labels:
y = label_2_float(y, math.log2(mu))
mu = mu - 1
x = paddle.sign(y) / mu * ((1 + mu)**paddle.abs(y) - 1)
return x
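# Round-trip sketch of 8-bit mu-law companding with the helpers above; the ramp
# signal and bit depth are arbitrary choices for illustration.
if __name__ == "__main__":
    wav = np.linspace(-1.0, 1.0, num=8, dtype="float32")
    labels = encode_mu_law(wav, mu=2**8)                              # values in [0, 255]
    recovered = decode_mu_law(paddle.to_tensor(labels), mu=2**8, from_labels=True)
    print(labels)
    print(recovered.numpy())                                          # approximately the original ramp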
|
components/mpas-seaice/testing_and_setup/testcases/square/1D_velocity_hex/plot_method_comparison.py | Fa-Li/E3SM | 235 | 12767027 |
from netCDF4 import Dataset
import numpy as np
import matplotlib.pyplot as plt
import math
fig, axes = plt.subplots()
subcycleNumber = 7680
operatorMethods = ["wachspress","pwl","weak"]
for operatorMethod in operatorMethods:
# data in
filenameIn = "./output_hex_%s_%i/output.2000.nc" %(operatorMethod,subcycleNumber)
filein = Dataset(filenameIn, "r")
nCells = len(filein.dimensions["nCells"])
nVertices = len(filein.dimensions["nVertices"])
vertexDegree = len(filein.dimensions["vertexDegree"])
nTimes = len(filein.dimensions["Time"])
cellsOnVertex = filein.variables["cellsOnVertex"][:]
cellsOnVertex -= 1
xVertex = filein.variables["xVertex"][:]
yVertex = filein.variables["yVertex"][:]
xCell = filein.variables["xCell"][:]
yCell = filein.variables["yCell"][:]
uVelocity = filein.variables["uVelocity"][-1,:]
vVelocity = filein.variables["vVelocity"][-1,:]
uVelocities = filein.variables["uVelocity"][:,:]
filein.close()
xmin = np.amin(xVertex)
xmax = np.amax(xVertex)
ymin = np.amin(yVertex)
ymax = np.amax(yVertex)
us = []
for iTime in range(0,nTimes):
x = []
u = []
for iVertex in range(0,nVertices):
if (math.fabs(yVertex[iVertex] - 508068.236886871) < 1e-8):
x.append(xVertex[iVertex])
u.append(uVelocities[iTime,iVertex])
x = np.array(x)
u = np.array(u)
sortedIdxs = x.argsort()
x = x[sortedIdxs]
u = u[sortedIdxs]
us.append(math.sqrt(np.sum(np.power(u,2))))
if (iTime == nTimes-1):
axes.plot(x, u, label=operatorMethod)
#axes.plot(x, np.zeros(x.shape[0]), zorder=1, c='k')
uAir = 1.0
rhoair = 1.3
rhow = 1026.0
cocn = 0.00536
cair = 0.0012
Pstar = 2.75e4
Cstar = 20.0
e = 2
alpha = math.sqrt(1.0 + math.pow(1.0 / e, 2))
Lx = 1280000
uu = []
for xx in x:
a = xx / Lx
v = 2.0 * a
dadx = (1.0 / Lx)
dvdx = 2.0 * dadx
oceanStressCoeff = rhow * cocn * a
airStress = rhoair * uAir * uAir * a * cair
P = Pstar * v * math.exp(-Cstar * (1-a))
dPdx = Pstar * math.exp(-Cstar * (1-a)) * (dvdx + v * Cstar * dadx)
print(xx, a, -Cstar * (1-a), P, dPdx)
u = max((airStress - 0.5*(alpha + 1.0) * dPdx) / oceanStressCoeff, 0.0)
uu.append(u)
axes.plot(x, uu, zorder=2, c='r')
axes.set_xlabel("time")
axes.set_ylabel("uVelocity")
axes.legend()
plt.savefig("1D_velocity_operator.png",dpi=300)
|
doorman/models.py | ESGuardian/doorman-docker | 614 | 12767062 | # -*- coding: utf-8 -*-
import datetime as dt
import string
import uuid
from flask_login import UserMixin
from doorman.database import (
Column,
Table,
ForeignKey,
Index,
Model,
SurrogatePK,
db,
reference_col,
relationship,
ARRAY,
JSONB,
INET,
declared_attr,
)
from doorman.extensions import bcrypt
querypacks = Table(
'query_packs',
Column('pack.id', db.Integer, ForeignKey('pack.id')),
Column('query.id', db.Integer, ForeignKey('query.id'))
)
pack_tags = Table(
'pack_tags',
Column('tag.id', db.Integer, ForeignKey('tag.id')),
Column('pack.id', db.Integer, ForeignKey('pack.id'), index=True)
)
node_tags = Table(
'node_tags',
Column('tag.id', db.Integer, ForeignKey('tag.id')),
Column('node.id', db.Integer, ForeignKey('node.id'), index=True)
)
query_tags = Table(
'query_tags',
Column('tag.id', db.Integer, ForeignKey('tag.id')),
Column('query.id', db.Integer, ForeignKey('query.id'), index=True)
)
file_path_tags = Table(
'file_path_tags',
Column('tag.id', db.Integer, ForeignKey('tag.id')),
Column('file_path.id', db.Integer, ForeignKey('file_path.id'), index=True)
)
class Tag(SurrogatePK, Model):
value = Column(db.String, nullable=False, unique=True)
nodes = relationship(
'Node',
secondary=node_tags,
back_populates='tags',
)
packs = relationship(
'Pack',
secondary=pack_tags,
back_populates='tags',
)
queries = relationship(
'Query',
secondary=query_tags,
back_populates='tags',
)
file_paths = relationship(
'FilePath',
secondary=file_path_tags,
back_populates='tags',
)
def __init__(self, value, **kwargs):
self.value = value
def __repr__(self):
return '<Tag: {0.value}>'.format(self)
@property
def packs_count(self):
return db.session.object_session(self) \
.query(Pack.id).with_parent(self, 'packs').count()
@property
def nodes_count(self):
return db.session.object_session(self) \
.query(Node.id).with_parent(self, 'nodes').count()
@property
def queries_count(self):
return db.session.object_session(self) \
.query(Query.id).with_parent(self, 'queries').count()
@property
def file_paths_count(self):
return db.session.object_session(self) \
.query(FilePath.id).with_parent(self, 'file_paths').count()
class Query(SurrogatePK, Model):
name = Column(db.String, nullable=False)
sql = Column(db.String, nullable=False)
interval = Column(db.Integer, default=3600)
platform = Column(db.String)
version = Column(db.String)
description = Column(db.String)
value = Column(db.String)
removed = Column(db.Boolean, nullable=False, default=True)
shard = Column(db.Integer)
packs = relationship(
'Pack',
secondary=querypacks,
back_populates='queries',
)
tags = relationship(
'Tag',
secondary=query_tags,
back_populates='queries',
lazy='joined',
)
def __init__(self, name, query=None, sql=None, interval=3600, platform=None,
version=None, description=None, value=None, removed=True,
shard=None, **kwargs):
self.name = name
self.sql = query or sql
self.interval = int(interval)
self.platform = platform
self.version = version
self.description = description
self.value = value
self.removed = removed
self.shard = shard
def __repr__(self):
return '<Query: {0.name}>'.format(self)
def to_dict(self):
return {
'query': self.sql,
'interval': self.interval,
'platform': self.platform,
'version': self.version,
'description': self.description,
'value': self.value,
'removed': self.removed,
'shard': self.shard,
}
class Pack(SurrogatePK, Model):
name = Column(db.String, nullable=False, unique=True)
platform = Column(db.String)
version = Column(db.String)
description = Column(db.String)
shard = Column(db.Integer)
queries = relationship(
'Query',
secondary=querypacks,
back_populates='packs',
)
tags = relationship(
'Tag',
secondary=pack_tags,
back_populates='packs',
)
def __init__(self, name, platform=None, version=None,
description=None, shard=None, **kwargs):
self.name = name
self.platform = platform
self.version = version
self.description = description
self.shard = shard
def __repr__(self):
return '<Pack: {0.name}>'.format(self)
def to_dict(self):
queries = {}
discovery = []
for query in self.queries:
if 'discovery' in (t.value for t in query.tags):
discovery.append(query.sql)
else:
queries[query.name] = query.to_dict()
return {
'platform': self.platform,
'version': self.version,
'shard': self.shard,
'discovery': discovery,
'queries': queries,
}
class Node(SurrogatePK, Model):
node_key = Column(db.String, nullable=False, unique=True)
enroll_secret = Column(db.String)
enrolled_on = Column(db.DateTime)
host_identifier = Column(db.String)
last_checkin = Column(db.DateTime)
node_info = Column(JSONB, default={}, nullable=False)
is_active = Column(db.Boolean, default=True, nullable=False)
last_ip = Column(INET, nullable=True)
tags = relationship(
'Tag',
secondary=node_tags,
back_populates='nodes',
lazy='joined',
)
def __init__(self, host_identifier, node_key=None,
enroll_secret=None, enrolled_on=None, last_checkin=None,
is_active=True, last_ip=None,
**kwargs):
self.node_key = node_key or str(uuid.uuid4())
self.host_identifier = host_identifier
self.enroll_secret = enroll_secret
self.enrolled_on = enrolled_on
self.last_checkin = last_checkin
self.is_active = is_active
self.last_ip = last_ip
def __repr__(self):
return '<Node-{0.id}: node_key={0.node_key}, host_identifier={0.host_identifier}>'.format(self)
def get_config(self, **kwargs):
from doorman.utils import assemble_configuration
return assemble_configuration(self)
def get_new_queries(self, **kwargs):
from doorman.utils import assemble_distributed_queries
return assemble_distributed_queries(self)
@property
def display_name(self):
if 'display_name' in self.node_info and self.node_info['display_name']:
return self.node_info['display_name']
elif 'hostname' in self.node_info and self.node_info['hostname']:
return self.node_info['hostname']
elif 'computer_name' in self.node_info and self.node_info['computer_name']:
return self.node_info['computer_name']
else:
return self.host_identifier
@property
def packs(self):
return db.session.object_session(self) \
.query(Pack) \
.join(pack_tags, pack_tags.c['pack.id'] == Pack.id) \
.join(node_tags, node_tags.c['tag.id'] == pack_tags.c['tag.id']) \
.filter(node_tags.c['node.id'] == self.id) \
.options(db.lazyload('*'))
@property
def queries(self):
return db.session.object_session(self) \
.query(Query) \
.join(query_tags, query_tags.c['query.id'] == Query.id) \
.join(node_tags, node_tags.c['tag.id'] == query_tags.c['tag.id']) \
.filter(node_tags.c['node.id'] == self.id) \
.options(db.lazyload('*'))
@property
def file_paths(self):
return db.session.object_session(self) \
.query(FilePath) \
.join(file_path_tags, file_path_tags.c['file_path.id'] == FilePath.id) \
.join(node_tags, node_tags.c['tag.id'] == file_path_tags.c['tag.id']) \
.filter(node_tags.c['node.id'] == self.id) \
.options(db.lazyload('*'))
def to_dict(self):
# NOTE: deliberately not including any secret values in here, for now.
return {
'id': self.id,
'display_name': self.display_name,
'enrolled_on': self.enrolled_on,
'host_identifier': self.host_identifier,
'last_checkin': self.last_checkin,
'node_info': self.node_info.copy(),
'last_ip': self.last_ip,
'is_active': self.is_active
}
class FilePath(SurrogatePK, Model):
category = Column(db.String, nullable=False, unique=True)
target_paths = Column(db.String)
tags = relationship(
'Tag',
secondary=file_path_tags,
back_populates='file_paths',
lazy='joined',
)
def __init__(self, category=None, target_paths=None, *args, **kwargs):
self.category = category
if target_paths is not None:
self.set_paths(*target_paths)
elif args:
self.set_paths(*args)
else:
self.target_paths = ''
def to_dict(self):
return {
self.category: self.get_paths()
}
def get_paths(self):
return self.target_paths.split('!!')
def set_paths(self, *target_paths):
self.target_paths = '!!'.join(target_paths)
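# Usage sketch (hypothetical category and paths): FilePath keeps its target paths as a
# single '!!'-delimited string, so a round trip looks like
#   fp = FilePath(category='home_dirs', target_paths=['/home/%%', '/root/%%'])
#   fp.get_paths()  # -> ['/home/%%', '/root/%%']
#   fp.to_dict()    # -> {'home_dirs': ['/home/%%', '/root/%%']}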
class ResultLog(SurrogatePK, Model):
name = Column(db.String, nullable=False)
timestamp = Column(db.DateTime, default=dt.datetime.utcnow)
action = Column(db.String)
columns = Column(JSONB)
node_id = reference_col('node', nullable=False)
node = relationship(
'Node',
backref=db.backref('result_logs', lazy='dynamic')
)
def __init__(self, name=None, action=None, columns=None, timestamp=None,
node=None, node_id=None, **kwargs):
self.name = name
self.action = action
self.columns = columns or {}
self.timestamp = timestamp
if node:
self.node = node
elif node_id:
self.node_id = node_id
@declared_attr
def __table_args__(cls):
return (
Index('idx_%s_node_id_timestamp_desc' % cls.__tablename__,
'node_id', cls.timestamp.desc()),
)
class StatusLog(SurrogatePK, Model):
line = Column(db.Integer)
message = Column(db.String)
severity = Column(db.Integer)
filename = Column(db.String)
created = Column(db.DateTime, default=dt.datetime.utcnow)
version = Column(db.String)
node_id = reference_col('node', nullable=False)
node = relationship(
'Node',
backref=db.backref('status_logs', lazy='dynamic')
)
def __init__(self, line=None, message=None, severity=None,
filename=None, created=None, node=None, node_id=None,
version=None, **kwargs):
self.line = int(line)
self.message = message
self.severity = int(severity)
self.filename = filename
self.created = created
self.version = version
if node:
self.node = node
elif node_id:
self.node_id = node_id
@declared_attr
def __table_args__(cls):
return (
Index('idx_%s_node_id_created_desc' % cls.__tablename__,
'node_id', cls.created.desc()),
)
class DistributedQuery(SurrogatePK, Model):
description = Column(db.String, nullable=True)
sql = Column(db.String, nullable=False)
timestamp = Column(db.DateTime, default=dt.datetime.utcnow)
not_before = Column(db.DateTime, default=dt.datetime.utcnow)
    def __init__(self, sql, description=None, not_before=None):
self.sql = sql
self.description = description
self.not_before = not_before
class DistributedQueryTask(SurrogatePK, Model):
NEW = 0
PENDING = 1
COMPLETE = 2
FAILED = 3
guid = Column(db.String, nullable=False, unique=True)
status = Column(db.Integer, default=0, nullable=False)
timestamp = Column(db.DateTime)
distributed_query_id = reference_col('distributed_query', nullable=False)
distributed_query = relationship(
'DistributedQuery',
backref=db.backref('tasks',
cascade='all, delete-orphan',
lazy='dynamic'),
)
node_id = reference_col('node', nullable=False)
node = relationship(
'Node',
backref=db.backref('distributed_queries', lazy='dynamic'),
)
def __init__(self, node=None, node_id=None,
distributed_query=None, distributed_query_id=None):
self.guid = str(uuid.uuid4())
if node:
self.node = node
elif node_id:
self.node_id = node_id
if distributed_query:
self.distributed_query = distributed_query
elif distributed_query_id:
self.distributed_query_id = distributed_query_id
@declared_attr
def __table_args__(cls):
return (
Index('idx_%s_node_id_status' % cls.__tablename__, 'node_id', 'status'),
)
class DistributedQueryResult(SurrogatePK, Model):
columns = Column(JSONB)
timestamp = Column(db.DateTime, default=dt.datetime.utcnow)
distributed_query_task_id = reference_col('distributed_query_task', nullable=False)
distributed_query_task = relationship(
'DistributedQueryTask',
backref=db.backref('results',
cascade='all, delete-orphan',
lazy='joined'),
)
distributed_query_id = reference_col('distributed_query', nullable=False)
distributed_query = relationship(
'DistributedQuery',
backref=db.backref('results',
cascade='all, delete-orphan',
lazy='joined'),
)
def __init__(self, columns, distributed_query=None, distributed_query_task=None):
self.columns = columns
self.distributed_query = distributed_query
self.distributed_query_task = distributed_query_task
class Rule(SurrogatePK, Model):
name = Column(db.String, nullable=False)
alerters = Column(ARRAY(db.String), nullable=False)
description = Column(db.String, nullable=True)
conditions = Column(JSONB)
updated_at = Column(db.DateTime, nullable=False, default=dt.datetime.utcnow)
def __init__(self, name, alerters, description=None, conditions=None, updated_at=None):
self.name = name
self.description = description
self.alerters = alerters
self.conditions = conditions
self.updated_at = updated_at
@property
def template(self):
return string.Template("{name}\r\n\r\n{description}".format(
name=self.name, description=self.description or '')
)
class User(UserMixin, SurrogatePK, Model):
username = Column(db.String(80), unique=True, nullable=False)
email = Column(db.String)
password = Column(db.String, nullable=True)
created_at = Column(db.DateTime, nullable=False, default=dt.datetime.utcnow)
# oauth related stuff
social_id = Column(db.String)
first_name = Column(db.String)
last_name = Column(db.String)
    def __init__(self, username, password=None, email=None, social_id=None,
first_name=None, last_name=None):
self.username = username
self.email = email
if password:
self.set_password(password)
else:
            self.password = None
self.social_id = social_id
self.first_name = first_name
self.last_name = last_name
def set_password(self, password):
        self.update(password=bcrypt.generate_password_hash(password))
return
def check_password(self, value):
if not self.password:
# still do the computation
return bcrypt.generate_password_hash(value) and False
return bcrypt.check_password_hash(self.password, value)
|
direction_net/pano_utils/transformation.py | DionysisChristopoulos/google-research | 23,901 | 12767093 |
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Transformations for equirectangular and perspective images.
The coordinate system is the same as OpenGL's, where -Z is the camera looking
direction, +Y points up and +X points right.
Rotations are applied as pre-multiplication in all cases.
"""
import math
from pano_utils import geometry
from pano_utils import math_utils
import tensorflow.compat.v1 as tf
import tensorflow_addons as tfa
def equirectangular_sampler(images, spherical_coordinates):
"""Sample panorama images using a grid of spherical coordinates.
Args:
images: a 4-D tensor of shape `[BATCH, HEIGHT, WIDTH, CHANNELS]`.
spherical_coordinates: a float32 tensor with shape
[BATCH, sampling_height, sampling_width, 2] representing spherical
coordinates (colatitude, azimuth) of the sampling grids.
Returns:
a 4-D tensor of shape `[BATCH, sampling_height, sampling_width, CHANNELS]`
representing resampled images.
Raises:
ValueError: 'images' or 'spherical_coordinates' has the wrong dimensions.
"""
with tf.name_scope(
None, 'equirectangular_sampler', [images, spherical_coordinates]):
if len(images.shape) != 4:
raise ValueError("'images' has the wrong dimensions.")
if spherical_coordinates.shape[-1] != 2:
raise ValueError("'spherical_coordinates' has the wrong dimensions.")
shape = images.shape.as_list()
height, width = shape[1], shape[2]
padded_images = geometry.equirectangular_padding(images, [[1, 1], [1, 1]])
colatitude, azimuth = tf.split(spherical_coordinates, [1, 1], -1)
# The colatitude of the equirectangular image goes from 0 (the top row)
# to pi (the bottom), not inclusively. The azimuth goes from 0
# (the leftmost column) to 2*pi (the rightmost column).
# For example, azimuth-colatitude (0, pi/2) is the mid pixel in the first
# column of the equirect image.
# Convert spherical coordinates to equirectangular coordinates on images.
# +1 in the end because of the padding.
x_pano = (tf.mod(azimuth / math.pi, 2) * width / 2.0 - 0.5) + 1
y_pano = ((colatitude / math.pi) * height - 0.5) + 1
pano_coordinates = tf.concat([x_pano, y_pano], -1)
remapped = tfa.image.resampler(padded_images, pano_coordinates)
return remapped
def rectilinear_projection(images,
resolution,
fov,
rotations):
"""Convert equirectangular panoramic images to perspective images.
First, the panorama images are rotated by the input parameter "rotations".
Then, the region with the field of view "fov" centered at camera's look-at -Z
axis is projected into perspective images. The -Z axis corresponds to the
spherical coordinates (pi/2, pi/2) which is (HEIGHT/2, WIDTH/4) on the pano.
Args:
images: a 4-D tensor of shape `[BATCH, HEIGHT, WIDTH, CHANNELS]`.
resolution: a 2-D tuple or list containing the resolution of desired output.
fov: (float) camera's horizontal field of view in degrees.
rotations: [BATCH, 3, 3] rotation matrices.
Returns:
4-D tensor of shape `[BATCH, HEIGHT, WIDTH, CHANNELS]`
Raises:
ValueError: 'images' has the wrong dimensions.
ValueError: 'images' is not a float tensor.
ValueError: 'rotations' has the wrong dimensions.
"""
with tf.name_scope(None, 'rectilinear_projection',
[images, resolution, fov, rotations]):
if len(images.shape) != 4:
raise ValueError("'images' has the wrong dimensions.")
if images.dtype != tf.float32 and images.dtype != tf.float64:
raise ValueError("'images' must be a float tensor.")
if rotations.shape[-2:] != [3, 3]:
raise ValueError("'rotations' has the wrong dimensions.")
shape = images.shape.as_list()
batch = shape[0]
cartesian_coordinates = geometry.generate_cartesian_grid(resolution, fov)
# create batch -> [batch, height, width, 3]
cartesian_coordinates = tf.tile(
tf.expand_dims(cartesian_coordinates, axis=0), [batch, 1, 1, 1])
# The rotation matrices have to be [batch, height, width, 3, 3].
flip_x = tf.constant([[-1., 0., 0.], [0., 1., 0.], [0., 0., 1.]])
rotations = tf.matmul(flip_x,
tf.matmul(rotations, flip_x, transpose_a=True))
rotated_coordinates = tf.matmul(
rotations[:, tf.newaxis, tf.newaxis],
tf.expand_dims(cartesian_coordinates, -1), transpose_a=True)
axis_convert = tf.constant([[0., 0., 1.], [1., 0., 0.], [0., 1., 0.]])
rotated_coordinates = tf.matmul(axis_convert, rotated_coordinates)
rotated_coordinates = tf.squeeze(rotated_coordinates, -1)
spherical_coordinates = geometry.cartesian_to_spherical(rotated_coordinates)
# The azimuth of 'spherical_coordinates' decreases from left to right but
# the x should increase from left to right.
spherical_coordinates = tf.reverse(spherical_coordinates, [2])
return equirectangular_sampler(images, spherical_coordinates)
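# Usage sketch with assumed shapes: a [B, 512, 1024, 3] equirectangular batch, a
# 90-degree horizontal fov and [B, 3, 3] rotation matrices yield a [B, 256, 256, 3]
# perspective crop centered on the rotated -Z axis, e.g.
#   perspective = rectilinear_projection(panos, [256, 256], 90.0, rotations)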
def rotate_pano(images, rotations):
"""Rotate Panoramic images.
Convert the spherical coordinates (colatitude, azimuth) to Cartesian (x, y, z)
then apply SO(3) rotation matrices. Finally, convert them back to spherical
coordinates and remap the equirectangular images.
Note1: The rotations are applied to the sampling sphere instead of the camera.
The camera actually rotates R^T. I_out(x) = I_in(R * x), x are points in the
camera frame.
Note2: It uses a simple linear interpolation for now instead of slerp, so the
pixel values are not accurate but visually plausible.
Args:
images: a 4-D tensor of shape `[BATCH, HEIGHT, WIDTH, CHANNELS]`.
rotations: [BATCH, 3, 3] rotation matrices.
Returns:
4-D tensor of shape `[BATCH, HEIGHT, WIDTH, CHANNELS]`.
Raises:
ValueError: if the `images` or 'rotations' has the wrong dimensions.
"""
with tf.name_scope(None, 'rotate_pano', [images, rotations]):
if len(images.shape) != 4:
raise ValueError("'images' has the wrong dimensions.")
if rotations.shape[-2:] != [3, 3]:
raise ValueError("'rotations' must have 3x3 dimensions.")
shape = images.shape.as_list()
batch, height, width = shape[0], shape[1], shape[2]
spherical = tf.expand_dims(
geometry.generate_equirectangular_grid([height, width]), 0)
spherical = tf.tile(spherical, [batch, 1, 1, 1])
cartesian = geometry.spherical_to_cartesian(spherical)
axis_convert = tf.constant([[0., 1., 0.], [0., 0., -1.], [-1., 0., 0.]])
cartesian = tf.matmul(axis_convert, tf.expand_dims(cartesian, -1))
rotated_cartesian = tf.matmul(
rotations[:, tf.newaxis, tf.newaxis], cartesian)
rotated_cartesian = tf.squeeze(
tf.matmul(axis_convert, rotated_cartesian, transpose_a=True), -1)
rotated_spherical = geometry.cartesian_to_spherical(rotated_cartesian)
return equirectangular_sampler(images, rotated_spherical)
def rotate_image_in_3d(images,
input_rotations,
input_fov,
output_fov,
output_shape):
"""Return reprojected perspective view images given a rotated camera.
This function applies a homography H = K_output * R^T * K_input' where
K_output and K_input are the output and input camera intrinsics, R is the
rotation from the input images' frame to the target frame.
Args:
images: [BATCH, HEIGHT, WIDTH, CHANNEL] perspective view images.
input_rotations: [BATCH, 3, 3] rotations matrices from current camera frame
to target camera frame.
input_fov: [BATCH] a 1-D tensor (float32) of input field of view in degrees.
output_fov: (float) output field of view in degrees.
output_shape: a 2-D list of output dimension [height, width].
Returns:
reprojected images [BATCH, height, width, CHANNELS].
"""
with tf.name_scope(
None, 'rotate_image_in_3d',
[images, input_rotations, input_fov, output_fov, output_shape]):
if len(images.shape) != 4:
raise ValueError("'images' has the wrong dimensions.")
if input_rotations.shape[-2:] != [3, 3]:
raise ValueError("'input_rotations' must have 3x3 dimensions.")
shape = images.shape.as_list()
batch, height, width = shape[0], shape[1], shape[2]
cartesian = geometry.generate_cartesian_grid(output_shape, output_fov)
cartesian = tf.tile(
cartesian[tf.newaxis, :, :, :, tf.newaxis], [batch, 1, 1, 1, 1])
input_rotations = tf.tile(input_rotations[:, tf.newaxis, tf.newaxis, :],
[1]+output_shape+[1, 1])
cartesian = tf.squeeze(
tf.matmul(input_rotations, cartesian, transpose_a=True), -1)
image_coordinates = -cartesian[:, :, :, :2] / cartesian[:, :, :, -1:]
x, y = tf.split(image_coordinates, [1, 1], -1)
w = 2 * tf.tan(math_utils.degrees_to_radians(input_fov / 2))
h = 2 * tf.tan(math_utils.degrees_to_radians(input_fov / 2))
w = w[:, tf.newaxis, tf.newaxis, tf.newaxis]
h = h[:, tf.newaxis, tf.newaxis, tf.newaxis]
nx = x*width / w + width / 2 - 0.5
ny = -y * height / h + height / 2 - 0.5
return tfa.image.resampler(images, tf.concat([nx, ny], -1))
def rotate_image_on_pano(images, rotations, fov, output_shape):
"""Transform perspective images to equirectangular images after rotations.
Return equirectangular panoramic images in which the input perspective images
embedded in after the rotation R from the input images' frame to the target
frame. The image with the field of view "fov" centered at camera's look-at -Z
axis is projected onto the pano. The -Z axis corresponds to the spherical
coordinates (pi/2, pi/2) which is (HEIGHT/2, WIDTH/4) on the pano.
Args:
images: [BATCH, HEIGHT, WIDTH, CHANNEL] perspective view images.
rotations: [BATCH, 3, 3] rotations matrices.
fov: (float) images' field of view in degrees.
output_shape: a 2-D list of output dimension [height, width].
Returns:
equirectangular images [BATCH, height, width, CHANNELS].
"""
with tf.name_scope(None, 'rotate_image_on_pano',
[images, rotations, fov, output_shape]):
if len(images.shape) != 4:
raise ValueError("'images' has the wrong dimensions.")
if rotations.shape[-2:] != [3, 3]:
raise ValueError("'rotations' must have 3x3 dimensions.")
shape = images.shape.as_list()
batch, height, width = shape[0], shape[1], shape[2]
# Generate a mesh grid on a sphere.
spherical = geometry.generate_equirectangular_grid(output_shape)
cartesian = geometry.spherical_to_cartesian(spherical)
cartesian = tf.tile(
cartesian[tf.newaxis, :, :, :, tf.newaxis], [batch, 1, 1, 1, 1])
axis_convert = tf.constant([[0., -1., 0.], [0., 0., 1.], [1., 0., 0.]])
cartesian = tf.matmul(axis_convert, cartesian)
cartesian = tf.squeeze(
tf.matmul(rotations[:, tf.newaxis, tf.newaxis], cartesian), -1)
# Only take one hemisphere. (camera lookat direction)
hemisphere_mask = tf.cast(cartesian[:, :, :, -1:] < 0, tf.float32)
image_coordinates = cartesian[:, :, :, :2] / cartesian[:, :, :, -1:]
x, y = tf.split(image_coordinates, [1, 1], -1)
# Map pixels on equirectangular pano to perspective image.
nx = -x * width / (2 * tf.tan(
math_utils.degrees_to_radians(fov / 2))) + width / 2 - 0.5
ny = y * height / (2 * tf.tan(
math_utils.degrees_to_radians(fov / 2))) + height / 2 - 0.5
transformed = hemisphere_mask * tfa.image.resampler(
images, tf.concat([nx, ny], -1))
return transformed
|
docs/examples/led_board_2.py | NotBobTheBuilder/gpiozero | 743 | 12767106 | from gpiozero import LEDBoard
from signal import pause
leds = LEDBoard(5, 6, 13, 19, 26, pwm=True)
leds.value = (0.2, 0.4, 0.6, 0.8, 1.0)
pause()
|
site/flask/lib/python2.7/site-packages/openid/consumer/__init__.py | theholyhades1/tartanHacks2015 | 5,079 | 12767158 |
"""
This package contains the portions of the library used only when
implementing an OpenID consumer.
"""
__all__ = ['consumer', 'discover']
|
utils/test_analysis.py | ShenLeixian/data2vis | 103 | 12767209 | import os
import json
import matplotlib.pyplot as plt
import numpy as np
import scipy.stats as stats
# t_stat, p_val = stats.ttest_ind(sample1, sample2, equal_var=False)
test_result_dir = "utils/testresults"
all_results = {}
aggregate_terms = [
"count", "valid", "missing", "distinct", "sum", "mean", "average",
"variance", "variancep", "stdev", "stdevp", "stderr", "median", "q1", "q3",
"ci0", "ci1", "min", "max", "argmin", "argmax"
]
file_paths = [
"/vizmodeluninat5.json", "/vizmodeluninat10.json",
"/vizmodeluninat15.json", "/vizmodeluninat20.json", "/vizmodeluni5.json",
"/vizmodeluni10.json", "/vizmodeluni15.json", "/vizmodeluni20.json",
"/vizmodelbi5.json", "/vizmodelbi10.json", "/vizmodelbi15.json",
"/vizmodelbi20.json"
]
def analyze_test_suite(test_dataset_directory):
# for subdir, dirs, files in os.walk(test_dataset_directory):
# for file in files:
# filepath = subdir + os.sep + file
# if filepath.endswith(
# "json") and not filepath.endswith("lsit.json"):
for filepath in file_paths:
filepath = test_result_dir + filepath
# data = json.load(open(filepath))
# print(filepath)
analyze_data(filepath)
def is_valid_aggregate(agg_val):
if (agg_val not in aggregate_terms):
# print("issh", agg_val)
return False
else:
return True
def computer_anova():
print("anova")
def analyze_data(filepath):
data = json.load(open(filepath))
beam_width = data["beamwidth"]
valid_json_array = []
valid_vega_array = []
phantom_count_array = []
x = list(range(0, 100))
for row in data["data"]:
valid_json_count = row["validjsoncount"] / beam_width
valid_json_array.append(valid_json_count)
valid_vega_count = row["validvegacount"]
vs_array = row["vegaspecarray"]
# mark specs with incorrect aggregation value as invalid vega
for vs_row in vs_array:
if ("aggregate" in vs_row["encoding"]["y"]):
if not is_valid_aggregate(
vs_row["encoding"]["y"]["aggregate"]):
valid_vega_count -= 1
else:
if ("aggregate" in vs_row["encoding"]["x"]):
if not is_valid_aggregate(
vs_row["encoding"]["x"]["aggregate"]):
valid_vega_count -= 1
# print(valid_vega_count, row["validjsoncount"])
valid_vegap_count = valid_vega_count
valid_vega_count = valid_vega_count / beam_width
valid_vega_array.append(valid_vega_count)
if (valid_vega_count == 0):
phantom_count = 0
else:
phantom_count = row["phantomcount"] / valid_vegap_count
phantom_count_array.append(phantom_count)
# print("Count", row["phantomcount"], valid_vegap_count)
# print(x, valid_json_array)
# plt.plot(x, valid_json_array)
# plt.plot(x, valid_vega_array)
# plt.plot(x, phantom_count_array)
# plt.show()
print(
filepath.split("vizmodel")[1], "Json:",
round(np.mean(valid_json_array), 3), "Vega",
round(np.mean(valid_vega_array), 3), "Mean % Phantom",
round(np.mean(phantom_count_array), 3))
result = {"json:": valid_json_array, "vega": valid_vega_array}
analyze_test_suite(test_result_dir)
# data = json.load(open("utils/testresults/vizmodelbi15.json"))
# print(len(data["data"]))
# analyze_data("utils/testresults/vizmodeluninat15.json")
|
safekit/models/tiered_lm.py | duebukua/safekit | 117 | 12767213 |
#!/usr/bin/env python
"""
This is a two tiered language model for anomaly detection, where the second tier LSTM (log line level)
takes the concatenation of the average sentence vector and final hidden state
from the lower tier (token level) LSTM as input, creating a new context vector and hidden state
for the given user.
Example Command for running a model configuration
-------------------------------------------------
**Raw (character token) tiered model** (The jagged parameter lets the model know there are variable length sequences) ::
python safekit/models/tiered_lm.py results/ safekit/features/specs/lm/lanl_char_config.json data_examples/lanl/lm_feats/raw_day_split/ -test -skipsos -jagged
.. Note ::
The output results will be printed to /tmp/lanl_result/ and then moved to results/ upon completion
to avoid experiment slowdown of constant network traffic.
File name convention:
---------------------
- em: embedding size for token embedding
- ns: number of loglines per user per mini-batch for trunctated back propagation through time
- mb: Minibatch size (mini-batch over users)
- lr: learnrate (step size for gradient descent)
- cl: context layers (number of hidden layers for top level (log line level) context rnn)
- lml: language model layers (number of hidden layers for the bottom level, token level, rnn)
- rs: random seed for reproducible results
stdout
------
For each mini-batch the following is printed to standard output ::
batchsize line_number second status filename index current_loss
Where:
- batchsize: The size of the mini-batch
- line_number: Line number from original auth.txt file (may be off by 1)
- second: The second of the first event in the mini-batch
- status: Whether the model is updating or merely forward propagating
- filename: The current file being processed
- index: The number of samples processed to date
- current_loss: The average loss over the mini-batch
File output
-----------
::
batch_num line second day user red loss
Where:
- batch_num: The mini-batch this event was a part of
- line: Line number from original auth.txt file (may be off by 1)
- second: The second which the event occurred on
- day: The day the event occurred on
- user: The user who performed the event
- red: Whether this event was a labeled red team activity (1 for red team activity 0 otherwise)
- loss: The anomaly score for this event
.. Note ::
The runtime of the experiment is also printed to a file called runtimes.txt at the end of training
Input Data
----------
The format of the input makes the following assumptions:
- Input files are together in datafolder, one file for each day.
- Input files are plain text files with one line of integers per log line representing meta data and the tokens from log text.
- Input format for fixed length sequences ::
line_nums second day user red logtokenid1 .... logtokenid_SentenceLen
- Zero paded Input format for jagged sequences ::
line_nums second day user red SentenceLen logtokenid1 .... logtokenid_SentenceLen 0 0 .... 0
"""
import os
import sys
# So we can run this code on arbitrary environment which has tensorflow but not safekit installed
cyberpath = '/'.join(os.path.realpath(__file__).split('/')[:-3])
sys.path.insert(0, cyberpath)
import tensorflow as tf
import numpy as np
import time
from safekit.batch import OnlineLMBatcher
from simple_lm import write_results, CELL
from safekit.tf_ops import lm_rnn, bidir_lm_rnn
from safekit.graph_training_utils import ModelRunner
from safekit.util import get_mask, Parser
import json
import math
def return_parser():
parser = Parser()
parser.add_argument('results_folder', type=str,
help='The folder to print results to.')
parser.add_argument('config', type=str,
help='The data spec.')
parser.add_argument("datafolder", type=str,
help="File with token features")
parser.add_argument('-encoding', type=str, default=None,
help='Can be "oct", "raw" or "word"')
parser.add_argument("-em", type=int, default=5,
help="Dimension of token embeddings")
parser.add_argument("-numsteps", type=int, default=3,
help="length of unrolled context_rnn, number of log lines per user per train step")
parser.add_argument('-mb', type=int, default=64,
help='Number of users in mini-batch.')
parser.add_argument('-learnrate', type=float, default=0.001,
help='Step size for gradient descent.')
parser.add_argument("-context_layers", type=int, nargs='+', default=[10],
help='List of hidden layer sizes for context lstm.')
parser.add_argument('-lm_layers', type=int, nargs='+', default=[5],
help='List of hidden layer sizes for token lstm.')
parser.add_argument('-debug', action='store_true',
help='Use this flag to print feed dictionary contents and dimensions.')
parser.add_argument('-random_seed', type=int, default=5,
help='Random seed for reproducible experiments.')
parser.add_argument('-jagged', action='store_true',
help='Whether using sequences of variable length (Input should'
                             ' be zero-padded to max_sequence_length).')
parser.add_argument('-skipsos', action='store_true',
help='Whether to skip a start of sentence token.')
parser.add_argument('-bidir', action='store_true',
help='Whether to use bidirectional lstm for lower tier.')
parser.add_argument('-test', action='store_true',
help='Whether to run on a subset of the data (5000 lines from days 1,2,3) or the entire set.')
parser.add_argument('-verbose', type=int, default=1,
help='Whether to print loss during training.')
parser.add_argument('-delimiter', type=str, default=',',
help="Delimiter for input text file")
parser.add_argument('-cell_type', type=str, default='lstm',
help='Can be either "lstm", "ident_ran", or "ran"')
parser.add_argument('-upper_cell_type', type=str, default='lstm',
help='Can be either "lstm", "ident_ran", or "ran"')
return parser
class ContextRNN:
"""
Log line level LSTM cell that keeps track of it's last lstm state tuple
"""
def __init__(self, layers, initial_state,
cell=tf.nn.rnn_cell.LSTMCell):
"""
:param layers: List of hidden layer sizes.
:param initial_state: List of numlayers lists of tensors (cell_state, hidden_state),
or List of lstm state tuples (which are named tuples of tensors (c=cell_state, h=hidden_state)
:param cell: Type of rnn cell to use.
"""
self.cell_type = cell
self.cell_stack = tf.nn.rnn_cell.MultiRNNCell([self.cell_type(cell_size) for cell_size in layers])
self.layers = layers
self.state = initial_state
def __call__(self, lower_outputs, final_hidden, seq_len):
"""
        :param lower_outputs: List of output tensors from the token level rnn, one per time step.
        :param final_hidden: The final hidden state from the token level rnn.
        :param seq_len: A 1D tensor of size mb giving lengths of sequences in mb for this time step
:return: (tensor, LSTMStateTuple) output, state
"""
ctxt_input = ContextRNN._create_input(lower_outputs, final_hidden, seq_len)
output, self.state = self.cell_stack(ctxt_input, self.state)
return output, self.state
@staticmethod
def _create_input(lower_outputs, final_hidden, seq_len):
"""
:param lower_outputs: The list of output Tensors from the token level rnn
:param final_hidden: The final hidden state from the token level rnn
        :param seq_len: A 1D tensor of size mb giving lengths of token level sequences in mb for this time step
:return: A tensor which is the concatenation of the hidden state averages and final hidden state from lower
tier model. Used as input to context rnn
"""
if seq_len is not None:
mean_hidden = tf.reduce_sum(tf.stack(lower_outputs, axis=0), axis=0)/seq_len
else:
mean_hidden = tf.reduce_mean(tf.stack(lower_outputs, axis=0), axis=0)
return tf.concat([mean_hidden, final_hidden], 1)
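# Shape sketch for ContextRNN._create_input, assuming a mini-batch of mb users and a
# token-level hidden size h:
#   lower_outputs: list of sentence_length tensors, each [mb, h]
#   final_hidden:  [mb, h]
#   returned context input: [mb, 2*h] (mean-over-time hidden state concatenated with the final hidden state)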
def tiered_lm(token_set_size, embedding_size, ph_dict, context_layers, lm_layers,
numsteps, bidir=False, jagged=False):
"""
:param token_set_size: (int) Number of unique tokens in token set
:param embedding_size: (int) Dimensionality of token embeddings
:param ph_dict: dictionary of tensorflow placeholders and lists of tensorflow placeholders
:param context_layers: List of hidden layer sizes for stacked context LSTM
:param lm_layers: list of hidden layer sizes for stacked sentence LSTM
:param numsteps: How many steps (log lines) to unroll the upper tier RNN
:param bidir: Whether to use bidirectional LSTM for lower tier model
:param jagged: Whether or not variable length sequences are used
:return: total_loss (scalar tensor),
context_vector (tensor),
line_loss_matrix (tensor), Losses for each line in mini-batch
context_state (LSTMStateTuple) Final state of upper tier model
"""
if bidir:
language_model = bidir_lm_rnn
else:
language_model = lm_rnn
# =========================================================
# ========== initialize token level lstm variables ========
# =========================================================
if jagged:
ph_dict['lens'] = []
ph_dict['masks'] = []
context_vector = tf.placeholder(tf.float32, [None, ctxt_size], name="context_vector")
ph_dict['context_vector'] = context_vector
tf.add_to_collection('context_vector', ph_dict['context_vector'])
token_embed = tf.Variable(tf.truncated_normal([token_set_size, embedding_size])) # Initial embeddings vocab X embedding size
total_loss = 0.0
# =========================================================
# ======= initialize log line level (context) lstm ========
# =========================================================
ph_dict['c_state_init'] = [tf.placeholder(tf.float32, [None, c_size]) for c_size in context_layers]
ph_dict['h_state_init'] = [tf.placeholder(tf.float32, [None, h_size]) for h_size in context_layers]
context_init = [tf.nn.rnn_cell.LSTMStateTuple(ph_dict['c_state_init'][i],
ph_dict['h_state_init'][i])
for i in range(len(context_layers))]
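    # Feeding per-layer (c, h) placeholders as the initial state lets the caller carry the
    # upper tier LSTM state across minibatches instead of re-initialising it every step.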
ctxt_rnn = ContextRNN(context_layers, context_init, cell=CELL[args.upper_cell_type])
# =========================================================
# ======= initiate loop that ties together tiered lstm ====
# =========================================================
with tf.variable_scope("reuse_scope") as vscope:
for i in range(numsteps):
x = tf.placeholder(tf.int64, [None, sentence_length])
t = tf.placeholder(tf.int64, [None, sentence_length-2*bidir])
ph_dict['x'].append(x)
ph_dict['t'].append(t)
if jagged:
seq_len = tf.placeholder(tf.int32, [None])
ph_dict['lens'].append(seq_len)
else:
seq_len = None
token_losses, hidden_states, final_hidden = language_model(x, t, token_embed, lm_layers,
seq_len=seq_len,
context_vector=context_vector,
cell=CELL[args.cell_type])
if jagged:
ph_dict['masks'].append(tf.placeholder(tf.float32, [None, sentence_length-2*bidir]))
token_losses *= ph_dict['masks'][-1]
line_losses = tf.reduce_sum(token_losses, axis=1) # batch_size X 1
sequence_lengths = tf.reshape(tf.cast(ph_dict['lens'][-1], tf.float32), (-1, 1))
else:
line_losses = tf.reduce_mean(token_losses, axis=1) # batch_size X 1
sequence_lengths = None
avgloss = tf.reduce_mean(line_losses) # scalar
total_loss += avgloss
if i == 0:
line_loss_matrix = tf.reshape(line_losses, [1, -1])
tf.add_to_collection('first_line_loss_matrix', line_loss_matrix)
else:
line_loss_matrix = tf.concat((line_loss_matrix, tf.reshape(line_losses, [1, -1])), 0)
context_vector, context_state = ctxt_rnn(hidden_states,
final_hidden,
sequence_lengths)
tf.add_to_collection('context_vector', context_vector)
tf.add_to_collection('context_state', context_state)
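            # Reuse the token level and context level variables from here on, so all
            # numsteps unrolled copies of the graph share a single set of weights.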
tf.get_variable_scope().reuse_variables()
total_loss /= float(numsteps)
return total_loss, context_vector, line_loss_matrix, context_state
if __name__ == "__main__":
# ===========================================================================
# =========================PARSE ARGUMENTS===================================
# ===========================================================================
args = return_parser().parse_args()
conf = json.load(open(args.config, 'r'))
assert all(x == args.context_layers[0] for x in args.context_layers), 'Different sized context layers not supported.'
assert args.numsteps > 1, 'Must have at least two upper tier time steps to build graph for tiered lstm.'
if not args.results_folder.endswith('/'):
args.results_folder += '/'
tf.set_random_seed(args.random_seed)
np.random.seed(args.random_seed)
sentence_length = (conf['sentence_length'] - 1) - int(args.skipsos) + int(args.bidir)
token_set_size = conf['token_set_size']
ctxt_size = args.context_layers[0]
direction = ('fwd', 'bidir')[args.bidir]
results_file = 'tier_%s_%s_%s_%s__em_%s__ns_%s__mb_%s__lr_%s__cl_%s__lml_%s__rs_%s' % (direction,
args.encoding,
args.cell_type,
time.ctime(time.time()).replace(' ', '-'),
args.em,
args.numsteps,
args.mb,
args.learnrate,
args.context_layers[0],
args.lm_layers[0],
args.random_seed)
    # store per-line predictions in a temporary file under /tmp/lanl_results (moved to the results folder after training)
if "lanl_results" not in os.listdir("/tmp"):
os.system("mkdir /tmp/lanl_results; chmod g+rwx /tmp/lanl_results")
outfile = open("/tmp/lanl_results/" + results_file, 'w')
outfile.write("batch line second day user red loss\n")
mode = ('fixed', 'update')
jag = int(args.jagged)
skipsos = int(args.skipsos)
# ===========================================================================
# =========================BUILD GRAPH=======================================
# ===========================================================================
ph_dict = {'x': [], 't': []}
dummy_loss = tf.constant(1)
total_loss, context_vector, line_loss_matrix, context_state = tiered_lm(token_set_size, args.em,
ph_dict,
args.context_layers,
args.lm_layers,
args.numsteps,
bidir=args.bidir,
jagged=args.jagged)
tiered_network_model = ModelRunner(total_loss, ph_dict, learnrate=args.learnrate,
debug=args.debug, decay=True,
decay_rate=0.99, decay_steps=20)
# ===========================================================================
# =========================TRAINING LOOP=====================================
# ===========================================================================
init_triple = (np.zeros([1, ctxt_size], np.float32), # context
[np.zeros([1, c_size], np.float32) for c_size in args.context_layers], # state
[np.zeros([1, h_size], np.float32) for h_size in args.context_layers]) # hidden
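    # Zero (context, cell state, hidden state) triple, presumably used by OnlineLMBatcher to
    # seed the upper tier state for unseen users; per-user triples are then tracked through
    # data.state_triples / data.update_state_triples below.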
start_time = time.time()
def trainday(is_training, f, states, logs):
num_processed = 0
data = OnlineLMBatcher(args.datafolder + f, init_triple,
batch_size=args.mb, num_steps=args.numsteps, skiprows=0)
do_update = is_training
if states is not None:
data.state_triples = states
batch, state_triple = data.next_batch()
batch_num = 0
stragglers = False
while batch is not None:
if data.flush:
do_update = False
if len(batch.shape) == 2: # Straggler log lines that don't fit into num_steps by end of day are run in large batches one step at a time
stragglers = True
batch = batch.reshape((1, batch.shape[0], batch.shape[1]))
endx = batch.shape[2] - int(not args.bidir)
endt = batch.shape[2] - int(args.bidir)
datadict = {'line': batch[:, :, 0],
'second': batch[:, :, 1],
'day': batch[:, :, 2],
'user': batch[:, :, 3],
'red': batch[:, :, 4],
'x': [batch[0, :, 5 + jag + skipsos:endx]] * args.numsteps,
't': [batch[0, :, 6 + jag + skipsos:endt]] * args.numsteps,
'context_vector': state_triple['context_vector'],
'c_state_init': state_triple['c_state_init'],
'h_state_init': state_triple['h_state_init']}
if args.jagged:
datadict['lens'] = [batch[0, :, 5] - skipsos] * args.numsteps
datadict['masks'] = [get_mask(seq_length - 2 * args.bidir, sentence_length - 2 * args.bidir) for
seq_length in datadict['lens']]
for i in range(len(datadict['x'])):
assert np.all(datadict['lens'][i] <= datadict['x'][i].shape[1]), \
'Sequence found greater than num_tokens_predicted'
assert np.nonzero(datadict['lens'][i])[0].shape[0] == datadict['lens'][i].shape[0], \
'Sequence lengths must be greater than zero.' \
'Found zero length sequence in datadict["lengths"]: %s' % datadict['lens']
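                # The same straggler lines are fed to every unrolled step, so only the first
                # step's context tensors (pulled from the graph collections) are evaluated.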
first_output_context_state = tf.get_collection('context_state')[0]
eval_tensors = ([total_loss,
tf.get_collection('context_vector')[1],
tf.get_collection('first_line_loss_matrix')[0]] +
[state_tuple.c for state_tuple in first_output_context_state] +
[state_tuple.h for state_tuple in first_output_context_state])
else: # Ordinary batching and matrix flush batching
batch = np.transpose(batch, axes=(1, 0, 2))
endx = batch.shape[2] - int(not args.bidir)
endt = batch.shape[2] - int(args.bidir)
datadict = {'line': batch[:, :, 0],
'second': batch[:, :, 1],
'day': batch[:, :, 2],
'user': batch[:, :, 3],
'red': batch[:, :, 4],
'x': [batch[i, :, 5 + jag + skipsos:endx] for i in range(args.numsteps)],
't': [batch[i, :, 6 + jag + skipsos:endt] for i in range(args.numsteps)],
'context_vector': state_triple['context_vector'],
'c_state_init': state_triple['c_state_init'],
'h_state_init': state_triple['h_state_init']}
if args.jagged:
datadict['lens'] = [batch[i, :, 5] - skipsos for i in range(args.numsteps)]
datadict['masks'] = [get_mask(seq_length-args.bidir-args.skipsos,
sentence_length-2*args.bidir) for seq_length in datadict['lens']]
for i in range(len(datadict['x'])):
assert np.all(datadict['lens'][i] <= datadict['x'][i].shape[1]), \
'Sequence found greater than num_tokens_predicted'
assert np.nonzero(datadict['lens'][i])[0].shape[0] == datadict['lens'][i].shape[0], \
'Sequence lengths must be greater than zero.' \
'Found zero length sequence in datadict["lengths"]: %s' % datadict['lens']
eval_tensors = ([total_loss, context_vector, line_loss_matrix] +
[state_tuple.c for state_tuple in context_state] +
[state_tuple.h for state_tuple in context_state])
# output dims: 0: Nothing, 1 (total_loss): scalar, 2 (context_vector): num_users X hidden_size,
# 3 (line_loss_matrix): num_users X num_steps
output = tiered_network_model.train_step(datadict, eval_tensors=eval_tensors,
update=do_update)
loss, context, loss_matrix = output[1], output[2], output[3]
current_context_state = output[4:4 + len(args.context_layers)]
current_context_hidden = output[4 + len(args.context_layers):4 + 2*len(args.context_layers)]
data.update_state_triples([context, current_context_state, current_context_hidden])
if args.verbose:
print('%s %s %s %s %s %s %r' % (datadict['day'].shape[1],
datadict['line'][0][0],
datadict['second'][0][0],
mode[do_update],
f,
data.line_num, loss))
if math.isnan(loss) or math.isinf(loss):
print('Exiting due to divergence!')
exit(1)
if not is_training:
num_processed += batch.shape[0] * batch.shape[1]
if not stragglers:
assert loss_matrix.shape[0] * loss_matrix.shape[1] == batch.shape[0] * batch.shape[1], 'Batch size %s is different from output size %s. May be losing datapoints.' % (batch.shape, loss_matrix.shape)
write_results(datadict, loss_matrix, outfile, batch_num)
else:
assert loss_matrix[0, :].shape[0] == batch.shape[0] * batch.shape[1], 'Batch size is different from output size. May be losing datapoints.'
write_results(datadict, loss_matrix[0, :], outfile, batch_num)
batch, state_triple = data.next_batch()
batch_num += 1
return data.state_triples, data.user_logs, num_processed
weekend_days = conf["weekend_days"]
if args.test:
files = conf["test_files"] # 5000 lines from each of day 0, day 1 and day 2
else:
files = [str(i) + '.txt' for i in range(conf["num_days"]) if i not in weekend_days]
states1 = None
logs1 = None
number_processed = 0
for idx, f in enumerate(files[:-1]):
states1, logs1, num_processed = trainday(True, f, states1, logs1)
states2, logs2, num_processed = trainday(False, files[idx + 1], states1, logs1)
number_processed += num_processed
outfile.close()
total_time = time.time() - start_time
print('elapsed time: %s' % total_time)
os.system("mv /tmp/lanl_results/%s %s" % (results_file, args.results_folder + results_file))
print('number processed', number_processed)
|
app/grandchallenge/reader_studies/templatetags/get_ground_truth.py | njmhendrix/grand-challenge.org | 101 | 12767218 | <filename>app/grandchallenge/reader_studies/templatetags/get_ground_truth.py
from django import template
register = template.Library()
@register.simple_tag
def get_ground_truth(obj, image, question):
"""
Get the ground truth value for the image/question combination in reader
study obj.
"""
ground_truths = obj.statistics["ground_truths"]
return ground_truths[image][question]
|
inference/modulated_detection.py | kylevedder/mvits_for_class_agnostic_od | 114 | 12767231 | import numpy as np
import torch
from PIL import Image
import torchvision.transforms as T
from infer import Inference
from utils.nms import nms
torch.set_grad_enabled(False)
def class_agnostic_nms(boxes, scores, iou=0.5):
if len(boxes) > 1:
boxes, scores = nms(np.array(boxes), np.array(scores), iou)
return list(boxes), list(scores)
else:
return boxes, scores
def generate_image_crops(img, num_crops=8):
"""
Note: num_crops must be greater than 2 and of multiple of 2
"""
assert num_crops > 2
assert num_crops % 2 == 0
# Get the image width and height
img_w, img_h = img.size
crops = []
coordinates = []
crops.append(img)
coordinates.append((0, 0, img_w, img_h))
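    # The first "crop" is the full image itself; the remaining crops tile the image on a
    # regular grid and are resized back to the full image resolution.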
crop_chunks_x = int(num_crops / 2)
crop_chunks_y = int(num_crops / crop_chunks_x)
x_inc = int(img_w / crop_chunks_y)
y_inc = int(img_h / crop_chunks_y)
x_space = np.linspace(0, img_w - x_inc, crop_chunks_y)
    y_space = np.linspace(0, img_h - y_inc, int(num_crops / crop_chunks_y))
if num_crops > 1:
for x in x_space:
            for y in y_space:
x1, y1 = x, y
x2, y2 = x1 + x_inc, y1 + y_inc
crops.append((img.crop((x1, y1, x2, y2))).resize((img_w, img_h)))
coordinates.append((x1, y1, x2, y2))
return crops, coordinates, (img_w, img_h)
def scale_boxes(boxes, coordinates, img_dims):
x1, y1, x2, y2 = coordinates
img_w, img_h = img_dims
w = x2 - x1
h = y2 - y1
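    # Map box coordinates predicted on the resized crop back into the original image frame.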
for b in boxes:
b[0], b[1], b[2], b[3] = int((b[0] / img_w) * w) + x1, int((b[1] / img_h) * h) + y1, \
int((b[2] / img_w) * w) + x1, int((b[3] / img_h) * h) + y1
return boxes
class ModulatedDetection(Inference):
"""
The class supports the inference using both MDETR & MDef-DETR models.
"""
def __init__(self, model, confidence_thresh=0.0):
Inference.__init__(self, model)
self.conf_thresh = confidence_thresh
self.transform = T.Compose([
T.Resize(800),
T.ToTensor(),
T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
@staticmethod
def box_cxcywh_to_xyxy(x):
x_c, y_c, w, h = x.unbind(1)
b = [(x_c - 0.5 * w), (y_c - 0.5 * h),
(x_c + 0.5 * w), (y_c + 0.5 * h)]
return torch.stack(b, dim=1)
def rescale_bboxes(self, out_bbox, size):
img_w, img_h = size
b = self.box_cxcywh_to_xyxy(out_bbox)
b = b * torch.tensor([img_w, img_h, img_w, img_h], dtype=torch.float32)
return b
def infer_image(self, image_path, **kwargs):
caption = kwargs["caption"]
# Read the image
im = Image.open(image_path)
imq = np.array(im)
if len(imq.shape) != 3:
im = im.convert('RGB')
img = self.transform(im).unsqueeze(0).cuda()
# propagate through the models
memory_cache = self.model(img, [caption], encode_and_save=True)
outputs = self.model(img, [caption], encode_and_save=False, memory_cache=memory_cache)
# keep only predictions with self.conf_thresh+ confidence
probas = 1 - outputs['pred_logits'].softmax(-1)[0, :, -1].cpu()
keep = (probas > self.conf_thresh).cpu()
# convert boxes from [0; 1] to image scales
bboxes_scaled = self.rescale_bboxes(outputs['pred_boxes'].cpu()[0, keep], im.size)
kept_probs = probas[keep]
# Convert outputs to the required format
bboxes = list(bboxes_scaled.numpy())
probs = list(kept_probs.numpy())
boxes, scores = [], []
for b, conf in zip(bboxes, probs):
boxes.append([int(b[0]), int(b[1]), int(b[2]), int(b[3])])
scores.append(conf)
# Read image, perform inference, parse results, append the predicted boxes to detections
return boxes, scores
def infer_image_multi_crop(self, image_path, **kwargs):
caption = kwargs["caption"]
# Read the image
im = Image.open(image_path)
crops, coordinates, img_dims = generate_image_crops(im)
imgs = [self.transform(crop).unsqueeze(0).cuda() for crop in crops]
imgs = torch.cat(imgs)
# propagate through the models
memory_cache = self.model(imgs, [caption for i in range(imgs.shape[0])], encode_and_save=True)
outputs = self.model(imgs, [caption], encode_and_save=False, memory_cache=memory_cache)
all_boxes = []
all_scores = []
for i in range(len(crops)):
# keep only predictions with self.conf_thresh+ confidence
probas = 1 - outputs['pred_logits'].softmax(-1)[i, :, -1].cpu()
keep = (probas > self.conf_thresh).cpu()
# convert boxes from [0; 1] to image scales
bboxes_scaled = self.rescale_bboxes(outputs['pred_boxes'].cpu()[i, keep], im.size)
kept_probs = probas[keep]
# Convert outputs to the required format
bboxes = list(bboxes_scaled.numpy())
probs = list(kept_probs.numpy())
boxes, scores = [], []
for b, conf in zip(bboxes, probs):
boxes.append([int(b[0]), int(b[1]), int(b[2]), int(b[3])])
scores.append(conf)
# Read image, perform inference, parse results, append the predicted boxes to detections
boxes = scale_boxes(boxes, coordinates[i], img_dims)
all_boxes += boxes
all_scores += scores
        all_boxes, all_scores = class_agnostic_nms(all_boxes, all_scores)
return all_boxes, all_scores
|
Calibration/EcalAlCaRecoProducers/python/alcastreamEcalEtaCalib_cff.py | ckamtsikis/cmssw | 852 | 12767235 | import FWCore.ParameterSet.Config as cms
import HLTrigger.HLTfilters.hltHighLevel_cfi
ecaletaCalibHLT = HLTrigger.HLTfilters.hltHighLevel_cfi.hltHighLevel.clone(
# HLTPaths = ['AlCa_EcalEta'],
eventSetupPathsKey='EcalCalEtaCalib',
throw = False
)
|
format.py | my-personal-forks/dart-sublime-bundle | 182 | 12767244 | <gh_stars>100-1000
# Copyright (c) 2014, <NAME>. Please see the AUTHORS file for details.
# All rights reserved. Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
from subprocess import PIPE
from subprocess import Popen
import sublime
import sublime_plugin
from Dart.sublime_plugin_lib import PluginLogger
from Dart.sublime_plugin_lib.plat import supress_window
from Dart import analyzer
from Dart.lib.sdk import DartFormat
_logger = PluginLogger(__name__)
class DartFormatCommand(sublime_plugin.WindowCommand):
'''Formats the selected text in Sublime Text using `dartfmt`.
Notes:
- Can be used as a build system.
'''
def run(self, **kwargs):
view = self.window.active_view()
if not view:
return
analyzer.g_server.send_format_file(view)
class DartReplaceRegion(sublime_plugin.TextCommand):
def run(self, edit, region, text):
reg = sublime.Region(*region)
self.view.replace(edit, reg, text)
self.view.run_command('reindent')
|
InvenTree/part/migrations/0011_part_revision.py | ArakniD/InvenTree | 656 | 12767274 | # Generated by Django 2.2.2 on 2019-06-20 11:37
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('part', '0010_auto_20190620_2135'),
]
operations = [
migrations.AddField(
model_name='part',
name='revision',
field=models.CharField(blank=True, help_text='Part revision or version number', max_length=100),
),
]
|
mtgjson5/classes/mtgjson_deck_header.py | 0az/mtgjson | 512 | 12767309 | <gh_stars>100-1000
"""
MTGJSON Singular Deck Header Object
"""
from typing import Any, Dict
from ..classes.mtgjson_deck import MtgjsonDeckObject
from ..utils import to_camel_case
class MtgjsonDeckHeaderObject:
"""
MTGJSON Singular Deck Header Object
"""
code: str
file_name: str
name: str
release_date: str
type: str
def __init__(self, output_deck: MtgjsonDeckObject) -> None:
"""
Initialize the header given a deck
"""
self.code = output_deck.code
self.file_name = output_deck.file_name
self.name = output_deck.name
self.release_date = output_deck.release_date
self.type = output_deck.type
def to_json(self) -> Dict[str, Any]:
"""
Support json.dump()
:return: JSON serialized object
"""
return {
to_camel_case(key): value
for key, value in self.__dict__.items()
if "__" not in key and not callable(value)
}
|
exercises/bob/example.py | kishankj/python | 1,177 | 12767326 | <gh_stars>1000+
def response(hey_bob):
hey_bob = hey_bob.strip()
if _is_silence(hey_bob):
return 'Fine. Be that way!'
if _is_shouting(hey_bob):
if _is_question(hey_bob):
return "Calm down, I know what I'm doing!"
else:
return 'Whoa, chill out!'
elif _is_question(hey_bob):
return 'Sure.'
else:
return 'Whatever.'
def _is_silence(hey_bob):
return hey_bob == ''
def _is_shouting(hey_bob):
return hey_bob.isupper()
def _is_question(hey_bob):
return hey_bob.endswith('?')
|
chaos_genius/alerts/utils.py | rsohlot/chaos_genius | 320 | 12767375 | """Common utilities for alerts and alert digests."""
import os
from math import floor, log10
from typing import List, Optional, Union
from jinja2 import Environment, FileSystemLoader, select_autoescape
from chaos_genius.alerts.email import send_static_alert_email
from chaos_genius.core.utils.round import round_number
from chaos_genius.settings import CHAOSGENIUS_WEBAPP_URL
class AlertException(Exception):
"""A general exception in a specific alert.
Stores and prints alert ID and KPI ID.
"""
def __init__(self, message: str, alert_id: int, kpi_id: Optional[int] = None):
"""Initialize a new alert exception.
Args:
message: exception message.
alert_id: ID of alert where this originated from.
kpi_id: ID of KPI associated with the alert.
"""
if kpi_id:
message = f"(KPI: {kpi_id}, Alert: {alert_id}) {message}"
else:
message = f"(Alert: {alert_id}) {message}"
super().__init__(message)
def webapp_url_prefix():
"""Constructs webapp URL prefix with a trailing slash.
    If not set up, this will be an invalid URL with an appropriate message.
TODO: redirect to docs link showing how to setup instead of invalid URL.
"""
if not CHAOSGENIUS_WEBAPP_URL:
return "Webapp URL not setup. Please setup CHAOSGENIUS_WEBAPP_URL in the environment file./"
forward_slash = "/" if not CHAOSGENIUS_WEBAPP_URL[-1] == "/" else ""
return f"{CHAOSGENIUS_WEBAPP_URL}{forward_slash}"
def change_message_from_percent(percent_change: Union[str, int, float]) -> str:
"""Creates a change message from given percentage change.
percent_change will be:
- "–" in case the last data point was missing or both the points had values 0
- 0 (int) in case there was no change
- positive value (int/float) in case there was an increase
- negative value (int/float) in case there was a decrease
"""
if isinstance(percent_change, str):
return percent_change
elif percent_change == 0:
return "No change (–)"
elif percent_change > 0:
return f"Increased by ({percent_change}%)"
else:
return f"Decreased by ({percent_change}%)"
def find_percentage_change(
curr_val: Union[int, float], prev_val: Optional[Union[int, float]]
) -> Union[int, float, str]:
"""Calculates percentage change between previous and current value."""
if prev_val is None:
# previous point wasn't found
return "–"
elif curr_val == 0 and prev_val == curr_val:
# both current and previous value are 0
return "–"
elif prev_val == 0:
# previous value is 0, but current value isn't
sign_ = "+" if curr_val > 0 else "-"
return sign_ + "inf"
else:
change = curr_val - prev_val
percentage_change = (change / prev_val) * 100
return round_number(percentage_change)
def send_email_using_template(
template_name: str,
recipient_emails: List[str],
subject: str,
files: List[dict],
**kwargs,
) -> None:
"""Sends an email using a template."""
path = os.path.join(os.path.dirname(__file__), "email_templates")
env = Environment(
loader=FileSystemLoader(path), autoescape=select_autoescape(["html", "xml"])
)
template = env.get_template(template_name)
send_static_alert_email(recipient_emails, subject, template.render(**kwargs), files)
HRN_PREFIXES = {
-9: "n",
-6: "µ",
-3: "m",
0: "",
3: "K",
6: "M",
9: "B",
12: "T",
}
def _get_exponent(num: float) -> int:
"""Returns the power of 10 to which the number is raised to."""
if num == 0:
return 0
return floor(log10(abs(num)))
def human_readable(num: float) -> str:
"""Returns the human readable format of a number."""
exponent = _get_exponent(num)
new_exponent = min((3 * floor(exponent / 3)), 12)
precision = 10 ** (new_exponent)
new_val = round(num / precision, 3)
human_readable_format = str(new_val) + HRN_PREFIXES[new_exponent]
return human_readable_format
|
recipes/Python/576957_Asynchronous_subprocess_using/recipe-576957.py | tdiprima/code | 2,023 | 12767393 | <gh_stars>1000+
#!/usr/bin/env python
"""asyncsubproc.py: Asynchronous subprocess communication using asyncore.
The `AsyncPopen` class wraps the I/O pipes from `Popen` in asynchronous
dispatchers, providing asynchronous communication with the subprocess using
`asyncore.loop()` to read and write in parallel with other I/O. The
`SubprocessExecutor` class wraps `AsyncPopen` in an `Executor`, allowing
inline subprocess execution using a generator.
Full-duplex Communication:
Data that the subprocess writes might not be made available to the parent until
the subprocess calls `flush()` or exits; thus, a parent which attempts to write
data, read a response, and then write new data contingent on the response might
find itself deadlocked. There seems to be no way for the parent process to
force flushing of the subprocess output; changing the value of the `bufsize`
parameter to `Popen()` to zero (or any other value) doesn't do it, and
`asyncore.file_dispatcher` already sets `O_NONBLOCK` on the pipes.
Subprocess Exit:
Detecting subprocess exit while avoiding zombie subprocesses can be tricky in
asynchronous code. Calling `wait()` on a subprocess would block, leaving three
alternatives for checking for subprocess exit:
1) Exit the asynchronous select loop (e.g. `asyncore.loop()`) occasionally
to call `poll()` on any unterminated subprocesses. This requires maintaining a
list of all unterminated subprocess objects, along with any context needed to
handle the subprocess exit.
2) Set a handler for `SIGCHLD` which calls `os.waitpid(-1, os.WNOHANG)`,
and then use the return value to locate the asynchronous process object and
handle the subprocess exit. This must be done in a loop to avoid missing
consolidated signals, requires maintaining a list of all unterminated
subprocesses, and is limited by reentrancy restrictions on signal handlers.
3) Check for `stdout` and `stderr` to both be closed, which can be done as
part of the asynchronous loop which reads data. This requires that at least one
of `stdout` and `stderr` be a pipe, but an asynchronous subprocess is probably
unnecessary in the first place if neither is a pipe. There is no absolute
guarantee that the subprocess has exited when `stdout` and `stderr` have
closed, but once they have, no more data is coming. However, because `wait()`
is not being called on the subprocesses, special care has to be taken to avoid
leaving zombie subproceses. There are again three alternatives:
a) Set `SIGCHLD` to `SIG_IGN`. This should work on most varieties of UNIX
including Mac OS X. However, it prevents collecting the exit status of the
subprocess; `poll()` will return `None` and `wait()` will raise an `OSError`
exception.
b) Set a handler for `SIGCHLD` as in solution (2) above; if this is to be
implemented, it may be better to simply implement solution (2) rather than
waiting for the output pipes to close in the first place.
c) Call `wait()` on the subprocess after stdout and stderr are closed.
While this will block (briefly), it should be reasonably safe unless the
subprocess does something very unusual.
`SubprocessExecutor` waits for `stdout` and `stderr` to both be closed, and
then calls `wait()` on the subprocess if no handler for `SIGCHLD` is set.
References:
http://code.activestate.com/recipes/577600/ [queued SIGALRM alarms]
http://code.activestate.com/recipes/576965/ [event-based asynchronous pattern]
http://code.activestate.com/recipes/576967/ [asynchronous pipe I/O]
"""
import os
import sys
import signal
import threading
from traceback import print_exc
from subprocess import Popen, PIPE
from logging import ERROR, INFO
import alarm
from asyncpipes import PipeDispatcher, InputPipeDispatcher, OutputPipeDispatcher
from worker import Executor
from observer import Observable
if __name__ == '__main__':
import optparse
from asyncore import loop
from string import digits
from time import sleep
from worker import execute, ExecutionQueue
__version__ = '$Revision: 3414 $'.split()[1]
__usage__ = 'usage: %prog [options] [data]'
class AsyncPopen(Observable, Popen):
"""An extension to Popen which creates a subprocess with asynchronous
pipes for input and output. Pipe output can be read using an Observer
pattern while asyncore.loop() is run.
Also contains additional small extensions, such as a subprocess timeout
and a fix to handling of signals for subprocesses.
"""
def __init__(self, argv, map=None, timeout=None, close_when_done=True,
stdin=PIPE, stdout=PIPE, stderr=PIPE, preexec_fn=None, bufsize=0, **popen_keyw):
"""Accepts all the same arguments and keywords as `subprocess.Popen`.
Input or outputs specified as `PIPE` (now the default) for are wrapped
in an asynchronous pipe dispatcher.
The timeout is used to create an alarm, which can be cancelled by
calling `cancel_timeout()`, `communicate()`, `wait()` or `kill()`.
"""
Observable.__init__(self)
self._map = map
# Create the subprocess itself, wrapping preexec_fn in the clear_signals call
Popen.__init__(self, argv, preexec_fn=lambda: self.clear_signals(preexec_fn),
stdin=stdin, stdout=stdout, stderr=stderr, **popen_keyw)
# Set the timeout on the subprocess. If it fails, ignore the failure.
try:
fto = float(timeout)
self._alarmobj = alarm.alarm(fto, self.kill) if fto > 0 else None
except:
self._alarmobj = None
# Wrap the pipe I/O. Sets the Popen and pipe buffer sizes the same; perhaps not optimal.
if stdout == PIPE:
self.stdout = OutputPipeDispatcher(self.stdout, map=map, ignore_broken_pipe=True,
universal_newlines=self.universal_newlines, maxdata=bufsize)
self.stdout.obs_add(self._pipe_event)
if stderr == PIPE:
self.stderr = OutputPipeDispatcher(self.stderr, map=map, ignore_broken_pipe=True,
universal_newlines=self.universal_newlines, maxdata=bufsize)
self.stderr.obs_add(self._pipe_event)
if stdin == PIPE:
self.stdin = InputPipeDispatcher(self.stdin, map=map, ignore_broken_pipe=True,
close_when_done=close_when_done, maxdata=bufsize)
self.stdin.obs_add(self._pipe_event)
def cancel_timeout(self, logger=None):
if not self._alarmobj: return
try:
alarm.cancel(self._alarmobj)
except:
if logger: logger.debug("Error canceling child PID %d alarm" % child.pid, exc_info=1)
finally:
self._alarmobj = None
def wait(self, logger=None):
returncode = Popen.wait(self)
self.cancel_timeout(logger=logger)
return returncode
@staticmethod
def clear_signals(preexec_fn):
"""Wraps any preexec_fn in order to clear any signal handlers."""
for s in range(1, signal.NSIG):
try:
if s not in [signal.SIGKILL, signal.SIGSTOP]: signal.signal(s, signal.SIG_DFL)
except:
pass
if callable(preexec_fn): preexec_fn()
def kill(self):
"""Kill the child process with extreme prejudice."""
try:
if self.returncode is None: os.kill(self.pid, signal.SIGKILL)
finally:
self.cancel_timeout()
def fetch_output(self, clear=True):
"""Fetch data from the subprocess output pipes.
An output file not set to a pipe returns an empty string.
"""
outdata = self.stdout.fetch_data(clear) if self.stdout is not None else ''
errdata = self.stderr.fetch_data(clear) if self.stderr is not None else ''
return outdata, errdata
def output_closed(self):
"""Return true if both subprocess output pipes are closed.
Can be used to detected the termination of the subprocess. An output
file not sent to a pipe is ignored.
"""
outread = self.stdout.readable() if self.stdout is not None else False
errread = self.stderr.readable() if self.stderr is not None else False
return not (outread or errread)
def _pipe_event(self, observed, event):
"""Forward events on the pipes. The forwarded events contain the pipe
event and the pipe itself as a two-element tuple."""
self._obs_notify((event, observed))
class SubprocessExecutor(Executor):
"""Executes subprocesses, reading and writing data using `asyncore`.
For each subprocess to be created, the generator must yield either the
object to be passed to the `argv` argument of the `Popen` constructor,
or a dictionary containing a required `argv` key, an optional `input` key
containing a string to be written to `stdin` of the subprocess, and keys
corresponding to the keyword parameters to `AsyncPopen` (the same keywords
as the `child_spawn()` method).
Once the subprocess has exited, the executor will call `send()` on the
generator, passing a 4-element tuple containing the data read from
`stdout` and `stderr`, the exit status returned by `Popen.poll()`, and the
pid of the subprocess. The generator can then yield the parameters for
another subprocess.
"""
def __init__(self, generator, exc_handler=print_exc, logger=None, **async_popen_keyw):
"""Initialize a subprocess executor.
Additional keyword parameters to this constructor (usually passed
through the decorator) will be passed to `AsyncPopen`.
"""
Executor.__init__(self, generator, exc_handler)
self._logger = logger
self.__async_popen_dict = async_popen_keyw
self.__current_child = None
def _execute(self, logger=None, **async_popen_keyw):
"""Iterate the generator to completion (in the calling thread).
The generator must yield the parameters for the first subprocess,
which will be passed to `_spawn()`.
Additional keyword parameters passed to this object when called will
be passed to `AsyncPopen` (and override values passed to this object's
constructor).
"""
self.__async_popen_dict.update(async_popen_keyw)
if logger is not None: self._logger = logger
# Get the command to be executed from the generator
self.__coerce_and_spawn(self.next())
def _pipe_closed(self, observed, event):
"""Called when one of the output pipes (stdout or stderr) is closed.
Once both are closed, declare the subprocess finished and call
`_child_exit()`.
"""
if observed.output_closed(): self._child_exit(observed)
def _child_exit(self, child):
"""Called once `stdout` and `stderr` are both closed.
        Cleans up the subprocess, and then passes the subprocess results to
the generator by calling `send()`. If the generator yields parameters
for another subprocess, calls `_child_spawn()`.
"""
self.__current_child = None
# Close stdin for the child, so that it knows it won't be getting more data
try:
if child.stdin is not None: child.stdin.close()
except:
if self._logger: self._logger.debug("Error closing stdin for PID %d" % child.pid, exc_info=1)
# Wait for the child if there's no signal handler
if signal.getsignal(signal.SIGCHLD) == signal.SIG_DFL:
try:
# This will cancel the alarm
returncode = child.wait(logger=self._logger)
except:
if self._logger: self._logger.debug("Error waiting for child PID %d" % child.pid, exc_info=1)
else: print_exc(file=sys.stderr)
else:
child.cancel_timeout(logger=self._logger)
# This next will return None unless an exit status injector has been set up.
returncode = child.poll()
# Extract the result from the child process; and move on with the executor
try:
outdata, errdata = child.fetch_output()
child_result = (outdata, errdata, returncode, child.pid)
if self._logger: self._logger.debug("PID %d exited with code %s" % (child.pid, returncode))
self.__coerce_and_spawn(self.send(child_result))
except:
self.throw(*sys.exc_info())
def close(self):
"""Kill the subprocess when closing the generator."""
child = self.__current_child
if child:
try:
child.kill()
except:
if self._logger: self._logger.exception("Error killing child PID %d" % child.pid)
else: print_exc(file=sys.stderr)
else:
self.__current_child = None
Executor.close(self)
def __coerce_and_spawn(self, arg):
"""Coerce the argument into a call to `_child_spawn()`"""
try:
self._child_spawn(**arg)
except:
self._child_spawn(argv=arg)
def _child_spawn(self, argv=None, input=None, **async_popen_keyw):
"""Create the subprocess and send the data to the input pipe. Called
with the value(s) yielded by the generator.
If a subprocess is to be spawned, the `argv` keyword must be supplied
with a non-empty value. The value passed to the `input` keyword will
be written to `stdin` of the subprocess.
Additional keyword parameters passed to this method will
be passed to `AsyncPopen` (and override values passed to this object's
constructor).
"""
if self.stopped(): return
# Merge the keyword arguments together to pass to AsyncPopen
async_popen_dict = self.__async_popen_dict.copy()
async_popen_dict.update(async_popen_keyw)
if input: async_popen_dict["stdin"] = PIPE
# Create the subprocess itself
if self._logger: self._logger.debug("Spawning subprocess %s" % argv)
self.__current_child = AsyncPopen(argv, **async_popen_dict)
if self._logger: self._logger.debug("Spawned subprocess %s with PID %d" % (argv, self.__current_child.pid))
# Listen for both output pipes to close, and push the data to stdin
self.__current_child.obs_add(self._pipe_closed, criteria=PipeDispatcher.PIPE_CLOSED)
if input: self.__current_child.stdin.push_data(str(input))
if __name__ == '__main__':
def printdata(data, pid, channame):
print '[%d] %s %d bytes received: %r' % (pid, channame, len(data), data)
execq = ExecutionQueue()
@execute(execq, SubprocessExecutor)
def spawn_child(argv, data, child, loops):
"""Spawn a cascade of subprocesses."""
for lp in range(1, loops + 1):
(stdout, stderr, stat, pid) = yield {'argv': argv, 'input': '%s%s' % (data, '\n')}
printdata(stdout, pid, 'stdout')
printdata(stderr, pid, 'stderr')
print "Loop %d child %d [%d] exited with status %s" % (lp, child, pid, stat)
if stat == 0 and data == stdout.rstrip()[::-1]: data = stdout[:-1]
def run_child(pause, exitstat):
"""Run the subprocess code; a simple string inverter."""
line = sys.stdin.readline().strip()
sleep(pause / 2.0)
# Write and close both pipes to show that it waits for exit anyway.
print line[::-1]
print >>sys.stderr, line
sys.stdout.close()
sys.stderr.close()
sleep(pause / 2.0)
sys.exit(exitstat)
optparser = optparse.OptionParser(usage=__usage__, version=__version__)
optparser.disable_interspersed_args()
optparser.add_option('--loops', type='int', metavar='N', default=3,
help='Number of times to iterate each child [%default]')
optparser.add_option('--children', type='int', metavar='N', default=3,
help='Number of children to spawn [%default]')
optparser.add_option('--timeout', type='float', metavar='SECONDS', default=10.0,
help='Maximum time subprocess is allowed to run [%default sec]')
optparser.add_option('--no-signal', dest='nosignal', action='store_true', default=False,
help='Ignore signals from child processes.')
childopts = optparse.OptionGroup(optparser, 'Child options')
childopts.add_option('--child', action='store_true', help=optparse.SUPPRESS_HELP)
childopts.add_option('--pause', type='float', metavar='SECONDS', default=2.0,
help='Time to pause in the child process [%default sec]')
childopts.add_option('--exitstat', type='int', metavar='STATUS', default=0,
help='Child exit status [%default]')
optparser.add_option_group(childopts)
(options, args) = optparser.parse_args()
if options.child:
run_child(options.pause, options.exitstat)
else:
# Run the parent process code: start the first child and send data.
if options.nosignal: signal.signal(signal.SIGCHLD, signal.SIG_IGN)
sys.argv.insert(1, '--child')
# Create and queue the children, and then loop asyncore
data = ' '.join(args) if len(args) else digits
for ch in range(1, options.children + 1):
spawn_child(sys.argv, data, ch, options.loops)(timeout=options.timeout)
loop()
os.system('ps -ef')
|
rnns/gru.py | anoidgit/zero | 111 | 12767423 | # coding: utf-8
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from func import linear
from rnns import cell as cell
class gru(cell.Cell):
"""The Gated Recurrent Unit."""
def __init__(self, d, ln=False, scope='gru'):
super(gru, self).__init__(d, ln=ln, scope=scope)
def get_init_state(self, shape=None, x=None, scope=None):
return self._get_init_state(
self.d, shape=shape, x=x, scope=scope)
def fetch_states(self, x):
with tf.variable_scope(
"fetch_state_{}".format(self.scope or "gru")):
g = linear(x, self.d * 2,
bias=False, ln=self.ln, scope="gate_x")
h = linear(x, self.d,
bias=False, ln=self.ln, scope="hide_x")
return g, h
def __call__(self, h_, x):
# h_: the previous hidden state
# x_g/x: the current input state for gate
# x_h/x: the current input state for hidden
"""
z = sigmoid(h_, x)
r = sigmoid(h_, x)
h' = tanh(x, r * h_)
h = z * h_ + (1. - z) * h'
"""
with tf.variable_scope(
"cell_{}".format(self.scope or "gru")):
x_g, x_h = x
h_g = linear(h_, self.d * 2,
ln=self.ln, scope="gate_h")
z, r = tf.split(
tf.sigmoid(x_g + h_g), 2, -1)
h_h = linear(h_ * r, self.d,
ln=self.ln, scope="hide_h")
h = tf.tanh(x_h + h_h)
h = z * h_ + (1. - z) * h
return h
|
docs_Ismail_Geles/benchmark/utils/format.py | isgeles/SMARTS | 554 | 12767458 | # MIT License
#
# Copyright (C) 2021. Huawei Technologies Co., Ltd. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
def pretty_dict(d, indent=0):
"""Pretty the output format of a dictionary.
Parameters
----------
d
dict, the input dictionary instance.
indent
int, indent level, non-negative.
Returns
-------
res
str, the output string
"""
res = ""
for k, v in d.items():
res += "\t" * indent + str(k)
if isinstance(v, dict):
res += "\n" + pretty_dict(v, indent + 1)
else:
res += ": " + str(v) + "\n"
return res
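# Example (illustrative): pretty_dict({"a": {"b": 1}}) returns "a\n\tb: 1\n".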
|
mmskeleton/deprecated/datasets/recognition.py | fserracant/mmskeleton | 1,347 | 12767463 | import os
import numpy as np
import json
import torch
from .utils import skeleton
class SkeletonDataset(torch.utils.data.Dataset):
""" Feeder for skeleton-based action recognition
Arguments:
data_path: the path to data folder
random_choose: If true, randomly choose a portion of the input sequence
random_move: If true, randomly perfrom affine transformation
window_size: The length of the output sequence
repeat: times of repeating the dataset
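        normalization: If true, normalize x/y keypoint coordinates to [-0.5, 0.5] and zero
            out all channels of keypoints whose score/visibility is zero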
data_subscripts: subscript expression of einsum operation.
In the default case, the shape of output data is `(channel, vertex, frames, person)`.
To permute the shape to `(channel, frames, vertex, person)`,
set `data_subscripts` to 'cvfm->cfvm'.
"""
def __init__(self,
data_dir,
random_choose=False,
random_move=False,
window_size=-1,
num_track=1,
                 data_subscripts=None,
                 normalization=False,
repeat=1):
self.data_dir = data_dir
self.random_choose = random_choose
self.random_move = random_move
self.window_size = window_size
self.num_track = num_track
        self.data_subscripts = data_subscripts
        self.normalization = normalization
self.files = [
os.path.join(self.data_dir, f) for f in os.listdir(self.data_dir)
] * repeat
def __len__(self):
return len(self.files)
def __getitem__(self, index):
with open(self.files[index]) as f:
data = json.load(f)
resolution = data['info']['resolution']
category_id = data['category_id']
annotations = data['annotations']
num_frame = data['info']['num_frame']
num_keypoints = data['info']['num_keypoints']
channel = data['info']['keypoint_channels']
num_channel = len(channel)
# get data
data = np.zeros(
(num_channel, num_keypoints, num_frame, self.num_track),
dtype=np.float32)
for a in annotations:
person_id = a['id'] if a['person_id'] is None else a['person_id']
frame_index = a['frame_index']
if person_id < self.num_track and frame_index < num_frame:
data[:, :, frame_index, person_id] = np.array(
a['keypoints']).transpose()
# normalization
if self.normalization:
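            # x/y coordinates are mapped to [-0.5, 0.5]; keypoints whose score/visibility is
            # zero have all of their channels zeroed out.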
for i, c in enumerate(channel):
if c == 'x':
data[i] = data[i] / resolution[0] - 0.5
if c == 'y':
data[i] = data[i] / resolution[1] - 0.5
if c == 'score' or c == 'visibility':
mask = (data[i] == 0)
for j in range(num_channel):
                        if i != j:
data[j][mask] = 0
# permute
if self.data_subscripts is not None:
data = np.einsum(self.data_subscripts, data)
# augmentation
if self.random_choose:
data = skeleton.random_choose(data, self.window_size)
elif self.window_size > 0:
data = skeleton.auto_pading(data, self.window_size)
if self.random_move:
data = skeleton.random_move(data)
return data, category_id |
rastervision_pytorch_learner/rastervision/pytorch_learner/learner_pipeline.py | theoway/raster-vision | 1,577 | 12767466 | from rastervision.pipeline.pipeline import Pipeline
from rastervision.pytorch_learner import LearnerConfig
class LearnerPipeline(Pipeline):
"""Simple Pipeline that is a wrapper around Learner.main()
This supports the ability to use the pytorch_learner package to train models using
the RV pipeline package and its runner functionality without the rest of RV.
"""
commands = ['train']
gpu_commands = ['train']
def train(self):
learner_cfg: LearnerConfig = self.config.learner
learner = learner_cfg.build(learner_cfg, self.tmp_dir)
learner.main()
|
python/fate_arch/common/__init__.py | QuantumA/FATE | 715 | 12767470 | <filename>python/fate_arch/common/__init__.py
from fate_arch.common._types import FederatedMode, FederatedCommunicationType, EngineType, CoordinationProxyService, \
CoordinationCommunicationProtocol
from fate_arch.common._types import BaseType, Party, DTable
|
us/tests/test_us.py | Juh10/python-us | 346 | 12767473 | <reponame>Juh10/python-us<filename>us/tests/test_us.py
from itertools import chain
import jellyfish # type: ignore
import pytest # type: ignore
import pytz
import us
# attribute
def test_attribute():
for state in us.STATES_AND_TERRITORIES:
assert state == getattr(us.states, state.abbr)
def test_valid_timezones():
for state in us.STATES_AND_TERRITORIES:
if state.capital:
assert pytz.timezone(state.capital_tz)
for tz in state.time_zones:
assert pytz.timezone(tz)
# During migration from SQLite to Python classes, a duplicate
# time zone had been found
assert len(state.time_zones) == len(set(state.time_zones))
# maryland lookup
def test_fips():
assert us.states.lookup("24") == us.states.MD
assert us.states.lookup("51") != us.states.MD
def test_abbr():
assert us.states.lookup("MD") == us.states.MD
assert us.states.lookup("md") == us.states.MD
assert us.states.lookup("VA") != us.states.MD
assert us.states.lookup("va") != us.states.MD
def test_name():
assert us.states.lookup("Maryland") == us.states.MD
assert us.states.lookup("maryland") == us.states.MD
assert us.states.lookup("Maryland", field="name") == us.states.MD
assert us.states.lookup("maryland", field="name") is None
assert us.states.lookup("murryland") == us.states.MD
assert us.states.lookup("Virginia") != us.states.MD
# lookups
def test_abbr_lookup():
for state in us.STATES:
assert us.states.lookup(state.abbr) == state
def test_fips_lookup():
for state in us.STATES:
assert us.states.lookup(state.fips) == state
def test_name_lookup():
for state in us.STATES:
assert us.states.lookup(state.name) == state
def test_obsolete_lookup():
for state in us.OBSOLETE:
assert us.states.lookup(state.name) is None
# test metaphone
def test_jellyfish_metaphone():
for state in chain(us.STATES_AND_TERRITORIES, us.OBSOLETE):
assert state.name_metaphone == jellyfish.metaphone(state.name)
# mappings
def test_mapping():
states = us.STATES[:5]
assert us.states.mapping("abbr", "fips", states=states) == dict(
(s.abbr, s.fips) for s in states
)
def test_obsolete_mapping():
mapping = us.states.mapping("abbr", "fips")
for state in us.states.OBSOLETE:
assert state.abbr not in mapping
def test_custom_mapping():
mapping = us.states.mapping("abbr", "fips", states=[us.states.DC, us.states.MD])
assert len(mapping) == 2
assert "DC" in mapping
assert "MD" in mapping
# known bugs
def test_kentucky_uppercase():
assert us.states.lookup("kentucky") == us.states.KY
assert us.states.lookup("KENTUCKY") == us.states.KY
def test_wayoming():
assert us.states.lookup("Wyoming") == us.states.WY
assert us.states.lookup("Wayoming") is None
def test_dc():
assert us.states.DC not in us.STATES
assert us.states.lookup("DC") == us.states.DC
assert us.states.lookup("District of Columbia") == us.states.DC
assert "DC" in us.states.mapping("abbr", "name")
# shapefiles
@pytest.mark.skip
def test_head():
import requests
for state in us.STATES_AND_TERRITORIES:
for url in state.shapefile_urls().values():
resp = requests.head(url)
assert resp.status_code == 200
# counts
def test_obsolete():
assert len(us.OBSOLETE) == 3
def test_states():
assert len(us.STATES) == 50
def test_territories():
assert len(us.TERRITORIES) == 5
def test_contiguous():
# Lower 48
assert len(us.STATES_CONTIGUOUS) == 48
def test_continental():
# Lower 48 + Alaska
assert len(us.STATES_CONTINENTAL) == 49
def test_dc():
assert us.states.DC not in us.STATES
|
ciphey/basemods/Decoders/base64_url.py | AlexandruValeanu/Ciphey | 9,908 | 12767490 | import base64
from typing import Dict, Optional
from ciphey.iface import Config, Decoder, ParamSpec, T, U, registry
@registry.register
class Base64_url(Decoder[str]):
def decode(self, ctext: T) -> Optional[U]:
"""
Performs Base64 URL decoding
"""
ctext_padding = ctext + "=" * (4 - len(ctext) % 4)
try:
return base64.urlsafe_b64decode(ctext_padding).decode("utf-8")
except Exception:
return None
@staticmethod
def priority() -> float:
# Not expected to show up often, but also very fast to check.
return 0.05
def __init__(self, config: Config):
super().__init__(config)
@staticmethod
def getParams() -> Optional[Dict[str, ParamSpec]]:
return None
@staticmethod
def getTarget() -> str:
return "base64_url"
|
bayesiancoresets/coreset/__init__.py | trevorcampbell/hilbert-coresets | 118 | 12767529 | from .hilbert import HilbertCoreset
from .sampling import UniformSamplingCoreset
from .sparsevi import SparseVICoreset
from .bpsvi import BatchPSVICoreset
|
tests/conftest.py | amagge/flair | 3,957 | 12767539 | import pytest
from pathlib import Path
@pytest.fixture(scope="module")
def resources_path():
return Path(__file__).parent / "resources"
@pytest.fixture(scope="module")
def tasks_base_path(resources_path):
return resources_path / "tasks"
@pytest.fixture(scope="module")
def results_base_path(resources_path):
return resources_path / "results"
def pytest_addoption(parser):
parser.addoption(
"--runslow", action="store_true", default=False, help="run slow tests"
)
parser.addoption(
"--runintegration",
action="store_true",
default=False,
help="run integration tests",
)
def pytest_collection_modifyitems(config, items):
if config.getoption("--runslow") and config.getoption("--runintegration"):
return
if not config.getoption("--runslow"):
skip_slow = pytest.mark.skip(reason="need --runslow option to run")
for item in items:
if "slow" in item.keywords:
item.add_marker(skip_slow)
if not config.getoption("--runintegration"):
skip_integration = pytest.mark.skip(
reason="need --runintegration option to run"
)
for item in items:
if "integration" in item.keywords:
item.add_marker(skip_integration)
|
torchcde/__init__.py | jb-c/torchcde | 247 | 12767550 | <reponame>jb-c/torchcde
from .interpolation_base import InterpolationBase
from .interpolation_cubic import natural_cubic_spline_coeffs, natural_cubic_coeffs, CubicSpline
from .interpolation_linear import linear_interpolation_coeffs, LinearInterpolation
from .interpolation_hermite_cubic_bdiff import hermite_cubic_coefficients_with_backward_differences
from .log_ode import logsignature_windows, logsig_windows
from .misc import TupleControl
from .solver import cdeint
__version__ = "0.2.5"
|
dtech_instagram/InstagramAPI/src/http/Response/LoginResponse.py | hideki-saito/InstagramAPP_Flask | 126 | 12767567 | <gh_stars>100-1000
from .Response import Response
class LoginResponse(Response):
def __init__(self, response):
self.username = None
self.has_anonymous_profile_picture = None
self.profile_pic_url = None
self.profile_pic_id = None
self.full_name = None
self.pk = None
self.is_private = None
if 'logged_in_user' in response and 'username' in response['logged_in_user']:
self.username = response['logged_in_user']['username']
self.has_anonymous_profile_picture = response['logged_in_user']['has_anonymous_profile_picture']
self.profile_pic_url = response['logged_in_user']['profile_pic_url']
self.full_name = response['logged_in_user']['full_name']
self.pk = response['logged_in_user']['pk']
self.is_private = response['logged_in_user']['is_private']
else:
self.setMessage(response['message'])
self.setStatus(response['status'])
def getUsername(self):
return self.username
def getHasAnonymousProfilePicture(self):
return self.has_anonymous_profile_picture
def getProfilePicUrl(self):
return self.profile_pic_url
def getProfilePicId(self):
return self.profile_pic_id
def getFullName(self):
return self.full_name
def getUsernameId(self):
return str(self.pk)
def getIsPrivate(self):
return self.is_private
|
pypykatz/commons/winapi/constants.py | wisdark/pypykatz | 1,861 | 12767608 | #!/usr/bin/env python3
#
# Author:
# <NAME> (@skelsec)
#
PROCESS_QUERY_INFORMATION = 0x0400
PROCESS_VM_READ = 0x0010
PROCESS_VM_WRITE = 0x0020
PROCESS_VM_OPERATION = 0x0008
PROCESS_CREATE_THREAD = 0x0002
# Standard access rights
DELETE = 0x00010000
READ_CONTROL = 0x00020000
WRITE_DAC = 0x00040000
WRITE_OWNER = 0x00080000
SYNCHRONIZE = 0x00100000
STANDARD_RIGHTS_REQUIRED = 0x000F0000
STANDARD_RIGHTS_READ = READ_CONTROL
STANDARD_RIGHTS_WRITE = READ_CONTROL
STANDARD_RIGHTS_EXECUTE = READ_CONTROL
STANDARD_RIGHTS_ALL = 0x001F0000
SPECIFIC_RIGHTS_ALL = 0x0000FFFF
#--- Constants ----------------------------------------------------------------
privnames = {
"SE_ASSIGNPRIMARYTOKEN_NAME" : "SeAssignPrimaryTokenPrivilege",
"SE_AUDIT_NAME" : "SeAuditPrivilege",
"SE_BACKUP_NAME" : "SeBackupPrivilege",
"SE_CHANGE_NOTIFY_NAME" : "SeChangeNotifyPrivilege",
"SE_CREATE_GLOBAL_NAME" : "SeCreateGlobalPrivilege",
"SE_CREATE_PAGEFILE_NAME" : "SeCreatePagefilePrivilege",
"SE_CREATE_PERMANENT_NAME" : "SeCreatePermanentPrivilege",
"SE_CREATE_SYMBOLIC_LINK_NAME" : "SeCreateSymbolicLinkPrivilege",
"SE_CREATE_TOKEN_NAME" : "SeCreateTokenPrivilege",
"SE_DEBUG_NAME" : "SeDebugPrivilege",
"SE_ENABLE_DELEGATION_NAME" : "SeEnableDelegationPrivilege",
"SE_IMPERSONATE_NAME" : "SeImpersonatePrivilege",
"SE_INC_BASE_PRIORITY_NAME" : "SeIncreaseBasePriorityPrivilege",
"SE_INCREASE_QUOTA_NAME" : "SeIncreaseQuotaPrivilege",
"SE_INC_WORKING_SET_NAME" : "SeIncreaseWorkingSetPrivilege",
"SE_LOAD_DRIVER_NAME" : "SeLoadDriverPrivilege",
"SE_LOCK_MEMORY_NAME" : "SeLockMemoryPrivilege",
"SE_MACHINE_ACCOUNT_NAME" : "SeMachineAccountPrivilege",
"SE_MANAGE_VOLUME_NAME" : "SeManageVolumePrivilege",
"SE_PROF_SINGLE_PROCESS_NAME" : "SeProfileSingleProcessPrivilege",
"SE_RELABEL_NAME" : "SeRelabelPrivilege",
"SE_REMOTE_SHUTDOWN_NAME" : "SeRemoteShutdownPrivilege",
"SE_RESTORE_NAME" : "SeRestorePrivilege",
"SE_SECURITY_NAME" : "SeSecurityPrivilege",
"SE_SHUTDOWN_NAME" : "SeShutdownPrivilege",
"SE_SYNC_AGENT_NAME" : "SeSyncAgentPrivilege",
"SE_SYSTEM_ENVIRONMENT_NAME" : "SeSystemEnvironmentPrivilege",
"SE_SYSTEM_PROFILE_NAME" : "SeSystemProfilePrivilege",
"SE_SYSTEMTIME_NAME" : "SeSystemtimePrivilege",
"SE_TAKE_OWNERSHIP_NAME" : "SeTakeOwnershipPrivilege",
"SE_TCB_NAME" : "SeTcbPrivilege",
"SE_TIME_ZONE_NAME" : "SeTimeZonePrivilege",
"SE_TRUSTED_CREDMAN_ACCESS_NAME" : "SeTrustedCredManAccessPrivilege",
"SE_UNDOCK_NAME" : "SeUndockPrivilege",
"SE_UNSOLICITED_INPUT_NAME" : "SeUnsolicitedInputPrivilege"
}
# Privilege constants
SE_ASSIGNPRIMARYTOKEN_NAME = "SeAssignPrimaryTokenPrivilege"
SE_AUDIT_NAME = "SeAuditPrivilege"
SE_BACKUP_NAME = "SeBackupPrivilege"
SE_CHANGE_NOTIFY_NAME = "SeChangeNotifyPrivilege"
SE_CREATE_GLOBAL_NAME = "SeCreateGlobalPrivilege"
SE_CREATE_PAGEFILE_NAME = "SeCreatePagefilePrivilege"
SE_CREATE_PERMANENT_NAME = "SeCreatePermanentPrivilege"
SE_CREATE_SYMBOLIC_LINK_NAME = "SeCreateSymbolicLinkPrivilege"
SE_CREATE_TOKEN_NAME = "SeCreateTokenPrivilege"
SE_DEBUG_NAME = "SeDebugPrivilege"
SE_ENABLE_DELEGATION_NAME = "SeEnableDelegationPrivilege"
SE_IMPERSONATE_NAME = "SeImpersonatePrivilege"
SE_INC_BASE_PRIORITY_NAME = "SeIncreaseBasePriorityPrivilege"
SE_INCREASE_QUOTA_NAME = "SeIncreaseQuotaPrivilege"
SE_INC_WORKING_SET_NAME = "SeIncreaseWorkingSetPrivilege"
SE_LOAD_DRIVER_NAME = "SeLoadDriverPrivilege"
SE_LOCK_MEMORY_NAME = "SeLockMemoryPrivilege"
SE_MACHINE_ACCOUNT_NAME = "SeMachineAccountPrivilege"
SE_MANAGE_VOLUME_NAME = "SeManageVolumePrivilege"
SE_PROF_SINGLE_PROCESS_NAME = "SeProfileSingleProcessPrivilege"
SE_RELABEL_NAME = "SeRelabelPrivilege"
SE_REMOTE_SHUTDOWN_NAME = "SeRemoteShutdownPrivilege"
SE_RESTORE_NAME = "SeRestorePrivilege"
SE_SECURITY_NAME = "SeSecurityPrivilege"
SE_SHUTDOWN_NAME = "SeShutdownPrivilege"
SE_SYNC_AGENT_NAME = "SeSyncAgentPrivilege"
SE_SYSTEM_ENVIRONMENT_NAME = "SeSystemEnvironmentPrivilege"
SE_SYSTEM_PROFILE_NAME = "SeSystemProfilePrivilege"
SE_SYSTEMTIME_NAME = "SeSystemtimePrivilege"
SE_TAKE_OWNERSHIP_NAME = "SeTakeOwnershipPrivilege"
SE_TCB_NAME = "SeTcbPrivilege"
SE_TIME_ZONE_NAME = "SeTimeZonePrivilege"
SE_TRUSTED_CREDMAN_ACCESS_NAME = "SeTrustedCredManAccessPrivilege"
SE_UNDOCK_NAME = "SeUndockPrivilege"
SE_UNSOLICITED_INPUT_NAME = "SeUnsolicitedInputPrivilege"
SE_CREATE_TOKEN = 2
SE_ASSIGNPRIMARYTOKEN = 3
SE_LOCK_MEMORY=4
SE_INCREASE_QUOTA=5
SE_UNSOLICITED_INPUT=6
SE_TCB=7
SE_SECURITY=8
SE_TAKE_OWNERSHIP=9
SE_LOAD_DRIVER=10
SE_SYSTEM_PROFILE=11
SE_SYSTEMTIME=12
SE_PROF_SINGLE_PROCESS=13
SE_INC_BASE_PRIORITY=14
SE_CREATE_PAGEFILE=15
SE_CREATE_PERMANENT=16
SE_BACKUP=17
SE_RESTORE=18
SE_SHUTDOWN=19
SE_DEBUG=20
SE_AUDIT=21
SE_SYSTEM_ENVIRONMENT=22
SE_CHANGE_NOTIFY=23
SE_REMOTE_SHUTDOWN=24
SE_UNDOCK=25
SE_SYNC_AGENT=26
SE_ENABLE_DELEGATION=27
SE_MANAGE_VOLUME=28
SE_IMPERSONATE=29
SE_CREATE_GLOBAL=30
SE_TRUSTED_CREDMAN_ACCESS=31
SE_RELABEL=32
SE_INC_WORKING_SET=33
SE_TIME_ZONE=34
SE_CREATE_SYMBOLIC_LINK=35
SE_PRIVILEGE_ENABLED_BY_DEFAULT = 0x00000001
SE_PRIVILEGE_ENABLED = 0x00000002
SE_PRIVILEGE_REMOVED = 0x00000004
SE_PRIVILEGE_USED_FOR_ACCESS = 0x80000000
TOKEN_ADJUST_PRIVILEGES = 0x00000020
LOGON_WITH_PROFILE = 0x00000001
LOGON_NETCREDENTIALS_ONLY = 0x00000002
# Token access rights
TOKEN_ASSIGN_PRIMARY = 0x0001
TOKEN_DUPLICATE = 0x0002
TOKEN_IMPERSONATE = 0x0004
TOKEN_QUERY = 0x0008
TOKEN_QUERY_SOURCE = 0x0010
TOKEN_ADJUST_PRIVILEGES = 0x0020
TOKEN_ADJUST_GROUPS = 0x0040
TOKEN_ADJUST_DEFAULT = 0x0080
TOKEN_ADJUST_SESSIONID = 0x0100
TOKEN_READ = (STANDARD_RIGHTS_READ | TOKEN_QUERY)
TOKEN_ALL_ACCESS = (STANDARD_RIGHTS_REQUIRED | TOKEN_ASSIGN_PRIMARY |
TOKEN_DUPLICATE | TOKEN_IMPERSONATE | TOKEN_QUERY | TOKEN_QUERY_SOURCE |
TOKEN_ADJUST_PRIVILEGES | TOKEN_ADJUST_GROUPS | TOKEN_ADJUST_DEFAULT |
TOKEN_ADJUST_SESSIONID)
# Combined access mask used for token manipulation
# (131072 == 0x20000 == READ_CONTROL / STANDARD_RIGHTS_READ; 4 == TOKEN_IMPERSONATE)
TOKEN_MANIP_ACCESS = (TOKEN_QUERY | TOKEN_READ | TOKEN_IMPERSONATE | TOKEN_QUERY_SOURCE | TOKEN_DUPLICATE | TOKEN_ASSIGN_PRIMARY | (131072 | 4))
# typedef enum _SECURITY_IMPERSONATION_LEVEL {
# SecurityAnonymous,
# SecurityIdentification,
# SecurityImpersonation,
# SecurityDelegation
# } SECURITY_IMPERSONATION_LEVEL, *PSECURITY_IMPERSONATION_LEVEL;
SecurityAnonymous = 0
SecurityIdentification = 1
SecurityImpersonation = 2
SecurityDelegation = 3
TokenPrimary = 1
TokenImpersonation = 2
# Predefined HKEY values
HKEY_CLASSES_ROOT = 0x80000000
HKEY_CURRENT_USER = 0x80000001
HKEY_LOCAL_MACHINE = 0x80000002
HKEY_USERS = 0x80000003
HKEY_PERFORMANCE_DATA = 0x80000004
HKEY_CURRENT_CONFIG = 0x80000005
# Registry access rights
KEY_ALL_ACCESS = 0xF003F
KEY_CREATE_LINK = 0x0020
KEY_CREATE_SUB_KEY = 0x0004
KEY_ENUMERATE_SUB_KEYS = 0x0008
KEY_EXECUTE = 0x20019
KEY_NOTIFY = 0x0010
KEY_QUERY_VALUE = 0x0001
KEY_READ = 0x20019
KEY_SET_VALUE = 0x0002
KEY_WOW64_32KEY = 0x0200
KEY_WOW64_64KEY = 0x0100
KEY_WRITE = 0x20006
# Registry value types
REG_NONE = 0
REG_SZ = 1
REG_EXPAND_SZ = 2
REG_BINARY = 3
REG_DWORD = 4
REG_DWORD_LITTLE_ENDIAN = REG_DWORD
REG_DWORD_BIG_ENDIAN = 5
REG_LINK = 6
REG_MULTI_SZ = 7
REG_RESOURCE_LIST = 8
REG_FULL_RESOURCE_DESCRIPTOR = 9
REG_RESOURCE_REQUIREMENTS_LIST = 10
REG_QWORD = 11
REG_QWORD_LITTLE_ENDIAN = REG_QWORD |
tensorflow_graphics/projects/cvxnet/train.py | Liang813/graphics | 2,759 | 12767670 | # Copyright 2020 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Training Loop."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow.compat.v1 as tf
from tensorflow_graphics.projects.cvxnet.lib import datasets
from tensorflow_graphics.projects.cvxnet.lib import models
from tensorflow_graphics.projects.cvxnet.lib import utils
tf.disable_eager_execution()
flags = tf.app.flags
logging = tf.logging
tf.logging.set_verbosity(tf.logging.INFO)
utils.define_flags()
FLAGS = flags.FLAGS
def main(unused_argv):
tf.set_random_seed(2191997)
np.random.seed(6281996)
logging.info("=> Starting ...")
# Select dataset.
logging.info("=> Preparing datasets ...")
data = datasets.get_dataset(FLAGS.dataset, "train", FLAGS)
batch = tf.data.make_one_shot_iterator(data).get_next()
# Select model.
logging.info("=> Creating {} model".format(FLAGS.model))
model = models.get_model(FLAGS.model, FLAGS)
optimizer = tf.train.AdamOptimizer(FLAGS.lr)
# Set up the graph
train_loss, train_op, global_step = model.compute_loss(
batch, training=True, optimizer=optimizer)
# Training hooks
stop_hook = tf.train.StopAtStepHook(last_step=FLAGS.max_steps)
summary_writer = tf.summary.FileWriter(FLAGS.train_dir)
ops = tf.get_collection(tf.GraphKeys.SUMMARIES)
summary_hook = tf.train.SummarySaverHook(
save_steps=100, summary_writer=summary_writer, summary_op=ops)
step_counter_hook = tf.train.StepCounterHook(summary_writer=summary_writer)
hooks = [stop_hook, step_counter_hook, summary_hook]
logging.info("=> Start training loop ...")
with tf.train.MonitoredTrainingSession(
checkpoint_dir=FLAGS.train_dir,
hooks=hooks,
scaffold=None,
save_checkpoint_steps=FLAGS.save_every,
save_checkpoint_secs=None,
save_summaries_steps=None,
save_summaries_secs=None,
log_step_count_steps=None,
max_wait_secs=3600) as mon_sess:
while not mon_sess.should_stop():
mon_sess.run([batch, train_loss, global_step, train_op])
if __name__ == "__main__":
tf.app.run(main)
|
scripts/isqrt.py | Marlon-Lazo-Coronado/tiny-bignum-c | 331 | 12767678 | <reponame>Marlon-Lazo-Coronado/tiny-bignum-c
#isqrt.py
import math
def isqrt(n):
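    # Binary search for floor(sqrt(n)) on [0, n]; mid is biased upward so that
    # low always advances, and low converges to the integer square root.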
if n == 0: return 0
high = n
low = 0
calcMid = lambda: (high - low) / 2 + low + 1
mid = calcMid()
while high > low:
sq = mid**2
if sq > n:
high = mid - 1
else:
low = mid
mid = calcMid()
return low
if __name__ == "__main__":
for i in range(10000000):
sq = isqrt(i)
if sq != int(math.sqrt(i)):
print "Failed on {}: {}".format(i, sq)
elif i % 100000==0: print i
|
utils/wfuzzbasicauthbrute/wfuzz/plugins/scripts/robots.py | ismailbozkurt/kubebot | 171 | 12767703 | <reponame>ismailbozkurt/kubebot
import re
from urlparse import urlparse, urljoin
from framework.plugins.api import DiscoveryPlugin
from framework.plugins.api import url_filename
from externals.moduleman.plugin import moduleman_plugin
@moduleman_plugin
class robots(DiscoveryPlugin):
name = "robots"
description = "Parses robots.txt looking for new content. Optional: discovery.bl=\".txt,.gif\""
category = ["default", "active", "discovery"]
priority = 99
def validate(self, fuzzresult):
return url_filename(fuzzresult) == "robots.txt" and fuzzresult.code == 200
def process(self, fuzzresult):
# Shamelessly (partially) copied from w3af's plugins/discovery/robotsReader.py
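        # Pull the path out of each Allow/Disallow/Sitemap directive and queue it
        # for discovery unless its extension is blacklisted.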
for line in fuzzresult.history.fr_content().split('\n'):
line = line.strip()
if len(line) > 0 and line[0] != '#' and (line.upper().find('ALLOW') == 0 or\
line.upper().find('DISALLOW') == 0 or line.upper().find('SITEMAP') == 0):
url = line[ line.find(':') + 1 : ]
url = url.strip(" *")
if url and not self.blacklisted_extension(url):
self.queue_url(urljoin(fuzzresult.url, url))
|
ps2000Examples/streaming_mode/streaming_mode_gathering.py | LauritzRaisch/picosdk-python-wrappers | 114 | 12767754 | from time import time_ns
from ctypes import POINTER, c_int16, c_uint32
import matplotlib.pyplot as plt
import numpy as np
from picosdk.ps2000 import ps2000
from picosdk.functions import assert_pico2000_ok
from picosdk.ctypes_wrapper import C_CALLBACK_FUNCTION_FACTORY
from enum import IntEnum
class Channel(IntEnum):
PS2000_CHANNEL_A = 0
PS2000_CHANNEL_B = 1
class PotentialRange(IntEnum):
PS2000_10MV = 0
PS2000_20MV = 1
PS2000_50MV = 2
PS2000_100MV = 3
PS2000_200MV = 4
PS2000_500MV = 5
PS2000_1V = 6
PS2000_2V = 7
PS2000_5V = 8
PS2000_10V = 9
PS2000_20V = 10
class TimeUnit(IntEnum):
FEMTOSECOND = 0
PICOSECOND = 1
NANOSECOND = 2
MICROSECOND = 3
MILLISECOND = 4
SECOND = 5
CALLBACK = C_CALLBACK_FUNCTION_FACTORY(None, POINTER(POINTER(c_int16)), c_int16, c_uint32, c_int16, c_int16, c_uint32)
# reimplement this because the other one only takes ctypes
def adc_to_mv(values, range_, bitness=16):
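    # Scale raw ADC counts to millivolts: the full-scale count (2**(bitness-1) - 1)
    # maps to the selected voltage range (in mV) from v_ranges below.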
v_ranges = [10, 20, 50, 100, 200, 500, 1_000, 2_000, 5_000, 10_000, 20_000]
return [(x * v_ranges[range_]) / (2**(bitness - 1) - 1) for x in values]
def determine_time_unit(interval_ns):
unit = 0
units = ['ns', 'us', 'ms', 's']
while interval_ns > 5_000:
interval_ns /= 1000
unit += 1
return interval_ns, units[unit]
class StreamingDevice:
def __init__(self, gather_values, potential_range=PotentialRange.PS2000_50MV):
self.device = ps2000.open_unit()
self.potential_range = potential_range
self.gather_values = gather_values
res = ps2000.ps2000_set_channel(self.device.handle, Channel.PS2000_CHANNEL_A, True, True, potential_range)
assert_pico2000_ok(res)
# start 'fast-streaming' mode
res = ps2000.ps2000_run_streaming_ns(
self.device.handle,
500,
TimeUnit.NANOSECOND,
100_000,
False,
1,
50_000
)
assert_pico2000_ok(res)
self.start_time = time_ns()
self.end_time = time_ns()
def close(self):
ps2000.ps2000_stop(self.device.handle)
self.device.close()
def gather(self):
adc_values = []
def get_overview_buffers(buffers, _overflow, _triggered_at, _triggered, _auto_stop, n_values):
adc_values.extend(buffers[0][0:n_values])
callback = CALLBACK(get_overview_buffers)
while len(adc_values) < self.gather_values:
ps2000.ps2000_get_streaming_last_values(
self.device.handle,
callback
)
self.end_time = time_ns()
return adc_to_mv(adc_values, self.potential_range)
stream = StreamingDevice(6_000_000)
values = stream.gather()
stream.close()
print('Values gathered: {}'.format(len(values)))
fig, ax = plt.subplots()
interval, units = determine_time_unit(stream.end_time - stream.start_time)
ax.set_xlabel('time/{}'.format(units))
ax.set_ylabel('voltage/mV')
ax.plot(np.linspace(0, interval, len(values)), values)
plt.show()
|
tests/test_history.py | l1kw1d/stashboard | 761 | 12767755 | <filename>tests/test_history.py
from datetime import datetime
from datetime import date
from datetime import timedelta
from base import TestbedTest
from models import Event
from models import Service
from models import Status
class HistoryTest(TestbedTest):
def setUp(self):
super(HistoryTest, self).setUp()
Status.load_defaults()
self.service = Service(slug="account", name="Account",
description="The BEST SERVICE")
self.service.put()
def test_history_order(self):
start = date(2011, 4, 13)
up = Status.get_by_slug("up")
history = self.service.history(5, up, start=start)
self.assertEquals(len(history), 5)
history_days = [ h["day"] for h in history ]
expected = [
date(2011, 4, 12),
date(2011, 4, 11),
date(2011, 4, 10),
date(2011, 4, 9),
date(2011, 4, 8),
]
self.assertEquals(history_days, expected)
def test_history_order_early_month(self):
start = date(2011, 4, 2)
up = Status.get_by_slug("up")
history = self.service.history(5, up, start=start)
history_days = [ h["day"] for h in history ]
expected = [
date(2011, 4, 1),
date(2011, 3, 31),
date(2011, 3, 30),
date(2011, 3, 29),
date(2011, 3, 28),
]
self.assertEquals(history_days, expected)
for h in history:
self.assertFalse(h["information"])
def test_history_order_late_month(self):
start = date(2011, 4, 5)
up = Status.get_by_slug("up")
history = self.service.history(5, up, start=start)
history_days = [ h["day"] for h in history ]
expected = [
date(2011, 4, 4),
date(2011, 4, 3),
date(2011, 4, 2),
date(2011, 4, 1),
date(2011, 3, 31),
]
self.assertEquals(history_days, expected)
def test_history_no_errors_boundary(self):
down = Status.get_by_slug("down")
up = Status.get_by_slug("up")
now = datetime(2011, 4, 5)
event = Event(status=down, service=self.service, start=now, message="HEY")
event.put()
history = self.service.history(5, up, start=date(2011, 4, 5))
self.assertEquals(history[0]["information"], False)
def test_history_one_error(self):
down = Status.get_by_slug("down")
up = Status.get_by_slug("up")
now = datetime(2011, 4, 4, 12)
event = Event(status=down, service=self.service, start=now, message="HEY")
event.put()
history = self.service.history(5, up, start=date(2011, 4, 5))
self.assertEquals(history[0]["information"], True)
self.assertEquals(history[0]["name"], "information")
def test_history_one_error_boundary(self):
down = Status.get_by_slug("down")
up = Status.get_by_slug("up")
now = datetime(2011, 3, 31)
event = Event(status=down, service=self.service, start=now, message="HEY")
event.put()
history = self.service.history(5, up, start=date(2011, 4, 5))
self.assertEquals(history[4]["information"], True)
self.assertEquals(history[4]["name"], "information")
def test_history_count(self):
up = Status.get_by_slug("up")
history = self.service.history(10, up, start=date(2011, 4, 5))
self.assertEquals(len(history), 10)
def test_history_current_status(self):
down = Status.get_by_slug("down")
up = Status.get_by_slug("up")
now = datetime(2011, 4, 4, 12, 51)
event = Event(status=down, service=self.service, start=now, message="HEY")
event.put()
history, = self.service.history(1, up, start=date(2011, 4, 5))
self.assertEquals(history["information"], True)
|
src/detext/layers/multi_layer_perceptron.py | StarWang/detext | 1,229 | 12767777 | <gh_stars>1000+
from typing import List
import tensorflow as tf
class MultiLayerPerceptron(tf.keras.layers.Layer):
""" A multi layer perceptron """
def __init__(self, num_hidden: List[int], activations: List, prefix: str = ''):
""" Initializes the layer
:param num_hidden: list of hidden layer sizes
:param activations: list of activations for dense layer
:param prefix: prefix of hidden layer name
"""
super(MultiLayerPerceptron, self).__init__()
assert len(num_hidden) == len(activations), "num hidden and activations must contain the same number of elements"
self.mlp = []
for i, (hidden_size, activation) in enumerate(zip(num_hidden, activations)):
if hidden_size == 0:
continue
layer = tf.keras.layers.Dense(units=hidden_size, use_bias=True, activation=activation,
name=f'{prefix}hidden_projection_{str(i)}')
self.mlp.append(layer)
def call(self, inputs, **kwargs):
""" Applies multi-layer perceptron on given inputs
:return output Shape=inputs.shape[:-1] + [num_hidden[-1]]
"""
x = inputs
for layer in self.mlp:
x = layer(x)
return x
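# Illustrative usage sketch (not part of the original module; sizes are arbitrary):
#   mlp = MultiLayerPerceptron(num_hidden=[64, 32], activations=['relu', None])
#   out = mlp(tf.ones([8, 128]))  # expected output shape: [8, 32]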
|
python3/pracmln/utils/latexmath2png.py | seba90/pracmln | 123 | 12767802 | #!/usr/bin/python2.5
# Until Python 2.6
from dnutils import logs
from pracmln.utils import locs
"""
Converts LaTeX math to png images.
Run latexmath2png.py --help for usage instructions.
"""
"""
Author:
<NAME> <<EMAIL>>
URL: http://www.kamilkisiel.net
Revision History:
2007/04/20 - Initial version
TODO:
- Make handling of bad input more graceful?
---
Some ideas borrowed from Kjell Fauske's article at http://fauskes.net/nb/htmleqII/
Licensed under the MIT License:
Copyright (c) 2007 <NAME> <<EMAIL>>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
IN THE SOFTWARE.
"""
import os
import tempfile
from PIL import Image
import base64
logger = logs.getlogger(__name__, logs.DEBUG)
# Default packages to use when generating output
default_packages = [
'amsmath',
'amsthm',
'amssymb',
'bm'
]
def __build_preamble(packages, declarations):
preamble = '\documentclass{article}\n'
for p in packages:
preamble += "\\usepackage{{{}}}\n".format(p)
for d in declarations:
preamble += '{}\n'.format(d)
preamble += "\pagestyle{empty}\n\\begin{document}\n"
return preamble
def __write_output(infile, outdir, workdir='.', filename='', size=1, svg=True):
try:
# Generate the DVI file. NOTE: no output in stdout, as it is piped into /dev/null!
latexcmd = 'latex -halt-on-error -output-directory {} {} >/dev/null'.format(workdir, infile)
rc = os.system(latexcmd)
# Something bad happened, abort
if rc != 0:
raise Exception('latex error')
# Convert the DVI file to PNG's
dvifile = infile.replace('.tex', '.dvi')
outfilename = os.path.join(outdir, filename)
if svg:
dvicmd = "dvisvgm -v 0 -o {}.svg --no-fonts {}".format(outfilename, dvifile)
else:
dvicmd = "dvipng -q* -T tight -x {} -z 9 -bg Transparent -o {}.png {} >/dev/null".format(size * 1000, outfilename, dvifile)
rc = os.system(dvicmd)
if rc != 0:
            raise Exception('{} error'.format('dvisvgm' if svg else 'dvipng'))
finally:
# Cleanup temporaries
basefile = infile.replace('.tex', '')
tempext = ['.aux', '.dvi', '.log']
for te in tempext:
tempfile = basefile + te
if os.path.exists(tempfile):
os.remove(tempfile)
def math2png(content, outdir, packages=default_packages, declarations=[], filename='', size=1, svg=True):
"""
Generate png images from $$...$$ style math environment equations.
Parameters:
content - A string containing latex math environment formulas
outdir - Output directory for PNG images
packages - Optional list of packages to include in the LaTeX preamble
declarations - Optional list of declarations to add to the LaTeX preamble
filename - Optional filename for output files
size - Scale factor for output
"""
outfilename = '/tmp/default.tex'
# Set the working directory
workdir = tempfile.gettempdir()
# Get a temporary file
fd, texfile = tempfile.mkstemp('.tex', 'eq', workdir, True)
try:
content = content.replace('$', r'\$')
# Create the TeX document and save to tempfile
fileContent = '{}$${}$$\n\end{{document}}'.format(__build_preamble(packages, declarations), content)
with os.fdopen(fd, 'w+') as f:
f.write(fileContent)
__write_output(texfile, outdir, workdir=workdir, filename=filename, size=size, svg=svg)
outfilename = os.path.join(outdir, '{}.{}'.format(filename, 'svg' if svg else 'png'))
except:
logger.error('Unable to create image. A reason you encounter '
'this error might be that you are either missing latex '
'packages for generating .dvi files or {} for '
'generating the {} image from the .dvi file.'.format('dvisvgm' if svg else 'dvipng', 'svg' if svg else 'png'))
outfilename = os.path.join(locs.etc, 'default.{}'.format('svg' if svg else 'png'))
finally:
if svg:
with open(outfilename, 'r') as outfile:
filecontent = outfile.read()
ratio = 1
else:
# determine image size
im = Image.open(outfilename)
width, height = im.size
ratio = float(width)/float(height)
# create base64 encoded file content
png = open(outfilename)
filecontent = base64.b64encode(png.read())
# cleanup and delete temporary files
if os.path.exists(texfile) and locs.etc not in outfilename:
os.remove(texfile)
if os.path.exists(outfilename) and locs.etc not in outfilename:
os.remove(outfilename)
return filecontent, ratio |
lib/python2.7/site-packages/samba/tests/common.py | abankalarm/pth-toolkit | 480 | 12767846 | <filename>lib/python2.7/site-packages/samba/tests/common.py
# Unix SMB/CIFS implementation. Tests for common.py routines
# Copyright (C) <NAME> 2011
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""Tests for samba.common"""
import samba, os
import samba.tests
from samba.common import *
from samba.samdb import SamDB
class CommonTests(samba.tests.TestCase):
def test_normalise_int32(self):
self.assertEquals('17', normalise_int32(17))
self.assertEquals('17', normalise_int32('17'))
self.assertEquals('-123', normalise_int32('-123'))
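        # Values above 2**31 - 1 wrap around as signed 32-bit integers:
        # 3000000000 - 2**32 == -1294967296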
self.assertEquals('-1294967296', normalise_int32('3000000000'))
def test_dsdb_Dn(self):
sam = samba.Ldb(url='dntest.ldb')
dn1 = dsdb_Dn(sam, "DC=foo,DC=bar")
dn2 = dsdb_Dn(sam, "B:8:0000000D:<GUID=b3f0ec29-17f4-452a-b002-963e1909d101>;DC=samba,DC=example,DC=com")
self.assertEquals(dn2.binary, "0000000D")
self.assertEquals(13, dn2.get_binary_integer())
os.unlink('dntest.ldb')
|
zoomus/components/live_stream.py | seantibor/zoomus | 178 | 12767860 | from __future__ import absolute_import
from zoomus import util
from zoomus.components import base
class LiveStreamComponentV2(base.BaseComponent):
def update(self, **kwargs):
"""
Use this API to update the meeting's stream information.
Expects:
- meeting_id: int
- stream_url: string (URL)
- stream_key: string
- page_url: string (URL)
"""
util.require_keys(kwargs, "meeting_id")
return self.patch_request(
"/meetings/{}/livestream".format(kwargs.get("meeting_id")), data=kwargs
)
def update_status(self, **kwargs):
"""
Use this API to update the status of a meeting's live stream.
Expects:
- meeting_id: int
- action (start|stop)
- settings: dict
"""
util.require_keys(kwargs, "meeting_id")
return self.patch_request(
"/meetings/{}/livestream/status".format(kwargs.get("meeting_id")),
data=kwargs,
)
|
tests/test_contrib_debug_toolbar_flask.py | proofit404/userstories | 187 | 12767866 | <gh_stars>100-1000
import pytest
@pytest.mark.xfail
def test_contrib_is_available():
from stories.contrib.debug_toolbars.flask import StoriesPanel # noqa: F401
|
titus/test/producer/testCart.py | jmilleralpine/hadrian | 127 | 12767871 | #!/usr/bin/env python
# Copyright (C) 2014 Open Data ("Open Data" refers to
# one or more of the following companies: Open Data Partners LLC,
# Open Data Research LLC, or Open Data Capital LLC.)
#
# This file is part of Hadrian.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
import unittest
import numpy
from titus.genpy import PFAEngine
from titus.producer.tools import look
from titus.producer.cart import *
class TestProducerCart(unittest.TestCase):
@staticmethod
def data():
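        # Synthetic data: z is Gaussian with a mean that depends on the (x, y) region
        # (split at x=4, then y=6 on the left and y=2 on the right), so a depth-2 CART
        # should recover exactly those split points, as the assertions below verify.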
while True:
x = random.uniform(0, 10)
y = random.uniform(0, 10)
if x < 4.0:
if y < 6.0:
z = random.gauss(5, 1)
else:
z = random.gauss(8, 1)
else:
if y < 2.0:
z = random.gauss(1, 1)
else:
z = random.gauss(2, 1)
if z < 0.0:
z = 0.0
elif z >= 10.0:
z = 9.99999
a = "A" + str(int(x))
b = "B" + str(int(y/2) * 2)
c = "C" + str(int(z/3) * 3)
yield (x, y, z, a, b, c)
def testCartMustBuildNumericalNumerical(self):
random.seed(12345)
numpy.seterr(divide="ignore", invalid="ignore")
dataset = Dataset.fromIterable(((x, y, z) for (x, y, z, a, b, c) in TestProducerCart.data()), 100000, ("x", "y", "z"))
tree = TreeNode.fromWholeDataset(dataset, "z")
tree.splitMaxDepth(2)
doc = tree.pfaDocument({"type": "record", "name": "Datum", "fields": [{"name": "x", "type": "double"}, {"name": "y", "type": "double"}]}, "TreeNode")
# look(doc, maxDepth=8)
self.assertEqual(doc["cells"]["tree"]["init"]["field"], "x")
self.assertAlmostEqual(doc["cells"]["tree"]["init"]["value"], 4.00, places=2)
self.assertEqual(doc["cells"]["tree"]["init"]["pass"]["TreeNode"]["field"], "y")
self.assertAlmostEqual(doc["cells"]["tree"]["init"]["pass"]["TreeNode"]["value"], 6.00, places=2)
self.assertAlmostEqual(doc["cells"]["tree"]["init"]["pass"]["TreeNode"]["pass"]["double"], 5.00, places=2)
self.assertAlmostEqual(doc["cells"]["tree"]["init"]["pass"]["TreeNode"]["fail"]["double"], 8.02, places=2)
self.assertEqual(doc["cells"]["tree"]["init"]["fail"]["TreeNode"]["field"], "y")
self.assertAlmostEqual(doc["cells"]["tree"]["init"]["fail"]["TreeNode"]["value"], 2.00, places=2)
self.assertAlmostEqual(doc["cells"]["tree"]["init"]["fail"]["TreeNode"]["pass"]["double"], 1.09, places=2)
self.assertAlmostEqual(doc["cells"]["tree"]["init"]["fail"]["TreeNode"]["fail"]["double"], 2.00, places=2)
engine, = PFAEngine.fromJson(doc)
self.assertAlmostEqual(engine.action({"x": 2.0, "y": 3.0}), 5.00, places=2)
self.assertAlmostEqual(engine.action({"x": 2.0, "y": 8.0}), 8.02, places=2)
self.assertAlmostEqual(engine.action({"x": 7.0, "y": 1.0}), 1.09, places=2)
self.assertAlmostEqual(engine.action({"x": 7.0, "y": 5.0}), 2.00, places=2)
doc = tree.pfaDocument(
{"type": "record", "name": "Datum", "fields": [{"name": "x", "type": "double"}, {"name": "y", "type": "double"}]},
"TreeNode",
nodeScores=True, datasetSize=True, predictandUnique=True, nTimesVariance=True, gain=True)
# look(doc, maxDepth=8)
engine, = PFAEngine.fromJson(doc)
def testCartMustBuildNumericalCategorical(self):
random.seed(12345)
numpy.seterr(divide="ignore", invalid="ignore")
dataset = Dataset.fromIterable(((x, y, c) for (x, y, z, a, b, c) in TestProducerCart.data()), 100000, ("x", "y", "c"))
tree = TreeNode.fromWholeDataset(dataset, "c")
tree.splitMaxDepth(2)
doc = tree.pfaDocument({"type": "record", "name": "Datum", "fields": [{"name": "x", "type": "double"}, {"name": "y", "type": "double"}]}, "TreeNode")
# look(doc, maxDepth=8)
self.assertEqual(doc["cells"]["tree"]["init"]["field"], "x")
self.assertAlmostEqual(doc["cells"]["tree"]["init"]["value"], 4.00, places=2)
self.assertEqual(doc["cells"]["tree"]["init"]["pass"]["TreeNode"]["field"], "y")
self.assertAlmostEqual(doc["cells"]["tree"]["init"]["pass"]["TreeNode"]["value"], 6.00, places=2)
self.assertEqual(doc["cells"]["tree"]["init"]["pass"]["TreeNode"]["pass"]["string"], "C3")
self.assertEqual(doc["cells"]["tree"]["init"]["pass"]["TreeNode"]["fail"]["string"], "C6")
self.assertEqual(doc["cells"]["tree"]["init"]["fail"]["TreeNode"]["field"], "y")
self.assertAlmostEqual(doc["cells"]["tree"]["init"]["fail"]["TreeNode"]["value"], 2.00, places=2)
self.assertEqual(doc["cells"]["tree"]["init"]["fail"]["TreeNode"]["pass"]["string"], "C0")
self.assertEqual(doc["cells"]["tree"]["init"]["fail"]["TreeNode"]["fail"]["string"], "C0")
engine, = PFAEngine.fromJson(doc)
self.assertEqual(engine.action({"x": 2.0, "y": 3.0}), "C3")
self.assertEqual(engine.action({"x": 2.0, "y": 8.0}), "C6")
self.assertEqual(engine.action({"x": 7.0, "y": 1.0}), "C0")
self.assertEqual(engine.action({"x": 7.0, "y": 5.0}), "C0")
doc = tree.pfaDocument(
{"type": "record", "name": "Datum", "fields": [{"name": "x", "type": "double"}, {"name": "y", "type": "double"}]},
"TreeNode",
nodeScores=True, datasetSize=True, predictandDistribution=True, predictandUnique=True, entropy=True, gain=True)
# look(doc, maxDepth=8)
engine, = PFAEngine.fromJson(doc)
def testCartMustBuildCategoricalNumerical(self):
random.seed(12345)
numpy.seterr(divide="ignore", invalid="ignore")
dataset = Dataset.fromIterable(((a, b, z) for (x, y, z, a, b, c) in TestProducerCart.data()), 100000, ("a", "b", "z"))
tree = TreeNode.fromWholeDataset(dataset, "z")
tree.splitMaxDepth(2)
doc = tree.pfaDocument({"type": "record", "name": "Datum", "fields": [{"name": "a", "type": "string"}, {"name": "b", "type": "string"}]}, "TreeNode")
# look(doc, maxDepth=8)
self.assertEqual(doc["cells"]["tree"]["init"]["field"], "a")
self.assertEqual(doc["cells"]["tree"]["init"]["value"], ["A0", "A1", "A2", "A3"])
self.assertEqual(doc["cells"]["tree"]["init"]["pass"]["TreeNode"]["field"], "b")
self.assertEqual(doc["cells"]["tree"]["init"]["pass"]["TreeNode"]["value"], ["B6", "B8"])
self.assertAlmostEqual(doc["cells"]["tree"]["init"]["pass"]["TreeNode"]["pass"]["double"], 8.02, places=2)
self.assertAlmostEqual(doc["cells"]["tree"]["init"]["pass"]["TreeNode"]["fail"]["double"], 5.00, places=2)
self.assertEqual(doc["cells"]["tree"]["init"]["fail"]["TreeNode"]["field"], "b")
self.assertEqual(doc["cells"]["tree"]["init"]["fail"]["TreeNode"]["value"], ["B0"])
self.assertAlmostEqual(doc["cells"]["tree"]["init"]["fail"]["TreeNode"]["pass"]["double"], 1.09, places=2)
self.assertAlmostEqual(doc["cells"]["tree"]["init"]["fail"]["TreeNode"]["fail"]["double"], 2.00, places=2)
engine, = PFAEngine.fromJson(doc)
self.assertAlmostEqual(engine.action({"a": "A1", "b": "B6"}), 8.02, places=2)
self.assertAlmostEqual(engine.action({"a": "A1", "b": "B2"}), 5.00, places=2)
self.assertAlmostEqual(engine.action({"a": "A5", "b": "B0"}), 1.09, places=2)
self.assertAlmostEqual(engine.action({"a": "A5", "b": "B4"}), 2.00, places=2)
doc = tree.pfaDocument(
{"type": "record", "name": "Datum", "fields": [{"name": "a", "type": "string"}, {"name": "b", "type": "string"}]},
"TreeNode",
nodeScores=True, datasetSize=True, predictandUnique=True, nTimesVariance=True, gain=True)
# look(doc, maxDepth=8)
engine, = PFAEngine.fromJson(doc)
def testCartMustBuildCategoricalCategorical(self):
random.seed(12345)
numpy.seterr(divide="ignore", invalid="ignore")
dataset = Dataset.fromIterable(((a, b, c) for (x, y, z, a, b, c) in TestProducerCart.data()), 100000, ("a", "b", "c"))
tree = TreeNode.fromWholeDataset(dataset, "c")
tree.splitMaxDepth(2)
doc = tree.pfaDocument({"type": "record", "name": "Datum", "fields": [{"name": "a", "type": "string"}, {"name": "b", "type": "string"}]}, "TreeNode")
# look(doc, maxDepth=8)
self.assertEqual(doc["cells"]["tree"]["init"]["field"], "a")
self.assertEqual(doc["cells"]["tree"]["init"]["value"], ["A0", "A1", "A2", "A3"])
self.assertEqual(doc["cells"]["tree"]["init"]["pass"]["TreeNode"]["field"], "b")
self.assertEqual(doc["cells"]["tree"]["init"]["pass"]["TreeNode"]["value"], ["B6", "B8"])
self.assertEqual(doc["cells"]["tree"]["init"]["pass"]["TreeNode"]["pass"]["string"], "C6")
self.assertEqual(doc["cells"]["tree"]["init"]["pass"]["TreeNode"]["fail"]["string"], "C3")
self.assertEqual(doc["cells"]["tree"]["init"]["fail"]["TreeNode"]["field"], "b")
self.assertEqual(doc["cells"]["tree"]["init"]["fail"]["TreeNode"]["value"], ["B0"])
self.assertEqual(doc["cells"]["tree"]["init"]["fail"]["TreeNode"]["pass"]["string"], "C0")
self.assertEqual(doc["cells"]["tree"]["init"]["fail"]["TreeNode"]["fail"]["string"], "C0")
engine, = PFAEngine.fromJson(doc)
self.assertEqual(engine.action({"a": "A1", "b": "B6"}), "C6")
self.assertEqual(engine.action({"a": "A1", "b": "B2"}), "C3")
self.assertEqual(engine.action({"a": "A5", "b": "B0"}), "C0")
self.assertEqual(engine.action({"a": "A5", "b": "B4"}), "C0")
doc = tree.pfaDocument(
{"type": "record", "name": "Datum", "fields": [{"name": "a", "type": "string"}, {"name": "b", "type": "string"}]},
"TreeNode",
nodeScores=True, datasetSize=True, predictandDistribution=True, predictandUnique=True, entropy=True, gain=True)
# look(doc, maxDepth=8)
engine, = PFAEngine.fromJson(doc)
if __name__ == "__main__":
unittest.main()
|
lightreid/models/backbones/__init__.py | nataliamiccini/light-reid | 296 | 12767883 | <gh_stars>100-1000
from .resnet import resnet18, resnet34, resnet50, resnet101, resnet152
from .resnet import resnet18ibna, resnet34ibna, resnet50ibna, resnet101ibna, resnet152ibna
from .transformers import *
__cnnbackbone_factory = {
# resnet series
'resnet18': resnet18,
'resnet34': resnet34,
'resnet50': resnet50,
'resnet101': resnet101,
'resnet152': resnet152,
'resnet18ibna': resnet18ibna,
'resnet34ibna': resnet34ibna,
'resnet50ibna': resnet50ibna,
'resnet101ibna': resnet101ibna,
'resnet152ibna': resnet152ibna,
# vision transformer series
'vit_small_patch16_224': vit_small_patch16_224,
'vit_base_patch16_224': vit_base_patch16_224,
'vit_base_patch32_224': vit_base_patch32_224,
'vit_base_patch16_384': vit_base_patch16_384,
'vit_base_patch32_384': vit_base_patch32_384,
'vit_large_patch16_224': vit_large_patch16_224,
'vit_large_patch32_224': vit_large_patch32_224,
'vit_large_patch16_384': vit_large_patch16_384,
'vit_large_patch32_384': vit_large_patch32_384,
'vit_base_patch16_224_in21k': vit_base_patch16_224_in21k,
'vit_base_patch32_224_in21k': vit_base_patch32_224_in21k,
'vit_large_patch16_224_in21k': vit_large_patch16_224_in21k,
'vit_large_patch32_224_in21k': vit_large_patch32_224_in21k,
'vit_huge_patch14_224_in21k': vit_huge_patch14_224_in21k,
'vit_deit_tiny_patch16_224': vit_deit_tiny_patch16_224,
'vit_deit_small_patch16_224': vit_deit_small_patch16_224,
'vit_deit_base_patch16_224': vit_deit_base_patch16_224,
'vit_deit_base_patch16_384': vit_deit_base_patch16_384,
'vit_deit_tiny_distilled_patch16_224': vit_deit_tiny_distilled_patch16_224,
'vit_deit_small_distilled_patch16_224': vit_deit_small_distilled_patch16_224,
'vit_deit_base_distilled_patch16_224': vit_deit_base_distilled_patch16_224,
'vit_deit_base_distilled_patch16_384': vit_deit_base_distilled_patch16_384,
'vit_base_patch16_224_miil_in21k': vit_base_patch16_224_miil_in21k,
'vit_base_patch16_224_miil': vit_base_patch16_224_miil,
}
def build_cnnbackbone(name, pretrained=True, **kwargs):
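    # e.g. build_cnnbackbone('resnet50ibna', pretrained=True)  # illustrative; extra
    # kwargs are passed straight through to the selected backbone constructor.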
return __cnnbackbone_factory[name](pretrained=pretrained, **kwargs) |
lib/blackboxprotobuf/lib/protofile.py | nccgroup/blackboxprotobuf | 261 | 12767899 | """
Python methods for importing and exporting '.proto' files from the BBP type
definition format.
"""
# TODO get custom exceptions for these methods
import io
import re
import logging
from blackboxprotobuf.lib.exceptions import TypedefException
import blackboxprotobuf.lib.api
PROTO_FILE_TYPE_MAP = {
"uint": "uint64",
"int": "int64",
"sint": "sint64",
"fixed32": "fixed32",
"sfixed32": "sfixed32",
"float": "float",
"fixed64": "fixed64",
"sfixed64": "sfixed64",
"double": "double",
"bytes": "bytes",
"bytes_hex": "bytes",
"string": "string",
}
PACKABLE_TYPES = [
"uint",
"int",
"sint",
"fixed32",
"sfixed32",
"float",
"fixed64",
"sfixed64",
"double",
]
# Inverse of the above, but we have to include more types
PROTO_FILE_TYPE_TO_BBP = {
"double": "double",
"float": "float",
"int32": "int",
"int64": "int",
"uint32": "uint",
"uint64": "uint",
"sint32": "sint",
"sint64": "sint",
"fixed32": "fixed32",
"fixed64": "fixed64",
"sfixed32": "sfixed32",
"sfixed64": "sfixed64",
"bool": "uint",
"string": "string",
# should be default_binary_type, but can't handle that well here
"bytes": "bytes",
}
NAME_REGEX = re.compile(r"\A[a-zA-Z_][a-zA-Z0-9_]*\Z")
# add packed types to the list
for packable_type in PACKABLE_TYPES:
packed_type = "packed_" + packable_type
PROTO_FILE_TYPE_MAP[packed_type] = PROTO_FILE_TYPE_MAP[packable_type]
def _print_message(message_name, typedef, output_file, depth=0):
indent = u" " * depth
if not NAME_REGEX.match(message_name):
raise TypedefException("Message name: %s is not valid" % message_name)
# sort typedef for better looking output
typedef = blackboxprotobuf.lib.api.sort_typedef(typedef)
message_name = message_name.strip()
output_file.write(u"\n")
output_file.write(indent)
output_file.write(u"message %s {\n" % message_name)
for field_number, field_typedef in typedef.items():
# TODO Default to all fields as repeated? or optional
proto_type = None
field_name = None
field_options = ""
        # A repeated field with one element is indistinguishable from a
        # non-repeated field, so we only mark a field as repeated when we have
        # proof that it repeats, which might still be wrong sometimes.
        # Maybe some sort of protobuf discovery tool can detect this.
is_repeated = field_typedef.get("seen_repeated", False)
if "name" in field_typedef and field_typedef["name"] != "":
field_name = field_typedef["name"]
field_name = field_name.strip()
if not NAME_REGEX.match(field_name):
field_name = None
if field_name is None:
field_name = u"field%s" % field_number
if field_typedef["type"] == "message":
# If we have multiple typedefs, this means is something like the Any
# message, and has to be manually reparsed to each type
if "alt_typedefs" in field_typedef:
proto_type = "bytes"
else:
proto_type = field_name + "_type"
_print_message(
proto_type, field_typedef["message_typedef"], output_file, depth + 1
)
else:
if field_typedef["type"] not in PROTO_FILE_TYPE_MAP:
raise TypedefException(
"Type %s does not have a mapping to protobuf types."
% field_typedef["type"]
)
proto_type = PROTO_FILE_TYPE_MAP[field_typedef["type"]]
# we're using proto3 syntax. Repeated numeric fields are packed by default
# if it's repeated and not packed, then make sure we specify it's not packed
if is_repeated and field_typedef["type"] in PACKABLE_TYPES:
field_options = u" [packed=false]"
            # if it's a packed type, we'll explicitly set that too; can't hurt
elif field_typedef["type"].startswith("packed_"):
field_options = u" [packed=true]"
is_repeated = True
output_file.write(indent)
output_file.write(
u" %s%s %s = %s%s;\n"
% (
"repeated " if is_repeated else "",
proto_type,
field_name,
field_number,
field_options,
)
)
output_file.write(indent)
output_file.write(u"}\n\n")
def export_proto(typedef_map, output_filename=None, output_file=None, package=None):
"""Export the given type definitons as a '.proto' file. Typedefs are
expected as a dictionary of {'message_name': typedef }
Write to output_file or output_filename if provided, otherwise return a string
output_filename will be overwritten if it exists
"""
return_string = False
if output_filename is not None:
output_file = io.open(output_filename, "w+")
if output_file is None:
return_string = True
output_file = io.StringIO()
# preamble
output_file.write(u'syntax = "proto3";\n\n')
if package:
output_file.write(u"package %s;\n\n" % package)
for typedef_name, typedef in typedef_map.items():
_print_message(typedef_name, typedef, output_file)
if return_string:
return output_file.getvalue()
# close the file if we opened it
elif output_filename is not None:
output_file.close()
return None
MESSAGE_START_REGEX = re.compile(r"^message +([a-zA-Z_0-9]+) *{.*")
FIELD_REGEX = re.compile(
r"^ *(repeated|optional|required)? *([a-zA-Z0-9_]+) +([a-zA-Z0-9_]+) += +([0-9]+) *(\[[a-z]+=[a-z]*\])?.*;.*$"
)
SYNTAX_REGEX = re.compile(r'^ *syntax += +"(proto\d)" *;.*')
ENUM_REGEX = re.compile(r"^ *enum +([a-zA-Z0-9_]+) *{.*")
PACKAGE_REGEX = re.compile(r"^ *package +([a-zA-Z0-9_.]+) *;.*")
def import_proto(config, input_string=None, input_filename=None, input_file=None):
typedef_map = {}
if input_string is not None:
input_file = io.StringIO(input_string)
if input_file is None and input_filename is not None:
input_file = io.open(input_filename, "r")
if input_file is None:
raise ValueError("No file provided to import_proto")
syntax_version = "proto2"
package_prefix = ""
enum_names = []
message_trees = []
message_names = []
line = input_file.readline()
while line:
line = line.strip()
if line.startswith("syntax") and SYNTAX_REGEX.match(line):
syntax_version = SYNTAX_REGEX.match(line).group(1)
elif line.startswith("package") and PACKAGE_REGEX.match(line):
package_prefix = PACKAGE_REGEX.match(line).group(1) + "."
elif line.startswith("import"):
logging.warn(
"Proto file has import which is not supported "
"by the parser. Ensure the imported files are "
"processed first: %s",
line,
)
elif line.startswith("enum") and ENUM_REGEX.match(line):
enum_name = _parse_enum(line, input_file)
enum_names.append(enum_name)
elif line.startswith("message") and MESSAGE_START_REGEX.match(line):
message_tree = _preparse_message(line, input_file)
message_trees.append(message_tree)
line = input_file.readline()
# TODO parse the message data
for tree in message_trees:
new_message_names, new_enum_names = _collect_names(package_prefix, tree)
enum_names += new_enum_names
message_names += new_message_names
logging.debug("Got the following enum_names: %s", enum_names)
logging.debug("Got the following message_names: %s", message_names)
for tree in message_trees:
_parse_message(
tree,
typedef_map,
message_names,
enum_names,
package_prefix,
syntax_version == "proto3",
config,
)
return typedef_map
def _parse_enum(line, input_file):
"""Parse an enum out of the file. Goes from enum declaration to next }
Returns the enum's name
"""
enum_name = ENUM_REGEX.match(line).group(1)
# parse until the next '}'
while "}" not in line:
line = input_file.readline()
if not line:
raise ValueError("Did not find close of enum")
return enum_name
def _preparse_message(line, input_file):
"""Parse out a message name and the lines that make it up"""
message_name = MESSAGE_START_REGEX.match(line).group(1)
message_lines = []
inner_enums = []
inner_messages = []
while "}" not in line:
line = input_file.readline()
if not line:
raise ValueError("Did not find close of message")
line = line.strip()
if line.startswith("enum") and ENUM_REGEX.match(line):
enum_name = _parse_enum(line, input_file)
inner_enums.append(enum_name)
elif line.startswith("message") and MESSAGE_START_REGEX.match(line):
message_tree = _preparse_message(line, input_file)
inner_messages.append(message_tree)
# not an inner enum or message
else:
message_lines.append(line)
return {
"name": message_name,
"data": message_lines,
"enums": inner_enums,
"inner_messages": inner_messages,
}
def _collect_names(prefix, message_tree):
message_names = []
enum_names = []
name = prefix + message_tree["name"]
message_names.append(name)
for enum_name in message_tree["enums"]:
enum_names.append(prefix + enum_name)
for inner_message in message_tree["inner_messages"]:
new_message_names, new_enum_names = _collect_names(name + ".", inner_message)
message_names += new_message_names
enum_names += new_enum_names
return message_names, enum_names
def _check_message_name(current_path, name, known_message_names, config):
# Verify message name against preparsed message names and global
# known_messages
# For example, if we have:
# Message.InnerMesage
# referenced from:
# PackageA.Message2
# we would look up:
# PackageA.Message2.Message.InnerMessage
# PackageA.Message.InnerMessage
# should also work for enums
if name in config.known_types:
return True
# search for anything under a common prefix in known_message_names
logging.debug("Testing message name: %s", name)
prefix_options = [""]
for part in current_path.split("."):
if part:
prefix_options = [prefix_options[0] + part + "."] + prefix_options
logging.debug("prefix_options: %s", prefix_options)
for prefix in prefix_options:
logging.debug("Testing message name: %s", prefix + name)
if prefix + name in known_message_names:
return prefix + name
# remove the last bit of the prefix
if "." not in prefix:
break
prefix = ".".join(prefix.split(".")[:-1])
logging.debug(
"Message %s not found from %s Known names are: %s",
name,
current_path,
known_message_names,
)
return None
def _parse_message(
message_tree, typdef_map, known_message_names, enum_names, prefix, is_proto3, config
):
message_typedef = {}
message_name = prefix + message_tree["name"]
prefix = message_name + "."
# parse the actual message fields
for line in message_tree["data"]:
# lines should already be stripped and should not have messages or enums
# logging.debug("Line before assert: %s", line)
assert all([not line.strip().startswith(x) for x in ["message ", "enum "]])
# Check if the line matches the field regex
match = FIELD_REGEX.match(line)
if match:
field_number, field_typedef = _parse_field(
match, known_message_names, enum_names, prefix, is_proto3, config
)
message_typedef[field_number] = field_typedef
    # add the message to the returned typedefs
logging.debug("Adding message %s to typedef maps", message_name)
typdef_map[message_name] = message_typedef
for inner_message in message_tree["inner_messages"]:
# TODO prefix should be added to?
_parse_message(
inner_message,
typdef_map,
known_message_names,
enum_names,
prefix,
is_proto3,
config,
)
# parse a field into a dictionary for the typedef
def _parse_field(match, known_message_names, enum_names, prefix, is_proto3, config):
typedef = {}
field_name = match.group(3)
if not field_name:
raise ValueError("Could not parse field name from line: %s" % match)
typedef["name"] = field_name
field_number = match.group(4)
if not field_number:
raise ValueError("Could not parse field number from line: %s" % match)
# figure out repeated
field_rule = match.group(1)
is_repeated = False
if field_rule and "repeated" in field_rule:
is_repeated = True
typedef["seen_repeated"] = True
field_type = match.group(2)
if not field_type:
raise ValueError("Could not parse field type from line: %s" % match)
# check normal types
bbp_type = PROTO_FILE_TYPE_TO_BBP.get(field_type, None)
if not bbp_type:
logging.debug("Got non-basic type: %s, checking enums", field_type)
# check enum names
if _check_message_name(prefix, field_type, enum_names, config):
# enum = uint
bbp_type = "uint"
if not bbp_type:
# Not enum or normal type, check messages
message_name = _check_message_name(
prefix, field_type, known_message_names, config
)
if message_name:
bbp_type = "message"
typedef["message_type_name"] = message_name
if not bbp_type:
# If we don't have a type now, then fail
raise ValueError(
"Could not get a type for field %s: %s" % (field_name, field_type)
)
# figure out packed
# default based on repeated + proto3, fallback to options
field_options = match.group(5)
is_packed = is_repeated and is_proto3 and (field_type in PACKABLE_TYPES)
if is_packed and field_options and "packed=false" in field_options:
is_packed = False
elif is_repeated and field_options and "packed=true" in field_options:
is_packed = True
# make sure the type lines up with packable
if is_packed and bbp_type not in PACKABLE_TYPES:
raise ValueError(
"Field %s set as packable, but not a packable type: %s"
% (field_name, bbp_type)
)
if is_packed:
bbp_type = "packed_" + bbp_type
typedef["type"] = bbp_type
logging.debug("Parsed field number %s: %s", field_number, typedef)
return field_number, typedef
|
src/genie/libs/parser/nxos/tests/ShowSpanningTreeDetail/cli/equal/golden_output_1_expected.py | balmasea/genieparser | 204 | 12767934 | <filename>src/genie/libs/parser/nxos/tests/ShowSpanningTreeDetail/cli/equal/golden_output_1_expected.py
expected_output = {
'mstp': {
'mst_instances': {
0: {
'mst_id': 0,
'bridge_priority': 32768,
'bridge_sysid': 0,
'bridge_address': '00e3.04ff.ad03',
'topology_change_flag': False,
'topology_detected_flag': False,
'topology_changes': 0,
'time_since_topology_change': '142:22:13',
'times': {
'hold': 1,
'topology_change': 70,
'notification': 10,
'max_age': 40,
'hello': 10,
'forwarding_delay': 30,
},
'timers' : {
'hello': 0,
'topology_change': 0,
'notification': 0,
},
'root_of_the_spanning_tree': True,
'interfaces': {
'Port-channel30': {
'name': 'Port-channel30',
'bridge_assurance_inconsistent': True,
'vpc_peer_link_inconsistent': True,
'port_num': 4125,
'status': 'broken',
'cost': 500,
'port_priority': 128,
'port_identifier': '128.4125',
'designated_root_priority': 32768,
'designated_root_address': '0023.04ff.ad03',
'designated_bridge_priority': 61440,
'designated_bridge_address': '4055.39ff.fee7',
'designated_port_id': '128.4125',
'designated_path_cost': 0,
'timers': {
'message_age': 0,
'forward_delay': 0,
'hold': 0,
},
'port_type' : 'network',
'number_of_forward_transitions': 0,
'link_type': 'point-to-point',
'internal': True,
'peer_type': 'STP',
'pvst_simulation': True,
'counters': {
'bpdu_sent': 110,
'bpdu_received': 0
}
}
}
}
},
'hello_time': 10,
'max_age': 40,
'forwarding_delay': 30
}
}
|
hyperglass/cache/base.py | blkmajik/hyperglass | 298 | 12767935 | <reponame>blkmajik/hyperglass<gh_stars>100-1000
"""Base Redis cache handler."""
# Standard Library
import re
import json
from typing import Any, Optional
# Third Party
from pydantic import SecretStr
class BaseCache:
"""Redis cache handler."""
def __init__(
self,
db: int,
host: str = "localhost",
port: int = 6379,
password: Optional[SecretStr] = None,
decode_responses: bool = True,
**kwargs: Any,
) -> None:
"""Initialize Redis connection."""
self.db: int = db
self.host: str = str(host)
self.port: int = port
self.password: Optional[SecretStr] = password
self.decode_responses: bool = decode_responses
self.redis_args: dict = kwargs
def __repr__(self) -> str:
"""Represent class state."""
return "HyperglassCache(db={}, host={}, port={}, password={})".format(
self.db, self.host, self.port, self.password
)
def parse_types(self, value: str) -> Any:
"""Parse a string to standard python types."""
def parse_string(str_value: str):
is_float = (re.compile(r"^(\d+\.\d+)$"), float)
is_int = (re.compile(r"^(\d+)$"), int)
            is_bool = (re.compile(r"^(True|true|False|false)$"), lambda v: v.lower() == "true")
is_none = (re.compile(r"^(None|none|null|nil|\(nil\))$"), lambda v: None)
is_jsonable = (re.compile(r"^[\{\[].*[\}\]]$"), json.loads)
for pattern, factory in (is_float, is_int, is_bool, is_none, is_jsonable):
if isinstance(str_value, str) and bool(re.match(pattern, str_value)):
str_value = factory(str_value)
break
return str_value
if isinstance(value, str):
value = parse_string(value)
elif isinstance(value, bytes):
value = parse_string(value.decode("utf-8"))
elif isinstance(value, list):
value = [parse_string(i) for i in value]
elif isinstance(value, tuple):
value = tuple(parse_string(i) for i in value)
elif isinstance(value, dict):
value = {k: self.parse_types(v) for k, v in value.items()}
return value
|
saleor/account/migrations/0059_merge_20220221_1025.py | victor-abz/saleor | 1,392 | 12767937 | <filename>saleor/account/migrations/0059_merge_20220221_1025.py
# Generated by Django 3.2.12 on 2022-02-21 10:25
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
("account", "0057_clear_user_addresses"),
("account", "0058_update_user_search_document"),
]
operations = []
|
sematch/classify.py | dhimmel/sematch | 397 | 12767949 | <gh_stars>100-1000
from gsitk.datasets.datasets import DatasetManager
from nltk.corpus import opinion_lexicon
from collections import Counter
def prepare_lexicon(process=True, dim=250, save=False):
if process:
dm = DatasetManager()
data = dm.prepare_datasets()
nega = set(opinion_lexicon.negative())
posi = set(opinion_lexicon.positive())
lexicon = opinion_lexicon.words()
lexicon_dic = {x: 0 for x in lexicon}
for t in data['vader']['text']:
for w in t:
if w in lexicon_dic:
lexicon_dic[w] += 1
for t in data['sentiment140']['text']:
for w in t:
if w in lexicon_dic:
lexicon_dic[w] += 1
L = Counter(lexicon_dic).most_common(4000)
N = []
P = []
for w, _ in L:
if w in nega:
N.append(w)
elif w in posi:
P.append(w)
l = P[:dim] + N[:dim]
if save:
with open('senti.lexicon', 'w') as f:
for d in l:
f.write(d)
f.write('\n')
return l
else:
with open('senti.lexicon', 'r') as f:
data = [line.strip() for line in f]
return data
from gensim.models import Word2Vec
from numpy import array, dot
from gensim import matutils
import collections
import functools
class memoized(object):
def __init__(self, func):
self.func = func
self.cache = {}
def __call__(self, *args):
if not isinstance(args, collections.Hashable):
# uncacheable. a list, for instance.
# better to not cache than blow up.
return self.func(*args)
if args in self.cache:
return self.cache[args]
else:
value = self.func(*args)
self.cache[args] = value
return value
def __repr__(self):
'''Return the function's docstring.'''
return self.func.__doc__
def __get__(self, obj, objtype):
'''Support instance methods.'''
return functools.partial(self.__call__, obj)
class WordRelatedness:
def __init__(self, model):
self._model = model
self._words = set([w for w in self._model.vocab])
def check_word(self, word):
return True if word in self._words else False
def check_words(self, words):
return [w for w in words if self.check_word(w)]
def similar_words(self, word):
return self._model.most_similar(word) if self.check_word(word) else []
@memoized
def word_similarity(self, w1, w2):
return self._model.similarity(w1, w2) if self.check_word(w1) and self.check_word(w2) else 0.0
def words_similarity(self, words1, words2):
w1 = self.check_words(words1)
w2 = self.check_words(words2)
return self._model.n_similarity(w1, w2) if w1 and w2 else 0.0
def word_vector(self, w):
return matutils.unitvec(self._model[w]) if self.check_word(w) else None
def words_vector(self, words):
v_words = [self._model[w] for w in self.check_words(words)]
return matutils.unitvec(array(v_words).mean(axis=0)) if v_words else None
def consine_similarity(self, v1, v2):
return dot(v1, v2)
from gsitk.features.word2vec import Word2VecFeatures
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.feature_extraction import DictVectorizer
from sklearn.pipeline import Pipeline
import numpy as np
import nltk
class SimVectorizer:
def __init__(self, senti_lexicon):
w2v_feat = Word2VecFeatures(w2v_model_path='/data/w2vmodel_500d_5mc')
sim_model = WordRelatedness(w2v_feat.model)
self._sim = sim_model.word_similarity
self._lexicon = senti_lexicon
self._N = len(self._lexicon)
# self._vectorizer = DictVectorizer(sparse=False)
self._stopwords = set(nltk.corpus.stopwords.words('english'))
def word_process(self, words):
return [w for w in words if w not in self._stopwords and len(w) > 2]
def similarity(self, words, feature):
return max([self._sim(w, feature) for w in words] + [0.0])
def transform(self, X):
X_transformed = np.zeros((len(X), self._N))
for i, x in enumerate(X):
# if i % 10000 == 0:
# print(i)
words = self.word_process(x)
words = set(words)
for j, f in enumerate(self._lexicon):
X_transformed[i, j] = self.similarity(words, f)
return X_transformed
from nltk.corpus import opinion_lexicon
from collections import Counter
import numpy as np
import nltk
Punc = [".", "!", "?", ",", ";", ":", "-", "'", "\"",
"!!", "!!!", "??", "???", "?!?", "!?!", "?!?!", "!?!?"]
Negate = ["aint", "arent", "cannot", "cant", "couldnt", "darent", "didnt", "doesnt",
"ain't", "aren't", "can't", "couldn't", "daren't", "didn't", "doesn't",
"dont", "hadnt", "hasnt", "havent", "isnt", "mightnt", "mustnt", "neither",
"don't", "hadn't", "hasn't", "haven't", "isn't", "mightn't", "mustn't",
"neednt", "needn't", "never", "none", "nope", "nor", "not", "nothing", "nowhere",
"oughtnt", "shant", "shouldnt", "uhuh", "wasnt", "werent",
"oughtn't", "shan't", "shouldn't", "uh-uh", "wasn't", "weren't",
"without", "wont", "wouldnt", "won't", "wouldn't", "rarely", "seldom", "despite"]
Booster = ["absolutely", "amazingly", "awfully", "completely", "considerably",
"decidedly", "deeply", "effing", "enormously", "entirely", "especially", "exceptionally",
"extremely", "fabulously", "flipping", "flippin", "fricking", "frickin", "frigging",
"friggin", "fully", "fucking", "greatly", "hella", "highly", "hugely", "incredibly",
"intensely", "majorly", "more", "most", "particularly", "purely", "quite", "really",
"remarkably", "so", "substantially", "thoroughly", "totally", "tremendously",
"uber", "unbelievably", "unusually", "utterly", "very", "almost", "barely", "hardly",
"just enough", "kind of", "kinda", "kindof", "kind-of", "less", "little", "marginally",
"occasionally", "partly", "scarcely", "slightly", "somewhat", "sort of", "sorta",
"sortof", "sort-of"]
Extra_Lexicon = Punc + Negate + Booster
def create_lexicon(corpus, embedding, num=250):
stopwords = set(nltk.corpus.stopwords.words('english'))
V = set([w for w in embedding.vocab])
tags = corpus['polarity']
texts = corpus['text']
P = [t for i, t in texts.iteritems() if int(tags[i]) == 1]
N = [t for i, t in texts.iteritems() if int(tags[i]) == -1]
def word_count(X):
d = {}
for x in X:
for w in x:
if w not in stopwords and w in V and len(w) > 1:
d[w] = d[w] + 1 if w in d else 1
return d
P_dict = word_count(P)
N_dict = word_count(N)
L_p = Counter(P_dict).most_common(num)
L_n = Counter(N_dict).most_common(num)
Words_p, Counts_p = zip(*L_p)
Words_n, Counts_n = zip(*L_n)
P_sum = sum(Counts_p)
N_sum = sum(Counts_n)
P_score = [x * 1.0 / P_sum for x in Counts_p]
N_score = [x * 1.0 / N_sum for x in Counts_n]
return Words_p + Words_n, P_score + N_score
def prepare_lexicon(corpus, embedding, num=250, extra=False):
V = set([w for w in embedding.vocab])
neg = set(opinion_lexicon.negative())
pos = set(opinion_lexicon.positive())
senti_lexicon = opinion_lexicon.words()
senti_lexicon = [w for w in senti_lexicon if w in V]
lexicon_dic = {x: 0 for x in senti_lexicon}
for sent in corpus:
for w in sent:
if w in lexicon_dic:
lexicon_dic[w] += 1
L = Counter(lexicon_dic).most_common(5000)
N = []
N_count = []
P = []
P_count = []
for word, count in L:
if word in neg:
N.append(word)
N_count.append(count)
elif word in pos:
P.append(word)
P_count.append(count)
Senti_L = P[:num] + N[:num]
P_sum = sum(P_count[:num])
P_score = [x * 1.0 / P_sum for x in P_count[:num]]
N_sum = sum(N_count[:num])
N_score = [x * 1.0 / N_sum for x in N_count[:num]]
Senti_W = P_score + N_score
if extra:
Extra_L = [l for l in Extra_Lexicon if l in V]
Extra_W = [1.0 for l in Extra_L]
return Senti_L + Extra_L, Senti_W + Extra_W
return Senti_L, Senti_W
class SimVectorizer:
def __init__(self, lexicon, weight, embedding, stopword=True, weighted=False):
self._stopwords = set(nltk.corpus.stopwords.words('english'))
self._model = embedding
self._W = weight
self._V = set([w for w in self._model.vocab])
self._L = self.word_vectors(lexicon).T
self._filter = lambda x: self.vectorization(self.word_process(x))
self.sim_vectorization = self._filter if stopword else self.vectorization
self._weighter = lambda x: np.multiply(self.sim_vectorization(x), self._W)
self.sim_vector = self._weighter if weighted else self.sim_vectorization
def word_process(self, words):
return [w for w in words if w not in self._stopwords and len(w) > 1]
def word_vectors(self, x):
return np.array([self._model[w] for _, w in enumerate(x) if w in self._V])
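    # Score a text against every lexicon entry as the maximum dot product between the
    # text's word vectors and the pre-transposed lexicon matrix self._L.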
def vectorization(self, x):
v = self.word_vectors(x)
if v.shape[0] == 0:
return np.zeros(self._L.shape[1])
s = np.dot(v, self._L)
return s.max(axis=0)
def transform(self, X):
return np.array([self.sim_vector(x) for _, x in enumerate(X)])
|