max_stars_repo_path (stringlengths 4-245) | max_stars_repo_name (stringlengths 7-115) | max_stars_count (int64 101-368k) | id (stringlengths 2-8) | content (stringlengths 6-1.03M) |
---|---|---|---|---|
example/vendors/admin.py | AllFactors/django-organizations | 855 | 11193080 | <reponame>AllFactors/django-organizations
from django.contrib import admin
from .models import Vendor, VendorUser, VendorOwner
admin.site.register(Vendor)
admin.site.register(VendorUser)
admin.site.register(VendorOwner)
|
tests/r/test_jevons.py | hajime9652/observations | 199 | 11193103 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import shutil
import sys
import tempfile
from observations.r.jevons import jevons
def test_jevons():
"""Test module jevons.py by downloading
jevons.csv and testing shape of
extracted data has 50 rows and 4 columns
"""
test_path = tempfile.mkdtemp()
x_train, metadata = jevons(test_path)
try:
assert x_train.shape == (50, 4)
except:
shutil.rmtree(test_path)
raise
|
cockpit/quantities/norm_test.py | wx-b/cockpit | 367 | 11193118 | """Class for tracking the Norm Test."""
from backpack.extensions import BatchGrad
from cockpit.quantities.quantity import SingleStepQuantity
from cockpit.quantities.utils_transforms import BatchGradTransformsHook_BatchL2Grad
class NormTest(SingleStepQuantity):
"""Quantity class for the norm test.
Note: Norm test as proposed in
- <NAME>., <NAME>., <NAME>., & <NAME>.,
Sample size selection in optimization methods for machine learning (2012).
https://link.springer.com/article/10.1007%2Fs10107-012-0572-5
"""
def extensions(self, global_step):
"""Return list of BackPACK extensions required for the computation.
Args:
global_step (int): The current iteration number.
Returns:
list: (Potentially empty) list with required BackPACK quantities.
"""
ext = []
if self.should_compute(global_step):
ext.append(BatchGrad())
return ext
def extension_hooks(self, global_step):
"""Return list of BackPACK extension hooks required for the computation.
Args:
global_step (int): The current iteration number.
Returns:
[callable]: List of required BackPACK extension hooks for the current
iteration.
"""
hooks = []
if self.should_compute(global_step):
hooks.append(BatchGradTransformsHook_BatchL2Grad())
return hooks
def _compute(self, global_step, params, batch_loss):
"""Track the practical version of the norm test.
Return maximum θ for which the norm test would pass.
The norm test is defined by Equation (3.9) in byrd2012sample.
Args:
global_step (int): The current iteration number.
params ([torch.Tensor]): List of torch.Tensors holding the network's
parameters.
batch_loss (torch.Tensor): Mini-batch loss from current step.
Returns:
float: Maximum θ for which the norm test would pass.
"""
batch_l2_squared = self._fetch_batch_l2_squared_via_batch_grad_transforms(
params, aggregate=True
)
grad_l2_squared = self._fetch_grad_l2_squared(params, aggregate=True)
batch_size = batch_l2_squared.size(0)
var_l1 = self._compute_variance_l1(
batch_size, batch_l2_squared, grad_l2_squared
)
return self._compute_theta_max(batch_size, var_l1, grad_l2_squared).item()
def _compute_theta_max(self, batch_size, var_l1, grad_l2_squared):
"""Return maximum θ for which the norm test would pass.
Args:
batch_size (int): Mini-batch size.
var_l1 (torch.Tensor): ℓ₁ norm of the sample variance.
grad_l2_squared (torch.Tensor): Squared ℓ₂ norm of mini-batch gradient.
Returns:
torch.Tensor: Maximum θ for which the norm test would pass.
"""
return (var_l1 / batch_size / grad_l2_squared).sqrt()
def _compute_variance_l1(self, batch_size, batch_l2_squared, grad_l2_squared):
"""Compute the sample variance ℓ₁ norm.
It shows up in Equations (3.9) and (3.11) in byrd2012sample and relies
on the sample variance (Equation 3.6). The ℓ₁ norm can be computed using
individual gradient squared ℓ₂ norms and the mini-batch gradient squared
ℓ₂ norm.
Args:
batch_size (int): Mini-batch size.
batch_l2_squared (torch.Tensor): Squared ℓ₂ norms of the individual gradients in the mini-batch.
grad_l2_squared (torch.Tensor): Squared ℓ₂ norm of mini-batch gradient.
Returns:
torch.Tensor: The sample variance ℓ₁ norm.
"""
return (1 / (batch_size - 1)) * (
batch_size ** 2 * batch_l2_squared.sum() - batch_size * grad_l2_squared
)
|
src/job-exporter/test/test_ps.py | wyatuestc/pai | 1,417 | 11193127 | # Copyright (c) Microsoft Corporation
# All rights reserved.
#
# MIT License
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and
# to permit persons to whom the Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import os
import sys
import unittest
import base
sys.path.append(os.path.abspath("../src/"))
import ps
class TestPS(base.TestBase):
"""
Test ps.py
"""
def test_parse_ps_result(self):
sample_path = "data/ps_sample.txt"
with open(sample_path, "r") as f:
ps_result = f.read()
parse_result = ps.parse_result(ps_result)
self.assertEqual(4, len(parse_result))
self.assertEqual("D", parse_result[0].state)
self.assertEqual("4", parse_result[0].pid)
self.assertEqual(2 * 1024, parse_result[0].rss)
self.assertEqual("/var/drivers/nvidia/current/bin/nvidia-smi -q -x",
parse_result[0].cmd)
if __name__ == '__main__':
unittest.main()
|
survae/tests/nn/layers/autoregressive/ar_shift.py | alisiahkoohi/survae_flows | 262 | 11193147 | import torch
import torchtestcase
import unittest
import copy
from survae.tests.nn import ModuleTest
from survae.nn.layers.autoregressive import AutoregressiveShift
class AutoregressiveShiftTest(ModuleTest):
def test_layer_is_well_behaved(self):
seq_len = 7
batch_size = 10
features = 6
x = torch.randn(seq_len, batch_size, features)
module = AutoregressiveShift(embed_dim=features)
self.assert_layer_is_well_behaved(module, x)
def test_autoregressive_shift(self):
seq_len = 7
batch_size = 10
features = 6
x = torch.randn(seq_len, batch_size, features)
module = AutoregressiveShift(embed_dim=features)
y = module(x)
# Assert input x at O positions [O O O X] equals output y at O positions [Y O O O]
self.assertEqual(x[:-1], y[1:])
if __name__ == '__main__':
unittest.main()
|
tests/test_item.py | DahnJ/pystac | 130 | 11193153 | from copy import deepcopy
import os
import json
import tempfile
from typing import Any, Dict
import unittest
import pystac
from pystac import Asset, Item
from pystac.validation import validate_dict
import pystac.serialization.common_properties
from pystac.utils import datetime_to_str, get_opt, str_to_datetime, is_absolute_href
from tests.utils import TestCases, assert_to_from_dict
class ItemTest(unittest.TestCase):
def get_example_item_dict(self) -> Dict[str, Any]:
m = TestCases.get_path("data-files/item/sample-item.json")
with open(m) as f:
item_dict: Dict[str, Any] = json.load(f)
return item_dict
def test_to_from_dict(self) -> None:
self.maxDiff = None
item_dict = self.get_example_item_dict()
param_dict = deepcopy(item_dict)
assert_to_from_dict(self, Item, param_dict)
item = Item.from_dict(param_dict)
self.assertEqual(item.id, "CS3-20160503_132131_05")
# test asset creation additional field(s)
self.assertEqual(
item.assets["analytic"].extra_fields["product"],
"http://cool-sat.com/catalog/products/analytic.json",
)
self.assertEqual(len(item.assets["thumbnail"].extra_fields), 0)
# test that the parameter is preserved
self.assertEqual(param_dict, item_dict)
# assert that the parameter is not preserved with
# non-default parameter
_ = Item.from_dict(param_dict, preserve_dict=False)
self.assertNotEqual(param_dict, item_dict)
def test_from_dict_set_root(self) -> None:
item_dict = self.get_example_item_dict()
catalog = pystac.Catalog(id="test", description="test desc")
item = Item.from_dict(item_dict, root=catalog)
self.assertIs(item.get_root(), catalog)
def test_set_self_href_does_not_break_asset_hrefs(self) -> None:
cat = TestCases.test_case_2()
for item in cat.get_all_items():
for asset in item.assets.values():
if is_absolute_href(asset.href):
asset.href = f"./{os.path.basename(asset.href)}"
item.set_self_href("http://example.com/item.json")
for asset in item.assets.values():
self.assertTrue(is_absolute_href(asset.href))
def test_set_self_href_none_ignores_relative_asset_hrefs(self) -> None:
cat = TestCases.test_case_2()
for item in cat.get_all_items():
for asset in item.assets.values():
if is_absolute_href(asset.href):
asset.href = f"./{os.path.basename(asset.href)}"
item.set_self_href(None)
for asset in item.assets.values():
self.assertFalse(is_absolute_href(asset.href))
def test_asset_absolute_href(self) -> None:
item_dict = self.get_example_item_dict()
item = Item.from_dict(item_dict)
rel_asset = Asset("./data.geojson")
rel_asset.set_owner(item)
expected_href = os.path.abspath("./data.geojson")
actual_href = rel_asset.get_absolute_href()
self.assertEqual(expected_href, actual_href)
def test_extra_fields(self) -> None:
item = pystac.Item.from_file(
TestCases.get_path("data-files/item/sample-item.json")
)
item.extra_fields["test"] = "extra"
with tempfile.TemporaryDirectory() as tmp_dir:
p = os.path.join(tmp_dir, "item.json")
item.save_object(include_self_link=False, dest_href=p)
with open(p) as f:
item_json = json.load(f)
self.assertTrue("test" in item_json)
self.assertEqual(item_json["test"], "extra")
read_item = pystac.Item.from_file(p)
self.assertTrue("test" in read_item.extra_fields)
self.assertEqual(read_item.extra_fields["test"], "extra")
def test_clearing_collection(self) -> None:
collection = TestCases.test_case_4().get_child("acc")
assert isinstance(collection, pystac.Collection)
item = next(iter(collection.get_all_items()))
self.assertEqual(item.collection_id, collection.id)
item.set_collection(None)
self.assertIsNone(item.collection_id)
self.assertIsNone(item.get_collection())
item.set_collection(collection)
self.assertEqual(item.collection_id, collection.id)
self.assertIs(item.get_collection(), collection)
def test_datetime_ISO8601_format(self) -> None:
item_dict = self.get_example_item_dict()
item = Item.from_dict(item_dict)
formatted_time = item.to_dict()["properties"]["datetime"]
self.assertEqual("2016-05-03T13:22:30.040000Z", formatted_time)
def test_null_datetime(self) -> None:
item = pystac.Item.from_file(
TestCases.get_path("data-files/item/sample-item.json")
)
with self.assertRaises(pystac.STACError):
Item(
"test",
geometry=item.geometry,
bbox=item.bbox,
datetime=None,
properties={},
)
null_dt_item = Item(
"test",
geometry=item.geometry,
bbox=item.bbox,
datetime=None,
properties={
"start_datetime": datetime_to_str(get_opt(item.datetime)),
"end_datetime": datetime_to_str(get_opt(item.datetime)),
},
)
null_dt_item.validate()
def test_get_set_asset_datetime(self) -> None:
item = pystac.Item.from_file(
TestCases.get_path("data-files/item/sample-item-asset-properties.json")
)
item_datetime = item.datetime
# No property on asset
self.assertEqual(item.get_datetime(item.assets["thumbnail"]), item.datetime)
# Property on asset
self.assertNotEqual(item.get_datetime(item.assets["analytic"]), item.datetime)
self.assertEqual(
item.get_datetime(item.assets["analytic"]),
str_to_datetime("2017-05-03T13:22:30.040Z"),
)
item.set_datetime(
str_to_datetime("2018-05-03T13:22:30.040Z"), item.assets["thumbnail"]
)
self.assertEqual(item.get_datetime(), item_datetime)
self.assertEqual(
item.get_datetime(item.assets["thumbnail"]),
str_to_datetime("2018-05-03T13:22:30.040Z"),
)
def test_read_eo_item_owns_asset(self) -> None:
item = next(iter(TestCases.test_case_1().get_all_items()))
assert len(item.assets) > 0
for asset_key in item.assets:
self.assertEqual(item.assets[asset_key].owner, item)
def test_null_geometry(self) -> None:
m = TestCases.get_path(
"data-files/examples/1.0.0-beta.2/item-spec/examples/null-geom-item.json"
)
with open(m) as f:
item_dict = json.load(f)
validate_dict(item_dict, pystac.STACObjectType.ITEM)
item = Item.from_dict(item_dict)
self.assertIsInstance(item, Item)
item.validate()
item_dict = item.to_dict()
self.assertIsNone(item_dict["geometry"])
self.assertNotIn("bbox", item_dict)
def test_0_9_item_with_no_extensions_does_not_read_collection_data(self) -> None:
item_json = pystac.StacIO.default().read_json(
TestCases.get_path("data-files/examples/hand-0.9.0/010100/010100.json")
)
assert item_json.get("stac_extensions") is None
assert item_json.get("stac_version") == "0.9.0"
did_merge = pystac.serialization.common_properties.merge_common_properties(
item_json
)
self.assertFalse(did_merge)
def test_clone_sets_asset_owner(self) -> None:
cat = TestCases.test_case_2()
item = next(iter(cat.get_all_items()))
original_asset = list(item.assets.values())[0]
assert original_asset.owner is item
clone = item.clone()
clone_asset = list(clone.assets.values())[0]
self.assertIs(clone_asset.owner, clone)
def test_make_asset_href_relative_is_noop_on_relative_hrefs(self) -> None:
cat = TestCases.test_case_2()
item = next(iter(cat.get_all_items()))
asset = list(item.assets.values())[0]
assert not is_absolute_href(asset.href)
original_href = asset.get_absolute_href()
item.make_asset_hrefs_relative()
self.assertEqual(asset.get_absolute_href(), original_href)
def test_from_invalid_dict_raises_exception(self) -> None:
stac_io = pystac.StacIO.default()
catalog_dict = stac_io.read_json(
TestCases.get_path("data-files/catalogs/test-case-1/catalog.json")
)
with self.assertRaises(pystac.STACTypeError):
_ = pystac.Item.from_dict(catalog_dict)
class ItemSubClassTest(unittest.TestCase):
"""This tests cases related to creating classes inheriting from pystac.Item to
ensure that inheritance, class methods, etc. function as expected."""
SAMPLE_ITEM = TestCases.get_path("data-files/item/sample-item.json")
class BasicCustomItem(pystac.Item):
pass
def setUp(self) -> None:
self.stac_io = pystac.StacIO.default()
def test_from_dict_returns_subclass(self) -> None:
item_dict = self.stac_io.read_json(self.SAMPLE_ITEM)
custom_item = self.BasicCustomItem.from_dict(item_dict)
self.assertIsInstance(custom_item, self.BasicCustomItem)
def test_from_file_returns_subclass(self) -> None:
custom_item = self.BasicCustomItem.from_file(self.SAMPLE_ITEM)
self.assertIsInstance(custom_item, self.BasicCustomItem)
def test_clone(self) -> None:
custom_item = self.BasicCustomItem.from_file(self.SAMPLE_ITEM)
cloned_item = custom_item.clone()
self.assertIsInstance(cloned_item, self.BasicCustomItem)
class AssetSubClassTest(unittest.TestCase):
class CustomAsset(Asset):
pass
def setUp(self) -> None:
self.maxDiff = None
with open(TestCases.get_path("data-files/item/sample-item.json")) as src:
item_dict = json.load(src)
self.asset_dict = item_dict["assets"]["analytic"]
def test_from_dict(self) -> None:
asset = self.CustomAsset.from_dict(self.asset_dict)
self.assertIsInstance(asset, self.CustomAsset)
def test_clone(self) -> None:
asset = self.CustomAsset.from_dict(self.asset_dict)
cloned_asset = asset.clone()
self.assertIsInstance(cloned_asset, self.CustomAsset)
|
dbaas/physical/tests/test_form_databaseinfra.py | didindinn/database-as-a-service | 303 | 11193177 | <reponame>didindinn/database-as-a-service
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from django.test import TestCase
from ..forms.database_infra import DatabaseInfraForm
from . import factory
class FormDatabaseInfraTestCase(TestCase):
def setUp(self):
self.engine = factory.EngineFactory()
self.plan = factory.PlanFactory(engine_type=self.engine.engine_type)
self.disk_offering = factory.DiskOfferingFactory()
def _build_basic_form_data(self, plan, disk_offering):
return ({
'plan': plan,
'disk_offering': disk_offering
})
def test_can_create_form_without_args(self):
form = DatabaseInfraForm()
self.assertIsNotNone(form)
def test_can_create_form(self):
data = self._build_basic_form_data(self.plan.id, self.disk_offering.id)
form = DatabaseInfraForm(data)
self.assertEqual(form.data['plan'], self.plan.id)
self.assertEqual(form.data['disk_offering'], self.disk_offering.id)
def test_can_create_form_without_plan(self):
data = self._build_basic_form_data(None, self.disk_offering.id)
form = DatabaseInfraForm(data)
self.assertIsNone(form.data['plan'])
self.assertEqual(form.data['disk_offering'], self.disk_offering.id)
def test_can_create_form_without_plan_and_disk(self):
data = self._build_basic_form_data(None, None)
form = DatabaseInfraForm(data)
self.assertIsNone(form.data['plan'])
self.assertIsNone(form.data['disk_offering'])
def test_can_create_form_without_disk_and_no_plan_disk(self):
plan_without_disk = factory.PlanFactory(
engine_type=self.engine.engine_type
)
plan_without_disk.disk_offering = None
plan_without_disk.save()
data = self._build_basic_form_data(plan_without_disk.id, None)
form = DatabaseInfraForm(data)
self.assertEqual(form.data['plan'], plan_without_disk.id)
self.assertIsNone(form.data['disk_offering'])
def test_can_create_form_without_disk(self):
data = self._build_basic_form_data(self.plan.id, None)
form = DatabaseInfraForm(data)
self.assertEqual(form.data['plan'], self.plan.id)
self.assertEqual(
form.data['disk_offering'], self.plan.disk_offering.id
)
|
Python/Tests/TestData/Grammar/Delimiters.py | techkey/PTVS | 695 | 11193190 | 1(2)
1[2]
{1:2}
1, 2, 3
1[2:3]
1[2:3:4]
1[2::4]
1[::4]
1[...]
1[:,]
fob.oar
fob = 1
fob += 1
fob -= 1
fob *= 1
fob /= 1
fob //= 1
fob %= 1
fob &= 1
fob |= 1
fob ^= 1
fob >>= 1
fob <<= 1
fob **= 1
|
venv/Lib/site-packages/networkx/readwrite/tests/test_text.py | amelliaaas/tugastkc4 | 10,024 | 11193208 | <reponame>amelliaaas/tugastkc4<gh_stars>1000+
import pytest
import networkx as nx
from textwrap import dedent
def test_directed_tree_str():
# Create a directed forest with labels
graph = nx.balanced_tree(r=2, h=2, create_using=nx.DiGraph)
for node in graph.nodes:
graph.nodes[node]["label"] = "node_" + chr(ord("a") + node)
node_target = dedent(
"""
╙── 0
├─╼ 1
│ ├─╼ 3
│ └─╼ 4
└─╼ 2
├─╼ 5
└─╼ 6
"""
).strip()
label_target = dedent(
"""
╙── node_a
├─╼ node_b
│ ├─╼ node_d
│ └─╼ node_e
└─╼ node_c
├─╼ node_f
└─╼ node_g
"""
).strip()
# Basic node case
ret = nx.forest_str(graph, with_labels=False)
print(ret)
assert ret == node_target
# Basic label case
ret = nx.forest_str(graph, with_labels=True)
print(ret)
assert ret == label_target
# Custom write function case
lines = []
ret = nx.forest_str(graph, write=lines.append, with_labels=False)
assert ret is None
assert lines == node_target.split("\n")
# Smoke test to ensure passing the print function works. To properly test
# this case we would need to capture stdout. (for potential reference
# implementation see :class:`ubelt.util_stream.CaptureStdout`)
ret = nx.forest_str(graph, write=print)
assert ret is None
def test_empty_graph():
assert nx.forest_str(nx.DiGraph()) == "╙"
assert nx.forest_str(nx.Graph()) == "╙"
def test_directed_multi_tree_forest():
tree1 = nx.balanced_tree(r=2, h=2, create_using=nx.DiGraph)
tree2 = nx.balanced_tree(r=2, h=2, create_using=nx.DiGraph)
forest = nx.disjoint_union_all([tree1, tree2])
ret = nx.forest_str(forest)
print(ret)
target = dedent(
"""
╟── 0
╎ ├─╼ 1
╎ │ ├─╼ 3
╎ │ └─╼ 4
╎ └─╼ 2
╎ ├─╼ 5
╎ └─╼ 6
╙── 7
├─╼ 8
│ ├─╼ 10
│ └─╼ 11
└─╼ 9
├─╼ 12
└─╼ 13
"""
).strip()
assert ret == target
tree3 = nx.balanced_tree(r=2, h=2, create_using=nx.DiGraph)
forest = nx.disjoint_union_all([tree1, tree2, tree3])
ret = nx.forest_str(forest, sources=[0, 14, 7])
print(ret)
target = dedent(
"""
╟── 0
╎ ├─╼ 1
╎ │ ├─╼ 3
╎ │ └─╼ 4
╎ └─╼ 2
╎ ├─╼ 5
╎ └─╼ 6
╟── 14
╎ ├─╼ 15
╎ │ ├─╼ 17
╎ │ └─╼ 18
╎ └─╼ 16
╎ ├─╼ 19
╎ └─╼ 20
╙── 7
├─╼ 8
│ ├─╼ 10
│ └─╼ 11
└─╼ 9
├─╼ 12
└─╼ 13
"""
).strip()
assert ret == target
ret = nx.forest_str(forest, sources=[0, 14, 7], ascii_only=True)
print(ret)
target = dedent(
"""
+-- 0
: |-> 1
: | |-> 3
: | L-> 4
: L-> 2
: |-> 5
: L-> 6
+-- 14
: |-> 15
: | |-> 17
: | L-> 18
: L-> 16
: |-> 19
: L-> 20
+-- 7
|-> 8
| |-> 10
| L-> 11
L-> 9
|-> 12
L-> 13
"""
).strip()
assert ret == target
def test_undirected_multi_tree_forest():
tree1 = nx.balanced_tree(r=2, h=2, create_using=nx.Graph)
tree2 = nx.balanced_tree(r=2, h=2, create_using=nx.Graph)
tree2 = nx.relabel_nodes(tree2, {n: n + len(tree1) for n in tree2.nodes})
forest = nx.union(tree1, tree2)
ret = nx.forest_str(forest, sources=[0, 7])
print(ret)
target = dedent(
"""
╟── 0
╎ ├── 1
╎ │ ├── 3
╎ │ └── 4
╎ └── 2
╎ ├── 5
╎ └── 6
╙── 7
├── 8
│ ├── 10
│ └── 11
└── 9
├── 12
└── 13
"""
).strip()
assert ret == target
ret = nx.forest_str(forest, sources=[0, 7], ascii_only=True)
print(ret)
target = dedent(
"""
+-- 0
: |-- 1
: | |-- 3
: | L-- 4
: L-- 2
: |-- 5
: L-- 6
+-- 7
|-- 8
| |-- 10
| L-- 11
L-- 9
|-- 12
L-- 13
"""
).strip()
assert ret == target
def test_undirected_tree_str():
# Create a directed forest with labels
graph = nx.balanced_tree(r=2, h=2, create_using=nx.Graph)
# arbitrary starting point
nx.forest_str(graph)
node_target0 = dedent(
"""
╙── 0
├── 1
│ ├── 3
│ └── 4
└── 2
├── 5
└── 6
"""
).strip()
# defined starting point
ret = nx.forest_str(graph, sources=[0])
print(ret)
assert ret == node_target0
# defined starting point
node_target2 = dedent(
"""
╙── 2
├── 0
│ └── 1
│ ├── 3
│ └── 4
├── 5
└── 6
"""
).strip()
ret = nx.forest_str(graph, sources=[2])
print(ret)
assert ret == node_target2
def test_forest_str_errors():
ugraph = nx.complete_graph(3, create_using=nx.Graph)
with pytest.raises(nx.NetworkXNotImplemented):
nx.forest_str(ugraph)
dgraph = nx.complete_graph(3, create_using=nx.DiGraph)
with pytest.raises(nx.NetworkXNotImplemented):
nx.forest_str(dgraph)
def test_overspecified_sources():
"""
When sources are directly specified, we won't be able to determine when we
are in the last component, so there will always be a trailing, leftmost
pipe.
"""
graph = nx.disjoint_union_all(
[
nx.balanced_tree(r=2, h=1, create_using=nx.DiGraph),
nx.balanced_tree(r=1, h=2, create_using=nx.DiGraph),
nx.balanced_tree(r=2, h=1, create_using=nx.DiGraph),
]
)
# defined starting point
target1 = dedent(
"""
╟── 0
╎ ├─╼ 1
╎ └─╼ 2
╟── 3
╎ └─╼ 4
╎ └─╼ 5
╟── 6
╎ ├─╼ 7
╎ └─╼ 8
"""
).strip()
target2 = dedent(
"""
╟── 0
╎ ├─╼ 1
╎ └─╼ 2
╟── 3
╎ └─╼ 4
╎ └─╼ 5
╙── 6
├─╼ 7
└─╼ 8
"""
).strip()
lines = []
nx.forest_str(graph, write=lines.append, sources=graph.nodes)
got1 = chr(10).join(lines)
print("got1: ")
print(got1)
lines = []
nx.forest_str(graph, write=lines.append)
got2 = chr(10).join(lines)
print("got2: ")
print(got2)
assert got1 == target1
assert got2 == target2
|
back/restapi/models/alarm_clock.py | ramonakira/piclodio3 | 120 | 11193213 | from django.core.validators import MaxValueValidator, MinValueValidator
from django.db import models
from restapi.models.web_radio import WebRadio
class AlarmClock(models.Model):
name = models.CharField(max_length=250)
monday = models.BooleanField(default=False)
tuesday = models.BooleanField(default=False)
wednesday = models.BooleanField(default=False)
thursday = models.BooleanField(default=False)
friday = models.BooleanField(default=False)
saturday = models.BooleanField(default=False)
sunday = models.BooleanField(default=False)
hour = models.IntegerField(validators=[
MaxValueValidator(23),
MinValueValidator(0)
])
minute = models.IntegerField(validators=[
MaxValueValidator(59),
MinValueValidator(0)
])
enabled = models.BooleanField(default=True)
auto_stop_minutes = models.IntegerField(default=0)
webradio = models.ForeignKey(WebRadio, on_delete=models.CASCADE)
def get_day_of_week(self):
"""
get a valid day of week period string usable with crontab
"""
def add_el(current_string, el):
if current_string is not None:
el = ",%s" % el
current_string += el
else:
current_string = el
return current_string
returned_period = None
if self.monday:
returned_period = add_el(returned_period, "0")
if self.tuesday:
returned_period = add_el(returned_period, "1")
if self.wednesday:
returned_period = add_el(returned_period, "2")
if self.thursday:
returned_period = add_el(returned_period, "3")
if self.friday:
returned_period = add_el(returned_period, "4")
if self.saturday:
returned_period = add_el(returned_period, "5")
if self.sunday:
returned_period = add_el(returned_period, "6")
return returned_period
|
tock/projects/migrations/0006_auto_20151229_1618.py | mikiec84/tock | 134 | 11193215 | <filename>tock/projects/migrations/0006_auto_20151229_1618.py
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('projects', '0005_project_mbnumber'),
]
operations = [
migrations.AddField(
model_name='project',
name='notes_displayed',
field=models.BooleanField(help_text='Check this if a notes field should be displayed along with a time entry against a project.', default=False),
),
migrations.AddField(
model_name='project',
name='notes_required',
field=models.BooleanField(help_text='Check this if notes should be required for time entries against this project. Note: Checking this will enable notes to be displayed as well.', default=False),
),
]
|
exercises/sum-of-multiples/sum_of_multiples.py | kishankj/python | 1,177 | 11193290 | def sum_of_multiples(limit, multiples):
pass
|
tests/test_build_steps.py | pretl/ALIGN-public | 119 | 11193300 | <filename>tests/test_build_steps.py
import pytest
from align.main import build_steps
def test_A():
assert ['1_topology','2_primitives', '3_pnr:prep' , '3_pnr:place', '3_pnr:route', '3_pnr:check'] == build_steps( '1_topology', '3_pnr')
assert ['1_topology','2_primitives', '3_pnr:prep' , '3_pnr:place', '3_pnr:route'] == build_steps( '1_topology', '3_pnr:route')
assert ['1_topology','2_primitives', '3_pnr:prep'] == build_steps( '1_topology', '3_pnr:prep')
assert [ '3_pnr:prep'] == build_steps( '3_pnr:prep', '3_pnr:prep')
with pytest.raises(AssertionError):
build_steps( '3_pnr:prep', '1_topology')
with pytest.raises(NotImplementedError):
build_steps( None, '3_pnr:place')
with pytest.raises(NotImplementedError):
build_steps( '3_pnr:route', None)
with pytest.raises(NotImplementedError):
build_steps( '3_pnr:check', None)
|
ethical-hacking/hashing-functions/simple_hashing.py | caesarcc/python-code-tutorials | 1,059 | 11193321 | <filename>ethical-hacking/hashing-functions/simple_hashing.py
import hashlib
# encode it to bytes using UTF-8 encoding
message = "Some text to hash".encode()
# hash with MD5 (not recommended)
print("MD5:", hashlib.md5(message).hexdigest())
# hash with SHA-2 (SHA-256 & SHA-512)
print("SHA-256:", hashlib.sha256(message).hexdigest())
print("SHA-512:", hashlib.sha512(message).hexdigest())
# hash with SHA-3
print("SHA-3-256:", hashlib.sha3_256(message).hexdigest())
print("SHA-3-512:", hashlib.sha3_512(message).hexdigest())
# hash with BLAKE2
# 256-bit BLAKE2 (or BLAKE2s)
print("BLAKE2s:", hashlib.blake2s(message).hexdigest())
# 512-bit BLAKE2 (or BLAKE2b)
print("BLAKE2b:", hashlib.blake2b(message).hexdigest())
|
util/clevr_feature_loader/feature_loader.py | geetickachauhan/lcgn | 101 | 11193327 | <filename>util/clevr_feature_loader/feature_loader.py<gh_stars>100-1000
import h5py
import os.path as osp
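# Keeps one open HDF5 handle per split (train/val/test) and serves per-image feature maps by imageId.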
class SpatialFeatureLoader:
def __init__(self, feature_dir):
h5_paths = {split: osp.join(feature_dir, '%s.h5' % split)
for split in ('train', 'val', 'test')}
self.h5_files = {
split: h5py.File(path, 'r') for split, path in h5_paths.items()}
def __del__(self):
for f in self.h5_files.values():
f.close()
def load_feature(self, imageId):
split, idx = imageId.split('_')
idx = int(idx)
return self.h5_files[split]['features'][idx]
|
extractor/baidutieba.py | pwh19920920/spiders | 390 | 11193347 | <filename>extractor/baidutieba.py
import re
import requests
def get(url: str) -> dict:
"""
videos
"""
data = {}
headers = {
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.88 Safari/537.36",
}
rep = requests.get(url, headers=headers, timeout=10)
if rep.status_code == 200:
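# Tieba pages embed direct video URLs in data-video attributes; collect every match.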
data["videos"] = re.findall(r'data-video="(.*?)"', rep.text)
else:
data["msg"] = "获取失败"
return data
if __name__ == "__main__":
# url = "https://tieba.baidu.com/p/6098286801?share=9105&fr=share&sfc=copy&client_type=2&client_version=11.3.8.2&st=1585294971&unique=190E4CEC3908756B412C7ABAE54C772F&red_tag=2618234446"
url = input("url: ")
print(get(url))
|
example/subuser/post_set_subuser_transferability.py | bailzx5522/huobi_Python | 611 | 11193392 | from huobi.client.subuser import SubuserClient
from huobi.constant import *
from huobi.utils import *
subuser_client = SubuserClient(api_key=g_api_key, secret_key=g_secret_key)
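# Disable asset transferability (False) for the listed sub-user IDs and print the per-user results.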
sub_uids = '122946475'
transferability_result = subuser_client.post_set_subuser_transferability(sub_uids, False)
LogInfo.output_list(transferability_result)
|
n-queens_ii/solution.py | mahimadubey/leetcode-python | 528 | 11193425 | <gh_stars>100-1000
class Solution:
# @return an integer
def totalNQueens(self, n):
self.res = 0
self.n = n
columns = [-1 for i in range(n)]
self.solve(columns, 0)
return self.res
def is_valid(self, columns, row, col):
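# A placement is invalid if a queen in an earlier row shares the same column or lies on a diagonal.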
for r in range(row):
c = columns[r]
if c == col:
return False
if abs(c - col) == row - r:
return False
return True
def solve(self, columns, row):
if row == self.n:
self.res += 1
else:
for col in range(self.n):
if self.is_valid(columns, row, col):
columns[row] = col
self.solve(columns, row + 1)
|
lib/python3.4/site-packages/scss/grammar/__init__.py | LChristakis/chalice-hunter | 152 | 11193526 | <reponame>LChristakis/chalice-hunter<gh_stars>100-1000
"""Grammar and parser plumbing for Sass. Much of this is generated or compiled
in some fashion.
"""
from .scanner import NoMoreTokens
from .scanner import Parser
from .scanner import Scanner
from .scanner import locate_blocks
__all__ = ('NoMoreTokens', 'Parser', 'Scanner', 'locate_blocks')
|
slack_sdk/oauth/state_store/file/__init__.py | timgates42/python-slack-sdk | 2,486 | 11193528 | import logging
import os
import time
from logging import Logger
from pathlib import Path
from typing import Union, Optional
from uuid import uuid4
from ..async_state_store import AsyncOAuthStateStore
from ..state_store import OAuthStateStore
class FileOAuthStateStore(OAuthStateStore, AsyncOAuthStateStore):
def __init__(
self,
*,
expiration_seconds: int,
base_dir: str = str(Path.home()) + "/.bolt-app-oauth-state",
client_id: Optional[str] = None,
logger: Logger = logging.getLogger(__name__),
):
self.expiration_seconds = expiration_seconds
self.base_dir = base_dir
self.client_id = client_id
if self.client_id is not None:
self.base_dir = f"{self.base_dir}/{self.client_id}"
self._logger = logger
@property
def logger(self) -> Logger:
if self._logger is None:
self._logger = logging.getLogger(__name__)
return self._logger
async def async_issue(self, *args, **kwargs) -> str:
return self.issue(*args, **kwargs)
async def async_consume(self, state: str) -> bool:
return self.consume(state)
def issue(self, *args, **kwargs) -> str:
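# Issue a random UUID as the state value and persist it as a file whose content is the issue timestamp.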
state = str(uuid4())
self._mkdir(self.base_dir)
filepath = f"{self.base_dir}/{state}"
with open(filepath, "w") as f:
content = str(time.time())
f.write(content)
return state
def consume(self, state: str) -> bool:
filepath = f"{self.base_dir}/{state}"
try:
with open(filepath) as f:
created = float(f.read())
expiration = created + self.expiration_seconds
still_valid: bool = time.time() < expiration
os.remove(filepath) # consume the file by deleting it
return still_valid
except FileNotFoundError as e:
message = f"Failed to find any persistent data for state: {state} - {e}"
self.logger.warning(message)
return False
@staticmethod
def _mkdir(path: Union[str, Path]):
if isinstance(path, str):
path = Path(path)
path.mkdir(parents=True, exist_ok=True)
|
code/python/tools/modify_vrscene_reduce_image_quality.py | americast/ml-hypersim | 1,246 | 11193535 | #
# For licensing see accompanying LICENSE.txt file.
# Copyright (C) 2020 Apple Inc. All Rights Reserved.
#
from pylab import *
import argparse
import os
import time
import vray
parser = argparse.ArgumentParser()
parser.add_argument("--in_file", required=True)
parser.add_argument("--out_file", required=True)
args = parser.parse_args()
assert os.path.exists(args.in_file)
print("[HYPERSIM: MODIFY_VRSCENE_REDUCE_IMAGE_QUALITY] Begin...")
output_dir = os.path.dirname(args.out_file)
if output_dir == "":
output_dir = "."
if not os.path.exists(output_dir): os.makedirs(output_dir)
renderer = vray.VRayRenderer()
def log_msg(renderer, message, level, instant):
print(str(instant) + " " + str(level) + " " + message)
renderer.setOnLogMessage(log_msg)
renderer.load(args.in_file)
time.sleep(0.5)
# SettingsImageSampler
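# Use the fixed-rate image sampler with a single subdivision, trading image quality for render speed.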
settings_image_sampler = renderer.classes.SettingsImageSampler.getInstanceOrCreate()
settings_image_sampler.type = 0
settings_image_sampler.fixed_subdivs = 1
settings_image_sampler.fixed_per_pixel_filtering = 0
settings_image_sampler.min_shade_rate = 1
# RenderChannelDenoiser
render_channel_denoisers = renderer.classes.RenderChannelDenoiser.getInstances()
for r in render_channel_denoisers:
r.enabled = 0
renderer.export(args.out_file)
print("[HYPERSIM: MODIFY_VRSCENE_REDUCE_IMAGE_QUALITY] Exported vrscene successfully.")
renderer.close()
time.sleep(0.5)
print("[HYPERSIM: MODIFY_VRSCENE_REDUCE_IMAGE_QUALITY] Finished.")
|
test/test_corsair_cli.py | javixeneize/corsair_scan | 117 | 11193546 | from click.testing import CliRunner
import unittest
from unittest import TestCase
from corsair_scan import corsair_cli
from mock import patch
class TestCorsairScanManager(TestCase):
def test_run_cli_scan_file_not_found(self):
runner = CliRunner()
result = runner.invoke(corsair_cli.run_cli_scan, ['dummytest.json'])
self.assertEqual(result.output, "Error. File not found\n", "Error in test_run_cli_scan_file_not_found test")
def test_run_cli_scan_malformed_json(self):
runner = CliRunner()
result = runner.invoke(corsair_cli.run_cli_scan, ['test/testfiles/json_test_malformed.json'])
self.assertEqual(result.output, 'Error. The format does not appear to be correct, please review\n',
"Error in test_run_cli_scan_malformed test")
@patch('corsair_scan.corsair_scan.corsair_scan')
def test_run_cli_scan_error(self, corsair):
runner = CliRunner()
corsair.return_value = {'summary': 'test'}
result = runner.invoke(corsair_cli.run_cli_scan, ['test/testfiles/json_test.json'])
self.assertEqual(result.output, 'There was an error running corsair. Please check the input data is correct\n',
"Error in test_run_cli_scan_error test")
@patch('corsair_scan.corsair_scan.corsair_scan')
def test_run_cli_scan_report(self, corsair):
runner = CliRunner()
corsair.return_value = {'summary': {'misconfigured': [], 'error': []}, 'report': [{'url': 'https://example.com/', 'verb': 'GET', 'fake_origin': {'Origin': 'https://scarymonster.com', 'Access-Control-Allow-Origin': None, 'credentials': False, 'status_code': 400, 'error': False, 'misconfigured': False}, 'post-domain': {'Origin': 'https://example.com.scarymonster.com', 'Access-Control-Allow-Origin': None, 'credentials': False, 'status_code': 400, 'error': False, 'misconfigured': False}, 'sub-domain': {'Origin': 'https://scarymonster.example.com', 'Access-Control-Allow-Origin': None, 'credentials': False, 'status_code': 400, 'error': False, 'misconfigured': False}, 'pre-domain': {'Origin': 'https://.scarymonsterexample.com', 'Access-Control-Allow-Origin': None, 'credentials': False, 'status_code': 400, 'error': False, 'misconfigured': False}}]}
result = runner.invoke(corsair_cli.run_cli_scan, ['test/testfiles/json_test.json', '-nv', '-r testreport.json'])
self.assertEqual(result.output, 'Report generated in testreport.json\n', 'Error in OK test')
if __name__ == '__main__':
unittest.main()
|
study/utils/plot_mem.py | Kshitiz-Bansal/wavetorch | 470 | 11193553 | <filename>study/utils/plot_mem.py
""" Helper script for plotting memory usage from memory profiler
Install memory_profiler:
conda install memory_profiler
Profile the code:
mprof run study/vowel_train.py study/example.yml
This will generate a mprofile dat file which you can then plot with this script
"""
import numpy as np
import matplotlib.pyplot as plt
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('files', nargs='+')
args = parser.parse_args()
fig, ax = plt.subplots(1,1, constrained_layout=True, figsize=(4,3))
for file in args.files:
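# Each mprofile data line looks like "MEM <memory_in_MiB> <timestamp>", so columns 1 and 2 hold memory and time.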
data = np.loadtxt(file, usecols=(1,2), skiprows=1, delimiter=' ')
mem = data[:,0]
t = data[:,1]
t = t-t.min()
ax.plot(t, mem/1e3)
ax.set_xlabel('Time (sec)')
ax.set_ylabel('Memory (GB)')
ax.grid()
plt.show()
|
scout/server/app.py | Clinical-Genomics/scout | 111 | 11193580 | <reponame>Clinical-Genomics/scout
"""Code for flask app"""
import logging
import re
import coloredlogs
from flask import Flask, current_app, redirect, request, url_for
from flask_babel import Babel
from flask_cors import CORS
from flask_login import current_user
from flaskext.markdown import Markdown
from . import extensions
from .blueprints import (
alignviewers,
api,
cases,
dashboard,
diagnoses,
genes,
institutes,
login,
managed_variants,
panels,
phenotypes,
public,
variant,
variants,
)
try:
from urllib.parse import unquote
except ImportError:
from urllib2 import unquote
LOG = logging.getLogger(__name__)
try:
from chanjo_report.server.app import configure_template_filters
from chanjo_report.server.blueprints import report_bp
from chanjo_report.server.extensions import api as chanjo_api
except ImportError:
chanjo_api = None
report_bp = None
configure_template_filters = None
LOG.info("chanjo report not installed!")
def create_app(config_file=None, config=None):
"""Flask app factory function."""
app = Flask(__name__)
CORS(app)
app.jinja_env.add_extension("jinja2.ext.do")
app.config.from_pyfile("config.py") # Load default config file
if (
config
): # Params from an optional .yaml config file provided by the user or created by the app cli
app.config.update((k, v) for k, v in config.items() if v is not None)
if config_file: # Params from an optional .py config file provided by the user
app.config.from_pyfile(config_file)
app.config["JSON_SORT_KEYS"] = False
current_log_level = LOG.getEffectiveLevel()
coloredlogs.install(level="DEBUG" if app.debug else current_log_level)
configure_extensions(app)
register_blueprints(app)
register_filters(app)
if not (app.debug or app.testing) and app.config.get("MAIL_USERNAME"):
# setup email logging of errors
configure_email_logging(app)
@app.before_request
def check_user():
if not app.config.get("LOGIN_DISABLED") and request.endpoint:
# check if the endpoint requires authentication
static_endpoint = "static" in request.endpoint or request.endpoint in [
"report.report",
"report.json_chrom_coverage",
]
public_endpoint = getattr(app.view_functions[request.endpoint], "is_public", False)
relevant_endpoint = not (static_endpoint or public_endpoint)
# if endpoint requires auth, check if user is authenticated
if relevant_endpoint and not current_user.is_authenticated:
# combine visited URL (convert byte string query string to unicode!)
next_url = "{}?{}".format(request.path, request.query_string.decode())
login_url = url_for("public.index", next=next_url)
return redirect(login_url)
return app
def configure_extensions(app):
"""Configure Flask extensions."""
extensions.toolbar.init_app(app)
extensions.bootstrap.init_app(app)
extensions.mongo.init_app(app)
extensions.store.init_app(app)
extensions.login_manager.init_app(app)
extensions.mail.init_app(app)
Markdown(app)
if app.config.get("SQLALCHEMY_DATABASE_URI"):
LOG.info("Chanjo extension enabled")
configure_coverage(app)
if app.config.get("LOQUSDB_SETTINGS"):
LOG.info("LoqusDB enabled")
# setup LoqusDB
extensions.loqusdb.init_app(app)
if app.config.get("GENS_HOST"):
LOG.info("Gens enabled")
extensions.gens.init_app(app)
if all(
[
app.config.get("MME_URL"),
app.config.get("MME_ACCEPTS"),
app.config.get("MME_TOKEN"),
]
):
LOG.info("MatchMaker Exchange enabled")
extensions.matchmaker.init_app(app)
if app.config.get("RERUNNER_API_ENTRYPOINT") and app.config.get("RERUNNER_API_KEY"):
LOG.info("Rerunner service enabled")
# setup rerunner service
extensions.rerunner.init_app(app)
if app.config.get("LDAP_HOST"):
LOG.info("LDAP login enabled")
# setup connection to server
extensions.ldap_manager.init_app(app)
if app.config.get("GOOGLE"):
LOG.info("Google login enabled")
# setup connection to google oauth2
configure_oauth_login(app)
if app.config.get("CLOUD_IGV_TRACKS"):
LOG.info("Collecting IGV tracks from cloud resources")
extensions.cloud_tracks.init_app(app)
def register_blueprints(app):
"""Register Flask blueprints."""
app.register_blueprint(public.public_bp)
app.register_blueprint(genes.genes_bp)
app.register_blueprint(cases.cases_bp)
app.register_blueprint(login.login_bp)
app.register_blueprint(variant.variant_bp)
app.register_blueprint(variants.variants_bp)
app.register_blueprint(panels.panels_bp)
app.register_blueprint(dashboard.dashboard_bp)
app.register_blueprint(api.api_bp)
app.register_blueprint(alignviewers.alignviewers_bp)
app.register_blueprint(phenotypes.hpo_bp)
app.register_blueprint(diagnoses.omim_bp)
app.register_blueprint(institutes.overview)
app.register_blueprint(managed_variants.managed_variants_bp)
def register_filters(app):
@app.template_filter()
def human_decimal(number, ndigits=4):
"""Return a standard representation of a decimal number.
Args:
number (float): number to humanize
ndigits (int, optional): max number of digits to round to
Return:
str: humanized string of the decimal number
"""
min_number = 10 ** -ndigits
if isinstance(number, str):
number = None
if number is None:
# NaN
return "-"
if number == 0:
# avoid confusion over what is rounded and what is actually 0
return 0
if number < min_number:
# make human readable and sane
return "< {}".format(min_number)
# round all other numbers
return round(number, ndigits)
@app.template_filter()
def url_decode(string):
"""Decode a string with encoded hex values."""
return unquote(string)
@app.template_filter()
def cosmic_prefix(cosmicId):
"""If cosmicId is an integer, add 'COSM' as prefix
otherwise return unchanged"""
if isinstance(cosmicId, int):
return "COSM" + str(cosmicId)
return cosmicId
@app.template_filter()
def count_cursor(pymongo_cursor):
"""Count number of returned documents (deprecated pymongo.cursor.count())"""
# Perform operations on a copy of the cursor so original does not move
cursor_copy = pymongo_cursor.clone()
return len(list(cursor_copy))
def configure_oauth_login(app):
"""Register the Google Oauth login client using config settings"""
google_conf = app.config["GOOGLE"]
discovery_url = google_conf.get("discovery_url")
client_id = google_conf.get("client_id")
client_secret = google_conf.get("client_secret")
extensions.oauth_client.init_app(app)
extensions.oauth_client.register(
name="google",
server_metadata_url=discovery_url,
client_id=client_id,
client_secret=client_secret,
client_kwargs={"scope": "openid email profile"},
)
def configure_email_logging(app):
"""Setup logging of error/exceptions to email."""
import logging
from scout.log import TlsSMTPHandler
mail_handler = TlsSMTPHandler(
mailhost=app.config["MAIL_SERVER"],
fromaddr=app.config["MAIL_USERNAME"],
toaddrs=app.config["ADMINS"],
subject="O_ops... {} failed!".format(app.name),
credentials=(app.config["MAIL_USERNAME"], app.config["MAIL_PASSWORD"]),
)
mail_handler.setLevel(logging.ERROR)
mail_handler.setFormatter(
logging.Formatter(
"%(asctime)s - %(name)s - %(levelname)s: %(message)s " "[in %(pathname)s:%(lineno)d]"
)
)
app.logger.addHandler(mail_handler)
def configure_coverage(app):
"""Setup coverage related extensions."""
# setup chanjo report
app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = True if app.debug else False
if chanjo_api:
chanjo_api.init_app(app)
configure_template_filters(app)
# register chanjo report blueprint
app.register_blueprint(report_bp, url_prefix="/reports")
babel = Babel(app)
@babel.localeselector
def get_locale():
"""Determine locale to use for translations."""
accept_languages = current_app.config.get("ACCEPT_LANGUAGES", ["en"])
# first check request args
session_language = request.args.get("lang")
if session_language in accept_languages:
current_app.logger.info("using session language: %s", session_language)
return session_language
# language can be forced in config
user_language = current_app.config.get("REPORT_LANGUAGE")
if user_language:
return user_language
# try to guess the language from the user accept header that
# the browser transmits. We support de/fr/en in this example.
# The best match wins.
return request.accept_languages.best_match(accept_languages)
|
test/test.py | aachenmax/ffmpeg-normalize | 775 | 11193603 | <reponame>aachenmax/ffmpeg-normalize
import os
import sys
import subprocess
import pytest
import json
import shutil
sys.path.insert(0, os.path.abspath(os.path.dirname(__file__) + "/../"))
def ffmpeg_normalize_call(args):
cmd = [sys.executable, "-m", "ffmpeg_normalize"]
cmd.extend(args)
try:
p = subprocess.Popen(
cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True
)
stdout, stderr = p.communicate()
return stdout, stderr
except subprocess.CalledProcessError as e:
print(e.output)
raise e
def _get_stats(input_file, normalization_type="ebu"):
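# Run ffmpeg-normalize as a dry run and parse the JSON emitted by --print-stats.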
stdout, _ = ffmpeg_normalize_call(
[input_file, "-f", "-n", "--print-stats", "-nt", normalization_type]
)
stats = json.loads(stdout)
print(json.dumps(stats, indent=4))
return stats
def _get_stream_info(input_file):
cmd = [
"ffprobe",
"-hide_banner",
"-loglevel",
"error",
input_file,
"-of",
"json",
"-show_streams",
]
return json.loads(
subprocess.check_output(cmd, stderr=subprocess.STDOUT, universal_newlines=True)
)["streams"]
def fuzzy_equal(d1, d2, precision=0.1):
"""
Compare two objects recursively (just as standard '==' except floating point
values are compared within given precision).
Based on https://gist.github.com/durden/4236551, modified to handle lists
"""
if len(d1) != len(d2):
print("Length of objects does not match {}, {}".format(d1, d2))
return False
if isinstance(d1, list):
ret = []
for v1, v2 in zip(d1, d2):
if isinstance(v1, dict):
ret.append(fuzzy_equal(v1, v2, precision))
else:
if not abs(v1 - v2) < precision:
print("Values do not match: Got {}, expected {}".format(v1, v2))
return False
else:
ret.append(True)
return all(ret)
elif isinstance(d1, dict):
errors = []
for k, v in d1.items():
# Make sure all the keys are equal
if k not in d2:
print("Object does not contain: {}, {}".format(k, d2))
return False
# Fuzzy float comparison
if isinstance(v, float) and isinstance(d2[k], float):
if not abs(v - d2[k]) < precision:
errors.append(
"Values for {} do not match: Got {}, expected {}".format(
k, v, d2[k]
)
)
# Recursive compare if there are nested dicts
elif isinstance(v, dict):
if not fuzzy_equal(v, d2[k], precision):
return False
# Fall back to default
elif v != d2[k]:
errors.append(
"Values for {} do not match: Got {}, expected {}".format(
k, v, d2[k]
)
)
if len(errors):
print("Errors:\n" + "\n".join(errors))
return False
else:
if not abs(d1 - d2) < precision:
print("Values do not match: Got {}, expected {}".format(d1, d2))
return False
return True
class TestFFmpegNormalize:
@pytest.fixture(scope="function", autouse=True)
def cleanup(self):
os.makedirs("normalized", exist_ok=True)
yield
for file in [
"test.mkv",
"test.wav",
"test2.wav",
"test.mp3",
"test.aac",
"test.mp4",
"test1.mkv",
"test2.mkv",
]:
if os.path.isfile("normalized/" + file):
os.remove("normalized/" + file)
if os.path.isdir("normalized"):
shutil.rmtree("normalized")
def test_output_filename_and_folder(self):
ffmpeg_normalize_call(["test/test.mp4"])
assert os.path.isfile("normalized/test.mkv")
def test_default_warnings(self):
_, stderr = ffmpeg_normalize_call(
["test/test.mp4", "-o", "normalized/test2.wav"]
)
assert "The sample rate will automatically be set" in stderr
def test_multiple_outputs(self):
os.makedirs("normalized", exist_ok=True)
ffmpeg_normalize_call(
[
"test/test.mp4",
"test/test.mp4",
"-o",
"normalized/test1.mkv",
"normalized/test2.mkv",
]
)
assert os.path.isfile("normalized/test1.mkv")
assert os.path.isfile("normalized/test2.mkv")
def test_overwrites(self):
ffmpeg_normalize_call(["test/test.mp4", "-v"])
_, stderr = ffmpeg_normalize_call(["test/test.mp4", "-v"])
assert "exists" in stderr
def test_dry(self):
ffmpeg_normalize_call(["test/test.mp4", "-n"])
assert not os.path.isfile("normalized/test.mkv")
def test_only_supports_one_stream_output(self):
os.makedirs("normalized", exist_ok=True)
_, stderr = ffmpeg_normalize_call(
["test/test.mp4", "-o", "normalized/test.wav", "-v"]
)
assert "Output file only supports one stream" in stderr
def test_peak(self):
ffmpeg_normalize_call(["test/test.mp4", "-nt", "peak", "-t", "0"])
assert os.path.isfile("normalized/test.mkv")
assert fuzzy_equal(
_get_stats("normalized/test.mkv", "peak"),
[
{
"input_file": "normalized/test.mkv",
"output_file": "normalized/test.mkv",
"stream_id": 1,
"ebu": None,
"mean": -14.8,
"max": -0.0,
},
{
"input_file": "normalized/test.mkv",
"output_file": "normalized/test.mkv",
"stream_id": 2,
"ebu": None,
"mean": -19.3,
"max": -0.0,
},
],
)
def test_rms(self):
ffmpeg_normalize_call(["test/test.mp4", "-nt", "rms", "-t", "-15"])
assert os.path.isfile("normalized/test.mkv")
assert fuzzy_equal(
_get_stats("normalized/test.mkv", "rms"),
[
{
"input_file": "normalized/test.mkv",
"output_file": "normalized/test.mkv",
"stream_id": 1,
"ebu": None,
"mean": -15.0,
"max": -0.2,
},
{
"input_file": "normalized/test.mkv",
"output_file": "normalized/test.mkv",
"stream_id": 2,
"ebu": None,
"mean": -15.1,
"max": 0.0,
},
],
)
def test_ebu(self):
ffmpeg_normalize_call(["test/test.mp4", "-nt", "ebu"])
assert os.path.isfile("normalized/test.mkv")
assert fuzzy_equal(
_get_stats("normalized/test.mkv", "ebu"),
[
{
"input_file": "normalized/test.mkv",
"output_file": "normalized/test.mkv",
"stream_id": 1,
"ebu": {
"input_i": -23.00,
"input_tp": -10.32,
"input_lra": 2.40,
"input_thresh": -33.06,
"output_i": -22.03,
"output_tp": -8.89,
"output_lra": 2.30,
"output_thresh": -32.12,
"normalization_type": "dynamic",
"target_offset": -0.97,
},
"mean": None,
"max": None,
},
{
"input_file": "normalized/test.mkv",
"output_file": "normalized/test.mkv",
"stream_id": 2,
"ebu": {
"input_i": -22.98,
"input_tp": -10.72,
"input_lra": 2.10,
"input_thresh": -33.03,
"output_i": -22.16,
"output_tp": -9.46,
"output_lra": 2.10,
"output_thresh": -32.25,
"normalization_type": "dynamic",
"target_offset": -0.84,
},
"mean": None,
"max": None,
},
],
)
def test_acodec(self):
ffmpeg_normalize_call(["test/test.mp4", "-c:a", "aac"])
assert os.path.isfile("normalized/test.mkv")
assert _get_stream_info("normalized/test.mkv")[1]["codec_name"] == "aac"
def test_abr(self):
os.makedirs("normalized", exist_ok=True)
ffmpeg_normalize_call(
[
"test/test.mp4",
"-c:a",
"aac",
"-b:a",
"320k",
"-o",
"normalized/test.aac",
]
)
assert os.path.isfile("normalized/test.aac")
assert _get_stream_info("normalized/test.aac")[0]["codec_name"] == "aac"
assert abs(133000 - float(_get_stream_info("normalized/test.aac")[0]["bit_rate"])) > 10000
def test_ar(self):
ffmpeg_normalize_call(["test/test.mp4", "-ar", "48000"])
assert os.path.isfile("normalized/test.mkv")
assert _get_stream_info("normalized/test.mkv")[1]["sample_rate"] == "48000"
def test_vcodec(self):
ffmpeg_normalize_call(["test/test.mp4", "-c:v", "libx265"])
assert os.path.isfile("normalized/test.mkv")
assert _get_stream_info("normalized/test.mkv")[0]["codec_name"] == "hevc"
def test_extra_input_options_json(self):
ffmpeg_normalize_call(
["test/test.mp4", "-c:a", "aac", "-ei", '[ "-f", "mp4" ]']
)
# FIXME: some better test that options are respected?
assert os.path.isfile("normalized/test.mkv")
def test_extra_output_options_json(self):
ffmpeg_normalize_call(["test/test.mp4", "-c:a", "aac", "-e", '[ "-vbr", "3" ]'])
# FIXME: some better test that options are respected?
assert os.path.isfile("normalized/test.mkv")
def test_ofmt_fail(self):
_, stderr = ffmpeg_normalize_call(
["test/test.mp4", "-ofmt", "mp3", "-o", "normalized/test.mp3", "-vn", "-sn"]
)
assert "does not support" in stderr
def test_ofmt_mp3(self):
ffmpeg_normalize_call(
[
"test/test.mp4",
"-ofmt",
"mp3",
"-o",
"normalized/test.mp3",
"-c:a",
"libmp3lame",
"-vn",
"-sn",
]
)
assert os.path.isfile("normalized/test.mp3")
def test_ext_fail(self):
_, stderr = ffmpeg_normalize_call(["test/test.mp4", "-ext", "mp3"])
assert "does not support" in stderr
def test_ext_mp3(self):
ffmpeg_normalize_call(["test/test.mp4", "-ext", "mp3", "-c:a", "libmp3lame"])
assert os.path.isfile("normalized/test.mp3")
def test_version(self):
stdout, _ = ffmpeg_normalize_call(["--version"])
assert "ffmpeg-normalize v" in stdout
def test_progress(self):
_, stderr = ffmpeg_normalize_call(["test/test.mp4", "-pr"])
assert "100/100" in stderr
assert os.path.isfile("normalized/test.mkv")
def test_duration(self):
_, stderr = ffmpeg_normalize_call(["test/test.wav", "--debug"])
assert "Found duration: " in stderr
def test_pre_filters(self):
ffmpeg_normalize_call(
[
"test/test.wav",
"-o",
"normalized/test2.wav",
"-prf",
"volume=0.5,volume=0.5",
]
)
assert os.path.isfile("normalized/test2.wav")
assert fuzzy_equal(
_get_stats("normalized/test2.wav", "ebu"),
[
{
"input_file": "normalized/test2.wav",
"output_file": "normalized/test2.mkv",
"stream_id": 0,
"ebu": {
"input_i": -23.01,
"input_tp": -10.75,
"input_lra": 2.20,
"input_thresh": -33.06,
"output_i": -22.16,
"output_tp": -9.46,
"output_lra": 2.10,
"output_thresh": -32.25,
"normalization_type": "dynamic",
"target_offset": -0.84,
},
"mean": None,
"max": None,
}
],
)
def test_post_filters(self):
ffmpeg_normalize_call(
[
"test/test.wav",
"-o",
"normalized/test2.wav",
"-pof",
"volume=0.5,volume=0.5",
]
)
assert os.path.isfile("normalized/test2.wav")
assert fuzzy_equal(
_get_stats("normalized/test2.wav", "ebu"),
[
{
"input_file": "normalized/test2.wav",
"output_file": "normalized/test2.mkv",
"stream_id": 0,
"ebu": {
"input_i": -35.02,
"input_tp": -22.76,
"input_lra": 2.20,
"input_thresh": -45.07,
"output_i": -22.16,
"output_tp": -9.46,
"output_lra": 2.10,
"output_thresh": -32.24,
"normalization_type": "dynamic",
"target_offset": -0.84,
},
"mean": None,
"max": None,
}
],
)
def test_quiet(self):
_, stderr = ffmpeg_normalize_call(
["test/test.mp4", "-ext", "wav", "-vn", "-f", "q"]
)
assert "only supports one stream" not in stderr
|
tests/settings.py | Gagaro/django-unused-media | 112 | 11193642 | # -*- coding: utf-8 -*-
import os
DEBUG = True
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3'
}
}
INSTALLED_APPS = (
'django_unused_media',
'tests',
)
SECRET_KEY = 'test'
TEST_DIR = os.path.dirname(os.path.realpath(__file__))
MEDIA_ROOT = os.path.join(TEST_DIR, 'media')
|
src/monopsr/datasets/kitti/instance_utils_test.py | minghanz/monopsr | 104 | 11193667 | import unittest
import numpy as np
import tensorflow as tf
from monopsr.datasets.kitti import instance_utils
class InstanceUtilsTest(tf.test.TestCase):
def test_get_proj_uv_map(self):
box_2d = np.asarray([0, 10, 10, 20], dtype=np.float32)
roi_size = (10, 10)
proj_uv_map = instance_utils.get_exp_proj_uv_map(box_2d, roi_size)
proj_u_row = proj_uv_map[0][0]
proj_v_col = proj_uv_map[1][:, 0]
# Check that points are in the middle of each pixel
exp_u_row = np.linspace(10.5, 19.5, 10)
exp_v_row = np.linspace(0.5, 9.5, 10)
np.testing.assert_allclose(proj_u_row, exp_u_row)
np.testing.assert_allclose(proj_v_col, exp_v_row)
def test_tf_get_proj_uv_map(self):
boxes_2d = np.asarray([
[0.0, 10.0, 10.0, 20.0],
[5.0, 5.0, 10.0, 10.0],
[0.0, 0.0, 100.0, 100.0],
], np.float32)
roi_size = (10, 10)
exp_proj_uv_maps = [instance_utils.get_exp_proj_uv_map(
box_2d, roi_size, use_pixel_centres=True)
for box_2d in boxes_2d]
# Convert to tensors
tf_boxes_2d = tf.to_float(boxes_2d)
proj_uv_map = instance_utils.tf_get_exp_proj_uv_map(tf_boxes_2d, roi_size)
with self.test_session() as sess:
proj_uv_map_out = sess.run(proj_uv_map)
# Compare with expected
np.testing.assert_allclose(proj_uv_map_out, exp_proj_uv_maps)
def test_tf_inst_xyz_map_local_to_global(self):
inst_points_local = np.random.rand(2304, 3).astype(np.float32)
viewing_angle = np.deg2rad(10.0).astype(np.float32)
centroid = np.asarray([2.5, 1.5, 15.0], dtype=np.float32)
np_inst_points_global = instance_utils.inst_points_local_to_global(
inst_points_local, viewing_angle, centroid)
xyz_maps_local = inst_points_local.reshape(1, 48, 48, 3)
tf_view_angs = np.reshape(viewing_angle, (-1, 1))
tf_centroids = np.reshape(centroid, (-1, 3))
tf_inst_xyz_map_global = instance_utils.tf_inst_xyz_map_local_to_global(
xyz_maps_local, map_roi_size=(48, 48),
view_angs=tf_view_angs, centroids=tf_centroids)
with self.test_session() as sess:
tf_inst_xyz_map_global_out = sess.run(tf_inst_xyz_map_global)
# Check equivalence
tf_inst_points_global = tf_inst_xyz_map_global_out.reshape(2304, 3)
np.testing.assert_allclose(np_inst_points_global, tf_inst_points_global)
|
pytorch/train_net.py | mjm522/gpd | 439 | 11193683 | <reponame>mjm522/gpd<filename>pytorch/train_net.py<gh_stars>100-1000
import h5py
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.utils.data as data
import sys
class H5Dataset(data.Dataset):
def __init__(self, file_path):
super(H5Dataset, self).__init__()
h5_file = h5py.File(file_path)
self.data = h5_file.get('images')
self.target = h5_file.get('labels')
def __getitem__(self, index):
data = self.data[index,:,:].astype('float32')
        # pytorch uses NCHW format
data = data.reshape((data.shape[2], data.shape[0], data.shape[1]))
target = self.target[index,:].astype('int32')[0]
return (data, target)
def __len__(self):
return self.data.shape[0]
class Net(nn.Module):
def __init__(self, input_channels):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(input_channels, 20, 5)
self.pool = nn.MaxPool2d(2, 2)
self.conv2 = nn.Conv2d(20, 50, 5)
self.fc1 = nn.Linear(50 * 12 * 12, 500)
self.fc2 = nn.Linear(500, 2)
# self.fc2 = nn.Linear(120, 84)
# self.fc3 = nn.Linear(84, 2)
def forward(self, x):
x = self.pool(F.relu(self.conv1(x)))
x = self.pool(F.relu(self.conv2(x)))
x = x.view(-1, 50 * x.shape[2] * x.shape[3])
x = F.relu(self.fc1(x))
x = self.fc2(x)
# x = F.relu(self.fc2(x))
# x = self.fc3(x)
return x
# Load the training data.
dset = H5Dataset(sys.argv[1])
train_loader = data.DataLoader(dset, batch_size=64, shuffle=True)
# Create the network.
input_channels = int(sys.argv[3])
net = Net(input_channels)
print(net)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print(device)
net.to(device)
# Define the loss function and optimizer.
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=0.00001, momentum=0.9)
num_epochs = 1
for epoch in range(num_epochs): # loop over the dataset multiple times
running_loss = 0.0
for i, data in enumerate(train_loader, 0):
# get the inputs
inputs, labels = data
inputs, labels = inputs.to(device), labels.to(device)
# zero the parameter gradients
optimizer.zero_grad()
# forward + backward + optimize
outputs = net(inputs)
loss = criterion(outputs, labels.long())
loss.backward()
optimizer.step()
# print statistics
running_loss += loss.item()
if i % 10 == 9: # print every 10 mini-batches
print('[%d, %5d] loss: %.5f' %
(epoch + 1, i + 1, running_loss / 10))
running_loss = 0.0
print('Finished Training')
# Test the network on the test data.
test_set = H5Dataset(sys.argv[2])
test_loader = data.DataLoader(test_set, batch_size=64, shuffle=True)
correct = 0
total = 0
with torch.no_grad():
    for data in test_loader:
        images, labels = data
        images, labels = images.to(device), labels.to(device)
        outputs = net(images)
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels).sum().item()
print('Accuracy of the network on the %d test images: %d %%' % (
    total, 100 * correct / total))
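# --- Illustrative sketch (not part of the original gpd script) ---
# Judging by __getitem__ above, H5Dataset appears to expect an HDF5 file with
# an 'images' dataset of shape (N, H, W, C) and a 'labels' dataset of shape
# (N, 1); the 50 * 12 * 12 fc1 size implies 60x60 inputs for this network.
# The helper below is a hedged example only -- the file name and sizes are
# placeholder assumptions -- and it is never called by the script itself.
def _write_toy_dataset(path='toy_grasps.h5', n=16, channels=3):
    with h5py.File(path, 'w') as f:
        f.create_dataset('images',
                         data=np.random.rand(n, 60, 60, channels).astype('float32'))
        f.create_dataset('labels',
                         data=np.random.randint(0, 2, size=(n, 1)).astype('int32'))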
|
deprecated_gallery/elf32.py | guyongqiangx/construct | 629 | 11193697 | <filename>deprecated_gallery/elf32.py
"""
Executable and Linkable Format (ELF), 32 bit, big or little endian.
Used on Unix systems as a replacement of the older a.out format.
Big-endian support kindly submitted by <NAME> (mcqueen-c#edsrd1!yzk!co!jp).
"""
from construct import *
def elf32_body(ElfInt16, ElfInt32):
elf32_program_header = Struct(
"type" / Enum(ElfInt32,
NULL = 0,
LOAD = 1,
DYNAMIC = 2,
INTERP = 3,
NOTE = 4,
SHLIB = 5,
PHDR = 6,
),
"offset" / ElfInt32,
"vaddr" / ElfInt32,
"paddr" / ElfInt32,
"file_size" / ElfInt32,
"mem_size" / ElfInt32,
"flags" / ElfInt32,
"align" / ElfInt32,
)
elf32_section_header = Struct(
"name_offset" / ElfInt32,
"name" / Pointer(this._.strtab_data_offset + this.name_offset,
CString("utf8")),
"type" / Enum(ElfInt32,
NULL = 0,
PROGBITS = 1,
SYMTAB = 2,
STRTAB = 3,
RELA = 4,
HASH = 5,
DYNAMIC = 6,
NOTE = 7,
NOBITS = 8,
REL = 9,
SHLIB = 10,
DYNSYM = 11,
),
"flags" / ElfInt32,
"addr" / ElfInt32,
"offset" / ElfInt32,
"size" / ElfInt32,
"link" / ElfInt32,
"info" / ElfInt32,
"align" / ElfInt32,
"entry_size" / ElfInt32,
"data" / Pointer(this.offset,
Bytes(this.size)),
)
return Struct(
"type" / Enum(ElfInt16,
NONE = 0,
RELOCATABLE = 1,
EXECUTABLE = 2,
SHARED = 3,
CORE = 4,
),
"machine" / Enum(ElfInt16,
NONE = 0,
M32 = 1,
SPARC = 2,
I386 = 3,
Motorolla68K = 4,
Motorolla88K = 5,
Intel860 = 7,
MIPS = 8,
),
"version" / ElfInt32,
"entry" / ElfInt32,
"ph_offset" / ElfInt32,
"sh_offset" / ElfInt32,
"flags" / ElfInt32,
"header_size" / ElfInt16,
"ph_entry_size" / ElfInt16,
"ph_count" / ElfInt16,
"sh_entry_size" / ElfInt16,
"sh_count" / ElfInt16,
"strtab_section_index" / ElfInt16,
# calculate the string table data offset (pointer arithmetics)
# ugh... anyway, we need it in order to read the section names, later on
"strtab_data_offset" / Pointer(this.sh_offset + this.strtab_section_index * this.sh_entry_size + 16,
ElfInt32),
"program_table" / Pointer(this.ph_offset,
elf32_program_header[this.ph_count]),
"sections" / Pointer(this.sh_offset,
elf32_section_header[this.sh_count]),
)
elf32_file = Struct(
"identifier" / Struct(
Const(b"\x7fELF"),
"file_class" / Enum(Byte,
NONE = 0,
CLASS32 = 1,
CLASS64 = 2,
),
"encoding" / Enum(Byte,
NONE = 0,
LSB = 1,
MSB = 2,
),
"version" / Byte,
Padding(9),
),
"body" / IfThenElse(this.identifier.encoding == "LSB",
elf32_body(Int16ul, Int32ul),
elf32_body(Int16ub, Int32ub),
),
)
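# --- Illustrative usage sketch (not part of the original gallery file) ---
# elf32_file.parse() takes the raw bytes of a 32-bit ELF binary and returns a
# Container with attribute access. The path below is only an assumption for
# demonstration; it must point at a 32-bit ELF file on your system.
if __name__ == "__main__":
    with open("/usr/bin/some_32bit_binary", "rb") as f:  # assumed path
        elf = elf32_file.parse(f.read())
    print(elf.identifier.file_class, elf.body.type, elf.body.machine)
    for section in elf.body.sections:
        print(section.name, section.type, section.offset, section.size)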
|
solidstuff/bfastq-to-bwa.py | menghaowei/bio-playground | 124 | 11193755 | <reponame>menghaowei/bio-playground<filename>solidstuff/bfastq-to-bwa.py<gh_stars>100-1000
import sys
import string
encoder = string.maketrans('0123.', 'ACGTN')
for i, line in enumerate(sys.stdin, start=0):
if i % 4 == 1:
# double encode sequence
assert line[0] == "T"
print line[2:-1].translate(encoder)
elif i % 4 == 3:
# drop first qual
print line[1:],
else:
print line,
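# --- Illustrative example (not part of the original script) ---
# For a SOLiD csfastq record on stdin such as:
#   @read1
#   T0123.
#   +
#   !ABCD
# the sequence line drops the primer base 'T' plus the first color and
# double-encodes the remaining colors via '0123.' -> 'ACGTN', printing 'CGTN';
# the quality line drops its first value to stay in sync, printing 'ABCD'.
# (The record shown is a made-up example, not data from this repository.)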
|
moya/command/sub/library_template.py | moyaproject/moya | 129 | 11193757 | template = """
@WRAPTEXT /media/readme.txt
Put any shared media (css, JS etc) here
@TEXT /__init__.py
# Required if you want to distribute your library as a Python module
@TEXT /lib.ini
[author]
name = ${{ author.name }}
email = ${{ author.email }}
organization = ${{ author.organization }}
url = ${{ author.url }}
[lib]
location = ./logic
title = ${{ library.title }}
url = ${{ library.url }}
namespace = ${{ library.namespace }}
name = ${{ library.longname }}
# Set to 0.1.0 for your first release
version = 0.1.0-dev
[settings]
[templates]
location = ./templates
[media:media]
location = ./media
[locale]
location = ./locale
default_language = en
languages = en
[documentation]
location = ./docs
[package]
exclude = __*__/*
.*
*.pyc
.svn
.hg
.git
@WRAPTEXT /locale/readme.txt
Translations go here. Use the 'moya extract' command to create message catalogs.
@WRAPTEXT /templates/${{ library.longname }}/widgets/readme.txt
This folder should contain templates for widgets defined in the library.
@TEXT /templates/${{ library.longname }}/base.html
{% extends "/base.html" %}
{% block "body" %}
<h2>${{ library.title }}</h2>
<div class="alert alert-info">
Created by <tt>moya start library</tt>
</div>
{% render sections.body %}
{% endblock %}
@TEXT /logic/content.xml
<?xml version="1.0" encoding="UTF-8"?>
<moya xmlns="http://moyaproject.com"
xmlns:let="http://moyaproject.com/let"
xmlns:db="http://moyaproject.com/db"
xmlns:forms="http://moyaproject.com/forms"
xmlns:html="http://moyaproject.com/html">
<!-- Content is a high level description of a page -->
<content libname="content.front" template="base.html">
<section name="body">
<html:div class="well" if="name">Hello, ${name}!</html:div>
<render src="form" />
</section>
</content>
</moya>
@TEXT /logic/models.xml
<?xml version="1.0" encoding="UTF-8"?>
<moya xmlns="http://moyaproject.com">
<!-- Here's were you might define your database models -->
<!--
<model name="Student" libname="Student" xmlns="http://moyaproject.com/db">
<string name="name" length="30" />
<string name="email" length="300" />
</model>
-->
</moya>
@TEXT /logic/views.xml
<?xml version="1.0" encoding="UTF-8"?>
<moya xmlns="http://moyaproject.com"
xmlns:let="http://moyaproject.com/let"
xmlns:db="http://moyaproject.com/db"
xmlns:forms="http://moyaproject.com/forms">
<!-- Example view that gets a form -->
<view libname="view.front" content="#content.front">
<!-- Get the form -->
<forms:get form="#form.getname" dst="form"/>
<!-- Validate the form using POST data -->
<forms:validate src="form" if=".request.POST">
<!-- if the form validates set the value 'name' which is passed to the content -->
<let name="form.data.name" />
</forms:validate>
</view>
</moya>
@TEXT /logic/signals.xml
<?xml version="1.0" encoding="UTF-8"?>
<moya xmlns="http://moyaproject.com"
xmlns:let="http://moyaproject.com/let"
xmlns:db="http://moyaproject.com/db">
<!-- define signals here -->
<!--
<handle signal="moya.auth.post-login">
<echo>${signal.data.user} just logged in</echo>
</handle>
-->
</moya>
@TEXT /logic/forms.xml
<?xml version="1.0" encoding="UTF-8"?>
<moya xmlns="http://moyaproject.com">
<!-- Forms know how to render and validate themselves -->
<form libname="form.getname" legend="Hello World Form" style="horizontal" xmlns="http://moyaproject.com/forms">
<input name="name" label="What is your name?" class="input-xlarge" type="text" maxlength="30" required="yes"/>
<submit-button text="Submit" />
</form>
</moya>
@WRAPTEXT /logic/readme.txt
Moya code goes here
The filenames used here are just a suggestion of how to organize your Moya code -- all files with the extension .xml will be read.
@TEXT /logic/mountpoints.xml
<moya xmlns="http://moyaproject.com">
<!-- Libraries will typically define a mountpoint to add URLs -->
<mountpoint name="main">
<!-- A simple default view to start you off -->
<url route="/" methods="GET,POST" view="#view.front" name="front" />
</mountpoint>
</moya>
@TEXT /logic/widgets.xml
<?xml version="1.0" encoding="UTF-8"?>
<moya xmlns="http://moyaproject.com"
xmlns:let="http://moyaproject.com/let">
<!-- define your widgets here -->
</moya>
@TEXT /logic/tags.xml
<?xml version="1.0" encoding="UTF-8"?>
<moya xmlns="http://moyaproject.com"
xmlns:let="http://moyaproject.com/let">
<!-- define your tags here -->
</moya>
"""
|
boto3_type_annotations/boto3_type_annotations/apigateway/client.py | cowboygneox/boto3_type_annotations | 119 | 11193779 | from typing import Optional
from typing import IO
from botocore.client import BaseClient
from typing import Dict
from typing import Union
from botocore.paginate import Paginator
from botocore.waiter import Waiter
from typing import List
class Client(BaseClient):
def can_paginate(self, operation_name: str = None):
pass
def create_api_key(self, name: str = None, description: str = None, enabled: bool = None, generateDistinctId: bool = None, value: str = None, stageKeys: List = None, customerId: str = None) -> Dict:
pass
def create_authorizer(self, restApiId: str, name: str, type: str, providerARNs: List = None, authType: str = None, authorizerUri: str = None, authorizerCredentials: str = None, identitySource: str = None, identityValidationExpression: str = None, authorizerResultTtlInSeconds: int = None) -> Dict:
pass
def create_base_path_mapping(self, domainName: str, restApiId: str, basePath: str = None, stage: str = None) -> Dict:
pass
def create_deployment(self, restApiId: str, stageName: str = None, stageDescription: str = None, description: str = None, cacheClusterEnabled: bool = None, cacheClusterSize: str = None, variables: Dict = None, canarySettings: Dict = None, tracingEnabled: bool = None) -> Dict:
pass
def create_documentation_part(self, restApiId: str, location: Dict, properties: str) -> Dict:
pass
def create_documentation_version(self, restApiId: str, documentationVersion: str, stageName: str = None, description: str = None) -> Dict:
pass
def create_domain_name(self, domainName: str, certificateName: str = None, certificateBody: str = None, certificatePrivateKey: str = None, certificateChain: str = None, certificateArn: str = None, regionalCertificateName: str = None, regionalCertificateArn: str = None, endpointConfiguration: Dict = None) -> Dict:
pass
def create_model(self, restApiId: str, name: str, contentType: str, description: str = None, schema: str = None) -> Dict:
pass
def create_request_validator(self, restApiId: str, name: str = None, validateRequestBody: bool = None, validateRequestParameters: bool = None) -> Dict:
pass
def create_resource(self, restApiId: str, parentId: str, pathPart: str) -> Dict:
pass
def create_rest_api(self, name: str, description: str = None, version: str = None, cloneFrom: str = None, binaryMediaTypes: List = None, minimumCompressionSize: int = None, apiKeySource: str = None, endpointConfiguration: Dict = None, policy: str = None) -> Dict:
pass
def create_stage(self, restApiId: str, stageName: str, deploymentId: str, description: str = None, cacheClusterEnabled: bool = None, cacheClusterSize: str = None, variables: Dict = None, documentationVersion: str = None, canarySettings: Dict = None, tracingEnabled: bool = None, tags: Dict = None) -> Dict:
pass
def create_usage_plan(self, name: str, description: str = None, apiStages: List = None, throttle: Dict = None, quota: Dict = None) -> Dict:
pass
def create_usage_plan_key(self, usagePlanId: str, keyId: str, keyType: str) -> Dict:
pass
def create_vpc_link(self, name: str, targetArns: List, description: str = None) -> Dict:
pass
def delete_api_key(self, apiKey: str):
pass
def delete_authorizer(self, restApiId: str, authorizerId: str):
pass
def delete_base_path_mapping(self, domainName: str, basePath: str):
pass
def delete_client_certificate(self, clientCertificateId: str):
pass
def delete_deployment(self, restApiId: str, deploymentId: str):
pass
def delete_documentation_part(self, restApiId: str, documentationPartId: str):
pass
def delete_documentation_version(self, restApiId: str, documentationVersion: str):
pass
def delete_domain_name(self, domainName: str):
pass
def delete_gateway_response(self, restApiId: str, responseType: str):
pass
def delete_integration(self, restApiId: str, resourceId: str, httpMethod: str):
pass
def delete_integration_response(self, restApiId: str, resourceId: str, httpMethod: str, statusCode: str):
pass
def delete_method(self, restApiId: str, resourceId: str, httpMethod: str):
pass
def delete_method_response(self, restApiId: str, resourceId: str, httpMethod: str, statusCode: str):
pass
def delete_model(self, restApiId: str, modelName: str):
pass
def delete_request_validator(self, restApiId: str, requestValidatorId: str):
pass
def delete_resource(self, restApiId: str, resourceId: str):
pass
def delete_rest_api(self, restApiId: str):
pass
def delete_stage(self, restApiId: str, stageName: str):
pass
def delete_usage_plan(self, usagePlanId: str):
pass
def delete_usage_plan_key(self, usagePlanId: str, keyId: str):
pass
def delete_vpc_link(self, vpcLinkId: str):
pass
def flush_stage_authorizers_cache(self, restApiId: str, stageName: str):
pass
def flush_stage_cache(self, restApiId: str, stageName: str):
pass
def generate_client_certificate(self, description: str = None) -> Dict:
pass
def generate_presigned_url(self, ClientMethod: str = None, Params: Dict = None, ExpiresIn: int = None, HttpMethod: str = None):
pass
def get_account(self) -> Dict:
pass
def get_api_key(self, apiKey: str, includeValue: bool = None) -> Dict:
pass
def get_api_keys(self, position: str = None, limit: int = None, nameQuery: str = None, customerId: str = None, includeValues: bool = None) -> Dict:
pass
def get_authorizer(self, restApiId: str, authorizerId: str) -> Dict:
pass
def get_authorizers(self, restApiId: str, position: str = None, limit: int = None) -> Dict:
pass
def get_base_path_mapping(self, domainName: str, basePath: str) -> Dict:
pass
def get_base_path_mappings(self, domainName: str, position: str = None, limit: int = None) -> Dict:
pass
def get_client_certificate(self, clientCertificateId: str) -> Dict:
pass
def get_client_certificates(self, position: str = None, limit: int = None) -> Dict:
pass
def get_deployment(self, restApiId: str, deploymentId: str, embed: List = None) -> Dict:
pass
def get_deployments(self, restApiId: str, position: str = None, limit: int = None) -> Dict:
pass
def get_documentation_part(self, restApiId: str, documentationPartId: str) -> Dict:
pass
def get_documentation_parts(self, restApiId: str, type: str = None, nameQuery: str = None, path: str = None, position: str = None, limit: int = None, locationStatus: str = None) -> Dict:
pass
def get_documentation_version(self, restApiId: str, documentationVersion: str) -> Dict:
pass
def get_documentation_versions(self, restApiId: str, position: str = None, limit: int = None) -> Dict:
pass
def get_domain_name(self, domainName: str) -> Dict:
pass
def get_domain_names(self, position: str = None, limit: int = None) -> Dict:
pass
def get_export(self, restApiId: str, stageName: str, exportType: str, parameters: Dict = None, accepts: str = None) -> Dict:
pass
def get_gateway_response(self, restApiId: str, responseType: str) -> Dict:
pass
def get_gateway_responses(self, restApiId: str, position: str = None, limit: int = None) -> Dict:
pass
def get_integration(self, restApiId: str, resourceId: str, httpMethod: str) -> Dict:
pass
def get_integration_response(self, restApiId: str, resourceId: str, httpMethod: str, statusCode: str) -> Dict:
pass
def get_method(self, restApiId: str, resourceId: str, httpMethod: str) -> Dict:
pass
def get_method_response(self, restApiId: str, resourceId: str, httpMethod: str, statusCode: str) -> Dict:
pass
def get_model(self, restApiId: str, modelName: str, flatten: bool = None) -> Dict:
pass
def get_model_template(self, restApiId: str, modelName: str) -> Dict:
pass
def get_models(self, restApiId: str, position: str = None, limit: int = None) -> Dict:
pass
def get_paginator(self, operation_name: str = None) -> Paginator:
pass
def get_request_validator(self, restApiId: str, requestValidatorId: str) -> Dict:
pass
def get_request_validators(self, restApiId: str, position: str = None, limit: int = None) -> Dict:
pass
def get_resource(self, restApiId: str, resourceId: str, embed: List = None) -> Dict:
pass
def get_resources(self, restApiId: str, position: str = None, limit: int = None, embed: List = None) -> Dict:
pass
def get_rest_api(self, restApiId: str) -> Dict:
pass
def get_rest_apis(self, position: str = None, limit: int = None) -> Dict:
pass
def get_sdk(self, restApiId: str, stageName: str, sdkType: str, parameters: Dict = None) -> Dict:
pass
def get_sdk_type(self, id: str) -> Dict:
pass
def get_sdk_types(self, position: str = None, limit: int = None) -> Dict:
pass
def get_stage(self, restApiId: str, stageName: str) -> Dict:
pass
def get_stages(self, restApiId: str, deploymentId: str = None) -> Dict:
pass
def get_tags(self, resourceArn: str, position: str = None, limit: int = None) -> Dict:
pass
def get_usage(self, usagePlanId: str, startDate: str, endDate: str, keyId: str = None, position: str = None, limit: int = None) -> Dict:
pass
def get_usage_plan(self, usagePlanId: str) -> Dict:
pass
def get_usage_plan_key(self, usagePlanId: str, keyId: str) -> Dict:
pass
def get_usage_plan_keys(self, usagePlanId: str, position: str = None, limit: int = None, nameQuery: str = None) -> Dict:
pass
def get_usage_plans(self, position: str = None, keyId: str = None, limit: int = None) -> Dict:
pass
def get_vpc_link(self, vpcLinkId: str) -> Dict:
pass
def get_vpc_links(self, position: str = None, limit: int = None) -> Dict:
pass
def get_waiter(self, waiter_name: str = None) -> Waiter:
pass
def import_api_keys(self, body: Union[bytes, IO], format: str, failOnWarnings: bool = None) -> Dict:
pass
def import_documentation_parts(self, restApiId: str, body: Union[bytes, IO], mode: str = None, failOnWarnings: bool = None) -> Dict:
pass
def import_rest_api(self, body: Union[bytes, IO], failOnWarnings: bool = None, parameters: Dict = None) -> Dict:
pass
def put_gateway_response(self, restApiId: str, responseType: str, statusCode: str = None, responseParameters: Dict = None, responseTemplates: Dict = None) -> Dict:
pass
def put_integration(self, restApiId: str, resourceId: str, httpMethod: str, type: str, integrationHttpMethod: str = None, uri: str = None, connectionType: str = None, connectionId: str = None, credentials: str = None, requestParameters: Dict = None, requestTemplates: Dict = None, passthroughBehavior: str = None, cacheNamespace: str = None, cacheKeyParameters: List = None, contentHandling: str = None, timeoutInMillis: int = None) -> Dict:
pass
def put_integration_response(self, restApiId: str, resourceId: str, httpMethod: str, statusCode: str, selectionPattern: str = None, responseParameters: Dict = None, responseTemplates: Dict = None, contentHandling: str = None) -> Dict:
pass
def put_method(self, restApiId: str, resourceId: str, httpMethod: str, authorizationType: str, authorizerId: str = None, apiKeyRequired: bool = None, operationName: str = None, requestParameters: Dict = None, requestModels: Dict = None, requestValidatorId: str = None, authorizationScopes: List = None) -> Dict:
pass
def put_method_response(self, restApiId: str, resourceId: str, httpMethod: str, statusCode: str, responseParameters: Dict = None, responseModels: Dict = None) -> Dict:
pass
def put_rest_api(self, restApiId: str, body: Union[bytes, IO], mode: str = None, failOnWarnings: bool = None, parameters: Dict = None) -> Dict:
pass
def tag_resource(self, resourceArn: str, tags: Dict):
pass
def test_invoke_authorizer(self, restApiId: str, authorizerId: str, headers: Dict = None, multiValueHeaders: Dict = None, pathWithQueryString: str = None, body: str = None, stageVariables: Dict = None, additionalContext: Dict = None) -> Dict:
pass
def test_invoke_method(self, restApiId: str, resourceId: str, httpMethod: str, pathWithQueryString: str = None, body: str = None, headers: Dict = None, multiValueHeaders: Dict = None, clientCertificateId: str = None, stageVariables: Dict = None) -> Dict:
pass
def untag_resource(self, resourceArn: str, tagKeys: List):
pass
def update_account(self, patchOperations: List = None) -> Dict:
pass
def update_api_key(self, apiKey: str, patchOperations: List = None) -> Dict:
pass
def update_authorizer(self, restApiId: str, authorizerId: str, patchOperations: List = None) -> Dict:
pass
def update_base_path_mapping(self, domainName: str, basePath: str, patchOperations: List = None) -> Dict:
pass
def update_client_certificate(self, clientCertificateId: str, patchOperations: List = None) -> Dict:
pass
def update_deployment(self, restApiId: str, deploymentId: str, patchOperations: List = None) -> Dict:
pass
def update_documentation_part(self, restApiId: str, documentationPartId: str, patchOperations: List = None) -> Dict:
pass
def update_documentation_version(self, restApiId: str, documentationVersion: str, patchOperations: List = None) -> Dict:
pass
def update_domain_name(self, domainName: str, patchOperations: List = None) -> Dict:
pass
def update_gateway_response(self, restApiId: str, responseType: str, patchOperations: List = None) -> Dict:
pass
def update_integration(self, restApiId: str, resourceId: str, httpMethod: str, patchOperations: List = None) -> Dict:
pass
def update_integration_response(self, restApiId: str, resourceId: str, httpMethod: str, statusCode: str, patchOperations: List = None) -> Dict:
pass
def update_method(self, restApiId: str, resourceId: str, httpMethod: str, patchOperations: List = None) -> Dict:
pass
def update_method_response(self, restApiId: str, resourceId: str, httpMethod: str, statusCode: str, patchOperations: List = None) -> Dict:
pass
def update_model(self, restApiId: str, modelName: str, patchOperations: List = None) -> Dict:
pass
def update_request_validator(self, restApiId: str, requestValidatorId: str, patchOperations: List = None) -> Dict:
pass
def update_resource(self, restApiId: str, resourceId: str, patchOperations: List = None) -> Dict:
pass
def update_rest_api(self, restApiId: str, patchOperations: List = None) -> Dict:
pass
def update_stage(self, restApiId: str, stageName: str, patchOperations: List = None) -> Dict:
pass
def update_usage(self, usagePlanId: str, keyId: str, patchOperations: List = None) -> Dict:
pass
def update_usage_plan(self, usagePlanId: str, patchOperations: List = None) -> Dict:
pass
def update_vpc_link(self, vpcLinkId: str, patchOperations: List = None) -> Dict:
pass
|
software/glasgow/applet/sensor/sen5x/__init__.py | whitequark/glasgow | 280 | 11193788 | <reponame>whitequark/glasgow
# Ref: https://developer.sensirion.com/fileadmin/user_upload/customers/sensirion/Dokumente/15_Environmental_Sensor_Node/Datasheets/Sensirion_Environmental_Sensor_Node_SEN5x_Datasheet.pdf
# Accession: G00083
from collections import namedtuple
import argparse
import logging
import asyncio
import struct
import crcmod
from ....support.logging import dump_hex
from ....support.data_logger import DataLogger
from ...interface.i2c_initiator import I2CInitiatorApplet
from ... import *
CMD_START_MEASURE = 0x0021
CMD_STOP_MEASURE = 0x0104
CMD_DATA_READY = 0x0202
CMD_READ_MEASURE = 0x03C4
CMD_PRODUCT_NAME = 0xD014
CMD_SERIAL_NUM = 0xD033
CMD_FIRMWARE_VER = 0xD100
CMD_SOFT_RESET = 0xD304
class SEN5xError(GlasgowAppletError):
pass
SEN5xMeasurement = namedtuple("SEN5xMeasurement", ("pm1_0", "pm2_5", "pm4_0", "pm10", "rh_pct", "temp_degC", "voc_index", "nox_index"))
class SEN5xI2CInterface:
i2c_addr = 0x69
def __init__(self, interface, logger):
self.lower = interface
self._logger = logger
self._level = logging.DEBUG if self._logger.name == __name__ else logging.TRACE
def _log(self, message, *args):
self._logger.log(self._level, "SEN5x: " + message, *args)
_crc = staticmethod(crcmod.mkCrcFun(0x131, initCrc=0xff, rev=False))
async def _read_raw(self, addr, length=0, delay_seconds=None):
assert length % 2 == 0
acked = await self.lower.write(self.i2c_addr, struct.pack(">H", addr), stop=True)
if acked is False:
raise SEN5xError("SEN5x did not acknowledge address write")
if delay_seconds is not None:
await asyncio.sleep(delay_seconds)
crc_data = await self.lower.read(self.i2c_addr, length // 2 * 3, stop=True)
if crc_data is None:
raise SEN5xError("SEN5x did not acknowledge data read")
self._log("addr=%#06x data=<%s>", addr, dump_hex(crc_data))
data = bytearray()
for index, (chunk, crc) in enumerate(struct.iter_unpack(">2sB", crc_data)):
if self._crc(chunk) != crc:
raise SEN5xError("CRC failed on word {}".format(index))
data += chunk
return data
async def _write_raw(self, cmd, data=b""):
assert len(data) % 2 == 0
crc_data = bytearray()
for chunk, in struct.iter_unpack(">2s", data):
crc_data += chunk
crc_data.append(self._crc(chunk))
self._log("cmd=%#06x args=<%s>", cmd, dump_hex(crc_data))
acked = await self.lower.write(self.i2c_addr, struct.pack(">H", cmd) + crc_data,
stop=True)
if acked is False:
raise SEN5xError("SEN5x did not acknowledge command write")
async def _read(self, addr, format, delay_seconds=None):
return struct.unpack(format, await self._read_raw(addr, struct.calcsize(format), delay_seconds))
async def _write(self, cmd, format="", *args):
await self._write_raw(cmd, struct.pack(format, *args))
async def soft_reset(self):
self._log("soft reset")
await self._write(CMD_SOFT_RESET)
async def product_name(self):
name, = await self._read(CMD_PRODUCT_NAME, ">32s")
self._log("product name=%s", name)
return name.rstrip(b'\x00').decode('ascii')
async def serial_number(self):
serial, = await self._read(CMD_SERIAL_NUM, ">32s")
self._log("serial number=%s", serial)
return serial.rstrip(b'\x00').decode('ascii')
async def firmware_version(self):
version, reserved = await self._read(CMD_FIRMWARE_VER, ">BB")
self._log("firmware version=%d reserved=%d", version, reserved)
return version
async def is_data_ready(self):
ready, = await self._read(CMD_DATA_READY, ">H")
self._log("data ready=%d", ready)
return bool(ready)
async def start_measurement(self):
self._log("start measurement")
await self._write(CMD_START_MEASURE)
async def stop_measurement(self):
self._log("stop measurement")
await self._write(CMD_STOP_MEASURE)
async def read_measurement(self):
measurements = await self._read(CMD_READ_MEASURE, ">HHHHhhhh", delay_seconds=10e-3)
scale_factors = [10, 10, 10, 10, 100, 200, 10, 10]
measurements = [a / float(b) for a,b in zip(measurements, scale_factors)]
(pm1_0, pm2_5, pm4_0, pm10, rh_pct, temp_degC, voc_index, nox_index) = measurements
self._log("measured PM1.0=%.1f [µg/m³] PM2.5=%.1f [µg/m³] PM4.0%.1f [µg/m³] PM10=%.1f [µg/m³] RH=%.2f [%%] T=%.2f [°C] VOC=%.1f NOx=%.1f",
pm1_0, pm2_5, pm4_0, pm10, rh_pct, temp_degC, voc_index, nox_index)
return SEN5xMeasurement(pm1_0, pm2_5, pm4_0, pm10, rh_pct, temp_degC, voc_index, nox_index)
class SensorSEN5xApplet(I2CInitiatorApplet, name="sensor-sen5x"):
logger = logging.getLogger(__name__)
help = "measure PM, NOx, VOC, humidity, and temperature with Sensirion SEN5x sensors"
description = """
Measure PM, NOx, VOC, humidity, and temperature using Sensirion SEN5x sensors
connected over the I²C interface.
NOTE: The SEL pin must be connected to ground before startup, for the SEN5x to enable the I2C interface.
"""
async def run(self, device, args):
i2c_iface = await self.run_lower(SensorSEN5xApplet, device, args)
return SEN5xI2CInterface(i2c_iface, self.logger)
@classmethod
def add_interact_arguments(cls, parser):
def arg_conv_range(conv, low, high):
def arg(value):
value = conv(value)
if not (low <= value <= high):
raise argparse.ArgumentTypeError(
"{} is not between {} and {}".format(value, low, high))
return value
return arg
p_operation = parser.add_subparsers(dest="operation", metavar="OPERATION", required=True)
p_start = p_operation.add_parser(
"start", help="start measurement")
p_stop = p_operation.add_parser(
"stop", help="stop measurement")
p_measure = p_operation.add_parser(
"measure", help="read measured values (must start first)")
p_log = p_operation.add_parser(
"log", help="log measured values (must start first)")
DataLogger.add_subparsers(p_log)
async def interact(self, device, args, sen5x):
product_name = await sen5x.product_name()
serial = await sen5x.serial_number()
version = await sen5x.firmware_version()
self.logger.info("SEN5x: %s serial %s firmware v%d", product_name, serial, version)
if args.operation == "start":
await sen5x.start_measurement()
if args.operation == "stop":
await sen5x.stop_measurement()
if args.operation == "measure":
while not await sen5x.is_data_ready():
await asyncio.sleep(1.0)
sample = await sen5x.read_measurement()
print("PM1.0 concentration : {:.1f} µg/m³".format(sample.pm1_0))
print("PM2.5 concentration : {:.1f} µg/m³".format(sample.pm2_5))
print("PM4.0 concentration : {:.1f} µg/m³".format(sample.pm4_0))
print("PM10 concentration : {:.1f} µg/m³".format(sample.pm10))
print("relative humidity : {:.2f} %".format(sample.rh_pct))
print("temperature : {:.2f} °C".format(sample.temp_degC))
print("VOC index : {:.1f}".format(sample.voc_index))
print("NOx index : {:.1f}".format(sample.nox_index))
if args.operation == "log":
field_names = dict(
pm1_0="PM1.0(µg/m³)",
pm2_5="PM2.5(µg/m³)",
pm4_0="PM4.0(µg/m³)",
pm10="PM10(µg/m³)",
rh_pct="RH(%)",
temp_degC="T(°C)",
voc_index="VOC",
nox_index="NOx"
)
data_logger = await DataLogger(self.logger, args, field_names=field_names)
meas_interval = 1.0
while True:
async def report():
while not await sen5x.is_data_ready():
await asyncio.sleep(meas_interval / 2)
sample = await sen5x.read_measurement()
fields = sample._asdict()
await data_logger.report_data(fields)
try:
await asyncio.wait_for(report(), meas_interval * 3)
except SEN5xError as error:
await data_logger.report_error(str(error), exception=error)
await sen5x.lower.reset()
await asyncio.sleep(meas_interval)
except asyncio.TimeoutError as error:
await data_logger.report_error("timeout", exception=error)
await sen5x.lower.reset()
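# --- Illustrative sketch (not part of the original applet) ---
# The SEN5x I2C framing implemented by _read_raw/_write_raw sends each 16-bit
# word followed by a CRC-8 (polynomial 0x31, init 0xFF, unreflected). The
# helper below checks that framing against the word 0xBEEF, whose expected
# checksum 0x92 is taken from Sensirion application notes (an assumption, not
# a value stated in this file). It is never called by the applet itself.
def _crc_framing_example():
    crc8 = crcmod.mkCrcFun(0x131, initCrc=0xff, rev=False)
    return crc8(b"\xbe\xef") == 0x92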
|
ask-smapi-sdk/ask_smapi_sdk/smapi_builder.py | nikhilym/alexa-skills-kit-sdk-for-python | 496 | 11193844 | # -*- coding: utf-8 -*-
#
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights
# Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS
# OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the
# License.
#
import typing
from abc import ABCMeta, abstractmethod
from ask_sdk_model_runtime import (DefaultApiClient, DefaultSerializer,
ApiConfiguration,
AuthenticationConfiguration)
from ask_smapi_model.services.skill_management import (
SkillManagementServiceClient)
if typing.TYPE_CHECKING:
from ask_sdk_model_runtime import ApiClient, Serializer
from typing import Optional
DEFAULT_API_ENDPOINT = "https://api.amazonalexa.com"
class SmapiClientBuilder(object):
"""Abstract SmapiClient Builder for building
:py:class:`ask_smapi_model.services.skill_management.SkillManagementServiceClient`
object.
"""
__metaclass__ = ABCMeta
def __init__(self):
# type: () -> None
"""Abstract SmapiClient Builder for building
:py:class:`ask_smapi_model.services.skill_management.SkillManagementServiceClient`
object.
"""
self._api_endpoint = None # type: Optional[str]
@property
def api_endpoint(self):
# type: () -> Optional[str]
"""Returns the Endpoint to hit by the SMAPI Service.
:return: Endpoint to hit by the SMAPI service client.
:rtype: str
"""
return self._api_endpoint
@api_endpoint.setter
def api_endpoint(self, value):
# type: (str) -> None
"""Sets the Endpoint value to hit by the SMAPI Service.
:param value: Endpoint to hit by the SMAPI service client.
:type value: str
"""
self._api_endpoint = value
@abstractmethod
def client(self):
# type: () -> SkillManagementServiceClient
raise NotImplementedError
class StandardSmapiClientBuilder(SmapiClientBuilder):
"""Standard SmapiClient Builder class used to generate
:py:class:`ask_smapi_model.services.skill_management.SkillManagementServiceClient`
object with default Serializer and ApiClient implementations.
:param client_id: The ClientId value from LWA profiles.
:type client_id: str
:param client_secret: The ClientSecret value from LWA profiles.
:type client_secret: str
:param refresh_token: Client refresh_token required to get access token
for API calls.
:type refresh_token: str
"""
def __init__(self, client_id, client_secret, refresh_token):
# type: (str, str, str) -> None
"""Smapi Builder class used to generate
:py:class:`ask_smapi_model.services.skill_management.SkillManagementServiceClient`
object with default Serializer and ApiClient implementations.
:param client_id: The ClientId value from LWA profiles.
:type client_id: str
:param client_secret: The ClientSecret value from LWA profiles.
:type client_secret: str
:param refresh_token: Client refresh_token required to get access token
for API calls.
"""
super(StandardSmapiClientBuilder, self).__init__()
self.client_id = client_id
self.client_secret = client_secret
self.refresh_token = refresh_token
def client(self):
# type: () -> SkillManagementServiceClient
"""Creates the smapi client object using AuthenticationConfiguration
and ApiConfiguration registered values.
:return: A smapi object that can be used for making SMAPI method
invocations.
:rtype: :py:class:`ask_smapi_model.services.skill_management.SkillManagementServiceClient`
"""
if self.api_endpoint is None:
self.api_endpoint = DEFAULT_API_ENDPOINT
api_configuration = ApiConfiguration(serializer=DefaultSerializer(),
api_client=DefaultApiClient(),
api_endpoint=self.api_endpoint)
authentication_configuration = AuthenticationConfiguration(
client_id=self.client_id, client_secret=self.client_secret,
refresh_token=self.refresh_token)
return SkillManagementServiceClient(
api_configuration=api_configuration,
authentication_configuration=authentication_configuration)
class CustomSmapiClientBuilder(SmapiClientBuilder):
"""Smapi Custom Builder with serializer, api_client and api_endpoint setter
functions.
This builder is used to create an instance of
:py:class:`ask_smapi_model.services.skill_management.SkillManagementServiceClient`
with default Serializers and ApiClient implementations.
"""
def __init__(self, client_id, client_secret, refresh_token,
serializer=None, api_client=None):
# type: (str, str, str, Serializer, ApiClient) -> None
"""Smapi Custom Builder with serializer, api_client and api_endpoint
setter functions.
This builder is used to create an instance of
:py:class:`ask_smapi_model.services.skill_management.SkillManagementServiceClient`
with default Serializers and ApiClient implementations.
:param client_id: The ClientId value from LWA profiles.
:type client_id: str
:param client_secret: The ClientSecret value from LWA profiles.
:type client_secret: str
:param refresh_token: Client refresh_token required to get access token
for API calls.
:param serializer: serializer implementation for encoding/decoding JSON
from/to Object models.
:type serializer: (optional) ask_sdk_model_runtime.serializer.Serializer
:param api_client: API Client implementation
:type api_client: (optional) ask_sdk_model_runtime.api_client.ApiClient
"""
super(CustomSmapiClientBuilder, self).__init__()
self.client_id = client_id
self.client_secret = client_secret
self.refresh_token = refresh_token
self.serializer = serializer
self.api_client = api_client
def client(self):
# type: () -> SkillManagementServiceClient
"""Creates the smapi client object using AuthenticationConfiguration
and ApiConfiguration registered values.
:return: A smapi object that can be used for making SMAPI method
invocations.
:rtype: :py:class:`ask_smapi_model.services.skill_management.SkillManagementServiceClient`
"""
if self.serializer is None:
self.serializer = DefaultSerializer()
if self.api_client is None:
self.api_client = DefaultApiClient()
if self.api_endpoint is None:
self.api_endpoint = DEFAULT_API_ENDPOINT
api_configuration = ApiConfiguration(serializer=self.serializer,
api_client=self.api_client,
api_endpoint=self.api_endpoint)
authentication_configuration = AuthenticationConfiguration(
client_id=self.client_id, client_secret=self.client_secret,
refresh_token=self.refresh_token)
return SkillManagementServiceClient(
api_configuration=api_configuration,
authentication_configuration=authentication_configuration)
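def _example_build_client(client_id, client_secret, refresh_token):
    # Illustrative sketch, not part of the original module: wires the standard
    # builder defined above into a ready-to-use SMAPI client with the default
    # serializer and API client. The credential arguments are whatever LWA
    # security-profile values the caller supplies; no real values are assumed.
    builder = StandardSmapiClientBuilder(client_id=client_id,
                                         client_secret=client_secret,
                                         refresh_token=refresh_token)
    return builder.client()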
|
py_feature/315_streak.py | weiziyoung/instacart | 290 | 11193886 | <gh_stars>100-1000
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed May 31 02:10:45 2017
@author: konodera
Record of consecutive purchases (streak) as of the current point in time
*Leak
"""
import pandas as pd
import numpy as np
from tqdm import tqdm
import multiprocessing as mp
import utils
utils.start(__file__)
streak = pd.read_pickle('../input/mk/streak_order-product.p')
X_base = pd.read_pickle('../feature/X_base_t3.p')
#==============================================================================
# def
#==============================================================================
def multi(T):
"""
T = 0
folder = 'trainT-0'
"""
if T==-1:
folder = 'test'
else:
folder = 'trainT-'+str(T)
label = pd.read_pickle('../feature/{}/label_reordered.p'.format(folder))
label = pd.merge(label, X_base, on='order_id', how='inner')
# ======== T-1~3 ========
for t in range(1,4):
df = pd.merge(label, streak.rename(columns={'order_id':'t-{}_order_id'.format(t),
'streak':'t-{}_streak'.format(t)}),
on=['t-{}_order_id'.format(t),'product_id'], how='left')
print(df.isnull().sum())
df.fillna(-99, inplace=1)
df.reset_index(drop=1, inplace=1)
col = ['order_id', 'product_id', 't-{}_streak'.format(t)]
df[col].to_pickle('../feature/{}/f315-{}_order-product.p'.format(folder, t))
#==============================================================================
# main
#==============================================================================
mp_pool = mp.Pool(3)
callback = mp_pool.map(multi, list(range(-1,3)))
#==============================================================================
utils.end(__file__)
|
triton_transformer/utils.py | dumpmemory/triton-transformer | 118 | 11193900 | <filename>triton_transformer/utils.py
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
def calc_num_warps(block_size):
num_warps = 4
if block_size >= 2048:
num_warps = 8
if block_size >= 4096:
num_warps = 16
return num_warps
|
sandbox/jorvis/generate_read_to_metaref_seed_alignment.py | senjoro/biocode | 355 | 11193958 | #!/usr/local/packages/Python-3.3.2/bin/python3
"""
Started at 3:30 PM:
$ /usr/local/packages/bowtie2-2.2.4/bowtie2 -x /usr/local/projects/dacc/jorvis/read_to_metaref_alignments/mumi.20150105.genomic -1 /usr/local/scratch/jorvis/dacc/read_to_metaref_seed_alignments/phase2/SRS143214/SRS143214.denovo_duplicates_marked.trimmed.1.fastq -2 /usr/local/scratch/jorvis/dacc/read_to_metaref_seed_alignments/phase2/SRS143214/SRS143214.denovo_duplicates_marked.trimmed.2.fastq -U /usr/local/scratch/jorvis/dacc/read_to_metaref_seed_alignments/phase2/SRS143214/SRS143214.denovo_duplicates_marked.trimmed.singleton.fastq -S /usr/local/scratch/jorvis/dacc/read_to_metaref_seed_alignments/phase2/SRS143214/SRS143214.vs_metaref_seeds.bowtie2.sam -a >& run.out
509710 reads; of these:
509710 (100.00%) were paired; of these:
325656 (63.89%) aligned concordantly 0 times
72035 (14.13%) aligned concordantly exactly 1 time
112019 (21.98%) aligned concordantly >1 times
----
325656 pairs aligned concordantly 0 times; of these:
511 (0.16%) aligned discordantly 1 time
----
325145 pairs aligned 0 times concordantly or discordantly; of these:
650290 mates make up the pairs; of these:
630220 (96.91%) aligned 0 times
12267 (1.89%) aligned exactly 1 time
7803 (1.20%) aligned >1 times
509710 reads? Files were removed from scratch during runtime
Test execution (4.7GB tarball):
./generate_read_to_metaref_seed_alignment.py -r /local/projects-t2/dacc/dmz_Illumina/PHASEII/stool/SRS148424.tar.bz2 -s SRS148424
Test SGE execution:
qsub -P owhite-dacc-irc -q threaded.q -v PATH -pe thread 8 -l mem_free=25G -wd /usr/local/scratch/jorvis/dacc/read_to_metaref_seed_alignments -o SRS140663.process.log -e SRS140663.process.stderr -b y /usr/local/projects/dacc/bin/generate_read_to_metaref_seed_alignment.py -r /local/projects-t2/dacc/dmz_Illumina/PHASEII/anterior_nares/SRS140663.tar.bz2 -s SRS140663 -c 8
"""
import argparse
import datetime
import os
import subprocess
def main():
READ_BASE_DIR = "/local/projects-t2/dacc/dmz_Illumina"
#WORK_BASE_DIR = "/usr/local/scratch/jorvis/dacc/read_to_metaref_seed_alignments/partition20"
#WORK_BASE_DIR = "/local/scratch2/dacc/read_to_metaref_seed_alignments/partition20"
WORK_BASE_DIR = "/local/hmp/dacc/restore/read_to_metaref_seed_alignments/partition20"
BOWTIE_PATH = "/usr/local/packages/bowtie2-2.2.4/bowtie2"
COMPLETION_BASE = "/local/hmp/dacc/restore/read_to_metaref_seed_alignments/complete/microbes/"
# any reads >= this percentage of Ns will be removed
FASTQ_FILTERING_N_PCT_CUTOFF = 80
parser = argparse.ArgumentParser( description='Put a description of your script here')
## output file to be written
parser.add_argument('-r', '--read_file', type=str, required=True, help='Read file (in tar.bz2 format)' )
parser.add_argument('-s', '--sample', type=str, required=True, help='sample_base_name, like SRS016516' )
parser.add_argument('-c', '--cpu_cores', type=int, required=False, default=8, help='CPU count to use for bowtie step' )
args = parser.parse_args()
## decompress it
# tar -xjf posterior_fornix/SRS016516.tar.bz2 -C /tmp/
# ls /tmp/SRS016516/
cmd = "tar -xjf {0} -C {1}/".format(args.read_file, WORK_BASE_DIR)
run_command(cmd)
## creates three files:
# SRS016516.denovo_duplicates_marked.trimmed.1.fastq
# SRS016516.denovo_duplicates_marked.trimmed.2.fastq
# SRS016516.denovo_duplicates_marked.trimmed.singleton.fastq
scratch_dir = "{0}/{1}".format(WORK_BASE_DIR, args.sample)
f_reads = "{0}/{1}.denovo_duplicates_marked.trimmed.1.fastq".format(scratch_dir, args.sample)
r_reads = "{0}/{1}.denovo_duplicates_marked.trimmed.2.fastq".format(scratch_dir, args.sample)
s_reads = "{0}/{1}.denovo_duplicates_marked.trimmed.singleton.fastq".format(scratch_dir, args.sample)
# touch each of these files so they aren't removed while running!!
run_command("touch {0}".format(f_reads))
run_command("touch {0}".format(r_reads))
run_command("touch {0}".format(s_reads))
## run my script to filter out reads with Ns
f_reads_trimmed = "{0}/{1}.1.ntrimmed.fastq".format(scratch_dir, args.sample)
r_reads_trimmed = "{0}/{1}.2.ntrimmed.fastq".format(scratch_dir, args.sample)
s_reads_trimmed = "{0}/{1}.singletons.ntrimmed.fastq".format(scratch_dir, args.sample)
cmd = "/home/jorvis/git/biocode/fastq/filter_fastq_by_N_content.py -l {0} -r {1} -s {2} -lo {3} -ro {4} -so {5} -p {7} -or {8}/{6}.ntrimming.report".format(f_reads, r_reads, s_reads, f_reads_trimmed, r_reads_trimmed, s_reads_trimmed, args.sample, FASTQ_FILTERING_N_PCT_CUTOFF, scratch_dir)
run_command(cmd)
    # run bowtie2
# /usr/local/packages/bowtie2-2.2.4/bowtie2 -1 <m1> -2 <m2> -U <r>} -S <sam>
mapped_list = "{0}/bam_files_to_merge.mapped.list".format(scratch_dir)
mapped_list_to_merge = open(mapped_list, 'wt')
unmapped_list = "{0}/bam_files_to_merge.unmapped.list".format(scratch_dir)
unmapped_list_to_merge = open(unmapped_list, 'wt')
# each of these could be different threads
for index_i in range(1, 21):
sam_file_base = "{0}/{1}.vs_metaref_seeds.fragment{2}.bowtie2".format(scratch_dir, args.sample, index_i)
cmd = "{0} -p {5} -a -x /usr/local/projects/dacc/jorvis/read_to_metaref_alignments/partition20/mumi.20150105.genomic.withNs.fna.part{6} -1 {1} -2 {2} -U {3} -S {4}.sam".format(BOWTIE_PATH, f_reads_trimmed, r_reads_trimmed, s_reads_trimmed, sam_file_base, args.cpu_cores, index_i)
run_command(cmd)
# Convert SAM to BAM
cmd = "samtools view -bS {0}.sam > {0}.bam".format(sam_file_base)
run_command(cmd)
# Delete SAM
cmd = "rm {0}.sam".format(sam_file_base)
run_command(cmd)
# Sort BAM
        # May have to set the -m option here to limit the memory used.
cmd = "samtools sort -@ 4 -m 3G {0}.bam {0}.sorted".format(sam_file_base)
run_command(cmd)
# Delete unsorted BAM
cmd = "rm {0}.bam".format(sam_file_base)
run_command(cmd)
# Write a file of just the mapped reads
cmd = "samtools view -h -F 4 -b {0}.sorted.bam > {0}.sorted.mapped.bam".format(sam_file_base)
run_command(cmd)
mapped_list_to_merge.write("{0}.sorted.mapped.bam\n".format(sam_file_base))
# Write a file of just the unmapped reads
cmd = "samtools view -h -f 4 -b {0}.sorted.bam > {0}.sorted.unmapped.bam".format(sam_file_base)
run_command(cmd)
unmapped_list_to_merge.write("{0}.sorted.unmapped.bam\n".format(sam_file_base))
# Delete the full file
cmd = "rm {0}.sorted.bam".format(sam_file_base)
run_command(cmd)
# merge the mapped BAM files (attempt using samtools):
#"{0}/{1}.vs_metaref_seeds.fragment{2}.bowtie2".format(scratch_dir, args.sample, index_i)
#cmd = "samtools merge {0}/{1}.vs_metaref_seeds.mapped.bowtie2.sorted.bam {0}/{1}.*.fragment.*.mapped.bam".format(scratch_dir, args.sample)
#run_command(cmd)
# merge the mapped BAM files (attempt using Picard):
#cmd = "java -jar -Xmx12g /usr/local/packages/picard-tools-1.115/MergeSamFiles.jar TMP_DIR={0}/{1} MERGE_SEQUENCE_DICTIONARIES=true USE_THREADING=true OUTPUT={0}/{1}.picard.mapped.merged.bam ".format(scratch_dir, args.sample)
#for idx in range(1, 21):
# partial_bamfile = "{0}/{1}.vs_metaref_seeds.fragment{2}.bowtie2.sorted.mapped.bam".format(scratch_dir, args.sample, idx)
# cmd += "INPUT={0} ".format(partial_bamfile)
mapped_list_to_merge.close()
unmapped_list_to_merge.close()
# merge the mapped BAM files (my own script), then delete them:
cmd = "/home/jorvis/git/biocode/general/merge_bam_files.py -i {0} -o {1}/{2}.mapped.merged".format(mapped_list, scratch_dir, args.sample)
run_command(cmd)
run_command("rm {0}/*.fragment*.mapped.bam".format(scratch_dir))
# merge the unmapped BAM files (my own script), then delete them:
cmd = "/home/jorvis/git/biocode/general/merge_bam_files.py -i {0} -o {1}/{2}.unmapped.merged".format(unmapped_list, scratch_dir, args.sample)
run_command(cmd)
run_command("rm {0}/*.fragment*.unmapped.bam".format(scratch_dir))
# Sort the merged BAM file, then delete the unsorted one
cmd = "samtools sort {0}/{1}.mapped.merged.bam {0}/{1}.mapped.merged.sorted".format(scratch_dir, args.sample)
run_command(cmd)
run_command("rm {0}/{1}.mapped.merged.bam".format(scratch_dir, args.sample))
cmd = "samtools sort -@ 4 -m 3G {0}/{1}.unmapped.merged.bam {0}/{1}.unmapped.merged.sorted".format(scratch_dir, args.sample)
run_command(cmd)
run_command("rm {0}/{1}.unmapped.merged.bam".format(scratch_dir, args.sample))
# Done - migrate results
result_dir = "{0}/{1}".format(COMPLETION_BASE, args.sample)
run_command("mkdir {0}".format(result_dir))
run_command("mv {2}/{0}.mapped.merged.sorted.bam {2}/{0}.unmapped.merged.sorted.bam {1}/".format(args.sample, result_dir, scratch_dir))
run_command("rm -rf {0}".format(scratch_dir))
def run_command(cmd):
print("INFO: [{1}] Running command: {0}\n".format(cmd, datetime.datetime.now()), flush=True)
return_code = subprocess.call(cmd, shell=True)
if return_code != 0:
raise Exception("ERROR: [{2}] Return code {0} when running the following command: {1}".format(return_code, cmd, datetime.datetime.now()))
if __name__ == '__main__':
main()
|
pyrival/misc/mod.py | MattJDavidson/aoc2021 | 748 | 11193986 | import __pypy__
int_add = __pypy__.intop.int_add
int_sub = __pypy__.intop.int_sub
int_mul = __pypy__.intop.int_mul
def make_mod_mul(mod=10**9 + 7):
fmod_inv = 1.0 / mod
def mod_mul(a, b, c=0):
res = int_sub(int_add(int_mul(a, b), c), int_mul(mod, int(fmod_inv * a * b + fmod_inv * c)))
if res >= mod:
return res - mod
elif res < 0:
return res + mod
else:
return res
return mod_mul
mod_mul = make_mod_mul()
def mod_pow(x, y):
if y == 0:
return 1
res = 1
while y > 1:
if y & 1 == 1:
res = mod_mul(res, x)
x = mod_mul(x, x)
y >>= 1
return mod_mul(res, x)
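# --- Illustrative self-check (not part of the original pyrival snippet) ---
# On PyPy, mod_mul keeps (a*b + c) % mod inside machine-word arithmetic: the
# quotient is estimated with the precomputed float reciprocal and the result
# is corrected by at most one addition/subtraction of mod (as the code above
# does); mod_pow is square-and-multiply on top of it. The values below are
# arbitrary examples; the function is never called automatically.
def _selfcheck_example(mod=10**9 + 7):
    assert mod_mul(123456789, 987654321) == (123456789 * 987654321) % mod
    assert mod_pow(3, 45) == pow(3, 45, mod)
    return True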
|
json-to-messages/python/features/hooks/environment.py | mohan-chinnappan-n/cucumber | 3,974 | 11193988 | <gh_stars>1000+
def before_scenario(context, scenario):
pass
def before_tag(context, tag):
if tag == 'failBeforeHook':
raise Exception('spam', 'eggs')
def after_tag(context, tag):
pass |
djstripe/migrations/0015_alter_customer_delinquent.py | ExtraE113/dj-stripe | 937 | 11193994 | # Generated by Django 3.2.10 on 2021-12-22 00:47
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("djstripe", "0014_webhookendpoint"),
]
operations = [
migrations.AlterField(
model_name="customer",
name="delinquent",
field=models.BooleanField(
blank=True,
default=False,
help_text="Whether or not the latest charge for the customer's latest invoice has failed.",
null=True,
),
),
]
|
research/object_detection/predictors/mask_rcnn_box_predictor_tf1_test.py | gujralsanyam22/models | 82,518 | 11194015 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.predictors.mask_rcnn_box_predictor."""
import unittest
import numpy as np
import tensorflow.compat.v1 as tf
from google.protobuf import text_format
from object_detection.builders import box_predictor_builder
from object_detection.builders import hyperparams_builder
from object_detection.predictors import mask_rcnn_box_predictor as box_predictor
from object_detection.protos import hyperparams_pb2
from object_detection.utils import test_case
from object_detection.utils import tf_version
@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.')
class MaskRCNNBoxPredictorTest(test_case.TestCase):
def _build_arg_scope_with_hyperparams(self,
op_type=hyperparams_pb2.Hyperparams.FC):
hyperparams = hyperparams_pb2.Hyperparams()
hyperparams_text_proto = """
activation: NONE
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
"""
text_format.Merge(hyperparams_text_proto, hyperparams)
hyperparams.op = op_type
return hyperparams_builder.build(hyperparams, is_training=True)
def test_get_boxes_with_five_classes(self):
def graph_fn(image_features):
mask_box_predictor = box_predictor_builder.build_mask_rcnn_box_predictor(
is_training=False,
num_classes=5,
fc_hyperparams_fn=self._build_arg_scope_with_hyperparams(),
use_dropout=False,
dropout_keep_prob=0.5,
box_code_size=4,
)
box_predictions = mask_box_predictor.predict(
[image_features],
num_predictions_per_location=[1],
scope='BoxPredictor',
prediction_stage=2)
return (box_predictions[box_predictor.BOX_ENCODINGS],
box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND])
image_features = np.random.rand(2, 7, 7, 3).astype(np.float32)
(box_encodings,
class_predictions_with_background) = self.execute(graph_fn,
[image_features])
self.assertAllEqual(box_encodings.shape, [2, 1, 5, 4])
self.assertAllEqual(class_predictions_with_background.shape, [2, 1, 6])
def test_get_boxes_with_five_classes_share_box_across_classes(self):
def graph_fn(image_features):
mask_box_predictor = box_predictor_builder.build_mask_rcnn_box_predictor(
is_training=False,
num_classes=5,
fc_hyperparams_fn=self._build_arg_scope_with_hyperparams(),
use_dropout=False,
dropout_keep_prob=0.5,
box_code_size=4,
share_box_across_classes=True
)
box_predictions = mask_box_predictor.predict(
[image_features],
num_predictions_per_location=[1],
scope='BoxPredictor',
prediction_stage=2)
return (box_predictions[box_predictor.BOX_ENCODINGS],
box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND])
image_features = np.random.rand(2, 7, 7, 3).astype(np.float32)
(box_encodings,
class_predictions_with_background) = self.execute(graph_fn,
[image_features])
self.assertAllEqual(box_encodings.shape, [2, 1, 1, 4])
self.assertAllEqual(class_predictions_with_background.shape, [2, 1, 6])
def test_value_error_on_predict_instance_masks_with_no_conv_hyperparms(self):
with self.assertRaises(ValueError):
box_predictor_builder.build_mask_rcnn_box_predictor(
is_training=False,
num_classes=5,
fc_hyperparams_fn=self._build_arg_scope_with_hyperparams(),
use_dropout=False,
dropout_keep_prob=0.5,
box_code_size=4,
predict_instance_masks=True)
def test_get_instance_masks(self):
def graph_fn(image_features):
mask_box_predictor = box_predictor_builder.build_mask_rcnn_box_predictor(
is_training=False,
num_classes=5,
fc_hyperparams_fn=self._build_arg_scope_with_hyperparams(),
use_dropout=False,
dropout_keep_prob=0.5,
box_code_size=4,
conv_hyperparams_fn=self._build_arg_scope_with_hyperparams(
op_type=hyperparams_pb2.Hyperparams.CONV),
predict_instance_masks=True)
box_predictions = mask_box_predictor.predict(
[image_features],
num_predictions_per_location=[1],
scope='BoxPredictor',
prediction_stage=3)
return (box_predictions[box_predictor.MASK_PREDICTIONS],)
image_features = np.random.rand(2, 7, 7, 3).astype(np.float32)
mask_predictions = self.execute(graph_fn, [image_features])
self.assertAllEqual(mask_predictions.shape, [2, 1, 5, 14, 14])
def test_do_not_return_instance_masks_without_request(self):
image_features = tf.random_uniform([2, 7, 7, 3], dtype=tf.float32)
mask_box_predictor = box_predictor_builder.build_mask_rcnn_box_predictor(
is_training=False,
num_classes=5,
fc_hyperparams_fn=self._build_arg_scope_with_hyperparams(),
use_dropout=False,
dropout_keep_prob=0.5,
box_code_size=4)
box_predictions = mask_box_predictor.predict(
[image_features],
num_predictions_per_location=[1],
scope='BoxPredictor',
prediction_stage=2)
self.assertEqual(len(box_predictions), 2)
self.assertTrue(box_predictor.BOX_ENCODINGS in box_predictions)
self.assertTrue(box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND
in box_predictions)
if __name__ == '__main__':
tf.test.main()
|
kindred/manualAnnotation.py | wasimaftab/kindred | 141 | 11194024 | <gh_stars>100-1000
import kindred
from collections import OrderedDict,defaultdict
import six
# Colors to use for output sentences with annotation
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
class RESPONSE:
POSITIVE = 1
NEGATIVE = 0
ENTITYERROR = -1
TABLE = {'y':POSITIVE,'n':NEGATIVE,'x':ENTITYERROR}
def manuallyAnnotate(corpus,candidateRelations):
"""
Provides a method for basic manual annotation of a series of candidate relations. Deals with a corpus, sentence by sentence, and prompts the user to annotate each candidate relation in turn. Can be exited before completion of the full list and the resulting annotations are split into an annotated corpus and unannotated corpus. Each document in the new corpora are individual sentences.
:param corpus: Corpus of text for annotation
:param candidateRelations: List of candidate relations (created using CandidateBuilder) to manually review and annotate
:type corpus: kindred.Corpus
:type candidateRelations: List of kindred.CandidateRelation
:return: a tuple of an annotated corpus and unannotated corpus
:rtype: two kindred.Corpus
"""
annotatedCorpus = kindred.Corpus()
unannotatedCorpus = kindred.Corpus()
options = OrderedDict()
options['x'] = 'Done'
options['0'] = 'None'
print()
print("For each sentence, choose an existing option or type the name of a new annotation")
endAnnotation = False
crCounter = 0
#for sentence,crsInSentence in groupedBySentences.items():
for doc in corpus.documents:
docSentences = set(doc.sentences)
crsInDoc = [ cr for cr in candidateRelations if cr.sentence in docSentences ]
doc = kindred.Document(doc.text,doc.entities,[])
if not endAnnotation:
for candidateRelation in crsInDoc:
crCounter += 1
sentence = candidateRelation.sentence
sentenceStart = sentence.tokens[0].startPos
e1,e2 = candidateRelation.entities
assert len(e1.position) == 1, 'Annotator cannot currently deal with non-continuous entities'
assert len(e2.position) == 1, 'Annotator cannot currently deal with non-continuous entities'
start1,end1 = e1.position[0]
start2,end2 = e2.position[0]
start1,end1 = start1-sentenceStart,end1-sentenceStart
start2,end2 = start2-sentenceStart,end2-sentenceStart
charByChar = list(candidateRelation.sentence.text)
charByChar[start1] = bcolors.FAIL + charByChar[start1]
charByChar[end1-1] += bcolors.ENDC
charByChar[start2] = bcolors.OKGREEN + charByChar[start2]
charByChar[end2-1] += bcolors.ENDC
sentence = "".join(charByChar)
print()
print('#'*30 + " (%d/%d)" % (crCounter,len(candidateRelations)))
print(sentence)
optionTxt = " ".join("%s:%s" % (key,value) for key,value in options.items())
response = None
while not response:
response = six.moves.input('%s ? ' % optionTxt).strip()
if response == 'x':
endAnnotation = True
break
elif response and not response in optionTxt:
newKey = str(len(options)-1)
options[newKey] = response
else:
response = options[response]
if response != 'None':
r = kindred.Relation(response,candidateRelation.entities)
doc.addRelation(r)
if endAnnotation:
# Annotation is incomplete, so wipe any previous annotation on this sentence
doc.relations = []
unannotatedCorpus.addDocument(doc)
else:
annotatedCorpus.addDocument(doc)
return annotatedCorpus,unannotatedCorpus
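# Usage sketch: the corpus loader below is an assumption for illustration; any parsed
# kindred.Corpus plus candidate relations built with kindred.CandidateBuilder can be
# passed to manuallyAnnotate in the same way.
def _example_manual_annotation():  # pragma: no cover
    corpus = kindred.bionlpst.load('2016-BB3-event-train')  # assumed example corpus
    parser = kindred.Parser()
    parser.parse(corpus)
    candidateBuilder = kindred.CandidateBuilder()
    candidateRelations = candidateBuilder.build(corpus)
    annotatedCorpus, unannotatedCorpus = manuallyAnnotate(corpus, candidateRelations)
    print("%d documents annotated, %d left unannotated" % (len(annotatedCorpus.documents), len(unannotatedCorpus.documents)))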
|
rest_hooks/tasks.py | nagesh4193/django-rest-hooks | 451 | 11194041 | <reponame>nagesh4193/django-rest-hooks
import requests
import json
from celery.task import Task
from django.core.serializers.json import DjangoJSONEncoder
from rest_hooks.utils import get_hook_model
class DeliverHook(Task):
def run(self, target, payload, instance=None, hook_id=None, **kwargs):
"""
target: the url to receive the payload.
payload: a python primitive data structure
instance: a possibly null "trigger" instance
hook: the defining Hook object (useful for removing)
"""
response = requests.post(
url=target,
data=json.dumps(payload, cls=DjangoJSONEncoder),
headers={'Content-Type': 'application/json'}
)
if response.status_code == 410 and hook_id:
HookModel = get_hook_model()
            hook = HookModel.objects.get(id=hook_id)
hook.delete()
# would be nice to log this, at least for a little while...
def deliver_hook_wrapper(target, payload, instance=None, hook=None, **kwargs):
if hook:
kwargs['hook_id'] = hook.id
return DeliverHook.delay(target, payload, **kwargs)
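# Usage sketch (assumptions: this module lives at rest_hooks.tasks and the project uses
# django-rest-hooks' HOOK_DELIVERER setting; adjust the dotted path if it lives elsewhere):
#
#   # settings.py
#   HOOK_DELIVERER = 'rest_hooks.tasks.deliver_hook_wrapper'
#
# With that setting, each fired hook calls deliver_hook_wrapper(target, payload, instance,
# hook), which queues the Celery task above to POST the JSON payload to the target URL.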
|
avalanche/benchmarks/datasets/cub200/cub200.py | PRISHIta123/avalanche | 810 | 11194053 | ################################################################################
# Copyright (c) 2021 ContinualAI. #
# Copyrights licensed under the MIT License. #
# See the accompanying LICENSE file for terms. #
# #
# Date: 12-04-2021 #
# Author: <NAME>, <NAME> #
# E-mail: <EMAIL> #
# Website: continualai.org #
################################################################################
"""
CUB200 Pytorch Dataset: Caltech-UCSD Birds-200-2011 (CUB-200-2011) is an
extended version of the CUB-200 dataset, with roughly double the number of
images per class and new part location annotations. For detailed information
about the dataset, please check the official website:
http://www.vision.caltech.edu/visipedia/CUB-200-2011.html.
"""
import csv
from pathlib import Path
from typing import Union
import gdown
import os
from collections import OrderedDict
from torchvision.datasets.folder import default_loader
from avalanche.benchmarks.datasets import default_dataset_location, \
DownloadableDataset
from avalanche.benchmarks.utils import PathsDataset
class CUB200(PathsDataset, DownloadableDataset):
""" Basic CUB200 PathsDataset to be used as a standard PyTorch Dataset.
A classic continual learning benchmark built on top of this dataset
can be found in 'benchmarks.classic', while for more custom benchmark
design please use the 'benchmarks.generators'."""
images_folder = 'CUB_200_2011/images'
official_url = 'http://www.vision.caltech.edu/visipedia-data/CUB-200-2011/'\
'CUB_200_2011.tgz'
gdrive_url = "https://drive.google.com/u/0/uc?id=" \
"1hbzc_P1FuxMkcabkgn9ZKinBwW683j45"
filename = 'CUB_200_2011.tgz'
tgz_md5 = '97eceeb196236b17998738112f37df78'
def __init__(
self,
root: Union[str, Path] = None,
*,
train=True, transform=None, target_transform=None,
loader=default_loader, download=True):
"""
:param root: root dir where the dataset can be found or downloaded.
Defaults to None, which means that the default location for
'CUB_200_2011' will be used.
:param train: train or test subset of the original dataset. Default
to True.
:param transform: eventual input data transformations to apply.
Default to None.
:param target_transform: eventual target data transformations to apply.
Default to None.
:param loader: method to load the data from disk. Default to
torchvision default_loader.
:param download: default set to True. If the data is already
downloaded it will skip the download.
"""
if root is None:
root = default_dataset_location('CUB_200_2011')
self.train = train
DownloadableDataset.__init__(
self, root, download=download, verbose=True)
self._load_dataset()
PathsDataset.__init__(
self, os.path.join(root, CUB200.images_folder), self._images,
transform=transform, target_transform=target_transform,
loader=loader)
def _download_dataset(self) -> None:
try:
self._download_and_extract_archive(
CUB200.official_url, CUB200.filename,
checksum=CUB200.tgz_md5)
except Exception:
if self.verbose:
print('[CUB200] Direct download may no longer be possible, '
'will try GDrive.')
filepath = self.root / self.filename
gdown.download(self.gdrive_url, str(filepath), quiet=False)
gdown.cached_download(
self.gdrive_url, str(filepath), md5=self.tgz_md5
)
self._extract_archive(filepath)
def _download_error_message(self) -> str:
return '[CUB200] Error downloading the dataset. Consider downloading ' \
'it manually at: ' + CUB200.official_url + ' and placing it ' \
'in: ' + str(self.root)
def _load_metadata(self):
""" Main method to load the CUB200 metadata """
cub_dir = self.root / 'CUB_200_2011'
self._images = OrderedDict()
with open(str(cub_dir / 'train_test_split.txt')) as csv_file:
csv_reader = csv.reader(csv_file, delimiter=' ')
for row in csv_reader:
img_id = int(row[0])
is_train_instance = int(row[1]) == 1
if is_train_instance == self.train:
self._images[img_id] = []
with open(str(cub_dir / 'images.txt')) as csv_file:
csv_reader = csv.reader(csv_file, delimiter=' ')
for row in csv_reader:
img_id = int(row[0])
if img_id in self._images:
self._images[img_id].append(row[1])
with open(str(cub_dir / 'image_class_labels.txt')) as csv_file:
csv_reader = csv.reader(csv_file, delimiter=' ')
for row in csv_reader:
img_id = int(row[0])
if img_id in self._images:
# CUB starts counting classes from 1 ...
self._images[img_id].append(int(row[1]) - 1)
with open(str(cub_dir / 'bounding_boxes.txt')) as csv_file:
csv_reader = csv.reader(csv_file, delimiter=' ')
for row in csv_reader:
img_id = int(row[0])
if img_id in self._images:
box_cub = [int(float(x)) for x in row[1:]]
box_avl = [box_cub[1], box_cub[0], box_cub[3], box_cub[2]]
# PathsDataset accepts (top, left, height, width)
self._images[img_id].append(box_avl)
images_tuples = []
for _, img_tuple in self._images.items():
images_tuples.append(tuple(img_tuple))
self._images = images_tuples
# Integrity check
for row in self._images:
filepath = self.root / CUB200.images_folder / row[0]
if not filepath.is_file():
if self.verbose:
print('[CUB200] Error checking integrity of:', filepath)
return False
return True
if __name__ == "__main__":
""" Simple test that will start if you run this script directly """
import matplotlib.pyplot as plt
dataset = CUB200(train=False, download=True)
print("test data len:", len(dataset))
img, _ = dataset[14]
plt.imshow(img)
plt.show()
dataset = CUB200(train=True)
print("train data len:", len(dataset))
img, _ = dataset[700]
plt.imshow(img)
plt.show()
__all__ = [
'CUB200'
]
|
lib/models/modules/seg_basic.py | littleSunlxy/contrastive-seg-lin | 398 | 11194060 | import torch.nn as nn
from lib.models.tools.module_helper import ModuleHelper
class _FCNHead(nn.Module):
def __init__(self, in_channels, channels):
super(_FCNHead, self).__init__()
inter_channels = in_channels // 4
self.block = nn.Sequential(
nn.Conv2d(in_channels, inter_channels, 3, padding=1, bias=False),
ModuleHelper.BNReLU(inter_channels, bn_type='torchsyncbn'),
nn.Dropout(0.1),
nn.Conv2d(inter_channels, channels, 1)
)
def forward(self, x):
        return self.block(x)
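# Shape sketch (illustrative values, not part of the module's API): with
# _FCNHead(in_channels=2048, channels=19), an input of shape (N, 2048, H, W) is first
# reduced to (N, 512, H, W) by the 3x3 conv (inter_channels = in_channels // 4), passed
# through BN+ReLU and dropout, then projected by the 1x1 conv to (N, 19, H, W), i.e.
# per-pixel class logits at the input resolution.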
|
frameworks/tensorflow/tf_collective_benchmark.py | Michoumichmich/antares | 132 | 11194069 | #!/usr/bin/env python3
# mpiexec -n 2 --allow-run-as-root --map-by slot --bind-to none -x N=$((1024 * 1024)) -x RP=1 -x OP='all_reduce:+' ./tf_collective_benchmark.py
import os, tensorflow as tf
from tensorflow.contrib import antares
if tf.version.VERSION.startswith('2.'):
tf = tf.compat.v1
tf.disable_eager_execution()
rank, size, local_rank = antares.init_communicate_config()
count = int(os.environ.get('N', '4096'))
op = os.environ.get('OP', 'all_reduce:+')
repeat = int(os.environ.get('RP', '1'))
if not op.startswith('all_gather:'):
count *= size
input_0 = tf.get_variable('input_0', count, 'float32', initializer=tf.initializers.ones('float32'))
[input_0] = antares.metric([input_0])
for i in range(repeat): [input_0] = antares.communicate(op, [input_0], names=["input_0"])
[output_0] = antares.metric([input_0])
tf_config = tf.ConfigProto()
tf_config.gpu_options.allow_growth = True
tf_config.gpu_options.visible_device_list = str(local_rank)
with tf.Session(config=tf_config) as sess:
sess.run(tf.global_variables_initializer())
print("Node[%d/%d]: Tensor output =" % (rank, size), sess.run(output_0))
for x in range(4):
sess.run(output_0)
print("Node[%d/%d]: Tensor output properties: shape = %s, dtype = %s" % (rank, size, output_0.shape, output_0.dtype))
|
sponsors/migrations/0026_auto_20210416_1940.py | ewjoachim/pythondotorg | 911 | 11194090 | <gh_stars>100-1000
# Generated by Django 2.0.13 on 2021-04-16 19:40
from django.db import migrations, models
import markupfield.fields
class Migration(migrations.Migration):
dependencies = [
('sponsors', '0025_auto_20210416_1939'),
]
operations = [
migrations.AlterField(
model_name='contract',
name='_legal_clauses_rendered',
field=models.TextField(default='', editable=False),
),
migrations.AlterField(
model_name='contract',
name='legal_clauses',
field=markupfield.fields.MarkupField(blank=True, default='', rendered_field=True),
),
]
|
etna/clustering/distances/distance_matrix.py | Pacman1984/etna | 326 | 11194113 | import warnings
from typing import TYPE_CHECKING
from typing import Dict
from typing import List
from typing import Optional
import numpy as np
import pandas as pd
from etna.clustering.distances.base import Distance
from etna.core import BaseMixin
from etna.loggers import tslogger
if TYPE_CHECKING:
from etna.datasets import TSDataset
class DistanceMatrix(BaseMixin):
"""DistanceMatrix computes distance matrix from TSDataset."""
def __init__(self, distance: Distance):
"""Init DistanceMatrix.
Parameters
----------
distance:
class for distance measurement
"""
self.distance = distance
self.matrix: Optional[np.ndarray] = None
self.series: Optional[List[np.ndarray]] = None
self.segment2idx: Dict[str, int] = {}
self.idx2segment: Dict[int, str] = {}
self.series_number: Optional[int] = None
@staticmethod
def _validate_dataset(ts: "TSDataset"):
"""Check that dataset does not contain NaNs."""
for segment in ts.segments:
series = ts[:, segment, "target"]
first_valid_index = 0
last_valid_index = series.reset_index(drop=True).last_valid_index()
series_length = last_valid_index - first_valid_index + 1
if len(series.dropna()) != series_length:
warnings.warn(
f"Timeseries contains NaN values, which will be dropped. "
f"If it is not desirable behaviour, handle them manually."
)
break
def _get_series(self, ts: "TSDataset") -> List[pd.Series]:
"""Parse given TSDataset and get timestamp-indexed segment series.
Build mapping from segment to idx in matrix and vice versa.
"""
series_list = []
for i, segment in enumerate(ts.segments):
self.segment2idx[segment] = i
self.idx2segment[i] = segment
series = ts[:, segment, "target"].dropna()
series_list.append(series)
self.series_number = len(series_list)
return series_list
def _compute_dist(self, series: List[pd.Series], idx: int) -> np.ndarray:
"""Compute distance from idx-th series to other ones."""
if self.series_number is None:
raise ValueError("Something went wrong during getting the series from dataset!")
distances = np.array([self.distance(series[idx], series[j]) for j in range(self.series_number)])
return distances
def _compute_dist_matrix(self, series: List[pd.Series]) -> np.ndarray:
"""Compute distance matrix for given series."""
if self.series_number is None:
raise ValueError("Something went wrong during getting the series from dataset!")
distances = np.empty(shape=(self.series_number, self.series_number))
logging_freq = max(1, self.series_number // 10)
tslogger.log(f"Calculating distance matrix...")
for idx in range(self.series_number):
distances[idx] = self._compute_dist(series=series, idx=idx)
if (idx + 1) % logging_freq == 0:
tslogger.log(f"Done {idx + 1} out of {self.series_number} ")
return distances
def fit(self, ts: "TSDataset") -> "DistanceMatrix":
"""Fit distance matrix: get timeseries from ts and compute pairwise distances.
Parameters
----------
ts:
TSDataset with timeseries
Returns
-------
self:
fitted DistanceMatrix object
"""
self._validate_dataset(ts)
self.series = self._get_series(ts)
self.matrix = self._compute_dist_matrix(self.series)
return self
def predict(self) -> np.ndarray:
"""Get distance matrix.
Returns
-------
np.ndarray:
2D array with distances between series
"""
if self.matrix is None:
raise ValueError("DistanceMatrix is not fitted! Fit the DistanceMatrix before calling predict method!")
return self.matrix
def fit_predict(self, ts: "TSDataset") -> np.ndarray:
"""Compute distance matrix and return it.
Parameters
----------
ts:
TSDataset with timeseries to compute matrix with
Returns
-------
np.ndarray:
2D array with distances between series
"""
return self.fit(ts).predict()
__all__ = ["DistanceMatrix"]
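# Usage sketch (assumes EuclideanDistance is importable from etna.clustering and that a
# long dataframe `df` with (timestamp, segment, target) columns is available):
#
#   from etna.clustering import EuclideanDistance
#   from etna.datasets import TSDataset
#
#   ts = TSDataset(TSDataset.to_dataset(df), freq="D")
#   dm = DistanceMatrix(distance=EuclideanDistance())
#   matrix = dm.fit_predict(ts)            # (n_segments, n_segments) pairwise distances
#   row = dm.segment2idx["segment_0"]      # map a segment name to its matrix index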
|
pororo/models/tts/tacotron/attention.py | jayten42/pororo | 1,137 | 11194114 | import torch
from torch.nn import Conv1d, Linear, Parameter
from torch.nn import functional as F
class AttentionBase(torch.nn.Module):
"""Abstract attention class.
Arguments:
representation_dim -- size of the hidden representation
query_dim -- size of the attention query input (probably decoder hidden state)
memory_dim -- size of the attention memory input (probably encoder outputs)
"""
def __init__(self, representation_dim, query_dim, memory_dim):
super(AttentionBase, self).__init__()
self._bias = Parameter(torch.zeros(1, representation_dim))
self._energy = Linear(representation_dim, 1, bias=False)
self._query = Linear(query_dim, representation_dim, bias=False)
self._memory = Linear(memory_dim, representation_dim, bias=False)
self._memory_dim = memory_dim
def reset(self, encoded_input, batch_size, max_len, device):
"""Initialize previous attention weights & prepare attention memory."""
self._memory_transform = self._memory(encoded_input)
self._prev_weights = torch.zeros(batch_size, max_len, device=device)
self._prev_context = torch.zeros(
batch_size,
self._memory_dim,
device=device,
)
return self._prev_context
def _attent(self, query, memory_transform, weights):
raise NotImplementedError
    def _combine_weights(self, previous_weights, weights):
raise NotImplementedError
def _normalize(self, energies, mask):
raise NotImplementedError
def forward(self, query, memory, mask, prev_decoder_output):
energies = self._attent(
query,
self._memory_transform,
self._prev_weights,
)
attention_weights = self._normalize(energies, mask)
self._prev_weights = self._combine_weights(
self._prev_weights,
attention_weights,
)
attention_weights = attention_weights.unsqueeze(1)
self._prev_context = torch.bmm(attention_weights, memory).squeeze(1)
return self._prev_context, attention_weights.squeeze(1)
class LocationSensitiveAttention(AttentionBase):
"""
Location Sensitive Attention:
Location-sensitive attention: https://arxiv.org/abs/1506.07503.
Extends additive attention (here https://arxiv.org/abs/1409.0473)
to use cumulative attention weights from previous decoder time steps.
Arguments:
kernel_size -- kernel size of the convolution calculating location features
channels -- number of channels of the convolution calculating location features
smoothing -- to normalize weights using softmax, use False (default) and True to use sigmoids
"""
def __init__(
self,
kernel_size,
channels,
smoothing,
representation_dim,
query_dim,
memory_dim,
):
super(LocationSensitiveAttention,
self).__init__(representation_dim, query_dim, memory_dim)
self._location = Linear(channels, representation_dim, bias=False)
self._loc_features = Conv1d(
1,
channels,
kernel_size,
padding=(kernel_size - 1) // 2,
bias=False,
)
self._smoothing = smoothing
def _attent(self, query, memory_transform, cum_weights):
query = self._query(query.unsqueeze(1))
cum_weights = cum_weights.unsqueeze(-1)
loc_features = self._loc_features(cum_weights.transpose(1, 2))
loc_features = self._location(loc_features.transpose(1, 2))
energy = query + memory_transform + loc_features
energy = self._energy(torch.tanh(energy + self._bias))
return energy.squeeze(-1)
def _normalize(self, energies, mask):
energies[~mask] = float("-inf")
if self._smoothing:
sigmoid = torch.sigmoid(energies)
total = torch.sum(sigmoid, dim=-1)
return sigmoid / total
else:
return F.softmax(energies, dim=1)
def _combine_weights(self, previous_weights, weights):
return previous_weights + weights
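# Usage sketch for a single decoder step (dimensions are illustrative assumptions:
# encoder memory 512, decoder query 1024, attention space 128):
#
#   attention = LocationSensitiveAttention(
#       kernel_size=31, channels=32, smoothing=False,
#       representation_dim=128, query_dim=1024, memory_dim=512)
#   memory = torch.rand(batch_size, max_len, 512)               # encoder outputs
#   attention.reset(memory, batch_size, max_len, device)        # projects memory, zeroes weights
#   context, weights = attention(query, memory, mask, prev_decoder_output)
#   # context: (batch_size, 512), weights: (batch_size, max_len)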
|
bin/scripting-examples/ttc-merger.py | HinTak/Font-Validator | 117 | 11194121 | <filename>bin/scripting-examples/ttc-merger.py
# Usage:
# mono ipy.exe ttc-merger.py outfile infile1 infile2 ... infileN
# Copyright (c) <NAME>
import clr
import sys
clr.AddReference("OTFontFile.dll")
from OTFontFile import OTFile, OTFont
from System import Array, Console
if __name__ == '__main__':
    if not sys.argv[1:]:
        print("Usage: %s outfile infile1 infile2 ... infileN" % sys.argv[0])
        sys.exit(1)
newfont = Array.CreateInstance(OTFont, len(sys.argv)-2)
for i in range(2, len(sys.argv)):
f = OTFile()
f.open(sys.argv[i])
newfont[i-2] = f.GetFont(0)
OTFile.WriteFile(sys.argv[1], newfont)
|
src/lib/world_magnetic_model/fetch_noaa_table.py | lgarciaos/Firmware | 4,224 | 11194151 | #!/usr/bin/env python3
############################################################################
#
# Copyright (c) 2020-2021 PX4 Development Team. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# 3. Neither the name PX4 nor the names of its contributors may be
# used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
# AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
############################################################################
import math
import json
import urllib.request
SAMPLING_RES = 10
SAMPLING_MIN_LAT = -90
SAMPLING_MAX_LAT = 90
SAMPLING_MIN_LON = -180
SAMPLING_MAX_LON = 180
def constrain(n, upper, lower):
    # clamp n to the closed interval [lower, upper]; callers pass (value, 32767, -32768)
    return max(min(upper, n), lower)
header = """/****************************************************************************
*
* Copyright (c) 2020-2021 PX4 Development Team. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* 3. Neither the name PX4 nor the names of its contributors may be
* used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
****************************************************************************/
"""
print(header)
print('#include <stdint.h>\n')
LAT_DIM=int((SAMPLING_MAX_LAT-SAMPLING_MIN_LAT)/SAMPLING_RES)+1
LON_DIM=int((SAMPLING_MAX_LON-SAMPLING_MIN_LON)/SAMPLING_RES)+1
print('static constexpr float SAMPLING_RES = {}'.format(SAMPLING_RES) + ';')
print('static constexpr float SAMPLING_MIN_LAT = {}'.format(SAMPLING_MIN_LAT) + ';')
print('static constexpr float SAMPLING_MAX_LAT = {}'.format(SAMPLING_MAX_LAT) + ';')
print('static constexpr float SAMPLING_MIN_LON = {}'.format(SAMPLING_MIN_LON) + ';')
print('static constexpr float SAMPLING_MAX_LON = {}'.format(SAMPLING_MAX_LON) + ';')
print('')
print('static constexpr int LAT_DIM = {}'.format(LAT_DIM) + ';')
print('static constexpr int LON_DIM = {}'.format(LON_DIM) + ';')
print('\n')
print('// *INDENT-OFF*')
# Declination
params = urllib.parse.urlencode({'lat1': 0, 'lat2': 0, 'lon1': 0, 'lon2': 0, 'latStepSize': 1, 'lonStepSize': 1, 'magneticComponent': 'd', 'resultFormat': 'json'})
f = urllib.request.urlopen("https://www.ngdc.noaa.gov/geomag-web/calculators/calculateIgrfgrid?%s" % params)
data = json.loads(f.read())
print("// Magnetic declination data in radians * 10^-4")
print('// Model: {},'.format(data['model']))
print('// Version: {},'.format(data['version']))
print('// Date: {},'.format(data['result'][0]['date']))
print('static constexpr const int16_t declination_table[{}][{}]'.format(LAT_DIM, LON_DIM) + " {")
print('\t// LONGITUDE: ', end='')
for l in range(SAMPLING_MIN_LON, SAMPLING_MAX_LON+1, SAMPLING_RES):
print('{0:6d},'.format(l), end='')
print('')
for latitude in range(SAMPLING_MIN_LAT, SAMPLING_MAX_LAT+1, SAMPLING_RES):
params = urllib.parse.urlencode({'lat1': latitude, 'lat2': latitude, 'lon1': SAMPLING_MIN_LON, 'lon2': SAMPLING_MAX_LON, 'latStepSize': 1, 'lonStepSize': SAMPLING_RES, 'magneticComponent': 'd', 'resultFormat': 'json'})
f = urllib.request.urlopen("https://www.ngdc.noaa.gov/geomag-web/calculators/calculateIgrfgrid?%s" % params)
data = json.loads(f.read())
print('\t/* LAT: {0:3d} */'.format(latitude) + ' { ', end='')
for p in data['result']:
# declination in radians * 10^-4
declination_int = constrain(int(round(math.radians(p['declination'] * 10000))), 32767, -32768)
print('{0:6d},'.format(declination_int), end='')
print(' },')
print("};\n")
# Inclination
params = urllib.parse.urlencode({'lat1': 0, 'lat2': 0, 'lon1': 0, 'lon2': 0, 'latStepSize': 1, 'lonStepSize': 1, 'magneticComponent': 'i', 'resultFormat': 'json'})
f = urllib.request.urlopen("https://www.ngdc.noaa.gov/geomag-web/calculators/calculateIgrfgrid?%s" % params)
data = json.loads(f.read())
print("// Magnetic inclination data in radians * 10^-4")
print('// Model: {},'.format(data['model']))
print('// Version: {},'.format(data['version']))
print('// Date: {},'.format(data['result'][0]['date']))
print('static constexpr const int16_t inclination_table[{}][{}]'.format(LAT_DIM, LON_DIM) + " {")
print('\t// LONGITUDE: ', end='')
for l in range(SAMPLING_MIN_LON, SAMPLING_MAX_LON+1, SAMPLING_RES):
print('{0:6d},'.format(l), end='')
print('')
for latitude in range(SAMPLING_MIN_LAT, SAMPLING_MAX_LAT+1, SAMPLING_RES):
params = urllib.parse.urlencode({'lat1': latitude, 'lat2': latitude, 'lon1': SAMPLING_MIN_LON, 'lon2': SAMPLING_MAX_LON, 'latStepSize': 1, 'lonStepSize': SAMPLING_RES, 'magneticComponent': 'i', 'resultFormat': 'json'})
f = urllib.request.urlopen("https://www.ngdc.noaa.gov/geomag-web/calculators/calculateIgrfgrid?%s" % params)
data = json.loads(f.read())
print('\t/* LAT: {0:3d} */'.format(latitude) + ' { ', end='')
for p in data['result']:
# inclination in radians * 10^-4
inclination_int = constrain(int(round(math.radians(p['inclination'] * 10000))), 32767, -32768)
print('{0:6d},'.format(inclination_int), end='')
print(' },')
print("};\n")
# total intensity
params = urllib.parse.urlencode({'lat1': 0, 'lat2': 0, 'lon1': 0, 'lon2': 0, 'latStepSize': 1, 'lonStepSize': 1, 'magneticComponent': 'f', 'resultFormat': 'json'})
f = urllib.request.urlopen("https://www.ngdc.noaa.gov/geomag-web/calculators/calculateIgrfgrid?%s" % params)
data = json.loads(f.read())
print("// Magnetic strength data in milli-Gauss * 10")
print('// Model: {},'.format(data['model']))
print('// Version: {},'.format(data['version']))
print('// Date: {},'.format(data['result'][0]['date']))
print('static constexpr const int16_t strength_table[{}][{}]'.format(LAT_DIM, LON_DIM) + " {")
print('\t// LONGITUDE: ', end='')
for l in range(SAMPLING_MIN_LON, SAMPLING_MAX_LON+1, SAMPLING_RES):
print('{0:5d},'.format(l), end='')
print('')
for latitude in range(SAMPLING_MIN_LAT, SAMPLING_MAX_LAT+1, SAMPLING_RES):
params = urllib.parse.urlencode({'lat1': latitude, 'lat2': latitude, 'lon1': SAMPLING_MIN_LON, 'lon2': SAMPLING_MAX_LON, 'latStepSize': 1, 'lonStepSize': SAMPLING_RES, 'magneticComponent': 'f', 'resultFormat': 'json'})
f = urllib.request.urlopen("https://www.ngdc.noaa.gov/geomag-web/calculators/calculateIgrfgrid?%s" % params)
data = json.loads(f.read())
print('\t/* LAT: {0:3d} */'.format(latitude) + ' { ', end='')
for p in data['result']:
totalintensity_int = int(round(p['totalintensity']/10))
print('{0:5d},'.format(totalintensity_int), end='')
print(' },')
print("};")
|
windows/winobject/wmi.py | IMULMUL/PythonForWindows | 479 | 11194157 | <filename>windows/winobject/wmi.py
import windows
import ctypes
import struct
import functools
from functools import partial
from collections import namedtuple
from ctypes.wintypes import *
import windows.com
import windows.generated_def as gdef
from windows.generated_def.winstructs import *
from windows.pycompat import basestring
# Common error check for all WMI COM interfaces
# This 'just' adds the corresponding 'WBEMSTATUS' to the hresult error code
class WmiComInterface(object):
"""Base class used for COM call error checking for WMI interfaces"""
def errcheck(self, result, func, args):
if result < 0:
wmitag = gdef.WBEMSTATUS.mapper[result & 0xffffffff]
raise ctypes.WinError(result, wmitag)
return args
sentinel = object()
# POC
class QualifierSet(gdef.IWbemQualifierSet):
def get_variant(self, name):
"""Retrieve the value of property ``name`` as a :class:`~windows.com.Variant`
:return: :class:`~windows.com.Variant`
"""
if not isinstance(name, basestring):
nametype = type(name).__name__
raise TypeError("WmiObject attributes name must be str, not <{0}>".format(nametype))
variant_res = windows.com.Variant()
self.Get(name, 0, variant_res, None)
return variant_res
def get(self, name, default=sentinel):
"""Return the value of the property ``name``. The return value depends of the type of the property and can vary"""
try:
return self.get_variant(name).value
except WindowsError as e:
if (e.winerror & 0xffffffff) != gdef.WBEM_E_NOT_FOUND:
raise
if default is sentinel:
raise
return default
def names(self):
res = POINTER(windows.com.SafeArray)()
x = ctypes.pointer(res)
self.GetNames(0, cast(x, POINTER(POINTER(gdef.SAFEARRAY))))
# need to free the safearray / unlock ?
properties = [p for p in res[0].to_list(BSTR)]
return properties
# https://docs.microsoft.com/en-us/windows/desktop/api/wbemcli/nn-wbemcli-iwbemclassobject
WmiMethod = namedtuple("WmiMethod", ["inparam", "outparam"])
# https://docs.microsoft.com/en-us/windows/desktop/WmiSdk/calling-a-method
class WmiObject(gdef.IWbemClassObject, WmiComInterface):
"""The WmiObject (which wrap ``IWbemClassObject``) contains and manipulates both class definitions and class object instances.
Can be used as a mapping to access properties.
"""
def get_variant(self, name):
"""Retrieve the value of property ``name`` as a :class:`~windows.com.Variant`
:return: :class:`~windows.com.Variant`
"""
if not isinstance(name, basestring):
nametype = type(name).__name__
raise TypeError("WmiObject attributes name must be str, not <{0}>".format(nametype))
variant_res = windows.com.Variant()
self.Get(name, 0, variant_res, None, None)
return variant_res
def get(self, name):
"""Return the value of the property ``name``. The return value depends of the type of the property and can vary"""
return self.get_variant(name).value
def get_method(self, name):
"""Return the information about the method ``name``
:returns: :class:`WmiMethod`
"""
inpararm = type(self)()
outpararm = type(self)()
variant_res = windows.com.Variant()
self.GetMethod(name, 0, inpararm, outpararm)
return WmiMethod(inpararm, outpararm)
def put_variant(self, name, variant):
if not isinstance(name, basestring):
nametype = type(name).__name__
raise TypeError("WmiObject attributes name must be str, not <{0}>".format(nametype))
return self.Put(name, 0, variant, 0)
def put(self, name, value):
"""Set the property ``name`` to ``value``"""
variant_value = windows.com.Variant(value)
return self.put_variant(name, variant_value)
def spawn_instance(self):
"""Create a new object of the class represented by the current :class:`WmiObject`
:returns: :class:`WmiObject`
"""
instance = type(self)()
self.SpawnInstance(0, instance)
return instance
@property
def genus(self):
"""The genus of the object.
:returns: ``WBEM_GENUS_CLASS(0x1L)`` if the :class:`WmiObject` is a Class and ``WBEM_GENUS_INSTANCE(0x2L)`` for instances and events.
"""
return gdef.tag_WBEM_GENUS_TYPE.mapper[self.get("__GENUS")]
## Higher level API
def get_properties(self, system_properties=False):
"""Return the list of properties names available for the current object.
If ``system_properties`` is ``False`` property names begining with ``_`` are ignored.
:returns: [:class:`str`] -- A list of string
.. note:
About system properties: https://docs.microsoft.com/en-us/windows/desktop/wmisdk/wmi-system-properties
"""
res = POINTER(windows.com.SafeArray)()
x = ctypes.pointer(res)
self.GetNames(None, 0, None, cast(x, POINTER(POINTER(gdef.SAFEARRAY))))
# need to free the safearray / unlock ?
properties = [p for p in res[0].to_list(BSTR) if system_properties or (not p.startswith("_"))]
return properties
properties = property(get_properties) #: The properties of the object (exclude system properties)
@property
def qualifier_set(self): # changer de nom ?
res = QualifierSet()
self.GetQualifierSet(res)
return res
def get_p_set(self, name): # Changer de nom ?
res = QualifierSet()
self.GetPropertyQualifierSet(name, res)
return res
# Make WmiObject a mapping object
def keys(self):
"""The properties of the object (include system properties)"""
return self.get_properties(system_properties=True)
__getitem__ = get
__setitem__ = put
def items(self):
return [(k, self.get(k)) for k in self.properties]
def values(self): # Not sur anyone will use this but keep the dict interface
return [x[1] for x in self.items()]
## Make it callable like any class :D
__call__ = spawn_instance
def __repr__(self):
if not self:
return """<{0} (NULL)>""".format(type(self).__name__,)
if self.genus == gdef.WBEM_GENUS_CLASS:
return """<{0} class "{1}">""".format(type(self).__name__, self.get("__Class"))
return """<{0} instance of "{1}">""".format(type(self).__name__, self.get("__Class"))
def __sprint__(self):
return """ {0}\n
{1}
""".format(repr(self), "\n".join(": ".join([x[0], str(x[1])]) for x in sorted(self.items())))
class WmiEnumeration(gdef.IEnumWbemClassObject, WmiComInterface):
"""Represent an enumeration of object that can be itered"""
DEFAULT_TIMEOUT = gdef.WBEM_INFINITE #: The default timeout
def next(self, timeout=None):
"""Return the next object in the enumeration with `timeout`.
:raises: ``WindowsError(WBEM_S_TIMEDOUT)`` if timeout expire
:returns: :class:`WmiObject`
"""
timeout = self.DEFAULT_TIMEOUT if timeout is None else timeout
# For now the count is hardcoded to 1
obj = WmiObject()
return_count = gdef.ULONG(0)
error = self.Next(timeout, 1, obj, return_count)
if error == gdef.WBEM_S_TIMEDOUT:
raise ctypes.WinError(gdef.WBEM_S_TIMEDOUT, "Wmi timeout")
elif error == WBEM_S_FALSE:
return None
else:
return obj
def __iter__(self):
"""Return an iterator with ``DEFAULT_TIMEOUT``"""
return self.iter_timeout(self.DEFAULT_TIMEOUT)
def iter_timeout(self, timeout=None):
"""Return an iterator with a custom ``timeout``"""
while True:
obj = self.next(timeout)
if obj is None:
return
yield obj
def all(self):
"""Return all elements in the enumeration as a list
:returns: [:class:`WmiObject`] - A list of :class:`WmiObject`
"""
return list(self) # SqlAlchemy like :)
class WmiCallResult(gdef.IWbemCallResult, WmiComInterface):
"""The result of a WMI call/query. Real result value type depends of the context"""
def __init__(self, result_type=None, namespace_name=None):
self.result_type = result_type
self.namespace_name = namespace_name
def get_call_status(self, timeout=gdef.WBEM_INFINITE):
"""The status of the call"""
status = gdef.LONG()
self.GetCallStatus(timeout, status)
return WBEMSTATUS.mapper[status.value & 0xffffffff]
def get_result_object(self, timeout=gdef.WBEM_INFINITE):
"""The result as a :class:`WmiObject` (returned by :func:`WmiNamespace.exec_method`)"""
result = WmiObject()
self.GetResultObject(timeout, result)
return result
def get_result_string(self, timeout=gdef.WBEM_INFINITE):
"""The result as a :class:`WmiObject` (returned by :func:`WmiNamespace.put_instance`)"""
result = gdef.BSTR()
self.GetResultString(timeout, result)
return result
def get_result_service(self, timeout=gdef.WBEM_INFINITE):
"""The result as a :class:`WmiNamespace` (not used yet)"""
result = WmiNamespace()
self.GetResultServices(timeout, result)
return result
@property
def result(self):
"""The result of the correct type based on ``self.result_type``"""
if self.result_type is None:
raise ValueError("Cannot call <result> with no result_type")
return getattr(self, "get_result_" + self.result_type)()
class WmiLocator(gdef.IWbemLocator, WmiComInterface):
pass # Just for the WMI errcheck callback
# !TEST CODE
class WmiNamespace(gdef.IWbemServices, WmiComInterface):
r"""An object to perform wmi request to a given ``namespace``"""
#CLSID_WbemAdministrativeLocator_IID = windows.com.IID.from_string('CB8555CC-9128-11D1-AD9B-00C04FD8FDFF')
WbemLocator_CLSID = windows.com.IID.from_string('4590F811-1D3A-11D0-891F-00AA004B2E24')
DEFAULT_ENUM_FLAGS = (gdef.WBEM_FLAG_RETURN_IMMEDIATELY |
WBEM_FLAG_FORWARD_ONLY) #: The defauls flags used for enumeration. ``(WBEM_FLAG_RETURN_IMMEDIATELY | WBEM_FLAG_FORWARD_ONLY)``
def __init__(self, namespace):
self.name = namespace
@classmethod
def connect(cls, namespace, user=None, password=None):
"""Connect to ``namespace`` using ``user`` and ``password`` for authentification if given
:return: :class:`WmiNamespace` - The connected :class:`WmiNamespace`"""
# this method assert com is initialised
self = cls(namespace) # IWbemServices subclass
locator = WmiLocator()
windows.com.create_instance(cls.WbemLocator_CLSID, locator)
locator.ConnectServer(namespace, user, password , None, gdef.WBEM_FLAG_CONNECT_USE_MAX_WAIT, None, None, self)
locator.Release()
return self
def query(self, query):
"""Return the list of :class:`WmiObject` matching ``query``.
This API is the `simple one`, if you need timeout or complexe feature see :func:`exec_query`
:return: [:class:`WmiObject`] - A list of :class:`WmiObject`
"""
return list(self.exec_query(query))
def select(self, clsname, deep=True):
"""Return the list of :class:`WmiObject` that are instance of ``clsname``. Deep has the same meaning as in :func:`create_instance_enum`.
This API is the `simple one`, if you need timeout or complexe feature see :func:`create_instance_enum`
:return: [:class:`WmiObject`] - A list of :class:`WmiObject`
"""
return list(self.create_instance_enum(clsname, deep=deep))
def exec_query(self, query, flags=DEFAULT_ENUM_FLAGS, ctx=None):
"""Execute a WQL query with custom flags and returns a ::class:`WmiEnumeration` that can be used to
iter the result with timeouts
:returns: :class:`WmiEnumeration`
"""
enumerator = WmiEnumeration()
self.ExecQuery("WQL", query, flags, ctx, enumerator)
return enumerator
# Create friendly name for create_class_enum & create_instance_enum ?
def create_class_enum(self, superclass, flags=DEFAULT_ENUM_FLAGS, deep=True):
"""Enumerate the classes in the ``namespace`` that match ``superclass``.
if ``superclass`` is None will enumerate all top-level class. ``deep`` allow to returns all subclasses
:returns: :class:`WmiEnumeration`
.. note::
See https://docs.microsoft.com/en-us/windows/desktop/api/wbemcli/nf-wbemcli-iwbemservices-createclassenum
"""
flags |= gdef.WBEM_FLAG_DEEP if deep else gdef.WBEM_FLAG_SHALLOW
enumerator = WmiEnumeration()
self.CreateClassEnum(superclass, flags, None, enumerator)
return enumerator
@property
def classes(self):
"""The list of classes in the namespace. This a a wrapper arround :func:`create_class_enum`.
:return: [:class:`WmiObject`] - A list of :class:`WmiObject`
"""
return self.create_class_enum(None, deep=True)
def create_instance_enum(self, clsname, flags=DEFAULT_ENUM_FLAGS, deep=True):
"""Enumerate the instances of ``clsname``. Deep allows to enumerate the instance of subclasses as well
:returns: :class:`WmiEnumeration`
Example:
>>> windows.system.wmi["root\\subscription"].create_instance_enum("__EventConsumer", deep=False).all()
[]
>>> windows.system.wmi["root\\subscription"].create_instance_enum("__EventConsumer", deep=True).all()
[<WmiObject instance of "NTEventLogEventConsumer">]
.. note::
See https://docs.microsoft.com/en-us/windows/desktop/api/wbemcli/nf-wbemcli-iwbemservices-createinstanceenum
"""
flags |= gdef.WBEM_FLAG_DEEP if deep else gdef.WBEM_FLAG_SHALLOW
enumerator = WmiEnumeration()
self.CreateInstanceEnum(clsname, flags, None, enumerator)
return enumerator
def get_object(self, path):
"""Return the object matching ``path``. If ``path`` is a class name return the class object``
:return: :class:`WmiObject`
"""
result = WmiObject()
self.GetObject(path, gdef.WBEM_FLAG_RETURN_WBEM_COMPLETE, None, result, None)
return result
def put_instance(self, instance, flags=gdef.WBEM_FLAG_CREATE_ONLY):
"""Creates or updates an instance of an existing class in the namespace
:return: :class:`WmiCallResult` ``(string)`` - Used to retrieve the string representing the path of the object created/updated
"""
res = WmiCallResult(result_type="string")
self.PutInstance(instance, flags, None, res)
return res
def delete_instance(self, instance, flags=0):
"""TODO: Document"""
if isinstance(instance, gdef.IWbemClassObject):
instance = instance["__Path"]
return self.DeleteInstance(instance, flags, None, None)
    def exec_method(self, obj, method, inparam, flags=0):
        """Execute the method ``method`` on ``obj`` with the input parameters ``inparam``.
:params obj: The :class:`WmiObject` or path of the object the call apply to
:params method: The name of the method to call on the object
:params inparam: The :class:`WmiObject` representing the input parameters and retrieve using :func:`WmiObject.get_method`
:returns: :class:`WmiCallResult` ``(object)`` if flag `WBEM_FLAG_RETURN_IMMEDIATELY` was passed
:returns: :class:`WmiObject` the outparam object if flag `WBEM_FLAG_RETURN_IMMEDIATELY` was NOT passed
.. note::
This API will lakely change to better wrap with WmiObject/inparam/Dict & co
"""
if flags & gdef.WBEM_FLAG_RETURN_IMMEDIATELY:
# semisynchronous call -> WmiCallResult
result = WmiCallResult(result_type="object")
outparam = None
else:
# Synchronous call -> WmiObject (outparam)
result = None
outparam = WmiObject()
if isinstance(obj, gdef.IWbemClassObject):
obj = obj.get("__Path")
# Flags 0 -> synchronous call
# No WmiCallResult result is directly in outparam
self.ExecMethod(obj, method, 0, None, inparam, outparam, result)
return outparam or result
def __repr__(self):
null = "" if self else " (NULL)"
return """<{0} "{1}"{2}>""".format(type(self).__name__, self.name, null)
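# Usage sketch (assumes a standard root\cimv2 namespace with the Win32_Process class):
#
#   ns = WmiNamespace.connect(r"root\cimv2")
#   for proc in ns.exec_query("SELECT Name, ProcessId FROM Win32_Process").iter_timeout(2000):
#       print(proc["ProcessId"], proc["Name"])
#
# The same query is available through the manager below as
# windows.system.wmi.select("Win32_Process").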
class WmiManager(dict):
    """The main WMI class exposed, used to list and access different WMI namespaces; can be used as a dict to access
:class:`WmiNamespace` by name
Example:
>>> windows.system.wmi["root\\SecurityCenter2"]
<WmiNamespace "root\SecurityCenter2">
"""
DEFAULT_NAMESPACE = "root\\cimv2" #: The default namespace for :func:`select` & :func:`query`
def __init__(self):
# Someone is going to use wmi: let's init com !
windows.com.init()
self.wmi_requester_by_namespace = {}
@property
def default_namespace(self):
return self[self.DEFAULT_NAMESPACE]
@property
def select(self):
r""":func:`WmiRequester.select` for default WMI namespace 'root\\cimv2'"""
return self.default_namespace.select
@property
def query(self):
r""":func:`WmiRequester.query` for default WMI namespace 'root\\cimv2'"""
return self.default_namespace.query
def get_subnamespaces(self, root="root"):
return [x["Name"] for x in self[root].select("__NameSpace")]
namespaces = property(get_subnamespaces)
"""The list of available WMI namespaces"""
def _open_wmi_requester(self, namespace):
return WmiNamespace.connect(namespace)
def __missing__(self, key):
self[key] = self._open_wmi_requester(key)
return self[key]
def __repr__(self):
        return object.__repr__(self)
|
src/db-up/azext_db_up/vendored_sdks/azure_mgmt_sql/sql/models/job_step_output.py | Mannan2812/azure-cli-extensions | 207 | 11194163 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class JobStepOutput(Model):
"""The output configuration of a job step.
All required parameters must be populated in order to send to Azure.
:param type: The output destination type. Possible values include:
'SqlDatabase'. Default value: "SqlDatabase" .
:type type: str or ~azure.mgmt.sql.models.JobStepOutputType
:param subscription_id: The output destination subscription id.
:type subscription_id: str
:param resource_group_name: The output destination resource group.
:type resource_group_name: str
:param server_name: Required. The output destination server name.
:type server_name: str
:param database_name: Required. The output destination database.
:type database_name: str
:param schema_name: The output destination schema. Default value: "dbo" .
:type schema_name: str
:param table_name: Required. The output destination table.
:type table_name: str
:param credential: Required. The resource ID of the credential to use to
connect to the output destination.
:type credential: str
"""
_validation = {
'server_name': {'required': True},
'database_name': {'required': True},
'table_name': {'required': True},
'credential': {'required': True},
}
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'subscription_id': {'key': 'subscriptionId', 'type': 'str'},
'resource_group_name': {'key': 'resourceGroupName', 'type': 'str'},
'server_name': {'key': 'serverName', 'type': 'str'},
'database_name': {'key': 'databaseName', 'type': 'str'},
'schema_name': {'key': 'schemaName', 'type': 'str'},
'table_name': {'key': 'tableName', 'type': 'str'},
'credential': {'key': 'credential', 'type': 'str'},
}
def __init__(self, **kwargs):
super(JobStepOutput, self).__init__(**kwargs)
self.type = kwargs.get('type', "SqlDatabase")
self.subscription_id = kwargs.get('subscription_id', None)
self.resource_group_name = kwargs.get('resource_group_name', None)
self.server_name = kwargs.get('server_name', None)
self.database_name = kwargs.get('database_name', None)
self.schema_name = kwargs.get('schema_name', "dbo")
self.table_name = kwargs.get('table_name', None)
self.credential = kwargs.get('credential', None)
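if __name__ == "__main__":
    # Minimal construction sketch; all values are placeholders, only the four required
    # fields from _validation are supplied and the remaining fields keep their defaults.
    example = JobStepOutput(
        server_name="target-server",
        database_name="target-db",
        table_name="job_results",
        credential="example-credential-resource-id")
    print(example.type, example.schema_name)  # "SqlDatabase" "dbo"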
|
autotest/test_gwt_adv04.py | MODFLOW-USGS/modflow6 | 102 | 11194172 | <filename>autotest/test_gwt_adv04.py
"""
MODFLOW 6 Autotest
Test the advection schemes in the gwt advection package for two-dimensional
injection of solute into the middle of a square grid. The test will pass
if the results are symmetric.
"""
import os
import pytest
import sys
import numpy as np
try:
import pymake
except:
msg = "Error. Pymake package is not available.\n"
msg += "Try installing using the following command:\n"
msg += " pip install https://github.com/modflowpy/pymake/zipball/master"
raise Exception(msg)
try:
import flopy
except:
msg = "Error. FloPy package is not available.\n"
msg += "Try installing using the following command:\n"
msg += " pip install flopy"
raise Exception(msg)
from framework import testing_framework
from simulation import Simulation
ex = ["adv04a", "adv04b", "adv04c"]
scheme = ["upstream", "central", "tvd"]
exdirs = []
for s in ex:
exdirs.append(os.path.join("temp", s))
ddir = "data"
def build_model(idx, dir):
nlay, nrow, ncol = 1, 21, 21
nper = 1
perlen = [5.0]
nstp = [200]
tsmult = [1.0]
steady = [True]
delr = 1.0
delc = 1.0
botm = [0.0]
strt = 1.0
hnoflo = 1e30
hdry = -1e30
hk = 1.0
top = 1.0
laytyp = 0
# put constant heads all around the box
chdlist = []
ib = np.ones((nlay, nrow, ncol), dtype=int)
ib[:, 1 : nrow - 1, 1 : ncol - 1] = 0
idloc = np.where(ib > 0)
for k, i, j in zip(idloc[0], idloc[1], idloc[2]):
chdlist.append([(k, i, j), 0.0])
chdspdict = {0: chdlist}
# injection well with rate and concentration of 1.
w = {0: [[(0, int(nrow / 2), int(ncol / 2)), 1.0, 1.0]]}
nouter, ninner = 100, 300
hclose, rclose, relax = 1e-6, 1e-6, 1.0
tdis_rc = []
for i in range(nper):
tdis_rc.append((perlen[i], nstp[i], tsmult[i]))
name = ex[idx]
# build MODFLOW 6 files
ws = dir
sim = flopy.mf6.MFSimulation(
sim_name=name, version="mf6", exe_name="mf6", sim_ws=ws
)
# create tdis package
tdis = flopy.mf6.ModflowTdis(sim, time_units="DAYS", nper=nper, perioddata=tdis_rc)
# create gwf model
gwfname = "gwf_" + name
gwf = flopy.mf6.MFModel(
sim,
model_type="gwf6",
modelname=gwfname,
model_nam_file="{}.nam".format(gwfname),
)
# create iterative model solution and register the gwf model with it
imsgwf = flopy.mf6.ModflowIms(
sim,
print_option="SUMMARY",
outer_dvclose=hclose,
outer_maximum=nouter,
under_relaxation="NONE",
inner_maximum=ninner,
inner_dvclose=hclose,
rcloserecord=rclose,
linear_acceleration="CG",
scaling_method="NONE",
reordering_method="NONE",
relaxation_factor=relax,
filename="{}.ims".format(gwfname),
)
sim.register_ims_package(imsgwf, [gwf.name])
dis = flopy.mf6.ModflowGwfdis(
gwf,
nlay=nlay,
nrow=nrow,
ncol=ncol,
delr=delr,
delc=delc,
top=top,
botm=botm,
idomain=np.ones((nlay, nrow, ncol), dtype=int),
filename="{}.dis".format(gwfname),
)
# initial conditions
ic = flopy.mf6.ModflowGwfic(gwf, strt=strt, filename="{}.ic".format(gwfname))
# node property flow
npf = flopy.mf6.ModflowGwfnpf(gwf, save_flows=False, icelltype=laytyp, k=hk, k33=hk)
# storage
# sto = flopy.mf6.ModflowGwfsto(gwf, save_flows=False,
# iconvert=laytyp[idx],
# ss=ss[idx], sy=sy[idx],
# steady_state={0: True, 2: True},
# transient={1: True})
# chd files
chd = flopy.mf6.ModflowGwfchd(
gwf, stress_period_data=chdspdict, save_flows=False, pname="CHD-1"
)
# wel files
wel = flopy.mf6.ModflowGwfwel(
gwf,
print_input=True,
print_flows=True,
stress_period_data=w,
save_flows=False,
auxiliary="CONCENTRATION",
pname="WEL-1",
)
# output control
oc = flopy.mf6.ModflowGwfoc(
gwf,
budget_filerecord="{}.cbc".format(gwfname),
head_filerecord="{}.hds".format(gwfname),
headprintrecord=[("COLUMNS", 10, "WIDTH", 15, "DIGITS", 6, "GENERAL")],
saverecord=[("HEAD", "LAST")],
printrecord=[("HEAD", "LAST"), ("BUDGET", "LAST")],
)
# create gwt model
gwtname = "gwt_" + name
gwt = flopy.mf6.MFModel(
sim,
model_type="gwt6",
modelname=gwtname,
model_nam_file="{}.nam".format(gwtname),
)
# create iterative model solution and register the gwt model with it
imsgwt = flopy.mf6.ModflowIms(
sim,
print_option="SUMMARY",
outer_dvclose=hclose,
outer_maximum=nouter,
under_relaxation="NONE",
inner_maximum=ninner,
inner_dvclose=hclose,
rcloserecord=rclose,
linear_acceleration="BICGSTAB",
scaling_method="NONE",
reordering_method="NONE",
relaxation_factor=relax,
filename="{}.ims".format(gwtname),
)
sim.register_ims_package(imsgwt, [gwt.name])
dis = flopy.mf6.ModflowGwtdis(
gwt,
nlay=nlay,
nrow=nrow,
ncol=ncol,
delr=delr,
delc=delc,
top=top,
botm=botm,
idomain=1,
filename="{}.dis".format(gwtname),
)
# initial conditions
ic = flopy.mf6.ModflowGwtic(gwt, strt=0.0, filename="{}.ic".format(gwtname))
# advection
adv = flopy.mf6.ModflowGwtadv(
gwt, scheme=scheme[idx], filename="{}.adv".format(gwtname)
)
# mass storage and transfer
mst = flopy.mf6.ModflowGwtmst(gwt, porosity=0.1)
# sources
sourcerecarray = [("WEL-1", "AUX", "CONCENTRATION")]
ssm = flopy.mf6.ModflowGwtssm(
gwt, sources=sourcerecarray, filename="{}.ssm".format(gwtname)
)
# output control
oc = flopy.mf6.ModflowGwtoc(
gwt,
budget_filerecord="{}.cbc".format(gwtname),
concentration_filerecord="{}.ucn".format(gwtname),
concentrationprintrecord=[("COLUMNS", 10, "WIDTH", 15, "DIGITS", 6, "GENERAL")],
saverecord=[("CONCENTRATION", "LAST")],
printrecord=[("CONCENTRATION", "LAST"), ("BUDGET", "LAST")],
)
# GWF GWT exchange
gwfgwt = flopy.mf6.ModflowGwfgwt(
sim,
exgtype="GWF6-GWT6",
exgmnamea=gwfname,
exgmnameb=gwtname,
filename="{}.gwfgwt".format(name),
)
return sim, None
def eval_transport(sim):
print("evaluating transport...")
name = ex[sim.idxsim]
gwtname = "gwt_" + name
fpth = os.path.join(sim.simpath, "{}.ucn".format(gwtname))
try:
cobj = flopy.utils.HeadFile(fpth, precision="double", text="CONCENTRATION")
conc = cobj.get_data()
except:
assert False, 'could not load data from "{}"'.format(fpth)
# Check to make sure that the concentrations are symmetric in both the
# up-down and left-right directions
concud = np.flipud(conc)
assert np.allclose(concud, conc), (
"simulated concentrations are not " "symmetric in up-down direction."
)
conclr = np.fliplr(conc)
assert np.allclose(conclr, conc), (
"simulated concentrations are not " "symmetric in left-right direction."
)
return
# - No need to change any code below
@pytest.mark.parametrize(
"idx, dir",
list(enumerate(exdirs)),
)
def test_mf6model(idx, dir):
# initialize testing framework
test = testing_framework()
# build the models
test.build_mf6_models(build_model, idx, dir)
# run the test model
test.run_mf6(Simulation(dir, exfunc=eval_transport, idxsim=idx))
def main():
# initialize testing framework
test = testing_framework()
# build the models
# run the test model
for idx, dir in enumerate(exdirs):
test.build_mf6_models(build_model, idx, dir)
sim = Simulation(dir, exfunc=eval_transport, idxsim=idx)
test.run_mf6(sim)
return
if __name__ == "__main__":
# print message
print("standalone run of {}".format(os.path.basename(__file__)))
# run main routine
main()
|
reinforcement_learning/common/markdown_helper.py | pollyrolly/amazon-sagemaker-examples | 2,610 | 11194198 | <gh_stars>1000+
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
def generate_s3_write_permission_for_sagemaker_role(role):
    role_name = role.split("/")[-1]
    url = "https://console.aws.amazon.com/iam/home#/roles/%s" % role_name
    text = "1. Go to IAM console to edit current SageMaker role: [%s](%s).\n" % (role_name, url)
    text += "2. Next, go to the `Permissions tab` and click on `Attach Policy.` \n"
    text += "3. Search and select `AmazonS3FullAccess` policy\n"
    return text
def generate_kinesis_create_permission_for_sagemaker_role(role):
    role_name = role.split("/")[-1]
    url = "https://console.aws.amazon.com/iam/home#/roles/%s" % role_name
    text = "1. Go to IAM console to edit current SageMaker role: [%s](%s).\n" % (role_name, url)
    text += "2. Next, go to the `Permissions tab` and click on `Attach Policy.` \n"
    text += "3. Search and select `AmazonKinesisVideoStreamsFullAccess` policy\n"
    return text
def generate_help_for_s3_endpoint_permissions(role):
role_name = role.split("/")[-1]
url = "https://console.aws.amazon.com/iam/home#/roles/%s" % role_name
text = ">It looks like your SageMaker role has insufficient premissions. Please do the following:\n"
text += "1. Go to IAM console to edit current SageMaker role: [%s](%s).\n" % (role_name, url)
text += "2. Select %s and then click on `Edit Policy`\n" % role_name
text += "3. Select the JSON tab and add the following JSON blob to the `Statement` list:\n"
text += """```json
{
"Action": [
"ec2:DescribeRouteTables",
"ec2:CreateVpcEndpoint"
],
"Effect": "Allow",
"Resource": "*"
},```\n"""
text += "4. Now wait for a few minutes before executing this cell again!"
return text
def generate_help_for_robomaker_trust_relationship(role):
role_name = role.split("/")[-1]
url = "https://console.aws.amazon.com/iam/home#/roles/%s" % role_name
text = "1. Go to IAM console to edit current SageMaker role: [%s](%s).\n" % (role_name, url)
text += (
"2. Next, go to the `Trust relationships tab` and click on `Edit Trust Relationship.` \n"
)
text += "3. Replace the JSON blob with the following:\n"
text += """```json
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Principal": {
"Service": [
"sagemaker.amazonaws.com",
"robomaker.amazonaws.com"
]
},
"Action": "sts:AssumeRole"
}
]
}```\n"""
text += "4. Once this is complete, click on Update Trust Policy and you are done."
return text
def generate_help_for_robomaker_all_permissions(role):
role_name = role.split("/")[-1]
url = "https://console.aws.amazon.com/iam/home#/roles/%s" % role_name
text = ">It looks like your SageMaker role has insufficient premissions. Please do the following:\n"
text += "1. Go to IAM console to edit current SageMaker role: [%s](%s).\n" % (role_name, url)
text += (
"2. Click on policy starting with `AmazonSageMaker-ExecutionPolicy` and then edit policy.\n"
)
text += (
"3. Go to JSON tab, add the following JSON blob to the `Statement` list and save policy:\n"
)
text += f"""```json
{{
"Effect": "Allow",
"Action": [
"robomaker:CreateSimulationApplication",
"robomaker:DescribeSimulationApplication",
"robomaker:DeleteSimulationApplication",
"robomaker:CreateSimulationJob",
"robomaker:DescribeSimulationJob",
"robomaker:CancelSimulationJob",
"robomaker:ListSimulationApplications"
],
"Resource": [
"*"
]
}},
{{
"Effect": "Allow",
"Action": "iam:CreateServiceLinkedRole",
"Resource": "*",
"Condition": {{
"StringEquals": {{
"iam:AWSServiceName": "robomaker.amazonaws.com"
}}
}}
}},
{{
"Effect": "Allow",
"Action": "iam:PassRole",
"Resource": \"{role}\",
"Condition": {{
"StringEquals": {{
"iam:PassedToService": [
"robomaker.amazonaws.com"
]
}}
}}
}},```\n"""
text += (
"4. Next, go to the `Trust relationships tab` and click on `Edit Trust Relationship.` \n"
)
text += "5. Add the following JSON blob to the `Statement` list:\n"
text += """```json
{
"Effect": "Allow",
"Principal": {
"Service": "robomaker.amazonaws.com"
},
"Action": "sts:AssumeRole"
},```\n"""
text += "6. Now wait for a few minutes before executing this cell again!"
return text
def generate_robomaker_links(job_arns, aws_region):
simulation_ids = [job_arn.split("/")[-1] for job_arn in job_arns]
robomaker_links = []
for simulation_id in simulation_ids:
robomaker_link = (
"https://%s.console.aws.amazon.com/robomaker/home?region=%s#simulationJobs/%s"
% (aws_region, aws_region, simulation_id)
)
robomaker_links.append(robomaker_link)
markdown_content = (
"> Click on the following links for visualization of simulation jobs on RoboMaker Console\n"
)
for i in range(len(robomaker_links)):
markdown_content += "- [Simulation %s](%s) \n" % (i + 1, robomaker_links[i])
markdown_content += (
"\nYou can click on Gazebo after you open the above link to start the simulator."
)
return markdown_content
def create_s3_endpoint_manually(aws_region, default_vpc):
url = "https://%s.console.aws.amazon.com/vpc/home?region=%s#Endpoints:sort=vpcEndpointId" % (
aws_region,
aws_region,
)
text = ">VPC S3 endpoint creation failed. Please do the following to create an endpoint manually:\n"
text += "1. Go to [VPC console | Endpoints](%s)\n" % url
text += "2. Click on `Create Endpoint`. Select Service Name as `com.amazonaws.%s.s3`.\n" % (
aws_region
)
text += (
"3. Next, select your Default VPC: `%s` and click the checkbox against the main Route Table ID\n"
% (default_vpc)
)
text += "4. Select `Full Access` in policy and click on `Create Endpoint`\n"
text += "5. That should be it! Now wait for a few seconds before proceeding to the next cell."
return text
def generate_help_for_administrator_policy(role):
role_name = role.split("/")[-1]
url = "https://console.aws.amazon.com/iam/home#/roles/%s" % role_name
text = "1. Go to IAM console to edit current SageMaker role: [%s](%s).\n" % (role_name, url)
text += "2. Next, go to the `Permissions tab` and click on `Attach policies`. \n"
text += "3. Check the box for `AdministratorAccess`\n"
text += "4. Click on `Attach policy` at the bottom.\n"
text += (
"5. You'll see message `Policy AdministratorAccess has been attached for the %s`. \n"
% (role)
)
text += "6. Once this is complete, you are all set."
return text
def generate_help_for_experiment_manager_permissions(role):
role_name = role.split("/")[-1]
url = "https://console.aws.amazon.com/iam/home#/roles/%s" % role_name
text = ">It looks like your SageMaker role has insufficient premissions. Please do the following:\n"
text += "1. Go to IAM console to edit current SageMaker role: [%s](%s).\n" % (role_name, url)
text += (
"2. Click on policy starting with `AmazonSageMaker-ExecutionPolicy` and then edit policy.\n"
)
text += (
"3. Go to JSON tab, add the following JSON blob to the `Statement` list and save policy:\n"
)
text += f"""```json
{{
"Effect": "Allow",
"Action": [
"cloudformation:DescribeStacks",
"cloudformation:ValidateTemplate",
"cloudformation:CreateStack",
"dynamodb:DescribeTable",
"dynamodb:CreateTable",
"dynamodb:DeleteTable",
"dynamodb:PutItem",
"dynamodb:UpdateItem",
"dynamodb:DeleteItem",
"dynamodb:Query",
"dynamodb:BatchWriteItem",
"iam:CreateRole",
"iam:GetRole",
"iam:PutRolePolicy",
"iam:DeleteRolePolicy",
"iam:DeleteRole",
"cloudwatch:PutDashboard",
"firehose:ListDeliveryStreams",
"firehose:DeleteDeliveryStream",
"firehose:DescribeDeliveryStream",
"firehose:CreateDeliveryStream",
"athena:StartQueryExecution",
"athena:GetQueryExecution",
"glue:GetTable",
"glue:DeleteTable",
"glue:GetPartitions",
"glue:UpdateTable",
"glue:CreateTable",
"glue:GetDatabase"
],
"Resource": [
"*"
]
}},
{{
"Effect": "Allow",
"Action": "iam:PassRole",
"Resource": \"{role}\"
}}```\n"""
text += "4. Now wait for a few minutes before executing this cell again!"
return text
|
sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/scope_job_properties_py3.py | rsdoherty/azure-sdk-for-python | 2,728 | 11194203 | <reponame>rsdoherty/azure-sdk-for-python
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .job_properties_py3 import JobProperties
class ScopeJobProperties(JobProperties):
"""Scope job properties used when submitting and retrieving Scope jobs. (Only
for use internally with Scope job type.).
Variables are only populated by the server, and will be ignored when
sending a request.
All required parameters must be populated in order to send to Azure.
:param runtime_version: The runtime version of the Data Lake Analytics
engine to use for the specific type of job being run.
:type runtime_version: str
:param script: Required. The script to run. Please note that the maximum
script size is 3 MB.
:type script: str
:param type: Required. Constant filled by server.
:type type: str
:ivar resources: The list of resources that are required by the job.
:vartype resources:
list[~azure.mgmt.datalake.analytics.job.models.ScopeJobResource]
:ivar user_algebra_path: The algebra file path after the job has
completed.
:vartype user_algebra_path: str
:param notifier: The list of email addresses, separated by semi-colons, to
notify when the job reaches a terminal state.
:type notifier: str
:ivar total_compilation_time: The total time this job spent compiling.
This value should not be set by the user and will be ignored if it is.
:vartype total_compilation_time: timedelta
:ivar total_queued_time: The total time this job spent queued. This value
should not be set by the user and will be ignored if it is.
:vartype total_queued_time: timedelta
:ivar total_running_time: The total time this job spent executing. This
value should not be set by the user and will be ignored if it is.
:vartype total_running_time: timedelta
:ivar total_paused_time: The total time this job spent paused. This value
should not be set by the user and will be ignored if it is.
:vartype total_paused_time: timedelta
:ivar root_process_node_id: The ID used to identify the job manager
coordinating job execution. This value should not be set by the user and
will be ignored if it is.
:vartype root_process_node_id: str
:ivar yarn_application_id: The ID used to identify the yarn application
executing the job. This value should not be set by the user and will be
ignored if it is.
:vartype yarn_application_id: str
"""
_validation = {
'script': {'required': True},
'type': {'required': True},
'resources': {'readonly': True},
'user_algebra_path': {'readonly': True},
'total_compilation_time': {'readonly': True},
'total_queued_time': {'readonly': True},
'total_running_time': {'readonly': True},
'total_paused_time': {'readonly': True},
'root_process_node_id': {'readonly': True},
'yarn_application_id': {'readonly': True},
}
_attribute_map = {
'runtime_version': {'key': 'runtimeVersion', 'type': 'str'},
'script': {'key': 'script', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'resources': {'key': 'resources', 'type': '[ScopeJobResource]'},
'user_algebra_path': {'key': 'userAlgebraPath', 'type': 'str'},
'notifier': {'key': 'notifier', 'type': 'str'},
'total_compilation_time': {'key': 'totalCompilationTime', 'type': 'duration'},
'total_queued_time': {'key': 'totalQueuedTime', 'type': 'duration'},
'total_running_time': {'key': 'totalRunningTime', 'type': 'duration'},
'total_paused_time': {'key': 'totalPausedTime', 'type': 'duration'},
'root_process_node_id': {'key': 'rootProcessNodeId', 'type': 'str'},
'yarn_application_id': {'key': 'yarnApplicationId', 'type': 'str'},
}
def __init__(self, *, script: str, runtime_version: str=None, notifier: str=None, **kwargs) -> None:
super(ScopeJobProperties, self).__init__(runtime_version=runtime_version, script=script, **kwargs)
self.resources = None
self.user_algebra_path = None
self.notifier = notifier
self.total_compilation_time = None
self.total_queued_time = None
self.total_running_time = None
self.total_paused_time = None
self.root_process_node_id = None
self.yarn_application_id = None
self.type = 'Scope'
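# Hedged usage sketch (not part of the generated file): inside the SDK package a
# Scope job's properties are typically built like this before being attached to a
# job submission payload. The script text and notifier below are placeholders.
#
#     props = ScopeJobProperties(
#         script="RESULT = SELECT * FROM input;",
#         runtime_version="default",
#         notifier="user@example.com",
#     )
#     assert props.type == 'Scope'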
|
transformers/augmentation/uszipcode_features_light.py | ucds-sg/h2oai | 194 | 11194207 | """Lightweight transformer to parse and augment US zipcodes with info from zipcode database."""
from h2oaicore.transformer_utils import CustomTransformer
import datatable as dt
import numpy as np
from abc import ABC, abstractmethod
_global_modules_needed_by_name = ['zipcodes==1.0.5']
import zipcodes
class ZipcodeLightBaseTransformer(ABC):
@staticmethod
def get_default_properties():
return dict(col_type="categorical", min_cols=1, max_cols=1, relative_importance=1)
@abstractmethod
def get_property_name(self):
raise NotImplementedError
def get_zipcode_property(self, zipcode_obj):
if zipcode_obj is None:
return None
else:
return zipcode_obj[self.get_property_name()]
def parse_zipcode(self, value):
try:
result = zipcodes.matching(value)
            if len(result) >= 1:
                return result[0]
            else:
                return None
except ValueError:
return None
except TypeError:
raise TypeError
def fit_transform(self, X: dt.Frame, y: np.array = None):
return self.transform(X)
def transform(self, X: dt.Frame):
try:
X = dt.Frame(X)
X.names = ['zip_key']
X = X[:, str('zip_key')]
zip_list = dt.unique(X[~dt.isna(dt.f.zip_key), 0]).to_list()[0]
zip_features = [self.get_zipcode_property(self.parse_zipcode(x)) for x in zip_list]
X_g = dt.Frame({"zip_key": zip_list, self.get_property_name(): zip_features})
X_g.key = 'zip_key'
X_result = X[:, :, dt.join(X_g)]
return X_result[:, 1:]
except:
return np.zeros(X.shape[0])
class ZipcodeTypeTransformer(ZipcodeLightBaseTransformer, CustomTransformer):
    def get_property_name(self):
        return 'zip_code_type'
class ZipcodeCityTransformer(ZipcodeLightBaseTransformer, CustomTransformer):
    def get_property_name(self):
        return 'city'
class ZipcodeStateTransformer(ZipcodeLightBaseTransformer, CustomTransformer):
    def get_property_name(self):
        return 'state'
class ZipcodeLatitudeTransformer(ZipcodeLightBaseTransformer, CustomTransformer):
    def get_property_name(self):
        return 'lat'
class ZipcodeLongitudeTransformer(ZipcodeLightBaseTransformer, CustomTransformer):
    def get_property_name(self):
        return 'long'
class ZipcodeIsActiveTransformer(ZipcodeLightBaseTransformer, CustomTransformer):
    def get_property_name(self):
        return 'active'
class Zipcode5Transformer(ZipcodeLightBaseTransformer, CustomTransformer):
    def get_property_name(self):
        return 'zip_code'
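# Hedged usage sketch (not part of the original recipe): outside Driverless AI the
# CustomTransformer plumbing is unavailable, so this only illustrates the zipcode
# lookup the transformers above rely on. "90210" is an arbitrary example value.
#
#     matches = zipcodes.matching("90210")
#     if matches:
#         print(matches[0].get("city"), matches[0].get("state"))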
|
DN_RGB/code/data/__init__.py | Whoo-jl/Pyramid-Attention-Networks | 338 | 11194217 | <reponame>Whoo-jl/Pyramid-Attention-Networks<filename>DN_RGB/code/data/__init__.py<gh_stars>100-1000
from importlib import import_module
#from dataloader import MSDataLoader
from torch.utils.data import dataloader
from torch.utils.data import ConcatDataset
# This is a simple wrapper function for ConcatDataset
class MyConcatDataset(ConcatDataset):
def __init__(self, datasets):
super(MyConcatDataset, self).__init__(datasets)
self.train = datasets[0].train
def set_scale(self, idx_scale):
for d in self.datasets:
if hasattr(d, 'set_scale'): d.set_scale(idx_scale)
class Data:
def __init__(self, args):
self.loader_train = None
if not args.test_only:
datasets = []
for d in args.data_train:
module_name = d if d.find('DIV2K-Q') < 0 else 'DIV2KJPEG'
m = import_module('data.' + module_name.lower())
datasets.append(getattr(m, module_name)(args, name=d))
self.loader_train = dataloader.DataLoader(
MyConcatDataset(datasets),
batch_size=args.batch_size,
shuffle=True,
pin_memory=not args.cpu,
num_workers=args.n_threads,
)
self.loader_test = []
for d in args.data_test:
if d in ['CBSD68','Kodak24','Set5', 'Set14', 'B100', 'Urban100']:
m = import_module('data.benchmark')
testset = getattr(m, 'Benchmark')(args, train=False, name=d)
else:
module_name = d if d.find('DIV2K-Q') < 0 else 'DIV2KJPEG'
m = import_module('data.' + module_name.lower())
testset = getattr(m, module_name)(args, train=False, name=d)
self.loader_test.append(
dataloader.DataLoader(
testset,
batch_size=1,
shuffle=False,
pin_memory=not args.cpu,
num_workers=args.n_threads,
)
)
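# Hedged usage sketch (not part of the original module): `Data` expects the
# argparse namespace produced by the project's option parser, so the wiring
# below is only a rough, hypothetical outline.
#
#     from option import args          # project-specific parser (assumed)
#     loaders = Data(args)
#     for batch in loaders.loader_train:
#         ...                          # training loop consumes noisy/clean pairs
#     for test_loader in loaders.loader_test:
#         ...                          # one loader per benchmark dataset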
|
src/olympia/amo/tests/test_commands.py | covariant/addons-server | 843 | 11194220 | <gh_stars>100-1000
import os
import io
from importlib import import_module
from django.conf import settings
from django.core.management import call_command
from django.core.management.base import CommandError
from django.test.utils import override_settings
from unittest import mock
import pytest
def sample_cron_job(*args):
pass
@override_settings(CRON_JOBS={'sample_cron_job': 'olympia.amo.tests.test_commands'})
@mock.patch('olympia.amo.tests.test_commands.sample_cron_job')
def test_cron_command(_mock):
assert _mock.call_count == 0
call_command('cron', 'sample_cron_job', 'arg1', 'arg2')
assert _mock.call_count == 1
_mock.assert_called_with('arg1', 'arg2')
call_command('cron', 'sample_cron_job', 'kwarg1=a', 'kwarg2=b')
assert _mock.call_count == 2
_mock.assert_called_with(kwarg1='a', kwarg2='b')
@override_settings(CRON_JOBS={'sample_cron_job': 'olympia.amo.tests.test_commands'})
def test_cron_command_no_job():
with pytest.raises(CommandError) as error_info:
call_command('cron')
assert 'These jobs are available:' in str(error_info.value)
assert 'sample_cron_job' in str(error_info.value)
def test_cron_command_invalid_job():
with pytest.raises(CommandError) as error_info:
call_command('cron', 'made_up_job')
assert 'Unrecognized job name: made_up_job' in str(error_info.value)
def test_cron_jobs_setting():
for name, path in settings.CRON_JOBS.items():
module = import_module(path)
getattr(module, name)
@pytest.mark.static_assets
def test_compress_assets_correctly_fetches_static_images(settings, tmpdir):
"""
Make sure that `compress_assets` correctly fetches static assets
such as icons and writes them correctly into our compressed
and concatted files.
Refs https://github.com/mozilla/addons-server/issues/8760
"""
settings.MINIFY_BUNDLES = {'css': {'zamboni/_test_css': ['css/legacy/main.css']}}
css_all = os.path.join(settings.STATIC_ROOT, 'css', 'zamboni', '_test_css-all.css')
css_min = os.path.join(settings.STATIC_ROOT, 'css', 'zamboni', '_test_css-min.css')
# Delete the files if they exist - they are specific to tests.
try:
os.remove(css_all)
except FileNotFoundError:
pass
try:
os.remove(css_min)
except FileNotFoundError:
pass
# Capture output to avoid it being logged and allow us to validate it
# later if needed
out = io.StringIO()
# Now run compress and collectstatic
call_command('compress_assets', force=True, stdout=out)
call_command('collectstatic', interactive=False, stdout=out)
with open(css_all) as fobj:
expected = 'background-image: url(../../img/icons/stars.png'
assert expected in fobj.read()
# Compressed doesn't have any whitespace between `background-image:` and
# the url and the path is slightly different
with open(css_min) as fobj:
data = fobj.read()
assert 'background-image:url(' in data
assert 'img/icons/stars.png' in data
@pytest.mark.static_assets
def test_compress_assets_correctly_compresses_js(settings, tmpdir):
"""
Make sure that `compress_assets` correctly calls the JS minifier and that
it generates a minified file.
"""
settings.MINIFY_BUNDLES = {'js': {'zamboni/_test_js': ['js/zamboni/global.js']}}
js_all = os.path.join(settings.STATIC_ROOT, 'js', 'zamboni', '_test_js-all.js')
js_min = os.path.join(settings.STATIC_ROOT, 'js', 'zamboni', '_test_js-min.js')
# Delete the files if they exist - they are specific to tests.
try:
os.remove(js_all)
except FileNotFoundError:
pass
try:
os.remove(js_min)
except FileNotFoundError:
pass
# Capture output to avoid it being logged and allow us to validate it
# later if needed
out = io.StringIO()
# Now run compress and collectstatic
call_command('compress_assets', force=True, stdout=out)
call_command('collectstatic', interactive=False, stdout=out)
# Files should exist now.
assert os.path.getsize(js_all)
assert os.path.getsize(js_min)
@pytest.mark.needs_locales_compilation
def test_generate_jsi18n_files():
dirname = os.path.join(settings.STATICFILES_DIRS[0], 'js', 'i18n')
assert os.path.exists(dirname)
filename = os.path.join(dirname, 'fr.js')
call_command('generate_jsi18n_files')
# Regardless of whether or not the file existed before, it needs to exist
# now.
assert os.path.exists(filename), filename
# Spot-check: Look for a string we know should be in the french file
# (Translation for "Error").
filename = os.path.join(settings.STATICFILES_DIRS[0], 'js', 'i18n', 'fr.js')
with open(filename) as f:
content = f.read()
assert 'Erreur' in content
|
factory/base.py | vuthede/MMNet | 179 | 11194222 | <gh_stars>100-1000
from abc import ABC
from abc import abstractmethod
import tensorflow as tf
import tensorflow.contrib.slim as slim
import common.tf_utils as tf_utils
class CNNModel(ABC):
def preprocess_images(self, images, preprocess_method, reuse=False):
with tf.variable_scope("preprocess", reuse=reuse):
if images.dtype == tf.uint8:
images = tf.cast(images, tf.float32)
if preprocess_method == "preprocess_normalize":
# -- * -- preprocess_normalize
# Scale input images to range [0, 1], same scales like mean of masks
images = tf.divide(images, tf.constant(255.0))
elif preprocess_method == "no_preprocessing":
pass
else:
raise ValueError("Unsupported preprocess_method: {}".format(preprocess_method))
return images
@staticmethod
def add_arguments(parser, default_type):
g_cnn = parser.add_argument_group("(CNNModel) Arguments")
assert default_type in ["matting", None]
g_cnn.add_argument("--task_type", type=str, required=True,
choices=[
"matting",
])
g_cnn.add_argument("--num_classes", type=int, default=None,
help=(
"It is currently not used in multi-task learning, "
"so it can't *required*"
))
g_cnn.add_argument("--checkpoint_path", default="", type=str)
g_cnn.add_argument("--input_name", type=str, default="input/image")
g_cnn.add_argument("--input_batch_size", type=int, default=1)
g_cnn.add_argument("--output_name", type=str, required=True)
g_cnn.add_argument("--output_type", type=str, help="mainly used in convert.py", required=True)
g_cnn.add_argument("--no-use_fused_batchnorm", dest="use_fused_batchnorm", action="store_false")
g_cnn.add_argument("--use_fused_batchnorm", dest="use_fused_batchnorm", action="store_true")
g_cnn.set_defaults(use_fused_batchnorm=True)
g_cnn.add_argument("--verbosity", default=0, type=int,
help="If verbosity > 0, then summary batch_norm scalar metrics etc")
g_cnn.add_argument("--preprocess_method", required=True, type=str,
choices=["no_preprocessing", "preprocess_normalize"])
g_cnn.add_argument("--no-ignore_missing_vars", dest="ignore_missing_vars", action="store_false")
g_cnn.add_argument("--ignore_missing_vars", dest="ignore_missing_vars", action="store_true")
g_cnn.set_defaults(ignore_missing_vars=False)
g_cnn.add_argument("--checkpoint_exclude_scopes", default="", type=str,
help=("Prefix scopes that shoule be EXLUDED for restoring variables "
"(comma separated)\n Usually Logits e.g. InceptionResnetV2/Logits/Logits, "
"InceptionResnetV2/AuxLogits/Logits"))
g_cnn.add_argument("--checkpoint_include_scopes", default="", type=str,
help=("Prefix scopes that should be INCLUDED for restoring variables "
"(comma separated)"))
def build_finish(self, is_training, log):
total_params = tf_utils.show_models(log)
if self.args.verbosity >= 1:
slim.model_analyzer.analyze_ops(tf.get_default_graph(), print_info=True)
return total_params
@abstractmethod
def build_output(self):
pass
@property
@abstractmethod
def images(self):
pass
@property
@abstractmethod
def images_original(self):
pass
@property
@abstractmethod
def total_loss(self):
pass
@property
@abstractmethod
def model_loss(self):
pass
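# Hedged sketch (not part of the original file): concrete models are expected to
# subclass CNNModel and provide the abstract members; the names below are
# illustrative placeholders only.
#
#     class ToyMattingModel(CNNModel):
#         def build_output(self):
#             ...
#         @property
#         def images(self):
#             return self._images
#         @property
#         def images_original(self):
#             return self._images_original
#         @property
#         def total_loss(self):
#             return self._total_loss
#         @property
#         def model_loss(self):
#             return self._model_loss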
|
smt/sampling_methods/random.py | Laurentww/smt | 354 | 11194233 | <filename>smt/sampling_methods/random.py
"""
Author: Dr. <NAME> <<EMAIL>>
This package is distributed under New BSD license.
Random sampling.
"""
import numpy as np
from smt.sampling_methods.sampling_method import ScaledSamplingMethod
class Random(ScaledSamplingMethod):
def _compute(self, nt):
"""
Implemented by sampling methods to compute the requested number of sampling points.
The number of dimensions (nx) is determined based on `xlimits.shape[0]`.
Arguments
---------
nt : int
Number of points requested.
Returns
-------
ndarray[nt, nx]
The sampling locations in the unit hypercube.
"""
xlimits = self.options["xlimits"]
nx = xlimits.shape[0]
return np.random.rand(nt, nx)
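# Hedged usage sketch (not part of the original module), following the public
# sampling-method interface: the sampler is built from the design-space limits
# and then called with the number of points wanted.
#
#     from smt.sampling_methods import Random
#     xlimits = np.array([[0.0, 4.0], [0.0, 3.0]])
#     sampling = Random(xlimits=xlimits)
#     x = sampling(10)   # -> array of shape (10, 2) inside the given limits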
|
library/source1/bsp/lumps/game_lump.py | anderlli0053/SourceIO | 199 | 11194237 | from typing import List
from .. import Lump, lump_tag
from ..datatypes.game_lump_header import GameLumpHeader, VindictusGameLumpHeader
from ..datatypes.gamelumps.detail_prop_lump import DetailPropLump
from ..datatypes.gamelumps.static_prop_lump import StaticPropLump
from . import SteamAppId
from . import ByteIO
@lump_tag(35, 'LUMP_GAME_LUMP')
class GameLump(Lump):
def __init__(self, bsp, lump_id):
super().__init__(bsp, lump_id)
self.lump_count = 0
self.game_lumps_info: List[GameLumpHeader] = []
self.game_lumps = {}
def parse(self):
reader = self.reader
self.lump_count = reader.read_uint32()
for _ in range(self.lump_count):
lump = GameLumpHeader(self, self._bsp).parse(reader)
if not lump.id:
continue
self.game_lumps_info.append(lump)
for lump in self.game_lumps_info:
relative_offset = lump.offset - self._lump.offset
print(f'GLump "{lump.id}" offset: {relative_offset} size: {lump.size} ')
with reader.save_current_pos():
reader.seek(relative_offset)
if lump.flags == 1:
curr_index = self.game_lumps_info.index(lump)
if curr_index + 1 != len(self.game_lumps_info):
next_offset = self.game_lumps_info[curr_index + 1].offset - self._lump.offset
else:
next_offset = self._lump.size
compressed_size = next_offset - relative_offset
buffer = reader.read(compressed_size)
game_lump_reader = Lump.decompress_lump(ByteIO(buffer))
else:
game_lump_reader = ByteIO(reader.read(lump.size))
pass # TODO
if lump.id == 'sprp':
game_lump = StaticPropLump(lump)
game_lump.parse(game_lump_reader)
self.game_lumps[lump.id] = game_lump
elif lump.id == 'dprp':
detail_lump = DetailPropLump(lump)
detail_lump.parse(game_lump_reader)
self.game_lumps[lump.id] = detail_lump
return self
@lump_tag(35, 'LUMP_GAME_LUMP', steam_id=SteamAppId.VINDICTUS)
class VGameLump(Lump):
def __init__(self, bsp, lump_id):
super().__init__(bsp, lump_id)
self.lump_count = 0
self.game_lumps_info: List[GameLumpHeader] = []
self.game_lumps = {}
def parse(self):
reader = self.reader
self.lump_count = reader.read_uint32()
for _ in range(self.lump_count):
lump = VindictusGameLumpHeader(self, self._bsp).parse(reader)
if not lump.id:
continue
self.game_lumps_info.append(lump)
for lump in self.game_lumps_info:
relative_offset = lump.offset - self._lump.offset
print(f'GLump "{lump.id}" offset: {relative_offset} size: {lump.size} ')
with reader.save_current_pos():
reader.seek(relative_offset)
if lump.flags == 1:
curr_index = self.game_lumps_info.index(lump)
if curr_index + 1 != len(self.game_lumps_info):
next_offset = self.game_lumps_info[curr_index + 1].offset - self._lump.offset
else:
next_offset = self._lump.size
compressed_size = next_offset - relative_offset
buffer = reader.read(compressed_size)
game_lump_reader = Lump.decompress_lump(ByteIO(buffer))
else:
game_lump_reader = ByteIO(reader.read(lump.size))
pass # TODO
if lump.id == 'sprp':
game_lump = StaticPropLump(lump)
game_lump.parse(game_lump_reader)
self.game_lumps[lump.id] = game_lump
elif lump.id == 'dprp':
detail_lump = DetailPropLump(lump)
detail_lump.parse(game_lump_reader)
self.game_lumps[lump.id] = detail_lump
return self
|
tests/test_backend_hdf5_00_hdf5_01.py | jjmachan/hangar-py | 202 | 11194246 | <reponame>jjmachan/hangar-py<filename>tests/test_backend_hdf5_00_hdf5_01.py<gh_stars>100-1000
import pytest
import numpy as np
@pytest.fixture(params=['00', '01'])
def be_filehandle(request):
if request.param == '00':
from hangar.backends.hdf5_00 import HDF5_00_FileHandles
return HDF5_00_FileHandles
elif request.param == '01':
from hangar.backends.hdf5_01 import HDF5_01_FileHandles
return HDF5_01_FileHandles
else:
raise ValueError(f'request param "{request.param}" for backend code unknown.')
@pytest.mark.parametrize('clib,clibCode',
[('blosc:blosclz', 0), ('blosc:lz4', 1),
('blosc:lz4hc', 2), ('blosc:zlib', 4),
('blosc:zstd', 5)])
@pytest.mark.parametrize('clevel', [1, 4, 8])
@pytest.mark.parametrize('cshuffle,cshuffleCode', [(None, 0), ('byte', 1), ('bit', 2)])
@pytest.mark.parametrize('beCode', ['00', '01'])
def test_blosc_filter_opts_result_in_correct_dataset_args(
be_filehandle, clib, clibCode, clevel, cshuffle, cshuffleCode, beCode):
out = be_filehandle._dataset_opts(complib=clib,
complevel=clevel,
shuffle=cshuffle)
expected = {
'compression': 32001,
'compression_opts': (0, 0, 0, 0, clevel, cshuffleCode, clibCode),
'shuffle': False}
assert out == expected
@pytest.mark.parametrize('cshuffle,cshuffleCode', [(None, False), ('byte', True)])
def test_lzf_filter_opts_result_in_correct_dataset_args(be_filehandle, cshuffle, cshuffleCode):
out = be_filehandle._dataset_opts(complib='lzf',
complevel=None,
shuffle=cshuffle)
expected = {
'compression': 'lzf',
'compression_opts': None,
'shuffle': cshuffleCode}
assert out == expected
@pytest.mark.parametrize('clevel', [1, 4, 8])
@pytest.mark.parametrize('cshuffle,cshuffleCode', [(None, False), ('byte', True)])
def test_gzip_filter_opts_result_in_correct_dataset_args(be_filehandle, clevel, cshuffle, cshuffleCode):
out = be_filehandle._dataset_opts(complib='gzip',
complevel=clevel,
shuffle=cshuffle)
expected = {
'compression': 'gzip',
'compression_opts': clevel,
'shuffle': cshuffleCode}
assert out == expected
# ------------------------- test actual compression ---------------------------
@pytest.mark.parametrize('clib,clibCode',
[('blosc:blosclz', 0), ('blosc:lz4', 1),
('blosc:lz4hc', 2), ('blosc:zlib', 4),
('blosc:zstd', 5)])
@pytest.mark.parametrize('clevel', [1, 4, 8])
@pytest.mark.parametrize('cshuffle,cshuffleCode', [(None, 0), ('byte', 1), ('bit', 2)])
@pytest.mark.parametrize('be_code', ['00', '01'])
def test_arrayset_init_with_various_blosc_opts(repo, array5by7, clib, clibCode, clevel, cshuffle, cshuffleCode, be_code):
opts = {
'shuffle': cshuffle,
'complib': clib,
'complevel': clevel,
}
wco = repo.checkout(write=True)
aset = wco.add_ndarray_column('aset', prototype=array5by7, backend=be_code, backend_options=opts)
assert aset.backend == be_code
with aset as a:
for i in range(10):
a[i] = array5by7 + i
wuid = aset._be_fs[be_code].w_uid
plist = aset._be_fs[be_code].wFp[wuid]['/0'].id.get_create_plist()
_, _, resopts, _ = plist.get_filter(0)
res_clevel, res_cshuffle, res_clib = resopts[4:7]
assert res_clevel == clevel
assert res_clib == clibCode
assert res_cshuffle == cshuffleCode
wco.commit('hi')
wco.close()
@pytest.mark.parametrize('cshuffle,cshuffleCode', [(False, False), (True, True)])
@pytest.mark.parametrize('be_code', ['00', '01'])
def test_arrayset_init_with_various_lzf_opts(repo, array5by7, cshuffle, cshuffleCode, be_code):
opts = {
'shuffle': cshuffle,
'complib': 'lzf',
'complevel': None,
}
wco = repo.checkout(write=True)
aset = wco.add_ndarray_column('aset', prototype=array5by7, backend=be_code, backend_options=opts)
assert aset.backend == be_code
with aset as a:
for i in range(10):
a[i] = array5by7 + i
res_compression = aset._be_fs[be_code].wFp[aset._be_fs[be_code].w_uid]['/0'].compression
res_shuffle = aset._be_fs[be_code].wFp[aset._be_fs[be_code].w_uid]['/0'].shuffle
assert res_compression == 'lzf'
assert res_shuffle == cshuffleCode
wco.commit('hi')
wco.close()
@pytest.mark.parametrize('clevel', [1, 4, 8])
@pytest.mark.parametrize('cshuffle,cshuffleCode', [(False, False), (True, True)])
@pytest.mark.parametrize('be_code', ['00', '01'])
def test_arrayset_init_with_various_gzip_opts(repo, array5by7, clevel, cshuffle, cshuffleCode, be_code):
opts = {
'shuffle': cshuffle,
'complib': 'gzip',
'complevel': clevel,
}
wco = repo.checkout(write=True)
aset = wco.add_ndarray_column(
'aset', prototype=array5by7, backend=be_code, backend_options=opts)
assert aset.backend == be_code
with aset as a:
for i in range(10):
a[i] = array5by7 + i
res_compression = aset._be_fs[be_code].wFp[aset._be_fs[be_code].w_uid]['/0'].compression
res_compression_opts = aset._be_fs[be_code].wFp[aset._be_fs[be_code].w_uid]['/0'].compression_opts
res_shuffle = aset._be_fs[be_code].wFp[aset._be_fs[be_code].w_uid]['/0'].shuffle
assert res_compression == 'gzip'
assert res_shuffle == cshuffleCode
assert res_compression_opts == clevel
wco.commit('hi')
wco.close()
@pytest.mark.parametrize('be_code', ['00', '01'])
def test_arrayset_overflows_collection_size_collection_count(be_code, repo, monkeypatch):
if be_code == '00':
from hangar.backends import hdf5_00
monkeypatch.setattr(hdf5_00, 'COLLECTION_COUNT', 5)
monkeypatch.setattr(hdf5_00, 'COLLECTION_SIZE', 10)
elif be_code == '01':
from hangar.backends import hdf5_01
monkeypatch.setattr(hdf5_01, 'COLLECTION_COUNT', 5)
monkeypatch.setattr(hdf5_01, 'COLLECTION_SIZE', 10)
else:
raise ValueError(f'be_code param "{be_code}" unknown.')
wco = repo.checkout(write=True)
proto = np.arange(50).astype(np.uint16)
aset = wco.add_ndarray_column('aset', prototype=proto, backend=be_code)
with aset as cm_aset:
for i in range(500):
proto[:] = i
cm_aset[i] = proto
assert aset._be_fs[be_code].hColsRemain == 4
assert aset._be_fs[be_code].hMaxSize == 10
wco.commit('hello')
with aset as cm_aset:
for i in range(500):
proto[:] = i
assert np.allclose(proto, cm_aset[i])
wco.close()
rco = repo.checkout()
naset = rco.columns['aset']
with naset as ncm_aset:
for i in range(500):
proto[:] = i
assert np.allclose(proto, ncm_aset[i])
rco.close()
|
test/utils.py | b1d-farewell/pytorch_sparse | 623 | 11194248 | <reponame>b1d-farewell/pytorch_sparse
import torch
reductions = ['sum', 'add', 'mean', 'min', 'max']
dtypes = [torch.half, torch.float, torch.double, torch.int, torch.long]
grad_dtypes = [torch.half, torch.float, torch.double]
devices = [torch.device('cpu')]
if torch.cuda.is_available():
devices += [torch.device(f'cuda:{torch.cuda.current_device()}')]
def tensor(x, dtype, device):
return None if x is None else torch.tensor(x, dtype=dtype, device=device)
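# Hedged usage sketch (not part of the original helper): the test-suite combines
# these lists to parametrize over dtype/device and uses `tensor` to build inputs.
if __name__ == "__main__":
    for dtype in grad_dtypes:
        for device in devices:
            print(tensor([[1.0, 2.0], [3.0, 4.0]], dtype, device).dtype)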
|
leo/plugins/importers/rust.py | thomasbuttler/leo-editor | 1,550 | 11194284 | <gh_stars>1000+
#@+leo-ver=5-thin
#@+node:ekr.20200316100818.1: * @file ../plugins/importers/rust.py
"""The @auto importer for rust."""
import re
from leo.core import leoGlobals as g
from leo.plugins.importers import linescanner
assert g
Importer = linescanner.Importer
Target = linescanner.Target
#@+others
#@+node:ekr.20200316101240.2: ** class Rust_Importer
class Rust_Importer(Importer):
def __init__(self, importCommands, **kwargs):
"""rust_Importer.__init__"""
# Init the base class.
super().__init__(
importCommands,
language='rust',
state_class=Rust_ScanState,
)
self.headline = None
#@+others
#@+node:ekr.20200317114526.1: *3* rust_i.clean_headline
arg_pat = re.compile(r'(\(.*?\))')
type_pat = re.compile(r'(\s*->.*)')
life_pat = re.compile(r'(\<.*\>)')
body_pat = re.compile(r'(\{.*\})')
def clean_headline(self, s, p=None):
"""
Remove argument list and return value.
"""
s = s.strip()
m = self.func_pattern.match(s)
if not m:
return s
g1 = m.group(1) or ''
g2 = m.group(2) or ''
head = f"{g1} {g2}".strip()
# Remove the argument list and return value.
tail = m.group(3) or ''.strip()
tail = re.sub(self.arg_pat, '', tail, count=1)
tail = re.sub(self.type_pat, '', tail, count=1)
tail = re.sub(self.body_pat, '', tail, count=1)
# Clean lifetime specs except for impl.
if not head.startswith('impl'):
tail = re.sub(self.life_pat, '', tail, count=1)
# Remove trailing '(' or '{'
tail = tail.strip()
while tail.endswith(('{', '(', ',', ')')):
tail = tail[:-1].rstrip()
# Remove trailing '>' sometimes.
while '<' not in tail and tail.endswith('>'):
tail = tail[:-1].rstrip()
return f"{head} {tail}".strip().replace(' ', ' ')
#@+node:ekr.20200316101240.4: *3* rust_i.match_start_patterns
# clean_headline also uses this pattern.
func_pattern = re.compile(r'\s*(pub )?\s*(enum|fn|impl|mod|struct|trait)\b(.*)')
def match_start_patterns(self, line):
"""
True if line matches any block-starting pattern.
If true, set self.headline.
"""
m = self.func_pattern.match(line)
if m:
self.headline = line.strip()
return bool(m)
#@+node:ekr.20200623083608.1: *3* rust_i.promote_last_lines
def promote_last_lines(self, parent):
"""
Move trailing comment and macro lines to the start of the next node.
For now, @others anywhere in a node prevents all moves.
"""
for p in parent.subtree():
next = p.threadNext()
if not next:
continue
lines = self.get_lines(p)
if '@others' in ''.join(lines):
# Don't move anything.
continue
comment_lines = []
for line in reversed(lines):
if line.strip().startswith(('//', '#[', '#!')):
comment_lines.insert(0, line)
lines.pop()
elif line.strip():
break
else:
lines.pop()
if ''.join(comment_lines).strip():
next_lines = self.get_lines(next)
self.set_lines(next, comment_lines + next_lines)
self.set_lines(p, lines)
#@+node:ekr.20200316101240.5: *3* rust_i.start_new_block
def start_new_block(self, i, lines, new_state, prev_state, stack):
"""Create a child node and update the stack."""
line = lines[i]
target = stack[-1]
# Insert the reference in *this* node.
h = self.gen_ref(line, target.p, target)
# Create a new child and associated target.
if self.headline:
h = self.headline
if new_state.level() > prev_state.level():
child = self.create_child_node(target.p, line, h)
else:
# We may not have seen the { yet, so adjust.
# Without this, the new block becomes a child of the preceding.
new_state = Rust_ScanState()
new_state.curlies = prev_state.curlies + 1
child = self.create_child_node(target.p, line, h)
stack.append(Target(child, new_state))
# Add all additional lines of the signature.
skip = self.skip # Don't change the ivar!
while skip > 0:
skip -= 1
i += 1
assert i < len(lines), (i, len(lines))
line = lines[i]
self.add_line(child, lines[i])
#@+node:ekr.20200316101240.6: *3* rust_i.starts_block
def starts_block(self, i, lines, new_state, prev_state):
"""True if the new state starts a block."""
self.headline = None
line = lines[i]
if prev_state.context:
return False
if not self.match_start_patterns(line):
return False
# Must not be a complete statement.
if line.find(';') > -1:
return False
return True
#@+node:ekr.20200316114132.1: *3* rust_i.get_new_dict
#@@nobeautify
def get_new_dict(self, context):
"""
Return a *general* state dictionary for the given context.
Subclasses may override...
"""
comment, block1, block2 = self.single_comment, self.block1, self.block2
def add_key(d, pattern, data):
key = pattern[0]
aList = d.get(key,[])
aList.append(data)
d[key] = aList
#
# About context dependent lifetime tokens:
# https://doc.rust-lang.org/stable/reference/tokens.html#lifetimes-and-loop-labels
#
# It looks like we can just ignore 'x' and 'x tokens.
if context:
d = {
# key kind pattern ends?
'\\': [('len+1', '\\', None),],
'"': [('len', '"', context == '"'),],
# "'": [('len', "'", context == "'"),],
}
if block1 and block2:
add_key(d, block2, ('len', block2, True))
else:
# Not in any context.
d = {
# key kind pattern new-ctx deltas
'\\':[('len+1', '\\', context, None)],
'"': [('len', '"', '"', None)],
# "'": [('len', "'", "'", None)],
'{': [('len', '{', context, (1,0,0))],
'}': [('len', '}', context, (-1,0,0))],
'(': [('len', '(', context, (0,1,0))],
')': [('len', ')', context, (0,-1,0))],
'[': [('len', '[', context, (0,0,1))],
']': [('len', ']', context, (0,0,-1))],
}
if comment:
add_key(d, comment, ('all', comment, '', None))
if block1 and block2:
add_key(d, block1, ('len', block1, block1, None))
return d
#@-others
#@+node:ekr.20200316101240.7: ** class Rust_ScanState
class Rust_ScanState:
"""A class representing the state of the line-oriented scan for rust."""
def __init__(self, d=None):
"""Rust_ScanSate ctor"""
if d:
prev = d.get('prev')
self.context = prev.context
self.curlies = prev.curlies
self.parens = prev.parens
else:
self.context = ''
self.curlies = 0
self.parens = 0
def __repr__(self):
"""Rust_ScanState.__repr__"""
return (
f"<Rust_ScanState "
f"context: {self.context!r} "
f"curlies: {self.curlies} "
f"parens: {self.parens}>")
__str__ = __repr__
#@+others
#@+node:ekr.20200316101240.8: *3* rust_state.level
def level(self):
"""Rust_ScanState.level."""
# return self.curlies
return (self.curlies, self.parens)
#@+node:ekr.20200316101240.9: *3* rust_state.update
def update(self, data):
"""
Update the state using the 6-tuple returned by i.scan_line.
Return i = data[1]
"""
context, i, delta_c, delta_p, delta_s, bs_nl = data
self.context = context
self.curlies += delta_c
self.parens += delta_p
return i
#@-others
#@-others
importer_dict = {
'class': Rust_Importer,
'extensions': ['.rs',],
}
#@@language python
#@@tabwidth -4
#@-leo
|
mayan/apps/events/migrations/0007_auto_20170802_0823.py | nattangwiwat/Mayan-EDMS-recitation | 343 | 11194303 | <reponame>nattangwiwat/Mayan-EDMS-recitation<filename>mayan/apps/events/migrations/0007_auto_20170802_0823.py
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('events', '0006_objecteventsubscription'),
]
operations = [
migrations.AlterModelOptions(
name='notification',
options={
'ordering': ('-action__timestamp',),
'verbose_name': 'Notification',
'verbose_name_plural': 'Notifications'
},
),
]
|
versions_tests/models.py | DocTocToc/cleanerversion | 121 | 11194310 | # -*- coding: utf-8 -*-
from django.db.models import CharField, IntegerField, Model, ForeignKey, \
CASCADE
from django.db.models.deletion import DO_NOTHING, PROTECT, SET, SET_NULL
from django.utils.encoding import python_2_unicode_compatible
from versions.fields import VersionedManyToManyField, VersionedForeignKey
from versions.models import Versionable
def versionable_description(obj):
return "<" + str(obj.__class__.__name__) + " object: " + \
obj.name + " {valid: [" + obj.version_start_date.isoformat() + \
" | " + \
(obj.version_end_date.isoformat()
if obj.version_end_date else "None") + \
"], created: " + obj.version_birth_date.isoformat() + "}>"
############################################
# The following model is used for:
# - CreationTest
# - DeletionTest
# - CurrentVersionTest
# - VersionedQuerySetTest
# - VersionNavigationTest
# - HistoricObjectsHandling
class B(Versionable):
name = CharField(max_length=200)
__str__ = versionable_description
############################################
# Models for
# - DeletionHandlerTest
# - OneToManyTest
# - PrefetchingTest
# - VersionNavigationAsOfTest
# - VersionRestoreTest
# - DetachTest
# - DeferredFieldsTest
# – VersionedAdminTest
@python_2_unicode_compatible
class City(Versionable):
name = CharField(max_length=200)
__str__ = versionable_description
@python_2_unicode_compatible
class Team(Versionable):
name = CharField(max_length=200)
city = VersionedForeignKey(City, null=True, on_delete=CASCADE)
__str__ = versionable_description
@python_2_unicode_compatible
class Player(Versionable):
name = CharField(max_length=200)
team = VersionedForeignKey(Team, null=True, on_delete=CASCADE)
__str__ = versionable_description
class Award(Versionable):
name = CharField(max_length=200)
players = VersionedManyToManyField(Player, related_name='awards')
@python_2_unicode_compatible
class Mascot(Versionable):
name = CharField(max_length=200)
team = VersionedForeignKey(Team, null=False, on_delete=CASCADE)
__str__ = versionable_description
def default_team():
return Team.objects.current.get(name__startswith='default_team.')
@python_2_unicode_compatible
class Fan(Versionable):
name = CharField(max_length=200)
team = VersionedForeignKey(Team, null=False, on_delete=SET(default_team))
__str__ = versionable_description
@python_2_unicode_compatible
class RabidFan(Versionable):
name = CharField(max_length=200)
team = VersionedForeignKey(Team, null=True, on_delete=SET_NULL)
__str__ = versionable_description
@python_2_unicode_compatible
class WizardFan(Versionable):
name = CharField(max_length=200)
team = VersionedForeignKey(Team, null=True, on_delete=PROTECT)
__str__ = versionable_description
@python_2_unicode_compatible
class NonFan(Versionable):
name = CharField(max_length=200)
team = VersionedForeignKey(Team, null=False, on_delete=DO_NOTHING)
__str__ = versionable_description
############################################
# SelfOneToManyTest models
class Directory(Versionable):
name = CharField(max_length=100)
parent = VersionedForeignKey('self', null=True, on_delete=CASCADE)
# ############################################
# MultiM2MTest models
@python_2_unicode_compatible
class Professor(Versionable):
name = CharField(max_length=200)
address = CharField(max_length=200)
phone_number = CharField(max_length=200)
__str__ = versionable_description
@python_2_unicode_compatible
class Classroom(Versionable):
name = CharField(max_length=200)
building = CharField(max_length=200)
__str__ = versionable_description
@python_2_unicode_compatible
class Student(Versionable):
name = CharField(max_length=200)
professors = VersionedManyToManyField("Professor", related_name='students')
classrooms = VersionedManyToManyField("Classroom", related_name='students')
__str__ = versionable_description
############################################
# MultiM2MToSameTest models
@python_2_unicode_compatible
class Pupil(Versionable):
name = CharField(max_length=200)
phone_number = CharField(max_length=200)
language_teachers = VersionedManyToManyField(
'Teacher', related_name='language_students')
science_teachers = VersionedManyToManyField(
'Teacher', related_name='science_students')
__str__ = versionable_description
@python_2_unicode_compatible
class Teacher(Versionable):
name = CharField(max_length=200)
domain = CharField(max_length=200)
__str__ = versionable_description
############################################
# ManyToManyFilteringTest models
@python_2_unicode_compatible
class C1(Versionable):
name = CharField(max_length=50)
c2s = VersionedManyToManyField("C2", related_name='c1s')
__str__ = versionable_description
@python_2_unicode_compatible
class C2(Versionable):
name = CharField(max_length=50)
c3s = VersionedManyToManyField("C3", related_name='c2s')
__str__ = versionable_description
@python_2_unicode_compatible
class C3(Versionable):
name = CharField(max_length=50)
__str__ = versionable_description
############################################
# HistoricM2MOperationsTests models
@python_2_unicode_compatible
class Observer(Versionable):
name = CharField(max_length=200)
__str__ = versionable_description
@python_2_unicode_compatible
class Subject(Versionable):
name = CharField(max_length=200)
observers = VersionedManyToManyField('Observer', related_name='subjects')
__str__ = versionable_description
############################################
# VersionUniqueTests models
class ChainStore(Versionable):
subchain_id = IntegerField()
city = CharField(max_length=40)
name = CharField(max_length=40)
opening_hours = CharField(max_length=40)
door_frame_color = VersionedForeignKey('Color', on_delete=CASCADE)
door_color = VersionedForeignKey('Color', related_name='cs',
on_delete=CASCADE)
# There are lots of these chain stores. They follow these rules:
# - only one store with the same name and subchain_id can exist in a
# single city
# - no two stores can share the same door_frame_color and door_color
# Yea, well, they want to appeal to people who want to be different.
VERSION_UNIQUE = [['subchain_id', 'city', 'name'],
['door_frame_color', 'door_color']]
class Color(Versionable):
name = CharField(max_length=40)
############################################
# IntegrationNonVersionableModelsTests models
@python_2_unicode_compatible
class Wine(Model):
name = CharField(max_length=200)
vintage = IntegerField()
def __str__(self):
return "<" + str(self.__class__.__name__) + " object: " + str(
self.name) + " (" + str(self.vintage) + ")>"
@python_2_unicode_compatible
class WineDrinker(Versionable):
name = CharField(max_length=200)
glass_content = ForeignKey(Wine, related_name='drinkers', null=True,
on_delete=CASCADE)
__str__ = versionable_description
@python_2_unicode_compatible
class WineDrinkerHat(Model):
shape_choices = [('Sailor', 'Sailor'),
('Cloche', 'Cloche'),
('Cartwheel', 'Cartwheel'),
('Turban', 'Turban'),
('Breton', 'Breton'),
('Vagabond', 'Vagabond')]
color = CharField(max_length=40)
shape = CharField(max_length=200, choices=shape_choices, default='Sailor')
wearer = VersionedForeignKey(WineDrinker, related_name='hats', null=True,
on_delete=CASCADE)
def __str__(self):
return "<" + str(self.__class__.__name__) + " object: " + str(
self.shape) + " (" + str(self.color) + ")>"
############################################
# SelfReferencingManyToManyTest models
class Person(Versionable):
name = CharField(max_length=200)
children = VersionedManyToManyField('self', symmetrical=False,
related_name='parents')
|
examples/imagenet/configs/fake_data_benchmark.py | mattjj/flax | 114 | 11194334 | # Copyright 2022 The Flax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Hyperparameter configuration for Fake data benchmark."""
import jax
from configs import default as default_lib
def get_config():
"""Get the hyperparameter configuration for Fake data benchmark."""
# Override default configuration to avoid duplication of field definition.
config = default_lib.get_config()
config.batch_size = 256 * jax.device_count()
config.half_precision = True
config.num_epochs = 5
# Previously the input pipeline computed:
# `steps_per_epoch` as input_pipeline.TRAIN_IMAGES // batch_size
config.num_train_steps = 1024 // config.batch_size
# and `steps_per_eval` as input_pipeline.EVAL_IMAGES // batch_size
config.steps_per_eval = 512 // config.batch_size
return config
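# Hedged usage sketch (not part of the original config): the returned object is a
# plain ml_collections ConfigDict, so the benchmark overrides can be inspected
# directly, e.g.
#
#     cfg = get_config()
#     print(cfg.batch_size, cfg.num_train_steps, cfg.steps_per_eval)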
|
preprocess/ljspeech.py | ishine/self-attention-tacotron | 111 | 11194335 | <filename>preprocess/ljspeech.py<gh_stars>100-1000
# ==============================================================================
# Copyright (c) 2018, Yamagishi Laboratory, National Institute of Informatics
# Author: <NAME> (<EMAIL>)
# All rights reserved.
# ==============================================================================
""" Preprocess for LJSpeech dataset. """
from pyspark import SparkContext, RDD, StorageLevel
import tensorflow as tf
import numpy as np
import os
from collections import namedtuple
from utils.tfrecord import bytes_feature, int64_feature, write_tfrecord
from utils.audio import Audio
from preprocess.cleaners import english_cleaners
from preprocess.text import text_to_sequence
class TextAndPath(namedtuple("TextAndPath", ["id", "key", "wav_path", "labels_path", "text"])):
pass
def write_preprocessed_target_data(_id: int, key: str, mel: np.ndarray, filename: str):
raw_mel = mel.tostring()
example = tf.train.Example(features=tf.train.Features(feature={
'id': int64_feature([_id]),
'key': bytes_feature([key.encode('utf-8')]),
'mel': bytes_feature([raw_mel]),
'target_length': int64_feature([len(mel)]),
'mel_width': int64_feature([mel.shape[1]]),
}))
write_tfrecord(example, filename)
def write_preprocessed_source_data(_id: int, key: str, source: np.ndarray, text, filename: str):
raw_source = source.tostring()
example = tf.train.Example(features=tf.train.Features(feature={
'id': int64_feature([_id]),
'key': bytes_feature([key.encode('utf-8')]),
'source': bytes_feature([raw_source]),
'source_length': int64_feature([len(source)]),
'text': bytes_feature([text.encode('utf-8')]),
}))
write_tfrecord(example, filename)
class MelStatistics(namedtuple("MelStatistics", ["id", "key", "max", "min", "sum", "length", "moment2"])):
pass
class TargetRDD:
def __init__(self, rdd: RDD):
self.rdd = rdd
def keys(self):
return self.rdd.map(lambda kv: kv[1].key).collect()
def max(self):
return self.rdd.map(lambda kv: kv[1].max).reduce(lambda a, b: np.maximum(a, b))
def min(self):
return self.rdd.map(lambda kv: kv[1].min).reduce(lambda a, b: np.minimum(a, b))
def average(self):
total_value = self.rdd.map(lambda kv: kv[1].sum).reduce(lambda a, b: a + b)
total_length = self.rdd.map(lambda kv: kv[1].length).reduce(lambda a, b: a + b)
return total_value / total_length
def moment2(self):
total_value = self.rdd.map(lambda kv: kv[1].moment2).reduce(lambda a, b: a + b)
total_length = self.rdd.map(lambda kv: kv[1].length).reduce(lambda a, b: a + b)
return total_value / total_length
class LJSpeech:
def __init__(self, in_dir, out_dir, hparams):
self.in_dir = in_dir
self.out_dir = out_dir
self.audio = Audio(hparams)
@property
def record_ids(self):
return map(lambda v: str(v), range(1, 13101))
def record_file_path(self, record_id, kind):
assert kind in ["source", "target"]
return os.path.join(self.out_dir, f"ljspeech-{kind}-{int(record_id):05d}.tfrecord")
def text_and_path_rdd(self, sc: SparkContext):
return sc.parallelize(
self._extract_all_text_and_path())
def process_targets(self, rdd: RDD):
return TargetRDD(rdd.mapValues(self._process_target).persist(StorageLevel.MEMORY_AND_DISK))
def process_sources(self, rdd: RDD):
return rdd.mapValues(self._process_source)
def _extract_text_and_path(self, line, index):
parts = line.strip().split('|')
key = parts[0]
text = parts[2]
wav_path = os.path.join(self.in_dir, 'wavs', '%s.wav' % key)
return TextAndPath(index, key, wav_path, None, text)
def _extract_all_text_and_path(self):
with open(os.path.join(self.in_dir, 'metadata.csv'), mode='r', encoding='utf-8') as f:
for index, line in enumerate(f):
extracted = self._extract_text_and_path(line, index)
if extracted is not None:
yield (index, extracted)
def _text_to_sequence(self, text):
sequence, clean_text = text_to_sequence(text, english_cleaners)
sequence = np.array(sequence, dtype=np.int64)
return sequence, clean_text
def _process_target(self, paths: TextAndPath):
wav = self.audio.load_wav(paths.wav_path)
mel_spectrogram = self.audio.melspectrogram(wav).astype(np.float32).T
filename = f"{paths.key}.target.tfrecord"
filepath = os.path.join(self.out_dir, filename)
write_preprocessed_target_data(paths.id, paths.key, mel_spectrogram, filepath)
return MelStatistics(id=paths.id,
key=paths.key,
min=np.min(mel_spectrogram, axis=0),
max=np.max(mel_spectrogram, axis=0),
sum=np.sum(mel_spectrogram, axis=0),
length=len(mel_spectrogram),
moment2=np.sum(np.square(mel_spectrogram), axis=0))
def _process_source(self, paths: TextAndPath):
sequence, clean_text = self._text_to_sequence(paths.text)
filename = f"{paths.key}.source.tfrecord"
filepath = os.path.join(self.out_dir, filename)
write_preprocessed_source_data(paths.id, paths.key, sequence, clean_text, filepath)
return paths.key
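# Hedged usage sketch (not part of the original module): preprocessing is driven
# from a Spark job roughly like the following; the paths and hparams object are
# placeholders.
#
#     sc = SparkContext()
#     ljspeech = LJSpeech(in_dir="/data/LJSpeech-1.1",
#                         out_dir="/data/ljspeech-tfrecords", hparams=hparams)
#     rdd = ljspeech.text_and_path_rdd(sc)
#     target_rdd = ljspeech.process_targets(rdd)
#     ljspeech.process_sources(rdd).collect()
#     print(target_rdd.average())     # per-channel mel statistics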
|
desktop/libs/libzookeeper/src/libzookeeper/models.py | yetsun/hue | 5,079 | 11194337 | <filename>desktop/libs/libzookeeper/src/libzookeeper/models.py<gh_stars>1000+
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from builtins import object
import logging
import os
from kazoo.client import KazooClient
from hadoop import cluster
from libzookeeper.conf import ENSEMBLE, PRINCIPAL_NAME
LOG = logging.getLogger(__name__)
class ReadOnlyClientException(Exception):
pass
class ZookeeperConfigurationException(Exception):
pass
class ZookeeperClient(object):
def __init__(self, hosts=None, read_only=True):
self.hosts = hosts if hosts else ENSEMBLE.get()
self.read_only = read_only
hdfs = cluster.get_hdfs()
if hdfs is None:
raise ZookeeperConfigurationException('No [hdfs] configured in hue.ini.')
if hdfs.security_enabled:
self.sasl_server_principal = PRINCIPAL_NAME.get()
else:
self.sasl_server_principal = None
self.zk = KazooClient(hosts=self.hosts,
read_only=self.read_only,
sasl_server_principal=self.sasl_server_principal)
def start(self):
"""Start the zookeeper session."""
self.zk.start()
def stop(self):
"""Stop the zookeeper session, but leaves the socket open."""
self.zk.stop()
def close(self):
"""Closes a stopped zookeeper socket."""
self.zk.close()
def get_children_data(self, namespace):
children = self.zk.get_children(namespace)
children_data = []
for node in children:
data, stat = self.zk.get("%s/%s" % (namespace, node))
children_data.append(data)
return children_data
def path_exists(self, namespace):
return self.zk.exists(namespace) is not None
def set(self, path, value, version=-1):
return self.zk.set(path, value, version)
def copy_path(self, namespace, filepath):
if self.read_only:
raise ReadOnlyClientException('Cannot execute copy_path when read_only is set to True.')
self.zk.ensure_path(namespace)
for dir, subdirs, files in os.walk(filepath):
path = dir.replace(filepath, '').strip('/')
if path:
node_path = '%s/%s' % (namespace, path)
self.zk.create(path=node_path, value='', makepath=True)
for filename in files:
node_path = '%s/%s/%s' % (namespace, path, filename)
with open(os.path.join(dir, filename), 'r') as f:
file_content = f.read()
self.zk.create(path=node_path, value=file_content, makepath=True)
def delete_path(self, namespace):
if self.read_only:
raise ReadOnlyClientException('Cannot execute delete_path when read_only is set to True.')
self.zk.delete(namespace, recursive=True)
def __enter__(self):
"""Start a zookeeper session and return a `with` context."""
self.zk.start()
return self
def __exit__(self, exc_type, exc_value, traceback):
"""Stops and closes zookeeper session at the end of the `with` context."""
try:
self.stop()
finally:
self.close()
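# Hedged usage sketch (not part of the original module): the client is intended to
# be used as a context manager so the session is always stopped and closed; the
# host string and znode path below are placeholders.
#
#     with ZookeeperClient(hosts='localhost:2181', read_only=True) as client:
#         if client.path_exists('/hue'):
#             for data in client.get_children_data('/hue'):
#                 print(data)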
|
src/data_loader/datasets.py | yewzijian/RPMNet | 223 | 11194367 | <reponame>yewzijian/RPMNet
"""Data loader
"""
import argparse
import logging
import os
from typing import List
import h5py
import numpy as np
import open3d as o3d
from torch.utils.data import Dataset
import torchvision
import data_loader.transforms as Transforms
import common.math.se3 as se3
_logger = logging.getLogger()
def get_train_datasets(args: argparse.Namespace):
train_categories, val_categories = None, None
if args.train_categoryfile:
train_categories = [line.rstrip('\n') for line in open(args.train_categoryfile)]
train_categories.sort()
if args.val_categoryfile:
val_categories = [line.rstrip('\n') for line in open(args.val_categoryfile)]
val_categories.sort()
train_transforms, val_transforms = get_transforms(args.noise_type, args.rot_mag, args.trans_mag,
args.num_points, args.partial)
_logger.info('Train transforms: {}'.format(', '.join([type(t).__name__ for t in train_transforms])))
_logger.info('Val transforms: {}'.format(', '.join([type(t).__name__ for t in val_transforms])))
train_transforms = torchvision.transforms.Compose(train_transforms)
val_transforms = torchvision.transforms.Compose(val_transforms)
if args.dataset_type == 'modelnet_hdf':
train_data = ModelNetHdf(args.dataset_path, subset='train', categories=train_categories,
transform=train_transforms)
val_data = ModelNetHdf(args.dataset_path, subset='test', categories=val_categories,
transform=val_transforms)
else:
raise NotImplementedError
return train_data, val_data
def get_test_datasets(args: argparse.Namespace):
test_categories = None
if args.test_category_file:
test_categories = [line.rstrip('\n') for line in open(args.test_category_file)]
test_categories.sort()
_, test_transforms = get_transforms(args.noise_type, args.rot_mag, args.trans_mag,
args.num_points, args.partial)
_logger.info('Test transforms: {}'.format(', '.join([type(t).__name__ for t in test_transforms])))
test_transforms = torchvision.transforms.Compose(test_transforms)
if args.dataset_type == 'modelnet_hdf':
test_data = ModelNetHdf(args.dataset_path, subset='test', categories=test_categories,
transform=test_transforms)
else:
raise NotImplementedError
return test_data
def get_transforms(noise_type: str,
                   rot_mag: float = 45.0, trans_mag: float = 0.5,
                   num_points: int = 1024, partial_p_keep: List = None):
    """Get the list of transformations to be used for training or evaluating RPMNet
Args:
noise_type: Either 'clean', 'jitter', 'crop'.
Depending on the option, some of the subsequent arguments may be ignored.
rot_mag: Magnitude of rotation perturbation to apply to source, in degrees.
Default: 45.0 (same as Deep Closest Point)
trans_mag: Magnitude of translation perturbation to apply to source.
Default: 0.5 (same as Deep Closest Point)
num_points: Number of points to uniformly resample to.
Note that this is with respect to the full point cloud. The number of
points will be proportionally less if cropped
partial_p_keep: Proportion to keep during cropping, [src_p, ref_p]
Default: [0.7, 0.7], i.e. Crop both source and reference to ~70%
Returns:
        train_transforms, test_transforms: Both contain the list of transformations to be applied
"""
partial_p_keep = partial_p_keep if partial_p_keep is not None else [0.7, 0.7]
if noise_type == "clean":
# 1-1 correspondence for each point (resample first before splitting), no noise
train_transforms = [Transforms.Resampler(num_points),
Transforms.SplitSourceRef(),
Transforms.RandomTransformSE3_euler(rot_mag=rot_mag, trans_mag=trans_mag),
Transforms.ShufflePoints()]
test_transforms = [Transforms.SetDeterministic(),
Transforms.FixedResampler(num_points),
Transforms.SplitSourceRef(),
Transforms.RandomTransformSE3_euler(rot_mag=rot_mag, trans_mag=trans_mag),
Transforms.ShufflePoints()]
elif noise_type == "jitter":
# Points randomly sampled (might not have perfect correspondence), gaussian noise to position
train_transforms = [Transforms.SplitSourceRef(),
Transforms.RandomTransformSE3_euler(rot_mag=rot_mag, trans_mag=trans_mag),
Transforms.Resampler(num_points),
Transforms.RandomJitter(),
Transforms.ShufflePoints()]
test_transforms = [Transforms.SetDeterministic(),
Transforms.SplitSourceRef(),
Transforms.RandomTransformSE3_euler(rot_mag=rot_mag, trans_mag=trans_mag),
Transforms.Resampler(num_points),
Transforms.RandomJitter(),
Transforms.ShufflePoints()]
elif noise_type == "crop":
# Both source and reference point clouds cropped, plus same noise in "jitter"
train_transforms = [Transforms.SplitSourceRef(),
Transforms.RandomCrop(partial_p_keep),
Transforms.RandomTransformSE3_euler(rot_mag=rot_mag, trans_mag=trans_mag),
Transforms.Resampler(num_points),
Transforms.RandomJitter(),
Transforms.ShufflePoints()]
test_transforms = [Transforms.SetDeterministic(),
Transforms.SplitSourceRef(),
Transforms.RandomCrop(partial_p_keep),
Transforms.RandomTransformSE3_euler(rot_mag=rot_mag, trans_mag=trans_mag),
Transforms.Resampler(num_points),
Transforms.RandomJitter(),
Transforms.ShufflePoints()]
else:
raise NotImplementedError
return train_transforms, test_transforms
class ModelNetHdf(Dataset):
def __init__(self, dataset_path: str, subset: str = 'train', categories: List = None, transform=None):
"""ModelNet40 dataset from PointNet.
Automatically downloads the dataset if not available
Args:
dataset_path (str): Folder containing processed dataset
subset (str): Dataset subset, either 'train' or 'test'
categories (list): Categories to use
transform (callable, optional): Optional transform to be applied
on a sample.
"""
self._logger = logging.getLogger(self.__class__.__name__)
self._root = dataset_path
metadata_fpath = os.path.join(self._root, '{}_files.txt'.format(subset))
self._logger.info('Loading data from {} for {}'.format(metadata_fpath, subset))
if not os.path.exists(os.path.join(dataset_path)):
self._download_dataset(dataset_path)
with open(os.path.join(dataset_path, 'shape_names.txt')) as fid:
self._classes = [l.strip() for l in fid]
self._category2idx = {e[1]: e[0] for e in enumerate(self._classes)}
self._idx2category = self._classes
with open(os.path.join(dataset_path, '{}_files.txt'.format(subset))) as fid:
h5_filelist = [line.strip() for line in fid]
h5_filelist = [x.replace('data/modelnet40_ply_hdf5_2048/', '') for x in h5_filelist]
h5_filelist = [os.path.join(self._root, f) for f in h5_filelist]
if categories is not None:
categories_idx = [self._category2idx[c] for c in categories]
self._logger.info('Categories used: {}.'.format(categories_idx))
self._classes = categories
else:
categories_idx = None
self._logger.info('Using all categories.')
self._data, self._labels = self._read_h5_files(h5_filelist, categories_idx)
# self._data, self._labels = self._data[:32], self._labels[:32, ...]
self._transform = transform
self._logger.info('Loaded {} {} instances.'.format(self._data.shape[0], subset))
def __getitem__(self, item):
sample = {'points': self._data[item, :, :], 'label': self._labels[item], 'idx': np.array(item, dtype=np.int32)}
if self._transform:
sample = self._transform(sample)
return sample
def __len__(self):
return self._data.shape[0]
@property
def classes(self):
return self._classes
@staticmethod
def _read_h5_files(fnames, categories):
all_data = []
all_labels = []
for fname in fnames:
f = h5py.File(fname, mode='r')
data = np.concatenate([f['data'][:], f['normal'][:]], axis=-1)
labels = f['label'][:].flatten().astype(np.int64)
if categories is not None: # Filter out unwanted categories
mask = np.isin(labels, categories).flatten()
data = data[mask, ...]
labels = labels[mask, ...]
all_data.append(data)
all_labels.append(labels)
all_data = np.concatenate(all_data, axis=0)
all_labels = np.concatenate(all_labels, axis=0)
return all_data, all_labels
@staticmethod
def _download_dataset(dataset_path: str):
os.makedirs(dataset_path, exist_ok=True)
www = 'https://rpmnet.s3.us-east-2.amazonaws.com/modelnet40_ply_hdf5_2048.zip'
zipfile = os.path.basename(www)
os.system('wget {}'.format(www))
os.system('unzip {} -d .'.format(zipfile))
os.system('mv {} {}'.format(zipfile[:-4], os.path.dirname(dataset_path)))
os.system('rm {}'.format(zipfile))
def to_category(self, i):
return self._idx2category[i]
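
# Minimal usage sketch (not part of the original file). The dataset path below is
# a hypothetical location; in practice the path comes from --dataset_path and the
# transforms from get_transforms()/get_train_datasets() above.
#
#   _, test_transforms = get_transforms(noise_type='crop', num_points=1024)
#   dataset = ModelNetHdf('datasets/modelnet40_ply_hdf5_2048', subset='test',
#                         categories=['airplane', 'chair'],
#                         transform=torchvision.transforms.Compose(test_transforms))
#   sample = dataset[0]  # dict with 'points', 'label', 'idx' (plus transform outputs)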
|
src/genie/libs/parser/iosxe/tests/ShowPlatform/cli/equal/golden_output_c9200_expected.py | balmasea/genieparser | 204 | 11194373 | expected_output = {
'main': {
'mac_persistency_wait_time': 'indefinite',
'switch_mac_address': 'dc8c.37ff.ad21',
'swstack': True,
},
'slot': {
'1': {
'rp': {
'C9200-24P': {
'hw_ver': 'V01',
'mac_address': 'dc8c.37ff.ad21',
'name': 'C9200-24P',
'ports': '32',
'slot': '1',
'sn': 'JAD2310213C',
'state': 'Ready',
'sw_ver': '17.05.01',
'swstack_priority': '2',
'swstack_role': 'Active',
},
},
},
},
}
|
Machine Learning Projects/Xeno-Deep Learning library from scratch/xeno/utils/random.py | TeacherManoj0131/HacktoberFest2020-Contributions | 256 | 11194404 | <gh_stars>100-1000
# -*- coding: utf-8 -*-
import numpy as np
_rng = np.random
_dtype = 'float32'
def get_rng():
return _rng
def set_rng(rng):
global _rng
_rng = rng
def set_seed(seed):
global _rng
_rng = np.random.RandomState(seed)
def get_dtype():
return _dtype
def set_dtype(dtype):
global _dtype
_dtype = dtype |
fastai2/notebook/showdoc.py | Tato14/fastai2 | 380 | 11194405 | <filename>fastai2/notebook/showdoc.py
#AUTOGENERATED! DO NOT EDIT! File to edit: dev/92_notebook_showdoc.ipynb (unless otherwise specified).
__all__ = ['is_enum', 'add_pytorch_index', 'is_fastai_module', 'FASTAI_DOCS', 'doc_link', 'add_doc_links',
'get_source_link', 'SOURCE_URL', 'get_nb_source_link', 'FASTAI_NB_DEV', 'nb_source_link', 'type_repr',
'format_param', 'show_doc', 'md2html', 'doc']
#Cell
from ..core.imports import *
from .core import *
from .export import *
import inspect,enum,nbconvert
from IPython.display import Markdown,display
from IPython.core import page
from nbconvert import HTMLExporter
#Cell
def is_enum(cls):
"Check if `cls` is an enum or another type of class"
return type(cls) in (enum.Enum, enum.EnumMeta)
#Cell
def _get_pytorch_index():
if not (Path(__file__).parent/'index_pytorch.txt').exists(): return {}
return json.load(open(Path(__file__).parent/'index_pytorch.txt', 'r'))
def add_pytorch_index(func_name, url):
"Add `func_name` in the PyTorch index for automatic links."
index = _get_pytorch_index()
if not url.startswith("https://pytorch.org/docs/stable/"):
url = "https://pytorch.org/docs/stable/" + url
index[func_name] = url
json.dump(index, open(Path(__file__).parent/'index_pytorch.txt', 'w'), indent=2)
#Cell
def is_fastai_module(name):
"Test if `name` is a fastai module."
dir_name = os.path.sep.join(name.split('.'))
return (Path(__file__).parent.parent/f"{dir_name}.py").exists()
#Cell
#Might change once the library is renamed fastai.
def _is_fastai_class(ft): return belongs_to_module(ft, 'fastai_source')
def _strip_fastai(s): return re.sub(r'^local\.', '', s)
FASTAI_DOCS = ''
#Cell
def doc_link(name, include_bt:bool=True):
"Create link to documentation for `name`."
cname = f'`{name}`' if include_bt else name
#Link to modules
if is_fastai_module(name): return f'[{cname}]({FASTAI_DOCS}/{name}.html)'
#Link to fastai functions
try_fastai = source_nb(name, is_name=True)
if try_fastai:
page = '.'.join(try_fastai.split('_')[1:]).replace('.ipynb', '.html')
return f'[{cname}]({FASTAI_DOCS}/{page}#{name})'
#Link to PyTorch
try_pytorch = _get_pytorch_index().get(name, None)
if try_pytorch: return f'[{cname}]({try_pytorch})'
#Leave as is
return cname
#Cell
_re_backticks = re.compile(r"""
# Catches any link of the form \[`obj`\](old_link) or just `obj` to either update old links or add the link to the docs of obj
\[` # Opening [ and `
([^`]*) # Catching group with anything but a `
`\] # ` then closing ]
(?: # Beginning of non-catching group
\( # Opening (
[^)]* # Anything but a closing )
\) # Closing )
) # End of non-catching group
| # OR
` # Opening `
([^`]*) # Anything but a `
` # Closing `
""", re.VERBOSE)
#Cell
def add_doc_links(text):
"Search for doc links for any item between backticks in `text`."
def _replace_link(m): return doc_link(m.group(1) or m.group(2))
return _re_backticks.sub(_replace_link, text)
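
# Example (added for illustration; `my_func` is a hypothetical name):
#   add_doc_links("Wraps `my_func` and [`show_doc`](outdated.html)")
# Every name caught between backticks is passed through `doc_link`, which points it
# at the matching fastai notebook page, the PyTorch index, or leaves it as plain
# backticks when it cannot be resolved; stale link targets are replaced in the process.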
#Cell
def _is_type_dispatch(x): return type(x).__name__ == "TypeDispatch"
def _unwrapped_type_dispatch_func(x): return x.first() if _is_type_dispatch(x) else x
def _is_property(x): return type(x)==property
def _has_property_getter(x): return _is_property(x) and hasattr(x, 'fget') and hasattr(x.fget, 'func')
def _property_getter(x): return x.fget.func if _has_property_getter(x) else x
def _unwrapped_func(x):
x = _unwrapped_type_dispatch_func(x)
x = _property_getter(x)
return x
#Cell
SOURCE_URL = "https://github.com/fastai/fastai_dev/tree/master/dev/"
def get_source_link(func):
"Return link to `func` in source code"
func = _unwrapped_func(func)
try: line = inspect.getsourcelines(func)[1]
except Exception: return ''
module = inspect.getmodule(func).__name__.replace('.', '/') + '.py'
return f"{SOURCE_URL}{module}#L{line}"
#Cell
_re_header = re.compile(r"""
# Catches any header in markdown with the title in group 1
^\s* # Beginning of text followed by any number of whitespace
\#+ # One # or more
\s* # Any number of whitespace
(.*) # Catching group with anything
$ # End of text
""", re.VERBOSE)
#Cell
FASTAI_NB_DEV = 'https://nbviewer.jupyter.org/github/fastai/fastai_docs/blob/master/dev/'
def get_nb_source_link(func, local=False, is_name=None):
"Return a link to the notebook where `func` is defined."
func = _unwrapped_type_dispatch_func(func)
pref = '' if local else FASTAI_NB_DEV
is_name = is_name or isinstance(func, str)
src = source_nb(func, is_name=is_name, return_all=True)
if src is None: return '' if is_name else get_source_link(func)
find_name,nb_name = src
nb = read_nb(nb_name)
pat = re.compile(f'^{find_name}\s+=|^(def|class)\s+{find_name}\s*\(', re.MULTILINE)
if len(find_name.split('.')) == 2:
clas,func = find_name.split('.')
pat2 = re.compile(f'@patch\s*\ndef\s+{func}\s*\([^:]*:\s*{clas}\s*(?:,|\))')
else: pat2 = None
for i,cell in enumerate(nb['cells']):
if cell['cell_type'] == 'code':
if re.search(pat, cell['source']): break
if pat2 is not None and re.search(pat2, cell['source']): break
if re.search(pat, cell['source']) is None and (pat2 is not None and re.search(pat2, cell['source']) is None):
return '' if is_name else get_function_source(func)
header_pat = re.compile(r'^\s*#+\s*(.*)$')
while i >= 0:
cell = nb['cells'][i]
if cell['cell_type'] == 'markdown' and _re_header.search(cell['source']):
title = _re_header.search(cell['source']).groups()[0]
anchor = '-'.join([s for s in title.split(' ') if len(s) > 0])
return f'{pref}{nb_name}#{anchor}'
i -= 1
return f'{pref}{nb_name}'
#Cell
def nb_source_link(func, is_name=None, disp=True):
"Show a relative link to the notebook where `func` is defined"
is_name = is_name or isinstance(func, str)
func_name = func if is_name else qual_name(func)
link = get_nb_source_link(func, local=True, is_name=is_name)
if disp: display(Markdown(f'[{func_name}]({link})'))
else: return link
#Cell
def type_repr(t):
"Representation of type `t` (in a type annotation)"
if getattr(t, '__args__', None):
args = t.__args__
if len(args)==2 and args[1] == type(None):
return f'`Optional`\[{type_repr(args[0])}\]'
reprs = ', '.join([type_repr(o) for o in args])
return f'{doc_link(get_name(t))}\[{reprs}\]'
else: return doc_link(get_name(t))
#Cell
_arg_prefixes = {inspect._VAR_POSITIONAL: '\*', inspect._VAR_KEYWORD:'\*\*'}
def format_param(p):
"Formats function param to `param1:Type=val`. Font weights: param1=bold, val=italic"
arg_prefix = _arg_prefixes.get(p.kind, '') # asterisk prefix for *args and **kwargs
res = f"**{arg_prefix}`{p.name}`**"
if hasattr(p, 'annotation') and p.annotation != p.empty: res += f':{type_repr(p.annotation)}'
if p.default != p.empty:
default = getattr(p.default, 'func', p.default) #For partials
default = getattr(default, '__name__', default) #Tries to find a name
if is_enum(default.__class__): #Enum have a crappy repr
res += f'=*`{default.__class__.__name__}.{default.name}`*'
else: res += f'=*`{repr(default)}`*'
return res
#Cell
def _format_enum_doc(enum, full_name):
"Formatted `enum` definition to show in documentation"
vals = ', '.join(enum.__members__.keys())
return f'<code>{full_name}</code>',f'<code>Enum</code> = [{vals}]'
#Cell
def _escape_chars(s):
return s.replace('_', '\_')
def _format_func_doc(func, full_name=None):
"Formatted `func` definition to show in documentation"
try:
sig = inspect.signature(func)
fmt_params = [format_param(param) for name,param
in sig.parameters.items() if name not in ('self','cls')]
except: fmt_params = []
name = f'<code>{full_name or func.__name__}</code>'
arg_str = f"({', '.join(fmt_params)})"
f_name = f"<code>class</code> {name}" if inspect.isclass(func) else name
return f'{f_name}',f'{name}{arg_str}'
#Cell
def _format_cls_doc(cls, full_name):
"Formatted `cls` definition to show in documentation"
parent_class = inspect.getclasstree([cls])[-1][0][1][0]
name,args = _format_func_doc(cls, full_name)
if parent_class != object: args += f' :: {doc_link(get_name(parent_class))}'
return name,args
#Cell
def show_doc(elt, doc_string=True, name=None, title_level=None, disp=True, default_cls_level=2):
"Show documentation for element `elt`. Supported types: class, function, and enum."
elt = getattr(elt, '__func__', elt)
qname = name or qual_name(elt)
if inspect.isclass(elt):
if is_enum(elt.__class__): name,args = _format_enum_doc(elt, qname)
else: name,args = _format_cls_doc (elt, qname)
elif callable(elt): name,args = _format_func_doc(elt, qname)
else: name,args = f"<code>{qname}</code>", ''
link = get_source_link(elt) #TODO: use get_source_link when it works
source_link = f'<a href="{link}" class="source_link" style="float:right">[source]</a>'
title_level = title_level or (default_cls_level if inspect.isclass(elt) else 4)
doc = f'<h{title_level} id="{qname}" class="doc_header">{name}{source_link}</h{title_level}>'
doc += f'\n\n> {args}\n\n' if len(args) > 0 else '\n\n'
if doc_string and inspect.getdoc(elt): doc += add_doc_links(inspect.getdoc(elt))
if disp: display(Markdown(doc))
else: return doc
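
# Typical notebook usage (illustrative sketch, not part of the generated module;
# `DataLoader` stands in for any documented fastai object):
#   show_doc(DataLoader)                               # renders header, signature and docstring
#   show_doc(DataLoader.one_batch)                     # functions/methods get a level-4 header by default
#   show_doc(DataLoader, title_level=3, disp=False)    # return the markdown string instead of displaying it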
#Cell
def md2html(md):
"Convert markdown `md` to HTML code"
if nbconvert.__version__ < '5.5.0': return HTMLExporter().markdown2html(md)
else: return HTMLExporter().markdown2html(defaultdict(lambda: defaultdict(dict)), md)
#Cell
def doc(elt):
"Show `show_doc` info in preview window"
md = show_doc(elt, disp=False)
output = md2html(md)
if IN_COLAB: get_ipython().run_cell_magic(u'html', u'', output)
else:
try: page.page({'text/html': output})
except: display(Markdown(md)) |
glue/train_multitask.py | ziwei7437/bert_on_stilts | 105 | 11194408 | import argparse
import json
import os
import pandas as pd
import logging
from glue.tasks import get_task, MnliMismatchedProcessor
from glue.runners import GlueTaskRunner, RunnerParameters
from glue import model_setup as glue_model_setup
from shared import model_setup as shared_model_setup
from pytorch_pretrained_bert.utils import at_most_one_of
import shared.initialization as initialization
import shared.log_info as log_info
# todo: cleanup imports
def get_args(*in_args):
parser = argparse.ArgumentParser()
# === Required parameters === #
parser.add_argument("--data_dir",
default=None,
type=str,
help="The input data dir. Should contain the .tsv files (or other data files) for the task.")
parser.add_argument("--bert_model", default=None, type=str, required=True,
help="Bert pre-trained model selected in the list: bert-base-uncased, "
"bert-large-uncased, bert-base-cased, bert-large-cased, bert-base-multilingual-uncased, "
"bert-base-multilingual-cased, bert-base-chinese.")
parser.add_argument("--task_name",
default=None,
type=str,
required=True,
help="The name of the task to train.")
parser.add_argument("--output_dir",
default=None,
type=str,
required=True,
help="The output directory where the model predictions and checkpoints will be written.")
# === Model parameters === #
parser.add_argument("--bert_load_path", default=None, type=str)
parser.add_argument("--bert_load_mode", default="from_pretrained", type=str,
help="from_pretrained, model_only, state_model_only, state_all")
parser.add_argument("--bert_load_args", default=None, type=str)
parser.add_argument("--bert_config_json_path", default=None, type=str)
parser.add_argument("--bert_vocab_path", default=None, type=str)
parser.add_argument("--bert_save_mode", default="all", type=str)
# === Other parameters === #
parser.add_argument("--max_seq_length",
default=128,
type=int,
help="The maximum total input sequence length after WordPiece tokenization. \n"
"Sequences longer than this will be truncated, and sequences shorter \n"
"than this will be padded.")
parser.add_argument("--do_save", action="store_true")
parser.add_argument("--do_train",
action='store_true',
help="Whether to run training.")
parser.add_argument("--do_val",
action='store_true',
help="Whether to run eval on the dev set.")
parser.add_argument("--do_test",
action='store_true',
help="Whether to run eval on the test set.")
parser.add_argument("--do_val_history",
action='store_true',
help="")
parser.add_argument("--train_save_every", type=int, default=None)
parser.add_argument("--do_lower_case",
action='store_true',
help="Set this flag if you are using an uncased model.")
parser.add_argument("--train_batch_size",
default=32,
type=int,
help="Total batch size for training.")
parser.add_argument("--eval_batch_size",
default=32,
type=int,
help="Total batch size for eval.")
parser.add_argument("--learning_rate",
default=5e-5,
type=float,
help="The initial learning rate for Adam.")
parser.add_argument("--num_train_epochs",
default=3.0,
type=float,
help="Total number of training epochs to perform.")
parser.add_argument("--warmup_proportion",
default=0.1,
type=float,
help="Proportion of training to perform linear learning rate warmup for. "
"E.g., 0.1 = 10%% of training.")
parser.add_argument("--no_cuda",
action='store_true',
help="Whether not to use CUDA when available")
parser.add_argument("--local_rank",
type=int,
default=-1,
help="local_rank for distributed training on gpus")
parser.add_argument('--seed',
type=int,
default=-1,
help="random seed for initialization")
parser.add_argument('--gradient_accumulation_steps',
type=int,
default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.")
parser.add_argument('--fp16',
action='store_true',
help="Whether to use 16-bit float precision instead of 32-bit")
parser.add_argument('--loss_scale',
type=float, default=0,
help="Loss scaling to improve fp16 numeric stability. "
"Only used when fp16 set to True.\n"
"0 (default value): dynamic loss scaling.\n"
"Positive power of 2: static loss scaling value.\n")
parser.add_argument('--print-trainable-params', action="store_true")
parser.add_argument('--not-verbose', action="store_true")
parser.add_argument('--force-overwrite', action="store_true")
args = parser.parse_args(*in_args)
return args
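
# Example invocation (illustrative only; paths are placeholders):
#
#   python glue/train_multitask.py \
#     --task_name mnli \
#     --bert_model bert-base-uncased \
#     --bert_load_mode from_pretrained \
#     --do_lower_case \
#     --data_dir /path/to/glue/MNLI \
#     --output_dir /path/to/output \
#     --do_train --do_val --do_save
#
# Every flag above is defined in get_args(); --bert_model, --task_name and
# --output_dir are required, the rest fall back to their defaults.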
def main():
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO)
logger = logging.getLogger(__name__)
args = get_args()
log_info.print_args(args)
device, n_gpu = initialization.init_cuda_from_args(args, logger=logger)
initialization.init_seed(args, n_gpu=n_gpu, logger=logger)
initialization.init_train_batch_size(args)
initialization.init_output_dir(args)
initialization.save_args(args)
task = get_task(args.task_name, args.data_dir)
tokenizer = shared_model_setup.create_tokenizer(
bert_model_name=args.bert_model,
bert_load_mode=args.bert_load_mode,
do_lower_case=args.do_lower_case,
bert_vocab_path=args.bert_vocab_path,
)
all_state = shared_model_setup.load_overall_state(args.bert_load_path, relaxed=True)
model = glue_model_setup.create_model(
task_type=task.processor.TASK_TYPE,
bert_model_name=args.bert_model,
bert_load_mode=args.bert_load_mode,
bert_load_args=args.bert_load_args,
all_state=all_state,
num_labels=len(task.processor.get_labels()),
device=device,
n_gpu=n_gpu,
fp16=args.fp16,
local_rank=args.local_rank,
bert_config_json_path=args.bert_config_json_path,
)
if args.do_train:
if args.print_trainable_params:
log_info.print_trainable_params(model)
train_examples = task.get_train_examples()
t_total = shared_model_setup.get_opt_train_steps(
num_train_examples=len(train_examples),
args=args,
)
optimizer = shared_model_setup.create_optimizer(
model=model,
learning_rate=args.learning_rate,
t_total=t_total,
loss_scale=args.loss_scale,
fp16=args.fp16,
warmup_proportion=args.warmup_proportion,
state_dict=all_state["optimizer"] if args.bert_load_mode == "state_all" else None,
)
else:
train_examples = None
t_total = 0
optimizer = None
runner = GlueTaskRunner(
model=model,
optimizer=optimizer,
tokenizer=tokenizer,
label_list=task.get_labels(),
device=device,
rparams=RunnerParameters(
max_seq_length=args.max_seq_length,
local_rank=args.local_rank, n_gpu=n_gpu, fp16=args.fp16,
learning_rate=args.learning_rate, gradient_accumulation_steps=args.gradient_accumulation_steps,
t_total=t_total, warmup_proportion=args.warmup_proportion,
num_train_epochs=args.num_train_epochs,
train_batch_size=args.train_batch_size, eval_batch_size=args.eval_batch_size,
)
)
if args.do_train:
assert at_most_one_of([args.do_val_history, args.train_save_every])
if args.do_val_history:
val_examples = task.get_dev_examples()
results = runner.run_train_val(
train_examples=train_examples,
val_examples=val_examples,
task_name=task.name,
)
metrics_str = json.dumps(results, indent=2)
with open(os.path.join(args.output_dir, "val_metrics_history.json"), "w") as f:
f.write(metrics_str)
elif args.train_save_every:
train_dataloader = runner.get_train_dataloader(train_examples, verbose=not args.not_verbose)
for epoch in range(int(args.num_train_epochs)):
for step, _, _ in runner.run_train_epoch_context(train_dataloader):
if step % args.train_save_every == args.train_save_every - 1 \
or step == len(train_dataloader) - 1:
glue_model_setup.save_bert(
model=model, optimizer=optimizer, args=args,
save_path=os.path.join(
args.output_dir, f"all_state___epoch{epoch:04d}___batch{step:06d}.p"
),
save_mode=args.bert_save_mode,
verbose=not args.not_verbose,
)
else:
runner.run_train(train_examples)
if args.do_save:
# Save a trained model
glue_model_setup.save_bert(
model=model, optimizer=optimizer, args=args,
save_path=os.path.join(args.output_dir, "all_state.p"),
save_mode=args.bert_save_mode,
)
if args.do_val:
val_examples = task.get_dev_examples()
results = runner.run_val(val_examples, task_name=task.name, verbose=not args.not_verbose)
df = pd.DataFrame(results["logits"])
df.to_csv(os.path.join(args.output_dir, "val_preds.csv"), header=False, index=False)
metrics_str = json.dumps({"loss": results["loss"], "metrics": results["metrics"]}, indent=2)
print(metrics_str)
with open(os.path.join(args.output_dir, "val_metrics.json"), "w") as f:
f.write(metrics_str)
# HACK for MNLI-mismatched
if task.name == "mnli":
mm_val_examples = MnliMismatchedProcessor().get_dev_examples(task.data_dir)
mm_results = runner.run_val(mm_val_examples, task_name=task.name, verbose=not args.not_verbose)
            df = pd.DataFrame(mm_results["logits"])
df.to_csv(os.path.join(args.output_dir, "mm_val_preds.csv"), header=False, index=False)
combined_metrics = {}
            # Merge matched and mismatched metrics into a single dict
            for k, v in results["metrics"].items():
                combined_metrics[k] = v
            for k, v in mm_results["metrics"].items():
                combined_metrics["mm-"+k] = v
combined_metrics_str = json.dumps({
"loss": results["loss"],
"metrics": combined_metrics,
}, indent=2)
with open(os.path.join(args.output_dir, "val_metrics.json"), "w") as f:
f.write(combined_metrics_str)
if args.do_test:
test_examples = task.get_test_examples()
logits = runner.run_test(test_examples, verbose=not args.not_verbose)
df = pd.DataFrame(logits)
df.to_csv(os.path.join(args.output_dir, "test_preds.csv"), header=False, index=False)
# HACK for MNLI-mismatched
if task.name == "mnli":
test_examples = MnliMismatchedProcessor().get_test_examples(task.data_dir)
logits = runner.run_test(test_examples)
df = pd.DataFrame(logits)
df.to_csv(os.path.join(args.output_dir, "mm_test_preds.csv"), header=False, index=False)
if __name__ == "__main__":
main()
|
tests/test_get_option.py | jacobwhall/panflute | 361 | 11194417 | import panflute as pf
def test_get_variable():
doc = pf.Doc(metadata={"a": pf.MetaString("x"),
"b": pf.MetaMap(c=pf.MetaString("y"))})
assert pf.get_option(default="a") == "a"
assert pf.get_option({"a": 1}, "a") == 1
assert pf.get_option({"a": None}, "a", default=2) == 2
assert pf.get_option({"a": None}, "a", doc, "a") == "x"
assert pf.get_option(doc=doc, doc_tag="b.c") == "y"
|
python/ua_gec/__init__.py | kaidisn/ua-gec | 181 | 11194446 | <reponame>kaidisn/ua-gec
from .corpus import Corpus, Document
from .annotated_text import AnnotatedText
|
apim-migration-testing-tool/Python/venv/lib/python3.6/site-packages/google/protobuf/descriptor.py | tharindu1st/apim-migration-resources | 4,071 | 11194447 | # Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# https://developers.google.com/protocol-buffers/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Descriptors essentially contain exactly the information found in a .proto
file, in types that make this information accessible in Python.
"""
__author__ = '<EMAIL> (<NAME>)'
import threading
import six
from google.protobuf.internal import api_implementation
_USE_C_DESCRIPTORS = False
if api_implementation.Type() == 'cpp':
# Used by MakeDescriptor in cpp mode
import binascii
import os
from google.protobuf.pyext import _message
_USE_C_DESCRIPTORS = getattr(_message, '_USE_C_DESCRIPTORS', False)
class Error(Exception):
"""Base error for this module."""
class TypeTransformationError(Error):
"""Error transforming between python proto type and corresponding C++ type."""
if _USE_C_DESCRIPTORS:
# This metaclass allows to override the behavior of code like
# isinstance(my_descriptor, FieldDescriptor)
# and make it return True when the descriptor is an instance of the extension
# type written in C++.
class DescriptorMetaclass(type):
def __instancecheck__(cls, obj):
if super(DescriptorMetaclass, cls).__instancecheck__(obj):
return True
if isinstance(obj, cls._C_DESCRIPTOR_CLASS):
return True
return False
else:
# The standard metaclass; nothing changes.
DescriptorMetaclass = type
class _Lock(object):
"""Wrapper class of threading.Lock(), which is allowed by 'with'."""
def __new__(cls):
self = object.__new__(cls)
self._lock = threading.Lock() # pylint: disable=protected-access
return self
def __enter__(self):
self._lock.acquire()
def __exit__(self, exc_type, exc_value, exc_tb):
self._lock.release()
_lock = threading.Lock()
class DescriptorBase(six.with_metaclass(DescriptorMetaclass)):
"""Descriptors base class.
This class is the base of all descriptor classes. It provides common options
related functionality.
Attributes:
has_options: True if the descriptor has non-default options. Usually it
is not necessary to read this -- just call GetOptions() which will
happily return the default instance. However, it's sometimes useful
for efficiency, and also useful inside the protobuf implementation to
avoid some bootstrapping issues.
"""
if _USE_C_DESCRIPTORS:
# The class, or tuple of classes, that are considered as "virtual
# subclasses" of this descriptor class.
_C_DESCRIPTOR_CLASS = ()
def __init__(self, options, serialized_options, options_class_name):
"""Initialize the descriptor given its options message and the name of the
class of the options message. The name of the class is required in case
the options message is None and has to be created.
"""
self._options = options
self._options_class_name = options_class_name
self._serialized_options = serialized_options
# Does this descriptor have non-default options?
self.has_options = (options is not None) or (serialized_options is not None)
def _SetOptions(self, options, options_class_name):
"""Sets the descriptor's options
This function is used in generated proto2 files to update descriptor
options. It must not be used outside proto2.
"""
self._options = options
self._options_class_name = options_class_name
# Does this descriptor have non-default options?
self.has_options = options is not None
def GetOptions(self):
"""Retrieves descriptor options.
This method returns the options set or creates the default options for the
descriptor.
"""
if self._options:
return self._options
from google.protobuf import descriptor_pb2
try:
options_class = getattr(descriptor_pb2,
self._options_class_name)
except AttributeError:
raise RuntimeError('Unknown options class name %s!' %
(self._options_class_name))
with _lock:
if self._serialized_options is None:
self._options = options_class()
else:
self._options = _ParseOptions(options_class(),
self._serialized_options)
return self._options
class _NestedDescriptorBase(DescriptorBase):
"""Common class for descriptors that can be nested."""
def __init__(self, options, options_class_name, name, full_name,
file, containing_type, serialized_start=None,
serialized_end=None, serialized_options=None):
"""Constructor.
Args:
options: Protocol message options or None
to use default message options.
options_class_name: (str) The class name of the above options.
name: (str) Name of this protocol message type.
full_name: (str) Fully-qualified name of this protocol message type,
which will include protocol "package" name and the name of any
enclosing types.
file: (FileDescriptor) Reference to file info.
containing_type: if provided, this is a nested descriptor, with this
descriptor as parent, otherwise None.
serialized_start: The start index (inclusive) in block in the
file.serialized_pb that describes this descriptor.
serialized_end: The end index (exclusive) in block in the
file.serialized_pb that describes this descriptor.
      serialized_options: Protocol message serialized options or None.
"""
super(_NestedDescriptorBase, self).__init__(
options, serialized_options, options_class_name)
self.name = name
# TODO(falk): Add function to calculate full_name instead of having it in
# memory?
self.full_name = full_name
self.file = file
self.containing_type = containing_type
self._serialized_start = serialized_start
self._serialized_end = serialized_end
def CopyToProto(self, proto):
"""Copies this to the matching proto in descriptor_pb2.
Args:
proto: An empty proto instance from descriptor_pb2.
Raises:
      Error: If self couldn't be serialized, due to too few constructor arguments.
"""
if (self.file is not None and
self._serialized_start is not None and
self._serialized_end is not None):
proto.ParseFromString(self.file.serialized_pb[
self._serialized_start:self._serialized_end])
else:
raise Error('Descriptor does not contain serialization.')
class Descriptor(_NestedDescriptorBase):
"""Descriptor for a protocol message type.
A Descriptor instance has the following attributes:
name: (str) Name of this protocol message type.
full_name: (str) Fully-qualified name of this protocol message type,
which will include protocol "package" name and the name of any
enclosing types.
containing_type: (Descriptor) Reference to the descriptor of the
type containing us, or None if this is top-level.
fields: (list of FieldDescriptors) Field descriptors for all
fields in this type.
fields_by_number: (dict int -> FieldDescriptor) Same FieldDescriptor
objects as in |fields|, but indexed by "number" attribute in each
FieldDescriptor.
fields_by_name: (dict str -> FieldDescriptor) Same FieldDescriptor
objects as in |fields|, but indexed by "name" attribute in each
FieldDescriptor.
fields_by_camelcase_name: (dict str -> FieldDescriptor) Same
FieldDescriptor objects as in |fields|, but indexed by
"camelcase_name" attribute in each FieldDescriptor.
nested_types: (list of Descriptors) Descriptor references
for all protocol message types nested within this one.
nested_types_by_name: (dict str -> Descriptor) Same Descriptor
objects as in |nested_types|, but indexed by "name" attribute
in each Descriptor.
enum_types: (list of EnumDescriptors) EnumDescriptor references
for all enums contained within this type.
enum_types_by_name: (dict str ->EnumDescriptor) Same EnumDescriptor
objects as in |enum_types|, but indexed by "name" attribute
in each EnumDescriptor.
enum_values_by_name: (dict str -> EnumValueDescriptor) Dict mapping
from enum value name to EnumValueDescriptor for that value.
extensions: (list of FieldDescriptor) All extensions defined directly
within this message type (NOT within a nested type).
extensions_by_name: (dict, string -> FieldDescriptor) Same FieldDescriptor
objects as |extensions|, but indexed by "name" attribute of each
FieldDescriptor.
is_extendable: Does this type define any extension ranges?
oneofs: (list of OneofDescriptor) The list of descriptors for oneof fields
in this message.
oneofs_by_name: (dict str -> OneofDescriptor) Same objects as in |oneofs|,
but indexed by "name" attribute.
file: (FileDescriptor) Reference to file descriptor.
"""
if _USE_C_DESCRIPTORS:
_C_DESCRIPTOR_CLASS = _message.Descriptor
def __new__(cls, name, full_name, filename, containing_type, fields,
nested_types, enum_types, extensions, options=None,
serialized_options=None,
is_extendable=True, extension_ranges=None, oneofs=None,
file=None, serialized_start=None, serialized_end=None, # pylint: disable=redefined-builtin
syntax=None):
_message.Message._CheckCalledFromGeneratedFile()
return _message.default_pool.FindMessageTypeByName(full_name)
# NOTE(tmarek): The file argument redefining a builtin is nothing we can
# fix right now since we don't know how many clients already rely on the
# name of the argument.
def __init__(self, name, full_name, filename, containing_type, fields,
nested_types, enum_types, extensions, options=None,
serialized_options=None,
is_extendable=True, extension_ranges=None, oneofs=None,
file=None, serialized_start=None, serialized_end=None, # pylint: disable=redefined-builtin
syntax=None):
"""Arguments to __init__() are as described in the description
of Descriptor fields above.
    Note that filename is an obsolete argument that is not used anymore.
Please use file.name to access this as an attribute.
"""
super(Descriptor, self).__init__(
options, 'MessageOptions', name, full_name, file,
containing_type, serialized_start=serialized_start,
serialized_end=serialized_end, serialized_options=serialized_options)
# We have fields in addition to fields_by_name and fields_by_number,
# so that:
# 1. Clients can index fields by "order in which they're listed."
# 2. Clients can easily iterate over all fields with the terse
# syntax: for f in descriptor.fields: ...
self.fields = fields
for field in self.fields:
field.containing_type = self
self.fields_by_number = dict((f.number, f) for f in fields)
self.fields_by_name = dict((f.name, f) for f in fields)
self._fields_by_camelcase_name = None
self.nested_types = nested_types
for nested_type in nested_types:
nested_type.containing_type = self
self.nested_types_by_name = dict((t.name, t) for t in nested_types)
self.enum_types = enum_types
for enum_type in self.enum_types:
enum_type.containing_type = self
self.enum_types_by_name = dict((t.name, t) for t in enum_types)
self.enum_values_by_name = dict(
(v.name, v) for t in enum_types for v in t.values)
self.extensions = extensions
for extension in self.extensions:
extension.extension_scope = self
self.extensions_by_name = dict((f.name, f) for f in extensions)
self.is_extendable = is_extendable
self.extension_ranges = extension_ranges
self.oneofs = oneofs if oneofs is not None else []
self.oneofs_by_name = dict((o.name, o) for o in self.oneofs)
for oneof in self.oneofs:
oneof.containing_type = self
self.syntax = syntax or "proto2"
@property
def fields_by_camelcase_name(self):
if self._fields_by_camelcase_name is None:
self._fields_by_camelcase_name = dict(
(f.camelcase_name, f) for f in self.fields)
return self._fields_by_camelcase_name
def EnumValueName(self, enum, value):
"""Returns the string name of an enum value.
This is just a small helper method to simplify a common operation.
Args:
enum: string name of the Enum.
value: int, value of the enum.
Returns:
string name of the enum value.
Raises:
KeyError if either the Enum doesn't exist or the value is not a valid
value for the enum.
"""
return self.enum_types_by_name[enum].values_by_number[value].name
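  # Illustrative example (hypothetical message type, added for clarity): for a
  # message that nests "enum Phone { HOME = 0; MOBILE = 1; }", calling
  # message_descriptor.EnumValueName('Phone', 1) returns 'MOBILE', while an
  # unknown enum name or value raises KeyError as documented above.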
def CopyToProto(self, proto):
"""Copies this to a descriptor_pb2.DescriptorProto.
Args:
proto: An empty descriptor_pb2.DescriptorProto.
"""
# This function is overridden to give a better doc comment.
super(Descriptor, self).CopyToProto(proto)
# TODO(robinson): We should have aggressive checking here,
# for example:
# * If you specify a repeated field, you should not be allowed
# to specify a default value.
# * [Other examples here as needed].
#
# TODO(robinson): for this and other *Descriptor classes, we
# might also want to lock things down aggressively (e.g.,
# prevent clients from setting the attributes). Having
# stronger invariants here in general will reduce the number
# of runtime checks we must do in reflection.py...
class FieldDescriptor(DescriptorBase):
"""Descriptor for a single field in a .proto file.
A FieldDescriptor instance has the following attributes:
name: (str) Name of this field, exactly as it appears in .proto.
full_name: (str) Name of this field, including containing scope. This is
particularly relevant for extensions.
camelcase_name: (str) Camelcase name of this field.
index: (int) Dense, 0-indexed index giving the order that this
field textually appears within its message in the .proto file.
number: (int) Tag number declared for this field in the .proto file.
type: (One of the TYPE_* constants below) Declared type.
cpp_type: (One of the CPPTYPE_* constants below) C++ type used to
represent this field.
label: (One of the LABEL_* constants below) Tells whether this
field is optional, required, or repeated.
has_default_value: (bool) True if this field has a default value defined,
otherwise false.
default_value: (Varies) Default value of this field. Only
meaningful for non-repeated scalar fields. Repeated fields
should always set this to [], and non-repeated composite
fields should always set this to None.
containing_type: (Descriptor) Descriptor of the protocol message
type that contains this field. Set by the Descriptor constructor
if we're passed into one.
Somewhat confusingly, for extension fields, this is the
descriptor of the EXTENDED message, not the descriptor
of the message containing this field. (See is_extension and
extension_scope below).
message_type: (Descriptor) If a composite field, a descriptor
of the message type contained in this field. Otherwise, this is None.
enum_type: (EnumDescriptor) If this field contains an enum, a
descriptor of that enum. Otherwise, this is None.
is_extension: True iff this describes an extension field.
extension_scope: (Descriptor) Only meaningful if is_extension is True.
Gives the message that immediately contains this extension field.
Will be None iff we're a top-level (file-level) extension field.
options: (descriptor_pb2.FieldOptions) Protocol message field options or
None to use default field options.
containing_oneof: (OneofDescriptor) If the field is a member of a oneof
union, contains its descriptor. Otherwise, None.
file: (FileDescriptor) Reference to file descriptor.
"""
# Must be consistent with C++ FieldDescriptor::Type enum in
# descriptor.h.
#
# TODO(robinson): Find a way to eliminate this repetition.
TYPE_DOUBLE = 1
TYPE_FLOAT = 2
TYPE_INT64 = 3
TYPE_UINT64 = 4
TYPE_INT32 = 5
TYPE_FIXED64 = 6
TYPE_FIXED32 = 7
TYPE_BOOL = 8
TYPE_STRING = 9
TYPE_GROUP = 10
TYPE_MESSAGE = 11
TYPE_BYTES = 12
TYPE_UINT32 = 13
TYPE_ENUM = 14
TYPE_SFIXED32 = 15
TYPE_SFIXED64 = 16
TYPE_SINT32 = 17
TYPE_SINT64 = 18
MAX_TYPE = 18
# Must be consistent with C++ FieldDescriptor::CppType enum in
# descriptor.h.
#
# TODO(robinson): Find a way to eliminate this repetition.
CPPTYPE_INT32 = 1
CPPTYPE_INT64 = 2
CPPTYPE_UINT32 = 3
CPPTYPE_UINT64 = 4
CPPTYPE_DOUBLE = 5
CPPTYPE_FLOAT = 6
CPPTYPE_BOOL = 7
CPPTYPE_ENUM = 8
CPPTYPE_STRING = 9
CPPTYPE_MESSAGE = 10
MAX_CPPTYPE = 10
_PYTHON_TO_CPP_PROTO_TYPE_MAP = {
TYPE_DOUBLE: CPPTYPE_DOUBLE,
TYPE_FLOAT: CPPTYPE_FLOAT,
TYPE_ENUM: CPPTYPE_ENUM,
TYPE_INT64: CPPTYPE_INT64,
TYPE_SINT64: CPPTYPE_INT64,
TYPE_SFIXED64: CPPTYPE_INT64,
TYPE_UINT64: CPPTYPE_UINT64,
TYPE_FIXED64: CPPTYPE_UINT64,
TYPE_INT32: CPPTYPE_INT32,
TYPE_SFIXED32: CPPTYPE_INT32,
TYPE_SINT32: CPPTYPE_INT32,
TYPE_UINT32: CPPTYPE_UINT32,
TYPE_FIXED32: CPPTYPE_UINT32,
TYPE_BYTES: CPPTYPE_STRING,
TYPE_STRING: CPPTYPE_STRING,
TYPE_BOOL: CPPTYPE_BOOL,
TYPE_MESSAGE: CPPTYPE_MESSAGE,
TYPE_GROUP: CPPTYPE_MESSAGE
}
# Must be consistent with C++ FieldDescriptor::Label enum in
# descriptor.h.
#
# TODO(robinson): Find a way to eliminate this repetition.
LABEL_OPTIONAL = 1
LABEL_REQUIRED = 2
LABEL_REPEATED = 3
MAX_LABEL = 3
# Must be consistent with C++ constants kMaxNumber, kFirstReservedNumber,
# and kLastReservedNumber in descriptor.h
MAX_FIELD_NUMBER = (1 << 29) - 1
FIRST_RESERVED_FIELD_NUMBER = 19000
LAST_RESERVED_FIELD_NUMBER = 19999
if _USE_C_DESCRIPTORS:
_C_DESCRIPTOR_CLASS = _message.FieldDescriptor
def __new__(cls, name, full_name, index, number, type, cpp_type, label,
default_value, message_type, enum_type, containing_type,
is_extension, extension_scope, options=None,
serialized_options=None,
has_default_value=True, containing_oneof=None, json_name=None,
file=None): # pylint: disable=redefined-builtin
_message.Message._CheckCalledFromGeneratedFile()
if is_extension:
return _message.default_pool.FindExtensionByName(full_name)
else:
return _message.default_pool.FindFieldByName(full_name)
def __init__(self, name, full_name, index, number, type, cpp_type, label,
default_value, message_type, enum_type, containing_type,
is_extension, extension_scope, options=None,
serialized_options=None,
has_default_value=True, containing_oneof=None, json_name=None,
file=None): # pylint: disable=redefined-builtin
"""The arguments are as described in the description of FieldDescriptor
attributes above.
Note that containing_type may be None, and may be set later if necessary
(to deal with circular references between message types, for example).
Likewise for extension_scope.
"""
super(FieldDescriptor, self).__init__(
options, serialized_options, 'FieldOptions')
self.name = name
self.full_name = full_name
self.file = file
self._camelcase_name = None
if json_name is None:
self.json_name = _ToJsonName(name)
else:
self.json_name = json_name
self.index = index
self.number = number
self.type = type
self.cpp_type = cpp_type
self.label = label
self.has_default_value = has_default_value
self.default_value = default_value
self.containing_type = containing_type
self.message_type = message_type
self.enum_type = enum_type
self.is_extension = is_extension
self.extension_scope = extension_scope
self.containing_oneof = containing_oneof
if api_implementation.Type() == 'cpp':
if is_extension:
self._cdescriptor = _message.default_pool.FindExtensionByName(full_name)
else:
self._cdescriptor = _message.default_pool.FindFieldByName(full_name)
else:
self._cdescriptor = None
@property
def camelcase_name(self):
if self._camelcase_name is None:
self._camelcase_name = _ToCamelCase(self.name)
return self._camelcase_name
@staticmethod
def ProtoTypeToCppProtoType(proto_type):
"""Converts from a Python proto type to a C++ Proto Type.
The Python ProtocolBuffer classes specify both the 'Python' datatype and the
'C++' datatype - and they're not the same. This helper method should
translate from one to another.
Args:
proto_type: the Python proto type (descriptor.FieldDescriptor.TYPE_*)
Returns:
descriptor.FieldDescriptor.CPPTYPE_*, the C++ type.
Raises:
TypeTransformationError: when the Python proto type isn't known.
"""
try:
return FieldDescriptor._PYTHON_TO_CPP_PROTO_TYPE_MAP[proto_type]
except KeyError:
raise TypeTransformationError('Unknown proto_type: %s' % proto_type)
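  # Sketch of the mapping in use (added for illustration):
  #   FieldDescriptor.ProtoTypeToCppProtoType(FieldDescriptor.TYPE_STRING)
  #     -> FieldDescriptor.CPPTYPE_STRING
  #   FieldDescriptor.ProtoTypeToCppProtoType(FieldDescriptor.TYPE_SINT64)
  #     -> FieldDescriptor.CPPTYPE_INT64
  # Any value outside the TYPE_* constants raises TypeTransformationError.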
class EnumDescriptor(_NestedDescriptorBase):
"""Descriptor for an enum defined in a .proto file.
An EnumDescriptor instance has the following attributes:
name: (str) Name of the enum type.
full_name: (str) Full name of the type, including package name
and any enclosing type(s).
values: (list of EnumValueDescriptors) List of the values
in this enum.
values_by_name: (dict str -> EnumValueDescriptor) Same as |values|,
but indexed by the "name" field of each EnumValueDescriptor.
values_by_number: (dict int -> EnumValueDescriptor) Same as |values|,
but indexed by the "number" field of each EnumValueDescriptor.
containing_type: (Descriptor) Descriptor of the immediate containing
type of this enum, or None if this is an enum defined at the
top level in a .proto file. Set by Descriptor's constructor
if we're passed into one.
file: (FileDescriptor) Reference to file descriptor.
options: (descriptor_pb2.EnumOptions) Enum options message or
None to use default enum options.
"""
if _USE_C_DESCRIPTORS:
_C_DESCRIPTOR_CLASS = _message.EnumDescriptor
def __new__(cls, name, full_name, filename, values,
containing_type=None, options=None,
serialized_options=None, file=None, # pylint: disable=redefined-builtin
serialized_start=None, serialized_end=None):
_message.Message._CheckCalledFromGeneratedFile()
return _message.default_pool.FindEnumTypeByName(full_name)
def __init__(self, name, full_name, filename, values,
containing_type=None, options=None,
serialized_options=None, file=None, # pylint: disable=redefined-builtin
serialized_start=None, serialized_end=None):
"""Arguments are as described in the attribute description above.
    Note that filename is an obsolete argument that is not used anymore.
Please use file.name to access this as an attribute.
"""
super(EnumDescriptor, self).__init__(
options, 'EnumOptions', name, full_name, file,
containing_type, serialized_start=serialized_start,
serialized_end=serialized_end, serialized_options=serialized_options)
self.values = values
for value in self.values:
value.type = self
self.values_by_name = dict((v.name, v) for v in values)
self.values_by_number = dict((v.number, v) for v in values)
def CopyToProto(self, proto):
"""Copies this to a descriptor_pb2.EnumDescriptorProto.
Args:
proto: An empty descriptor_pb2.EnumDescriptorProto.
"""
# This function is overridden to give a better doc comment.
super(EnumDescriptor, self).CopyToProto(proto)
class EnumValueDescriptor(DescriptorBase):
"""Descriptor for a single value within an enum.
name: (str) Name of this value.
index: (int) Dense, 0-indexed index giving the order that this
value appears textually within its enum in the .proto file.
number: (int) Actual number assigned to this enum value.
type: (EnumDescriptor) EnumDescriptor to which this value
belongs. Set by EnumDescriptor's constructor if we're
passed into one.
options: (descriptor_pb2.EnumValueOptions) Enum value options message or
      None to use default enum value options.
"""
if _USE_C_DESCRIPTORS:
_C_DESCRIPTOR_CLASS = _message.EnumValueDescriptor
def __new__(cls, name, index, number,
type=None, # pylint: disable=redefined-builtin
options=None, serialized_options=None):
_message.Message._CheckCalledFromGeneratedFile()
# There is no way we can build a complete EnumValueDescriptor with the
# given parameters (the name of the Enum is not known, for example).
# Fortunately generated files just pass it to the EnumDescriptor()
# constructor, which will ignore it, so returning None is good enough.
return None
def __init__(self, name, index, number,
type=None, # pylint: disable=redefined-builtin
options=None, serialized_options=None):
"""Arguments are as described in the attribute description above."""
super(EnumValueDescriptor, self).__init__(
options, serialized_options, 'EnumValueOptions')
self.name = name
self.index = index
self.number = number
self.type = type
class OneofDescriptor(DescriptorBase):
"""Descriptor for a oneof field.
name: (str) Name of the oneof field.
full_name: (str) Full name of the oneof field, including package name.
index: (int) 0-based index giving the order of the oneof field inside
its containing type.
containing_type: (Descriptor) Descriptor of the protocol message
type that contains this field. Set by the Descriptor constructor
if we're passed into one.
fields: (list of FieldDescriptor) The list of field descriptors this
oneof can contain.
"""
if _USE_C_DESCRIPTORS:
_C_DESCRIPTOR_CLASS = _message.OneofDescriptor
def __new__(
cls, name, full_name, index, containing_type, fields, options=None,
serialized_options=None):
_message.Message._CheckCalledFromGeneratedFile()
return _message.default_pool.FindOneofByName(full_name)
def __init__(
self, name, full_name, index, containing_type, fields, options=None,
serialized_options=None):
"""Arguments are as described in the attribute description above."""
super(OneofDescriptor, self).__init__(
options, serialized_options, 'OneofOptions')
self.name = name
self.full_name = full_name
self.index = index
self.containing_type = containing_type
self.fields = fields
class ServiceDescriptor(_NestedDescriptorBase):
"""Descriptor for a service.
name: (str) Name of the service.
full_name: (str) Full name of the service, including package name.
  index: (int) 0-indexed index giving the order that this service's
    definition appears within the .proto file.
methods: (list of MethodDescriptor) List of methods provided by this
service.
methods_by_name: (dict str -> MethodDescriptor) Same MethodDescriptor
    objects as in |methods|, but indexed by "name" attribute in each
MethodDescriptor.
options: (descriptor_pb2.ServiceOptions) Service options message or
None to use default service options.
file: (FileDescriptor) Reference to file info.
"""
if _USE_C_DESCRIPTORS:
_C_DESCRIPTOR_CLASS = _message.ServiceDescriptor
def __new__(cls, name, full_name, index, methods, options=None,
serialized_options=None, file=None, # pylint: disable=redefined-builtin
serialized_start=None, serialized_end=None):
_message.Message._CheckCalledFromGeneratedFile() # pylint: disable=protected-access
return _message.default_pool.FindServiceByName(full_name)
def __init__(self, name, full_name, index, methods, options=None,
serialized_options=None, file=None, # pylint: disable=redefined-builtin
serialized_start=None, serialized_end=None):
super(ServiceDescriptor, self).__init__(
options, 'ServiceOptions', name, full_name, file,
None, serialized_start=serialized_start,
serialized_end=serialized_end, serialized_options=serialized_options)
self.index = index
self.methods = methods
self.methods_by_name = dict((m.name, m) for m in methods)
# Set the containing service for each method in this service.
for method in self.methods:
method.containing_service = self
def FindMethodByName(self, name):
"""Searches for the specified method, and returns its descriptor."""
return self.methods_by_name.get(name, None)
def CopyToProto(self, proto):
"""Copies this to a descriptor_pb2.ServiceDescriptorProto.
Args:
proto: An empty descriptor_pb2.ServiceDescriptorProto.
"""
# This function is overridden to give a better doc comment.
super(ServiceDescriptor, self).CopyToProto(proto)
class MethodDescriptor(DescriptorBase):
"""Descriptor for a method in a service.
name: (str) Name of the method within the service.
full_name: (str) Full name of method.
index: (int) 0-indexed index of the method inside the service.
containing_service: (ServiceDescriptor) The service that contains this
method.
input_type: The descriptor of the message that this method accepts.
output_type: The descriptor of the message that this method returns.
options: (descriptor_pb2.MethodOptions) Method options message or
None to use default method options.
"""
if _USE_C_DESCRIPTORS:
_C_DESCRIPTOR_CLASS = _message.MethodDescriptor
def __new__(cls, name, full_name, index, containing_service,
input_type, output_type, options=None, serialized_options=None):
_message.Message._CheckCalledFromGeneratedFile() # pylint: disable=protected-access
return _message.default_pool.FindMethodByName(full_name)
def __init__(self, name, full_name, index, containing_service,
input_type, output_type, options=None, serialized_options=None):
"""The arguments are as described in the description of MethodDescriptor
attributes above.
Note that containing_service may be None, and may be set later if necessary.
"""
super(MethodDescriptor, self).__init__(
options, serialized_options, 'MethodOptions')
self.name = name
self.full_name = full_name
self.index = index
self.containing_service = containing_service
self.input_type = input_type
self.output_type = output_type
class FileDescriptor(DescriptorBase):
"""Descriptor for a file. Mimics the descriptor_pb2.FileDescriptorProto.
Note that enum_types_by_name, extensions_by_name, and dependencies
fields are only set by the message_factory module, and not by the
generated proto code.
name: name of file, relative to root of source tree.
package: name of the package
syntax: string indicating syntax of the file (can be "proto2" or "proto3")
serialized_pb: (str) Byte string of serialized
descriptor_pb2.FileDescriptorProto.
dependencies: List of other FileDescriptors this FileDescriptor depends on.
public_dependencies: A list of FileDescriptors, subset of the dependencies
above, which were declared as "public".
  message_types_by_name: Dict of message names and their descriptors.
enum_types_by_name: Dict of enum names and their descriptors.
extensions_by_name: Dict of extension names and their descriptors.
  services_by_name: Dict of service names and their descriptors.
pool: the DescriptorPool this descriptor belongs to. When not passed to the
constructor, the global default pool is used.
"""
if _USE_C_DESCRIPTORS:
_C_DESCRIPTOR_CLASS = _message.FileDescriptor
def __new__(cls, name, package, options=None,
serialized_options=None, serialized_pb=None,
dependencies=None, public_dependencies=None,
syntax=None, pool=None):
# FileDescriptor() is called from various places, not only from generated
# files, to register dynamic proto files and messages.
if serialized_pb:
# TODO(amauryfa): use the pool passed as argument. This will work only
# for C++-implemented DescriptorPools.
return _message.default_pool.AddSerializedFile(serialized_pb)
else:
return super(FileDescriptor, cls).__new__(cls)
def __init__(self, name, package, options=None,
serialized_options=None, serialized_pb=None,
dependencies=None, public_dependencies=None,
syntax=None, pool=None):
"""Constructor."""
super(FileDescriptor, self).__init__(
options, serialized_options, 'FileOptions')
if pool is None:
from google.protobuf import descriptor_pool
pool = descriptor_pool.Default()
self.pool = pool
self.message_types_by_name = {}
self.name = name
self.package = package
self.syntax = syntax or "proto2"
self.serialized_pb = serialized_pb
self.enum_types_by_name = {}
self.extensions_by_name = {}
self.services_by_name = {}
self.dependencies = (dependencies or [])
self.public_dependencies = (public_dependencies or [])
if (api_implementation.Type() == 'cpp' and
self.serialized_pb is not None):
_message.default_pool.AddSerializedFile(self.serialized_pb)
def CopyToProto(self, proto):
"""Copies this to a descriptor_pb2.FileDescriptorProto.
Args:
proto: An empty descriptor_pb2.FileDescriptorProto.
"""
proto.ParseFromString(self.serialized_pb)
def _ParseOptions(message, string):
"""Parses serialized options.
This helper function is used to parse serialized options in generated
proto2 files. It must not be used outside proto2.
"""
message.ParseFromString(string)
return message
def _ToCamelCase(name):
"""Converts name to camel-case and returns it."""
capitalize_next = False
result = []
for c in name:
if c == '_':
if result:
capitalize_next = True
elif capitalize_next:
result.append(c.upper())
capitalize_next = False
else:
result += c
# Lower-case the first letter.
if result and result[0].isupper():
result[0] = result[0].lower()
return ''.join(result)
def _OptionsOrNone(descriptor_proto):
"""Returns the value of the field `options`, or None if it is not set."""
if descriptor_proto.HasField('options'):
return descriptor_proto.options
else:
return None
def _ToJsonName(name):
"""Converts name to Json name and returns it."""
capitalize_next = False
result = []
for c in name:
if c == '_':
capitalize_next = True
elif capitalize_next:
result.append(c.upper())
capitalize_next = False
else:
result += c
return ''.join(result)
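# Illustrative examples for the two name-conversion helpers above (not part of
# the original module): they differ in how a leading underscore is treated and
# in whether the first letter is forced to lower case, e.g.
#   _ToCamelCase('foo_bar_id')  -> 'fooBarId'
#   _ToJsonName('foo_bar_id')   -> 'fooBarId'
#   _ToCamelCase('_foo')        -> 'foo'   (leading '_' does not capitalize)
#   _ToJsonName('_foo')         -> 'Foo'   (leading '_' capitalizes the next letter)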
def MakeDescriptor(desc_proto, package='', build_file_if_cpp=True,
syntax=None):
"""Make a protobuf Descriptor given a DescriptorProto protobuf.
Handles nested descriptors. Note that this is limited to the scope of defining
a message inside of another message. Composite fields can currently only be
resolved if the message is defined in the same scope as the field.
Args:
desc_proto: The descriptor_pb2.DescriptorProto protobuf message.
package: Optional package name for the new message Descriptor (string).
build_file_if_cpp: Update the C++ descriptor pool if api matches.
Set to False on recursion, so no duplicates are created.
syntax: The syntax/semantics that should be used. Set to "proto3" to get
proto3 field presence semantics.
Returns:
A Descriptor for protobuf messages.
"""
if api_implementation.Type() == 'cpp' and build_file_if_cpp:
# The C++ implementation requires all descriptors to be backed by the same
# definition in the C++ descriptor pool. To do this, we build a
# FileDescriptorProto with the same definition as this descriptor and build
# it into the pool.
from google.protobuf import descriptor_pb2
file_descriptor_proto = descriptor_pb2.FileDescriptorProto()
file_descriptor_proto.message_type.add().MergeFrom(desc_proto)
# Generate a random name for this proto file to prevent conflicts with any
# imported ones. We need to specify a file name so the descriptor pool
# accepts our FileDescriptorProto, but it is not important what that file
# name is actually set to.
proto_name = binascii.hexlify(os.urandom(16)).decode('ascii')
if package:
file_descriptor_proto.name = os.path.join(package.replace('.', '/'),
proto_name + '.proto')
file_descriptor_proto.package = package
else:
file_descriptor_proto.name = proto_name + '.proto'
_message.default_pool.Add(file_descriptor_proto)
result = _message.default_pool.FindFileByName(file_descriptor_proto.name)
if _USE_C_DESCRIPTORS:
return result.message_types_by_name[desc_proto.name]
full_message_name = [desc_proto.name]
if package: full_message_name.insert(0, package)
# Create Descriptors for enum types
enum_types = {}
for enum_proto in desc_proto.enum_type:
full_name = '.'.join(full_message_name + [enum_proto.name])
enum_desc = EnumDescriptor(
enum_proto.name, full_name, None, [
EnumValueDescriptor(enum_val.name, ii, enum_val.number)
for ii, enum_val in enumerate(enum_proto.value)])
enum_types[full_name] = enum_desc
# Create Descriptors for nested types
nested_types = {}
for nested_proto in desc_proto.nested_type:
full_name = '.'.join(full_message_name + [nested_proto.name])
# Nested types are just those defined inside of the message, not all types
# used by fields in the message, so no loops are possible here.
nested_desc = MakeDescriptor(nested_proto,
package='.'.join(full_message_name),
build_file_if_cpp=False,
syntax=syntax)
nested_types[full_name] = nested_desc
fields = []
for field_proto in desc_proto.field:
full_name = '.'.join(full_message_name + [field_proto.name])
enum_desc = None
nested_desc = None
if field_proto.json_name:
json_name = field_proto.json_name
else:
json_name = None
if field_proto.HasField('type_name'):
type_name = field_proto.type_name
full_type_name = '.'.join(full_message_name +
[type_name[type_name.rfind('.')+1:]])
if full_type_name in nested_types:
nested_desc = nested_types[full_type_name]
elif full_type_name in enum_types:
enum_desc = enum_types[full_type_name]
# Else type_name references a non-local type, which isn't implemented
field = FieldDescriptor(
field_proto.name, full_name, field_proto.number - 1,
field_proto.number, field_proto.type,
FieldDescriptor.ProtoTypeToCppProtoType(field_proto.type),
field_proto.label, None, nested_desc, enum_desc, None, False, None,
options=_OptionsOrNone(field_proto), has_default_value=False,
json_name=json_name)
fields.append(field)
desc_name = '.'.join(full_message_name)
return Descriptor(desc_proto.name, desc_name, None, None, fields,
list(nested_types.values()), list(enum_types.values()), [],
options=_OptionsOrNone(desc_proto))
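# Usage sketch (illustrative, not part of the module): MakeDescriptor is meant
# to be fed a DescriptorProto built by hand or parsed elsewhere. The message
# name, field names/numbers and package below are made up for the example.
#
#   from google.protobuf import descriptor_pb2
#   proto = descriptor_pb2.DescriptorProto()
#   proto.name = 'Point'
#   proto.field.add(name='x', number=1,
#                   type=descriptor_pb2.FieldDescriptorProto.TYPE_INT32,
#                   label=descriptor_pb2.FieldDescriptorProto.LABEL_OPTIONAL)
#   proto.field.add(name='y', number=2,
#                   type=descriptor_pb2.FieldDescriptorProto.TYPE_INT32,
#                   label=descriptor_pb2.FieldDescriptorProto.LABEL_OPTIONAL)
#   point_descriptor = MakeDescriptor(proto, package='example')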
|
xadmin/models.py | A-tiantian/xadmin_bugfix | 120 | 11194474 | <reponame>A-tiantian/xadmin_bugfix
import json
import django
from django.db import models
from django.utils import timezone
from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from django.utils.translation import ugettext_lazy as _, ugettext
from django.urls.base import reverse
from django.core.serializers.json import DjangoJSONEncoder
from django.db.models.base import ModelBase
from django.utils.encoding import smart_text
from django.db.models.signals import post_migrate
from django.contrib.auth.models import Permission
import datetime
import decimal
from xadmin.util import quote
AUTH_USER_MODEL = getattr(settings, 'AUTH_USER_MODEL', 'auth.User')
def add_view_permissions(sender, **kwargs):
"""
    This syncdb hook takes care of adding a view permission to all our
content types.
"""
# for each of our content types
for content_type in ContentType.objects.all():
# build our permission slug
codename = "view_%s" % content_type.model
# if it doesn't exist..
if not Permission.objects.filter(content_type=content_type, codename=codename):
# add it
Permission.objects.create(content_type=content_type,
codename=codename,
name="Can view %s" % content_type.name)
# print "Added view permission for %s" % content_type.name
# check for all our view permissions after a syncdb
post_migrate.connect(add_view_permissions)
class Bookmark(models.Model):
title = models.CharField(_(u'Title'), max_length=128)
user = models.ForeignKey(AUTH_USER_MODEL, on_delete=models.CASCADE, verbose_name=_(u"user"), blank=True, null=True)
url_name = models.CharField(_(u'Url Name'), max_length=64)
content_type = models.ForeignKey(ContentType, on_delete=models.CASCADE)
query = models.CharField(_(u'Query String'), max_length=1000, blank=True)
is_share = models.BooleanField(_(u'Is Shared'), default=False)
@property
def url(self):
base_url = reverse(self.url_name)
if self.query:
base_url = base_url + '?' + self.query
return base_url
def __str__(self):
return self.title
class Meta:
verbose_name = _(u'Bookmark')
verbose_name_plural = _('Bookmarks')
class JSONEncoder(DjangoJSONEncoder):
def default(self, o):
if isinstance(o, datetime.datetime):
return o.strftime('%Y-%m-%d %H:%M:%S')
elif isinstance(o, datetime.date):
return o.strftime('%Y-%m-%d')
elif isinstance(o, decimal.Decimal):
return str(o)
elif isinstance(o, ModelBase):
return '%s.%s' % (o._meta.app_label, o._meta.model_name)
else:
try:
return super(JSONEncoder, self).default(o)
except Exception:
return smart_text(o)
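# Illustrative example (not part of the module): this encoder is what the
# set_json()/set_value() helpers below rely on, e.g.
#   json.dumps({'when': datetime.datetime(2020, 1, 1, 12, 0)}, cls=JSONEncoder)
#   == '{"when": "2020-01-01 12:00:00"}'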
class UserSettings(models.Model):
user = models.ForeignKey(AUTH_USER_MODEL, on_delete=models.CASCADE, verbose_name=_(u"user"))
key = models.CharField(_('Settings Key'), max_length=256)
value = models.TextField(_('Settings Content'))
def json_value(self):
return json.loads(self.value)
def set_json(self, obj):
self.value = json.dumps(obj, cls=JSONEncoder, ensure_ascii=False)
def __str__(self):
return "%s %s" % (self.user, self.key)
class Meta:
verbose_name = _(u'User Setting')
verbose_name_plural = _('User Settings')
class UserWidget(models.Model):
user = models.ForeignKey(AUTH_USER_MODEL, on_delete=models.CASCADE, verbose_name=_(u"user"))
page_id = models.CharField(_(u"Page"), max_length=256)
widget_type = models.CharField(_(u"Widget Type"), max_length=50)
value = models.TextField(_(u"Widget Params"))
def get_value(self):
value = json.loads(self.value)
value['id'] = self.id
value['type'] = self.widget_type
return value
def set_value(self, obj):
self.value = json.dumps(obj, cls=JSONEncoder, ensure_ascii=False)
def save(self, *args, **kwargs):
created = self.pk is None
super(UserWidget, self).save(*args, **kwargs)
if created:
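            # A freshly created widget is prepended to the comma-separated widget
            # order stored in the user's "dashboard:<page_id>:pos" setting, if
            # that setting already exists.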
try:
portal_pos = UserSettings.objects.get(
user=self.user, key="dashboard:%s:pos" % self.page_id)
portal_pos.value = "%s,%s" % (self.pk, portal_pos.value) if portal_pos.value else self.pk
portal_pos.save()
except Exception:
pass
def __str__(self):
return "%s %s widget" % (self.user, self.widget_type)
class Meta:
verbose_name = _(u'User Widget')
verbose_name_plural = _('User Widgets')
class Log(models.Model):
action_time = models.DateTimeField(
_('action time'),
default=timezone.now,
editable=False,
)
user = models.ForeignKey(
AUTH_USER_MODEL,
models.CASCADE,
verbose_name=_('user'),
)
ip_addr = models.GenericIPAddressField(_('action ip'), blank=True, null=True)
content_type = models.ForeignKey(
ContentType,
models.SET_NULL,
verbose_name=_('content type'),
blank=True, null=True,
)
object_id = models.TextField(_('object id'), blank=True, null=True)
object_repr = models.CharField(_('object repr'), max_length=200)
action_flag = models.CharField(_('action flag'), max_length=32)
message = models.TextField(_('change message'), blank=True)
class Meta:
verbose_name = _('log entry')
verbose_name_plural = _('log entries')
ordering = ('-action_time',)
def __repr__(self):
return smart_text(self.action_time)
def __str__(self):
if self.action_flag == 'create':
return ugettext('Added "%(object)s".') % {'object': self.object_repr}
elif self.action_flag == 'change':
return ugettext('Changed "%(object)s" - %(changes)s') % {
'object': self.object_repr,
'changes': self.message,
}
elif self.action_flag == 'delete' and self.object_repr:
return ugettext('Deleted "%(object)s."') % {'object': self.object_repr}
return self.message
def get_edited_object(self):
"Returns the edited object represented by this log entry"
return self.content_type.get_object_for_this_type(pk=self.object_id)
|
accelerator/unixhttp.py | eBay/accelerator | 143 | 11194475 | ############################################################################
# #
# Copyright (c) 2017 eBay Inc. #
# Modifications copyright (c) 2019-2021 <NAME> #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
# #
############################################################################
from __future__ import print_function
from __future__ import division
from accelerator.compat import PY3, unquote_plus
from accelerator.compat import urlopen, Request, URLError, HTTPError
from accelerator.extras import json_encode, json_decode
from accelerator.error import ServerError, UrdError, UrdPermissionError, UrdConflictError
from accelerator import g, __version__ as ax_version
if PY3:
from urllib.request import install_opener, build_opener, AbstractHTTPHandler
from http.client import HTTPConnection
else:
from urllib2 import install_opener, build_opener, AbstractHTTPHandler
from httplib import HTTPConnection
import sys
import time
import socket
class UnixHTTPConnection(HTTPConnection):
def __init__(self, host, *a, **kw):
HTTPConnection.__init__(self, 'localhost', *a, **kw)
self.unix_path = unquote_plus(host.split(':', 1)[0])
def connect(self):
s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
s.connect(self.unix_path)
self.sock = s
class UnixHTTPHandler(AbstractHTTPHandler):
def unixhttp_open(self, req):
return self.do_open(UnixHTTPConnection, req)
unixhttp_request = AbstractHTTPHandler.do_request_
install_opener(build_opener(UnixHTTPHandler))
import bottle
# The standard bottle WaitressServer can't handle unix sockets and doesn't set threads.
class WaitressServer(bottle.ServerAdapter):
def run(self, handler):
from waitress import create_server
if self.port:
kw = dict(host=self.host, port=self.port)
else:
kw = dict(unix_socket=self.host, unix_socket_perms='777')
server = create_server(handler, threads=12, **kw)
server.run()
def call(url, data=None, fmt=json_decode, headers={}, server_name='server', retries=4, quiet=False):
if data is not None and not isinstance(data, bytes):
data = json_encode(data)
err = None
req = Request(url, data=data, headers=headers)
for attempt in range(1, retries + 2):
resp = None
try:
r = urlopen(req)
try:
resp = r.read()
if server_name == 'server' and g.running in ('build', 'shell',):
s_version = r.headers['Accelerator-Version'] or '<unknown (old)>'
if s_version != ax_version:
# Nothing is supposed to catch this, so just print and die.
print('Server is running version %s but we are running version %s' % (s_version, ax_version,), file=sys.stderr)
exit(1)
if PY3:
resp = resp.decode('utf-8')
# It is inconsistent if we get HTTPError or not.
# It seems we do when using TCP sockets, but not when using unix sockets.
if r.getcode() >= 400:
raise HTTPError(url, r.getcode(), resp, {}, None)
return fmt(resp)
finally:
try:
r.close()
except Exception:
pass
except HTTPError as e:
if resp is None and e.fp:
resp = e.fp.read()
if PY3:
resp = resp.decode('utf-8')
msg = '%s says %d: %s' % (server_name, e.code, resp,)
if server_name == 'urd' and 400 <= e.code < 500:
if e.code == 401:
err = UrdPermissionError()
if e.code == 409:
err = UrdConflictError()
break
if server_name == 'server' and e.code != 503 and resp:
return fmt(resp)
except URLError:
			# Don't say anything the first few times, because the output
# tests get messed up if this happens during them.
if attempt < retries - 1:
msg = None
else:
msg = 'error contacting ' + server_name
except ValueError as e:
msg = 'Bad data from %s, %s: %s' % (server_name, type(e).__name__, e,)
if msg and not quiet:
print(msg, file=sys.stderr)
if attempt < retries + 1:
time.sleep(attempt / 15)
if msg and not quiet:
print('Retrying (%d/%d).' % (attempt, retries,), file=sys.stderr)
else:
if not quiet:
print('Giving up.', file=sys.stderr)
if err:
raise err
if server_name == 'urd':
raise UrdError(msg)
else:
raise ServerError(msg)
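# Usage sketch (illustrative; the socket path and endpoint are made up): the
# UnixHTTPHandler installed above handles URLs whose scheme is "unixhttp" and
# whose host part is the url-quoted socket path, so a typical call looks like
#   call('unixhttp://%2Ftmp%2Fax.socket/status', server_name='server')
# which does a GET (POST when data is passed), retries on connection errors and
# json-decodes the reply by default.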
|
tensorflow/python/kernel_tests/io_ops/record_input_test.py | EricRemmerswaal/tensorflow | 190,993 | 11194495 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for record_input_op."""
import os
from tensorflow.python.framework import test_util
from tensorflow.python.framework.errors_impl import NotFoundError
from tensorflow.python.lib.io import tf_record
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class RecordInputOpTest(test.TestCase):
def generateTestData(self,
prefix,
n,
m,
compression_type=tf_record.TFRecordCompressionType.NONE):
options = tf_record.TFRecordOptions(compression_type)
for i in range(n):
f = os.path.join(self.get_temp_dir(), prefix + "." + str(i))
w = tf_record.TFRecordWriter(f, options=options)
for j in range(m):
w.write("{0:0{width}}".format(i * m + j, width=10).encode("utf-8"))
w.close()
def testRecordInputSimple(self):
with self.cached_session() as sess:
self.generateTestData("basic", 1, 1)
yield_op = data_flow_ops.RecordInput(
file_pattern=os.path.join(self.get_temp_dir(), "basic.*"),
parallelism=1,
buffer_size=1,
batch_size=1,
name="record_input").get_yield_op()
self.assertEqual(self.evaluate(yield_op), b"0000000000")
def testRecordInputSimpleGzip(self):
with self.cached_session() as sess:
self.generateTestData(
"basic",
1,
1,
compression_type=tf_record.TFRecordCompressionType.GZIP)
yield_op = data_flow_ops.RecordInput(
file_pattern=os.path.join(self.get_temp_dir(), "basic.*"),
parallelism=1,
buffer_size=1,
batch_size=1,
name="record_input",
compression_type=tf_record.TFRecordCompressionType.GZIP).get_yield_op(
)
self.assertEqual(self.evaluate(yield_op), b"0000000000")
def testRecordInputSimpleZlib(self):
with self.cached_session() as sess:
self.generateTestData(
"basic",
1,
1,
compression_type=tf_record.TFRecordCompressionType.ZLIB)
yield_op = data_flow_ops.RecordInput(
file_pattern=os.path.join(self.get_temp_dir(), "basic.*"),
parallelism=1,
buffer_size=1,
batch_size=1,
name="record_input",
compression_type=tf_record.TFRecordCompressionType.ZLIB).get_yield_op(
)
self.assertEqual(self.evaluate(yield_op), b"0000000000")
@test_util.run_deprecated_v1
def testRecordInputEpochs(self):
files = 100
records_per_file = 100
batches = 2
with self.cached_session() as sess:
self.generateTestData("basic", files, records_per_file)
records = data_flow_ops.RecordInput(
file_pattern=os.path.join(self.get_temp_dir(), "basic.*"),
parallelism=2,
buffer_size=2000,
batch_size=1,
shift_ratio=0.33,
seed=10,
name="record_input",
batches=batches)
yield_op = records.get_yield_op()
# cycle over 3 epochs and make sure we never duplicate
for _ in range(3):
epoch_set = set()
for _ in range(int(files * records_per_file / batches)):
op_list = self.evaluate(yield_op)
self.assertTrue(len(op_list) is batches)
for r in op_list:
self.assertTrue(r[0] not in epoch_set)
epoch_set.add(r[0])
@test_util.run_deprecated_v1
def testDoesNotDeadlock(self):
# Iterate multiple times to cause deadlock if there is a chance it can occur
for _ in range(30):
with self.cached_session() as sess:
self.generateTestData("basic", 1, 1)
records = data_flow_ops.RecordInput(
file_pattern=os.path.join(self.get_temp_dir(), "basic.*"),
parallelism=1,
buffer_size=100,
batch_size=1,
name="record_input")
yield_op = records.get_yield_op()
for _ in range(50):
self.evaluate(yield_op)
@test_util.run_deprecated_v1
def testEmptyGlob(self):
with self.cached_session() as sess:
record_input = data_flow_ops.RecordInput(file_pattern="foo")
yield_op = record_input.get_yield_op()
self.evaluate(variables.global_variables_initializer())
with self.assertRaises(NotFoundError):
self.evaluate(yield_op)
@test_util.run_deprecated_v1
def testBufferTooSmall(self):
files = 10
records_per_file = 10
batches = 2
with self.cached_session() as sess:
self.generateTestData("basic", files, records_per_file)
records = data_flow_ops.RecordInput(
file_pattern=os.path.join(self.get_temp_dir(), "basic.*"),
parallelism=2,
buffer_size=2000,
batch_size=1,
shift_ratio=0.33,
seed=10,
name="record_input",
batches=batches)
yield_op = records.get_yield_op()
# cycle over 3 epochs and make sure we never duplicate
for _ in range(3):
epoch_set = set()
for _ in range(int(files * records_per_file / batches)):
op_list = self.evaluate(yield_op)
self.assertTrue(len(op_list) is batches)
for r in op_list:
self.assertTrue(r[0] not in epoch_set)
epoch_set.add(r[0])
if __name__ == "__main__":
test.main()
|
pushservices/gcm.py | nakanin/airnotifier | 558 | 11194508 | <reponame>nakanin/airnotifier
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2012, <NAME>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Dongsheng Cai nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL DONGSHENG CAI BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from . import PushService
from util import strip_tags
import json
import logging
import requests
import time
from util import json_decode, json_encode
GCM_ENDPOINT = "https://fcm.googleapis.com/fcm/send"
class GCMException(Exception):
pass
class GCMNotRegisteredException(GCMException):
def __init__(self, regids):
Exception.__init__(self, "Not Registered")
self.regids = regids
class GCMInvalidRegistrationException(GCMException):
def __init__(self, regids):
Exception.__init__(self, "Invalid Registration")
self.regids = regids
class GCMUpdateRegIDsException(GCMException):
def __init__(self, canonical_ids):
Exception.__init__(self, "Canonical ids")
self.canonical_ids = canonical_ids
class GCMClient(PushService):
def __init__(
self, projectnumber, apikey, appname, instanceid=0, endpoint=GCM_ENDPOINT
):
self.projectnumber = projectnumber
self.apikey = apikey
self.appname = appname
self.instanceid = instanceid
self.endpoint = endpoint
def build_request(self, regids, data, collapse_key, ttl):
payload = {"registration_ids": regids}
if data:
payload["data"] = data
        # ttl may be None when it was not supplied (see process()), so guard the
        # comparison instead of letting "None >= 0" raise a TypeError.
        if ttl is not None and ttl >= 0:
            payload["time_to_live"] = ttl
if collapse_key:
payload["collapse_key"] = collapse_key
return json_encode(payload)
def reverse_response_info(self, key, ids, results):
zipped = list(zip(ids, results))
# Get items having error key
filtered = [x for x in zipped if key in x[1]]
# Expose error value
exposed = [(s[0], s[1][key]) for s in filtered]
errors = {}
for k, v in exposed:
if v not in errors:
errors[v] = []
errors[v].append(k)
return errors
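    # Illustrative example (made-up tokens): given ids ['t1', 't2', 't3'] and
    # results [{'message_id': '1'}, {'error': 'NotRegistered'},
    # {'error': 'NotRegistered'}], reverse_response_info('error', ids, results)
    # returns {'NotRegistered': ['t2', 't3']}.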
def process(self, **kwargs):
gcmparam = kwargs.get("gcm", {})
collapse_key = gcmparam.get("collapse_key", None)
ttl = gcmparam.get("ttl", None)
alert = kwargs.get("alert", None)
data = gcmparam.get("data", {})
if "message" not in data:
data["message"] = kwargs.get("alert", "")
appdb = kwargs.get("appdb", None)
return self.send(
kwargs["token"], data=data, collapse_key=collapse_key, ttl=ttl, appdb=appdb
)
def send(
self, regids, data=None, collapse_key=None, ttl=None, retries=5, appdb=None
):
"""
Send message to google gcm endpoint
:param regids: list
:param data: dict
:param collapse_key: string
:param ttl: int
:param retries: int
:param appdb: Database
"""
if not regids:
raise GCMException("Registration IDs cannot be empty")
payload = self.build_request(regids, data, collapse_key, ttl)
headers = {
"content-type": "application/json",
"Authorization": "key=%s" % self.apikey,
}
response = requests.post(self.endpoint, data=payload, headers=headers)
if response.status_code == 400:
raise GCMException(
"Request could not be parsed as JSON, or it contained invalid fields."
)
elif response.status_code == 401:
raise GCMException("There was an error authenticating the sender account.")
elif response.status_code >= 500:
raise GCMException("GCMClient server is temporarily unavailable .")
responsedata = response.json()
if responsedata.get("canonical_ids", 0) != 0:
# means we need to take a look at results, looking for registration_id key
responsedata["canonical_ids"] = self.reverse_response_info(
"registration_id", regids, responsedata["results"]
)
# Handling errors
if responsedata.get("failure", 0) != 0:
# means we need to take a look at results, looking for error key
errors = self.reverse_response_info(
"error", regids, responsedata["results"]
)
for errorkey, packed_rregisteration_ids in list(errors.items()):
# Check for errors and act accordingly
if errorkey == "NotRegistered":
# Should remove the registration ID from your server database
# because the application was uninstalled from the device or
                    # it does not have a broadcast receiver configured to
                    # receive messages.
if appdb is not None:
appdb.tokens.delete_many(
{"token": {"$in": packed_rregisteration_ids}}
)
self.add_to_log(
appdb,
"GCM",
"Cleaned unregistered tokens: "
+ ", ".join(packed_rregisteration_ids),
)
else:
raise GCMNotRegisteredException(packed_rregisteration_ids)
elif errorkey == "InvalidRegistration":
# You should remove the registration ID from your server
                    # database because the application was uninstalled from the
                    # device or it does not have a broadcast receiver configured
                    # to receive messages.
if appdb is not None:
appdb.tokens.delete_many(
{"token": {"$in": packed_rregisteration_ids}}
)
self.add_to_log(
appdb,
"GCM",
"Cleaned invalid tokens: "
+ ", ".join(packed_rregisteration_ids),
)
else:
raise GCMInvalidRegistrationException(packed_rregisteration_ids)
elif errorkey == "MismatchSenderId":
"""
A registration ID is tied to a certain group of senders. When an application registers for GCMClient usage,
it must specify which senders are allowed to send messages. Make sure you're using one of those when
trying to send messages to the device. If you switch to a different sender, the existing registration
IDs won't work.
"""
raise GCMException("Mismatch sender Id")
elif errorkey == "MissingRegistration":
"""
Check that the request contains a registration ID (either in the registration_id parameter in a
plain text message, or in the registration_ids field in JSON).
"""
raise GCMException("Missing registration")
elif errorkey == "MessageTooBig":
raise GCMException("Message too big")
elif errorkey == "InvalidDataKey":
raise GCMException("Invalid data key")
elif errorkey == "InvalidTtl":
raise GCMException("Invalid Ttl")
elif errorkey == "InvalidPackageName":
raise GCMException("Invalid package name")
raise GCMException("Unknown error, contact admin")
return response
def add_to_log(self, appdb, action, info=None, level="info"):
log = {}
log["action"] = strip_tags(action)
log["info"] = strip_tags(info)
log["level"] = strip_tags(level)
log["created"] = int(time.time())
if appdb is not None:
appdb.logs.insert(log)
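# Usage sketch (illustrative; the project number, API key and device token are
# placeholders):
#   client = GCMClient(projectnumber='123456', apikey='AIza-example', appname='demo')
#   try:
#       client.send(['device-token-1'], data={'message': 'hello'}, ttl=3600)
#   except GCMNotRegisteredException as stale:
#       print('remove these tokens:', stale.regids)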
|
tests/resources/selenium/python/test_setup_exception.py | IamSaurabh1/taurus | 1,743 | 11194512 | import unittest
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from selenium.common.exceptions import NoAlertPresentException
class TestBlazemeterPass(unittest.TestCase):
@classmethod
def setUpClass(cls):
raise Exception("Catch that")
def test_pass(self):
pass
|
zerver/management/commands/export_single_user.py | TylerPham2000/zulip | 17,004 | 11194559 | <reponame>TylerPham2000/zulip<gh_stars>1000+
import os
import shutil
import subprocess
import tempfile
from argparse import ArgumentParser
from typing import Any
from zerver.lib.export import do_export_user
from zerver.lib.management import ZulipBaseCommand
class Command(ZulipBaseCommand):
help = """Exports message data from a Zulip user
This command exports the message history for a single Zulip user.
Note that this only exports the user's message history and
realm-public metadata needed to understand it; it does nothing
with (for example) any bots owned by the user."""
def add_arguments(self, parser: ArgumentParser) -> None:
parser.add_argument("email", metavar="<email>", help="email of user to export")
parser.add_argument(
"--output", dest="output_dir", help="Directory to write exported data to."
)
self.add_realm_args(parser)
def handle(self, *args: Any, **options: Any) -> None:
realm = self.get_realm(options)
user_profile = self.get_user(options["email"], realm)
output_dir = options["output_dir"]
if output_dir is None:
output_dir = tempfile.mkdtemp(prefix="zulip-export-")
if os.path.exists(output_dir):
shutil.rmtree(output_dir)
os.makedirs(output_dir)
print(f"Exporting user {user_profile.delivery_email}")
do_export_user(user_profile, output_dir)
print(f"Finished exporting to {output_dir}; tarring")
tarball_path = output_dir.rstrip("/") + ".tar.gz"
subprocess.check_call(["tar", "--strip-components=1", "-czf", tarball_path, output_dir])
print(f"Tarball written to {tarball_path}")
|
.github/workflows/check_entangled_specs.py | slowy07/CBL-Mariner | 3,337 | 11194566 | <reponame>slowy07/CBL-Mariner
from typing import FrozenSet, List, Set
from pyrpm.spec import Spec
import argparse
from collections import defaultdict
from pathlib import Path
import pprint
import sys
version_release_matching_groups = [
frozenset([
"SPECS-SIGNED/kernel-signed/kernel-signed.spec",
"SPECS/kernel/kernel.spec",
"SPECS/kernel-headers/kernel-headers.spec"
]),
frozenset([
"SPECS-SIGNED/grub2-efi-binary-signed/grub2-efi-binary-signed.spec",
"SPECS/grub2/grub2.spec"
]),
frozenset([
"SPECS/ca-certificates/ca-certificates.spec",
"SPECS/prebuilt-ca-certificates-base/prebuilt-ca-certificates-base.spec"
])
]
version_matching_groups = [
frozenset([
"SPECS/hyperv-daemons/hyperv-daemons.spec",
"SPECS/kernel/kernel.spec",
"SPECS/kernel-hyperv/kernel-hyperv.spec"
]),
frozenset([
"SPECS/azure-iotedge/azure-iotedge.spec",
"SPECS/libiothsm-std/libiothsm-std.spec"
])
]
def check_spec_tags(base_path: str, tags: List[str], groups: List[FrozenSet]) -> Set[FrozenSet]:
"""Returns spec sets which violate matching rules for given tags. """
err_groups = set()
for group in groups:
variants = defaultdict(set)
for spec_filename in group:
parsed_spec = Spec.from_file(Path(base_path, spec_filename))
for tag in tags:
variants[tag].add(getattr(
parsed_spec, tag))
for tag in tags:
if len(variants[tag]) > 1:
err_groups.add(group)
return err_groups
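# Illustrative check for a single group (paths are placeholders for real spec
# files on disk): the rule above reduces to "all parsed tags must be equal".
#   specs = [Spec.from_file(Path(repo_root, p)) for p in
#            ('SPECS/kernel/kernel.spec', 'SPECS/kernel-headers/kernel-headers.spec')]
#   assert len({s.version for s in specs}) == 1   # Version tags must match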
def check_version_release_match_groups(base_path: str) -> Set[FrozenSet]:
return check_spec_tags(base_path, ['version', 'release'], version_release_matching_groups)
def check_version_match_groups(base_path: str) -> Set[FrozenSet]:
return check_spec_tags(base_path, ['version'], version_matching_groups)
def check_matches(base_path: str):
version_match_errors = check_version_match_groups(base_path)
version_release_match_errors = check_version_release_match_groups(
base_path)
printer = pprint.PrettyPrinter()
if len(version_match_errors) or len(version_release_match_errors):
print('The current repository state violates a spec entanglement rule!')
if len(version_match_errors):
print(
'\nPlease update the following sets of specs to have the same Version tags:')
for e in version_match_errors:
printer.pprint(e)
if len(version_release_match_errors):
print(
'\nPlease update the following sets of specs to have the same Version and Release tags:')
for e in version_release_match_errors:
printer.pprint(e)
sys.exit(1)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'repo_root', help='path to the root of the CBL-Mariner repository')
args = parser.parse_args()
check_matches(args.repo_root)
|
Doc/includes/sqlite3/complete_statement.py | cemeyer/tauthon | 473 | 11194588 | # A minimal SQLite shell for experiments
import sqlite3
con = sqlite3.connect(":memory:")
con.isolation_level = None
cur = con.cursor()
buffer = ""
print "Enter your SQL commands to execute in sqlite3."
print "Enter a blank line to exit."
while True:
line = raw_input()
if line == "":
break
buffer += line
if sqlite3.complete_statement(buffer):
try:
buffer = buffer.strip()
cur.execute(buffer)
if buffer.lstrip().upper().startswith("SELECT"):
print cur.fetchall()
except sqlite3.Error as e:
print "An error occurred:", e.args[0]
buffer = ""
con.close()
|
visualiser/facades/ociDbHome.py | flover4/oci-designer-toolkit | 186 | 11194591 | <reponame>flover4/oci-designer-toolkit<gh_stars>100-1000
#!/usr/bin/python
# Copyright (c) 2020, 2021, Oracle and/or its affiliates.
# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl.
"""Provide Module Description
"""
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
__author__ = ["<NAME> (Oracle Cloud Solutions A-Team)"]
__version__ = "1.0.0"
__module__ = "ociDbHome"
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
import oci
from common.okitLogging import getLogger
from facades.ociConnection import OCIDatabaseConnection
# Configure logging
logger = getLogger()
class OCIDbHomes(OCIDatabaseConnection):
def __init__(self, config=None, configfile=None, profile=None, compartment_id=None, vm_cluster_id=None):
self.compartment_id = compartment_id
self.vm_cluster_id = vm_cluster_id
self.db_homes_json = []
super(OCIDbHomes, self).__init__(config=config, configfile=configfile, profile=profile)
def list(self, compartment_id=None, filter=None):
if compartment_id is None:
compartment_id = self.compartment_id
# Add filter to only return AVAILABLE Compartments
if filter is None:
filter = {}
# if 'lifecycle_state' not in filter:
# filter['lifecycle_state'] = 'AVAILABLE'
db_homes = oci.pagination.list_call_get_all_results(self.client.list_db_homes, compartment_id=compartment_id).data if self.vm_cluster_id is None else oci.pagination.list_call_get_all_results(self.client.list_db_homes, compartment_id=compartment_id, vm_cluster_id=self.vm_cluster_id).data
# Convert to Json object
db_homes_json = self.toJson(db_homes)
logger.debug(str(db_homes_json))
# Filter results
self.db_homes_json = self.filterJsonObjectList(db_homes_json, filter)
logger.debug(str(self.db_homes_json))
return self.db_homes_json
|
Lib/test/test_dict2java.py | jeff5/jython-whinchat | 577 | 11194611 | <reponame>jeff5/jython-whinchat
from javatests import Dict2JavaTest
import unittest, test.test_support
# Test the java.util.Map interface of org.python.core.PyDictionary.
# This tests the functionality of being able to pass a dictionaries
# created in Jython to a java method, and the ability to manipulate
# the dictionary object once in Java code. The Java Dict2JavaTest is
# used to run some tests in Java code since they cannot be done on
# the Jython side.
class JythonMapInJavaTest(unittest.TestCase):
def checkcontains(self, keys):
for k in keys:
self.failUnless(k in self.testdict)
self.failUnless(self.testmap.containsKey(k))
def checkdoesntcontain(self, keys):
for k in keys:
self.failIf(k in self.testdict)
self.failIf(self.testmap.containsKey(k))
def checkvalues(self, *keyvalues):
for k, v in keyvalues:
self.assertEquals(v, self.testdict[k])
def checksize(self, correctsize):
self.assertEquals(self.testmap.size(), len(self.testdict))
self.assertEquals(self.testmap.size(), correctsize)
def maketestdict(self, base):
self.testdict = base
self.testmap = Dict2JavaTest(self.testdict)
def test_basic_map_operations(self):
self.maketestdict({"a":"x", "b":"y", "c":"z", "d": None, None: "foo"})
# Make sure we see it on the java side
self.assertEquals(len(self.testdict), self.testmap.size())
self.checkcontains('abcd')
# Add {"e":"1", "f":null, "g":"2"} using the Map.putAll method
oldlen = len(self.testdict)
self.failUnless(self.testmap.test_putAll_efg())
self.checksize(oldlen + 3)
self.checkvalues(('e', '1'), ('f', None), ('g', '2'))
# test Map.get method, get "g" and "d" test will throw an exception if fail
self.failUnless(self.testmap.test_get_gd())
# remove elements with keys "a" and "c" with the Map.remove method
oldlen = len(self.testdict)
self.failUnless(self.testmap.test_remove_ac())
self.checksize(oldlen - 2)
self.checkdoesntcontain('ac')
# test Map.put method, adds {"h":null} and {"i": Integer(3)} and {"g": "3"}
# "g" replaces a previous value of "2"
oldlen = len(self.testdict)
self.failUnless(self.testmap.test_put_hig())
self.checksize(oldlen + 2)
self.checkvalues(('h', None), ('i', 3), ('g', '3'))
self.failUnless(self.testmap.test_java_mapentry())
def test_entryset(self):
self.maketestdict({"h":"x", "b":"y", "g":"z", "e": None, None: "foo", "d":7})
set = self.testmap.entrySet()
self.checksize(set.size())
        # Make sure the set is consistent with self.testdict
for entry in set:
self.failUnless(self.testdict.has_key(entry.getKey()))
self.assertEquals(self.testdict[entry.getKey()], entry.getValue())
self.failUnless(set.contains(entry))
        # make sure changes in the set are reflected in self.testdict
for entry in set:
if entry.getKey() == "h":
hentry = entry
if entry.getKey() == "e":
eentry = entry
# Make sure nulls and non Map.Entry object do not match anything in the set
self.failUnless(self.testmap.test_entry_set_nulls())
self.failUnless(set.remove(eentry))
self.failIf(set.contains(eentry))
self.failIf("e" in self.testdict)
self.failUnless(set.remove(hentry))
self.failIf(set.contains(hentry))
self.failIf("h" in self.testdict)
self.checksize(set.size())
oldlen = set.size()
self.failIf(set.remove(eentry))
self.checksize(oldlen)
# test Set.removeAll method
oldlen = len(self.testdict)
elist = [ entry for entry in set if entry.key in ["b", "g", "d", None]]
self.assertEqual(len(elist), 4)
self.failUnless(set.removeAll(elist))
self.checkdoesntcontain('bdg')
# can't check for None in self.testmap, so do it just for testdict
self.failIf(None in self.testdict)
self.checksize(oldlen - 4)
itr = set.iterator()
while (itr.hasNext()):
val = itr.next()
itr.remove()
self.failUnless(set.isEmpty())
self.checksize(0)
def test_keyset(self):
self.maketestdict({})
self.testmap.put("foo", "bar")
self.testmap.put("num", 5)
self.testmap.put(None, 4.3)
self.testmap.put(34, None)
keyset = self.testmap.keySet()
self.checksize(4)
self.failUnless(keyset.remove(None))
self.checksize(3)
self.failIf(keyset.contains(None))
self.failUnless(keyset.remove(34))
self.checksize(2)
self.failIf(keyset.contains(34))
itr = keyset.iterator()
while itr.hasNext():
key = itr.next()
if key == "num":
itr.remove()
self.checksize(1)
def test_values(self):
self.maketestdict({})
self.testmap.put("foo", "bar")
self.testmap.put("num", "bar")
self.testmap.put(None, 3.2)
self.testmap.put(34, None)
values = self.testmap.values()
self.assertEquals(values.size(), len(self.testdict))
self.checksize(4)
self.failUnless(values.remove(None))
self.checksize(3)
self.assertEquals(values.size(), len(self.testdict))
itr = values.iterator()
while itr.hasNext():
val = itr.next()
if val == "bar":
itr.remove()
self.checksize(1)
self.assertEquals(values.size(), len(self.testdict))
values.clear()
self.failUnless(values.isEmpty())
self.checksize(0)
def test_main():
test.test_support.run_unittest(JythonMapInJavaTest)
if __name__ == '__main__':
test_main()
|
codigo/Live150/exemplo_05.py | cassiasamp/live-de-python | 572 | 11194615 | from typing import Dict
class Pessoa:
def __init__(self, nome: str, sobrenome: str, telefone: Dict[str, str], ddd: int):
self.nome = nome
self.sobrenome = sobrenome
self.telefone = telefone
self.ddd = ddd
eduardo_1 = Pessoa('Eduardo', 8, {'residencial': '1111-111', 'móvel': '999-999-999'}, 19)
eduardo_2 = Pessoa('Eduardo', 8, {'residencial': '1111-111', 'móvel': '999-999-999'}, 19)
|
external/DCNv2/testcpu.py | RoadoneP/aloha | 1,136 | 11194634 | <filename>external/DCNv2/testcpu.py
#!/usr/bin/env python
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import time
import torch
import torch.nn as nn
from torch.autograd import gradcheck
from dcn_v2 import dcn_v2_conv, DCNv2, DCN
from dcn_v2 import dcn_v2_pooling, DCNv2Pooling, DCNPooling
deformable_groups = 1
N, inC, inH, inW = 2, 2, 4, 4
outC = 2
kH, kW = 3, 3
def conv_identify(weight, bias):
weight.data.zero_()
bias.data.zero_()
o, i, h, w = weight.shape
y = h//2
x = w//2
for p in range(i):
for q in range(o):
if p == q:
weight.data[q, p, y, x] = 1.0
def check_zero_offset():
conv_offset = nn.Conv2d(inC, deformable_groups * 2 * kH * kW,
kernel_size=(kH, kW),
stride=(1, 1),
padding=(1, 1),
bias=True)
conv_mask = nn.Conv2d(inC, deformable_groups * 1 * kH * kW,
kernel_size=(kH, kW),
stride=(1, 1),
padding=(1, 1),
bias=True)
dcn_v2 = DCNv2(inC, outC, (kH, kW),
stride=1, padding=1, dilation=1,
deformable_groups=deformable_groups)
conv_offset.weight.data.zero_()
conv_offset.bias.data.zero_()
conv_mask.weight.data.zero_()
conv_mask.bias.data.zero_()
conv_identify(dcn_v2.weight, dcn_v2.bias)
input = torch.randn(N, inC, inH, inW)
offset = conv_offset(input)
mask = conv_mask(input)
mask = torch.sigmoid(mask)
output = dcn_v2(input, offset, mask)
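    # With zero offsets the mask is sigmoid(0) == 0.5 everywhere and the weights
    # were initialized to an identity kernel above, so the raw output equals
    # 0.5 * input; doubling it should therefore reproduce the input exactly.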
output *= 2
d = (input - output).abs().max()
if d < 1e-10:
print('Zero offset passed')
else:
print('Zero offset failed')
print(input)
print(output)
def check_gradient_dconv():
input = torch.rand(N, inC, inH, inW) * 0.01
input.requires_grad = True
offset = torch.randn(N, deformable_groups * 2 * kW * kH, inH, inW) * 2
# offset.data.zero_()
# offset.data -= 0.5
offset.requires_grad = True
mask = torch.rand(N, deformable_groups * 1 * kW * kH, inH, inW)
# mask.data.zero_()
mask.requires_grad = True
mask = torch.sigmoid(mask)
weight = torch.randn(outC, inC, kH, kW)
weight.requires_grad = True
bias = torch.rand(outC)
bias.requires_grad = True
stride = 1
padding = 1
dilation = 1
print('check_gradient_dconv: ',
gradcheck(dcn_v2_conv, (input, offset, mask, weight, bias,
stride, padding, dilation, deformable_groups),
eps=1e-3, atol=1e-4, rtol=1e-2))
def check_pooling_zero_offset():
input = torch.randn(2, 16, 64, 64).zero_()
input[0, :, 16:26, 16:26] = 1.
input[1, :, 10:20, 20:30] = 2.
rois = torch.tensor([
[0, 65, 65, 103, 103],
[1, 81, 41, 119, 79],
]).float()
pooling = DCNv2Pooling(spatial_scale=1.0 / 4,
pooled_size=7,
output_dim=16,
no_trans=True,
group_size=1,
trans_std=0.0)
out = pooling(input, rois, input.new())
s = ', '.join(['%f' % out[i, :, :, :].mean().item()
for i in range(rois.shape[0])])
print(s)
dpooling = DCNv2Pooling(spatial_scale=1.0 / 4,
pooled_size=7,
output_dim=16,
no_trans=False,
group_size=1,
trans_std=0.0)
offset = torch.randn(20, 2, 7, 7).zero_()
dout = dpooling(input, rois, offset)
s = ', '.join(['%f' % dout[i, :, :, :].mean().item()
for i in range(rois.shape[0])])
print(s)
def check_gradient_dpooling():
input = torch.randn(2, 3, 5, 5) * 0.01
N = 4
batch_inds = torch.randint(2, (N, 1)).float()
x = torch.rand((N, 1)).float() * 15
y = torch.rand((N, 1)).float() * 15
w = torch.rand((N, 1)).float() * 10
h = torch.rand((N, 1)).float() * 10
rois = torch.cat((batch_inds, x, y, x + w, y + h), dim=1)
offset = torch.randn(N, 2, 3, 3)
input.requires_grad = True
offset.requires_grad = True
spatial_scale = 1.0 / 4
pooled_size = 3
output_dim = 3
no_trans = 0
group_size = 1
trans_std = 0.0
sample_per_part = 4
part_size = pooled_size
print('check_gradient_dpooling:',
gradcheck(dcn_v2_pooling, (input, rois, offset,
spatial_scale,
pooled_size,
output_dim,
no_trans,
group_size,
part_size,
sample_per_part,
trans_std),
eps=1e-4))
def example_dconv():
input = torch.randn(2, 64, 128, 128)
# wrap all things (offset and mask) in DCN
dcn = DCN(64, 64, kernel_size=(3, 3), stride=1,
padding=1, deformable_groups=2)
# print(dcn.weight.shape, input.shape)
output = dcn(input)
targert = output.new(*output.size())
targert.data.uniform_(-0.01, 0.01)
error = (targert - output).mean()
error.backward()
print(output.shape)
def example_dpooling():
input = torch.randn(2, 32, 64, 64)
batch_inds = torch.randint(2, (20, 1)).float()
x = torch.randint(256, (20, 1)).float()
y = torch.randint(256, (20, 1)).float()
w = torch.randint(64, (20, 1)).float()
h = torch.randint(64, (20, 1)).float()
rois = torch.cat((batch_inds, x, y, x + w, y + h), dim=1)
offset = torch.randn(20, 2, 7, 7)
input.requires_grad = True
offset.requires_grad = True
# normal roi_align
pooling = DCNv2Pooling(spatial_scale=1.0 / 4,
pooled_size=7,
output_dim=32,
no_trans=True,
group_size=1,
trans_std=0.1)
# deformable pooling
dpooling = DCNv2Pooling(spatial_scale=1.0 / 4,
pooled_size=7,
output_dim=32,
no_trans=False,
group_size=1,
trans_std=0.1)
out = pooling(input, rois, offset)
dout = dpooling(input, rois, offset)
print(out.shape)
print(dout.shape)
target_out = out.new(*out.size())
target_out.data.uniform_(-0.01, 0.01)
target_dout = dout.new(*dout.size())
target_dout.data.uniform_(-0.01, 0.01)
e = (target_out - out).mean()
e.backward()
e = (target_dout - dout).mean()
e.backward()
def example_mdpooling():
input = torch.randn(2, 32, 64, 64)
input.requires_grad = True
batch_inds = torch.randint(2, (20, 1)).float()
x = torch.randint(256, (20, 1)).float()
y = torch.randint(256, (20, 1)).float()
w = torch.randint(64, (20, 1)).float()
h = torch.randint(64, (20, 1)).float()
rois = torch.cat((batch_inds, x, y, x + w, y + h), dim=1)
# mdformable pooling (V2)
dpooling = DCNPooling(spatial_scale=1.0 / 4,
pooled_size=7,
output_dim=32,
no_trans=False,
group_size=1,
trans_std=0.1,
deform_fc_dim=1024)
dout = dpooling(input, rois)
target = dout.new(*dout.size())
target.data.uniform_(-0.1, 0.1)
error = (target - dout).mean()
error.backward()
print(dout.shape)
if __name__ == '__main__':
example_dconv()
example_dpooling()
example_mdpooling()
check_pooling_zero_offset()
# zero offset check
if inC == outC:
check_zero_offset()
check_gradient_dpooling()
check_gradient_dconv()
# """
# ****** Note: backward is not reentrant error may not be a serious problem,
# ****** since the max error is less than 1e-7,
# ****** Still looking for what trigger this problem
# """
|
22_itictactoe/typehints.py | kent13/tiny_python_projects | 742 | 11194647 | <gh_stars>100-1000
#!/usr/bin/env python3
""" Demonstrating type hints """
from typing import List, NamedTuple, Optional
class State(NamedTuple):
board: List[str] = list('.' * 9)
player: str = 'X'
quit: bool = False
draw: bool = False
error: Optional[str] = None
winner: Optional[str] = None
state = State(quit='False')
print(state)
|
venv/lib/python3.9/site-packages/configparser.py | almmello/frozen | 6,989 | 11194662 | <filename>venv/lib/python3.9/site-packages/configparser.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Convenience module importing everything from backports.configparser."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from backports.configparser import (
RawConfigParser,
ConfigParser,
SafeConfigParser,
SectionProxy,
Interpolation,
BasicInterpolation,
ExtendedInterpolation,
LegacyInterpolation,
NoSectionError,
DuplicateSectionError,
DuplicateOptionError,
NoOptionError,
InterpolationError,
InterpolationMissingOptionError,
InterpolationSyntaxError,
InterpolationDepthError,
ParsingError,
MissingSectionHeaderError,
ConverterMapping,
DEFAULTSECT,
MAX_INTERPOLATION_DEPTH,
)
from backports.configparser import Error, _UNSET, _default_dict, _ChainMap # noqa: F401
__all__ = [
"NoSectionError",
"DuplicateOptionError",
"DuplicateSectionError",
"NoOptionError",
"InterpolationError",
"InterpolationDepthError",
"InterpolationMissingOptionError",
"InterpolationSyntaxError",
"ParsingError",
"MissingSectionHeaderError",
"ConfigParser",
"SafeConfigParser",
"RawConfigParser",
"Interpolation",
"BasicInterpolation",
"ExtendedInterpolation",
"LegacyInterpolation",
"SectionProxy",
"ConverterMapping",
"DEFAULTSECT",
"MAX_INTERPOLATION_DEPTH",
]
# NOTE: names missing from __all__ imported anyway for backwards compatibility.
|
FWCore/Integration/test/ref_merge_cfg.py | ckamtsikis/cmssw | 852 | 11194667 | import FWCore.ParameterSet.Config as cms
process = cms.Process("MERGE")
process.source = cms.Source("PoolSource",
fileNames = cms.untracked.vstring("file:ref_merge_prod1.root",
"file:ref_merge_prod2.root")
)
process.out = cms.OutputModule("PoolOutputModule",
fileName = cms.untracked.string("ref_merge.root")
)
process.tester = cms.EDAnalyzer("OtherThingAnalyzer",
other = cms.untracked.InputTag("d","testUserTag"))
process.o = cms.EndPath(process.out+process.tester)
|
src/ntlmrecon/__init__.py | bbhunter/NTLMRecon | 139 | 11194694 | <filename>src/ntlmrecon/__init__.py
import argparse
import json
import requests
import csv
import sys
import os
from colorama import init as init_colorama
from multiprocessing.dummy import Pool as ThreadPool
from ntlmrecon.ntlmutil import gather_ntlm_info
from ntlmrecon.misc import print_banner, INTERNAL_WORDLIST
from ntlmrecon.inpututils import readfile_and_gen_input, read_input_and_gen_list
from termcolor import colored
from urllib.parse import urlsplit
# Initialize colors in Windows - Because I like Windows too!
init_colorama()
# make the Pool of workers
# TODO: Make this an argument
FOUND_DOMAINS = []
def in_found_domains(url):
split_url = urlsplit(url)
if split_url.hostname in FOUND_DOMAINS:
return True
else:
return False
def write_records_to_csv(records, filename):
if os.path.exists(filename):
append_write = 'a'
else:
append_write = 'w+'
with open(filename, append_write) as file:
writer = csv.writer(file)
if append_write == 'w+':
writer.writerow(['URL', 'AD Domain Name', 'Server Name', 'DNS Domain Name', 'FQDN', 'Parent DNS Domain'])
for record in records:
csv_record = list()
url = list(record.keys())[0]
csv_record.append(url)
csv_record.extend(list(record[url]['data'].values()))
writer.writerow(csv_record)
def main():
# Init arg parser
parser = argparse.ArgumentParser(description=print_banner())
group = parser.add_mutually_exclusive_group()
group.add_argument('--input', '-i', help='Pass input as an IP address, URL or CIDR to enumerate NTLM endpoints')
group.add_argument('--infile', '-I', help='Pass input from a local file')
parser.add_argument('--wordlist', help='Override the internal wordlist with a custom wordlist', required=False)
parser.add_argument('--threads', help="Set number of threads (Default: 10)", required=False, default=10)
parser.add_argument('--output-type', '-o', help='Set output type. JSON (TODO) and CSV supported (Default: CSV)',
required=False, default='csv', action="store_true")
parser.add_argument('--outfile', '-O', help='Set output file name (Default: ntlmrecon.csv)', default='ntlmrecon.csv')
parser.add_argument('--random-user-agent', help="TODO: Randomize user agents when sending requests (Default: False)",
default=False, action="store_true")
parser.add_argument('--force-all', help="Force enumerate all endpoints even if a valid endpoint is found for a URL "
"(Default : False)", default=False, action="store_true")
parser.add_argument('--shuffle', help="Break order of the input files", default=False, action="store_true")
parser.add_argument('-f', '--force', help="Force replace output file if it already exists", action="store_true",
default=False)
args = parser.parse_args()
if not args.input and not args.infile:
print(colored("[!] How about you check the -h flag?", "red"))
if os.path.isdir(args.outfile):
print(colored("[!] Invalid filename. Please enter a valid filename!", "red"))
sys.exit()
elif os.path.exists(args.outfile) and not args.force:
print(colored("[!] Output file {} already exists. "
"Choose a different file name or use -f to overwrite the file".format(args.outfile), "red"))
sys.exit()
pool = ThreadPool(int(args.threads))
if args.input:
records = read_input_and_gen_list(args.input, shuffle=args.shuffle)
elif args.infile:
records = readfile_and_gen_input(args.infile, shuffle=args.shuffle)
else:
sys.exit(1)
# Check if a custom wordlist is specified
if args.wordlist:
try:
with open(args.wordlist, 'r') as fr:
wordlist = fr.read().split('\n')
wordlist = [x for x in wordlist if x]
except (OSError, FileNotFoundError):
print(colored("[!] Cannot read the specified file {}. Check if file exists and you have "
"permission to read it".format(args.wordlist), "red"))
sys.exit(1)
else:
wordlist = INTERNAL_WORDLIST
# Identify all URLs with web servers running
for record in records:
print(colored("[+] Brute-forcing {} endpoints on {}".format(len(wordlist), record), "yellow"))
all_combos = []
for word in wordlist:
if word.startswith('/'):
all_combos.append(str(record+word))
else:
all_combos.append(str(record+"/"+word))
results = pool.map(gather_ntlm_info, all_combos)
results = [x for x in results if x]
if results:
write_records_to_csv(results, args.outfile)
print(colored('[+] Output for {} saved to {} '.format(record, args.outfile), 'green'))
|
angler/__init__.py | joamatab/angler | 102 | 11194698 | <reponame>joamatab/angler<filename>angler/__init__.py
# used for setup.py
name = "angler"
__version__ = '0.0.15'
# import the main classes
from .optimization import Optimization
from .simulation import Simulation
# import the various utilities
from .constants import *
from .plot import *
from .structures import *
from .utils import * |
local_configs/segformer/B3/segformer.b3.1024x1024.city.160k.py | wzpscott/SegformerDistillation | 903 | 11194699 | _base_ = [
'../../_base_/models/segformer.py',
'../../_base_/datasets/cityscapes_1024x1024_repeat.py',
'../../_base_/default_runtime.py',
'../../_base_/schedules/schedule_160k_adamw.py'
]
# model settings
norm_cfg = dict(type='SyncBN', requires_grad=True)
find_unused_parameters = True
model = dict(
type='EncoderDecoder',
pretrained='pretrained/mit_b3.pth',
backbone=dict(
type='mit_b3',
style='pytorch'),
decode_head=dict(
type='SegFormerHead',
in_channels=[64, 128, 320, 512],
in_index=[0, 1, 2, 3],
feature_strides=[4, 8, 16, 32],
channels=128,
dropout_ratio=0.1,
num_classes=19,
norm_cfg=norm_cfg,
align_corners=False,
decoder_params=dict(embed_dim=768),
loss_decode=dict(type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
# model training and testing settings
train_cfg=dict(),
# test_cfg=dict(mode='whole'))
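# Sliding-window inference: 1024x1024 crops with a 768-pixel stride instead of whole-image forward passes.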
test_cfg=dict(mode='slide', crop_size=(1024,1024), stride=(768,768)))
# data
data = dict(samples_per_gpu=1)
evaluation = dict(interval=4000, metric='mIoU')
# optimizer
optimizer = dict(_delete_=True, type='AdamW', lr=0.00006, betas=(0.9, 0.999), weight_decay=0.01,
paramwise_cfg=dict(custom_keys={'pos_block': dict(decay_mult=0.),
'norm': dict(decay_mult=0.),
'head': dict(lr_mult=10.)
}))
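# Polynomial decay to zero with a 1500-iteration linear warmup, stepped per iteration rather than per epoch.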
lr_config = dict(_delete_=True, policy='poly',
warmup='linear',
warmup_iters=1500,
warmup_ratio=1e-6,
power=1.0, min_lr=0.0, by_epoch=False)
|
Python/Tests/TestData/TestDiscoverer/ConfigPythonFunctions/test_misc_prefixes.py | techkey/PTVS | 404 | 11194705 | <filename>Python/Tests/TestData/TestDiscoverer/ConfigPythonFunctions/test_misc_prefixes.py
def test_func():
pass
def check_func():
pass
def example_func():
pass
def verify_func():
pass
|
academicstoday_project/account/tests/test_profile.py | LeeDoona/EasyGrading | 146 | 11194707 | <reponame>LeeDoona/EasyGrading
from django.core.urlresolvers import resolve
from django.http import HttpRequest
from django.http import QueryDict
from django.test import TestCase
from django.test import Client
from django.contrib.auth.models import User
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.decorators import login_required
import json
from account.views import profile
# Contants
TEST_USER_EMAIL = "<EMAIL>"
TEST_USER_USERNAME = "Ledo"
TEST_USER_PASSWORD = "password"
class ProfileTestCase(TestCase):
def tearDown(self):
User.objects.all().delete()
def setUp(self):
# Create our user.
user = User.objects.create_user(
email=TEST_USER_EMAIL,
username=TEST_USER_USERNAME,
password=<PASSWORD>
)
user.is_active = True
user.save()
def test_url_resolves_to_profile_page_view(self):
found = resolve('/profile')
self.assertEqual(found.func, profile.profile_page)
def test_profile_page_with_success(self):
        # Extra parameters to make this an Ajax-style request.
kwargs = {'HTTP_X_REQUESTED_WITH':'XMLHttpRequest'}
# Test
client = Client()
client.login(
username=TEST_USER_USERNAME,
password=<PASSWORD>
)
response = client.post('/profile',{}, **kwargs)
# Verify: Check that the response is 200 OK.
self.assertEqual(response.status_code, 200)
        # Verify: Check that profile content was returned.
self.assertIn(b'Email',response.content)
self.assertIn(b'Profile',response.content)
def test_update_user_with_success(self):
        # Extra parameters to make this an Ajax-style request.
kwargs = {'HTTP_X_REQUESTED_WITH':'XMLHttpRequest'}
# Test
client = Client()
client.login(
username=TEST_USER_USERNAME,
password=<PASSWORD>
)
response = client.post('/update_user',{
'first_name': 'Evolver',
'last_name': '1<PASSWORD>',
'email': '<EMAIL>',
}, **kwargs)
# Verify: Check that the response is 200 OK.
self.assertEqual(response.status_code, 200)
# Verify: Successful response.
json_string = response.content.decode(encoding='UTF-8')
array = json.loads(json_string)
self.assertEqual(array['status'], 'success')
self.assertEqual(array['message'], 'updated user')
        # Verify: Updated in database
try:
user = User.objects.get(email="<EMAIL>")
except User.DoesNotExist:
user = None
self.assertEqual(user.first_name, 'Evolver')
self.assertEqual(user.last_name, '1234')
|
riscv/machine.py | kevinyuan/pydgin | 159 | 11194745 | #=========================================================================
# machine.py
#=========================================================================
from pydgin.storage import RegisterFile
from pydgin.utils import specialize, r_ulonglong
from utils import trim_64
from isa import ENABLE_FP
from csr import Csr, PRV_M, PRV_H, PRV_S, PRV_U
#-------------------------------------------------------------------------
# State
#-------------------------------------------------------------------------
class State( object ):
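  # RPython JIT hint: keep these fields in the trace instead of re-reading them from the object.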
_virtualizable_ = ['pc', 'num_insts']
# defines immutable fields that can't change during execution
_immutable_fields_ = ['xlen', 'flen', 'extensions']
def __init__( self, memory, debug, reset_addr=0x400,
xlen=64,
flen=64,
extensions="imafd" ):
self.pc = reset_addr
self.xlen = xlen # defines the bitwidth of int registers
self.flen = flen # defines the bitwidth of fp registers
# TODO: convert to lower
self.extensions = extensions
self.rf = RiscVRegisterFile( nbits=self.xlen )
self.csr = Csr( self )
# TODO: a bit hacky...
if self.extension_enabled( "f" ):
self.fp = RiscVFPRegisterFile()
self.fp.debug = debug
self.fcsr = r_ulonglong( 0 ) # Bits( 32 )
self.mem = memory
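    # Reservation address for LR/SC, only tracked when the atomic "A" extension is enabled.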
if self.extension_enabled( "a" ):
self.load_reservation = 0 # Bits( 64 )
self .debug = debug
self.rf .debug = debug
self.mem.debug = debug
# other state
self.prv = PRV_M
self.mepc = 0
self.mbadaddr = 0
self.mtimecmp = 0
self.mscratch = 0
self.mcause = 0
self.minstret = 0
self.mie = 0
self.mip = 0
self.sepc = 0
self.sbadaddr = 0
self.sscratch = 0
self.stvec = 0
self.sptbr = 0
self.scause = 0
self.sutime_delta = 0
self.suinstret_delta = 0
self.tohost = 0
self.fromhost = 0
# coprocessor registers
self.status = 0
self.stats_en = 0
self.num_insts = 0
self.stat_num_insts = 0
    # we need a dedicated running flag because status could be 0 on a
    # syscall_exit
self.running = True
# indicate if this is running a self-checking test
self.testbin = False
# executable name
self.exe_name = ""
# syscall stuff... TODO: should this be here?
self.breakpoint = 0
def fetch_pc( self ):
return self.pc
def extension_enabled( self, ext ):
return ext in self.extensions
#-----------------------------------------------------------------------
# RiscVRegisterFile
#-----------------------------------------------------------------------
# TODO: we should use generic register file if possible
class RiscVRegisterFile( RegisterFile ):
def __init__( self, nbits ):
RegisterFile.__init__( self,
constant_zero=True,
num_regs=32,
nbits=nbits
)
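  # Register writes are trimmed to 64 bits so stored values never exceed the machine width.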
@specialize.argtype(2)
def __setitem__( self, idx, value ):
return RegisterFile.__setitem__( self, idx, trim_64( value ) )
#class RiscVFPRegisterFile( RegisterFile ):
# def __init__( self ):
# RegisterFile.__init__( self,
# constant_zero=False,
# num_regs=32,
# nbits=64
# )
#
# @specialize.argtype(2)
# def __setitem__( self, idx, value ):
# return RegisterFile.__setitem__( self, idx, value )
#-------------------------------------------------------------------------
# RiscVFPRegisterFile
#-------------------------------------------------------------------------
# TODO: Hacky RPython Workaround
from pydgin.utils import r_uint
from pydgin.debug import Debug, pad, pad_hex
class RiscVFPRegisterFile( object ):
def __init__( self, num_regs=32, nbits=64 ):
self.num_regs = num_regs
self.regs = [ r_uint(0) ] * self.num_regs
self.debug = Debug()
self.nbits = nbits
self.debug_nchars = nbits / 4
def __getitem__( self, idx ):
if self.debug.enabled( "rf" ):
print ':: RD.RF[%s] = %s' % (
pad( "%d" % idx, 2 ),
pad_hex( self.regs[idx],
len=self.debug_nchars ) ),
return self.regs[idx]
@specialize.argtype(2)
def __setitem__( self, idx, value ):
value = trim_64(value)
self.regs[idx] = value
if self.debug.enabled( "rf" ):
print ':: WR.RF[%s] = %s' % (
pad( "%d" % idx, 2 ),
pad_hex( self.regs[idx],
len=self.debug_nchars ) ),
#-----------------------------------------------------------------------
# print_regs
#-----------------------------------------------------------------------
# prints all registers (register dump)
# per_row specifies the number of registers to display per row
def print_regs( self, per_row=6 ):
for c in xrange( 0, self.num_regs, per_row ):
str = ""
for r in xrange( c, min( self.num_regs, c+per_row ) ):
str += "%s:%s " % ( pad( "%d" % r, 2 ),
pad_hex( self.regs[r] ) )
print str
|
saleor/payment/migrations/0024_auto_20210326_0837.py | fairhopeweb/saleor | 15,337 | 11194747 | # Generated by Django 3.1.7 on 2021-03-26 08:37
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("payment", "0023_auto_20201110_0834"),
]
operations = [
migrations.AlterField(
model_name="transaction",
name="error",
field=models.CharField(max_length=256, null=True),
),
]
|