max_stars_repo_path | max_stars_repo_name | max_stars_count | id | content
---|---|---|---|---|
tests/orm/data/test_enum.py | aiidateam/aiida_core | 153 | 11182070 | # -*- coding: utf-8 -*-
"""Tests for the :class:`aiida.orm.nodes.data.enum.Enum` data plugin."""
import enum
import pytest
from aiida.common import links
from aiida.orm import load_node
from aiida.orm.nodes.data.enum import EnumData
class DummyEnum(enum.Enum):
"""Dummy enum for testing."""
OPTION_A = 'a'
OPTION_B = 'b'
def test_construct():
"""Test the ``EnumData`` constructor."""
instance = DummyEnum.OPTION_A
node = EnumData(instance)
assert isinstance(node, EnumData)
assert not node.is_stored
@pytest.mark.parametrize('value', (None, 'string'))
def test_construct_invalid_type(value):
"""Test the ``EnumData`` constructor raises if object is ``None``."""
with pytest.raises(TypeError, match=r'Got object of type .*, expecting .*.'):
EnumData(value)
def test_load_node():
"""Test loading a stored ``EnumData`` node."""
member = DummyEnum.OPTION_A
node = EnumData(member)
node.store()
loaded = load_node(node.pk)
assert isinstance(loaded, EnumData)
assert loaded.is_stored
def test_name():
"""Test the ``name`` property."""
member = DummyEnum.OPTION_A
node = EnumData(member)
assert node.name == member.name
node.store()
assert node.name == member.name
loaded = load_node(node.pk)
assert loaded.name == member.name
def test_value():
"""Test the ``value`` property."""
member = DummyEnum.OPTION_A
node = EnumData(member)
assert node.value == member.value
node.store()
assert node.value == member.value
loaded = load_node(node.pk)
assert loaded.value == member.value
def test_get_enum():
"""Test the ``get_enum`` method."""
member = DummyEnum.OPTION_A
node = EnumData(member)
assert node.get_enum() == DummyEnum
node.store()
assert node.get_enum() == DummyEnum
loaded = load_node(node.pk)
assert loaded.get_enum() == DummyEnum
def test_get_member():
"""Test the ``get_member`` method."""
member = DummyEnum.OPTION_A
node = EnumData(member)
assert node.get_member() == member
node.store()
assert node.get_member() == member
loaded = load_node(node.pk)
assert loaded.get_member() == member
def test_get_member_module_not_importable():
"""Test the ``get_member`` property when the enum cannot be imported from the identifier."""
member = DummyEnum.OPTION_A
node = EnumData(member)
node.base.attributes.set(EnumData.KEY_IDENTIFIER, 'aiida.common.links:NonExistingEnum')
node.store()
loaded = load_node(node.pk)
with pytest.raises(ImportError):
loaded.get_member() # pylint: disable=pointless-statement
def test_get_member_invalid_value(monkeypatch):
"""Test the ``get_member`` method when stored value is no longer valid for the class loaded from the identifier."""
member = links.LinkType.RETURN
node = EnumData(member).store()
class ChangedLinkType(enum.Enum):
"""Change the definition of the :class:`aiida.common.links.LinkType`"""
RETURN = 'different_return'
# And then monkeypatch the :mod:`aiida.common.links` module with the mock enum.
monkeypatch.setattr(links, 'LinkType', ChangedLinkType)
loaded = load_node(node.pk)
with pytest.raises(ValueError, match=r'The stored value `return` is no longer a valid value for the enum `.*`'):
loaded.get_member() # pylint: disable=pointless-statement
def test_eq():
"""Test the ``__eq__`` implementation."""
node_a = EnumData(DummyEnum.OPTION_A)
node_b = EnumData(DummyEnum.OPTION_B)
assert node_a == DummyEnum.OPTION_A
assert node_a != DummyEnum.OPTION_B
assert node_a == node_a # pylint: disable=comparison-with-itself
assert node_a != node_b
assert node_a != DummyEnum.OPTION_A.value
# If the identifier cannot be resolved, the equality should not raise but simply return ``False``.
node_a.base.attributes.set(EnumData.KEY_IDENTIFIER, 'aiida.common.links:NonExistingEnum')
assert node_a != DummyEnum.OPTION_A
# If the value is incorrect for the resolved identifier, the equality should not raise but simply return ``False``.
node_b.base.attributes.set(EnumData.KEY_VALUE, 'c')
assert node_b != DummyEnum.OPTION_B
|
orc8r/gateway/python/magma/common/redis/mocks/mock_redis.py | Aitend/magma | 849 | 11182133 | """
Copyright 2020 The Magma Authors.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree.
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from redis.exceptions import RedisError
# For non-failure cases, just use the fakeredis module
class MockUnavailableRedis(object):
"""
MockUnavailableRedis implements a mock Redis Server that always raises
a connection exception
"""
def __init__(self, host, port):
self.host = host
self.port = port
def lock(self, key):
raise RedisError("mock redis error")
def keys(self, pattern=".*"):
""" Mock keys with regex pattern matching."""
raise RedisError("mock redis error")
|
scripts/eval/eval.py | bertinetto/r2d2 | 111 | 11182136 | <reponame>bertinetto/r2d2
# This file originally appeared in https://github.com/jakesnell/prototypical-networks
# and has been modified for the purpose of this project
import os
import json
import math
import torch
import torchnet as tnt
from fewshots.utils import filter_opt, merge_dict
import fewshots.utils.data as data_utils
import fewshots.utils.model as model_utils
def main(opts):
os.environ['CUDA_VISIBLE_DEVICES'] = str(opts['data.gpu'])
model = torch.load(opts['model.model_path'])
if opts['data.cuda']:
model.cuda()
model.eval()
# load opts
model_opt_file = os.path.join(os.path.dirname(opts['model.model_path']), 'opt.json')
with open(model_opt_file, 'r') as f:
model_opt = json.load(f)
# construct data
data_opt = {'data.' + k: v for k, v in filter_opt(model_opt, 'data').items()}
augm_opt = {'augm.' + k: v for k, v in filter_opt(model_opt, 'augm').items()}
episode_fields = {
'data.test_way': 'data.way',
'data.test_shot': 'data.shot',
'data.test_episodes': 'data.train_episodes'
}
for k, v in episode_fields.items():
if opts[k] != 0:
data_opt[k] = opts[k]
elif model_opt[k] != 0:
data_opt[k] = model_opt[k]
else:
data_opt[k] = model_opt[v]
merged_opt = merge_dict(data_opt, augm_opt)
print("Evaluating {:d}-way, {:d}-shot over {:d} episodes".format(
data_opt['data.test_way'], data_opt['data.test_shot'], data_opt['data.test_episodes']))
torch.manual_seed(1234)
if opts['data.cuda']:
torch.cuda.manual_seed(1234)
data = data_utils.load_data(merged_opt, ['test'])
meters = {field: tnt.meter.AverageValueMeter() for field in model_opt['log.fields']}
model_utils.evaluate(model, data['test'], meters, desc="test")
expm_path = opts['model.model_path'].split('/')
expm_path = '/'.join(expm_path[:-1])
fh = open(os.path.join(expm_path, 'eval_results.txt'), 'w')
for field, meter in meters.items():
mean, std = meter.value()
results_str = "test {:s}: {:0.6f} +/- {:0.6f}".format(field, mean,
1.96 * std / math.sqrt(data_opt['data.test_episodes']))
print(results_str)
fh.write(results_str + '\n')
fh.close()
|
examples/object_serialization.py | wyfo/apimodel | 118 | 11182141 | <reponame>wyfo/apimodel<filename>examples/object_serialization.py
from dataclasses import dataclass
from typing import Any
from apischema import alias, serialize, type_name
from apischema.json_schema import JsonSchemaVersion, definitions_schema
from apischema.objects import get_field, object_serialization
@dataclass
class Data:
id: int
content: str
@property
def size(self) -> int:
return len(self.content)
def get_details(self) -> Any:
...
# Serialization fields can be a str/field or a function/method/property
size_only = object_serialization(
Data, [get_field(Data).id, Data.size], type_name("DataSize")
)
# ["id", Data.size] would also work
def complete_data():
return [
..., # shortcut to include all the fields
Data.size,
(Data.get_details, alias("details")), # add/override metadata using tuple
]
# Serialization fields computation can be deferred in a function
# The serialization name will then be defaulted to the function name
complete = object_serialization(Data, complete_data)
data = Data(0, "data")
assert serialize(Data, data, conversion=size_only) == {"id": 0, "size": 4}
assert serialize(Data, data, conversion=complete) == {
"id": 0,
"content": "data",
"size": 4,
"details": None, # because get_details return None in this example
}
assert definitions_schema(
serialization=[(Data, size_only), (Data, complete)],
version=JsonSchemaVersion.OPEN_API_3_0,
) == {
"DataSize": {
"type": "object",
"properties": {"id": {"type": "integer"}, "size": {"type": "integer"}},
"required": ["id", "size"],
"additionalProperties": False,
},
"CompleteData": {
"type": "object",
"properties": {
"id": {"type": "integer"},
"content": {"type": "string"},
"size": {"type": "integer"},
"details": {},
},
"required": ["id", "content", "size", "details"],
"additionalProperties": False,
},
}
|
utility/preprocess/queries_split.py | techthiyanes/ColBERT | 421 | 11182191 | <filename>utility/preprocess/queries_split.py
"""
Divide a query set into two.
"""
import os
import math
import ujson
import random
from argparse import ArgumentParser
from collections import OrderedDict
from colbert.utils.utils import print_message
def main(args):
random.seed(12345)
"""
Load the queries
"""
Queries = OrderedDict()
print_message(f"#> Loading queries from {args.input}..")
with open(args.input) as f:
for line in f:
qid, query = line.strip().split('\t')
assert qid not in Queries
Queries[qid] = query
"""
Apply the splitting
"""
size_a = len(Queries) - args.holdout
size_b = args.holdout
size_a, size_b = max(size_a, size_b), min(size_a, size_b)
assert size_a > 0 and size_b > 0, (len(Queries), size_a, size_b)
print_message(f"#> Deterministically splitting the queries into ({size_a}, {size_b})-sized splits.")
keys = list(Queries.keys())
sample_b_indices = sorted(list(random.sample(range(len(keys)), size_b)))
sample_a_indices = sorted(list(set.difference(set(list(range(len(keys)))), set(sample_b_indices))))
assert len(sample_a_indices) == size_a
assert len(sample_b_indices) == size_b
sample_a = [keys[idx] for idx in sample_a_indices]
sample_b = [keys[idx] for idx in sample_b_indices]
"""
Write the output
"""
output_path_a = f'{args.input}.a'
output_path_b = f'{args.input}.b'
assert not os.path.exists(output_path_a), output_path_a
assert not os.path.exists(output_path_b), output_path_b
print_message(f"#> Writing the splits out to {output_path_a} and {output_path_b} ...")
for output_path, sample in [(output_path_a, sample_a), (output_path_b, sample_b)]:
with open(output_path, 'w') as f:
for qid in sample:
query = Queries[qid]
line = '\t'.join([qid, query]) + '\n'
f.write(line)
if __name__ == "__main__":
parser = ArgumentParser(description="queries_split.")
# Input Arguments.
parser.add_argument('--input', dest='input', required=True)
parser.add_argument('--holdout', dest='holdout', required=True, type=int)
args = parser.parse_args()
main(args)
|
nautobot/tenancy/migrations/0002_auto_slug.py | psmware-ltd/nautobot | 384 | 11182220 | # Generated by Django 3.1.13 on 2021-08-05 03:23
from django.db import migrations
import nautobot.core.fields
class Migration(migrations.Migration):
dependencies = [
("tenancy", "0001_initial"),
]
operations = [
migrations.AlterField(
model_name="tenant",
name="slug",
field=nautobot.core.fields.AutoSlugField(blank=True, max_length=100, populate_from="name", unique=True),
),
migrations.AlterField(
model_name="tenantgroup",
name="slug",
field=nautobot.core.fields.AutoSlugField(blank=True, max_length=100, populate_from="name", unique=True),
),
]
|
biliup/engine/__init__.py | xxxxuanran/biliup | 145 | 11182229 | <filename>biliup/engine/__init__.py<gh_stars>100-1000
from .decorators import Plugin
def invert_dict(d: dict):
inverse_dict = {}
for k, v in d.items():
for item in v:
inverse_dict[item] = k
return inverse_dict
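# Example usage (hypothetical plugin mapping), kept as a comment to avoid
# import-time side effects in this package __init__:
#   invert_dict({'plugin_a': ['site1', 'site2'], 'plugin_b': ['site3']})
#   -> {'site1': 'plugin_a', 'site2': 'plugin_a', 'site3': 'plugin_b'}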
__all__ = ['invert_dict', 'Plugin']
|
mayan/apps/motd/permissions.py | eshbeata/open-paperless | 2,743 | 11182235 | from __future__ import absolute_import, unicode_literals
from django.utils.translation import ugettext_lazy as _
from permissions import PermissionNamespace
namespace = PermissionNamespace('motd', _('Message of the day'))
permission_message_create = namespace.add_permission(
name='message_create', label=_('Create messages')
)
permission_message_delete = namespace.add_permission(
name='message_delete', label=_('Delete messages')
)
permission_message_edit = namespace.add_permission(
name='message_edit', label=_('Edit messages')
)
permission_message_view = namespace.add_permission(
name='message_view', label=_('View messages')
)
|
src/ZODB/tests/testZODB.py | pretagov/ZODB | 514 | 11182262 | <reponame>pretagov/ZODB<filename>src/ZODB/tests/testZODB.py<gh_stars>100-1000
##############################################################################
#
# Copyright (c) 2001, 2002 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
from persistent import Persistent
from persistent.mapping import PersistentMapping
from ZODB.POSException import TransactionFailedError
import doctest
from BTrees.OOBTree import OOBTree
import transaction
import unittest
import ZODB
import ZODB.FileStorage
import ZODB.MappingStorage
import ZODB.tests.util
class P(Persistent):
pass
class ZODBTests(ZODB.tests.util.TestCase):
def setUp(self):
ZODB.tests.util.TestCase.setUp(self)
self._storage = ZODB.FileStorage.FileStorage(
'ZODBTests.fs', create=1)
self._db = ZODB.DB(self._storage)
def tearDown(self):
self._db.close()
ZODB.tests.util.TestCase.tearDown(self)
def populate(self):
transaction.begin()
conn = self._db.open()
root = conn.root()
root['test'] = pm = PersistentMapping()
for n in range(100):
pm[n] = PersistentMapping({0: 100 - n})
transaction.get().note(u'created test data')
transaction.commit()
conn.close()
def checkExportImport(self, abort_it=False):
self.populate()
conn = self._db.open()
try:
self.duplicate(conn, abort_it)
finally:
conn.close()
conn = self._db.open()
try:
self.verify(conn, abort_it)
finally:
conn.close()
def duplicate(self, conn, abort_it):
transaction.begin()
transaction.get().note(u'duplication')
root = conn.root()
ob = root['test']
assert len(ob) > 10, 'Insufficient test data'
try:
import tempfile
with tempfile.TemporaryFile(prefix="DUP") as f:
ob._p_jar.exportFile(ob._p_oid, f)
assert f.tell() > 0, 'Did not export correctly'
f.seek(0)
new_ob = ob._p_jar.importFile(f)
self.assertEqual(new_ob, ob)
root['dup'] = new_ob
if abort_it:
transaction.abort()
else:
transaction.commit()
except:
transaction.abort()
raise
def verify(self, conn, abort_it):
transaction.begin()
root = conn.root()
ob = root['test']
try:
ob2 = root['dup']
except KeyError:
if abort_it:
# Passed the test.
return
else:
raise
else:
self.assertTrue(not abort_it, 'Did not abort duplication')
l1 = list(ob.items())
l1.sort()
l2 = list(ob2.items())
l2.sort()
l1 = list(map(lambda k_v: (k_v[0], k_v[1][0]), l1))
l2 = list(map(lambda k_v1: (k_v1[0], k_v1[1][0]), l2))
self.assertEqual(l1, l2)
self.assertTrue(ob._p_oid != ob2._p_oid)
self.assertEqual(ob._p_jar, ob2._p_jar)
oids = {}
for v in ob.values():
oids[v._p_oid] = 1
for v in ob2.values():
assert v._p_oid not in oids, (
'Did not fully separate duplicate from original')
transaction.commit()
def checkExportImportAborted(self):
self.checkExportImport(abort_it=True)
def checkResetCache(self):
# The cache size after a reset should be 0. Note that
# _resetCache is not a public API, but the resetCaches()
# function is, and resetCaches() causes _resetCache() to be
# called.
self.populate()
conn = self._db.open()
conn.root()
self.assertTrue(len(conn._cache) > 0) # Precondition
conn._resetCache()
self.assertEqual(len(conn._cache), 0)
def checkResetCachesAPI(self):
# Checks the resetCaches() API.
# (resetCaches used to be called updateCodeTimestamp.)
self.populate()
conn = self._db.open()
conn.root()
self.assertTrue(len(conn._cache) > 0) # Precondition
ZODB.Connection.resetCaches()
conn.close()
self.assertTrue(len(conn._cache) > 0) # Still not flushed
conn.open() # simulate the connection being reopened
self.assertEqual(len(conn._cache), 0)
def checkExplicitTransactionManager(self):
# Test of transactions that apply to only the connection,
# not the thread.
tm1 = transaction.TransactionManager()
conn1 = self._db.open(transaction_manager=tm1)
tm2 = transaction.TransactionManager()
conn2 = self._db.open(transaction_manager=tm2)
try:
r1 = conn1.root()
r2 = conn2.root()
if 'item' in r1:
del r1['item']
tm1.get().commit()
r1.get('item')
r2.get('item')
r1['item'] = 1
tm1.get().commit()
self.assertEqual(r1['item'], 1)
# r2 has not seen a transaction boundary,
# so it should be unchanged.
self.assertEqual(r2.get('item'), None)
conn2.sync()
# Now r2 is updated.
self.assertEqual(r2['item'], 1)
# Now, for good measure, send an update in the other direction.
r2['item'] = 2
tm2.get().commit()
self.assertEqual(r1['item'], 1)
self.assertEqual(r2['item'], 2)
conn1.sync()
conn2.sync()
self.assertEqual(r1['item'], 2)
self.assertEqual(r2['item'], 2)
finally:
conn1.close()
conn2.close()
def checkSavepointDoesntGetInvalidations(self):
# Prior to ZODB 3.2.9 and 3.4, Connection.tpc_finish() processed
# invalidations even for a subtxn commit. This could make
# inconsistent state visible after a subtxn commit. There was a
# suspicion that POSKeyError was possible as a result, but I wasn't
# able to construct a case where that happened.
# Subtxns are deprecated now, but it's good to check that the
# same kind of thing doesn't happen when making savepoints either.
# Set up the database, to hold
# root --> "p" -> value = 1
# --> "q" -> value = 2
tm1 = transaction.TransactionManager()
conn = self._db.open(transaction_manager=tm1)
r1 = conn.root()
p = P()
p.value = 1
r1["p"] = p
q = P()
q.value = 2
r1["q"] = q
tm1.commit()
# Now txn T1 changes p.value to 3 locally (subtxn commit).
p.value = 3
tm1.savepoint()
# Start new txn T2 with a new connection.
tm2 = transaction.TransactionManager()
cn2 = self._db.open(transaction_manager=tm2)
r2 = cn2.root()
p2 = r2["p"]
self.assertEqual(p._p_oid, p2._p_oid)
# T2 shouldn't see T1's change of p.value to 3, because T1 didn't
# commit yet.
self.assertEqual(p2.value, 1)
# Change p.value to 4, and q.value to 5. Neither should be visible
# to T1, because T1 is still in progress.
p2.value = 4
q2 = r2["q"]
self.assertEqual(q._p_oid, q2._p_oid)
self.assertEqual(q2.value, 2)
q2.value = 5
tm2.commit()
# Back to T1. p and q still have the expected values.
rt = conn.root()
self.assertEqual(rt["p"].value, 3)
self.assertEqual(rt["q"].value, 2)
# Now make another savepoint in T1. This shouldn't change what
# T1 sees for p and q.
rt["r"] = P()
tm1.savepoint()
# Making that savepoint in T1 should not process invalidations
# from T2's commit. p.value should still be 3 here (because that's
# what T1 savepointed earlier), and q.value should still be 2.
# Prior to ZODB 3.2.9 and 3.4, q.value was 5 here.
rt = conn.root()
try:
self.assertEqual(rt["p"].value, 3)
self.assertEqual(rt["q"].value, 2)
finally:
tm1.abort()
def checkTxnBeginImpliesAbort(self):
# begin() should do an abort() first, if needed.
cn = self._db.open()
rt = cn.root()
rt['a'] = 1
transaction.begin() # should abort adding 'a' to the root
rt = cn.root()
self.assertRaises(KeyError, rt.__getitem__, 'a')
transaction.begin()
rt = cn.root()
self.assertRaises(KeyError, rt.__getitem__, 'a')
# One more time.
transaction.begin()
rt = cn.root()
rt['a'] = 3
transaction.begin()
rt = cn.root()
self.assertRaises(KeyError, rt.__getitem__, 'a')
self.assertRaises(KeyError, rt.__getitem__, 'b')
# That used methods of the default transaction *manager*. Alas,
# that's not necessarily the same as using methods of the current
# transaction, and, in fact, when this test was written,
# Transaction.begin() didn't do anything (everything from here
# down failed).
# Later (ZODB 3.6): Transaction.begin() no longer exists, so the
# rest of this test was tossed.
def checkFailingCommitSticks(self):
# See also checkFailingSavepointSticks.
cn = self._db.open()
rt = cn.root()
rt['a'] = 1
# Arrange for commit to fail during tpc_vote.
poisoned_jar = PoisonedJar(break_tpc_vote=True)
poisoned = PoisonedObject(poisoned_jar)
transaction.get().join(poisoned_jar)
self.assertRaises(PoisonedError, transaction.get().commit)
# Trying to commit again fails too.
self.assertRaises(TransactionFailedError, transaction.commit)
self.assertRaises(TransactionFailedError, transaction.commit)
self.assertRaises(TransactionFailedError, transaction.commit)
# The change to rt['a'] is lost.
self.assertRaises(KeyError, rt.__getitem__, 'a')
# Trying to modify an object also fails, because Transaction.join()
# also raises TransactionFailedError.
self.assertRaises(TransactionFailedError, rt.__setitem__, 'b', 2)
# Clean up via abort(), and try again.
transaction.abort()
rt['a'] = 1
transaction.commit()
self.assertEqual(rt['a'], 1)
# Cleaning up via begin() should also work.
rt['a'] = 2
transaction.get().join(poisoned_jar)
self.assertRaises(PoisonedError, transaction.commit)
self.assertRaises(TransactionFailedError, transaction.commit)
# The change to rt['a'] is lost.
self.assertEqual(rt['a'], 1)
# Trying to modify an object also fails.
self.assertRaises(TransactionFailedError, rt.__setitem__, 'b', 2)
# Clean up via begin(), and try again.
transaction.begin()
rt['a'] = 2
transaction.commit()
self.assertEqual(rt['a'], 2)
cn.close()
def checkSavepointRollbackAndReadCurrent(self):
'''
savepoint rollback after readcurrent was called on a new object
should not raise POSKeyError
'''
cn = self._db.open()
try:
transaction.begin()
root = cn.root()
added_before_savepoint = P()
root['added_before_savepoint'] = added_before_savepoint
sp = transaction.savepoint()
added_before_savepoint.btree = new_btree = OOBTree()
cn.add(new_btree)
new_btree['change_to_trigger_read_current'] = P()
sp.rollback()
transaction.commit()
self.assertTrue('added_before_savepoint' in root)
finally:
transaction.abort()
cn.close()
def checkFailingSavepointSticks(self):
cn = self._db.open()
rt = cn.root()
rt['a'] = 1
transaction.savepoint()
self.assertEqual(rt['a'], 1)
rt['b'] = 2
# Make a jar that raises PoisonedError when making a savepoint.
poisoned = PoisonedJar(break_savepoint=True)
transaction.get().join(poisoned)
self.assertRaises(PoisonedError, transaction.savepoint)
# Trying to make a savepoint again fails too.
self.assertRaises(TransactionFailedError, transaction.savepoint)
self.assertRaises(TransactionFailedError, transaction.savepoint)
# Top-level commit also fails.
self.assertRaises(TransactionFailedError, transaction.commit)
# The changes to rt['a'] and rt['b'] are lost.
self.assertRaises(KeyError, rt.__getitem__, 'a')
self.assertRaises(KeyError, rt.__getitem__, 'b')
# Trying to modify an object also fails, because Transaction.join()
# also raises TransactionFailedError.
self.assertRaises(TransactionFailedError, rt.__setitem__, 'b', 2)
# Clean up via abort(), and try again.
transaction.abort()
rt['a'] = 1
transaction.commit()
self.assertEqual(rt['a'], 1)
# Cleaning up via begin() should also work.
rt['a'] = 2
transaction.get().join(poisoned)
self.assertRaises(PoisonedError, transaction.savepoint)
# Trying to make a savepoint again fails too.
self.assertRaises(TransactionFailedError, transaction.savepoint)
# The change to rt['a'] is lost.
self.assertEqual(rt['a'], 1)
# Trying to modify an object also fails.
self.assertRaises(TransactionFailedError, rt.__setitem__, 'b', 2)
# Clean up via begin(), and try again.
transaction.begin()
rt['a'] = 2
transaction.savepoint()
self.assertEqual(rt['a'], 2)
transaction.commit()
cn2 = self._db.open()
rt = cn.root()
self.assertEqual(rt['a'], 2)
cn.close()
cn2.close()
def checkMultipleUndoInOneTransaction(self):
# Verify that it's possible to perform multiple undo
# operations within a transaction. If ZODB performs the undo
# operations in a nondeterministic order, this test will often
# fail.
conn = self._db.open()
try:
root = conn.root()
# Add transactions that set root["state"] to (0..5)
for state_num in range(6):
transaction.begin()
root['state'] = state_num
transaction.get().note(u'root["state"] = %d' % state_num)
transaction.commit()
# Undo all but the first. Note that no work is actually
# performed yet.
transaction.begin()
log = self._db.undoLog()
self._db.undoMultiple([log[i]['id'] for i in range(5)])
transaction.get().note(u'undo states 1 through 5')
# Now attempt all those undo operations.
transaction.commit()
# Sanity check: we should be back to the first state.
self.assertEqual(root['state'], 0)
finally:
transaction.abort()
conn.close()
class PoisonedError(Exception):
pass
# PoisonedJar arranges to raise PoisonedError from interesting places.
class PoisonedJar(object):
def __init__(self, break_tpc_begin=False, break_tpc_vote=False,
break_savepoint=False):
self.break_tpc_begin = break_tpc_begin
self.break_tpc_vote = break_tpc_vote
self.break_savepoint = break_savepoint
def sortKey(self):
return str(id(self))
def tpc_begin(self, *args):
if self.break_tpc_begin:
raise PoisonedError("tpc_begin fails")
# A way to poison a top-level commit.
def tpc_vote(self, *args):
if self.break_tpc_vote:
raise PoisonedError("tpc_vote fails")
# A way to poison a savepoint -- also a way to poison a subtxn commit.
def savepoint(self):
if self.break_savepoint:
raise PoisonedError("savepoint fails")
def commit(*args):
pass
def abort(*self):
pass
class PoisonedObject(object):
def __init__(self, poisonedjar):
self._p_jar = poisonedjar
def test_suite():
return unittest.TestSuite((
unittest.makeSuite(ZODBTests, 'check'),
))
if __name__ == "__main__":
unittest.main(defaultTest="test_suite")
|
src/alphabet.py | meghbhalerao/cnnormaliztion | 221 | 11182301 | <reponame>meghbhalerao/cnnormaliztion<filename>src/alphabet.py
class Alphabet(dict):
def __init__(self, start_feature_id=1):
self.fid = start_feature_id
def add(self, item):
idx = self.get(item, None)
if idx is None:
idx = self.fid
self[item] = idx
self.fid += 1
return idx
def dump(self, fname):
with open(fname, "w") as out:
for k in sorted(self.keys()):
out.write("{}\t{}\n".format(k, self[k]))
def test():
import pickle
a = Alphabet()
print a.fid
a.add('2')
a.add('1')
a.add('1')
print a.fid, a
pickle.dump(a, open('/tmp/tmp.pickle', 'w'))
del a
a = pickle.load(open('/tmp/tmp.pickle'))
print a.fid, a
a.add('4')
print a.fid, a
a = Alphabet(start_feature_id=0)
a.add('4')
print a
if __name__ == '__main__':
test() |
app/handler/encoder.py | cdlaimin/pity | 135 | 11182323 | <reponame>cdlaimin/pity
import json
from datetime import datetime
from typing import Any
class JsonEncoder(json.JSONEncoder):
def default(self, o: Any) -> Any:
if isinstance(o, set):
return list(o)
if isinstance(o, datetime):
return o.strftime("%Y-%m-%d %H:%M:%S")
# Fall back to the base class, which raises TypeError for unsupported types.
return super().default(o)
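if __name__ == "__main__":
    # Minimal usage sketch: pass the encoder class to json.dumps so sets and
    # datetimes serialize cleanly (the payload below is purely illustrative).
    print(json.dumps({"tags": {"a", "b"}, "created": datetime.now()}, cls=JsonEncoder))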
|
neutron/db/migration/alembic_migrations/versions/xena/expand/1bb3393de75d_add_qos_pps_rule.py | congnt95/neutron | 1,080 | 11182327 | <filename>neutron/db/migration/alembic_migrations/versions/xena/expand/1bb3393de75d_add_qos_pps_rule.py<gh_stars>1000+
# Copyright (c) 2021 China Unicom Cloud Data Co.,Ltd.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""add qos policy rule Packet Rate Limit
Revision ID: 1bb3393de75d
Revises: <PASSWORD>
Create Date: 2021-01-22 17:00:03.085196
"""
from alembic import op
import sqlalchemy as sa
from neutron_lib import constants
from neutron.db import migration
# revision identifiers, used by Alembic.
revision = '1bb3393de75d'
down_revision = '<PASSWORD>'
# milestone identifier, used by neutron-db-manage
neutron_milestone = [migration.XENA]
direction_enum = sa.Enum(
constants.EGRESS_DIRECTION, constants.INGRESS_DIRECTION,
name='qos_packet_rate_limit_rules_directions'
)
def upgrade():
op.create_table(
'qos_packet_rate_limit_rules',
sa.Column('id', sa.String(36), primary_key=True),
sa.Column('qos_policy_id',
sa.String(length=36),
sa.ForeignKey('qos_policies.id', ondelete='CASCADE'),
nullable=False, index=True),
sa.Column('max_kpps', sa.Integer()),
sa.Column('max_burst_kpps', sa.Integer()),
sa.Column('direction', direction_enum,
nullable=False,
server_default=constants.EGRESS_DIRECTION),
sa.UniqueConstraint('qos_policy_id', 'direction',
name='qos_packet_rate_limit_rules0qos_policy_id0direction')
)
|
hls4ml/model/flow/flow.py | jaemyungkim/hls4ml | 380 | 11182361 | <filename>hls4ml/model/flow/flow.py<gh_stars>100-1000
class Flow(object):
def __init__(self, name, optimizers, requires=None):
self.name = name
if optimizers is None:
self.optimizers = []
else:
self.optimizers = optimizers
if requires is None:
self.requires = []
else:
self.requires = requires
flow_map = {}
def _get_backend_name_prefix(name, backend):
if backend is not None and not name.startswith(backend.lower()):
name = backend.lower() + ':' + name
return name
def register_flow(name, optimizers, requires=None, backend=None):
name = _get_backend_name_prefix(name, backend)
if name in flow_map:
raise Exception('Flow {} already registered'.format(name))
flow_map[name] = Flow(name, optimizers, requires)
return name
def get_flow(name):
if name in flow_map:
return flow_map[name]
else:
raise Exception('Unknown flow: {}'.format(name))
def get_backend_flows(backend):
return [flow for flow in flow_map.keys() if flow.startswith(backend.lower() + ':')]
def get_available_flows():
return list(flow_map.keys())
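if __name__ == '__main__':
    # Small smoke test of the registry API above; the flow, optimizer and backend
    # names are made up for illustration and are not real hls4ml passes.
    name = register_flow('demo', ['opt_a', 'opt_b'], backend='SomeBackend')
    print(name)                       # 'somebackend:demo'
    print(get_flow(name).optimizers)  # ['opt_a', 'opt_b']
    print(get_backend_flows('SomeBackend'))
    print(get_available_flows())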
|
FinMind/plotting/pie.py | vishalbelsare/FinMind | 1,106 | 11182379 | import typing
from IPython.display import HTML, display
from pyecharts import options as opts
from pyecharts.charts import Pie
from FinMind.schema.plot import Labels, Series, convert_labels_series_schema
def pie(
labels: typing.Union[typing.List[typing.Union[str, int]], Labels],
series: typing.Union[typing.List[typing.Union[int, float]], Series],
title: str = "title",
series_name: str = "",
width: str = "800px",
height: str = "600px",
radius: typing.List[str] = ["30%", "50%"],
pos_left: str = "legft",
pos_top: str = "10%",
filename: str = "pie.html",
):
"""plot bar
:param: bar_plot_data (:obj:FinMind.PiePlotSchema)
PiePlotSchema(labels=labels, series=series)
:param: title (str) default "title"
:param: series_name (str) default ''
:param: width (str) default "800px"
:param: height (str) default "600px"
:param: radius (List[str]) default ["30%", "50%"]
:param: pos_left (str) default "legft"
:param: pos_top (str) default "10%"
:param: filename (str) default "pie.html", output filename
:return: display pie
:rtype pyecharts.charts.Pie
"""
labels, series = convert_labels_series_schema(labels, series)
pie_plot = (
Pie()
.add(
series_name=series_name,
data_pair=[list(z) for z in zip(labels.labels, series.series)],
radius=radius,
)
.set_global_opts(
title_opts=opts.TitleOpts(title=title),
legend_opts=opts.LegendOpts(
pos_left=pos_left, orient="vertical", pos_top="10%"
),
)
)
pie_plot.render(filename)
display(HTML(filename=filename))
return pie_plot
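if __name__ == "__main__":
    # Usage sketch with made-up holdings: renders "example_pie.html" and displays
    # it inline when run inside a notebook.
    pie(
        labels=["2330", "2317", "2454"],
        series=[40, 35, 25],
        title="Example portfolio weights",
        filename="example_pie.html",
    )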
|
tests/test_distfit.py | ksachdeva/distfit | 126 | 11182512 | import numpy as np
from distfit import distfit
def test_distfit():
X = np.random.normal(0, 2, 1000)
y = [-14,-8,-6,0,1,2,3,4,5,6,7,8,9,10,11,15]
# Initialize
dist = distfit()
assert np.all(np.isin(['method', 'alpha', 'bins', 'distr', 'multtest', 'n_perm'], dir(dist)))
# Fit and transform data
dist.fit_transform(X, verbose=3)
# TEST 1: check output is unchanged
assert np.all(np.isin(['method', 'model', 'summary', 'histdata', 'size'], dir(dist)))
# TEST 2: Check model output is unchanged
assert [*dist.model.keys()]==['distr', 'stats', 'params', 'name', 'model', 'score', 'loc', 'scale', 'arg', 'CII_min_alpha', 'CII_max_alpha']
# TEST 3: Check specific distribution
dist = distfit(distr='t')
dist.fit_transform(X)
assert dist.model['name']=='t'
# TEST 4: Check specific distribution
dist = distfit(distr='t', alpha=None)
dist.fit_transform(X)
assert dist.model['CII_min_alpha'] is not None
assert dist.model['CII_max_alpha'] is not None
# TEST 4A: Check multiple distribution
dist = distfit(distr=['norm', 't', 'gamma'])
results = dist.fit_transform(X)
assert np.all(np.isin(results['summary']['distr'].values, ['gamma', 't', 'norm']))
# TEST 5: Bound check
dist = distfit(distr='t', bound='up', alpha=0.05)
dist.fit_transform(X, verbose=0)
assert dist.model['CII_min_alpha'] is None
assert dist.model['CII_max_alpha'] is not None
dist = distfit(distr='t', bound='down', alpha=0.05)
dist.fit_transform(X, verbose=0)
assert dist.model['CII_min_alpha'] is not None
assert dist.model['CII_max_alpha'] is None
dist = distfit(distr='t', bound='both', alpha=0.05)
dist.fit_transform(X, verbose=0)
assert dist.model['CII_min_alpha'] is not None
assert dist.model['CII_max_alpha'] is not None
# TEST 6: Distribution check: Make sure the right loc and scale parameters are detected
X = np.random.normal(0, 2, 10000)
dist = distfit(distr='norm', alpha=0.05)
dist.fit_transform(X, verbose=0)
dist.model['loc']
'%.1f' %dist.model['scale']=='2.0'
'%.1f' %np.abs(dist.model['loc'])=='0.0'
# TEST 7
X = np.random.normal(0, 2, 1000)
y = [-14,-8,-6,0,1,2,3,4,5,6,7,8,9,10,11,15]
# TEST 1: Check bounds
out1 = distfit(distr='norm', bound='up')
out1.fit_transform(X, verbose=0)
out1.predict(y, verbose=0)
assert np.all(np.isin(np.unique(out1.results['y_pred']), ['none','up']))
out2 = distfit(distr='norm', bound='down')
out2.fit_transform(X, verbose=0)
out2.predict(y, verbose=0)
assert np.all(np.isin(np.unique(out2.results['y_pred']), ['none','down']))
out3 = distfit(distr='norm', bound='down')
out3.fit_transform(X, verbose=0)
out3.predict(y, verbose=0)
assert np.all(np.isin(np.unique(out3.results['y_pred']), ['none','down','up']))
# TEST 8: Check different sizes array
X = np.random.normal(0, 2, [10,100])
dist = distfit(distr='norm', bound='up')
dist.fit_transform(X, verbose=0)
dist.predict(y, verbose=0)
assert np.all(np.isin(np.unique(dist.results['y_pred']), ['none','up']))
# TEST 9
data_random = np.random.normal(0, 2, 1000)
data = [-14,-8,-6,0,1,2,3,4,5,6,7,8,9,10,11,15]
dist = distfit()
dist.fit_transform(X, verbose=0)
# TEST 10 Check number of output probabilities
dist.fit_transform(X, verbose=0)
dist.predict(y)
assert dist.results['y_proba'].shape[0]==len(y)
# TEST 11: Check whether alpha affects the results
out1 = distfit(alpha=0.05)
out1.fit_transform(X, verbose=0)
out1.predict(y)
out2 = distfit(alpha=0.2)
out2.fit_transform(X, verbose=0)
out2.predict(y)
assert np.all(out1.y_proba==out2.y_proba)
assert not np.all(out1.results['y_pred']==out2.results['y_pred'])
assert np.all(out1.results['P']==out2.results['P'])
assert sum(out1.results['y_pred']=='none')>sum(out2.results['y_pred']=='none')
# TEST 12: Check different sizes array
X = np.random.normal(0, 2, [10,100])
y = [-14,-8,-6,0,1,2,3,4,5,6,7,8,9,10,11,15]
dist = distfit(bound='up')
dist.fit_transform(X, verbose=0)
dist.predict(y)
assert np.all(np.isin(np.unique(dist.results['y_pred']), ['none','up']))
# TEST 13: Precentile
X = np.random.normal(0, 2, [10,100])
y = [-14,-8,-6,0,1,2,3,4,5,6,7,8,9,10,11,15]
dist = distfit(method='percentile')
dist.fit_transform(X, verbose=0)
results=dist.predict(y)
assert np.all(np.isin([*results.keys()], ['y', 'y_proba', 'y_pred', 'P', 'teststat']))
# TEST 14: Quantile
dist = distfit(method='quantile')
dist.fit_transform(X, verbose=0)
results=dist.predict(y)
assert np.all(np.isin([*results.keys()], ['y', 'y_proba', 'y_pred', 'teststat']))
# TEST 15: Discrete
import random
random.seed(10)
from scipy.stats import binom
# Generate random numbers
X = binom(8, 0.5).rvs(10000)
dist = distfit(method='discrete', f=1.5, weighted=True)
dist.fit_transform(X, verbose=3)
assert dist.model['n']==8
assert np.round(dist.model['p'], decimals=1)==0.5
# check output is unchanged
assert np.all(np.isin(['method', 'model', 'summary', 'histdata', 'size'], dir(dist)))
# TEST 15A
assert [*dist.model.keys()]==['name', 'distr', 'model', 'params', 'score', 'chi2r', 'n', 'p', 'CII_min_alpha', 'CII_max_alpha']
# TEST 15B
results = dist.predict([0, 1, 10, 11, 12])
assert np.all(np.isin([*results.keys()], ['y', 'y_proba', 'y_pred', 'P']))
|
rqt_carla_control/setup.py | SebastianHuch/ros-bridge | 314 | 11182517 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Setup for rqt_carla_control
"""
import os
ROS_VERSION = int(os.environ['ROS_VERSION'])
if ROS_VERSION == 1:
from distutils.core import setup
from catkin_pkg.python_setup import generate_distutils_setup
d = generate_distutils_setup(
packages=['rqt_carla_control'],
package_dir={'': 'src'},
)
setup(**d)
elif ROS_VERSION == 2:
from setuptools import setup
package_name = 'rqt_carla_control'
setup(
name=package_name,
version='0.0.0',
packages=[package_name],
package_dir={'': 'src'},
data_files=[
('share/ament_index/resource_index/packages', ['resource/' + package_name]),
(os.path.join('share', package_name), ['package.xml']),
(os.path.join('share', package_name), ['plugin.xml']),
('share/' + package_name + '/resource',
['resource/CarlaControl.ui', 'resource/pause.png', 'resource/play.png', 'resource/step_once.png']),
('lib/' + package_name, ['scripts/rqt_carla_control'])
],
install_requires=['setuptools'],
zip_safe=True,
maintainer='CARLA Simulator Team',
maintainer_email='<EMAIL>',
description='CARLA ROS2 RQT CONTROL',
license='MIT',
scripts=['scripts/rqt_carla_control'],
)
|
src/third_party/angle/src/commit_id.py | Chilledheart/naiveproxy | 2,219 | 11182542 | <reponame>Chilledheart/naiveproxy<gh_stars>1000+
#!/usr/bin/env python
# Copyright 2018 The ANGLE Project Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Generate commit.h with git commit hash.
#
import subprocess as sp
import sys
import os
usage = """\
Usage: commit_id.py check - check if git is present
commit_id.py unpack <ref_file> - check if <ref_file> exists, and if not
create it based on .git/packed-refs
commit_id.py position - print commit position
commit_id.py gen <file_to_write> - generate commit.h"""
def grab_output(command, cwd):
return sp.Popen(
command, stdout=sp.PIPE, shell=True, cwd=cwd).communicate()[0].strip().decode('utf-8')
def get_commit_position(cwd):
return grab_output('git rev-list HEAD --count', cwd)
def unpack_ref(ref_file, ref_file_full_path, packed_refs_full_path):
with open(packed_refs_full_path) as fin:
refs = fin.read().strip().split('\n')
# Strip comments
refs = [ref.split(' ') for ref in refs if ref.strip()[0] != '#']
# Parse lines (which are in the format <hash> <ref_file>) and find the input file
refs = [git_hash for (git_hash, file_path) in refs if file_path == ref_file]
assert (len(refs) == 1)
git_hash = refs[0]
with open(ref_file_full_path, 'w') as fout:
fout.write(git_hash + '\n')
if len(sys.argv) < 2:
sys.exit(usage)
operation = sys.argv[1]
# Set the root of ANGLE's repo as the working directory
cwd = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..')
aosp_angle_path = os.path.join(os.path.dirname('.'), 'external', 'angle')
if os.path.exists(aosp_angle_path):
cwd = aosp_angle_path
git_dir_exists = os.path.exists(os.path.join(cwd, '.git', 'HEAD'))
if operation == 'check':
if git_dir_exists:
print("1")
else:
print("0")
sys.exit(0)
elif operation == 'unpack':
if len(sys.argv) < 3:
sys.exit(usage)
ref_file = sys.argv[2]
ref_file_full_path = os.path.join(cwd, '.git', ref_file)
ref_file_exists = os.path.exists(ref_file_full_path)
if not ref_file_exists:
packed_refs_full_path = os.path.join(cwd, '.git', 'packed-refs')
unpack_ref(ref_file, ref_file_full_path, packed_refs_full_path)
sys.exit(0)
elif operation == 'position':
if git_dir_exists:
print(get_commit_position(cwd))
else:
print("0")
sys.exit(0)
if len(sys.argv) < 3 or operation != 'gen':
sys.exit(usage)
output_file = sys.argv[2]
commit_id_size = 12
commit_id = 'unknown hash'
commit_date = 'unknown date'
commit_position = '0'
enable_binary_loading = False
if git_dir_exists:
try:
commit_id = grab_output('git rev-parse --short=%d HEAD' % commit_id_size, cwd)
commit_date = grab_output('git show -s --format=%ci HEAD', cwd)
commit_position = get_commit_position(cwd)
enable_binary_loading = True
except:
pass
hfile = open(output_file, 'w')
hfile.write('#define ANGLE_COMMIT_HASH "%s"\n' % commit_id)
hfile.write('#define ANGLE_COMMIT_HASH_SIZE %d\n' % commit_id_size)
hfile.write('#define ANGLE_COMMIT_DATE "%s"\n' % commit_date)
hfile.write('#define ANGLE_COMMIT_POSITION %s\n' % commit_position)
if not enable_binary_loading:
hfile.write('#define ANGLE_DISABLE_PROGRAM_BINARY_LOAD\n')
hfile.close()
|
rest_registration/decorators.py | psibean/django-rest-registration | 329 | 11182549 | <filename>rest_registration/decorators.py
import types
def api_view_serializer_class_getter(serializer_class_getter):
def _get_serializer_class(self):
return serializer_class_getter()
def _get_serializer(self, *args, **kwargs):
serializer_class = self.get_serializer_class()
return serializer_class(*args, **kwargs)
def decorator(func):
if not hasattr(func, 'cls'):
raise Exception(
'@api_view_serializer_class_getter can only decorate'
' @api_view decorated functions')
apiview_cls = func.cls
apiview_cls.get_serializer_class = types.MethodType(
_get_serializer_class,
apiview_cls)
if not hasattr(apiview_cls, 'get_serializer'):
# In case get_serializer() method is missing.
apiview_cls.get_serializer = types.MethodType(
_get_serializer,
apiview_cls)
return func
return decorator
def api_view_serializer_class(serializer_class):
return api_view_serializer_class_getter(lambda: serializer_class)
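# Hypothetical usage sketch (requires Django REST framework and a configured
# Django project, so it is shown only as a comment): the decorator attaches a
# serializer class to an @api_view function so that schema generators can
# discover it via get_serializer_class(). ``MySerializer`` is assumed to exist.
#
#     from rest_framework.decorators import api_view
#     from rest_framework.response import Response
#
#     @api_view_serializer_class(MySerializer)
#     @api_view(['POST'])
#     def register(request):
#         serializer = MySerializer(data=request.data)
#         serializer.is_valid(raise_exception=True)
#         return Response(serializer.validated_data)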
|
bookwyrm/tests/views/test_get_started.py | mouse-reeve/fedireads | 270 | 11182550 | """ test for app action functionality """
from unittest.mock import patch
from django.template.response import TemplateResponse
from django.test import TestCase
from django.test.client import RequestFactory
from bookwyrm import forms, models, views
from bookwyrm.tests.validate_html import validate_html
@patch("bookwyrm.activitystreams.populate_stream_task.delay")
class GetStartedViews(TestCase):
"""helping new users get oriented"""
def setUp(self):
"""we need basic test data and mocks"""
self.factory = RequestFactory()
with patch("bookwyrm.suggested_users.rerank_suggestions_task.delay"), patch(
"bookwyrm.activitystreams.populate_stream_task.delay"
), patch("bookwyrm.lists_stream.populate_lists_task.delay"):
self.local_user = models.User.objects.create_user(
"<EMAIL>",
"<EMAIL>",
"password",
local=True,
localname="mouse",
)
self.local_user = models.User.objects.create_user(
"<EMAIL>",
"<EMAIL>",
"password",
local=True,
localname="rat",
)
self.book = models.Edition.objects.create(
parent_work=models.Work.objects.create(title="hi"),
title="Example Edition",
remote_id="https://example.com/book/1",
)
models.SiteSettings.objects.create()
def test_profile_view(self, *_):
"""there are so many views, this just makes sure it LOADS"""
view = views.GetStartedProfile.as_view()
request = self.factory.get("")
request.user = self.local_user
result = view(request)
self.assertIsInstance(result, TemplateResponse)
validate_html(result.render())
self.assertEqual(result.status_code, 200)
@patch("bookwyrm.suggested_users.rerank_suggestions_task.delay")
@patch("bookwyrm.suggested_users.rerank_user_task.delay")
def test_profile_view_post(self, *_):
"""save basic user details"""
view = views.GetStartedProfile.as_view()
form = forms.LimitedEditUserForm(instance=self.local_user)
form.data["name"] = "<NAME>"
form.data["discoverable"] = "True"
request = self.factory.post("", form.data)
request.user = self.local_user
self.assertIsNone(self.local_user.name)
with patch(
"bookwyrm.models.activitypub_mixin.broadcast_task.apply_async"
) as delay_mock:
view(request)
self.assertEqual(delay_mock.call_count, 1)
self.assertEqual(self.local_user.name, "<NAME>")
self.assertTrue(self.local_user.discoverable)
def test_books_view(self, _):
"""there are so many views, this just makes sure it LOADS"""
view = views.GetStartedBooks.as_view()
request = self.factory.get("")
request.user = self.local_user
result = view(request)
self.assertIsInstance(result, TemplateResponse)
validate_html(result.render())
self.assertEqual(result.status_code, 200)
def test_books_view_with_query(self, _):
"""there are so many views, this just makes sure it LOADS"""
view = views.GetStartedBooks.as_view()
request = self.factory.get("?query=Example")
request.user = self.local_user
result = view(request)
self.assertIsInstance(result, TemplateResponse)
validate_html(result.render())
self.assertEqual(result.status_code, 200)
@patch("bookwyrm.suggested_users.rerank_suggestions_task.delay")
@patch("bookwyrm.activitystreams.add_book_statuses_task.delay")
def test_books_view_post(self, *_):
"""shelve some books"""
view = views.GetStartedBooks.as_view()
data = {self.book.id: self.local_user.shelf_set.first().id}
request = self.factory.post("", data)
request.user = self.local_user
self.assertFalse(self.local_user.shelfbook_set.exists())
with patch(
"bookwyrm.models.activitypub_mixin.broadcast_task.apply_async"
) as delay_mock:
view(request)
self.assertEqual(delay_mock.call_count, 1)
shelfbook = self.local_user.shelfbook_set.first()
self.assertEqual(shelfbook.book, self.book)
self.assertEqual(shelfbook.user, self.local_user)
@patch("bookwyrm.suggested_users.SuggestedUsers.get_suggestions")
def test_users_view(self, *_):
"""there are so many views, this just makes sure it LOADS"""
view = views.GetStartedUsers.as_view()
request = self.factory.get("")
request.user = self.local_user
result = view(request)
self.assertIsInstance(result, TemplateResponse)
validate_html(result.render())
self.assertEqual(result.status_code, 200)
def test_users_view_with_query(self, *_):
"""there are so many views, this just makes sure it LOADS"""
view = views.GetStartedUsers.as_view()
request = self.factory.get("?query=rat")
request.user = self.local_user
with patch("bookwyrm.suggested_users.SuggestedUsers.get_suggestions") as mock:
mock.return_value = models.User.objects.all()
result = view(request)
self.assertIsInstance(result, TemplateResponse)
validate_html(result.render())
self.assertEqual(result.status_code, 200)
|
plugin.video.xbmcfilm/resources/lib/utils/rowbalance.py | mrknow/filmkodi | 105 | 11182583 | #!/usr/bin/env python2
# -*- coding: UTF-8 -*-
import random
servers = [ '172.16.58.3:1935',
'192.168.3.11:1935',
'172.16.31.10:1935',
'192.168.127.12:1935',
'192.168.127.12:1935',
'192.168.3.11:1935',
'172.16.58.3:1935',
'172.16.17.32:1935',
'172.16.17.32:1935',
'172.16.31.10:1935',
'192.168.127.12:1935',
'192.168.3.11:1935',
'192.168.3.11:1935',
'172.16.58.3:1935',
'192.168.3.11:1935',
'192.168.127.12:1935',
'172.16.58.3:1935',
'172.16.58.3:1935',
'192.168.127.12:1935',
'192.168.3.11:1935',
'172.16.58.3:1935',
'172.16.58.3:1935',
'192.168.127.12:1935',
'172.16.58.3:1935',
'192.168.127.12:1935',
'172.16.58.3:1935',
'172.16.58.3:1935',
'192.168.3.11:1935',
'192.168.127.12:1935',
'192.168.127.12:1935',
'172.16.31.10:1935',
'172.16.31.10:1935',
'192.168.3.11:1935',
'172.16.17.32:1935',
'192.168.3.11:1935',
'172.16.17.32:1935',
'172.16.58.3:1935',
'172.16.58.3:1935',
'192.168.3.11:1935',
'192.168.127.12:1935',
'192.168.127.12:1935',
'192.168.3.11:1935',
'172.16.17.32:1935',
'172.16.58.3:1935',
'172.16.31.10:1935',
'172.16.17.32:1935',
'172.16.17.32:1935',
'172.16.31.10:1935',
'192.168.3.11:1935',
'192.168.3.11:1935',
'172.16.17.32:1935',
'172.16.58.3:1935',
'172.16.58.3:1935',
'172.16.17.32:1935',
'172.16.17.32:1935',
'172.16.58.3:1935',
'172.16.17.32:1935',
'192.168.127.12:1935',
'172.16.17.32:1935',
'172.16.17.32:1935',
'172.16.58.3:1935',
'172.16.31.10:1935',
'192.168.3.11:1935',
'172.16.58.3:1935',
'172.16.58.3:1935',
'172.16.31.10:1935',
'172.16.31.10:1935',
'172.16.31.10:1935',
'192.168.127.12:1935',
'172.16.58.3:1935',
'192.168.127.12:1935',
'192.168.127.12:1935',
'192.168.127.12:1935',
'172.16.31.10:1935',
'172.16.58.3:1935',
'192.168.3.11:1935',
'192.168.127.12:1935' ]
def get():
return random.choice(servers)
|
test/library/draft/DataFrames/psahabu/IterSeries.py | jhh67/chapel | 1,602 | 11182632 | import pandas as pd
I = ["A", "B", "C", "D", "E"]
A = ["a", "b", "c", "d", "e"]
letters = pd.Series(A, pd.Index(I))
for i in letters.index:
print i
print
for i in zip(letters.index, range(0, 5)):
print i
print
for i in letters:
print i
print
for i in letters.items():
print i
|
examples/tutorials/03_vision/03_annotate.py | rootless4real/cozmo-python-sdk | 794 | 11182640 | #!/usr/bin/env python3
# Copyright (c) 2016 Anki, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License in the file LICENSE.txt or at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''Display a GUI window showing an annotated camera view.
Note:
This example requires Python to have Tkinter installed to display the GUI.
It also requires the Pillow and numpy python packages to be pip installed.
The :class:`cozmo.world.World` object collects raw images from Cozmo's camera
and makes them available as a property (:attr:`~cozmo.world.World.latest_image`)
and by generating :class:`cozmo.world.EvtNewCamerImages` events as they come in.
Each image is an instance of :class:`cozmo.world.CameraImage` which provides
access both to the raw camera image, and to a scalable annotated image which
can show where Cozmo sees faces and objects, along with any other information
your program may wish to display.
This example uses the tkviewer to display the annotated camera on the screen
and adds a couple of custom annotations of its own using two different methods.
'''
import sys
import time
try:
from PIL import ImageDraw, ImageFont
except ImportError:
sys.exit('run `pip3 install --user Pillow numpy` to run this example')
import cozmo
# Define an annotator using the annotator decorator
@cozmo.annotate.annotator
def clock(image, scale, annotator=None, world=None, **kw):
d = ImageDraw.Draw(image)
bounds = (0, 0, image.width, image.height)
text = cozmo.annotate.ImageText(time.strftime("%H:%m:%S"),
position=cozmo.annotate.TOP_LEFT)
text.render(d, bounds)
# Define another decorator as a subclass of Annotator
class Battery(cozmo.annotate.Annotator):
def apply(self, image, scale):
d = ImageDraw.Draw(image)
bounds = (0, 0, image.width, image.height)
batt = self.world.robot.battery_voltage
text = cozmo.annotate.ImageText('BATT %.1fv' % batt, color='green')
text.render(d, bounds)
def cozmo_program(robot: cozmo.robot.Robot):
robot.world.image_annotator.add_static_text('text', 'Coz-Cam', position=cozmo.annotate.TOP_RIGHT)
robot.world.image_annotator.add_annotator('clock', clock)
robot.world.image_annotator.add_annotator('battery', Battery)
time.sleep(2)
print("Turning off all annotations for 2 seconds")
robot.world.image_annotator.annotation_enabled = False
time.sleep(2)
print('Re-enabling all annotations')
robot.world.image_annotator.annotation_enabled = True
# Disable the face annotator after 10 seconds
time.sleep(10)
print("Disabling face annotations (light cubes still annotated)")
robot.world.image_annotator.disable_annotator('faces')
# Shutdown the program after 100 seconds
time.sleep(100)
cozmo.run_program(cozmo_program, use_viewer=True, force_viewer_on_top=True)
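# A possible direct-access sketch (not executed here; attribute names are taken
# from the SDK docs referenced above and should be treated as assumptions):
# inside cozmo_program the newest CameraImage can also be read straight off the
# world object:
#     image = robot.world.latest_image      # cozmo.world.CameraImage or None
#     if image is not None:
#         pil_image = image.raw_image       # unannotated PIL image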
|
examples/train.py | amirjaber/vaegan | 101 | 11182692 | # coding : utf-8
import sys
import os
module_path = os.path.abspath( u'..' )
sys.path.append( module_path )
import json
import numpy as np
import theano
from vaegan.data import load_data
from vaegan import Morph, Reconstruct
from vaegan import VAEGAN
from vaegan.optimizers import Adam
def load_json( infile_name ):
fin = open( infile_name, u'r' )
conf = json.load( fin )
fin.close()
return conf
conf = load_json( sys.argv[1] )
output_params_dir = conf[u'output_params_dir']
output_dir = conf[u'output_dir']
train_data_dir = conf[u'train_data_dir']
test_data_dir = conf[u'test_data_dir']
box = conf[u'box'] # cropped region
shape = (64, 64)
pretrain = True
seed = 1234
np.random.seed( seed )
# Build VAEGAN
vaegan = VAEGAN( seed )
train_data = load_data( train_data_dir, shape, box )
test_data = load_data( test_data_dir, shape, box )
# Set transformation
indices = np.arange( len(train_data) )
grid_shape = (10,10)
np.random.shuffle( indices )
indices = indices[:grid_shape[0]*grid_shape[1]]
form_type = 0 # Form of output images.
morph = Morph( vaegan, form_type, train_data[indices],
output_dir=output_dir, shape=grid_shape )
# Train
if pretrain:
print u'#Early phase'
solver = Adam( vaegan, eta=1e-3, beta1=0.9, minibatch_size=64 )
solver.run( train_data, T=20, params_dir=output_params_dir, callback=morph,
Xt=test_data )
print u'#Final phase'
vaegan.set_phase(1)
solver = Adam( vaegan, eta=1e-3, beta1=0.9, minibatch_size=64 )
solver.run( train_data, T=200, params_dir=output_params_dir, callback=morph,
Xt=test_data )
|
openshift_tools/web/rest.py | fahlmant/openshift-tools | 164 | 11182701 | <reponame>fahlmant/openshift-tools
#!/usr/bin/env python2
# vim: expandtab:tabstop=4:shiftwidth=4
"""
Generic REST class using python requests module
see zagg_client.py for example on how to use
"""
import requests
# pylint: disable=import-error,no-name-in-module
import requests.packages.urllib3.connectionpool as httplib
import time
import urllib3
#Currently only one method is used.
#More will be added in the future, and this can be disabled
#pylint: disable=too-few-public-methods
class RestApi(object):
"""
A base connection class to derive from.
"""
# All args are required
#pylint: disable=too-many-arguments,too-many-instance-attributes
def __init__(self,
host=None,
username=None,
password=<PASSWORD>,
headers=None,
token=<PASSWORD>,
ssl_verify=False,
debug=False):
self.host = host
self.username = username
self.password = password
self.token = token
self.headers = headers
self.ssl_verify = ssl_verify
self.debug = debug
if self.debug:
httplib.HTTPConnection.debuglevel = 1
httplib.HTTPSConnection.debuglevel = 1
self.base_uri = "http://" + self.host + "/"
@property
def _auth(self):
"""
implement authentication for the rest call
"""
if self.username and self.password:
return requests.auth.HTTPBasicAuth(self.username, self.password)
return None
def request(self, url, method, timeout=120, headers=None, params=None,
data=None, retries=0):
"""
wrapper method for Requests' methods
"""
if not url.startswith("https://") and not url.startswith("http://"):
url = self.base_uri + url
# This will disable the SSL warning for certificate verification
if not self.ssl_verify and url.startswith('https://'):
urllib3.disable_warnings()
# pylint: disable=no-member
requests.packages.urllib3.disable_warnings()
_headers = self.headers or {}
if headers:
_headers.update(headers)
attempts = retries + 1
while attempts > 0:
try:
response = requests.request(
auth=None if not self._auth else self._auth,
allow_redirects=True,
method=method,
url=url,
params=params,
data=data,
headers=_headers,
timeout=timeout,
verify=self.ssl_verify,
)
data = None
if response.status_code == 200:
data = response.json()
return (response.status_code, data)
# Reason: disable pylint bare-except because we are retrying on any and all exceptions
# Status: permanent unless we start catching only certain types of errors
# pylint: disable=bare-except
except:
print "Timed out: {}".format(url)
attempts -= 1
if attempts == 0:
raise
else:
time.sleep(1)
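# Usage sketch (commented out because it would perform a live HTTP call and the
# host below is hypothetical): request() returns a (status_code, json_body)
# tuple, where json_body stays None unless the response code is 200.
#
#     api = RestApi(host='zagg.example.com:8443', username='monitor',
#                   password='secret', ssl_verify=False)
#     status, payload = api.request('api/v1/metrics', 'GET', retries=2)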
|
RankFlair/rankflair.py | zatherz/reddit | 444 | 11182745 | #/u/GoldenSights
import praw
import time
import traceback
''' USER CONFIGURATION '''
APP_ID = ""
APP_SECRET = ""
APP_URI = ""
APP_REFRESH = ""
# https://www.reddit.com/comments/3cm1p8/how_to_make_your_bot_use_oauth2/
USERAGENT = ""
#This is a short description of what the bot does.
#For example "/u/GoldenSights' Newsletter bot"
SUBREDDIT = "Goldtesting"
#This is the sub or list of subs to scan for new posts.
#For a single sub, use "sub1".
#For multiple subreddits, use "sub1+sub2+sub3"
WAIT = 30
#The number of seconds between cycles. Bot is completely inactive during
#this time.
MULTI_SUBREDDIT_RANK = False
#If you entered a single subreddit in SUBREDDIT, you can ignore this.
#If you entered multiple:
# True = Posts are ranked relative to all the subreddits combined
# False = Each subreddit is managed individually
IGNORE_UNKNOWN = True
#If a post has a flair that is not part of the rank system, should
# we just ignore it?
#If False, that post's flair will be overwritten with a rank!
RANKINGS = {1: "#1 Top poster!",
5: "Top 5 poster",
10: "Top 10 poster",
25: "Top 25 poster",
50: "Top 50 poster",
100: "Top 100 poster"}
#Flair text
RANKINGCSS = {1: "toprank",
5: "fiverank",
10: "tenrank",
25: "twfiverank",
50: "fiftyrank",
100: "hundredrank"}
#Flair CSS class. Use empty quotes if you don't have any.
SEND_MODMAIL = True
#Send subreddit modmail when a post achieves a rank
MODMAIL_SUBJECT = "Automated post ranking system"
#The subjectline for the sent modmail
MODMAIL_BODY = """
_username_ has just earned rank _ranktext_ on their
post ["_posttitle_"](_postlink_)
"""
#This is the modmail message
#Post information can be inserted into the message with these injectors
# _posttitle_ : Post title
# _postid_ : Post ID number
# _postlink_ : Post permalink
# _ranktext_ : Rank text
# _rankvalue_ : Rank value
# _username_ : Post author
#If you would like more injectors, please message me.
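#For example, with the default MODMAIL_BODY above, the delivered message might
#read (illustrative values only):
#   example_user has just earned rank Top 5 poster on their
#   post ["Example post title"](https://redd.it/xxxxxx)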
''' All done! '''
# Automatic preparation
RANKINGS_REVERSE = {}
for key in RANKINGS:
val = RANKINGS[key].lower()
RANKINGS_REVERSE[val] = key
RANKKEYS = sorted(list(RANKINGS.keys()))
MAXRANK = RANKKEYS[-1]
if MULTI_SUBREDDIT_RANK or '+' not in SUBREDDIT:
SUBREDDIT_L = [SUBREDDIT]
else:
SUBREDDIT_L = SUBREDDIT.split('+')
try:
import bot
USERAGENT = bot.aG
except ImportError:
pass
# /Automatic preparation
print('Logging in')
r = praw.Reddit(USERAGENT)
r.set_oauth_app_info(APP_ID, APP_SECRET, APP_URI)
r.refresh_access_information(APP_REFRESH)
def manageranks():
''' Do it. Do it now. '''
for subreddit in SUBREDDIT_L:
print('Getting posts from ' + subreddit)
subreddit = r.get_subreddit(subreddit)
topall = subreddit.get_top_from_all(limit=MAXRANK)
topall = list(topall)
for position in range(len(topall)):
post = topall[position]
position += 1
# Add 1 because indices start at 0
print(post.id)
actual_flair = post.link_flair_text
suggested = get_rank_from_pos(position)
suggested_rank = suggested[0]
suggested_flair = suggested[1]
suggested_css = RANKINGCSS[suggested_rank]
if flair_is_better(new=suggested_flair, old=actual_flair):
print('\tSetting flair: %s' % suggested_flair)
post.set_flair(flair_text=suggested_flair,
flair_css_class=suggested_css)
if SEND_MODMAIL:
compose_modmail(post, suggested_flair, suggested_rank)
pass
def get_rank_from_pos(position):
''' Given a position in a listing, return the appropriate rank '''
for rankkey in RANKKEYS:
if rankkey >= position:
return [rankkey, RANKINGS[rankkey]]
def flair_is_better(new, old):
''' compare whether the newer flair is better than the older flair '''
if old == "" or old is None:
#Post has no flair yet. Anything is better
return True
newrank = RANKINGS_REVERSE[new.lower()]
try:
oldrank = RANKINGS_REVERSE[old.lower()]
except KeyError:
if IGNORE_UNKNOWN:
print('\t"%s" is not a recognized rank. Ignoring' % old)
return False
print('\t"%s" is not a recognized rank. Overwriting' % old)
return True
print('\tN:%d, O:%d' % (newrank, oldrank))
if newrank < oldrank:
print('\tBetter')
return True
print('\tNot better')
return False
def compose_modmail(post, new, newrank):
print('\tWriting modmail')
subreddit = '/r/' + post.subreddit.display_name
try:
author = post.author.name
except AttributeError:
author = "[deleted]"
message = MODMAIL_BODY
message = message.replace('_posttitle_', post.title)
message = message.replace('_postid_', post.id)
message = message.replace('_postlink_', post.short_link)
message = message.replace('_ranktext_', new)
message = message.replace('_rankvalue_', str(newrank))
message = message.replace('_username_', author)
r.send_message(subreddit, MODMAIL_SUBJECT, message)
while True:
try:
manageranks()
except Exception:
traceback.print_exc()
print('Sleeping %d seconds\n' % WAIT)
time.sleep(WAIT) |
django/load_devices.py | fallenfuzz/pynet | 528 | 11182780 | from net_system.models import NetworkSwitch, InventoryGroup
from net_system.models import Credentials
from getpass import getpass
import django
def main():
django.setup()
management_ip = raw_input("Please enter IP address: ")
my_switches = (('pynet-sw1', 8222, '10.220.88.28'),
('pynet-sw2', 8322, '10.220.88.29'),
('pynet-sw3', 8422, '10.220.88.30'),
('pynet-sw4', 8522, '10.220.88.31'))
passwd = getpass()
# Create Arista inventory group
arista_group = InventoryGroup.objects.get_or_create(group_name='arista')
print arista_group
# Create credential object
arista_creds = Credentials.objects.get_or_create(
username='admin1',
password=<PASSWORD>,
description='Arista credentials'
)
print arista_creds
# Create four switch objects
for switch_name, ssh_port, ip_addr in my_switches:
switch_obj = NetworkSwitch.objects.get_or_create(
device_name=switch_name,
device_type='arista_eos',
ip_address=ip_addr,
management_ip=management_ip,
port=ssh_port,
group_name=arista_group[0],
credentials = arista_creds[0],
)
print switch_obj
if __name__ == "__main__":
main()
|
tests/tools/assigner/models/test_replica_election.py | akashvacher/kafka-tools | 578 | 11182794 | import json
import unittest
from mock import patch, ANY
from kafka.tools.models.broker import Broker
from kafka.tools.models.topic import Topic
from kafka.tools.assigner.models.replica_election import ReplicaElection
class ReplicaElectionTests(unittest.TestCase):
def setUp(self):
self.topic = Topic('testTopic', 10)
self.broker = Broker('brokerhost1.example.com', id=1)
for i in range(10):
self.topic.partitions[i].replicas = [self.broker]
self.replica_election = ReplicaElection(self.topic.partitions, pause_time=0)
def test_replica_election_create(self):
assert self.replica_election is not None
def test_replica_election_repr(self):
t_repr = json.loads(repr(self.replica_election))
expect_repr = {'partitions': []}
for i in range(10):
expect_repr['partitions'].append({'topic': 'testTopic', 'partition': i})
assert t_repr == expect_repr
def test_replica_election_dict(self):
t_repr = self.replica_election.dict_for_replica_election()
expect_repr = {'partitions': []}
for i in range(10):
expect_repr['partitions'].append({'topic': 'testTopic', 'partition': i})
assert t_repr == expect_repr
@patch('kafka.tools.assigner.models.replica_election.subprocess.call')
def test_replica_election_execute(self, mock_call):
self.replica_election.execute(1, 1, 'zk_connect_string', '/path/to/tools', plugins=[], dry_run=False)
mock_call.assert_called_once_with(['/path/to/tools/kafka-preferred-replica-election.sh',
'--zookeeper', 'zk_connect_string',
'--path-to-json-file', ANY],
stderr=ANY, stdout=ANY)
|
napari/layers/utils/_color_manager_constants.py | MaksHess/napari | 1,345 | 11182815 | <gh_stars>1000+
from enum import Enum
class ColorMode(str, Enum):
"""
ColorMode: Color setting mode.
DIRECT (default mode) allows each point to be set arbitrarily
CYCLE allows the color to be set via a color cycle over an attribute
COLORMAP allows color to be set via a color map over an attribute
"""
DIRECT = 'direct'
CYCLE = 'cycle'
COLORMAP = 'colormap'
|
aleph/views/notifications_api.py | Rosencrantz/aleph | 1,213 | 11182816 | <filename>aleph/views/notifications_api.py
from flask import Blueprint, request
from aleph.search import NotificationsQuery
from aleph.views.serializers import NotificationSerializer
from aleph.views.util import require
blueprint = Blueprint("notifications_api", __name__)
@blueprint.route("/api/2/notifications", methods=["GET"])
def index():
"""
---
get:
summary: Get notifications
description: Get all the notifications for the user
responses:
'200':
content:
application/json:
schema:
type: object
allOf:
- $ref: '#/components/schemas/QueryResponse'
properties:
results:
type: array
items:
$ref: '#/components/schemas/Notification'
description: OK
tags:
- Notification
"""
require(request.authz.logged_in)
result = NotificationsQuery.handle(request)
return NotificationSerializer.jsonify_result(result)
|
transly/seq2seq/config.py | nikgit17/transly | 116 | 11182847 | <reponame>nikgit17/transly<filename>transly/seq2seq/config.py
import pickle
import pandas as pd
from transly.base.config import Config
class SConfig(Config):
"""
Configuration for encoder decoder model
"""
def __init__(
self,
configuration_file=None,
training_data_path="khoj/seq2seq/train.data.csv",
testing_data_path="data/test.data.csv",
static_config=None,
input_mode="character_level",
output_mode="character_level",
):
"""
Initialise configuration
:param configuration_file: path to configuration file of a pre-trained model, defaults to None
:type configuration_file: str, optional
        :param training_data_path: path to training data, defaults to 'khoj/seq2seq/train.data.csv'
        :type training_data_path: str, optional
        :param testing_data_path: path to testing data, defaults to 'data/test.data.csv'
        :type testing_data_path: str, optional
        :param static_config: defaults to {'number_of_units': 64, 'batch_size': 1500, 'epochs': 100, 'PAD_INDEX': 0, 'GO_INDEX': 1}
        :type static_config: dict, optional
        :param input_mode: 'character_level' or 'word_level' tokenisation of the input, defaults to 'character_level'
        :type input_mode: str, optional
        :param output_mode: 'character_level' or 'word_level' tokenisation of the output, defaults to 'character_level'
        :type output_mode: str, optional
        """
Config.__init__(
self,
configuration_file,
training_data_path,
testing_data_path,
static_config,
)
self.input_mode = input_mode
self.output_mode = output_mode
def get_config(self):
"""
Computes the entire configuration, including that at the time of initialisation
:return: full configuration
:rtype: dict
"""
if self.configuration_file:
self.config = pickle.load(open(self.configuration_file, "rb"))
return self.config
# derived configuration
print("fetching training file")
train_data = pd.read_csv(self.training_data_path)
train_data = train_data.apply(
lambda x: x.astype(str)
.str.upper()
.str.replace(r"[^A-Z0-9.%/\s]", " ")
.str.replace(r" +", " ")
)
train_data = train_data[
[
True if max([len(str(v[0])), len(str(v[1]))]) < 20 else False
for v in train_data.values
]
]
train_input, train_output = train_data.values[:, 0], train_data.values[:, 1]
self.config["train_input"], self.config["train_output"] = (
train_input,
train_output,
)
self.config["max_length_input"], self.config["max_length_output"] = (
max([len(str(v)) for v in train_input]),
max([len(str(v)) for v in train_output]),
)
if self.input_mode == "character_level":
(
self.config["input_char2ix"],
self.config["input_ix2char"],
self.config["input_dict_len"],
) = self.__char2index2char__(train_input)
elif self.input_mode == "word_level":
(
self.config["input_char2ix"],
self.config["input_ix2char"],
self.config["input_dict_len"],
) = self.__word2index2word__(train_input)
else:
raise Exception(
"input mode needs to be either 'character_level' or 'word_level'. Received {} instead.".format(
self.input_mode
)
)
if self.output_mode == "character_level":
(
self.config["output_char2ix"],
self.config["output_ix2char"],
self.config["output_dict_len"],
) = self.__char2index2char__(train_output)
elif self.output_mode == "word_level":
(
self.config["output_char2ix"],
self.config["output_ix2char"],
self.config["output_dict_len"],
) = self.__word2index2word__(train_output)
else:
raise Exception(
"output mode needs to be either 'character_level' or 'word_level'. Received {} instead.".format(
self.output_mode
)
)
self.config["input_mode"], self.config["output_mode"] = (
self.input_mode,
self.output_mode,
)
return self.config
def __char2index2char__(self, words):
"""
Computes character to indices dict (encoding) as well as indices to character dict (decoding)
:param words: list of words
:type words: list
:return: character encoding dict, decoding dict, length of dict
:rtype: dict, dict, int
"""
char2ix = {
w: i + 2 for i, w in enumerate(set([wc for w in words for wc in str(w)]))
}
char2ix["PAD"], char2ix["GO"] = (
self.config["PAD_INDEX"],
self.config["GO_INDEX"],
)
ix2char = {v: k for k, v in char2ix.items()}
return char2ix, ix2char, len(ix2char)
def __word2index2word__(self, sentences):
"""
Computes character to indices dict (encoding) as well as indices to character dict (decoding)
:param sentences: list of sentences
:type sentences: list
:return: word encoding dict, decoding dict, length of dict
:rtype: dict, dict, int
"""
word2ix = {
w: i + 2
for i, w in enumerate(set([w for s in sentences for w in str(s).split()]))
}
word2ix["PAD"], word2ix["GO"] = (
self.config["PAD_INDEX"],
self.config["GO_INDEX"],
)
ix2word = {v: k for k, v in word2ix.items()}
return word2ix, ix2word, len(ix2word)
|
gym_collision_avoidance/envs/__init__.py | krishna-bala/gym-collision-avoidance | 128 | 11182874 | # Find the config file (path provided as an environment variable),
# import and instantiate it here so all modules have access
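# For example (paths and class name are placeholders):
#   export GYM_CONFIG_PATH=/path/to/my_experiment_config.py
#   export GYM_CONFIG_CLASS=MyExperimentConfig
# If the variables are unset, the bundled config.py and its Config class are used.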
import os
gym_config_path = os.environ.get('GYM_CONFIG_PATH', os.path.join(os.path.dirname(os.path.realpath(__file__)), 'config.py'))
gym_config_class = os.environ.get('GYM_CONFIG_CLASS', 'Config')
import sys
if sys.version[0] == '3':
import importlib.util
spec = importlib.util.spec_from_file_location(gym_config_class, gym_config_path)
foo = importlib.util.module_from_spec(spec)
spec.loader.exec_module(foo)
elif sys.version[0] == '2':
import imp
foo = imp.load_source(gym_config_class.split('.')[-1], gym_config_path)
config_class = getattr(foo, gym_config_class, None)
assert(callable(config_class))
Config = config_class()
|
2010/aes-encrypt-pycrypto/pycrypto_aes.py | minimum-necessary-change/code-for-blog | 1,199 | 11182896 | from Crypto.Cipher import AES
import hashlib
password = b'<PASSWORD>'
key = hashlib.sha256(password).digest()
key = b'<KEY>'
IV = 16 * b'\x00'
mode = AES.MODE_CBC
encryptor = AES.new(key, mode, IV=IV)
text = b'j' * 32 + b'i' * 64
ciphertext = encryptor.encrypt(text)
decryptor = AES.new(key, mode, IV=IV)
plain = decryptor.decrypt(ciphertext)
print(plain)
|
kb/knowbert_utils.py | XuhuiZhou/kb | 295 | 11183051 | <reponame>XuhuiZhou/kb
from typing import Union, List
from allennlp.common import Params
from allennlp.data import Instance, DataIterator, Vocabulary
from allennlp.common.file_utils import cached_path
from kb.include_all import TokenizerAndCandidateGenerator
from kb.bert_pretraining_reader import replace_candidates_with_mask_entity
import json
def _extract_config_from_archive(model_archive):
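    """Extract ``config.json`` from a model ``.tar.gz`` archive and return it as ``Params``."""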
import tarfile
import tempfile
import os
with tempfile.TemporaryDirectory() as tmp:
with tarfile.open(model_archive, 'r:gz') as archive:
archive.extract('config.json', path=tmp)
config = Params.from_file(os.path.join(tmp, 'config.json'))
return config
def _find_key(d, key):
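    """Iteratively search a (possibly nested) dict ``d`` for ``key`` and return the first value found, else None."""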
val = None
stack = [d.items()]
while len(stack) > 0 and val is None:
s = stack.pop()
for k, v in s:
if k == key:
val = v
break
elif isinstance(v, dict):
stack.append(v.items())
return val
class KnowBertBatchifier:
"""
Takes a list of sentence strings and returns a tensor dict usable with
a KnowBert model
"""
def __init__(self, model_archive, batch_size=32,
masking_strategy=None,
wordnet_entity_file=None, vocab_dir=None):
# get bert_tokenizer_and_candidate_generator
config = _extract_config_from_archive(cached_path(model_archive))
# look for the bert_tokenizers and candidate_generator
candidate_generator_params = _find_key(
config['dataset_reader'].as_dict(), 'tokenizer_and_candidate_generator'
)
if wordnet_entity_file is not None:
candidate_generator_params['entity_candidate_generators']['wordnet']['entity_file'] = wordnet_entity_file
self.tokenizer_and_candidate_generator = TokenizerAndCandidateGenerator.\
from_params(Params(candidate_generator_params))
self.tokenizer_and_candidate_generator.whitespace_tokenize = False
assert masking_strategy is None or masking_strategy == 'full_mask'
self.masking_strategy = masking_strategy
# need bert_tokenizer_and_candidate_generator
if vocab_dir is not None:
vocab_params = Params({"directory_path": vocab_dir})
else:
vocab_params = config['vocabulary']
self.vocab = Vocabulary.from_params(vocab_params)
self.iterator = DataIterator.from_params(
Params({"type": "basic", "batch_size": batch_size})
)
self.iterator.index_with(self.vocab)
def _replace_mask(self, s):
return s.replace('[MASK]', ' [MASK] ')
def iter_batches(self, sentences_or_sentence_pairs: Union[List[str], List[List[str]]], verbose=True):
# create instances
instances = []
for sentence_or_sentence_pair in sentences_or_sentence_pairs:
if isinstance(sentence_or_sentence_pair, list):
assert len(sentence_or_sentence_pair) == 2
tokens_candidates = self.tokenizer_and_candidate_generator.\
tokenize_and_generate_candidates(
self._replace_mask(sentence_or_sentence_pair[0]),
self._replace_mask(sentence_or_sentence_pair[1]))
else:
tokens_candidates = self.tokenizer_and_candidate_generator.\
tokenize_and_generate_candidates(self._replace_mask(sentence_or_sentence_pair))
if verbose:
print(self._replace_mask(sentence_or_sentence_pair))
print(tokens_candidates['tokens'])
# now modify the masking if needed
if self.masking_strategy == 'full_mask':
# replace the mask span with a @@mask@@ span
masked_indices = [index for index, token in enumerate(tokens_candidates['tokens'])
if token == '[MASK]']
spans_to_mask = set([(i, i) for i in masked_indices])
replace_candidates_with_mask_entity(
tokens_candidates['candidates'], spans_to_mask
)
# now make sure the spans are actually masked
for key in tokens_candidates['candidates'].keys():
for span_to_mask in spans_to_mask:
found = False
for span in tokens_candidates['candidates'][key]['candidate_spans']:
if tuple(span) == tuple(span_to_mask):
found = True
if not found:
tokens_candidates['candidates'][key]['candidate_spans'].append(list(span_to_mask))
tokens_candidates['candidates'][key]['candidate_entities'].append(['@@MASK@@'])
tokens_candidates['candidates'][key]['candidate_entity_priors'].append([1.0])
tokens_candidates['candidates'][key]['candidate_segment_ids'].append(0)
# hack, assume only one sentence
assert not isinstance(sentence_or_sentence_pair, list)
fields = self.tokenizer_and_candidate_generator.\
convert_tokens_candidates_to_fields(tokens_candidates)
instances.append(Instance(fields))
for batch in self.iterator(instances, num_epochs=1, shuffle=False):
yield batch
|
mutant/contrib/geo/migrations/0002_update_field_defs_app_label.py | pombredanne/django-mutant | 152 | 11183078 | <reponame>pombredanne/django-mutant<filename>mutant/contrib/geo/migrations/0002_update_field_defs_app_label.py
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import functools
from django.db import migrations
def _update_field_def_cts_app_label(from_app_label, to_app_label, apps, schema_editor):
cts = apps.get_model('contenttypes', 'contenttype').objects
model_names = [
'geometrycollectionfielddefinition',
'geometryfielddefinition',
'linestringfielddefinition',
'multilinestringfielddefinition',
'multipointfielddefinition',
'multipolygonfielddefinition',
'pointfielddefinition',
'polygonfielddefinition',
]
cts.filter(
app_label=from_app_label, model__in=model_names
).update(app_label=to_app_label)
class Migration(migrations.Migration):
dependencies = [
('geo', '0001_initial'),
]
operations = [
migrations.RunPython(
functools.partial(_update_field_def_cts_app_label, 'mutant', 'geo'),
functools.partial(_update_field_def_cts_app_label, 'geo', 'mutant'),
),
migrations.AlterModelTable(
name='geometryfielddefinition',
table=None,
),
]
|
net/settings_bbc.py | juandesant/astrometry.net | 460 | 11183087 | from __future__ import absolute_import
from .settings_common import *
TEMPDIR = '/data2/tmp'
DATABASES['default']['NAME'] = 'an-bbc'
LOGGING['loggers']['django.request']['level'] = 'WARN'
SESSION_COOKIE_NAME = 'BBCAstrometrySession'
ssh_solver_config = 'an-bbc'
sitename = 'bbc'
SOCIAL_AUTH_GITHUB_KEY = github_secrets[sitename].key
SOCIAL_AUTH_GITHUB_SECRET = github_secrets[sitename].secret
|
videos/067_you_should_put_this_in_all_your_python_scripts/if_name_main_pkg/import_func.py | matthewstidham/VideosSampleCode | 285 | 11183088 | from if_name_main_pkg.bad_script import useful_function
def main():
print(f'{useful_function(3)=}')
if __name__ == '__main__':
main() |
glance/tests/functional/db/migrations/test_ocata_expand01.py | Steap/glance | 309 | 11183095 | <reponame>Steap/glance
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
from oslo_db.sqlalchemy import test_fixtures
from oslo_db.sqlalchemy import utils as db_utils
from glance.tests.functional.db import test_migrations
import glance.tests.utils as test_utils
class TestOcataExpand01Mixin(test_migrations.AlembicMigrationsMixin):
def _get_revisions(self, config):
return test_migrations.AlembicMigrationsMixin._get_revisions(
self, config, head='ocata_expand01')
def _pre_upgrade_ocata_expand01(self, engine):
images = db_utils.get_table(engine, 'images')
now = datetime.datetime.now()
self.assertIn('is_public', images.c)
self.assertNotIn('visibility', images.c)
self.assertFalse(images.c.is_public.nullable)
# inserting a public image record
public_temp = dict(deleted=False,
created_at=now,
status='active',
is_public=True,
min_disk=0,
min_ram=0,
id='public_id_before_expand')
with engine.connect() as conn, conn.begin():
conn.execute(images.insert().values(public_temp))
# inserting a private image record
shared_temp = dict(deleted=False,
created_at=now,
status='active',
is_public=False,
min_disk=0,
min_ram=0,
id='private_id_before_expand')
with engine.connect() as conn, conn.begin():
conn.execute(images.insert().values(shared_temp))
def _check_ocata_expand01(self, engine, data):
# check that after migration, 'visibility' column is introduced
images = db_utils.get_table(engine, 'images')
self.assertIn('visibility', images.c)
self.assertIn('is_public', images.c)
self.assertTrue(images.c.is_public.nullable)
self.assertTrue(images.c.visibility.nullable)
# tests visibility set to None for existing images
with engine.connect() as conn:
rows = conn.execute(
images.select().where(
images.c.id.like('%_before_expand')
).order_by(images.c.id)
).fetchall()
self.assertEqual(2, len(rows))
# private image first
self.assertEqual(0, rows[0]['is_public'])
self.assertEqual('private_id_before_expand', rows[0]['id'])
self.assertIsNone(rows[0]['visibility'])
# then public image
self.assertEqual(1, rows[1]['is_public'])
self.assertEqual('public_id_before_expand', rows[1]['id'])
self.assertIsNone(rows[1]['visibility'])
self._test_trigger_old_to_new(engine, images)
self._test_trigger_new_to_old(engine, images)
def _test_trigger_new_to_old(self, engine, images):
now = datetime.datetime.now()
# inserting a public image record after expand
public_temp = dict(deleted=False,
created_at=now,
status='active',
visibility='public',
min_disk=0,
min_ram=0,
id='public_id_new_to_old')
with engine.connect() as conn, conn.begin():
conn.execute(images.insert().values(public_temp))
# inserting a private image record after expand
shared_temp = dict(deleted=False,
created_at=now,
status='active',
visibility='private',
min_disk=0,
min_ram=0,
id='private_id_new_to_old')
with engine.connect() as conn, conn.begin():
conn.execute(images.insert().values(shared_temp))
# inserting a shared image record after expand
shared_temp = dict(deleted=False,
created_at=now,
status='active',
visibility='shared',
min_disk=0,
min_ram=0,
id='shared_id_new_to_old')
with engine.connect() as conn, conn.begin():
conn.execute(images.insert().values(shared_temp))
# test visibility is set appropriately by the trigger for new images
with engine.connect() as conn:
rows = conn.execute(
images.select().where(
images.c.id.like('%_new_to_old')
).order_by(images.c.id)
).fetchall()
self.assertEqual(3, len(rows))
# private image first
self.assertEqual(0, rows[0]['is_public'])
self.assertEqual('private_id_new_to_old', rows[0]['id'])
self.assertEqual('private', rows[0]['visibility'])
# then public image
self.assertEqual(1, rows[1]['is_public'])
self.assertEqual('public_id_new_to_old', rows[1]['id'])
self.assertEqual('public', rows[1]['visibility'])
# then shared image
self.assertEqual(0, rows[2]['is_public'])
self.assertEqual('shared_id_new_to_old', rows[2]['id'])
self.assertEqual('shared', rows[2]['visibility'])
def _test_trigger_old_to_new(self, engine, images):
now = datetime.datetime.now()
# inserting a public image record after expand
public_temp = dict(deleted=False,
created_at=now,
status='active',
is_public=True,
min_disk=0,
min_ram=0,
id='public_id_old_to_new')
with engine.connect() as conn, conn.begin():
conn.execute(images.insert().values(public_temp))
# inserting a private image record after expand
shared_temp = dict(deleted=False,
created_at=now,
status='active',
is_public=False,
min_disk=0,
min_ram=0,
id='private_id_old_to_new')
with engine.connect() as conn, conn.begin():
conn.execute(images.insert().values(shared_temp))
# tests visibility is set appropriately by the trigger for new images
with engine.connect() as conn:
rows = conn.execute(
images.select().where(
images.c.id.like('%_old_to_new')
).order_by(images.c.id)
).fetchall()
self.assertEqual(2, len(rows))
# private image first
self.assertEqual(0, rows[0]['is_public'])
self.assertEqual('private_id_old_to_new', rows[0]['id'])
self.assertEqual('shared', rows[0]['visibility'])
# then public image
self.assertEqual(1, rows[1]['is_public'])
self.assertEqual('public_id_old_to_new', rows[1]['id'])
self.assertEqual('public', rows[1]['visibility'])
class TestOcataExpand01MySQL(
TestOcataExpand01Mixin,
test_fixtures.OpportunisticDBTestMixin,
test_utils.BaseTestCase,
):
FIXTURE = test_fixtures.MySQLOpportunisticFixture
|
RecoBTag/Combined/python/caloDeepCSVTagInfos_cfi.py | ckamtsikis/cmssw | 852 | 11183100 | <reponame>ckamtsikis/cmssw
import FWCore.ParameterSet.Config as cms
from RecoBTag.SecondaryVertex.combinedSecondaryVertexCommon_cff import combinedSecondaryVertexCommon
caloDeepCSVTagInfos = cms.EDProducer(
'TrackDeepNNTagInfoProducer',
svTagInfos = cms.InputTag('inclusiveSecondaryVertexFinderTagInfos'),
computer = combinedSecondaryVertexCommon
) |
audit-forked-sites/check_status.py | t-develo/covid19 | 6,890 | 11183101 | import csv
import datetime
import markdown
import pandas as pd
import re
import urllib.request
def markdown_to_html(input: str):
return markdown.markdown(
input,
extensions=['markdown.extensions.tables']
)
def now_in_jst():
jst = datetime.timezone(datetime.timedelta(hours=9))
return datetime.datetime.now(jst).isoformat(timespec='seconds')
MD_PATH: str = './FORKED_SITES.md'
LOG_PATH: str = './audit-forked-sites/error.csv'
with open(MD_PATH, 'r', encoding='utf-8') as f:
md: str = f.read()
df = pd.read_html(markdown_to_html(md))
forked_sites: list = [
{
'timestamp': None,
'url': url,
'exists': None,
'status': None
}
for url in df[0]['公開サイト']
]
for website in forked_sites:
req = urllib.request.Request(
website['url'],
data=None,
headers={
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:86.0) Gecko/20100101 Firefox/86.0'
}
)
try:
with urllib.request.urlopen(req) as r:
website['exists'] = True
website['status'] = str(r.status)
except urllib.error.HTTPError as e:
website['exists'] = False
website['status'] = str(e.code)
except urllib.error.URLError as e:
website['exists'] = False
website['status'] = str(e.reason)
finally:
website['timestamp'] = now_in_jst()
alive_sites: list = [
website for website in forked_sites if website['exists'] is True]
if len(alive_sites) > 0:
for website in alive_sites:
        # Find the rows in the markdown table whose URL is in alive_sites (access restored)
row_find: str = r'(\n\| *\[\]\(\d*\)[^\W\s]+ *\| *' + \
website['url'] + r' *\|.*\|.*\| *)\*\*リンク切れ\*\*( *\|)'
        # Remove the "リンク切れ" (broken link) marker from the matching rows
row_replace: str = r'\1\2'
md = re.sub(
row_find,
row_replace,
md
)
else:
print('There is no website which has been alive or recovered.')
dead_sites: list = [
website for website in forked_sites if website['exists'] is False]
if len(dead_sites) > 0:
for website in dead_sites:
        # Find the rows in the markdown table whose URL is in dead_sites
row_find: str = r'(\n\| *\[\]\(\d*\)[^\W\s]+ *\| *' + \
website['url'] + r' *\|.*\|.*\| *)( *\|)'
        # Append the "リンク切れ" (broken link) marker to the rows containing the matching URL
row_replace: str = r'\1**リンク切れ**\2'
md = re.sub(
row_find,
row_replace,
md
)
with open(LOG_PATH, 'a', encoding='utf-8', newline='\n') as f:
writer = csv.writer(f)
for data in dead_sites:
writer.writerow(data.values())
else:
print('There is no website which has been dead.')
with open(MD_PATH, 'w', encoding='utf-8', newline='\n') as f:
f.write(md)
|
tests/test_pipeline.py | huntrax11/redis-shard | 284 | 11183121 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import unittest
from nose.tools import eq_
from redis.exceptions import WatchError
from redis_shard.shard import RedisShardAPI
from redis_shard._compat import b, xrange
from .config import settings
class TestShard(unittest.TestCase):
def setUp(self):
self.client = RedisShardAPI(**settings)
self.clear_db()
def tearDown(self):
pass
def clear_db(self):
self.client.delete('testset')
self.client.delete('testzset')
self.client.delete('testlist')
def test_pipeline(self):
self.client.set('test', '1')
pipe = self.client.pipeline()
pipe.set('test', '2')
pipe.zadd('testzset', {'first': 1})
pipe.zincrby('testzset', 1, 'first')
pipe.zadd('testzset', {'second': 2})
pipe.execute()
pipe.reset()
eq_(self.client.get('test'), b'2')
eq_(self.client.zscore('testzset', 'first'), 2.0)
eq_(self.client.zscore('testzset', 'second'), 2.0)
with self.client.pipeline() as pipe:
pipe.set('test', '3')
pipe.zadd('testzset', {'first': 4})
pipe.zincrby('testzset', 1, 'first')
pipe.zadd('testzset', {'second': 5})
pipe.execute()
eq_(self.client.get('test'), b'3')
eq_(self.client.zscore('testzset', 'first'), 5.0)
eq_(self.client.zscore('testzset', 'second'), 5.0)
with self.client.pipeline() as pipe:
pipe.watch('test')
eq_(self.client.get('test'), b'3')
pipe.multi()
pipe.incr('test')
eq_(pipe.execute(), [4])
eq_(self.client.get('test'), b'4')
with self.client.pipeline() as pipe:
pipe.watch('test')
pipe.multi()
pipe.incr('test')
self.client.decr('test')
self.assertRaises(WatchError, pipe.execute)
eq_(self.client.get('test'), b'3')
keys_of_names = {}
with self.client.pipeline() as pipe:
for key in xrange(100):
key = str(key)
name = pipe.shard_api.get_server_name(key)
if name not in keys_of_names:
keys_of_names[name] = key
else:
key1 = key
key2 = keys_of_names[name]
pipe.watch(key1, key2)
pipe.multi()
pipe.set(key1, 1)
pipe.set(key2, 2)
pipe.execute()
eq_(self.client.get(key1), b'1')
eq_(self.client.get(key2), b'2')
break
def test_pipeline_script(self):
pipe = self.client.pipeline()
for i in range(100):
pipe.eval("""
redis.call('set', KEYS[1], ARGV[1])
""", 1, 'testx%d' % i, i)
pipe.execute()
for i in range(100):
eq_(self.client.get('testx%d' % i), b('%d' % i))
|
xmasctf2020/doiknowyou/exploit.py | nhtri2003gmail/ctf-write-ups | 101 | 11183127 | #!/usr/bin/env python3
from pwn import *
p = remote('challs.xmas.htsp.ro', 2008)
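# Build the overflow: 0x20 bytes of filler to reach the target stack slot, then
# 0xdeadbeef packed as a 64-bit little-endian value (offsets inferred from the binary).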
payload = b''
payload += (0x38 - 0x18) * b'A'
payload += p64(0xdeadbeef)
p.sendlineafter('you?\n', payload)
p.stream()
|
data/source/tests/pyyal_test_type/open_file_object-end.py | libyal/libyal | 176 | 11183142 |
${library_name_suffix}_${type_name}.open_file_object(file_object)
with self.assertRaises(IOError):
${library_name_suffix}_${type_name}.open_file_object(file_object)
${library_name_suffix}_${type_name}.close()
with self.assertRaises(TypeError):
${library_name_suffix}_${type_name}.open_file_object(None)
with self.assertRaises(ValueError):
${library_name_suffix}_${type_name}.open_file_object(file_object, mode="w")
|
observations/r/train.py | hajime9652/observations | 199 | 11183160 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
import numpy as np
import os
import sys
from observations.util import maybe_download_and_extract
def train(path):
"""Stated Preferences for Train Traveling
a cross-section from 1987
*number of observations* : 2929
*observation* : individuals
*country* : Netherland
A dataframe containing :
id
individual identifier
choiceid
choice identifier
choice
one of choice1, choice2
pricez
price of proposition z (z=1,2) in cents of guilders
timez
travel time of proposition z (z=1,2) in minutes
comfortz
comfort of proposition z (z=1,2), 0, 1 or 2 in decreasing comfort
order
changez
number of changes for proposition z (z=1,2)
Meijer, Erik and <NAME> (2005) “Measuring welfare effects in
models with random coefficients”, *Journal of Applied Econometrics*,
**forthcoming**.
<NAME>., <NAME> and <NAME> (1993) “Estimation of travel
choice models with randomly distributed values of time”, *Transportation
Research Record*, **1413**, 88–97.
<NAME>., <NAME> and <NAME> (1994) “Valuing the preservation of
Australia's Kakadu conservation zone”, *Oxford Economic Papers*, **46**,
727–749.
Args:
path: str.
Path to directory which either stores file or otherwise file will
be downloaded and extracted there.
Filename is `train.csv`.
Returns:
Tuple of np.ndarray `x_train` with 2929 rows and 11 columns and
dictionary `metadata` of column headers (feature names).
"""
import pandas as pd
path = os.path.expanduser(path)
filename = 'train.csv'
if not os.path.exists(os.path.join(path, filename)):
url = 'http://dustintran.com/data/r/Ecdat/Train.csv'
maybe_download_and_extract(path, url,
save_file_name='train.csv',
resume=False)
data = pd.read_csv(os.path.join(path, filename), index_col=0,
parse_dates=True)
x_train = data.values
metadata = {'columns': data.columns}
return x_train, metadata
|
silverstrike/tests/test_importers.py | CodingForVega/silverstrike | 298 | 11183171 | <reponame>CodingForVega/silverstrike
import os
from datetime import date
from unittest import skipUnless
from django.test import TestCase
from silverstrike import importers
class ImportTests(TestCase):
def setUp(self):
super(ImportTests, self).setUp()
self.base_dir = os.path.join(os.path.dirname(__file__), 'fixtures')
def test_firefly_import(self):
importers.firefly.import_firefly(os.path.join(self.base_dir, 'firefly.csv'))
def test_pc_mastercard(self):
transactions = importers.pc_mastercard.import_transactions(
os.path.join(self.base_dir, 'president-choice-mastercard.csv'))
self.assertEqual(len(transactions), 4)
t = transactions[0]
self.assertEqual(t.amount, -40.03)
self.assertEqual(t.book_date, date(2018, 10, 18))
@skipUnless(hasattr(importers, 'ofx'), 'ofxparse is not installed')
def test_ofx(self):
transactions = importers.ofx.import_transactions(
os.path.join(self.base_dir, 'ofx.qfx'))
t = transactions[0]
self.assertEqual(t.amount, 34.50)
self.assertEqual(t.book_date, date(2018, 1, 2))
|
test/cli/test_readelf.py | rakati/ppci-mirror | 161 | 11183176 | import unittest
import io
import os
from unittest.mock import patch
from ppci.cli.readelf import readelf
bash_path = '/usr/bin/bash'
class ReadelfTestCase(unittest.TestCase):
@patch('sys.stdout', new_callable=io.StringIO)
def test_help(self, mock_stdout):
""" Check readelf help message """
with self.assertRaises(SystemExit) as cm:
readelf(['-h'])
self.assertEqual(0, cm.exception.code)
@unittest.skipUnless(
os.path.exists(bash_path), '{} does not exist'.format(bash_path))
@patch('sys.stdout', new_callable=io.StringIO)
def test_bash(self, mock_stdout):
""" Check readelf on /usr/bin/bash message """
readelf(['-a', bash_path])
if __name__ == '__main__':
unittest.main(verbosity=2)
|
scripts/reimage.py | varshar16/teuthology | 117 | 11183179 | <filename>scripts/reimage.py
import docopt
import sys
import teuthology.reimage
doc = """
usage: teuthology-reimage --help
teuthology-reimage --os-type distro --os-version version [options] <nodes>...
Reimage nodes without locking using specified distro type and version.
The nodes must be locked by the current user, otherwise an error occurs.
A custom owner can be specified in order to reimage someone else's nodes.
Reimaging unlocked nodes is not supported.
Standard arguments:
-h, --help Show this help message and exit
-v, --verbose Be more verbose
--os-type <os-type> Distro type like: rhel, ubuntu, etc.
--os-version <os-version> Distro version like: 7.6, 16.04, etc.
--owner user@host Owner of the locked machines
"""
def main(argv=sys.argv[1:]):
args = docopt.docopt(doc, argv=argv)
return teuthology.reimage.main(args)
|
corehq/apps/case_search/urls.py | rochakchauhan/commcare-hq | 471 | 11183245 | from django.conf.urls import url
from corehq.apps.case_search.views import CaseSearchView
urlpatterns = [
url(r'^search/$', CaseSearchView.as_view(), name=CaseSearchView.urlname),
]
|
packages/Python/lldbsuite/test/lang/swift/protocols/class_protocol/TestClassConstrainedProtocolArgument.py | xiaobai/swift-lldb | 765 | 11183252 | """
Test that variables passed in as a class constrained protocol type
are correctly printed.
"""
import lldbsuite.test.lldbinline as lldbinline
from lldbsuite.test.decorators import *
lldbinline.MakeInlineTest(
__file__, globals(), decorators=[skipUnlessDarwin])
|
contrib/opencensus-ext-azure/opencensus/ext/azure/metrics_exporter/statsbeat_metrics/__init__.py | serjshevchenko/opencensus-python | 650 | 11183262 | # Copyright 2020, OpenCensus Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import threading
from opencensus.ext.azure.metrics_exporter import MetricsExporter
from opencensus.ext.azure.metrics_exporter.statsbeat_metrics.statsbeat import (
_STATS_CONNECTION_STRING,
_STATS_SHORT_EXPORT_INTERVAL,
_StatsbeatMetrics,
)
from opencensus.metrics import transport
from opencensus.metrics.export.metric_producer import MetricProducer
_STATSBEAT_METRICS = None
_STATSBEAT_LOCK = threading.Lock()
def collect_statsbeat_metrics(options):
with _STATSBEAT_LOCK:
        # Only start statsbeat if it does not already exist
global _STATSBEAT_METRICS # pylint: disable=global-statement
if _STATSBEAT_METRICS is None:
exporter = MetricsExporter(
is_stats=True,
connection_string=_STATS_CONNECTION_STRING,
enable_standard_metrics=False,
export_interval=_STATS_SHORT_EXPORT_INTERVAL, # 15m by default
)
# The user's ikey is the one being tracked
producer = _AzureStatsbeatMetricsProducer(options)
_STATSBEAT_METRICS = producer
# Export some initial stats on program start
exporter.export_metrics(_STATSBEAT_METRICS.get_initial_metrics())
exporter.exporter_thread = \
transport.get_exporter_thread([_STATSBEAT_METRICS],
exporter,
exporter.options.export_interval)
class _AzureStatsbeatMetricsProducer(MetricProducer):
"""Implementation of the producer of statsbeat metrics.
Includes Azure attach rate, network and feature metrics,
implemented using gauges.
"""
def __init__(self, options):
self._statsbeat = _StatsbeatMetrics(options)
def get_metrics(self):
return self._statsbeat.get_metrics()
def get_initial_metrics(self):
return self._statsbeat.get_initial_metrics()
|
python/ql/src/Security/CWE-079/examples/jinja2.py | vadi2/codeql | 4,036 | 11183263 | from flask import Flask, request, make_response, escape
from jinja2 import Environment, select_autoescape, FileSystemLoader
app = Flask(__name__)
loader = FileSystemLoader( searchpath="templates/" )
unsafe_env = Environment(loader=loader)
safe1_env = Environment(loader=loader, autoescape=True)
safe2_env = Environment(loader=loader, autoescape=select_autoescape())
def render_response_from_env(env):
name = request.args.get('name', '')
template = env.get_template('template.html')
return make_response(template.render(name=name))
@app.route('/unsafe')
def unsafe():
return render_response_from_env(unsafe_env)
@app.route('/safe1')
def safe1():
return render_response_from_env(safe1_env)
@app.route('/safe2')
def safe2():
return render_response_from_env(safe2_env)
|
lib/data_utils/kp_utils.py | ziniuwan/maed | 145 | 11183266 | <gh_stars>100-1000
import numpy as np
def convert_kps_to_mask(kp_2d, visibility, mask_size):
"""
Args:
kp_2d: size (49, 2) range from 0 to 224
visibility: size (49,)
"""
img_size = 224
mask = np.zeros((mask_size,mask_size), dtype=np.float16)
kp_2d = kp_2d.copy()
kp_2d = kp_2d // (img_size//mask_size) # normalize from 0~224 to 0~mask_size
kp_2d = np.floor(kp_2d).astype(np.int8)
kp_2d[kp_2d>mask_size-1] = mask_size-1
kp_2d[kp_2d<0] = 0
for kp, vis in zip(kp_2d, visibility):
if vis == 0:
continue
mask[kp[1], kp[0]] = 1
return mask
def keypoint_2d_hflip(kp_2d, img_width):
"""Flip 2d keypoint horizontally around the y-axis
Args:
kp_2d: numpy array of shape (T, N, *) or (N, *) where N is the number of keypoints.
img_width: int
Return:
numpy array of the same size of input
"""
# exchange left limbs and right limbs due to visual chirality
if len(kp_2d.shape) == 2:
kp_2d = kp_2d[None,:,:]
kp_2d = convert_kps(kp_2d, src='spin', dst='spin', flip=True)
# flip it along y-axis
kp_2d[:,:,0] = (img_width - 1.) - kp_2d[:,:,0]
return kp_2d.squeeze()
def keypoint_3d_hflip(kp_3d):
"""Flip 3d keypoint horizontally around the y-axis
Args:
kp_3d: (T, N, *) or (N, *) where N is the number of keypoints.
Return:
numpy array of the same size of input
"""
# exchange left limbs and right limbs due to visual chirality
if len(kp_3d.shape) == 2:
kp_3d = kp_3d[None,:,:]
kp_3d = convert_kps(kp_3d, src='spin', dst='spin', flip=True)
# flip it along y-axis
pelvis = (kp_3d[:,27,:] + kp_3d[:, 28,:]) / 2
kp_3d = kp_3d - pelvis[:, None, :]
kp_3d[:,:,0] = -kp_3d[:,:,0]
kp_3d += pelvis[:, None, :]
return kp_3d.squeeze()
def smpl_pose_hflip(pose):
"""Flip smpl pose parameters
Args:
pose: numpy array of shape (T, 72, ) or (72, )
Return:
numpy array of the same size of input
"""
if len(pose.shape) == 1:
pose = pose[None, :]
pose_orig = np.reshape(pose,(-1, 24, 3))
pose_flip = pose_orig.copy()
for idx in range(pose_flip.shape[1]-1): # skip root joint
flip_name = get_smpl_joint_names(True)[idx]
flip_idx = get_smpl_joint_names().index(flip_name)
pose_flip[:, idx, 0] = pose_orig[:, flip_idx, 0]
pose_flip[:, idx, 1:] = -pose_orig[:, flip_idx, 1:]
return np.reshape(pose_flip, (-1, 72)).squeeze()
def convert_kps(joints, src, dst, flip=False):
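    """Map keypoints from the ``src`` convention to the ``dst`` convention by joint name.

    ``joints`` is expected to have shape (T, N_src, dim); destination joints with no
    counterpart in the source convention are left as zeros. ``flip=True`` asks the
    destination getter for its left/right-swapped name order (supported e.g. by 'spin').
    """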
src_names = eval(f'get_{src}_joint_names')()
dst_names = eval(f'get_{dst}_joint_names')(flip)
out_joints = np.zeros((joints.shape[0], len(dst_names), joints.shape[2]))
for idx, jn in enumerate(dst_names):
if jn in src_names:
out_joints[:, idx] = joints[:, src_names.index(jn)]
return out_joints
def get_perm_idxs(src, dst):
src_names = eval(f'get_{src}_joint_names')()
dst_names = eval(f'get_{dst}_joint_names')()
idxs = [src_names.index(h) for h in dst_names if h in src_names]
return idxs
def get_connectivity():
return [
('headtop', 'neck'),
('neck', 'lshoulder'),
('neck', 'rshoulder'),
('rshoulder', 'relbow'),
('relbow', 'rwrist'),
('lshoulder', 'lelbow'),
('lelbow', 'lwrist'),
('neck', 'lhip'),
('neck', 'rhip'),
('rhip', 'rknee'),
('rknee', 'rankle'),
('lhip', 'lknee'),
('lknee', 'lankle')
]
def get_mpii3d_test_joint_names():
return [
'headtop', # 'head_top',
'neck',
'rshoulder',# 'right_shoulder',
'relbow',# 'right_elbow',
'rwrist',# 'right_wrist',
'lshoulder',# 'left_shoulder',
'lelbow', # 'left_elbow',
'lwrist', # 'left_wrist',
'rhip', # 'right_hip',
'rknee', # 'right_knee',
'rankle',# 'right_ankle',
'lhip',# 'left_hip',
'lknee',# 'left_knee',
'lankle',# 'left_ankle'
'hip',# 'pelvis',
'Spine (H36M)',# 'spine',
'Head (H36M)',# 'head'
]
def get_mpii3d_joint_names():
return [
'spine3', # 0,
'spine4', # 1,
'spine2', # 2,
'Spine (H36M)', #'spine', # 3,
'hip', # 'pelvis', # 4,
'neck', # 5,
'Head (H36M)', # 'head', # 6,
"headtop", # 'head_top', # 7,
'left_clavicle', # 8,
"lshoulder", # 'left_shoulder', # 9,
"lelbow", # 'left_elbow',# 10,
"lwrist", # 'left_wrist',# 11,
'left_hand',# 12,
'right_clavicle',# 13,
'rshoulder',# 'right_shoulder',# 14,
'relbow',# 'right_elbow',# 15,
'rwrist',# 'right_wrist',# 16,
'right_hand',# 17,
'lhip', # left_hip',# 18,
'lknee', # 'left_knee',# 19,
'lankle', #left ankle # 20
'left_foot', # 21
'left_toe', # 22
"rhip", # 'right_hip',# 23
"rknee", # 'right_knee',# 24
"rankle", #'right_ankle', # 25
'right_foot',# 26
'right_toe' # 27
]
def get_insta_joint_names():
return [
'OP RHeel',
'OP RKnee',
'OP RHip',
'OP LHip',
'OP LKnee',
'OP LHeel',
'OP RWrist',
'OP RElbow',
'OP RShoulder',
'OP LShoulder',
'OP LElbow',
'OP LWrist',
'OP Neck',
'headtop',
'OP Nose',
'OP LEye',
'OP REye',
'OP LEar',
'OP REar',
'OP LBigToe',
'OP RBigToe',
'OP LSmallToe',
'OP RSmallToe',
'OP LAnkle',
'OP RAnkle',
]
def get_insta_skeleton():
return np.array(
[
[0 , 1],
[1 , 2],
[2 , 3],
[3 , 4],
[4 , 5],
[6 , 7],
[7 , 8],
[8 , 9],
[9 ,10],
[2 , 8],
[3 , 9],
[10,11],
[8 ,12],
[9 ,12],
[12,13],
[12,14],
[14,15],
[14,16],
[15,17],
[16,18],
[0 ,20],
[20,22],
[5 ,19],
[19,21],
[5 ,23],
[0 ,24],
])
def get_staf_skeleton():
return np.array(
[
[0, 1],
[1, 2],
[2, 3],
[3, 4],
[1, 5],
[5, 6],
[6, 7],
[1, 8],
[8, 9],
[9, 10],
[10, 11],
[8, 12],
[12, 13],
[13, 14],
[0, 15],
[0, 16],
[15, 17],
[16, 18],
[2, 9],
[5, 12],
[1, 19],
[20, 19],
]
)
def get_staf_joint_names():
return [
'OP Nose', # 0,
'OP Neck', # 1,
'OP RShoulder', # 2,
'OP RElbow', # 3,
'OP RWrist', # 4,
'OP LShoulder', # 5,
'OP LElbow', # 6,
'OP LWrist', # 7,
'OP MidHip', # 8,
'OP RHip', # 9,
'OP RKnee', # 10,
'OP RAnkle', # 11,
'OP LHip', # 12,
'OP LKnee', # 13,
'OP LAnkle', # 14,
'OP REye', # 15,
'OP LEye', # 16,
'OP REar', # 17,
'OP LEar', # 18,
'Neck (LSP)', # 19,
'Top of Head (LSP)', # 20,
]
def get_spin_joint_names(flip=False):
if flip:
        # SPIN joint names, flipped version: every left joint swapped with its right counterpart.
return [
'OP Nose', # 0
'OP Neck', # 1
'OP LShoulder', # 2
'OP LElbow', # 3
'OP LWrist', # 4
'OP RShoulder', # 5
'OP RElbow', # 6
'OP RWrist', # 7
'OP MidHip', # 8
'OP LHip', # 9
'OP LKnee', # 10
'OP LAnkle', # 11
'OP RHip', # 12
'OP RKnee', # 13
'OP RAnkle', # 14
'OP LEye', # 15
'OP REye', # 16
'OP LEar', # 17
'OP REar', # 18
'OP RBigToe', # 19
'OP RSmallToe', # 20
'OP RHeel', # 21
'OP LBigToe', # 22
'OP LSmallToe', # 23
'OP LHeel', # 24
'lankle', # 25
'lknee', # 26
'lhip', # 27
'rhip', # 28
'rknee', # 29
'rankle', # 30
'lwrist', # 31
'lelbow', # 32
'lshoulder', # 33
'rshoulder', # 34
'relbow', # 35
'rwrist', # 36
'neck', # 37
'headtop', # 38
'hip', # 39 'Pelvis (MPII)', # 39
'thorax', # 40 'Thorax (MPII)', # 40
'Spine (H36M)', # 41
'Jaw (H36M)', # 42
'Head (H36M)', # 43
'nose', # 44
'reye', # 45 'Left Eye', # 45
'leye', # 46 'Right Eye', # 46
'rear', # 47 'Left Ear', # 47
'lear', # 48 'Right Ear', # 48
]
else:
return [
'OP Nose', # 0
'OP Neck', # 1
'OP RShoulder', # 2
'OP RElbow', # 3
'OP RWrist', # 4
'OP LShoulder', # 5
'OP LElbow', # 6
'OP LWrist', # 7
'OP MidHip', # 8
'OP RHip', # 9
'OP RKnee', # 10
'OP RAnkle', # 11
'OP LHip', # 12
'OP LKnee', # 13
'OP LAnkle', # 14
'OP REye', # 15
'OP LEye', # 16
'OP REar', # 17
'OP LEar', # 18
'OP LBigToe', # 19
'OP LSmallToe', # 20
'OP LHeel', # 21
'OP RBigToe', # 22
'OP RSmallToe', # 23
'OP RHeel', # 24
'rankle', # 25
'rknee', # 26
'rhip', # 27
'lhip', # 28
'lknee', # 29
'lankle', # 30
'rwrist', # 31
'relbow', # 32
'rshoulder', # 33
'lshoulder', # 34
'lelbow', # 35
'lwrist', # 36
'neck', # 37
'headtop', # 38
'hip', # 39 'Pelvis (MPII)', # 39
'thorax', # 40 'Thorax (MPII)', # 40
'Spine (H36M)', # 41
'Jaw (H36M)', # 42
'Head (H36M)', # 43
'nose', # 44
'leye', # 45 'Left Eye', # 45
'reye', # 46 'Right Eye', # 46
'lear', # 47 'Left Ear', # 47
'rear', # 48 'Right Ear', # 48
]
def get_spin2_joint_names():
return [
'rankle', # 0
'rknee', # 1
'rhip', # 2
'lhip', # 3
'lknee', # 4
'lankle', # 5
'rwrist', # 6
'relbow', # 7
'rshoulder', # 8
'lshoulder', # 9
'lelbow', # 10
'lwrist', # 11
'neck', # 12
'headtop', # 13
'hip', # 14 'Pelvis (MPII)',
'thorax', # 15 'Thorax (MPII)',
'Spine (H36M)', # 16
'Jaw (H36M)', # 17
'Head (H36M)', # 18
'nose', # 19
'leye', # 20 'Left Eye',
'reye', # 21 'Right Eye',
'lear', # 22 'Left Ear',
'rear', # 23 'Right Ear',
]
def get_h36m_joint_names():
return [
'hip', # 0
'lhip', # 1
'lknee', # 2
'lankle', # 3
'rhip', # 4
'rknee', # 5
'rankle', # 6
'Spine (H36M)', # 7
'neck', # 8
'Head (H36M)', # 9
'headtop', # 10
'lshoulder', # 11
'lelbow', # 12
'lwrist', # 13
'rshoulder', # 14
'relbow', # 15
'rwrist', # 16
]
def get_spin_skeleton():
return np.array(
[
[0 , 1],
[1 , 2],
[2 , 3],
[3 , 4],
[1 , 5],
[5 , 6],
[6 , 7],
[1 , 8],
[8 , 9],
[9 ,10],
[10,11],
[8 ,12],
[12,13],
[13,14],
[0 ,15],
[0 ,16],
[15,17],
[16,18],
[21,19],
[19,20],
[14,21],
[11,24],
[24,22],
[22,23],
[0 ,38],
]
)
def get_posetrack_joint_names():
return [
"nose",
"neck",
"headtop",
"lear",
"rear",
"lshoulder",
"rshoulder",
"lelbow",
"relbow",
"lwrist",
"rwrist",
"lhip",
"rhip",
"lknee",
"rknee",
"lankle",
"rankle"
]
def get_posetrack_original_kp_names():
return [
'nose',
'head_bottom',
'head_top',
'left_ear',
'right_ear',
'left_shoulder',
'right_shoulder',
'left_elbow',
'right_elbow',
'left_wrist',
'right_wrist',
'left_hip',
'right_hip',
'left_knee',
'right_knee',
'left_ankle',
'right_ankle'
]
def get_pennaction_joint_names():
return [
"lankle", # 0
"lknee", # 1
"lhip", # 2
"rhip", # 3
"rknee", # 4
"rankle", # 5
"lwrist", # 6
"lelbow" , # 7
"lshoulder" , # 8
"rshoulder", # 9
"relbow" , # 10
"rwrist", # 11
"headtop" # 12
]
"""
def get_pennaction_joint_names():
return [
"headtop", # 0
"lshoulder", # 1
"rshoulder", # 2
"lelbow", # 3
"relbow", # 4
"lwrist", # 5
"rwrist", # 6
"lhip" , # 7
"rhip" , # 8
"lknee", # 9
"rknee" , # 10
"lankle", # 11
"rankle" # 12
]
"""
def get_common_joint_names():
return [
"rankle", # 0 "lankle", # 0
"rknee", # 1 "lknee", # 1
"rhip", # 2 "lhip", # 2
"lhip", # 3 "rhip", # 3
"lknee", # 4 "rknee", # 4
"lankle", # 5 "rankle", # 5
"rwrist", # 6 "lwrist", # 6
"relbow", # 7 "lelbow", # 7
"rshoulder", # 8 "lshoulder", # 8
"lshoulder", # 9 "rshoulder", # 9
"lelbow", # 10 "relbow", # 10
"lwrist", # 11 "rwrist", # 11
"neck", # 12 "neck", # 12
"headtop", # 13 "headtop", # 13
]
def get_common_skeleton():
return np.array(
[
[ 0, 1 ],
[ 1, 2 ],
[ 3, 4 ],
[ 4, 5 ],
[ 6, 7 ],
[ 7, 8 ],
[ 8, 2 ],
[ 8, 9 ],
[ 9, 3 ],
[ 2, 3 ],
[ 8, 12],
[ 9, 10],
[12, 9 ],
[10, 11],
[12, 13],
]
)
def get_coco_joint_names():
return [
"nose", # 0
"leye", # 1
"reye", # 2
"lear", # 3
"rear", # 4
"lshoulder", # 5
"rshoulder", # 6
"lelbow", # 7
"relbow", # 8
"lwrist", # 9
"rwrist", # 10
"lhip", # 11
"rhip", # 12
"lknee", # 13
"rknee", # 14
"lankle", # 15
"rankle", # 16
]
def get_coco_skeleton():
# 0 - nose,
# 1 - leye,
# 2 - reye,
# 3 - lear,
# 4 - rear,
# 5 - lshoulder,
# 6 - rshoulder,
# 7 - lelbow,
# 8 - relbow,
# 9 - lwrist,
# 10 - rwrist,
# 11 - lhip,
# 12 - rhip,
# 13 - lknee,
# 14 - rknee,
# 15 - lankle,
# 16 - rankle,
return np.array(
[
[15, 13],
[13, 11],
[16, 14],
[14, 12],
[11, 12],
[ 5, 11],
[ 6, 12],
[ 5, 6 ],
[ 5, 7 ],
[ 6, 8 ],
[ 7, 9 ],
[ 8, 10],
[ 1, 2 ],
[ 0, 1 ],
[ 0, 2 ],
[ 1, 3 ],
[ 2, 4 ],
[ 3, 5 ],
[ 4, 6 ]
]
)
def get_mpii_joint_names():
return [
"rankle", # 0
"rknee", # 1
"rhip", # 2
"lhip", # 3
"lknee", # 4
"lankle", # 5
"hip", # 6
"thorax", # 7
"neck", # 8
"headtop", # 9
"rwrist", # 10
"relbow", # 11
"rshoulder", # 12
"lshoulder", # 13
"lelbow", # 14
"lwrist", # 15
]
def get_mpii_skeleton():
# 0 - rankle,
# 1 - rknee,
# 2 - rhip,
# 3 - lhip,
# 4 - lknee,
# 5 - lankle,
# 6 - hip,
# 7 - thorax,
# 8 - neck,
# 9 - headtop,
# 10 - rwrist,
# 11 - relbow,
# 12 - rshoulder,
# 13 - lshoulder,
# 14 - lelbow,
# 15 - lwrist,
return np.array(
[
[ 0, 1 ],
[ 1, 2 ],
[ 2, 6 ],
[ 6, 3 ],
[ 3, 4 ],
[ 4, 5 ],
[ 6, 7 ],
[ 7, 8 ],
[ 8, 9 ],
[ 7, 12],
[12, 11],
[11, 10],
[ 7, 13],
[13, 14],
[14, 15]
]
)
def get_aich_joint_names():
return [
"rshoulder", # 0
"relbow", # 1
"rwrist", # 2
"lshoulder", # 3
"lelbow", # 4
"lwrist", # 5
"rhip", # 6
"rknee", # 7
"rankle", # 8
"lhip", # 9
"lknee", # 10
"lankle", # 11
"headtop", # 12
"neck", # 13
]
def get_aich_skeleton():
# 0 - rshoulder,
# 1 - relbow,
# 2 - rwrist,
# 3 - lshoulder,
# 4 - lelbow,
# 5 - lwrist,
# 6 - rhip,
# 7 - rknee,
# 8 - rankle,
# 9 - lhip,
# 10 - lknee,
# 11 - lankle,
# 12 - headtop,
# 13 - neck,
return np.array(
[
[ 0, 1 ],
[ 1, 2 ],
[ 3, 4 ],
[ 4, 5 ],
[ 6, 7 ],
[ 7, 8 ],
[ 9, 10],
[10, 11],
[12, 13],
[13, 0 ],
[13, 3 ],
[ 0, 6 ],
[ 3, 9 ]
]
)
def get_3dpw_joint_names():
return [
"nose", # 0
"thorax", # 1
"rshoulder", # 2
"relbow", # 3
"rwrist", # 4
"lshoulder", # 5
"lelbow", # 6
"lwrist", # 7
"rhip", # 8
"rknee", # 9
"rankle", # 10
"lhip", # 11
"lknee", # 12
"lankle", # 13
]
def get_3dpw_skeleton():
return np.array(
[
[ 0, 1 ],
[ 1, 2 ],
[ 2, 3 ],
[ 3, 4 ],
[ 1, 5 ],
[ 5, 6 ],
[ 6, 7 ],
[ 2, 8 ],
[ 5, 11],
[ 8, 11],
[ 8, 9 ],
[ 9, 10],
[11, 12],
[12, 13]
]
)
def get_smplcoco_joint_names():
return [
"rankle", # 0
"rknee", # 1
"rhip", # 2
"lhip", # 3
"lknee", # 4
"lankle", # 5
"rwrist", # 6
"relbow", # 7
"rshoulder", # 8
"lshoulder", # 9
"lelbow", # 10
"lwrist", # 11
"neck", # 12
"headtop", # 13
"nose", # 14
"leye", # 15
"reye", # 16
"lear", # 17
"rear", # 18
]
def get_smplcoco_skeleton():
return np.array(
[
[ 0, 1 ],
[ 1, 2 ],
[ 3, 4 ],
[ 4, 5 ],
[ 6, 7 ],
[ 7, 8 ],
[ 8, 12],
[12, 9 ],
[ 9, 10],
[10, 11],
[12, 13],
[14, 15],
[15, 17],
[16, 18],
[14, 16],
[ 8, 2 ],
[ 9, 3 ],
[ 2, 3 ],
]
)
def get_smpl_joint_names(flip=False):
if flip:
return [
'hips', # 0
'rightUpLeg', # 1
'leftUpLeg', # 2
'spine', # 3
'rightLeg', # 4
'leftLeg', # 5
'spine1', # 6
'rightFoot', # 7
'leftFoot', # 8
'spine2', # 9
'rightToeBase', # 10
'leftToeBase', # 11
'neck', # 12
'rightShoulder', # 13
'leftShoulder', # 14
'head', # 15
'rightArm', # 16
'leftArm', # 17
'rightForeArm', # 18
'leftForeArm', # 19
'rightHand', # 20
'leftHand', # 21
'rightHandIndex1', # 22
'leftHandIndex1', # 23
]
else:
return [
'hips', # 0
'leftUpLeg', # 1
'rightUpLeg', # 2
'spine', # 3
'leftLeg', # 4
'rightLeg', # 5
'spine1', # 6
'leftFoot', # 7
'rightFoot', # 8
'spine2', # 9
'leftToeBase', # 10
'rightToeBase', # 11
'neck', # 12
'leftShoulder', # 13
'rightShoulder', # 14
'head', # 15
'leftArm', # 16
'rightArm', # 17
'leftForeArm', # 18
'rightForeArm', # 19
'leftHand', # 20
'rightHand', # 21
'leftHandIndex1', # 22
'rightHandIndex1', # 23
]
def get_smpl_skeleton():
return np.array(
[
[ 0, 1 ],
[ 0, 2 ],
[ 0, 3 ],
[ 1, 4 ],
[ 2, 5 ],
[ 3, 6 ],
[ 4, 7 ],
[ 5, 8 ],
[ 6, 9 ],
[ 7, 10],
[ 8, 11],
[ 9, 12],
[ 9, 13],
[ 9, 14],
[12, 15],
[13, 16],
[14, 17],
[16, 18],
[17, 19],
[18, 20],
[19, 21],
[20, 22],
[21, 23],
]
) |
volttrontesting/testutils/test_getinstance_2.py | cloudcomputingabc/volttron | 406 | 11183273 | <filename>volttrontesting/testutils/test_getinstance_2.py
import pytest
from volttrontesting.utils.platformwrapper import PlatformWrapper
@pytest.mark.wrapper
def test_fixture_starts_platforms(get_volttron_instances):
num_instances = 5
wrappers = get_volttron_instances(num_instances)
assert num_instances == len(wrappers)
for w in wrappers:
assert isinstance(w, PlatformWrapper)
assert w.is_running()
w.shutdown_platform()
|
mmfashion/models/losses/margin_ranking_loss.py | RyanJiang0416/mmfashion | 952 | 11183286 | <gh_stars>100-1000
import torch
import torch.nn as nn
import torch.nn.functional as F
from ..registry import LOSSES
@LOSSES.register_module
class MarginRankingLoss(nn.Module):
def __init__(self,
margin=0.2,
loss_weight=5e-5,
size_average=None,
reduce=None,
reduction='mean'):
super(MarginRankingLoss, self).__init__()
self.margin = margin
self.loss_weight = loss_weight
self.reduction = reduction
def forward(self, input1, input2, target):
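        """Return ``loss_weight * margin_ranking_loss(input1, input2, target)``, i.e. the
        reduction of ``max(0, -target * (input1 - input2) + margin)``."""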
return self.loss_weight * F.margin_ranking_loss(
input1,
input2,
target,
margin=self.margin,
reduction=self.reduction)
@LOSSES.register_module
class SelectiveMarginLoss(nn.Module):
def __init__(self, loss_weight=5e-5, margin=0.2):
super(SelectiveMarginLoss, self).__init__()
self.margin = margin
self.loss_weight = loss_weight
def forward(self, pos_samples, neg_samples, has_sample):
margin_diff = torch.clamp(
(pos_samples - neg_samples) + self.margin, min=0, max=1e6)
num_sample = max(torch.sum(has_sample), 1)
return self.loss_weight * (
torch.sum(margin_diff * has_sample) / num_sample)
|
image_thumbnailer.py | susannahsoon/oldperth | 302 | 11183305 | <filename>image_thumbnailer.py<gh_stars>100-1000
#!/usr/bin/python
import record
import fetcher
import os
rs = record.AllRecords()
f = fetcher.Fetcher('images', 0)
rs = [r for r in rs if (r.photo_url and f.InCache(r.photo_url))]
for idx, r in enumerate(rs):
in_image = f.CacheFile(r.photo_url)
out_image = 'thumbnails/%s.jpg' % r.photo_id()
cmd = 'convert %s -resize 200x200 %s' % (in_image, out_image)
print '%05d %s' % (idx, cmd)
os.system(cmd)
|
notifiers/providers/email.py | JonShedden/notifiers | 1,508 | 11183307 | import getpass
import mimetypes
import smtplib
import socket
from email.message import EmailMessage
from email.utils import formatdate
from pathlib import Path
from smtplib import SMTPAuthenticationError
from smtplib import SMTPSenderRefused
from smtplib import SMTPServerDisconnected
from typing import List
from typing import Tuple
from ..core import Provider
from ..core import Response
from ..utils.schema.helpers import list_to_commas
from ..utils.schema.helpers import one_or_more
DEFAULT_SUBJECT = "New email from 'notifiers'!"
DEFAULT_FROM = f"{getpass.getuser()}@{socket.getfqdn()}"
DEFAULT_SMTP_HOST = "localhost"
class SMTP(Provider):
"""Send emails via SMTP"""
base_url = None
site_url = "https://en.wikipedia.org/wiki/Email"
name = "email"
_required = {"required": ["message", "to", "username", "password"]}
_schema = {
"type": "object",
"properties": {
"message": {"type": "string", "title": "the content of the email message"},
"subject": {"type": "string", "title": "the subject of the email message"},
"to": one_or_more(
{
"type": "string",
"format": "email",
"title": "one or more email addresses to use",
}
),
"from": {
"type": "string",
"format": "email",
"title": "the FROM address to use in the email",
},
"from_": {
"type": "string",
"format": "email",
"title": "the FROM address to use in the email",
"duplicate": True,
},
"attachments": one_or_more(
{
"type": "string",
"format": "valid_file",
"title": "one or more attachments to use in the email",
}
),
"host": {
"type": "string",
"format": "hostname",
"title": "the host of the SMTP server",
},
"port": {
"type": "integer",
"format": "port",
"title": "the port number to use",
},
"username": {"type": "string", "title": "username if relevant"},
"password": {"type": "string", "title": "password if relevant"},
"tls": {"type": "boolean", "title": "should TLS be used"},
"ssl": {"type": "boolean", "title": "should SSL be used"},
"html": {
"type": "boolean",
"title": "should the email be parse as an HTML file",
},
"login": {"type": "boolean", "title": "Trigger login to server"},
},
"dependencies": {
"username": ["password"],
"password": ["<PASSWORD>"],
"ssl": ["tls"],
},
"additionalProperties": False,
}
@staticmethod
def _get_mimetype(attachment: Path) -> Tuple[str, str]:
"""Taken from https://docs.python.org/3/library/email.examples.html"""
ctype, encoding = mimetypes.guess_type(str(attachment))
if ctype is None or encoding is not None:
# No guess could be made, or the file is encoded (compressed), so
# use a generic bag-of-bits type.
ctype = "application/octet-stream"
maintype, subtype = ctype.split("/", 1)
return maintype, subtype
def __init__(self):
super().__init__()
self.smtp_server = None
self.configuration = None
@property
def defaults(self) -> dict:
return {
"subject": DEFAULT_SUBJECT,
"from": DEFAULT_FROM,
"host": DEFAULT_SMTP_HOST,
"port": 25,
"tls": False,
"ssl": False,
"html": False,
"login": True,
}
def _prepare_data(self, data: dict) -> dict:
if isinstance(data["to"], list):
data["to"] = list_to_commas(data["to"])
# A workaround since `from` is a reserved word
if data.get("from_"):
data["from"] = data.pop("from_")
return data
@staticmethod
def _build_email(data: dict) -> EmailMessage:
email = EmailMessage()
email["To"] = data["to"]
email["From"] = data["from"]
email["Subject"] = data["subject"]
email["Date"] = formatdate(localtime=True)
content_type = "html" if data["html"] else "plain"
email.add_alternative(data["message"], subtype=content_type)
return email
def _add_attachments(self, attachments: List[str], email: EmailMessage):
for attachment in attachments:
attachment = Path(attachment)
maintype, subtype = self._get_mimetype(attachment)
email.add_attachment(
attachment.read_bytes(),
maintype=maintype,
subtype=subtype,
filename=attachment.name,
)
def _connect_to_server(self, data: dict):
self.smtp_server = smtplib.SMTP_SSL if data["ssl"] else smtplib.SMTP
self.smtp_server = self.smtp_server(data["host"], data["port"])
self.configuration = self._get_configuration(data)
if data["tls"] and not data["ssl"]:
self.smtp_server.ehlo()
self.smtp_server.starttls()
if data["login"] and data.get("username"):
self.smtp_server.login(data["username"], data["password"])
@staticmethod
def _get_configuration(data: dict) -> tuple:
return data["host"], data["port"], data.get("username")
def _send_notification(self, data: dict) -> Response:
errors = None
try:
configuration = self._get_configuration(data)
if (
not self.configuration
or not self.smtp_server
or self.configuration != configuration
):
self._connect_to_server(data)
email = self._build_email(data)
if data.get("attachments"):
self._add_attachments(data["attachments"], email)
self.smtp_server.send_message(email)
except (
SMTPServerDisconnected,
SMTPSenderRefused,
socket.error,
OSError,
IOError,
SMTPAuthenticationError,
) as e:
errors = [str(e)]
return self.create_response(data, errors=errors)
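

# Hedged usage sketch (an illustration, not part of the original module). It
# shows how this provider is typically driven through the ``notifiers`` package
# entry point ``get_notifier``; the recipient, host and credentials below are
# placeholder values, not real settings.
def _example_send_via_notifiers():  # pragma: no cover - illustrative only
    from notifiers import get_notifier

    email = get_notifier("email")
    return email.notify(
        to="recipient@example.com",
        message="Hello from notifiers",
        host="smtp.example.com",
        port=587,
        tls=True,
        username="user",
        password="secret",
    )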
|
tensorflow_decision_forests/component/inspector/blob_sequence.py | Saduf2019/decision-forests | 412 | 11183316 | # Copyright 2021 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Blob Sequence reader and writer.
A blob sequence is a stream (e.g. a file) containing a sequence of blobs (i.e.
chunks of bytes). It can be used to store a sequence of serialized protos.
See yggdrasil_decision_forests/utils/blob_sequence.h for more details.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import Optional, Iterator
import tensorflow as tf
class Reader(object):
"""Reader of Blob Sequence files.
Usage example:
for blob in Reader(path):
print(blob)
"""
def __init__(self, path: str):
self.file_ = None
self.path_ = None
if path:
self.open(path)
def open(self, path: str):
"""Open Blob sequence file."""
self.file_ = tf.io.gfile.GFile(path, "rb")
self.path_ = path
    # Read the header.
magic = self.file_.read(2)
if magic != b"BS":
raise ValueError(f"Invalid blob sequence file {path}")
version = int.from_bytes(self.file_.read(2), byteorder="little")
if version != 0:
raise ValueError(f"Non supported blob sequence version {path}")
reserved = self.file_.read(4)
del reserved
def close(self):
self.file_.close()
self.path_ = None
self.file_ = None
def read(self) -> Optional[bytes]:
"""Reads and returns the next blob."""
raw_length = self.file_.read(4)
if not raw_length:
return None
if len(raw_length) != 4:
raise ValueError(f"Corrupted blob sequence {self.path_}")
length = int.from_bytes(raw_length, byteorder="little")
blob = self.file_.read(length)
if len(blob) != length:
raise ValueError(f"Truncated blob sequence {self.path_}")
return blob
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.file_.close()
def __iter__(self) -> Iterator[bytes]:
"""Iterates overt the BS file content."""
# Read blobs
while True:
blob = self.read()
if blob is None:
break
yield blob
class Writer(object):
"""Writer of Blob Sequence files.
Usage example:
bs = Writer(path)
bs.write(b"Hello")
bs.write(b"World")
bs.close()
"""
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.file_.close()
def __init__(self, path: Optional[str] = None):
self.file_ = None
self.path_ = None
if path:
self.open(path)
def open(self, path: str):
self.file_ = tf.io.gfile.GFile(path, "wb")
self.path_ = path
self.file_.write(b"BS")
version = 0
self.file_.write(version.to_bytes(2, byteorder="little"))
self.file_.write(b"\0\0\0\0")
def write(self, blob: bytes):
self.file_.write(len(blob).to_bytes(4, byteorder="little"))
self.file_.write(blob)
def close(self):
self.file_.close()
self.path_ = None
self.file_ = None
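

# Hedged round-trip sketch (an illustration, not part of the original module):
# it writes two blobs to a temporary local file and reads them back, showing
# the length-prefixed format handled by Writer and Reader. The temporary path
# is an assumption made only for this example.
if __name__ == "__main__":
  import os
  import tempfile

  example_path = os.path.join(tempfile.mkdtemp(), "example.bs")

  writer = Writer(example_path)
  writer.write(b"Hello")
  writer.write(b"World")
  writer.close()

  with Reader(example_path) as reader:
    for blob in reader:
      print(blob)  # b'Hello' then b'World'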
|
cgan.py | ruifan831/mnistGANs | 138 | 11183353 | # [Conditional Generative Adversarial Nets](https://arxiv.org/pdf/1411.1784.pdf)
import tensorflow as tf
from tensorflow import keras
import numpy as np
from visual import save_gan, cvt_gif
from tensorflow.keras.layers import Dense, Reshape, Input, Embedding
from utils import set_soft_gpu, binary_accuracy, save_weights
from mnist_ds import get_half_batch_ds
from gan_cnn import mnist_uni_disc_cnn, mnist_uni_gen_cnn
import time
class CGAN(keras.Model):
"""
    discriminator: label + image -> predict real / fake
    generator: label -> generate image
"""
def __init__(self, latent_dim, label_dim, img_shape):
super().__init__()
self.latent_dim = latent_dim
self.label_dim = label_dim
self.img_shape = img_shape
self.g = self._get_generator()
self.d = self._get_discriminator()
self.opt = keras.optimizers.Adam(0.0002, beta_1=0.5)
self.loss_func = keras.losses.BinaryCrossentropy(from_logits=True)
def call(self, target_labels, training=None, mask=None):
noise = tf.random.normal((len(target_labels), self.latent_dim))
if isinstance(target_labels, np.ndarray):
target_labels = tf.convert_to_tensor(target_labels, dtype=tf.int32)
return self.g.call([noise, target_labels], training=training)
def _get_discriminator(self):
img = Input(shape=self.img_shape)
label = Input(shape=(), dtype=tf.int32)
label_emb = Embedding(10, 32)(label)
emb_img = Reshape((28, 28, 1))(Dense(28*28, activation=keras.activations.relu)(label_emb))
concat_img = tf.concat((img, emb_img), axis=3)
s = keras.Sequential([
mnist_uni_disc_cnn(input_shape=[28, 28, 2]),
Dense(1)
])
o = s(concat_img)
model = keras.Model([img, label], o, name="discriminator")
model.summary()
return model
def _get_generator(self):
noise = Input(shape=(self.latent_dim,))
label = Input(shape=(), dtype=tf.int32)
label_onehot = tf.one_hot(label, depth=self.label_dim)
model_in = tf.concat((noise, label_onehot), axis=1)
s = mnist_uni_gen_cnn((self.latent_dim+self.label_dim,))
o = s(model_in)
model = keras.Model([noise, label], o, name="generator")
model.summary()
return model
def train_d(self, img, img_label, label):
with tf.GradientTape() as tape:
pred = self.d.call([img, img_label], training=True)
loss = self.loss_func(label, pred)
grads = tape.gradient(loss, self.d.trainable_variables)
self.opt.apply_gradients(zip(grads, self.d.trainable_variables))
return loss, binary_accuracy(label, pred)
def train_g(self, random_img_label):
d_label = tf.ones((len(random_img_label), 1), tf.float32) # let d think generated images are real
with tf.GradientTape() as tape:
g_img = self.call(random_img_label, training=True)
pred = self.d.call([g_img, random_img_label], training=False)
loss = self.loss_func(d_label, pred)
grads = tape.gradient(loss, self.g.trainable_variables)
self.opt.apply_gradients(zip(grads, self.g.trainable_variables))
return loss, g_img, binary_accuracy(d_label, pred)
def step(self, real_img, real_img_label):
random_img_label = tf.convert_to_tensor(np.random.randint(0, 10, len(real_img)*2), dtype=tf.int32)
g_loss, g_img, g_acc = self.train_g(random_img_label)
img = tf.concat((real_img, g_img[:len(g_img)//2]), axis=0)
img_label = tf.concat((real_img_label, random_img_label[:len(g_img)//2]), axis=0)
d_label = tf.concat((tf.ones((len(real_img_label), 1), tf.float32), tf.zeros((len(g_img)//2, 1), tf.float32)), axis=0)
d_loss, d_acc = self.train_d(img, img_label, d_label)
return g_img, d_loss, d_acc, g_loss, g_acc, random_img_label
def train(gan, ds):
t0 = time.time()
for ep in range(EPOCH):
for t, (real_img, real_img_label) in enumerate(ds):
g_img, d_loss, d_acc, g_loss, g_acc, g_img_label = gan.step(real_img, real_img_label)
if t % 400 == 0:
t1 = time.time()
print("ep={} | time={:.1f} | t={} | d_acc={:.2f} | g_acc={:.2f} | d_loss={:.2f} | g_loss={:.2f}".format(
ep, t1-t0, t, d_acc.numpy(), g_acc.numpy(), d_loss.numpy(), g_loss.numpy(), ))
t0 = t1
save_gan(gan, ep)
save_weights(gan)
cvt_gif(gan)
if __name__ == "__main__":
LATENT_DIM = 100
IMG_SHAPE = (28, 28, 1)
LABEL_DIM = 10
BATCH_SIZE = 64
EPOCH = 20
set_soft_gpu(True)
d = get_half_batch_ds(BATCH_SIZE)
m = CGAN(LATENT_DIM, LABEL_DIM, IMG_SHAPE)
train(m, d)
|
mindinsight/backend/debugger/debugger_api.py | mindspore-ai/mindinsight | 216 | 11183355 | # Copyright 2020-2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Debugger restful api."""
import json
import weakref
from urllib.parse import unquote
from flask import Blueprint, jsonify, request, Response
from mindinsight.conf import settings
from mindinsight.debugger.session_manager import SessionManager
from mindinsight.utils.exceptions import ParamMissError, ParamValueError, ParamTypeError
BLUEPRINT = Blueprint("debugger", __name__,
url_prefix=settings.URL_PATH_PREFIX + settings.API_PREFIX)
def _unquote_param(param):
"""
Decode parameter value.
Args:
param (str): Encoded param value.
Returns:
str, decoded param value.
"""
if isinstance(param, str):
try:
param = unquote(param, errors='strict')
except UnicodeDecodeError:
raise ParamValueError('Unquote error with strict mode.')
return param
def _read_post_request(post_request):
"""
Extract the body of post request.
Args:
post_request (object): The post request.
Returns:
dict, the deserialized body of request.
"""
body = post_request.stream.read()
try:
body = json.loads(body if body else "{}")
except Exception:
raise ParamValueError("Json data parse failed.")
return body
def to_int(param, param_name):
"""Transfer param to int type."""
try:
param = int(param)
except ValueError:
raise ParamTypeError(param_name, 'Integer')
return param
def _wrap_reply(func, *args, **kwargs):
"""Serialize reply."""
reply = func(*args, **kwargs)
return jsonify(reply)
@BLUEPRINT.route("/debugger/sessions/<session_id>/poll-data", methods=["GET"])
def poll_data(session_id):
"""
Wait for data to be updated on UI.
Get data from server and display the change on UI.
Returns:
str, the updated data.
Examples:
>>> Get http://xxxx/v1/mindinsight/debugger/sessions/xxxx/poll-data?pos=xx
"""
pos = request.args.get('pos')
reply = _wrap_reply(_session_manager.get_session(session_id).poll_data, pos)
return reply
@BLUEPRINT.route("/debugger/sessions/<session_id>/search", methods=["GET"])
def search(session_id):
"""
Search nodes in specified watchpoint.
Returns:
str, the required data.
Examples:
>>> Get http://xxxx/v1/mindinsight/debugger/sessions/xxxx/search?name=mock_name&watch_point_id=1
"""
name = request.args.get('name')
graph_name = request.args.get('graph_name')
watch_point_id = to_int(request.args.get('watch_point_id', 0), 'watch_point_id')
node_category = request.args.get('node_category')
rank_id = to_int(request.args.get('rank_id', 0), 'rank_id')
stack_pattern = _unquote_param(request.args.get('stack_info_key_word'))
reply = _wrap_reply(_session_manager.get_session(session_id).search,
{'name': name,
'graph_name': graph_name,
'watch_point_id': watch_point_id,
'node_category': node_category,
'rank_id': rank_id,
'stack_pattern': stack_pattern})
return reply
@BLUEPRINT.route("/debugger/sessions/<session_id>/tensor-comparisons", methods=["GET"])
def tensor_comparisons(session_id):
"""
Get tensor comparisons.
Returns:
str, the required data.
Examples:
>>> Get http://xxxx/v1/mindinsight/debugger/sessions/xxxx/tensor-comparisons
"""
name = request.args.get('name')
detail = request.args.get('detail', 'data')
shape = _unquote_param(request.args.get('shape'))
graph_name = request.args.get('graph_name', '')
tolerance = request.args.get('tolerance', '0')
rank_id = to_int(request.args.get('rank_id', 0), 'rank_id')
reply = _wrap_reply(_session_manager.get_session(session_id).tensor_comparisons, name, shape,
detail, tolerance, rank_id, graph_name)
return reply
@BLUEPRINT.route("/debugger/sessions/<session_id>/retrieve", methods=["POST"])
def retrieve(session_id):
"""
Retrieve data according to mode and params.
Returns:
str, the required data.
Examples:
>>> POST http://xxxx/v1/mindinsight/debugger/sessions/xxxx/retrieve
"""
body = _read_post_request(request)
mode = body.get('mode')
params = body.get('params')
reply = _wrap_reply(_session_manager.get_session(session_id).retrieve, mode, params)
return reply
@BLUEPRINT.route("/debugger/sessions/<session_id>/tensor-history", methods=["POST"])
def retrieve_tensor_history(session_id):
"""
    Retrieve the tensor history according to node name, graph name and rank id.
Returns:
str, the required data.
Examples:
>>> POST http://xxxx/v1/mindinsight/debugger/sessions/xxxx/tensor-history
"""
body = _read_post_request(request)
name = body.get('name')
graph_name = body.get('graph_name')
rank_id = to_int(body.get('rank_id', 0), 'rank_id')
reply = _wrap_reply(_session_manager.get_session(session_id).retrieve_tensor_history, name, graph_name,
rank_id)
return reply
@BLUEPRINT.route("/debugger/sessions/<session_id>/tensors", methods=["GET"])
def retrieve_tensor_value(session_id):
"""
Retrieve tensor value according to name and shape.
Returns:
str, the required data.
Examples:
>>> GET http://xxxx/v1/mindinsight/debugger/sessions/xxxx/tensors?name=tensor_name&detail=data&shape=[1,1,:,:]
"""
name = request.args.get('name')
detail = request.args.get('detail')
shape = _unquote_param(request.args.get('shape'))
graph_name = request.args.get('graph_name')
prev = bool(request.args.get('prev') == 'true')
rank_id = to_int(request.args.get('rank_id', 0), 'rank_id')
reply = _wrap_reply(_session_manager.get_session(session_id).retrieve_tensor_value, name, detail,
shape, graph_name, prev, rank_id)
return reply
@BLUEPRINT.route("/debugger/sessions/<session_id>/create-watchpoint", methods=["POST"])
def create_watchpoint(session_id):
"""
Create watchpoint.
Returns:
str, watchpoint id.
Raises:
MindInsightException: If method fails to be called.
Examples:
>>> POST http://xxxx/v1/mindinsight/debugger/sessions/xxxx/create-watchpoint
"""
params = _read_post_request(request)
params['watch_condition'] = params.pop('condition', None)
reply = _wrap_reply(_session_manager.get_session(session_id).create_watchpoint, params)
return reply
@BLUEPRINT.route("/debugger/sessions/<session_id>/update-watchpoint", methods=["POST"])
def update_watchpoint(session_id):
"""
Update watchpoint.
Returns:
str, reply message.
Raises:
MindInsightException: If method fails to be called.
Examples:
>>> POST http://xxxx/v1/mindinsight/debugger/sessions/xxxx/update-watchpoint
"""
params = _read_post_request(request)
reply = _wrap_reply(_session_manager.get_session(session_id).update_watchpoint, params)
return reply
@BLUEPRINT.route("/debugger/sessions/<session_id>/delete-watchpoint", methods=["POST"])
def delete_watchpoint(session_id):
"""
Delete watchpoint.
Returns:
str, reply message.
Raises:
MindInsightException: If method fails to be called.
Examples:
>>> POST http://xxxx/v1/mindinsight/debugger/sessions/xxxx/delete-watchpoint
"""
body = _read_post_request(request)
watch_point_id = body.get('watch_point_id')
reply = _wrap_reply(_session_manager.get_session(session_id).delete_watchpoint, watch_point_id)
return reply
@BLUEPRINT.route("/debugger/sessions/<session_id>/control", methods=["POST"])
def control(session_id):
"""
Control request.
Returns:
str, reply message.
Raises:
MindInsightException: If method fails to be called.
Examples:
>>> POST http://xxxx/v1/mindinsight/debugger/sessions/xxxx/control
"""
params = _read_post_request(request)
reply = _wrap_reply(_session_manager.get_session(session_id).control, params)
return reply
@BLUEPRINT.route("/debugger/sessions/<session_id>/recheck", methods=["POST"])
def recheck(session_id):
"""
Recheck request.
Returns:
str, reply message.
Raises:
MindInsightException: If method fails to be called.
Examples:
>>> POST http://xxxx/v1/mindinsight/debugger/sessions/xxxx/recheck
"""
reply = _wrap_reply(_session_manager.get_session(session_id).recheck)
return reply
@BLUEPRINT.route("/debugger/sessions/<session_id>/tensor-graphs", methods=["GET"])
def retrieve_tensor_graph(session_id):
"""
    Retrieve the tensor graph according to tensor name and graph name.
Returns:
str, the required data.
Examples:
>>> GET http://xxxx/v1/mindinsight/debugger/sessions/xxxx/tensor-graphs?tensor_name=xxx&graph_name=xxx
"""
tensor_name = request.args.get('tensor_name')
graph_name = request.args.get('graph_name')
rank_id = to_int(request.args.get('rank_id', 0), 'rank_id')
reply = _wrap_reply(_session_manager.get_session(session_id).retrieve_tensor_graph, tensor_name,
graph_name, rank_id)
return reply
@BLUEPRINT.route("/debugger/sessions/<session_id>/tensor-hits", methods=["GET"])
def retrieve_tensor_hits(session_id):
"""
    Retrieve the tensor hits according to tensor name and graph name.
Returns:
str, the required data.
Examples:
>>> GET http://xxxx/v1/mindinsight/debugger/sessions/xxxx/tensor-hits?tensor_name=xxx&graph_name=xxx
"""
tensor_name = request.args.get('tensor_name')
graph_name = request.args.get('graph_name')
rank_id = to_int(request.args.get('rank_id', 0), 'rank_id')
reply = _wrap_reply(_session_manager.get_session(session_id).retrieve_tensor_hits, tensor_name,
graph_name, rank_id)
return reply
@BLUEPRINT.route("/debugger/sessions/<session_id>/search-watchpoint-hits", methods=["POST"])
def search_watchpoint_hits(session_id):
"""
Search watchpoint hits by group condition.
Returns:
str, the required data.
Examples:
>>> POST http://xxxx/v1/mindinsight/debugger/sessions/xxxx/search-watchpoint-hits
"""
body = _read_post_request(request)
group_condition = body.get('group_condition')
reply = _wrap_reply(_session_manager.get_session(session_id).search_watchpoint_hits, group_condition)
return reply
@BLUEPRINT.route("/debugger/sessions/<session_id>/condition-collections", methods=["GET"])
def get_condition_collections(session_id):
"""Get condition collections."""
reply = _wrap_reply(_session_manager.get_session(session_id).get_condition_collections)
return reply
@BLUEPRINT.route("/debugger/sessions/<session_id>/set-recommended-watch-points", methods=["POST"])
def set_recommended_watch_points(session_id):
"""Set recommended watch points."""
body = _read_post_request(request)
request_body = body.get('requestBody')
if request_body is None:
raise ParamMissError('requestBody')
set_recommended = request_body.get('set_recommended')
reply = _wrap_reply(_session_manager.get_session(session_id).set_recommended_watch_points, set_recommended)
return reply
@BLUEPRINT.route("/debugger/sessions/<session_id>/tensor-files/load", methods=["POST"])
def load(session_id):
"""
    Load the tensor data for the given tensor name.
Returns:
str, the required data.
Examples:
        >>> POST http://xxx/v1/mindinsight/debugger/sessions/xxxx/tensor-files/load
"""
body = _read_post_request(request)
name = body.get('name')
graph_name = body.get('graph_name')
rank_id = to_int(body.get('rank_id', 0), 'rank_id')
prev = bool(body.get('prev') == 'true')
reply = _wrap_reply(_session_manager.get_session(session_id).load, name, prev, graph_name, rank_id)
return reply
@BLUEPRINT.route("/debugger/sessions/<session_id>/tensor-files/download", methods=["GET"])
def download(session_id):
"""
    Download the tensor data file for the given tensor name.
Returns:
str, the required data.
Examples:
>>> GET http://xxx/v1/mindinsight/debugger/sessions/xxx/tensor-files/download?name=name&graph_name=xxx&prev=xxx
"""
name = request.args.get('name')
graph_name = request.args.get('graph_name')
rank_id = to_int(request.args.get('rank_id', 0), 'rank_id')
prev = bool(request.args.get('prev') == 'true')
file_name, file_path, clean_func = _session_manager.get_session(session_id).download(name, prev, graph_name,
rank_id)
def file_send():
with open(file_path, 'rb') as fb:
while True:
data = fb.read(50 * 1024 * 1024)
if not data:
break
yield data
response = Response(file_send(), content_type='application/octet-stream')
response.headers["Content-disposition"] = 'attachment; filename=%s' % file_name
weakref.finalize(response, clean_func,)
return response
@BLUEPRINT.route("/debugger/sessions", methods=["POST"])
def create_session():
"""
    Get the session id if the session exists, otherwise create a new session.
Returns:
str, session id.
Examples:
>>> POST http://xxxx/v1/mindinsight/debugger/sessions
"""
body = _read_post_request(request)
summary_dir = body.get('dump_dir')
session_type = body.get('session_type')
reply = _wrap_reply(_session_manager.create_session, session_type, summary_dir)
return reply
@BLUEPRINT.route("/debugger/sessions", methods=["GET"])
def get_train_jobs():
"""
Check the current active sessions.
Examples:
>>> GET http://xxxx/v1/mindinsight/debugger/sessions
"""
reply = _wrap_reply(_session_manager.get_train_jobs)
return reply
@BLUEPRINT.route("/debugger/sessions/<session_id>/delete", methods=["POST"])
def delete_session(session_id):
"""
Delete session by session id.
Examples:
        >>> POST http://xxxx/v1/mindinsight/debugger/sessions/xxxx/delete
"""
reply = _wrap_reply(_session_manager.delete_session, session_id)
return reply
@BLUEPRINT.route("/debugger/sessions/<session_id>/stacks", methods=["GET"])
def get_stack_infos(session_id):
"""
Get stack infos.
Examples:
        >>> GET http://xxxx/v1/mindinsight/debugger/sessions/<session_id>/stacks?key_word=xxx&offset=0
"""
key_word = _unquote_param(request.args.get('key_word'))
limit = int(request.args.get('limit', 10))
offset = int(request.args.get('offset', 0))
filter_condition = {
'pattern': key_word,
'limit': limit,
'offset': offset
}
reply = _wrap_reply(_session_manager.get_session(session_id).get_stack_infos, filter_condition)
return reply
@BLUEPRINT.route("/debugger/sessions/<session_id>/ranks/<rank_id>/graph-runs", methods=["GET"])
def get_graph_runs(session_id, rank_id):
"""
Get graph runs.
Examples:
        >>> GET http://xxxx/v1/mindinsight/debugger/sessions/<session_id>/ranks/<rank_id>/graph-runs
"""
session = _session_manager.get_session(session_id)
rank_id = to_int(rank_id, 'rank_id')
reply = _wrap_reply(session.get_graph_runs, rank_id)
return reply
_session_manager = SessionManager.get_instance()
def init_module(app):
"""
Init module entry.
Args:
app (Flask): The application obj.
"""
app.register_blueprint(BLUEPRINT)
|
tests/test_pair.py | HRI-EU/HRI-nanomsg-python | 334 | 11183361 | import unittest
import os
from nanomsg_wrappers import set_wrapper_choice, get_default_for_platform
set_wrapper_choice(os.environ.get('NANOMSG_PY_TEST_WRAPPER',
get_default_for_platform()))
from nanomsg import (
PAIR,
Socket
)
SOCKET_ADDRESS = os.environ.get('NANOMSG_PY_TEST_ADDRESS', "inproc://a")
class TestPairSockets(unittest.TestCase):
def test_send_recv(self):
with Socket(PAIR) as s1:
with Socket(PAIR) as s2:
s1.bind(SOCKET_ADDRESS)
s2.connect(SOCKET_ADDRESS)
sent = b'ABC'
s2.send(sent)
                received = s1.recv()
                self.assertEqual(sent, received)
    def test_send_recv_with_embedded_nulls(self):
with Socket(PAIR) as s1:
with Socket(PAIR) as s2:
s1.bind(SOCKET_ADDRESS)
s2.connect(SOCKET_ADDRESS)
sent = b'ABC\x00DEFEDDSS'
s2.send(sent)
                received = s1.recv()
                self.assertEqual(sent, received)
def test_send_recv_large_message(self):
with Socket(PAIR) as s1:
with Socket(PAIR) as s2:
s1.bind(SOCKET_ADDRESS)
s2.connect(SOCKET_ADDRESS)
sent = b'B'*(1024*1024)
s2.send(sent)
                received = s1.recv()
                self.assertEqual(sent, received)
if __name__ == '__main__':
unittest.main()
|
tests/st/dynamic_shape/test_getnext_dynamic_pipeline.py | PowerOlive/mindspore | 3,200 | 11183412 | # Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import numpy as np
import pytest
from mindspore import nn, context
from mindspore import ops as P
from mindspore.train import DatasetHelper, connect_network_with_dataset
import mindspore.dataset as ds
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
def _exec_preprocess(network, is_train, dataset, dataset_sink_mode, sink_size=-1, epoch_num=1, dataset_helper=None):
if dataset_sink_mode and not is_train:
dataset.__loop_size__ = 1
if dataset_helper is None:
dataset_helper = DatasetHelper(dataset, dataset_sink_mode, sink_size, epoch_num)
if dataset_sink_mode:
network = connect_network_with_dataset(network, dataset_helper)
network.set_train(is_train)
return dataset_helper, network
def _eval_dataset_sink_process(network, valid_dataset):
dataset_helper, eval_network = _exec_preprocess(network, is_train=False, dataset=valid_dataset,
dataset_sink_mode=True)
for inputs1, inputs2 in zip(dataset_helper, valid_dataset.create_dict_iterator()):
outputs = eval_network(*inputs1)
for elem1, (_, elem2) in zip(outputs, inputs2.items()):
assert elem1.shape == elem2.shape
def dataset_generator():
for i in range(1, 10):
yield (
np.ones((32, i), dtype=np.float32), np.zeros((32, i, i, 3), dtype=np.int32),
np.ones((32,), dtype=np.float32),
np.ones((32, i, 8), dtype=np.float32), np.ones((32, 8, 8), dtype=np.float32))
class Net(nn.Cell):
def __init__(self):
super(Net, self).__init__()
self.relu = P.ReLU()
def construct(self, x1, x2, x3, x4, x5):
x1 = self.relu(x1)
x1 = self.relu(x1)
x2 = self.relu(x2)
x3 = self.relu(x3)
x3 = self.relu(x3)
x4 = self.relu(x4)
x5 = self.relu(x5)
return x1, x2, x3, x4, x5
@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_getnext_dynamic_pipeline():
network = Net()
dataset = ds.GeneratorDataset(dataset_generator, ["data1", "data2", "data3", "data4", "data5"])
dataset.set_dynamic_columns(columns={"data1": [32, None], "data2": [32, None, None, 3],
"data3": [32], "data4": [32, None, 8], "data5": [32, 8, 8]})
_eval_dataset_sink_process(network, dataset)
|
anuga/utilities/mem_time_equation.py | samcom12/anuga_core | 136 | 11183413 | import sys
from . import system_tools
TEST_CON = 'test_constants'
test_constants = {'tri_a_T':1, 'tri_b_T': 1,
'tim_a_T':1,'fil_a_T':1.,'cons_T':1,
'tri_a_S':1,'cons_S':1}
# These constants come from the Major Variables script that ran on
# tornado in serial
system_constants = {'tornado.agso.gov.au':{'tri_a_T':0.0000395,
'tri_b_T': 0.29575152,
'tim_a_T':0.03804736,'fil_a_T':0.005928693,
'cons_T':-135.0661178,
'tri_a_S':0.00369572,'cons_S':331.7128095},
TEST_CON:test_constants}
DEFAULT_HOST = 'tornado.agso.gov.au'
def estimate_time_mem(domain, yieldstep, finaltime, halt=False,
log_results=True, use_test_constants=False):
"""
Predict the time in seconds and memory in ?? that the simulation
will need.
params:
domain: a Domain instance, used to get number of triangles
yieldstep: the yieldstep of the simulation
finaltime: The final time used in the simulation.
halt: Set to True if you want ANUGA to stop after the prediction
log_results: Add the predictions to the log file.
use_test_constants: Use artificial test constants.
Example use:
anuga.estimate_time_mem(domain, yieldstep=yieldstep, finaltime=finaltime,
halt=True)
"""
time, memory = whole_equation(num_tri=len(domain),
yieldstep=yieldstep,
finaltime=finaltime,
use_test_constants=use_test_constants)
    if log_results:  # FIXME: not logging results yet, only printing them
print("This program will run for: " + str(time) + " (s)")
print("This program will use: " + str(memory) + " (MB)")
if halt:
sys.exit()
return time, memory
def whole_equation(halt = False, **kwargs):
"""
num_tri = None,
tri_area = None,
time_length = None,
time_step = None,
water_depth = None,
velocity = None,
per_water_cover = None,
cpus = None,
cpu_speed = None,
halt = False
"""
if not kwargs['use_test_constants']:
host_name = system_tools.get_host_name()
else:
host_name = TEST_CON
constants = system_constants.get(host_name, system_constants[DEFAULT_HOST])
kwargs['constants'] = constants
time = time_equation(**kwargs)
memory = space_equation(**kwargs)
result = (time, memory)
return result
# The time and memory estimates below are computed from the constants obtained
# in the timing and memory experiments.
def time_equation(**kwargs):
time = kwargs['constants']['tri_a_T'] * (kwargs['num_tri']) ** 2 + \
kwargs['constants']['tri_b_T'] * kwargs['num_tri'] + \
kwargs['constants']['tim_a_T'] * kwargs['finaltime'] + \
kwargs['constants']['fil_a_T'] * \
((kwargs['finaltime'] / kwargs['yieldstep'])) + \
kwargs['constants']['cons_T']
return time
def space_equation(**kwargs):
memory = kwargs['constants']['tri_a_S'] * kwargs['num_tri'] + \
kwargs['constants']['cons_S']
return memory
################################################################################
if __name__ == "__main__":
    # Example values; the original call omitted the kwargs required by whole_equation.
    whole_equation(num_tri=7, yieldstep=1, finaltime=100, use_test_constants=True)
|
testing/MLDBFB-545-incorrect_result_on_merged_ds.py | kstepanmpmg/mldb | 665 | 11183420 | #
# MLDBFB-545-incorrect_result_on_merged_ds.py
# Mich, 2016-05-27
# This file is part of MLDB. Copyright 2016 mldb.ai inc. All rights reserved.
#
import unittest
from mldb import mldb, MldbUnitTest, ResponseException
class Mldbfb545MergeDsWhereQueryTest(MldbUnitTest): # noqa
def test_MLDBFB_545_where_query_beh(self):
self.run_MLDBFB_545_with_ds_type('beh')
def test_MLDBFB_545_where_query_sparse(self):
self.run_MLDBFB_545_with_ds_type('sparse')
def run_MLDBFB_545_with_ds_type(self, ds_type):
id1 = ds_type + 'mldbfb545_1'
ds = mldb.create_dataset({
'id' : id1,
'type': ds_type + '.mutable'
})
ds.record_row('user1', [['converted', 'n', 0]])
ds.commit()
id2 = ds_type + 'mldbfb545_2'
ds = mldb.create_dataset({
'id' : id2,
'type' : ds_type + '.mutable'
})
ds.record_row('user2', [['blah', 'blah', 0]])
ds.commit()
# query directly on the dataset works
res = mldb.query("""
SELECT * FROM {} WHERE converted='c' LIMIT 1
""".format(id1))
self.assertEqual(len(res), 1)
merge_id = ds_type + 'mldbfb545merged'
mldb.put("/v1/datasets/" + merge_id, {
"type": "merged",
"params": {
"datasets": [{"id": id1}, {"id": id2}]
}
})
# query on the merged dataset yields incorrect results
res = mldb.query("""
SELECT * FROM {} WHERE converted='c' LIMIT 1
""".format(merge_id))
mldb.log(res)
self.assertEqual(len(res), 1)
def test_merge_freeze_beh(self):
self.run_freeze_with_ds_type('beh')
def test_merge_freeze_sparse(self):
self.run_freeze_with_ds_type('sparse')
def run_freeze_with_ds_type(self, ds_type):
ds = mldb.create_dataset({
'id' : ds_type,
'type': ds_type + '.mutable'
})
ds.record_row('user1', [['converted', 'n', 0]])
ds.commit()
ds = mldb.create_dataset({
'id' : ds_type + "2",
'type' : ds_type + '.mutable'
})
ds.record_row('user2', [['converted', 'n', 0]])
ds.commit()
# query directly on the dataset works
res = mldb.query("""
SELECT * FROM {} WHERE converted='c' LIMIT 1
""".format(ds_type))
self.assertEqual(len(res), 1)
mldb.put("/v1/datasets/mergedDs" + ds_type, {
"type": "merged",
"params": {
"datasets": [{"id": ds_type}, {"id": ds_type + "2"}]
}
})
# query on the merged dataset yields incorrect results
res = mldb.query("""
SELECT * FROM mergedDs{} WHERE converted='c' LIMIT 1
""".format(ds_type))
mldb.log(res)
self.assertEqual(len(res), 1)
if __name__ == '__main__':
mldb.run_tests()
|
genie-examples/explore_vlans.py | fallenfuzz/netdevops_demos | 104 | 11183427 | #! /usr/bin/env python
"""Example script using Genie
Intended to be run interactively (i.e. from IPython)
This script will retrieve information from a device.
Copyright (c) 2018 Cisco and/or its affiliates.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
# Import Genie
from genie.conf import Genie
# Initialize a Testbed File
testbed = Genie.init("testbed.yml")
# Create variable for specific testbed device
device = testbed.devices["sbx-n9kv-ao"]
# Connect to the device
device.connect(learn_hostname=True)
# Learn the vlans using Genie model
vlans = device.learn("vlan")
# Print out VLAN ids and names.
print("Here are the vlans from device {}".format(device.name))
for key, details in vlans.info["vlans"].items():
    # The model for vlans.info has an oddity where the vlans are
    # mixed together with vlan configuration info, so we need a bit of
    # logic to target just the VLAN details and not the config info.
    # See the model here: https://pubhub.devnetcloud.com/media/pyats-packages/docs/genie/_models/vlan.pdf
    # Ignore the config details by excluding configuration keys
if key in ["interface_vlan_enabled", "configuration", "vn_segment_vlan_based_enabled"]:
# these aren't vlans, move along
continue
# Print details on vlans
print("VLAN ID {} with name {}".format(details["vlan_id"], details["name"]))
device.disconnect()
|
modin/core/dataframe/algebra/default2pandas/dataframe.py | Rubtsowa/modin | 7,258 | 11183438 | # Licensed to Modin Development Team under one or more contributor license agreements.
# See the NOTICE file distributed with this work for additional information regarding
# copyright ownership. The Modin Development Team licenses this file to you under the
# Apache License, Version 2.0 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
"""Module houses default DataFrame functions builder class."""
# FIXME: This whole module is duplicating the logic of `default.py` and should be removed.
from .default import DefaultMethod
from modin.utils import _inherit_docstrings
import pandas
@_inherit_docstrings(DefaultMethod)
class DataFrameDefault(DefaultMethod):
@classmethod
def register(cls, func, obj_type=None, **kwargs):
"""
        Build a function that falls back to the default pandas implementation for the passed `func`.
Parameters
----------
func : callable or str,
            Function to apply to the frame after it is cast to pandas.
obj_type : object, optional
If `func` is a string with a function name then `obj_type` provides an
object to search function in. If not specified `pandas.DataFrame` will be used.
**kwargs : kwargs
Additional parameters that will be used for building.
Returns
-------
callable
            Function that takes a query compiler, falls back to pandas, and applies `func`
            to the frame after it is cast to pandas.
"""
if obj_type is None:
obj_type = pandas.DataFrame
return cls.call(func, obj_type=obj_type, **kwargs)
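

# Hedged usage sketch (an illustration, not part of the original module).
# `DataFrameDefault.register` is typically given a pandas function or method;
# the result is a callable that accepts a Modin query compiler and applies the
# pandas implementation to the frame once it has been cast to pandas.
def _example_sum_via_pandas(query_compiler):  # pragma: no cover - illustrative only
    # Build the fallback once, then apply it to an existing query compiler.
    sum_via_pandas = DataFrameDefault.register(pandas.DataFrame.sum)
    return sum_via_pandas(query_compiler, axis=0)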
|
tests/instagram_basic/conftest.py | sns-sdks/python-facebook | 181 | 11183472 | import pytest
from pyfacebook import IGBasicDisplayApi
@pytest.fixture
def api():
return IGBasicDisplayApi(
app_id="123456",
app_secret="xxxxx",
access_token="token",
)
|
parsifal/library/migrations/0005_auto_20150626_1649.py | michelav/parsifal | 342 | 11183498 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('library', '0004_auto_20150626_0841'),
]
operations = [
migrations.AlterModelOptions(
name='folder',
options={'ordering': ('name',), 'verbose_name': 'Folder', 'verbose_name_plural': 'Folders'},
),
migrations.RemoveField(
model_name='document',
name='annote',
),
migrations.AlterField(
model_name='document',
name='author',
field=models.TextField(max_length=500, null=True, blank=True),
),
migrations.AlterField(
model_name='document',
name='bibtexkey',
field=models.CharField(max_length=50, null=True, verbose_name=b'Bibtex key', blank=True),
),
migrations.AlterField(
model_name='document',
name='crossref',
field=models.CharField(max_length=255, null=True, verbose_name=b'Cross-referenced', blank=True),
),
migrations.AlterField(
model_name='document',
name='doi',
field=models.CharField(max_length=50, null=True, verbose_name=b'DOI', blank=True),
),
migrations.AlterField(
model_name='document',
name='entry_type',
field=models.CharField(blank=True, max_length=13, null=True, verbose_name=b'Document type', choices=[('article', 'Article'), ('book', 'Book'), ('booklet', 'Booklet'), ('conference', 'Conference'), ('inbook', 'Inbook'), ('incollection', 'Incollection'), ('inproceedings', 'Inproceedings'), ('manual', 'Manual'), ('mastersthesis', "Master's Thesis"), ('misc', 'Misc'), ('phdthesis', 'Ph.D. Thesis'), ('proceedings', 'Proceedings'), ('techreport', 'Tech Report'), ('unpublished', 'Unpublished')]),
),
migrations.AlterField(
model_name='document',
name='howpublished',
field=models.CharField(max_length=255, null=True, verbose_name=b'How it was published', blank=True),
),
migrations.AlterField(
model_name='document',
name='isbn',
field=models.CharField(max_length=30, null=True, verbose_name=b'ISBN', blank=True),
),
migrations.AlterField(
model_name='document',
name='issn',
field=models.CharField(max_length=30, null=True, verbose_name=b'ISSN', blank=True),
),
migrations.AlterField(
model_name='document',
name='keywords',
field=models.CharField(max_length=500, null=True, blank=True),
),
migrations.AlterField(
model_name='document',
name='url',
field=models.CharField(max_length=255, null=True, verbose_name=b'URL', blank=True),
),
]
|
querybook/migrations/versions/22c9a254b41e_initial_commit.py | shivammmmm/querybook | 1,144 | 11183507 | """Initial commit
Revision ID: 22c9a254b41e
Revises:
Create Date: 2019-06-17 17:42:47.075692
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = "22c9a254b41e"
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
MediumText = sa.Text(length=16777215)
Text = sa.Text(length=65535)
conn = op.get_bind()
if conn.dialect.name == "postgresql":
MediumText = sa.Text()
Text = sa.Text()
# ### commands auto generated by Alembic - please adjust! ###
op.create_table(
"data_cell",
sa.Column("id", sa.Integer(), autoincrement=True, nullable=False),
sa.Column(
"cell_type",
sa.Enum("query", "text", "chart", name="datacelltype"),
nullable=False,
),
sa.Column("context", MediumText, nullable=True),
sa.Column("meta", sa.JSON(), nullable=True),
sa.Column("created_at", sa.DateTime(), nullable=False),
sa.Column("updated_at", sa.DateTime(), nullable=False),
sa.PrimaryKeyConstraint("id"),
mysql_charset="utf8mb4",
mysql_engine="InnoDB",
)
op.create_table(
"data_job_metadata",
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("job_name", sa.String(length=191), nullable=True),
sa.Column("job_info", sa.JSON(), nullable=True),
sa.Column("job_owner", sa.String(length=255), nullable=True),
sa.Column("query_text", MediumText, nullable=True),
sa.Column("is_adhoc", sa.Boolean(), nullable=True),
sa.PrimaryKeyConstraint("id"),
mysql_charset="utf8mb4",
mysql_engine="InnoDB",
)
op.create_index(
op.f("ix_data_job_metadata_job_name"),
"data_job_metadata",
["job_name"],
unique=False,
)
op.create_table(
"environment",
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("name", sa.String(length=255), nullable=False),
sa.Column("description", sa.String(length=5000), nullable=True),
sa.Column("image", sa.String(length=2083), nullable=True),
sa.Column("public", sa.Boolean(), nullable=True),
sa.Column("archived", sa.Boolean(), nullable=True),
sa.PrimaryKeyConstraint("id"),
)
op.create_index(op.f("ix_environment_name"), "environment", ["name"], unique=True)
op.create_table(
"function_documentation",
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("language", sa.String(length=255), nullable=False),
sa.Column("name", sa.String(length=255), nullable=False),
sa.Column("params", sa.String(length=255), nullable=False),
sa.Column("return_type", sa.String(length=255), nullable=False),
sa.Column("description", sa.String(length=5000), nullable=True),
sa.PrimaryKeyConstraint("id"),
mysql_charset="utf8mb4",
mysql_engine="InnoDB",
)
op.create_table(
"key_value_store",
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("key", sa.String(length=191), nullable=True),
sa.Column("value", MediumText, nullable=True),
sa.PrimaryKeyConstraint("id"),
mysql_charset="utf8mb4",
mysql_engine="InnoDB",
)
op.create_index(
op.f("ix_key_value_store_key"), "key_value_store", ["key"], unique=True
)
op.create_table(
"query_metastore",
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("created_at", sa.DateTime(), nullable=True),
sa.Column("updated_at", sa.DateTime(), nullable=True),
sa.Column("name", sa.String(length=255), nullable=False),
sa.Column("metastore_url", sa.JSON(), nullable=True),
sa.Column("acl_control", sa.JSON(), nullable=False),
sa.PrimaryKeyConstraint("id"),
sa.UniqueConstraint("name"),
)
op.create_table(
"task_run_record",
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("name", sa.String(length=255), nullable=True),
sa.Column(
"status",
sa.Enum("RUNNING", "SUCCESS", "FAILURE", name="taskrunstatus"),
nullable=False,
),
sa.Column("alerted", sa.Boolean(), nullable=True),
sa.Column("created_at", sa.DateTime(), nullable=True),
sa.Column("updated_at", sa.DateTime(), nullable=True),
sa.PrimaryKeyConstraint("id"),
)
op.create_table(
"task_schedule",
sa.Column("id", sa.Integer(), autoincrement=True, nullable=False),
sa.Column("name", sa.String(length=255), nullable=False),
sa.Column("task", sa.String(length=255), nullable=False),
sa.Column("cron", sa.String(length=255), nullable=True),
sa.Column("start_time", sa.DateTime(), nullable=True),
sa.Column("args", sa.JSON(), nullable=True),
sa.Column("kwargs", sa.JSON(), nullable=True),
sa.Column("options", sa.JSON(), nullable=True),
sa.Column("last_run_at", sa.DateTime(), nullable=True),
sa.Column("total_run_count", sa.Integer(), nullable=True),
sa.Column("enabled", sa.Boolean(), nullable=True),
sa.Column("no_changes", sa.Boolean(), nullable=True),
sa.PrimaryKeyConstraint("id"),
sa.UniqueConstraint("name"),
)
op.create_table(
"task_schedules",
sa.Column("id", sa.SmallInteger(), nullable=False),
sa.Column("last_update", sa.DateTime(), nullable=True),
sa.PrimaryKeyConstraint("id"),
sa.UniqueConstraint("id"),
)
op.create_table(
"user",
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("username", sa.String(length=255), nullable=True),
sa.Column("fullname", sa.String(length=255), nullable=True),
sa.Column("password", sa.String(length=255), nullable=True),
sa.Column("email", sa.String(length=255), nullable=True),
sa.Column("profile_img", sa.String(length=2083), nullable=True),
sa.Column("deleted", sa.Boolean(), nullable=True),
sa.PrimaryKeyConstraint("id"),
sa.UniqueConstraint("username"),
)
op.create_table(
"announcements",
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("created_at", sa.DateTime(), nullable=True),
sa.Column("updated_at", sa.DateTime(), nullable=True),
sa.Column("uid", sa.Integer(), nullable=True),
sa.Column("message", sa.String(length=5000), nullable=True),
sa.Column("url_regex", sa.String(length=255), nullable=True),
sa.Column("can_dismiss", sa.Boolean(), nullable=True),
sa.ForeignKeyConstraint(
["uid"], ["user.id"], name="announcements_ibfk_1", ondelete="CASCADE",
),
sa.PrimaryKeyConstraint("id"),
)
op.create_table(
"api_access_token",
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("token", sa.String(length=128), nullable=False),
sa.Column("description", sa.String(length=5000), nullable=True),
sa.Column("enabled", sa.Boolean(), nullable=True),
sa.Column("created_at", sa.DateTime(), nullable=True),
sa.Column("creator_uid", sa.Integer(), nullable=True),
sa.Column("updated_at", sa.DateTime(), nullable=True),
sa.Column("updater_uid", sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(
["creator_uid"],
["user.id"],
name="api_access_token_ibfk_1",
ondelete="CASCADE",
),
sa.ForeignKeyConstraint(
["updater_uid"],
["user.id"],
name="api_access_token_ibfk_2",
ondelete="CASCADE",
),
sa.PrimaryKeyConstraint("id"),
sa.UniqueConstraint("token"),
)
op.create_table(
"data_doc",
sa.Column("id", sa.Integer(), autoincrement=True, nullable=False),
sa.Column("environment_id", sa.Integer(), nullable=False),
sa.Column("public", sa.Boolean(), nullable=False),
sa.Column("archived", sa.Boolean(), nullable=False),
sa.Column("owner_uid", sa.Integer(), nullable=True),
sa.Column("created_at", sa.DateTime(), nullable=False),
sa.Column("updated_at", sa.DateTime(), nullable=False),
sa.Column("title", sa.String(length=255), nullable=False),
sa.Column("meta", sa.JSON(), nullable=False),
sa.ForeignKeyConstraint(
["environment_id"],
["environment.id"],
name="data_doc_ibfk_1",
ondelete="CASCADE",
),
sa.ForeignKeyConstraint(
["owner_uid"], ["user.id"], name="data_doc_ibfk_2", ondelete="CASCADE"
),
sa.PrimaryKeyConstraint("id"),
mysql_charset="utf8mb4",
mysql_engine="InnoDB",
)
op.create_table(
"data_schema",
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("created_at", sa.DateTime(), nullable=True),
sa.Column("updated_at", sa.DateTime(), nullable=True),
sa.Column("name", sa.String(length=255), nullable=True),
sa.Column("table_count", sa.Integer(), nullable=True),
sa.Column("description", MediumText, nullable=True),
sa.Column("metastore_id", sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(
["metastore_id"], ["query_metastore.id"], name="data_schema_ibfk_1",
),
sa.PrimaryKeyConstraint("id"),
)
op.create_index(op.f("ix_data_schema_name"), "data_schema", ["name"], unique=False)
op.create_table(
"impression",
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("item_id", sa.Integer(), nullable=True),
sa.Column(
"item_type",
sa.Enum("DATA_DOC", "DATA_TABLE", name="itemtype"),
nullable=True,
),
sa.Column("uid", sa.Integer(), nullable=True),
sa.Column("created_at", sa.DateTime(), nullable=True),
sa.ForeignKeyConstraint(
["uid"], ["user.id"], name="impression_ibfk_1", ondelete="CASCADE"
),
sa.PrimaryKeyConstraint("id"),
)
op.create_table(
"query_engine",
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("created_at", sa.DateTime(), nullable=True),
sa.Column("updated_at", sa.DateTime(), nullable=True),
sa.Column("name", sa.String(length=255), nullable=False),
sa.Column("description", sa.String(length=255), nullable=True),
sa.Column("language", sa.String(length=255), nullable=False),
sa.Column("executor", sa.String(length=255), nullable=False),
sa.Column("executor_params", sa.JSON(), nullable=True),
sa.Column("control_params", sa.JSON(), nullable=False),
sa.Column("metastore_id", sa.Integer(), nullable=True),
sa.Column("environment_id", sa.Integer(), nullable=False),
sa.Column("archived", sa.Boolean(), nullable=True),
sa.ForeignKeyConstraint(
["environment_id"],
["environment.id"],
name="query_engine_ibfk_1",
ondelete="CASCADE",
),
sa.ForeignKeyConstraint(
["metastore_id"], ["query_metastore.id"], name="query_engine_ibfk_2"
),
sa.PrimaryKeyConstraint("id"),
sa.UniqueConstraint("name"),
)
op.create_table(
"user_environment",
sa.Column("id", sa.Integer(), autoincrement=True, nullable=False),
sa.Column("environment_id", sa.Integer(), nullable=False),
sa.Column("user_id", sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(
["environment_id"], ["environment.id"], ondelete="CASCADE"
),
sa.ForeignKeyConstraint(["user_id"], ["user.id"], ondelete="CASCADE"),
sa.PrimaryKeyConstraint("id"),
)
op.create_table(
"user_role",
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("uid", sa.Integer(), nullable=True),
sa.Column("role", sa.Enum("ADMIN", name="userroletype"), nullable=False),
sa.Column("created_at", sa.DateTime(), nullable=True),
sa.ForeignKeyConstraint(
["uid"], ["user.id"], name="user_role_ibfk_1", ondelete="CASCADE"
),
sa.PrimaryKeyConstraint("id"),
)
op.create_table(
"user_setting",
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("uid", sa.Integer(), nullable=True),
sa.Column("key", sa.String(length=255), nullable=True),
sa.Column("value", sa.String(length=5000), nullable=True),
sa.Column("created_at", sa.DateTime(), nullable=True),
sa.ForeignKeyConstraint(
["uid"], ["user.id"], name="user_setting_ibfk_1", ondelete="CASCADE"
),
sa.PrimaryKeyConstraint("id"),
mysql_charset="utf8mb4",
mysql_engine="InnoDB",
)
op.create_table(
"data_doc_data_cell",
sa.Column("id", sa.Integer(), autoincrement=True, nullable=False),
sa.Column("data_doc_id", sa.Integer(), nullable=False),
sa.Column("data_cell_id", sa.Integer(), nullable=False),
sa.Column("cell_order", sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(
["data_cell_id"],
["data_cell.id"],
name="data_doc_data_cell_ibfk_1",
ondelete="CASCADE",
),
sa.ForeignKeyConstraint(
["data_doc_id"],
["data_doc.id"],
name="data_doc_data_cell_ibfk_2",
ondelete="CASCADE",
),
sa.PrimaryKeyConstraint("id"),
sa.UniqueConstraint("data_cell_id"),
)
op.create_table(
"data_doc_editor",
sa.Column("id", sa.Integer(), autoincrement=True, nullable=False),
sa.Column("data_doc_id", sa.Integer(), nullable=True),
sa.Column("uid", sa.Integer(), nullable=True),
sa.Column("read", sa.Boolean(), nullable=False),
sa.Column("write", sa.Boolean(), nullable=False),
sa.ForeignKeyConstraint(
["data_doc_id"], ["data_doc.id"], name="data_doc_editor_ibfk_1",
),
sa.ForeignKeyConstraint(
["uid"], ["user.id"], ondelete="CASCADE", name="data_doc_editor_ibfk_2",
),
sa.PrimaryKeyConstraint("id"),
sa.UniqueConstraint("data_doc_id", "uid", name="unique_data_doc_user"),
)
op.create_table(
"data_table",
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("created_at", sa.DateTime(), nullable=True),
sa.Column("updated_at", sa.DateTime(), nullable=True),
sa.Column("name", sa.String(length=255), nullable=True),
sa.Column("type", sa.String(length=255), nullable=True),
sa.Column("owner", sa.String(length=255), nullable=True),
sa.Column("table_created_at", sa.DateTime(), nullable=True),
sa.Column("table_updated_by", sa.String(length=255), nullable=True),
sa.Column("table_updated_at", sa.DateTime(), nullable=True),
sa.Column("data_size_bytes", sa.BigInteger(), nullable=True),
sa.Column("location", sa.String(length=2083), nullable=True),
sa.Column("column_count", sa.Integer(), nullable=True),
sa.Column("schema_id", sa.Integer(), nullable=True),
sa.Column("golden", sa.Boolean(), nullable=True),
sa.ForeignKeyConstraint(
["schema_id"],
["data_schema.id"],
name="data_table_ibfk_1",
ondelete="CASCADE",
),
sa.PrimaryKeyConstraint("id"),
)
op.create_index(op.f("ix_data_table_name"), "data_table", ["name"], unique=False)
op.create_index(op.f("ix_data_table_type"), "data_table", ["type"], unique=False)
op.create_table(
"favorite_data_doc",
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("data_doc_id", sa.Integer(), nullable=True),
sa.Column("uid", sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(
["data_doc_id"], ["data_doc.id"], name="favorite_data_doc_ibfk_1",
),
sa.ForeignKeyConstraint(
["uid"], ["user.id"], ondelete="CASCADE", name="favorite_data_doc_ibfk_2"
),
sa.PrimaryKeyConstraint("id"),
)
op.create_table(
"query_execution",
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("task_id", sa.String(length=255), nullable=True),
sa.Column(
"status",
sa.Enum(
"INITIALIZED",
"DELIVERED",
"RUNNING",
"DONE",
"ERROR",
"CANCEL",
name="queryexecutionstatus",
),
nullable=True,
),
sa.Column("created_at", sa.DateTime(), nullable=True),
sa.Column("completed_at", sa.DateTime(), nullable=True),
sa.Column("query", MediumText, nullable=True),
sa.Column("engine_id", sa.Integer(), nullable=True),
sa.Column("uid", sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(
["engine_id"], ["query_engine.id"], name="query_execution_ibfk_1",
),
sa.ForeignKeyConstraint(
["uid"], ["user.id"], ondelete="CASCADE", name="query_execution_ibfk_2"
),
sa.PrimaryKeyConstraint("id"),
mysql_charset="utf8mb4",
mysql_engine="InnoDB",
)
op.create_table(
"query_snippet",
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("context", MediumText, nullable=True),
sa.Column("title", sa.String(length=255), nullable=False),
sa.Column("engine_id", sa.Integer(), nullable=True),
sa.Column("description", sa.String(length=5000), nullable=True),
sa.Column("is_public", sa.Boolean(), nullable=False),
sa.Column("golden", sa.Boolean(), nullable=False),
sa.Column("created_at", sa.DateTime(), nullable=False),
sa.Column("created_by", sa.Integer(), nullable=True),
sa.Column("updated_at", sa.DateTime(), nullable=False),
sa.Column("last_updated_by", sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(
["created_by"], ["user.id"], name="query_snippet_ibfk_1", ondelete="CASCADE"
),
sa.ForeignKeyConstraint(
["engine_id"], ["query_engine.id"], name="query_snippet_ibfk_2",
),
sa.ForeignKeyConstraint(
["last_updated_by"],
["user.id"],
name="query_snippet_ibfk_3",
ondelete="CASCADE",
),
sa.PrimaryKeyConstraint("id"),
mysql_charset="utf8mb4",
mysql_engine="InnoDB",
)
op.create_table(
"data_cell_query_execution",
sa.Column("id", sa.Integer(), autoincrement=True, nullable=False),
sa.Column("query_execution_id", sa.Integer(), nullable=False),
sa.Column("data_cell_id", sa.Integer(), nullable=False),
sa.Column("latest", sa.Boolean(), nullable=False),
sa.ForeignKeyConstraint(
["data_cell_id"],
["data_cell.id"],
name="data_cell_query_execution_ibfk_1",
ondelete="CASCADE",
),
sa.ForeignKeyConstraint(
["query_execution_id"],
["query_execution.id"],
name="data_cell_query_execution_ibfk_2",
ondelete="CASCADE",
),
sa.PrimaryKeyConstraint("id"),
)
op.create_table(
"data_table_column",
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("created_at", sa.DateTime(), nullable=True),
sa.Column("updated_at", sa.DateTime(), nullable=True),
sa.Column("name", sa.String(length=255), nullable=True),
sa.Column("type", sa.String(length=255), nullable=True),
sa.Column("comment", sa.String(length=5000), nullable=True),
sa.Column("description", MediumText, nullable=True),
sa.Column("table_id", sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(
["table_id"],
["data_table.id"],
name="data_table_column_ibfk_1",
ondelete="CASCADE",
),
sa.PrimaryKeyConstraint("id"),
)
op.create_index(
op.f("ix_data_table_column_name"), "data_table_column", ["name"], unique=False
)
op.create_table(
"data_table_information",
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("data_table_id", sa.Integer(), nullable=True),
sa.Column("latest_partitions", sa.String(length=5000), nullable=True),
sa.Column("earliest_partitions", sa.String(length=5000), nullable=True),
sa.Column("description", MediumText, nullable=True),
sa.Column("hive_metastore_description", MediumText, nullable=True),
sa.ForeignKeyConstraint(
["data_table_id"],
["data_table.id"],
name="data_table_information_ibfk_1",
ondelete="CASCADE",
),
sa.PrimaryKeyConstraint("id"),
mysql_charset="utf8mb4",
mysql_engine="InnoDB",
)
op.create_table(
"data_table_ownership",
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("data_table_id", sa.Integer(), nullable=True),
sa.Column("created_at", sa.DateTime(), nullable=True),
sa.Column("owner", sa.String(length=255), nullable=True),
sa.ForeignKeyConstraint(
["data_table_id"],
["data_table.id"],
name="data_table_ownership_data_table_id_fk",
ondelete="CASCADE",
),
sa.PrimaryKeyConstraint("id"),
sa.UniqueConstraint("data_table_id"),
)
op.create_table(
"query_execution_error",
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("query_execution_id", sa.Integer(), nullable=True),
sa.Column("error_type", sa.Integer(), nullable=False),
sa.Column("error_message_extracted", sa.String(length=5000), nullable=True),
sa.Column("error_message", Text, nullable=True),
sa.ForeignKeyConstraint(
["query_execution_id"],
["query_execution.id"],
name="query_execution_error_ibfk_1",
ondelete="CASCADE",
),
sa.PrimaryKeyConstraint("id"),
mysql_charset="utf8mb4",
mysql_engine="InnoDB",
)
op.create_table(
"query_execution_notification",
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("query_execution_id", sa.Integer(), nullable=True),
sa.Column("user", sa.String(length=255), nullable=False),
sa.ForeignKeyConstraint(
["query_execution_id"],
["query_execution.id"],
name="query_execution_notification_ibfk_1",
ondelete="CASCADE",
),
sa.PrimaryKeyConstraint("id"),
)
op.create_table(
"statement_execution",
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("statement_range_start", sa.Integer(), nullable=False),
sa.Column("statement_range_end", sa.Integer(), nullable=False),
sa.Column("query_execution_id", sa.Integer(), nullable=True),
sa.Column(
"status",
sa.Enum(
"INITIALIZED",
"RUNNING",
"UPLOADING",
"DONE",
"ERROR",
"CANCEL",
name="statementexecutionstatus",
),
nullable=True,
),
sa.Column("tracking_url", sa.String(length=2083), nullable=True),
sa.Column("created_at", sa.DateTime(), nullable=True),
sa.Column("completed_at", sa.DateTime(), nullable=True),
sa.Column("result_row_count", sa.BigInteger(), nullable=False),
sa.Column("result_path", sa.String(length=2083), nullable=True),
sa.Column("has_log", sa.Boolean(), nullable=False),
sa.Column("log_path", sa.String(length=2083), nullable=True),
sa.ForeignKeyConstraint(
["query_execution_id"],
["query_execution.id"],
name="statement_execution_ibfk_1",
ondelete="CASCADE",
),
sa.PrimaryKeyConstraint("id"),
)
op.create_table(
"table_lineage",
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("table_id", sa.Integer(), nullable=True),
sa.Column("parent_table_id", sa.Integer(), nullable=True),
sa.Column("job_metadata_id", sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(
["job_metadata_id"], ["data_job_metadata.id"], name="table_lineage_ibfk_1",
),
sa.ForeignKeyConstraint(
["parent_table_id"],
["data_table.id"],
name="table_lineage_ibfk_2",
ondelete="CASCADE",
),
sa.ForeignKeyConstraint(
["table_id"],
["data_table.id"],
name="table_lineage_ibfk_3",
ondelete="CASCADE",
),
sa.PrimaryKeyConstraint("id"),
)
op.create_table(
"statement_execution_stream_log",
sa.Column("id", sa.BigInteger(), nullable=False),
sa.Column("statement_execution_id", sa.Integer(), nullable=True),
sa.Column("created_at", sa.DateTime(), nullable=True),
sa.Column("log", sa.String(length=5000), nullable=True),
sa.ForeignKeyConstraint(
["statement_execution_id"],
["statement_execution.id"],
name="statement_execution_stream_log_ibfk_1",
ondelete="CASCADE",
),
sa.PrimaryKeyConstraint("id"),
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table("statement_execution_stream_log")
op.drop_table("table_lineage")
op.drop_table("statement_execution")
op.drop_table("query_execution_notification")
op.drop_table("query_execution_error")
op.drop_table("data_table_ownership")
op.drop_table("data_table_information")
op.drop_index(op.f("ix_data_table_column_name"), table_name="data_table_column")
op.drop_table("data_table_column")
op.drop_table("data_cell_query_execution")
op.drop_table("query_snippet")
op.drop_table("query_execution")
op.drop_table("favorite_data_doc")
op.drop_index(op.f("ix_data_table_type"), table_name="data_table")
op.drop_index(op.f("ix_data_table_name"), table_name="data_table")
op.drop_table("data_table")
op.drop_table("data_doc_editor")
op.drop_table("data_doc_data_cell")
op.drop_table("user_setting")
op.drop_table("user_role")
op.drop_table("user_environment")
op.drop_table("query_engine")
op.drop_table("impression")
op.drop_index(op.f("ix_data_schema_name"), table_name="data_schema")
op.drop_table("data_schema")
op.drop_table("data_doc")
op.drop_table("api_access_token")
op.drop_table("announcements")
op.drop_table("user")
op.drop_table("task_schedules")
op.drop_table("task_schedule")
op.drop_table("task_run_record")
op.drop_table("query_metastore")
op.drop_index(op.f("ix_key_value_store_key"), table_name="key_value_store")
op.drop_table("key_value_store")
op.drop_table("function_documentation")
op.drop_index(op.f("ix_environment_name"), table_name="environment")
op.drop_table("environment")
op.drop_index(op.f("ix_data_job_metadata_job_name"), table_name="data_job_metadata")
op.drop_table("data_job_metadata")
op.drop_table("data_cell")
# ### end Alembic commands ###
conn = op.get_bind()
if conn.dialect.name == "postgresql":
op.execute("DROP TYPE datacelltype")
op.execute("DROP TYPE taskrunstatus")
op.execute("DROP TYPE itemtype")
op.execute("DROP TYPE userroletype")
op.execute("DROP TYPE queryexecutionstatus")
op.execute("DROP TYPE statementexecutionstatus")
|
flows/ablations/abl_nomixlog.py | evanlohn/flowpp | 131 | 11183509 | """
Ablation: no mixture of logistics
Filters: 108 (to compensate for parameter count)
Params: 32,045,708
Dropout 0.2
"""
import tensorflow as tf
from flows.flow_training import train, evaluate
from flows.flows import (
Flow, Compose, Inverse, ImgProc, Sigmoid,
TupleFlip, CheckerboardSplit, ChannelSplit, SpaceToDepth, Norm, Pointwise, ElemwiseAffine,
conv2d, gated_conv, gated_attn, layernorm, VarConfig, get_var, gaussian_sample_logp
)
class AffineAttnCoupling(Flow):
"""
    Affine coupling with a gated-attention conditioner; the mixture-of-logistics CDF is removed in this ablation (components = 0 below)
"""
def __init__(self, filters, blocks, heads=4, init_scale=0.1):
def f(x, *, vcfg: VarConfig, context=None, dropout_p=0., verbose=True):
if vcfg.init and verbose:
# debug stuff
xmean, xvar = tf.nn.moments(x, axes=list(range(len(x.shape))))
x = tf.Print(
x, [tf.shape(x), xmean, tf.sqrt(xvar), tf.reduce_min(x), tf.reduce_max(x)],
message='{} (shape/mean/std/min/max) '.format(self.template.variable_scope.name), summarize=10
)
B, H, W, C = x.shape.as_list()
pos_emb = get_var('pos_emb', shape=[H, W, filters], initializer=tf.random_normal_initializer(stddev=0.01),
vcfg=vcfg)
x = conv2d(x, name='proj_in', num_units=filters, vcfg=vcfg)
for i_block in range(blocks):
with tf.variable_scope(f'block{i_block}'):
x = gated_conv(x, name='conv', a=context, use_nin=True, dropout_p=dropout_p, vcfg=vcfg)
x = layernorm(x, name='ln1', vcfg=vcfg)
x = gated_attn(x, name='attn', pos_emb=pos_emb, heads=heads, dropout_p=dropout_p, vcfg=vcfg)
x = layernorm(x, name='ln2', vcfg=vcfg)
components = 0 # no mixture of logistics
x = conv2d(x, name='proj_out', num_units=C * (2 + 3 * components), init_scale=init_scale, vcfg=vcfg)
assert x.shape == [B, H, W, C * (2 + 3 * components)]
x = tf.reshape(x, [B, H, W, C, 2 + 3 * components])
s, t = tf.tanh(x[:, :, :, :, 0]), x[:, :, :, :, 1]
assert s.shape == t.shape == [B, H, W, C]
return ElemwiseAffine(scales=tf.exp(s), logscales=s, biases=t)
self.template = tf.make_template(self.__class__.__name__, f)
def forward(self, x, **kwargs):
assert isinstance(x, tuple)
cf, ef = x
flow = self.template(cf, **kwargs)
out, logd = flow.forward(ef)
assert out.shape == ef.shape == cf.shape
return (cf, out), logd
def inverse(self, y, **kwargs):
assert isinstance(y, tuple)
cf, ef = y
flow = self.template(cf, **kwargs)
out, logd = flow.inverse(ef)
assert out.shape == ef.shape == cf.shape
return (cf, out), logd
def construct(*, filters, dequant_filters, blocks):
dequant_coupling_kwargs = dict(filters=dequant_filters, blocks=2)
coupling_kwargs = dict(filters=filters, blocks=blocks)
class Dequant(Flow):
def __init__(self):
def shallow_processor(x, *, dropout_p, vcfg):
x = x / 256.0 - 0.5
(this, that), _ = CheckerboardSplit().forward(x)
x = conv2d(tf.concat([this, that], 3), name='proj', num_units=32, vcfg=vcfg)
for i in range(3):
x = gated_conv(x, name=f'c{i}', vcfg=vcfg, dropout_p=dropout_p, use_nin=False, a=None)
return x
self.context_proc = tf.make_template("context_proc", shallow_processor)
self.dequant_flow = Compose([
CheckerboardSplit(),
Norm(), Pointwise(), AffineAttnCoupling(**dequant_coupling_kwargs), TupleFlip(),
Norm(), Pointwise(), AffineAttnCoupling(**dequant_coupling_kwargs), TupleFlip(),
Norm(), Pointwise(), AffineAttnCoupling(**dequant_coupling_kwargs), TupleFlip(),
Norm(), Pointwise(), AffineAttnCoupling(**dequant_coupling_kwargs), TupleFlip(),
Inverse(CheckerboardSplit()),
Sigmoid(),
])
def forward(self, x, *, vcfg, dropout_p=0., verbose=True, context=None):
assert context is None
eps, eps_logp = gaussian_sample_logp(x.shape.as_list())
xd, logd = self.dequant_flow.forward(
eps,
context=self.context_proc(x, dropout_p=dropout_p, vcfg=vcfg),
dropout_p=dropout_p, verbose=verbose, vcfg=vcfg
)
assert eps.shape == x.shape and logd.shape == eps_logp.shape == [x.shape[0]]
return x + xd, logd - eps_logp
dequant_flow = Dequant()
flow = Compose([
ImgProc(),
CheckerboardSplit(),
Norm(), Pointwise(), AffineAttnCoupling(**coupling_kwargs), TupleFlip(),
Norm(), Pointwise(), AffineAttnCoupling(**coupling_kwargs), TupleFlip(),
Norm(), Pointwise(), AffineAttnCoupling(**coupling_kwargs), TupleFlip(),
Norm(), Pointwise(), AffineAttnCoupling(**coupling_kwargs), TupleFlip(),
Inverse(CheckerboardSplit()),
SpaceToDepth(),
ChannelSplit(),
Norm(), Pointwise(), AffineAttnCoupling(**coupling_kwargs), TupleFlip(),
Norm(), Pointwise(), AffineAttnCoupling(**coupling_kwargs), TupleFlip(),
Inverse(ChannelSplit()),
CheckerboardSplit(),
Norm(), Pointwise(), AffineAttnCoupling(**coupling_kwargs), TupleFlip(),
Norm(), Pointwise(), AffineAttnCoupling(**coupling_kwargs), TupleFlip(),
Norm(), Pointwise(), AffineAttnCoupling(**coupling_kwargs), TupleFlip(),
Inverse(CheckerboardSplit()),
])
return dequant_flow, flow
def main():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--eval_checkpoint', type=str, default=None)
args = parser.parse_args()
max_lr = 3e-4
warmup_steps = 2000
lr_decay = 1
def lr_schedule(step):
if step < warmup_steps:
return max_lr * step / warmup_steps
return max_lr * (lr_decay ** (step - warmup_steps))
dropout_p = 0.2
blocks = 10
filters = dequant_filters = 108
ema_decay = 0.999
def flow_constructor():
return construct(filters=filters, dequant_filters=dequant_filters, blocks=blocks)
if args.eval_checkpoint:
evaluate(flow_constructor=flow_constructor, seed=0, restore_checkpoint=args.eval_checkpoint)
return
train(
flow_constructor=flow_constructor,
logdir=f'~/logs/abl_nomixlog_fbdq{dequant_filters}_blocks{blocks}_f{filters}_lr{max_lr}_drop{dropout_p}',
lr_schedule=lr_schedule,
dropout_p=dropout_p,
seed=0,
init_bs=128,
total_bs=64,
ema_decay=ema_decay,
steps_per_log=100,
epochs_per_val=1,
max_grad_norm=1.,
)
if __name__ == '__main__':
main()
|
cpython-pycopy/test.py | AEMICS/pycopy-lib | 126 | 11183539
import pycopy
from pycopy import const
FOO = const(1)
@pycopy.native
def func1():
return 2
@pycopy.viper
def func2() -> int:
return 3
assert FOO == 1
assert func1() == 2
assert func2() == 3
|
libs/fuel/tests/test_caltech101_silhouettes.py | dendisuhubdy/attention-lvcsr | 767 | 11183551
import numpy
from numpy.testing import assert_raises
from fuel.datasets import CalTech101Silhouettes
from tests import skip_if_not_available
def test_caltech101_silhouettes16():
skip_if_not_available(datasets=['caltech101_silhouettes16.hdf5'])
for which_set, size, num_examples in (
('train', 16, 4082), ('valid', 16, 2257), ('test', 16, 2302)):
ds = CalTech101Silhouettes(which_sets=[which_set], size=size,
load_in_memory=False)
assert ds.num_examples == num_examples
handle = ds.open()
features, targets = ds.get_data(handle, slice(0, 10))
assert features.shape == (10, 1, size, size)
assert targets.shape == (10, 1)
assert features.dtype == numpy.uint8
assert targets.dtype == numpy.uint8
def test_caltech101_silhouettes_unkn_size():
assert_raises(ValueError, CalTech101Silhouettes,
which_sets=['test'], size=10)
def test_caltech101_silhouettes28():
skip_if_not_available(datasets=['caltech101_silhouettes28.hdf5'])
for which_set, size, num_examples in (
('train', 28, 4100), ('valid', 28, 2264), ('test', 28, 2307)):
ds = CalTech101Silhouettes(which_sets=[which_set], size=size,
load_in_memory=False)
assert ds.num_examples == num_examples
handle = ds.open()
features, targets = ds.get_data(handle, slice(0, 10))
assert features.shape == (10, 1, size, size)
assert targets.shape == (10, 1)
assert features.dtype == numpy.uint8
assert targets.dtype == numpy.uint8
|
curriculum/envs/goal_env.py | ACampero/rllab-curriculum | 115 | 11183553
"""
Goal based environments. The classes inside this file should inherit the classes
from the state environment base classes.
"""
import random
from rllab import spaces
import sys
import os.path as osp
import numpy as np
import scipy.misc
import tempfile
import math
from rllab.envs.mujoco.mujoco_env import MODEL_DIR, BIG
from rllab.core.serializable import Serializable
from rllab.envs.proxy_env import ProxyEnv
from rllab.envs.base import Step
from rllab.misc import autoargs
from rllab.misc import logger
from rllab.sampler.utils import rollout
from rllab.spaces.box import Box
from rllab.misc.overrides import overrides
from curriculum.envs.base import StateGenerator, UniformListStateGenerator, \
UniformStateGenerator, FixedStateGenerator, StateAuxiliaryEnv
class GoalEnv(Serializable):
""" A wrapper of StateAuxiliaryEnv to make it compatible with the old goal env."""
def __init__(self, goal_generator=None, *args, **kwargs):
Serializable.quick_init(self, locals())
self._goal_holder = StateAuxiliaryEnv(state_generator=goal_generator, *args, **kwargs)
def update_goal_generator(self, *args, **kwargs):
return self._goal_holder.update_state_generator(*args, **kwargs)
def update_goal(self, goal=None, *args, **kwargs):
return self._goal_holder.update_aux_state(state=goal, *args, **kwargs)
@property
def goal_generator(self):
return self._goal_holder.state_generator
@property
def current_goal(self):
return self._goal_holder.current_aux_state
def __getstate__(self):
d = super(GoalEnv, self).__getstate__()
d['__goal_holder'] = self._goal_holder
return d
def __setstate__(self, d):
super(GoalEnv, self).__setstate__(d)
self._goal_holder = d['__goal_holder']
class GoalExplorationEnv(GoalEnv, ProxyEnv, Serializable):
def __init__(self, env, goal_generator, obs2goal_transform=None, terminal_eps=0.05, only_feasible=False,
terminate_env=False, goal_bounds=None, distance_metric='L2', extend_dist_rew=0., goal_weight=1,
inner_weight=0, append_transformed_obs=False, append_goal_to_observation=True, **kwargs):
"""
This environment wraps around a normal environment to facilitate goal based exploration.
Initial position based experiments should not use this class.
:param env: wrapped env
:param goal_generator: a StateGenerator object
:param obs2goal_transform: a callable that transforms an observation of the wrapped environment into goal space
:param terminal_eps: a threshold of distance that determines if a goal is reached
        :param terminate_env: a boolean that controls if the environment is terminated when the goal is reached
:param goal_bounds: array marking the UB of the rectangular limit of goals.
:param distance_metric: L1 or L2 or a callable func
:param goal_weight: coef of the goal based reward
:param inner_weight: coef of the inner environment reward
:param append_transformed_obs: append the transformation of the current observation to full observation
"""
Serializable.quick_init(self, locals())
ProxyEnv.__init__(self, env)
GoalEnv.__init__(self, **kwargs)
self.update_goal_generator(goal_generator)
if obs2goal_transform is None:
self._obs2goal_transform = lambda x: x # needed for replay old policies [:2]
else:
self._obs2goal_transform = obs2goal_transform
self.terminate_env = terminate_env
self.goal_bounds = goal_bounds
self.terminal_eps = terminal_eps
self.only_feasible = only_feasible
self.distance_metric = distance_metric
self.extend_dist_rew_weight = extend_dist_rew
self.goal_weight = goal_weight
self.inner_weight = inner_weight
self.append_transformed_obs = append_transformed_obs
self.append_goal_to_observation = append_goal_to_observation
# TODO fix this
if self.goal_bounds is None:
# print("setting goal bounds to match env")
self.goal_bounds = self.wrapped_env.observation_space.bounds[1] # we keep only UB
self._feasible_goal_space = self.wrapped_env.observation_space
else:
self._feasible_goal_space = Box(low=-1 * self.goal_bounds, high=self.goal_bounds)
# elif np.array(self.goal_bounds).size <= 1:
# self.goal_bounds = [-1 * self.goal_bounds * np.ones(self.wrapped_env.observation_space.flat_dim),
# self.goal_bounds * np.ones(self.wrapped_env.observation_space.flat_dim)]
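    # Usage sketch (illustrative only): how this wrapper is typically composed with a goal
    # generator. `InnerEnv` is a placeholder for any wrapped rllab-style environment and the
    # keyword values below are assumptions, not defaults taken from this repo.
    #
    #   goal_generator = UniformStateGenerator(...)  # constructor args omitted, see curriculum.envs.base
    #   env = GoalExplorationEnv(
    #       env=InnerEnv(), goal_generator=goal_generator,
    #       obs2goal_transform=lambda obs: obs[:2],  # treat the first two obs dims as the goal space
    #       terminal_eps=0.3, terminate_env=True,
    #   )
    #   obs = env.reset()  # samples a new goal and appends it to the observation
    #   obs, rew, done, info = env.step(action)  # info carries 'distance', 'reward_dist', 'goal_reached', 'goal'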
@property
@overrides
def feasible_goal_space(self):
return self._feasible_goal_space
def is_feasible(self, goal):
obj = self.wrapped_env
while not hasattr(obj, 'is_feasible') and hasattr(obj, 'wrapped_env'):
obj = obj.wrapped_env
if hasattr(obj, 'is_feasible'):
            return obj.is_feasible(np.array(goal))  # but the goal might not live in the same space!
else:
return True
def reset(self, reset_goal=True, **kwargs): # allows to pass init_state if needed
if reset_goal:
self.update_goal()
#default behavior
if self.append_goal_to_observation:
ret = self.append_goal_observation(ProxyEnv.reset(self, goal=self.current_goal, **kwargs)) # the wrapped env needs to use or ignore it
else:
ret = ProxyEnv.reset(self, goal=self.current_goal, **kwargs)
# used by disk environment # todo: make more generalizable! NOT USABLE FOR OTHER ENVS!!
if 'init_state' in kwargs and len(kwargs['init_state']) == 9:
delta = tuple(kwargs['init_state'][-2:]) # joint position is in terms of amount moved
original_goal = self.wrapped_env.model.data.site_xpos[-1]
new_goal = delta[0] + original_goal[0], delta[1] + original_goal[1], original_goal[2] # z dim unchanged
self.update_goal(new_goal)
return ret
def step(self, action):
observation, reward, done, info = ProxyEnv.step(self, action)
info['reward_inner'] = reward_inner = self.inner_weight * reward
# print(reward_inner)
if 'distance' not in info:
info['distance'] = dist = self.dist_to_goal(observation)
info['reward_dist'] = reward_dist = self.compute_dist_reward(observation)
info['goal_reached'] = 1.0 * self.is_goal_reached(observation)
else:
# modified so that inner environment can pass in goal via step
dist = info['distance']
info['goal_reached'] = 1.0 * (dist < self.terminal_eps)
info['reward_dist'] = reward_dist = - self.extend_dist_rew_weight * dist
info['goal'] = self.current_goal
# print(reward_dist)
# print(reward_inner)
# print("step: obs={}, goal={}, dist={}".format(self.append_goal_observation(observation), self.current_goal, dist))
if self.terminate_env and info['goal_reached']:
done = True
if self.append_goal_to_observation:
# print("appending goal to obs")
observation = self.append_goal_observation(observation)
return (
observation,
reward_dist + reward_inner + info['goal_reached'] * self.goal_weight,
done,
info
)
def is_goal_reached(self, observation):
""" Return a boolean whether the (unaugmented) observation reached the goal. """
if self.only_feasible:
return self.dist_to_goal(observation) < self.terminal_eps and self.is_feasible(self.current_goal)
else:
return self.dist_to_goal(observation) < self.terminal_eps
def compute_dist_reward(self, observation):
""" Compute the 0 or 1 reward for reaching the goal. """
return - self.extend_dist_rew_weight * self.dist_to_goal(observation)
def dist_to_goal(self, obs):
""" Compute the distance of the given observation to the current goal. """
goal_obs = self.transform_to_goal_space(obs)
if self.distance_metric == 'L1':
goal_distance = np.linalg.norm(goal_obs - self.current_goal, ord=1)
elif self.distance_metric == 'L2':
goal_distance = np.linalg.norm(goal_obs - self.current_goal, ord=2)
elif callable(self.distance_metric):
goal_distance = self.distance_metric(goal_obs, self.current_goal)
else:
raise NotImplementedError('Unsupported distance metric type.')
return goal_distance
def transform_to_goal_space(self, obs):
""" Apply the goal space transformation to the given observation. """
return self._obs2goal_transform(obs)
def get_current_obs(self):
""" Get the full current observation. The observation should be identical to the one used by policy. """
obj = self
while hasattr(obj, "wrapped_env"): # try to go through "Normalize and Proxy and whatever wrapper"
obj = obj.wrapped_env
if self.append_goal_to_observation:
return self.append_goal_observation(obj.get_current_obs())
else:
return obj.get_current_obs()
@overrides
@property
def goal_observation(self):
""" Get the goal space part of the current observation. """
obj = self
while hasattr(obj, "wrapped_env"): # try to go through "Normalize and Proxy and whatever wrapper"
obj = obj.wrapped_env
# FIXME: technically we need to invert the angle
return self.transform_to_goal_space(obj.get_current_obs())
def append_goal_observation(self, obs):
""" Append the current goal based observation to the given original observation. """
if self.append_transformed_obs:
return np.concatenate(
[obs, np.array(self.transform_to_goal_space(obs)), np.array(self.current_goal)]
)
return np.concatenate([obs, np.array(self.current_goal)])
@property
@overrides
def observation_space(self):
shp = self.get_current_obs().shape
ub = BIG * np.ones(shp)
return spaces.Box(ub * -1, ub)
@overrides
def log_diagnostics(self, paths, n_traj=1, *args, **kwargs):
# Process by time steps
distances = [
np.mean(path['env_infos']['distance'])
for path in paths
]
initial_goal_distances = [
path['env_infos']['distance'][0] for path in paths
]
final_goal_distances = [
path['env_infos']['distance'][-1] for path in paths
]
reward_dist = [
np.sum(path['env_infos']['reward_dist'])
for path in paths
]
reward_inner = [
np.sum(path['env_infos']['reward_inner'])
for path in paths
]
goals = [path['observations'][0, -self.feasible_goal_space.flat_dim:] for path in paths] # assumes const goal
success = [np.max(path['env_infos']['goal_reached']) for path in paths]
feasible = [int(self.feasible_goal_space.contains(goal)) for goal in goals]
if n_traj > 1:
avg_success = []
for i in range(len(success) // n_traj):
                avg_success.append(np.mean(success[n_traj * i: n_traj * i + n_traj]))
success = avg_success # here the success can be non-int
print('the mean success is: ', np.mean(success))
print('the mean feasible is: ', np.mean(feasible))
# Process by trajectories
logger.record_tabular('AvgInitGoalDistance', np.mean(initial_goal_distances))
logger.record_tabular('AvgFinalGoalDistance', np.mean(final_goal_distances))
logger.record_tabular('MinFinalGoalDistance', np.min(final_goal_distances))
logger.record_tabular('MeanPathDistance', np.mean(distances))
logger.record_tabular('AvgTotalRewardDist', np.mean(reward_dist))
logger.record_tabular('AvgTotalRewardInner', np.mean(reward_inner))
logger.record_tabular('SuccessRate', np.mean(success))
logger.record_tabular('FeasibilityRate', np.mean(feasible))
def get_goal_observation(env):
if hasattr(env, 'goal_observation'):
return env.goal_observation # should be unnecessary
elif hasattr(env, 'wrapped_env'):
return env.wrapped_env.goal_observation
else:
raise NotImplementedError('Unsupported environment')
def get_current_goal(env):
""" Get the current goal for the wrapped environment. """
if hasattr(env, 'current_goal'):
return env.current_goal
elif hasattr(env, 'wrapped_env'):
return env.wrapped_env.current_goal
else:
raise NotImplementedError('Unsupported environment')
def generate_initial_goals(env, policy, goal_range, goal_center=None, horizon=500, size=10000):
current_goal = get_current_goal(env)
goal_dim = np.array(current_goal).shape
done = False
obs = env.reset()
goals = [get_goal_observation(env)]
if goal_center is None:
goal_center = np.zeros(goal_dim)
steps = 0
while len(goals) < size:
steps += 1
if done or steps >= horizon:
steps = 0
done = False
env.update_goal_generator(
FixedStateGenerator(
goal_center + np.random.uniform(-goal_range, goal_range, goal_dim)
)
)
obs = env.reset()
goals.append(get_goal_observation(env))
else:
action, _ = policy.get_action(obs)
obs, _, done, _ = env.step(action)
goals.append(get_goal_observation(env))
return np.array(goals)
def generate_brownian_goals(env, starts=None, horizon=100, size=1000):
current_goal = get_current_goal(env)
if starts is None:
starts = [current_goal]
n_starts = len(starts)
i = 0
done = False
env.reset(init_state=starts[i])
goals = [get_goal_observation(env)]
steps = 0
while len(goals) < size: # we ignore if it's done or not: the brownian motion around the goal will be short!
steps += 1
if done or steps >= horizon:
steps = 0
i += 1
done = False
env.reset(init_state=starts[i % n_starts])
goals.append(get_goal_observation(env))
else:
action = np.random.randn(env.action_space.flat_dim)
obs, _, done, _ = env.step(action)
goals.append(get_goal_observation(env))
return np.array(goals)
def evaluate_goal_env(env, policy, horizon, n_goals=10, n_traj=1, **kwargs):
paths = [rollout(env=env, agent=policy, max_path_length=horizon) for _ in range(int(n_goals))]
env.log_diagnostics(paths, n_traj=n_traj, **kwargs)
|
angr/engines/soot/expressions/newArray.py | Kyle-Kyle/angr | 6,132 | 11183578
import logging
from ..values import SimSootValue_ArrayBaseRef
from .base import SimSootExpr
l = logging.getLogger('angr.engines.soot.expressions.newarray')
class SimSootExpr_NewArray(SimSootExpr):
def _execute(self):
element_type = self.expr.base_type
size = self._translate_expr(self.expr.size).expr
self.expr = self.new_array(self.state, element_type, size)
@staticmethod
def new_array(state, element_type, size):
"""
Allocates a new array in memory and returns the reference to the base.
"""
size_bounded = SimSootExpr_NewArray._bound_array_size(state, size)
# return the reference of the array base
# => elements getting lazy initialized in the javavm memory
return SimSootValue_ArrayBaseRef(heap_alloc_id=state.javavm_memory.get_new_uuid(),
element_type=element_type,
size=size_bounded)
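    # Illustrative call (a sketch; the concrete state and size objects are assumptions):
    #   base_ref = SimSootExpr_NewArray.new_array(state, element_type='int',
    #                                             size=state.solver.BVV(10, 32))
    # `base_ref` is a SimSootValue_ArrayBaseRef; the element values are only materialised
    # lazily by the JavaVM memory model when they are first accessed.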
@staticmethod
def _bound_array_size(state, array_size):
# check if array size can exceed MAX_ARRAY_SIZE
max_array_size = state.solver.BVV(state.javavm_memory.max_array_size, 32)
size_stays_below_maximum = state.solver.eval_upto(
max_array_size.SGE(array_size), 2
)
# overwrite size, if it *always* exceeds the maximum
        if True not in size_stays_below_maximum:
l.warning('Array size %s always exceeds maximum size. '
'It gets overwritten with the maximum %s.',
array_size, max_array_size)
return max_array_size
# bound size, if it *can* exceeds the maximum
if True in size_stays_below_maximum and\
False in size_stays_below_maximum:
l.warning('Array size %s can exceed maximum size. '
'It gets bounded with the maximum %s.',
array_size, max_array_size)
state.solver.add(max_array_size.UGE(array_size))
return array_size
|
model/sequential_recommender/GRU4Rec.py | jasonshere/NeuRec | 978 | 11183607
"""
Paper: Session-based Recommendations with Recurrent Neural Networks
Author: <NAME>, <NAME>, <NAME>, and <NAME>
Reference: https://github.com/hidasib/GRU4Rec
https://github.com/Songweiping/GRU4Rec_TensorFlow
@author: <NAME>
"""
import numpy as np
from model.AbstractRecommender import SeqAbstractRecommender
import tensorflow as tf
from util import log_loss, l2_loss
class GRU4Rec(SeqAbstractRecommender):
def __init__(self, sess, dataset, conf):
super(GRU4Rec, self).__init__(dataset, conf)
self.train_matrix = dataset.train_matrix
self.dataset = dataset
self.users_num, self.items_num = self.train_matrix.shape
self.lr = conf["lr"]
self.reg = conf["reg"]
self.layers = conf["layers"]
self.batch_size = conf["batch_size"]
self.epochs = conf["epochs"]
if conf["hidden_act"] == "relu":
self.hidden_act = tf.nn.relu
elif conf["hidden_act"] == "tanh":
self.hidden_act = tf.nn.tanh
else:
raise ValueError("There is not hidden_act named '%s'." % conf["hidden_act"])
# final_act = leaky-relu
if conf["final_act"] == "relu":
self.final_act = tf.nn.relu
elif conf["final_act"] == "linear":
self.final_act = tf.identity
elif conf["final_act"] == "leaky_relu":
self.final_act = tf.nn.leaky_relu
else:
raise ValueError("There is not final_act named '%s'." % conf["final_act"])
if conf["loss"] == "bpr":
self.loss_fun = self._bpr_loss
elif conf["loss"] == "top1":
self.loss_fun = self._top1_loss
else:
raise ValueError("There is not loss named '%s'." % conf["loss"])
self.data_uit, self.offset_idx = self._init_data()
# for sampling negative items
_, pop = np.unique(self.data_uit[:, 1], return_counts=True)
pop_cumsum = np.cumsum(pop)
self.pop_cumsum = pop_cumsum / pop_cumsum[-1]
self.sess = sess
def _init_data(self):
time_dok = self.dataset.time_matrix.todok()
data_uit = [[row, col, time] for (row, col), time in time_dok.items()]
data_uit.sort(key=lambda x: (x[0], x[-1]))
data_uit = np.array(data_uit, dtype=np.int32)
_, idx = np.unique(data_uit[:, 0], return_index=True)
offset_idx = np.zeros(len(idx)+1, dtype=np.int32)
offset_idx[:-1] = idx
offset_idx[-1] = len(data_uit)
return data_uit, offset_idx
def _create_variable(self):
self.X_ph = tf.placeholder(tf.int32, [self.batch_size], name='input')
self.Y_ph = tf.placeholder(tf.int32, [self.batch_size], name='output')
self.state_ph = [tf.placeholder(tf.float32, [self.batch_size, n_unit], name='layer_%d_state' % idx)
for idx, n_unit in enumerate(self.layers)]
init = tf.random.truncated_normal([self.items_num, self.layers[0]], mean=0.0, stddev=0.01)
self.input_embeddings = tf.Variable(init, dtype=tf.float32, name="input_embeddings")
init = tf.random.truncated_normal([self.items_num, self.layers[-1]], mean=0.0, stddev=0.01)
self.item_embeddings = tf.Variable(init, dtype=tf.float32, name="item_embeddings")
self.item_biases = tf.Variable(tf.zeros([self.items_num]), dtype=tf.float32, name="item_biases")
def _bpr_loss(self, logits):
# logits: (b, size_y)
pos_logits = tf.matrix_diag_part(logits) # (b,)
pos_logits = tf.reshape(pos_logits, shape=[-1, 1]) # (b, 1)
loss = tf.reduce_mean(log_loss(pos_logits-logits))
return loss
def _top1_loss(self, logits):
# logits: (b, size_y)
pos_logits = tf.matrix_diag_part(logits) # (b,)
pos_logits = tf.reshape(pos_logits, shape=[-1, 1]) # (b, 1)
loss1 = tf.reduce_mean(tf.sigmoid(-pos_logits + logits), axis=-1) # (b,)
loss2 = tf.reduce_mean(tf.sigmoid(tf.pow(logits, 2)), axis=-1) - \
tf.squeeze(tf.sigmoid(tf.pow(pos_logits, 2))/self.batch_size) # (b,)
return tf.reduce_mean(loss1+loss2)
def build_graph(self):
self._create_variable()
# get embedding and bias
# b: batch size
# l1: the dim of the first layer
# ln: the dim of the last layer
# size_y: the length of Y_ph, i.e., n_sample+batch_size
cells = [tf.nn.rnn_cell.GRUCell(size, activation=self.hidden_act) for size in self.layers]
drop_cell = [tf.nn.rnn_cell.DropoutWrapper(cell) for cell in cells]
stacked_cell = tf.nn.rnn_cell.MultiRNNCell(drop_cell)
inputs = tf.nn.embedding_lookup(self.input_embeddings, self.X_ph) # (b, l1)
outputs, state = stacked_cell(inputs, state=self.state_ph)
self.u_emb = outputs # outputs: (b, ln)
self.final_state = state # [(b, l1), (b, l2), ..., (b, ln)]
# for training
items_embed = tf.nn.embedding_lookup(self.item_embeddings, self.Y_ph) # (size_y, ln)
items_bias = tf.gather(self.item_biases, self.Y_ph) # (size_y,)
logits = tf.matmul(outputs, items_embed, transpose_b=True) + items_bias # (b, size_y)
logits = self.final_act(logits)
loss = self.loss_fun(logits)
# reg loss
reg_loss = l2_loss(inputs, items_embed, items_bias)
final_loss = loss + self.reg*reg_loss
self.update_opt = tf.train.AdamOptimizer(self.lr).minimize(final_loss)
def train_model(self):
self.logger.info(self.evaluator.metrics_info())
data_uit, offset_idx = self.data_uit, self.offset_idx
data_items = data_uit[:, 1]
for epoch in range(self.epochs):
state = [np.zeros([self.batch_size, n_unit], dtype=np.float32) for n_unit in self.layers]
user_idx = np.random.permutation(len(offset_idx) - 1)
iters = np.arange(self.batch_size, dtype=np.int32)
maxiter = iters.max()
start = offset_idx[user_idx[iters]]
end = offset_idx[user_idx[iters]+1]
finished = False
while not finished:
min_len = (end - start).min()
out_idx = data_items[start]
for i in range(min_len-1):
in_idx = out_idx
out_idx = data_items[start+i+1]
out_items = out_idx
feed = {self.X_ph: in_idx, self.Y_ph: out_items}
for l in range(len(self.layers)):
feed[self.state_ph[l]] = state[l]
_, state = self.sess.run([self.update_opt, self.final_state], feed_dict=feed)
start = start+min_len-1
mask = np.arange(len(iters))[(end - start) <= 1]
for idx in mask:
maxiter += 1
if maxiter >= len(offset_idx)-1:
finished = True
break
iters[idx] = maxiter
start[idx] = offset_idx[user_idx[maxiter]]
end[idx] = offset_idx[user_idx[maxiter]+1]
if len(mask):
for i in range(len(self.layers)):
state[i][mask] = 0
result = self.evaluate_model()
self.logger.info("epoch %d:\t%s" % (epoch, result))
def _get_user_embeddings(self):
users = np.arange(self.users_num, dtype=np.int32)
u_nnz = np.array([self.train_matrix[u].nnz for u in users], dtype=np.int32)
users = users[np.argsort(-u_nnz)]
user_embeddings = np.zeros([self.users_num, self.layers[-1]], dtype=np.float32) # saving user embedding
data_uit, offset_idx = self.data_uit, self.offset_idx
data_items = data_uit[:, 1]
state = [np.zeros([self.batch_size, n_unit], dtype=np.float32) for n_unit in self.layers]
batch_iter = np.arange(self.batch_size, dtype=np.int32)
next_iter = batch_iter.max() + 1
start = offset_idx[users[batch_iter]]
end = offset_idx[users[batch_iter] + 1] # the start index of next user
batch_mask = np.ones([self.batch_size], dtype=np.int32)
while np.sum(batch_mask) > 0:
min_len = (end - start).min()
for i in range(min_len):
cur_items = data_items[start + i]
feed = {self.X_ph: cur_items}
for l in range(len(self.layers)):
feed[self.state_ph[l]] = state[l]
u_emb, state = self.sess.run([self.u_emb, self.final_state], feed_dict=feed)
start = start + min_len
mask = np.arange(self.batch_size)[(end - start) == 0]
for idx in mask:
u = users[batch_iter[idx]]
user_embeddings[u] = u_emb[idx] # saving user embedding
if next_iter < self.users_num:
batch_iter[idx] = next_iter
start[idx] = offset_idx[users[next_iter]]
end[idx] = offset_idx[users[next_iter] + 1]
next_iter += 1
else:
batch_mask[idx] = 0
start[idx] = 0
end[idx] = offset_idx[-1]
for i, _ in enumerate(self.layers):
state[i][mask] = 0
return user_embeddings
def evaluate_model(self):
self.cur_user_embeddings = self._get_user_embeddings()
self.cur_item_embeddings, self.cur_item_biases = self.sess.run([self.item_embeddings, self.item_biases])
return self.evaluator.evaluate(self)
def predict(self, users, items=None):
user_embeddings = self.cur_user_embeddings[users]
all_ratings = np.matmul(user_embeddings, self.cur_item_embeddings.T) + self.cur_item_biases
# final_act = leaky-relu
if self.final_act == tf.nn.relu:
all_ratings = np.maximum(all_ratings, 0)
elif self.final_act == tf.identity:
all_ratings = all_ratings
elif self.final_act == tf.nn.leaky_relu:
all_ratings = np.maximum(all_ratings, all_ratings*0.2)
else:
pass
all_ratings = np.array(all_ratings, dtype=np.float32)
if items is not None:
all_ratings = [all_ratings[idx][item] for idx, item in enumerate(items)]
return all_ratings
|
library/python/symbols/python/ut/test_ctypes.py | ibr11/catboost | 6,989 | 11183608
from ctypes import (
byref, POINTER, c_int, c_char, c_char_p,
c_void_p, py_object, c_ssize_t, pythonapi, Structure
)
c_ssize_p = POINTER(c_ssize_t)
class Py_buffer(Structure):
_fields_ = [
('buf', c_void_p),
('obj', py_object),
('len', c_ssize_t),
('itemsize', c_ssize_t),
('readonly', c_int),
('ndim', c_int),
('format', c_char_p),
('shape', c_ssize_p),
('strides', c_ssize_p),
('suboffsets', c_ssize_p),
('smalltable', c_ssize_t * 2),
('internal', c_void_p)
]
def get_buffer(obj):
buf = Py_buffer()
pythonapi.PyObject_GetBuffer(py_object(obj), byref(buf), 0)
try:
buffer_type = c_char * buf.len
return buffer_type.from_address(buf.buf)
finally:
pythonapi.PyBuffer_Release(byref(buf))
def test_buffer():
assert get_buffer(b'test string')
|
tests/test_model_lognormal.py | confusedcrib/riskquant | 567 | 11183657
import unittest
from riskquant.model import lognormal_magnitude
class MyTestCase(unittest.TestCase):
def setUp(self):
self.logn = lognormal_magnitude.LognormalMagnitude(1, 10)
def testDistribution(self):
# We defined the cdf(low) ~ 0.05 and the cdf(hi) ~ 0.95 so that
# it would be the 90% confidence interval. Check that it's true.
self.assertTrue(0.049 < self.logn.distribution.cdf(1) < 0.051)
self.assertTrue(0.949 < self.logn.distribution.cdf(10) < 0.951)
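    # Sketch of how a 90% interval (low, high) is commonly mapped to lognormal parameters
    # (an assumed derivation, not necessarily the library's exact implementation):
    #   mu = (ln(low) + ln(high)) / 2
    #   sigma = (ln(high) - ln(low)) / (2 * 1.6449)  # 1.6449 ~ z-score of the 95th percentile
    #   mean = exp(mu + sigma ** 2 / 2)
    # For (635000, 19000000) this gives roughly 5.92e6, consistent with testHardParameters below.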
def testHardParameters(self):
# Test difficult-to-fit parameter values
hard = lognormal_magnitude.LognormalMagnitude(635000, 19000000)
self.assertAlmostEqual(5922706.83351131, hard.mean())
if __name__ == '__main__':
unittest.main()
|
meteostat/series/fetch.py | mitchkaden/meteostat-python | 133 | 11183677
"""
Fetch Data
Meteorological data provided by Meteostat (https://dev.meteostat.net)
under the terms of the Creative Commons Attribution-NonCommercial
4.0 International Public License.
The code is licensed under the MIT license.
"""
from copy import copy
import pandas as pd
def fetch(self) -> pd.DataFrame:
"""
Fetch DataFrame
"""
# Copy DataFrame
temp = copy(self._data)
# Remove station index if it's a single station
if len(self._stations) == 1 and 'station' in temp.index.names:
temp = temp.reset_index(level='station', drop=True)
# Return data frame
return temp
|
moya/cache/disabledcache.py | moyaproject/moya | 129 | 11183689
from __future__ import unicode_literals
from .base import Cache
class DisabledCache(Cache):
cache_backend_name = "disabled"
enabled = False
def get(self, key, default=None):
return default
def set(self, key, value, time=0):
pass
def delete(self, key):
pass
|
urizen/core/tile.py | vurmux/urizen | 107 | 11183742
#!/usr/bin/python3
class Tile(object):
"""
Tile class
Attributes:
name -- Metatile name
index -- Tile index
orientation -- Tile orientation for non-default metatile geometry
frame -- Tile frame for animated tiles
image -- PIL Image of a tile
tileset_name -- Name of the tileset
tileset_index -- Tile index in a tileset
"""
def __init__(self, name, image, tileset_name, tileset_index, index=None, orientation=None, frame=None):
self.name = name
self.index = index
self.orientation = orientation
self.frame = frame
self.image = image
self.tileset_name = tileset_name
self.tileset_index = tileset_index
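# Minimal construction sketch (illustrative; `img` would normally be a PIL Image and the
# literal values are placeholders, not names taken from this repo):
#   from PIL import Image
#   img = Image.new("RGBA", (16, 16))
#   grass = Tile("grass", img, tileset_name="base", tileset_index=0, frame=0)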
|
janitor/functions/select_columns.py | farhanreynaldo/pyjanitor | 674 | 11183749
import pandas_flavor as pf
import pandas as pd
from janitor.utils import deprecated_alias
from janitor.functions.utils import _select_column_names
from pandas.api.types import is_list_like
@pf.register_dataframe_method
@deprecated_alias(search_cols="search_column_names")
def select_columns(
df: pd.DataFrame,
*args,
invert: bool = False,
) -> pd.DataFrame:
"""
Method-chainable selection of columns.
Not applicable to MultiIndex columns.
It accepts a string, shell-like glob strings `(*string*)`,
regex, slice, array-like object, or a list of the previous options.
This method does not mutate the original DataFrame.
Optional ability to invert selection of columns available as well.
```python
import pandas as pd
import janitor
import numpy as np
import datetime
import re
from janitor import patterns
from pandas.api.types import is_datetime64_dtype
df = pd.DataFrame(
{
"id": [0, 1],
"Name": ["ABC", "XYZ"],
"code": [1, 2],
"code1": [4, np.nan],
"code2": ["8", 5],
"type": ["S", "R"],
"type1": ["E", np.nan],
"type2": ["T", "U"],
"code3": pd.Series(["a", "b"], dtype="category"),
"type3": pd.to_datetime([np.datetime64("2018-01-01"),
datetime.datetime(2018, 1, 1)]),
}
)
df
id Name code code1 code2 type type1 type2 code3 type3
0 0 ABC 1 4.0 8 S E T a 2018-01-01
1 1 XYZ 2 NaN 5 R NaN U b 2018-01-01
```
- Select by string:
```
df.select_columns("id")
id
0 0
1 1
```
- Select via shell-like glob strings (`*`) is possible:
```python
df.select_columns("type*")
type type1 type2 type3
0 S E T 2018-01-01
1 R NaN U 2018-01-01
```
- Select by slice:
```python
df.select_columns(slice("code1", "type1"))
code1 code2 type type1
0 4.0 8 S E
1 NaN 5 R NaN
```
- Select by `Callable` (the callable is applied to every column
and should return a single `True` or `False` per column):
```python
df.select_columns(is_datetime64_dtype)
type3
0 2018-01-01
1 2018-01-01
df.select_columns(lambda x: x.name.startswith("code") or
x.name.endswith("1"))
code code1 code2 type1 code3
0 1 4.0 8 E a
1 2 NaN 5 NaN b
df.select_columns(lambda x: x.isna().any())
code1 type1
0 4.0 E
1 NaN NaN
```
- Select by regular expression:
```python
df.select_columns(re.compile("\\d+"))
code1 code2 type1 type2 code3 type3
0 4.0 8 E T a 2018-01-01
1 NaN 5 NaN U b 2018-01-01
# same as above, with janitor.patterns
# simply a wrapper around re.compile
df.select_columns(patterns("\\d+"))
code1 code2 type1 type2 code3 type3
0 4.0 8 E T a 2018-01-01
1 NaN 5 NaN U b 2018-01-01
```
- Select a combination of the above
(you can combine any of the previous options):
```python
df.select_columns("id", "code*", slice("code", "code2"))
id code code1 code2 code3
0 0 1 4.0 8 a
1 1 2 NaN 5 b
```
- You can also pass a sequence of booleans:
```python
df.select_columns([True, False, True, True, True,
False, False, False, True, False])
id code code1 code2 code3
0 0 1 4.0 8 a
1 1 2 NaN 5 b
```
- Setting `invert` to `True`
returns the complement of the columns provided:
```python
df.select_columns("id", "code*", slice("code", "code2"),
invert = True)
Name type type1 type2 type3
0 ABC S E T 2018-01-01
1 XYZ R NaN U 2018-01-01
```
Functional usage example:
```python
import pandas as pd
import janitor as jn
df = pd.DataFrame(...)
df = jn.select_columns('a', 'b', 'col_*',
invert=True)
```
Method-chaining example:
```python
df = (pd.DataFrame(...)
.select_columns('a', 'b', 'col_*',
invert=True))
```
:param df: A pandas DataFrame.
:param args: Valid inputs include:
- an exact column name to look for
- a shell-style glob string (e.g., `*_thing_*`)
- a regular expression
- a callable which is applicable to each Series in the dataframe
- variable arguments of all the aforementioned.
- a sequence of booleans.
:param invert: Whether or not to invert the selection.
This will result in the selection of the complement of the columns
provided.
:returns: A pandas DataFrame with the specified columns selected.
"""
# applicable for any
# list-like object (ndarray, Series, pd.Index, ...)
# excluding tuples, which are returned as is
search_column_names = []
for arg in args:
if is_list_like(arg) and (not isinstance(arg, tuple)):
search_column_names.extend([*arg])
else:
search_column_names.append(arg)
if len(search_column_names) == 1:
search_column_names = search_column_names[0]
full_column_list = _select_column_names(search_column_names, df)
if invert:
return df.drop(columns=full_column_list)
return df.loc[:, full_column_list]
|
pycaw/api/endpointvolume/__init__.py | Jan-Zeiseweis/pycaw | 234 | 11183773
from ctypes import HRESULT, POINTER, c_float
from ctypes.wintypes import BOOL, DWORD, UINT
from comtypes import COMMETHOD, GUID, IUnknown
from .depend import PAUDIO_VOLUME_NOTIFICATION_DATA
class IAudioEndpointVolumeCallback(IUnknown):
_iid_ = GUID('{b1136c83-b6b5-4add-98a5-a2df8eedf6fa}')
_methods_ = (
# HRESULT OnNotify(
# [in] PAUDIO_VOLUME_NOTIFICATION_DATA pNotify);
COMMETHOD([], HRESULT, 'OnNotify',
(['in'],
PAUDIO_VOLUME_NOTIFICATION_DATA,
'pNotify')),
)
class IAudioEndpointVolume(IUnknown):
_iid_ = GUID('{5CDF2C82-841E-4546-9722-0CF74078229A}')
_methods_ = (
# HRESULT RegisterControlChangeNotify(
# [in] IAudioEndpointVolumeCallback *pNotify);
COMMETHOD([], HRESULT, 'RegisterControlChangeNotify',
(['in'],
POINTER(IAudioEndpointVolumeCallback),
'pNotify')),
# HRESULT UnregisterControlChangeNotify(
# [in] IAudioEndpointVolumeCallback *pNotify);
COMMETHOD([], HRESULT, 'UnregisterControlChangeNotify',
(['in'],
POINTER(IAudioEndpointVolumeCallback),
'pNotify')),
# HRESULT GetChannelCount([out] UINT *pnChannelCount);
COMMETHOD([], HRESULT, 'GetChannelCount',
(['out'], POINTER(UINT), 'pnChannelCount')),
# HRESULT SetMasterVolumeLevel(
# [in] float fLevelDB, [in] LPCGUID pguidEventContext);
COMMETHOD([], HRESULT, 'SetMasterVolumeLevel',
(['in'], c_float, 'fLevelDB'),
(['in'], POINTER(GUID), 'pguidEventContext')),
# HRESULT SetMasterVolumeLevelScalar(
# [in] float fLevel, [in] LPCGUID pguidEventContext);
COMMETHOD([], HRESULT, 'SetMasterVolumeLevelScalar',
(['in'], c_float, 'fLevel'),
(['in'], POINTER(GUID), 'pguidEventContext')),
# HRESULT GetMasterVolumeLevel([out] float *pfLevelDB);
COMMETHOD([], HRESULT, 'GetMasterVolumeLevel',
(['out'], POINTER(c_float), 'pfLevelDB')),
# HRESULT GetMasterVolumeLevelScalar([out] float *pfLevel);
COMMETHOD([], HRESULT, 'GetMasterVolumeLevelScalar',
(['out'], POINTER(c_float), 'pfLevelDB')),
# HRESULT SetChannelVolumeLevel(
# [in] UINT nChannel,
# [in] float fLevelDB,
# [in] LPCGUID pguidEventContext);
COMMETHOD([], HRESULT, 'SetChannelVolumeLevel',
(['in'], UINT, 'nChannel'),
(['in'], c_float, 'fLevelDB'),
(['in'], POINTER(GUID), 'pguidEventContext')),
# HRESULT SetChannelVolumeLevelScalar(
# [in] UINT nChannel,
# [in] float fLevel,
# [in] LPCGUID pguidEventContext);
COMMETHOD([], HRESULT, 'SetChannelVolumeLevelScalar',
(['in'], DWORD, 'nChannel'),
(['in'], c_float, 'fLevelDB'),
(['in'], POINTER(GUID), 'pguidEventContext')),
# HRESULT GetChannelVolumeLevel(
# [in] UINT nChannel,
# [out] float *pfLevelDB);
COMMETHOD([], HRESULT, 'GetChannelVolumeLevel',
(['in'], UINT, 'nChannel'),
(['out'], POINTER(c_float), 'pfLevelDB')),
# HRESULT GetChannelVolumeLevelScalar(
# [in] UINT nChannel,
# [out] float *pfLevel);
COMMETHOD([], HRESULT, 'GetChannelVolumeLevelScalar',
(['in'], DWORD, 'nChannel'),
(['out'], POINTER(c_float), 'pfLevelDB')),
# HRESULT SetMute([in] BOOL bMute, [in] LPCGUID pguidEventContext);
COMMETHOD([], HRESULT, 'SetMute',
(['in'], BOOL, 'bMute'),
(['in'], POINTER(GUID), 'pguidEventContext')),
# HRESULT GetMute([out] BOOL *pbMute);
COMMETHOD([], HRESULT, 'GetMute',
(['out'], POINTER(BOOL), 'pbMute')),
# HRESULT GetVolumeStepInfo(
# [out] UINT *pnStep,
# [out] UINT *pnStepCount);
COMMETHOD([], HRESULT, 'GetVolumeStepInfo',
(['out'], POINTER(DWORD), 'pnStep'),
(['out'], POINTER(DWORD), 'pnStepCount')),
# HRESULT VolumeStepUp([in] LPCGUID pguidEventContext);
COMMETHOD([], HRESULT, 'VolumeStepUp',
(['in'], POINTER(GUID), 'pguidEventContext')),
# HRESULT VolumeStepDown([in] LPCGUID pguidEventContext);
COMMETHOD([], HRESULT, 'VolumeStepDown',
(['in'], POINTER(GUID), 'pguidEventContext')),
# HRESULT QueryHardwareSupport([out] DWORD *pdwHardwareSupportMask);
COMMETHOD([], HRESULT, 'QueryHardwareSupport',
(['out'], POINTER(DWORD), 'pdwHardwareSupportMask')),
# HRESULT GetVolumeRange(
# [out] float *pfLevelMinDB,
# [out] float *pfLevelMaxDB,
# [out] float *pfVolumeIncrementDB);
COMMETHOD([], HRESULT, 'GetVolumeRange',
(['out'], POINTER(c_float), 'pfMin'),
(['out'], POINTER(c_float), 'pfMax'),
(['out'], POINTER(c_float), 'pfIncr')))
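# Typical activation sketch (relies on pycaw's higher-level AudioUtilities helper and a
# Windows COM environment, so treat it as an assumption rather than part of this module):
#   from ctypes import cast, POINTER
#   from comtypes import CLSCTX_ALL
#   from pycaw.pycaw import AudioUtilities
#   device = AudioUtilities.GetSpeakers()
#   interface = device.Activate(IAudioEndpointVolume._iid_, CLSCTX_ALL, None)
#   volume = cast(interface, POINTER(IAudioEndpointVolume))
#   volume.SetMasterVolumeLevelScalar(0.5, None)  # set master volume to 50%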
class IAudioMeterInformation(IUnknown):
_iid_ = GUID('{C02216F6-8C67-4B5B-9D00-D008E73E0064}')
_methods_ = (
# HRESULT GetPeakValue([out] c_float *pfPeak);
COMMETHOD([], HRESULT, 'GetPeakValue',
(['out'], POINTER(c_float), 'pfPeak')),)
|
src/pipelines/epidemiology/us_imperial.py | chrismayemba/covid-19-open-data | 430 | 11183793
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict
from pandas import DataFrame
from lib.cast import safe_int_cast
from lib.data_source import DataSource
from lib.utils import table_rename
class ImperialDataSource(DataSource):
def parse_dataframes(
self, dataframes: Dict[str, DataFrame], aux: Dict[str, DataFrame], **parse_opts
) -> DataFrame:
# Rename the appropriate columns
data = table_rename(
dataframes[0],
{
"date": "date",
"age": "age",
"cum.deaths": "total_deceased",
"daily.deaths": "new_deceased",
"code": "subregion1_code",
},
drop=True,
)
# Convert date to ISO format
data["date"] = data["date"].apply(lambda x: str(x)[:10])
# Correct an error in the age bins from data source
data.loc[data["age"] == "-1-9", "age"] = "1-9"
# Parse age to match our group names
def parse_age_group(age_group: str):
new_age_group = ""
age_bins = age_group.strip().replace("+", "-").split("-", 1)
age_lo = safe_int_cast(age_bins[0])
new_age_group += f"{age_lo:02d}-"
if len(age_bins) > 1 and age_bins[1]:
age_hi = safe_int_cast(age_bins[1])
new_age_group += f"{age_hi:02d}"
return new_age_group
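        # Worked examples of the transform above: "1-9" -> "01-09", "10-19" -> "10-19",
        # "80+" -> "80-" (an open-ended upper bin keeps the trailing dash).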
data["age"] = data["age"].apply(parse_age_group)
# Derive key from the subregion code
data["key"] = "US_" + data["subregion1_code"]
# Some of the places are not US states
data.loc[data["subregion1_code"] == "NYC", "key"] = "US_NY_NYC"
# Compute our own age groups since they are not uniform across states
for idx in range(10):
data[f"age_bin_{idx:02d}"] = None
data[f"new_deceased_age_{idx:02d}"] = None
data[f"total_deceased_age_{idx:02d}"] = None
for key in data["key"].unique():
mask = data["key"] == key
age_bins = data.loc[mask, "age"].unique()
sorted_age_bins = sorted(age_bins, key=lambda x: safe_int_cast(x.split("-")[0]))
for idx, age_bin_val in enumerate(sorted_age_bins):
data.loc[mask, f"age_bin_{idx:02d}"] = age_bin_val
age_bin_mask = mask & (data["age"] == age_bin_val)
data.loc[age_bin_mask, f"new_deceased_age_{idx:02d}"] = data.loc[
age_bin_mask, "new_deceased"
]
data.loc[age_bin_mask, f"total_deceased_age_{idx:02d}"] = data.loc[
age_bin_mask, "total_deceased"
]
# Output the results
return data.drop(columns=["age", "subregion1_code"])
|
alipay/aop/api/domain/AlipayPcreditLoanCollateralCarModifyModel.py | snowxmas/alipay-sdk-python-all | 213 | 11183807
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AlipayPcreditLoanCollateralCarModifyModel(object):
def __init__(self):
self._apply_no = None
self._car_brand_id = None
self._car_brand_name = None
self._car_color = None
self._car_engine_no = None
self._car_mileage = None
self._car_model_id = None
self._car_model_name = None
self._car_reg_date = None
self._car_series_id = None
self._car_series_name = None
self._car_vin = None
self._lic_plate_address = None
self._lic_plate_no = None
self._out_request_no = None
@property
def apply_no(self):
return self._apply_no
@apply_no.setter
def apply_no(self, value):
self._apply_no = value
@property
def car_brand_id(self):
return self._car_brand_id
@car_brand_id.setter
def car_brand_id(self, value):
self._car_brand_id = value
@property
def car_brand_name(self):
return self._car_brand_name
@car_brand_name.setter
def car_brand_name(self, value):
self._car_brand_name = value
@property
def car_color(self):
return self._car_color
@car_color.setter
def car_color(self, value):
self._car_color = value
@property
def car_engine_no(self):
return self._car_engine_no
@car_engine_no.setter
def car_engine_no(self, value):
self._car_engine_no = value
@property
def car_mileage(self):
return self._car_mileage
@car_mileage.setter
def car_mileage(self, value):
self._car_mileage = value
@property
def car_model_id(self):
return self._car_model_id
@car_model_id.setter
def car_model_id(self, value):
self._car_model_id = value
@property
def car_model_name(self):
return self._car_model_name
@car_model_name.setter
def car_model_name(self, value):
self._car_model_name = value
@property
def car_reg_date(self):
return self._car_reg_date
@car_reg_date.setter
def car_reg_date(self, value):
self._car_reg_date = value
@property
def car_series_id(self):
return self._car_series_id
@car_series_id.setter
def car_series_id(self, value):
self._car_series_id = value
@property
def car_series_name(self):
return self._car_series_name
@car_series_name.setter
def car_series_name(self, value):
self._car_series_name = value
@property
def car_vin(self):
return self._car_vin
@car_vin.setter
def car_vin(self, value):
self._car_vin = value
@property
def lic_plate_address(self):
return self._lic_plate_address
@lic_plate_address.setter
def lic_plate_address(self, value):
self._lic_plate_address = value
@property
def lic_plate_no(self):
return self._lic_plate_no
@lic_plate_no.setter
def lic_plate_no(self, value):
self._lic_plate_no = value
@property
def out_request_no(self):
return self._out_request_no
@out_request_no.setter
def out_request_no(self, value):
self._out_request_no = value
def to_alipay_dict(self):
params = dict()
if self.apply_no:
if hasattr(self.apply_no, 'to_alipay_dict'):
params['apply_no'] = self.apply_no.to_alipay_dict()
else:
params['apply_no'] = self.apply_no
if self.car_brand_id:
if hasattr(self.car_brand_id, 'to_alipay_dict'):
params['car_brand_id'] = self.car_brand_id.to_alipay_dict()
else:
params['car_brand_id'] = self.car_brand_id
if self.car_brand_name:
if hasattr(self.car_brand_name, 'to_alipay_dict'):
params['car_brand_name'] = self.car_brand_name.to_alipay_dict()
else:
params['car_brand_name'] = self.car_brand_name
if self.car_color:
if hasattr(self.car_color, 'to_alipay_dict'):
params['car_color'] = self.car_color.to_alipay_dict()
else:
params['car_color'] = self.car_color
if self.car_engine_no:
if hasattr(self.car_engine_no, 'to_alipay_dict'):
params['car_engine_no'] = self.car_engine_no.to_alipay_dict()
else:
params['car_engine_no'] = self.car_engine_no
if self.car_mileage:
if hasattr(self.car_mileage, 'to_alipay_dict'):
params['car_mileage'] = self.car_mileage.to_alipay_dict()
else:
params['car_mileage'] = self.car_mileage
if self.car_model_id:
if hasattr(self.car_model_id, 'to_alipay_dict'):
params['car_model_id'] = self.car_model_id.to_alipay_dict()
else:
params['car_model_id'] = self.car_model_id
if self.car_model_name:
if hasattr(self.car_model_name, 'to_alipay_dict'):
params['car_model_name'] = self.car_model_name.to_alipay_dict()
else:
params['car_model_name'] = self.car_model_name
if self.car_reg_date:
if hasattr(self.car_reg_date, 'to_alipay_dict'):
params['car_reg_date'] = self.car_reg_date.to_alipay_dict()
else:
params['car_reg_date'] = self.car_reg_date
if self.car_series_id:
if hasattr(self.car_series_id, 'to_alipay_dict'):
params['car_series_id'] = self.car_series_id.to_alipay_dict()
else:
params['car_series_id'] = self.car_series_id
if self.car_series_name:
if hasattr(self.car_series_name, 'to_alipay_dict'):
params['car_series_name'] = self.car_series_name.to_alipay_dict()
else:
params['car_series_name'] = self.car_series_name
if self.car_vin:
if hasattr(self.car_vin, 'to_alipay_dict'):
params['car_vin'] = self.car_vin.to_alipay_dict()
else:
params['car_vin'] = self.car_vin
if self.lic_plate_address:
if hasattr(self.lic_plate_address, 'to_alipay_dict'):
params['lic_plate_address'] = self.lic_plate_address.to_alipay_dict()
else:
params['lic_plate_address'] = self.lic_plate_address
if self.lic_plate_no:
if hasattr(self.lic_plate_no, 'to_alipay_dict'):
params['lic_plate_no'] = self.lic_plate_no.to_alipay_dict()
else:
params['lic_plate_no'] = self.lic_plate_no
if self.out_request_no:
if hasattr(self.out_request_no, 'to_alipay_dict'):
params['out_request_no'] = self.out_request_no.to_alipay_dict()
else:
params['out_request_no'] = self.out_request_no
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AlipayPcreditLoanCollateralCarModifyModel()
if 'apply_no' in d:
o.apply_no = d['apply_no']
if 'car_brand_id' in d:
o.car_brand_id = d['car_brand_id']
if 'car_brand_name' in d:
o.car_brand_name = d['car_brand_name']
if 'car_color' in d:
o.car_color = d['car_color']
if 'car_engine_no' in d:
o.car_engine_no = d['car_engine_no']
if 'car_mileage' in d:
o.car_mileage = d['car_mileage']
if 'car_model_id' in d:
o.car_model_id = d['car_model_id']
if 'car_model_name' in d:
o.car_model_name = d['car_model_name']
if 'car_reg_date' in d:
o.car_reg_date = d['car_reg_date']
if 'car_series_id' in d:
o.car_series_id = d['car_series_id']
if 'car_series_name' in d:
o.car_series_name = d['car_series_name']
if 'car_vin' in d:
o.car_vin = d['car_vin']
if 'lic_plate_address' in d:
o.lic_plate_address = d['lic_plate_address']
if 'lic_plate_no' in d:
o.lic_plate_no = d['lic_plate_no']
if 'out_request_no' in d:
o.out_request_no = d['out_request_no']
return o
|
alipay/aop/api/domain/BenefitDetailInfo.py | antopen/alipay-sdk-python-all | 213 | 11183811 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.BenefitAmountInfo import BenefitAmountInfo
from alipay.aop.api.domain.BenefitDateInfo import BenefitDateInfo
from alipay.aop.api.domain.BenefitDisplayInfo import BenefitDisplayInfo
from alipay.aop.api.domain.BenefitSource import BenefitSource
class BenefitDetailInfo(object):
def __init__(self):
self._benefit_amount_info = None
self._benefit_date_info = None
self._benefit_display_info = None
self._benefit_id = None
self._benefit_operation_id = None
self._benefit_operation_time = None
self._benefit_source = None
self._benefit_status = None
self._benefit_type = None
self._customer_id = None
@property
def benefit_amount_info(self):
return self._benefit_amount_info
@benefit_amount_info.setter
def benefit_amount_info(self, value):
if isinstance(value, BenefitAmountInfo):
self._benefit_amount_info = value
else:
self._benefit_amount_info = BenefitAmountInfo.from_alipay_dict(value)
@property
def benefit_date_info(self):
return self._benefit_date_info
@benefit_date_info.setter
def benefit_date_info(self, value):
if isinstance(value, BenefitDateInfo):
self._benefit_date_info = value
else:
self._benefit_date_info = BenefitDateInfo.from_alipay_dict(value)
@property
def benefit_display_info(self):
return self._benefit_display_info
@benefit_display_info.setter
def benefit_display_info(self, value):
if isinstance(value, BenefitDisplayInfo):
self._benefit_display_info = value
else:
self._benefit_display_info = BenefitDisplayInfo.from_alipay_dict(value)
@property
def benefit_id(self):
return self._benefit_id
@benefit_id.setter
def benefit_id(self, value):
self._benefit_id = value
@property
def benefit_operation_id(self):
return self._benefit_operation_id
@benefit_operation_id.setter
def benefit_operation_id(self, value):
self._benefit_operation_id = value
@property
def benefit_operation_time(self):
return self._benefit_operation_time
@benefit_operation_time.setter
def benefit_operation_time(self, value):
self._benefit_operation_time = value
@property
def benefit_source(self):
return self._benefit_source
@benefit_source.setter
def benefit_source(self, value):
if isinstance(value, BenefitSource):
self._benefit_source = value
else:
self._benefit_source = BenefitSource.from_alipay_dict(value)
@property
def benefit_status(self):
return self._benefit_status
@benefit_status.setter
def benefit_status(self, value):
self._benefit_status = value
@property
def benefit_type(self):
return self._benefit_type
@benefit_type.setter
def benefit_type(self, value):
self._benefit_type = value
@property
def customer_id(self):
return self._customer_id
@customer_id.setter
def customer_id(self, value):
self._customer_id = value
def to_alipay_dict(self):
params = dict()
if self.benefit_amount_info:
if hasattr(self.benefit_amount_info, 'to_alipay_dict'):
params['benefit_amount_info'] = self.benefit_amount_info.to_alipay_dict()
else:
params['benefit_amount_info'] = self.benefit_amount_info
if self.benefit_date_info:
if hasattr(self.benefit_date_info, 'to_alipay_dict'):
params['benefit_date_info'] = self.benefit_date_info.to_alipay_dict()
else:
params['benefit_date_info'] = self.benefit_date_info
if self.benefit_display_info:
if hasattr(self.benefit_display_info, 'to_alipay_dict'):
params['benefit_display_info'] = self.benefit_display_info.to_alipay_dict()
else:
params['benefit_display_info'] = self.benefit_display_info
if self.benefit_id:
if hasattr(self.benefit_id, 'to_alipay_dict'):
params['benefit_id'] = self.benefit_id.to_alipay_dict()
else:
params['benefit_id'] = self.benefit_id
if self.benefit_operation_id:
if hasattr(self.benefit_operation_id, 'to_alipay_dict'):
params['benefit_operation_id'] = self.benefit_operation_id.to_alipay_dict()
else:
params['benefit_operation_id'] = self.benefit_operation_id
if self.benefit_operation_time:
if hasattr(self.benefit_operation_time, 'to_alipay_dict'):
params['benefit_operation_time'] = self.benefit_operation_time.to_alipay_dict()
else:
params['benefit_operation_time'] = self.benefit_operation_time
if self.benefit_source:
if hasattr(self.benefit_source, 'to_alipay_dict'):
params['benefit_source'] = self.benefit_source.to_alipay_dict()
else:
params['benefit_source'] = self.benefit_source
if self.benefit_status:
if hasattr(self.benefit_status, 'to_alipay_dict'):
params['benefit_status'] = self.benefit_status.to_alipay_dict()
else:
params['benefit_status'] = self.benefit_status
if self.benefit_type:
if hasattr(self.benefit_type, 'to_alipay_dict'):
params['benefit_type'] = self.benefit_type.to_alipay_dict()
else:
params['benefit_type'] = self.benefit_type
if self.customer_id:
if hasattr(self.customer_id, 'to_alipay_dict'):
params['customer_id'] = self.customer_id.to_alipay_dict()
else:
params['customer_id'] = self.customer_id
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = BenefitDetailInfo()
if 'benefit_amount_info' in d:
o.benefit_amount_info = d['benefit_amount_info']
if 'benefit_date_info' in d:
o.benefit_date_info = d['benefit_date_info']
if 'benefit_display_info' in d:
o.benefit_display_info = d['benefit_display_info']
if 'benefit_id' in d:
o.benefit_id = d['benefit_id']
if 'benefit_operation_id' in d:
o.benefit_operation_id = d['benefit_operation_id']
if 'benefit_operation_time' in d:
o.benefit_operation_time = d['benefit_operation_time']
if 'benefit_source' in d:
o.benefit_source = d['benefit_source']
if 'benefit_status' in d:
o.benefit_status = d['benefit_status']
if 'benefit_type' in d:
o.benefit_type = d['benefit_type']
if 'customer_id' in d:
o.customer_id = d['customer_id']
return o
|
python/runtime/model/metadata_test.py | lhw362950217/sqlflow | 4,742 | 11183824 | # Copyright 2020 The SQLFlow Authors. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import unittest
from runtime.feature.column import NumericColumn
from runtime.feature.field_desc import FieldDesc
from runtime.model.metadata import (collect_metadata, load_metadata,
save_metadata)
class TestMetadata(unittest.TestCase):
def setUp(self):
self.file_name = 'meta.json'
def tearDown(self):
if os.path.exists(self.file_name):
os.remove(self.file_name)
def test_metadata(self):
original_sql = '''
SELECT c1, c2, class FROM my_db.train_table
TO TRAIN my_docker_image:latest/DNNClassifier
WITH
model.n_classes = 3,
model.hidden_units = [16, 32],
validation.select="SELECT c1, c2, class FROM my_db.val_table"
INTO my_db.my_dnn_model;
'''
select = "SELECT c1, c2, class FROM my_db.train_table"
validation_select = "SELECT c1, c2, class FROM my_db.val_table"
model_repo_image = "my_docker_image:latest"
estimator = "DNNClassifier"
attributes = {
'n_classes': 3,
'hidden_units': [16, 32],
}
features = {
'feature_columns': [
NumericColumn(FieldDesc(name='c1', shape=[3], delimiter=",")),
NumericColumn(FieldDesc(name='c2', shape=[1])),
],
}
label = NumericColumn(FieldDesc(name='class', shape=[5],
delimiter=','))
def check_metadata(meta):
self.assertEqual(meta['original_sql'], original_sql)
self.assertEqual(meta['select'], select)
self.assertEqual(meta['validation_select'], validation_select)
self.assertEqual(meta['model_repo_image'], model_repo_image)
self.assertEqual(meta['class_name'], estimator)
self.assertEqual(meta['attributes'], attributes)
meta_features = meta['features']
meta_label = meta['label']
self.assertEqual(len(meta_features), 1)
self.assertEqual(len(meta_features['feature_columns']), 2)
meta_features = meta_features['feature_columns']
self.assertEqual(type(meta_features[0]), NumericColumn)
self.assertEqual(type(meta_features[1]), NumericColumn)
field_desc = meta_features[0].get_field_desc()[0]
self.assertEqual(field_desc.name, 'c1')
self.assertEqual(field_desc.shape, [3])
self.assertEqual(field_desc.delimiter, ',')
field_desc = meta_features[1].get_field_desc()[0]
self.assertEqual(field_desc.name, 'c2')
self.assertEqual(field_desc.shape, [1])
self.assertEqual(type(meta_label), NumericColumn)
field_desc = meta_label.get_field_desc()[0]
self.assertEqual(field_desc.name, 'class')
self.assertEqual(field_desc.shape, [5])
self.assertEqual(field_desc.delimiter, ',')
self.assertEqual(meta['evaluation'], {'accuracy': 0.5})
self.assertEqual(meta['my_data'], 0.25)
meta = collect_metadata(original_sql,
select,
validation_select,
model_repo_image,
estimator,
attributes,
features,
label, {'accuracy': 0.5},
my_data=0.25)
check_metadata(meta)
save_metadata(self.file_name, meta)
meta = load_metadata(self.file_name)
check_metadata(meta)
if __name__ == '__main__':
unittest.main()
|
os/example_getenv.py | Carglglz/micropython-lib | 1,556 | 11183857 | <gh_stars>1000+
import os
print(os.getenv("HOME", "def"))
|
pypy/module/binascii/moduledef.py | nanjekyejoannah/pypy | 381 | 11183868 |
"""
Mixed-module definition for the binascii module.
Note that there is also a pure Python implementation in lib_pypy/binascii.py;
the pypy/module/binascii/ version takes precedence if it is enabled.
"""
from pypy.interpreter.mixedmodule import MixedModule
class Module(MixedModule):
"""binascii - Conversion between binary data and ASCII"""
appleveldefs = {
}
interpleveldefs = {
'a2b_uu': 'interp_uu.a2b_uu',
'b2a_uu': 'interp_uu.b2a_uu',
'a2b_base64': 'interp_base64.a2b_base64',
'b2a_base64': 'interp_base64.b2a_base64',
'a2b_qp': 'interp_qp.a2b_qp',
'b2a_qp': 'interp_qp.b2a_qp',
'a2b_hqx': 'interp_hqx.a2b_hqx',
'b2a_hqx': 'interp_hqx.b2a_hqx',
'rledecode_hqx': 'interp_hqx.rledecode_hqx',
'rlecode_hqx': 'interp_hqx.rlecode_hqx',
'crc_hqx': 'interp_hqx.crc_hqx',
'crc32': 'interp_crc32.crc32',
'b2a_hex': 'interp_hexlify.hexlify',
'hexlify': 'interp_hexlify.hexlify',
'a2b_hex': 'interp_hexlify.unhexlify',
'unhexlify': 'interp_hexlify.unhexlify',
'Error' : 'space.fromcache(interp_binascii.Cache).w_error',
'Incomplete': 'space.fromcache(interp_binascii.Cache).w_incomplete',
}
|
data_analysis/study_numpy/numpy_functions/np_arange.py | 2581676612/python | 112 | 11183870 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 17-9-7 下午3:10
# @Author : Tom.Lee
# @File : np_arange.py
# @Product : PyCharm
# @Docs :
# @Source :
import numpy as np
# 4-dimensional array
t = np.arange(3 * 4 * 5 * 6).reshape((3, 4, 5, 6))
print len(t), len(t[0]), len(t[0][0]), len(t[0][0][0])
s = np.arange(3 * 4 * 5 * 6)[::-1].reshape((5, 4, 6, 3))
print len(s), len(s[0]), len(s[0][0]), len(s[0][0][0])
|
src/dal_select2/fields.py | robertispas/django-autocomplete-light | 1,368 | 11183872 | """Select2 field implementation module."""
from django.forms import ChoiceField
class ChoiceCallable:
def __init__(self, choices):
self.choices = choices
def __call__(self):
result = []
choices = self.choices() if callable(self.choices) else self.choices
for choice in choices or []:
if isinstance(choice, (list, tuple)):
result.append(choice)
else:
result.append((choice, choice))
return result
class Select2ListChoiceField(ChoiceField):
"""Allows a list of values to be used with a ChoiceField.
Avoids unusual things that can happen if Select2ListView is used for
a form where the text and value for choices are not the same.
"""
def __init__(self, choice_list=None, required=True, widget=None,
label=None, initial=None, help_text='', *args, **kwargs):
"""Use a list to generate choices in a ChoiceField.
.. py:param choice_list: The list to use to generate choices or a
function that returns a list.
"""
choices = ChoiceCallable(choice_list)
super(Select2ListChoiceField, self).__init__(
choices=choices, required=required, widget=widget, label=label,
initial=initial, help_text=help_text, *args, **kwargs
)
class Select2ListCreateChoiceField(Select2ListChoiceField):
"""Skips validation of choices so any value can be used."""
def validate(self, value):
"""Do not validate choices but check for empty."""
super(ChoiceField, self).validate(value)
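# A minimal usage sketch (illustrative only; the form class and choice values
# below are hypothetical, and ``from django import forms`` is assumed in the
# calling code):
#
#     class FruitForm(forms.Form):
#         fruit = Select2ListChoiceField(choice_list=['apple', 'banana'])
#         other = Select2ListCreateChoiceField(
#             choice_list=lambda: ['cherry'], required=False)
#
# ChoiceCallable normalizes plain values into (value, value) pairs when the
# choices are evaluated, so 'apple' is used as both value and label.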
|
examples/volumetric/probePoints.py | evanphilip/vedo | 836 | 11183891 | """Probe a voxel dataset at specified points
and plot a histogram of the values"""
from vedo import *
from vedo.pyplot import histogram
import numpy as np
vol = Volume(dataurl+'embryo.slc')
pts = np.random.rand(5000, 3)*256
mpts = probePoints(vol, pts).pointSize(3)
mpts.print()
# valid = mpts.pointdata['vtkValidPointMask']
scals = mpts.pointdata['SLCImage']
his = histogram(scals, xtitle='probed voxel value', xlim=(5,100))
show([(vol, Axes(vol), mpts, __doc__), his], N=2, sharecam=False).close()
|
packages/pyright-internal/src/tests/samples/genericTypes78.py | martindemello/pyright | 3,934 | 11183926 | # This sample tests the case where a generic function
# returns a generic Callable.
from typing import Callable, TypeVar
_T = TypeVar("_T")
def func1(val1: _T) -> Callable[[_T], None]:
def f(a: str):
...
# This should generate an error because str isn't
# compatible with _T.
return f
def func2(val1: _T) -> Callable[[_T], None]:
def f(a: _T):
...
return f
|
napari/_vispy/visuals/image.py | MaksHess/napari | 1,345 | 11183946 | <gh_stars>1000+
from vispy.scene.visuals import Image as BaseImage
# If data is not present, we need bounds to be None (see napari#3517)
class Image(BaseImage):
def _compute_bounds(self, axis, view):
if self._data is None:
return None
elif axis > 1:
return (0, 0)
else:
return (0, self.size[axis])
|
qmpy/web/views/materials/element_groups.py | tachyontraveler/qmpy | 103 | 11183950 | from django.template import RequestContext
from django.shortcuts import render_to_response
from django.template.context_processors import csrf
from qmpy.data import element_groups
def element_group_view(request):
data = {}
data["element_groups"] = dict(
[(k, ", ".join(v)) for k, v in list(element_groups.items())]
)
data["groups"] = sorted(element_groups.keys())
return render_to_response(
"materials/element_groups.html", data, RequestContext(request)
)
|
neurvps/models/__init__.py | keatonkraiger/neurvps | 133 | 11183968 | from .hourglass_pose import hg
from .vanishing_net import VanishingNet
|
react/__init__.py | kjkta/python-react | 1,603 | 11183998 | <filename>react/__init__.py
__version__ = '4.3.0'
default_app_config = 'react.apps.ReactConfig' |
tools/crates/lib/consts.py | chromium/chromium | 14,668 | 11184011 | # python3
# Copyright 2021 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import re
from datetime import datetime
CRATES_IO_VIEW = "https://crates.io/crates/{crate}"
CRATES_IO_DOWNLOAD = "https://static.crates.io/crates/{crate}/{crate}-{version}.crate"
# Allowed licenses, in the format they are specified in Cargo.toml files from
# crates.io, and the format to write to README.chromium.
ALLOWED_LICENSES = [
# ("Cargo.toml string", "License for README.chromium")
("Apache-2.0", "Apache 2.0"),
("MIT OR Apache-2.0", "Apache 2.0"),
("MIT/Apache-2.0", "Apache 2.0"),
("Apache-2.0 / MIT", "Apache 2.0"),
("Apache-2.0 OR MIT", "Apache 2.0"),
("Apache-2.0/MIT", "Apache 2.0"),
("MIT", "MIT"),
("Unlicense OR MIT", "MIT"),
("Unlicense/MIT", "MIT"),
("Apache-2.0 OR BSL-1.0", "Apache 2.0"),
("BSD-3-Clause", "BSD 3-Clause"),
("ISC", "ISC"),
]
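# Illustrative note: because the entries are ("Cargo.toml string", "README
# string") pairs, a lookup such as dict(ALLOWED_LICENSES).get("MIT OR Apache-2.0")
# yields "Apache 2.0"; how the tool consumes this list elsewhere is assumed,
# not shown in this file.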
# The subdirectory where crates are found, relative to the current working
# directory where the tool is run (i.e. `os.getcwd()`).
THIRD_PARTY = ["third_party", "rust"]
# Where to place the extracted crate inside the version epoch directory. If
# empty, it will be extracted directly to the epoch directory.
CRATE_INNER_DIR = ["crate"]
# Template for generating README.chromium files.
README_CHROMIUM = """Name: {crate_name}
URL: {url}
Description: {description}
Version: {version}
Security Critical: {security}
License: {license}
"""
# Crates that cannot be depended on. Dependencies should be removed from
# Cargo.toml files. Each one comes with a reason.
BLOCKED_CRATES = {
"cc":
"C/C++ code should be build by a GN rule, not from Rust code directly. See "
+ os.path.join(*(THIRD_PARTY + ["cc", "README.md"])),
}
# A Regex for parsing the output of `cargo tree`. This matches the dependencies
# and reports their name, version, if they are a proc macro, and their enabled
# features.
_CARGO_DEPS = \
r"(?:├──|└──) (?P<name>.*?) v(?P<version>[0-9]+.[0-9]+.[0-9]+)" \
r"(?P<isprocmacro> \(proc-macro\))?" \
r"(?: \((?P<path>[\/].*?)\))?" \
r"(?: (?P<features>[^( ][^ ]*))?(?: \(\*\))?"
CARGO_DEPS_REGEX = re.compile(_CARGO_DEPS)
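# Illustrative example of what CARGO_DEPS_REGEX captures; the input line is a
# hand-written sketch of `cargo tree` output, not verbatim tool output:
#
#     m = CARGO_DEPS_REGEX.match("├── syn v1.0.80 (proc-macro) default,full")
#     # m.group("name") -> "syn"; m.group("version") -> "1.0.80"
#     # m.group("isprocmacro") is set; m.group("features") -> "default,full"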
# Header at the top of BUILD.gn files. The {year} placeholder is substituted
# with the appropriate year.
GN_HEADER = \
"""# Copyright {year} The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import("//build/rust/cargo_crate.gni")
""".format(year=str(datetime.now().year))
# Comment on the skip_unit_tests field in BUILD.gn file output.
GN_TESTS_COMMENT = \
"""# Unit tests skipped. Generate with --with-tests to include them"""
# Comment on the visibility field in BUILD.gn file output.
GN_VISIBILITY_COMMENT = \
"""# Only for usage from third-party crates. Add the crate to
# third_party.toml to use it from first-party code."""
|
notebook/calendar_day_of_nth_dow.py | vhn0912/python-snippets | 174 | 11184047 | import calendar
import datetime
def get_day_of_nth_dow(year, month, nth, dow):
'''dow: Monday(0) - Sunday(6)'''
if nth < 1 or dow < 0 or dow > 6:
return None
first_dow, n = calendar.monthrange(year, month)
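    # monthrange() gives (weekday of the 1st, days in month);
    # (dow - first_dow) % 7 + 1 is the day of month of the first occurrence
    # of dow, and 7 * (nth - 1) advances to the nth occurrence.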
day = 7 * (nth - 1) + (dow - first_dow) % 7 + 1
return day if day <= n else None
print(calendar.month(2019, 1))
# January 2019
# Mo Tu We Th Fr Sa Su
# 1 2 3 4 5 6
# 7 8 9 10 11 12 13
# 14 15 16 17 18 19 20
# 21 22 23 24 25 26 27
# 28 29 30 31
#
print(get_day_of_nth_dow(2019, 1, 1, 1)) # 1st Tuesday(1)
# 1
print(get_day_of_nth_dow(2019, 1, 2, 0)) # 2nd Monday(0)
# 14
print(get_day_of_nth_dow(2019, 1, 3, 6)) # 3rd Sunday(6)
# 20
print(get_day_of_nth_dow(2019, 1, 5, 3)) # 5th Thursday(3)
# 31
print(get_day_of_nth_dow(2019, 1, 5, 4))
# None
print(get_day_of_nth_dow(2019, 1, 0, 4))
# None
print(get_day_of_nth_dow(2019, 1, 1, 10))
# None
print(get_day_of_nth_dow(2019, 1, 2, 1.8))
# 8.8
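# Note: a float dow such as 1.8 slips past the `dow < 0 or dow > 6` guard, so
# the arithmetic above returns a float day (8.8 here); the function expects
# integer arguments.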
def get_date_of_nth_dow(year, month, nth, dow):
day = get_day_of_nth_dow(year, month, nth, dow)
return datetime.date(year, month, day) if day else None
print(get_date_of_nth_dow(2019, 1, 1, 1))
# 2019-01-01
print(get_date_of_nth_dow(2019, 1, 1, 10))
# None
# print(get_date_of_nth_dow(2019, 1, 2, 1.8))
# TypeError: integer argument expected, got float
print([(m, get_day_of_nth_dow(2019, m, 2, 0)) for m in range(1, 13)])
# [(1, 14), (2, 11), (3, 11), (4, 8), (5, 13), (6, 10), (7, 8), (8, 12), (9, 9), (10, 14), (11, 11), (12, 9)]
for y in range(2020, 2030):
print(get_date_of_nth_dow(y, 1, 2, 0))
# 2020-01-13
# 2021-01-11
# 2022-01-10
# 2023-01-09
# 2024-01-08
# 2025-01-13
# 2026-01-12
# 2027-01-11
# 2028-01-10
# 2029-01-08
|
airmozilla/new/views.py | mozilla/airmozilla | 115 | 11184092 | <reponame>mozilla/airmozilla
# -*- coding: utf-8 -*-
import json
import os
from cStringIO import StringIO
from xml.parsers.expat import ExpatError
import requests
import xmltodict
from PIL import Image
from slugify import slugify
from django import http
from django.shortcuts import render, get_object_or_404, redirect
from django.db import transaction
from django.conf import settings
from django.utils import timezone
from django.db.models import Count, Q
from django.contrib.auth.decorators import login_required
from django.utils.functional import wraps
from django.template.base import TemplateDoesNotExist
from django.views.decorators.cache import never_cache
from django.views.decorators.http import require_POST
from django.views.decorators.csrf import csrf_exempt
from django.core.cache import cache
from django.contrib.auth.models import User
from django.core.files.base import ContentFile
from django.core.urlresolvers import reverse
from django.core.files import File
from django.core.files.temp import NamedTemporaryFile
from jsonview.decorators import json_view
from sorl.thumbnail import get_thumbnail
from csp.decorators import csp_update
from airmozilla.manage import vidly
from airmozilla.base.utils import get_base_url, prepare_vidly_video_url
from airmozilla.main.models import (
Event,
VidlySubmission,
Template,
Picture,
EventOldSlug,
Channel,
Approval,
get_profile_safely,
Tag,
)
from airmozilla.main.tasks import create_all_timestamp_pictures
from airmozilla.comments.models import Discussion
from airmozilla.uploads.models import Upload
from airmozilla.manage import videoinfo
from airmozilla.base.templatetags.jinja_helpers import show_duration
from airmozilla.base.utils import simplify_form_errors
from airmozilla.manage import sending
from airmozilla.base import youtube
from . import forms
def xhr_login_required(view_func):
"""similar to django.contrib.auth.decorators.login_required
except instead of redirecting it returns a 403 message if not
authenticated."""
@wraps(view_func)
def inner(request, *args, **kwargs):
if not request.user.is_authenticated():
return http.HttpResponse(
json.dumps({'error': "You must be logged in"}),
content_type='application/json',
status=403
)
return view_func(request, *args, **kwargs)
return inner
def must_be_your_event(f):
@wraps(f)
def inner(request, id, **kwargs):
assert request.user.is_authenticated()
event = get_object_or_404(Event, pk=id)
if event.creator != request.user:
return http.HttpResponseForbidden(
"Not your event to meddle with"
)
return f(request, event, **kwargs)
return inner
@login_required
@csp_update(
CONNECT_SRC='{}.s3.amazonaws.com'.format(
settings.S3_UPLOAD_BUCKET
)
)
def home(request):
context = {
'has_youtube_api_key': bool(settings.YOUTUBE_API_KEY),
}
request.show_sidebar = False
return render(request, 'new/home.html', context)
@xhr_login_required
def partial_template(request, template_name):
context = {}
if template_name == 'details.html':
context['form'] = forms.DetailsForm()
template_path = os.path.join('new/partials', template_name)
try:
return render(request, template_path, context)
except TemplateDoesNotExist:
raise http.Http404(template_name)
@json_view
@xhr_login_required
@require_POST
@transaction.atomic
def save_upload(request):
data = json.loads(request.body)
form = forms.SaveForm(data)
if not form.is_valid():
return http.HttpResponseBadRequest(form.errors)
url = form.cleaned_data['url']
file_name = form.cleaned_data['file_name'] or os.path.basename(url)
mime_type = form.cleaned_data['mime_type']
size = form.cleaned_data['size']
upload_time = form.cleaned_data['upload_time']
duration = data.get('duration')
new_upload = Upload.objects.create(
user=request.user,
url=url,
size=size,
file_name=file_name,
mime_type=mime_type,
upload_time=upload_time,
)
# now we can create the event to start with
event = Event.objects.create(
upload=new_upload,
status=Event.STATUS_INITIATED,
start_time=timezone.now(),
privacy=Event.PRIVACY_PUBLIC,
creator=request.user,
duration=duration,
)
new_upload.event = event
new_upload.save()
return {'id': event.id}
@never_cache
@xhr_login_required
@transaction.atomic
@must_be_your_event
@json_view
def event_edit(request, event):
if request.method == 'POST':
if event.status != Event.STATUS_INITIATED:
return http.HttpResponseBadRequest(
"You can't edit events that are NOT in the state of initiated."
)
title_before = event.title
form = forms.DetailsForm(json.loads(request.body), instance=event)
if form.is_valid():
form.save()
if event.title != title_before:
# Un-setting it will automatically pick a good slug.
# But first we need to forget any EventOldSlug
EventOldSlug.objects.filter(slug=event.slug).delete()
event.slug = None
event.save()
else:
return {'errors': simplify_form_errors(form.errors)}
context = {
'event': serialize_event(event),
}
return context
def serialize_event(event, extended=False):
data = {
'id': event.id,
'title': event.title,
'slug': event.slug,
'description': event.description,
'privacy': event.privacy,
'privacy_display': event.get_privacy_display(),
'status': event.status,
'status_display': event.get_status_display(),
'additional_links': event.additional_links,
'duration': event.duration,
'tags': [],
'channels': {},
'topics': {},
}
if extended:
# When it's the extended version, we return a list of dicts
# that contain the id, name, etc.
data['channels'] = []
data['topics'] = []
if event.slug:
data['url'] = reverse('main:event', args=(event.slug,))
for tag in event.tags.all():
data['tags'].append(tag.name) # good enough?
# lastly, make it a string
data['tags'] = ', '.join(sorted(data['tags']))
for channel in event.channels.all():
if extended:
data['channels'].append({
'id': channel.id,
'name': channel.name,
'url': reverse('main:home_channels', args=(channel.slug,)),
})
else:
data['channels'][channel.id] = True
for topic in event.topics.all():
if extended:
data['topics'].append({
'id': topic.id,
'topic': topic.topic,
})
else:
data['topics'][topic.id] = True
if extended:
# get a list of all the groups that need to approve it
data['approvals'] = []
for approval in Approval.objects.filter(event=event, approved=False):
data['approvals'].append({
'group_name': approval.group.name,
})
if event.placeholder_img or event.picture:
geometry = '160x90'
crop = 'center'
if event.picture:
thumb = get_thumbnail(
event.picture.file, geometry, crop=crop
)
else:
thumb = get_thumbnail(
event.placeholder_img, geometry, crop=crop
)
data['picture'] = {
'url': thumb.url,
'width': thumb.width,
'height': thumb.height,
}
if event.upload:
data['upload'] = {
'size': event.upload.size,
'url': event.upload.url,
'mime_type': event.upload.mime_type,
}
elif (
'youtube' in event.template.name.lower() and
event.template_environment.get('id')
):
data['upload'] = None
data['youtube_id'] = event.template_environment['id']
return data
@require_POST
@login_required
@transaction.atomic
@must_be_your_event
@json_view
def event_archive(request, event):
if event.status != Event.STATUS_INITIATED:
return http.HttpResponseBadRequest(
"You can't archive events that are NOT in the state of initiated."
)
submissions = VidlySubmission.objects.filter(
event=event,
url__startswith=event.upload.url
)
for vidly_submission in submissions.order_by('-submission_time'):
break
else:
# we haven't sent it in for archive yet
upload = event.upload
base_url = get_base_url(request)
webhook_url = base_url + reverse('new:vidly_media_webhook')
video_url = prepare_vidly_video_url(upload.url)
tag, error = vidly.add_media(
video_url,
hd=True,
notify_url=webhook_url,
# Note that we deliberately don't bother yet to set
# token_protection here because we don't yet know if the
# event is going to be private or not.
# Also, it's much quicker to make screencaptures of videos
# that are not token protected on vid.ly.
)
# then we need to record that we did this
vidly_submission = VidlySubmission.objects.create(
event=event,
url=video_url,
tag=tag,
hd=True,
submission_error=error or None
)
default_template = Template.objects.get(default_archive_template=True)
# Do an in place edit in case this started before the fetch_duration
# has started.
Event.objects.filter(id=event.id).update(
template=default_template,
template_environment={'tag': tag},
)
create_all_timestamp_pictures.delay(event.id)
return {
'tag': vidly_submission.tag,
'error': vidly_submission.submission_error
}
@require_POST
@login_required
@must_be_your_event
@json_view
def event_screencaptures(request, event):
if event.status != Event.STATUS_INITIATED:
return http.HttpResponseBadRequest(
"Events NOT in the state of initiated."
)
upload = event.upload
video_url = upload.url
context = {}
cache_key = 'fetching-{0}'.format(event.id)
    # This function sets the cache key `fetching-{id}` before and after
    # calling the functions in the videoinfo module.
    # The reason is that those calls might take many seconds, and the
    # webapp might send async calls to the event_picture view, which will
    # inform the webapp that the slow videoinfo processes are running and
    # thus that the webapp shouldn't kick it off yet.
seconds = event.duration
if not event.duration:
# it's a poor man's lock
if not cache.get(cache_key):
cache.set(cache_key, True, 60)
seconds = videoinfo.fetch_duration(
event,
video_url=video_url,
save=True,
verbose=settings.DEBUG
)
cache.delete(cache_key)
event = Event.objects.get(id=event.id)
context['seconds'] = seconds
# The reason we can't use `if event.duration:` is because the
# fetch_duration() does an inline-update instead of modifying
# the instance object.
no_pictures = Picture.objects.filter(event=event).count()
if event.duration and not no_pictures:
if not cache.get(cache_key):
cache.set(cache_key, True, 60)
event = Event.objects.get(id=event.id)
no_pictures = videoinfo.fetch_screencapture(
event,
video_url=video_url,
save=True,
verbose=settings.DEBUG,
set_first_available=not event.picture,
import_immediately=True,
)
cache.delete(cache_key)
event = Event.objects.get(id=event.id)
if no_pictures and not event.picture:
# no picture has been chosen previously
pictures = Picture.objects.filter(event=event).order_by('created')[:1]
for picture in pictures:
event.picture = picture
event.save()
break
context['no_pictures'] = no_pictures
return context
# Note that this view is publicly available.
# That means we can't trust the content but we can take it as a hint.
@csrf_exempt
@require_POST
def vidly_media_webhook(request):
if not request.POST.get('xml'):
return http.HttpResponseBadRequest("no 'xml'")
xml_string = request.POST['xml'].strip()
try:
struct = xmltodict.parse(xml_string)
except ExpatError:
return http.HttpResponseBadRequest("Bad 'xml'")
try:
task = struct['Response']['Result']['Task']
try:
vidly_submission = VidlySubmission.objects.get(
url=task['SourceFile'],
tag=task['MediaShortLink']
)
if task['Status'] == 'Finished':
if not vidly_submission.finished:
vidly_submission.finished = timezone.now()
vidly_submission.save()
event = vidly_submission.event
if (
task['Private'] == 'false' and
event.privacy != Event.PRIVACY_PUBLIC
):
# the event is private but the video is not
vidly.update_media_protection(
vidly_submission.tag,
True # make it private
)
if not vidly_submission.token_protection:
vidly_submission.token_protection = True
vidly_submission.save()
# Awesome!
# This event now has a fully working transcoded piece of
# media.
if (
event.status == Event.STATUS_PENDING or
event.status == Event.STATUS_PROCESSING
):
event.status = Event.STATUS_SCHEDULED
event.archive_time = timezone.now()
event.save()
# More awesome! We can start processing the transcoded media.
# XXX move this to a background task.
if not event.duration:
videoinfo.fetch_duration(
event,
save=True,
verbose=settings.DEBUG
)
event = Event.objects.get(id=event.id)
if event.duration:
if not Picture.objects.filter(event=event):
videoinfo.fetch_screencapture(
event,
save=True,
verbose=settings.DEBUG,
set_first_available=True,
)
elif task['Status'] == 'Error':
if not vidly_submission.errored:
vidly_submission.errored = timezone.now()
vidly_submission.save()
except VidlySubmission.DoesNotExist:
# remember, we can't trust the XML since it's publicly
# available and exposed as a webhook
pass
except KeyError:
# If it doesn't have a "Result" or "Task", it was just a notification
# that the media was added.
pass
return http.HttpResponse('OK\n')
@never_cache
@login_required
@must_be_your_event
@json_view
def event_picture(request, event):
if request.method == 'POST':
form = forms.PictureForm(json.loads(request.body), instance=event)
if not form.is_valid():
return http.HttpResponseBadRequest(form.errors)
with transaction.atomic():
form.save()
# if it has screen captures start returning them
pictures = Picture.objects.filter(event=event).order_by('created')
thumbnails = []
# geometry = request.GET.get('geometry', '160x90')
# crop = request.GET.get('crop', 'center')
geometry = '160x90'
crop = 'center'
for p in pictures:
thumb = get_thumbnail(
p.file, geometry, crop=crop
)
picked = event.picture and event.picture == p
thumbnails.append({
'id': p.id,
'url': thumb.url,
'width': thumb.width,
'height': thumb.height,
'picked': picked,
# 'large_url': large_thumb.url,
})
context = {}
if thumbnails:
context['thumbnails'] = thumbnails
cache_key = 'fetching-{0}'.format(event.id)
context['fetching'] = bool(cache.get(cache_key))
return context
@never_cache
@login_required
@must_be_your_event
@json_view
def event_summary(request, event):
return {
'event': serialize_event(event, extended=True),
'pictures': Picture.objects.filter(event=event).count(),
}
def _videos_by_tags(tags):
"""Return a list of dicts where each dict looks something like this:
{'id': 123, 'tag': 'abc123', 'Status': 'Processing', 'finished': False}
And if there's no VidlySubmission the dict will just look like this:
{'id': 124}
The advantage of this function is that you only need to do 1 query
to Vid.ly for a long list of tags.
"""
all_results = vidly.query(tags.keys())
video_contexts = []
for tag, event in tags.items():
video_context = {
'id': event.id,
}
if event.duration:
video_context['duration'] = event.duration
video_context['duration_human'] = show_duration(event.duration)
qs = VidlySubmission.objects.filter(event=event, tag=tag)
for vidly_submission in qs.order_by('-submission_time')[:1]:
video_context['tag'] = tag
results = all_results.get(tag, {})
video_context['status'] = results.get('Status')
video_context['finished'] = results.get('Status') == 'Finished'
if video_context['finished']:
if not vidly_submission.finished:
vidly_submission.finished = timezone.now()
vidly_submission.save()
if not event.archive_time:
event.archive_time = timezone.now()
event.save()
elif results.get('Status') == 'Error':
if not vidly_submission.errored:
vidly_submission.errored = timezone.now()
vidly_submission.save()
else:
video_context['estimated_time_left'] = (
vidly_submission.get_estimated_time_left()
)
break
video_contexts.append(video_context)
return video_contexts
@never_cache
@login_required
@must_be_your_event
@json_view
def event_video(request, event):
context = {}
tag = event.template_environment and event.template_environment.get('tag')
if tag:
tags = {tag: event}
contexts = _videos_by_tags(tags)
context = contexts[0]
return context
@require_POST
@login_required
@json_view
def videos(request):
"""Similar to event_video except it expects a 'ids' request parameter
and returns a dict of videos where the event ID is the keys."""
try:
ids = json.loads(request.body)['ids']
except ValueError as x:
return http.HttpResponseBadRequest(str(x))
events = Event.objects.filter(
id__in=ids,
creator=request.user,
template__name__icontains='vid.ly',
)
tags = {}
for event in events:
tag = (
event.template_environment and
event.template_environment.get('tag')
)
tags[tag] = event
return dict(
(x['id'], x)
for x in _videos_by_tags(tags)
)
@require_POST
@login_required
@must_be_your_event
@json_view
def event_publish(request, event):
if event.status != Event.STATUS_INITIATED:
return http.HttpResponseBadRequest("Not in an initiated state")
groups = []
with transaction.atomic():
# there has to be a Vid.ly video
if 'youtube' in event.template.name.lower():
event.status = Event.STATUS_SCHEDULED
else:
tag = event.template_environment['tag']
submission = None
qs = VidlySubmission.objects.filter(event=event, tag=tag)
for each in qs.order_by('-submission_time'):
submission = each
break
assert submission, "Event has no vidly submission"
results = vidly.query(tag).get(tag, {})
# Let's check the privacy/tokenization of the video.
# What matters (source of truth) is the event's privacy state.
if event.privacy != Event.PRIVACY_PUBLIC and results:
                # make sure the submission of the video IS token protected
if not submission.token_protection:
submission.token_protection = True
submission.save()
if results['Private'] == 'false':
# We can only do this if the video has been successfully
# transcoded.
if results['Status'] == 'Finished':
vidly.update_media_protection(
tag,
True
)
if results.get('Status') == 'Finished':
event.status = Event.STATUS_SCHEDULED
# If it's definitely finished, it means we managed to ask
# Vid.ly this question before Vid.ly had a chance to ping
# us on the webhook. Might as well set it now.
if not event.archive_time:
event.archive_time = timezone.now()
else:
# vidly hasn't finished processing it yet
event.status = Event.STATUS_PROCESSING
event.save()
if not event.picture and not event.placeholder_img:
# assign the default placeholder picture if there is one
try:
event.picture = Picture.objects.get(default_placeholder=True)
event.save()
except Picture.DoesNotExist: # pragma: no cover
pass
if not event.channels.all():
# forcibly put it in the default channel(s)
for channel in Channel.objects.filter(default=True):
event.channels.add(channel)
if not Discussion.objects.filter(event=event):
discussion = Discussion.objects.create(
event=event,
enabled=True,
notify_all=True
)
discussion.moderators.add(event.creator)
if event.privacy == Event.PRIVACY_PUBLIC:
for topic in event.topics.all():
for group in topic.groups.all():
if group not in groups:
groups.append(group)
for group in groups:
Approval.objects.create(event=event, group=group)
for group in groups:
sending.email_about_approval_requested(
event,
group,
request
)
return True
@never_cache
@login_required
@json_view
def your_events(request):
# If you have some uploads that are lingering but not associated
# with an event, we might want to create empty events for them
# now.
lingering_uploads = Upload.objects.filter(
mime_type__startswith='video/',
user=request.user,
event__isnull=True,
size__gt=0
)
with transaction.atomic():
for upload in lingering_uploads:
event = Event.objects.create(
status=Event.STATUS_INITIATED,
creator=upload.user,
upload=upload,
start_time=upload.created,
privacy=Event.PRIVACY_PUBLIC,
created=upload.created
)
# event.channels.add(default_channel)
# We'll pretend the event was created at the time the
# video was uploaded.
# Doing this after the create() is necessary because the
# model uses the auto_now_add=True
event.created = upload.created
event.save()
upload.event = event
upload.save()
events = (
Event.objects.filter(
creator=request.user,
status=Event.STATUS_INITIATED,
)
.filter(
Q(upload__isnull=False) | Q(template__name__icontains='YouTube')
)
.select_related('upload', 'picture')
.order_by('-created')
)
all_possible_pictures = (
Picture.objects
.filter(event__in=events)
.values('event_id')
.annotate(Count('event'))
)
pictures_count = {}
for each in all_possible_pictures:
pictures_count[each['event_id']] = each['event__count']
serialized = []
for event in events:
upload = event.upload
if upload:
upload = {
'size': upload.size,
'mime_type': upload.mime_type
}
thumbnail = None
if event.picture or event.placeholder_img:
geometry = '160x90'
crop = 'center'
if event.picture:
thumb = get_thumbnail(
event.picture.file, geometry, crop=crop
)
else:
thumb = get_thumbnail(
event.placeholder_img, geometry, crop=crop
)
thumbnail = {
'url': thumb.url,
'width': thumb.width,
'height': thumb.height,
}
serialized.append({
'id': event.id,
'title': event.title,
'upload': upload,
'picture': thumbnail,
'pictures': pictures_count.get(event.id, 0),
'modified': event.modified,
})
return {'events': serialized}
@require_POST
@login_required
@must_be_your_event
@json_view
def event_delete(request, event):
with transaction.atomic():
event.status = Event.STATUS_REMOVED
event.save()
return True
@transaction.atomic
def unsubscribe(request, identifier):
context = {}
cache_key = 'unsubscribe-%s' % identifier
user_id = cache.get(cache_key)
if user_id:
user = get_object_or_404(User, id=user_id)
else:
user = None
cache.set(cache_key, request.user.id, 60)
context['user_'] = user
if request.method == 'POST':
if not user:
return http.HttpResponseBadRequest('No user')
user_profile = get_profile_safely(user, create_if_necessary=True)
user_profile.optout_event_emails = True
user_profile.save()
cache.delete(cache_key)
return redirect('new:unsubscribed')
return render(request, 'new/unsubscribe.html', context)
def unsubscribed(request):
context = {}
return render(request, 'new/unsubscribed.html', context)
@require_POST
@login_required
@must_be_your_event
@json_view
@transaction.atomic
def event_pictures_rotate(request, event):
try:
post = request.body and json.loads(request.body) or {}
except ValueError:
return http.HttpResponseBadRequest('invalid JSON body')
direction = post.get('direction', 'left')
for picture in Picture.objects.filter(event=event):
img = Image.open(picture.file.path)
format = picture.file.name.lower().endswith('.png') and 'png' or 'jpeg'
img = img.rotate(direction == 'left' and 90 or 270, expand=True)
f = StringIO()
try:
img.save(f, format=format)
picture.file.save(
picture.file.name,
ContentFile(f.getvalue())
)
finally:
f.close()
return True
@login_required
@json_view
def youtube_extract(request):
url = request.GET.get('url')
if not url:
return http.HttpResponseBadRequest('No url')
try:
return youtube.extract_metadata_by_url(url)
except ValueError:
return {'error': 'Video ID not found by that URL'}
except youtube.VideoNotFound as ex:
return {'error': 'No video by that ID could be found (%s)' % ex}
@require_POST
@login_required
@json_view
@transaction.atomic
def youtube_create(request):
try:
body = json.loads(request.body)
except ValueError:
# it wasn't sent as a JSON request body
return http.HttpResponseBadRequest('Missing JSON request body')
if not body.get('id'):
return http.HttpResponseBadRequest('Missing id')
# extract all the details again
data = youtube.extract_metadata_by_id(body['id'])
for template in Template.objects.filter(name__icontains='YouTube'):
break
else:
template = Template.objects.create(
name='YouTube',
content=(
'<iframe width="896" height="504" src="https://www.youtube-noc'
'ookie.com/embed/{{ id }}?rel=0&showinfo=0" '
'frameborder="0" allowfullscreen></iframe>'
)
)
youtube_url = 'https://www.youtube.com/watch?v=' + data['id']
additional_links = u'On YouTube™ {}'.format(youtube_url)
event = Event.objects.create(
title=data['title'],
description=data['description'],
template=template,
template_environment={'id': data['id']},
creator=request.user,
status=Event.STATUS_INITIATED,
privacy=Event.PRIVACY_PUBLIC,
start_time=timezone.now(),
additional_links=additional_links,
archive_time=timezone.now(),
)
img_temp = NamedTemporaryFile(delete=True)
img_temp.write(requests.get(data['thumbnail_url']).content)
img_temp.flush()
event.placeholder_img.save(
os.path.basename(data['thumbnail_url']),
File(img_temp)
)
for tag in data['tags']:
for this_tag in Tag.objects.filter(name__iexact=tag):
break
else:
this_tag = Tag.objects.create(name=tag)
event.tags.add(this_tag)
# first get the parent of all YouTube channels
youtube_parent, __ = Channel.objects.get_or_create(
name=u'YouTube™',
slug='youtube',
never_show=True,
)
try:
channel = Channel.objects.get(
parent=youtube_parent,
youtube_id=data['channel']['id'],
name=data['channel']['title'],
)
except Channel.DoesNotExist:
# If it doesn't exist under the "YouTube parent",
# see if it exists globally.
try:
channel = Channel.objects.get(
youtube_id=data['channel']['id'],
name=data['channel']['title'],
)
except Channel.DoesNotExist:
channel = Channel.objects.create(
parent=youtube_parent,
youtube_id=data['channel']['id'],
name=data['channel']['title'],
slug=slugify(data['channel']['title'])
)
if data['channel']['thumbnail_url']:
img_temp = NamedTemporaryFile(delete=True)
img_temp.write(
requests.get(data['channel']['thumbnail_url']).content
)
img_temp.flush()
channel.image.save(
os.path.basename(data['channel']['thumbnail_url']),
File(img_temp)
)
event.channels.add(channel)
# also put it in the other default channels
for channel in Channel.objects.filter(default=True):
event.channels.add(channel)
return serialize_event(event)
|
create_submission.py | hoytak/diabetic-retinopathy-code-private | 103 | 11184098 | import graphlab as gl
import re
import random
from copy import copy
import os
import graphlab.aggregate as agg
import array
import sys
# gl.set_runtime_config("GRAPHLAB_CACHE_FILE_LOCATIONS", os.path.expanduser("~/data/tmp/"))
model_path = "/data/hoytak/diabetic/models/models/model-0-pooling-3"
train_sf = []
test_sf = []
feature_names = []
each_sf_feature_names = []
# for n in [0, "1b", '2b', 4]:
for n in [0]: #, 1, "1b", 2, '2b', 3, 4]:
try:
print "Loading %s" % str(n)
Xf_train = gl.SFrame(model_path + "/scores_train_raw")
Xf_test = gl.SFrame(model_path + "/scores_test")
sf_feature_names = []
for fn in Xf_train.column_names():
if fn.startswith("scores"):
key = fn
idx = 0
while key in feature_names:
key = fn + ".%d" % idx
idx += 1
if key != fn:
Xf_train.rename({fn : key})
Xf_test.rename({fn : key})
sf_feature_names.append(key)
train_sf.append(Xf_train)
test_sf.append(Xf_test)
each_sf_feature_names.append(sf_feature_names)
feature_names += sf_feature_names
except IOError, ier:
print "Skipping %s" % str(n), ": ", str(ier)
# Train a boosted tree model on each sframe.
fn_path = "alt_test_predictions-linear/"
if False and os.path.exists(fn_path):
X_train = gl.SFrame(fn_path + "/train")
X_test = gl.SFrame(fn_path + "/test")
else:
X_train = train_sf[0][["name", "level"]]
X_test = test_sf[0][["name"]]
for i, (tr_sf, te_sf, fnl) in enumerate(zip(train_sf,test_sf,each_sf_feature_names)):
tr_2, tr_valid = tr_sf.random_split(0.97)
print "Training model %d of %d" % (i, len(fnl))
print fnl
# m = gl.regression.boosted_trees_regression.create(
# tr_2, target = "level", features = fnl,
# max_iterations= 100,
# column_subsample=1,
# row_subsample=1,
# validation_set = tr_valid)
m = gl.regression.linear_regression.create(
tr_2, target = "level",
features = fnl,
max_iterations= 100,
validation_set = tr_valid, l2_penalty=0.02, solver='newton')
# m = gl.regression.boosted_trees_regression.create(
# tr_2, target = "level", features = fnl,
# max_iterations= (400 if i == 0 else 1000),
# column_subsample=0.5,
# row_subsample=0.5,
# validation_set = tr_valid,
# step_size=0.01)
pn = 'L%d' % i
tr_sf[pn] = m.predict(tr_sf)
te_sf[pn] = m.predict(te_sf)
score_feature = [f for f in fnl if f.startswith('scores')]
X_train = X_train.join(tr_sf[["name", pn] + score_feature], on = "name")
X_test = X_test.join(te_sf[["name", pn] + score_feature], on = "name")
X_train.save("alt_test_predictions/train")
X_test.save("alt_test_predictions/test")
################################################################################
# Run the predictions
import numpy as np
def pred_median(d):
return np.median([v for k, v in d.iteritems() if k.startswith('L')])
def pred_max(d):
return max(v for k, v in d.iteritems() if k.startswith('L'))
def pred_sorted(d):
return dict( (i, v) for i, (v, k) in enumerate(
sorted( (v, k) for k, v in d.iteritems() if k.startswith('L'))))
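# Descriptive note: each helper aggregates the per-model prediction columns
# (named 'L0', 'L1', ...) produced above: the median, the max, and a dict
# mapping rank to the sorted prediction values. These are attached below as
# meta-features for the second-stage model.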
X_train['median'] = X_train.apply(pred_median)
X_test['median'] = X_test.apply(pred_median)
X_train['max'] = X_train.apply(pred_max)
X_test['max'] = X_test.apply(pred_max)
X_train['sorted'] = X_train.apply(pred_sorted)
X_test['sorted'] = X_test.apply(pred_sorted)
X_train_2, X_valid = X_train.random_split(0.97)
prediction_method = "lr"
features = X_train.column_names()
del features[features.index("name")]
del features[features.index("level")]
if prediction_method == "lr":
m = gl.regression.linear_regression.create(
X_train_2, target = "level",
features = features,
max_iterations= 100,
validation_set = X_valid,
solver='newton')
X_test['level'] = m.predict(X_test).apply(lambda x: min(4, max(0, int(round(x)))))
elif prediction_method == "brt":
m = gl.regression.boosted_trees_regression.create(
X_train, target = "level",
features = features,
max_iterations=200,
max_depth=2,
column_subsample=1,
row_subsample=0.1,
step_size=0.01)
X_test['level'] = m.predict(X_test).apply(lambda x: min(4, max(0, int(round(x)))))
elif prediction_method == "median":
X_train['level'] = X_train['median']
X_test['level'] = X_test['median']
X_out = X_test[['name', 'level']]
def get_number(s):
n = float(re.match('[0-9]+', s).group(0))
if 'right' in s:
n += 0.5
return n
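# Ordering note: image names that look like '<patient>_left'/'<patient>_right'
# sort as n.0 and n.5 respectively, so each left eye precedes the matching
# right eye; the exact naming scheme is assumed from the 'right' check above.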
X_out['number'] = X_out['name'].apply(get_number)
X_out = X_out.sort('number')
X_out.rename({"name" : "image"})
import csv
import time
with open('submission-%d.csv' % int(time.time()), 'wb') as outfile:
fieldnames = ['image', 'level']
writer = csv.DictWriter(outfile, fieldnames=fieldnames)
writer.writeheader()
for d in X_out[['image', 'level']]:
writer.writerow(d)
|
Configuration/ProcessModifiers/python/genJetSubEvent_cff.py | ckamtsikis/cmssw | 852 | 11184128 | import FWCore.ParameterSet.Config as cms
genJetSubEvent = cms.Modifier()
|
release/src/router/samba3/source/python/gtkdictbrowser.py | ghsecuritylab/tomato_egg | 278 | 11184149 | <gh_stars>100-1000
#!/usr/bin/python
#
# Browse a Python dictionary in a two pane graphical interface written
# in GTK.
#
# The GtkDictBrowser class is supposed to be generic enough to allow
# applications to override enough methods and produce a
# domain-specific browser provided the information is presented as a
# Python dictionary.
#
# Possible applications:
#
# - Windows registry browser
# - SPOOLSS printerdata browser
# - tdb file browser
#
from gtk import *
import string, re
class GtkDictBrowser:
def __init__(self, dict):
self.dict = dict
# This variable stores a list of (regexp, function) used to
# convert the raw value data to a displayable string.
self.get_value_text_fns = []
self.get_key_text = lambda x: x
# We can filter the list of keys displayed using a regex
self.filter_regex = ""
# Create and configure user interface widgets. A string argument is
# used to set the window title.
def build_ui(self, title):
win = GtkWindow()
win.set_title(title)
win.connect("destroy", mainquit)
hpaned = GtkHPaned()
win.add(hpaned)
hpaned.set_border_width(5)
hpaned.show()
vbox = GtkVBox()
hpaned.add1(vbox)
vbox.show()
scrolled_win = GtkScrolledWindow()
scrolled_win.set_policy(POLICY_AUTOMATIC, POLICY_AUTOMATIC)
vbox.pack_start(scrolled_win)
scrolled_win.show()
hbox = GtkHBox()
vbox.pack_end(hbox, expand = 0, padding = 5)
hbox.show()
label = GtkLabel("Filter:")
hbox.pack_start(label, expand = 0, padding = 5)
label.show()
self.entry = GtkEntry()
hbox.pack_end(self.entry, padding = 5)
self.entry.show()
self.entry.connect("activate", self.filter_activated)
self.list = GtkList()
self.list.set_selection_mode(SELECTION_MULTIPLE)
self.list.set_selection_mode(SELECTION_BROWSE)
scrolled_win.add_with_viewport(self.list)
self.list.show()
self.list.connect("select_child", self.key_selected)
scrolled_win = GtkScrolledWindow()
scrolled_win.set_policy(POLICY_AUTOMATIC, POLICY_AUTOMATIC)
hpaned.add2(scrolled_win)
scrolled_win.set_usize(500,400)
scrolled_win.show()
self.text = GtkText()
self.text.set_editable(FALSE)
scrolled_win.add_with_viewport(self.text)
self.text.show()
self.text.connect("event", self.event_handler)
self.menu = GtkMenu()
self.menu.show()
self.font = load_font("fixed")
self.update_keylist()
win.show()
# Add a key to the left hand side of the user interface
def add_key(self, key):
display_key = self.get_key_text(key)
list_item = GtkListItem(display_key)
list_item.set_data("raw_key", key) # Store raw key in item data
self.list.add(list_item)
list_item.show()
# Event handler registered by build_ui()
def event_handler(self, event, menu):
return FALSE
# Set the text to appear in the right hand side of the user interface
def set_value_text(self, item):
# Clear old old value in text window
self.text.delete_text(0, self.text.get_length())
if type(item) == str:
# The text widget has trouble inserting text containing NULL
# characters.
item = string.replace(item, "\x00", ".")
self.text.insert(self.font, None, None, item)
else:
# A non-text item
self.text.insert(self.font, None, None, repr(item))
# This function is called when a key is selected in the left hand side
# of the user interface.
def key_selected(self, list, list_item):
key = list_item.children()[0].get()
# Look for a match in the value display function list
text = self.dict[list_item.get_data("raw_key")]
for entry in self.get_value_text_fns:
if re.match(entry[0], key):
text = entry[1](text)
break
self.set_value_text(text)
# Refresh the key list by removing all items and re-inserting them.
# Items are only inserted if they pass through the filter regexp.
def update_keylist(self):
self.list.remove_items(self.list.children())
self.set_value_text("")
for k in self.dict.keys():
if re.match(self.filter_regex, k):
self.add_key(k)
# Invoked when the user hits return in the filter text entry widget.
def filter_activated(self, entry):
self.filter_regex = entry.get_text()
self.update_keylist()
# Register a key display function
def register_get_key_text_fn(self, fn):
self.get_key_text = fn
# Register a value display function
def register_get_value_text_fn(self, regexp, fn):
self.get_value_text_fns.append((regexp, fn))
#
# A utility function to convert a string to the standard hex + ascii format.
# To display all values in hex do:
# register_get_value_text_fn("", gtkdictbrowser.hex_string)
#
def hex_string(data):
"""Return a hex dump of a string as a string.
The output produced is in the standard 16 characters per line hex +
ascii format:
00000000: 40 00 00 00 00 00 00 00 40 00 00 00 01 00 04 80 @....... @.......
00000010: 01 01 00 00 00 00 00 01 00 00 00 00 ........ ....
"""
pos = 0 # Position in data
line = 0 # Line of data
hex = "" # Hex display
ascii = "" # ASCII display
result = ""
while pos < len(data):
# Start with header
if pos % 16 == 0:
hex = "%08x: " % (line * 16)
ascii = ""
# Add character
hex = hex + "%02x " % (ord(data[pos]))
if ord(data[pos]) < 32 or ord(data[pos]) > 176:
ascii = ascii + '.'
else:
ascii = ascii + data[pos]
pos = pos + 1
# Add separator if half way
if pos % 16 == 8:
hex = hex + " "
ascii = ascii + " "
# End of line
if pos % 16 == 0:
result = result + "%s %s\n" % (hex, ascii)
line = line + 1
# Leftover bits
if pos % 16 != 0:
# Pad hex string
for i in range(0, (16 - (pos % 16))):
hex = hex + " "
# Half way separator
if (pos % 16) < 8:
hex = hex + " "
result = result + "%s %s\n" % (hex, ascii)
return result
# For testing purposes, create a fixed dictionary to browse with
if __name__ == "__main__":
dict = {"chicken": "ham", "spam": "fun", "subdict": {"a": "b", "c": "d"}}
db = GtkDictBrowser(dict)
db.build_ui("GtkDictBrowser")
# Override Python's handling of ctrl-c so we can break out of the
# gui from the command line.
import signal
signal.signal(signal.SIGINT, signal.SIG_DFL)
mainloop()
|