max_stars_repo_path (stringlengths 4-245) | max_stars_repo_name (stringlengths 7-115) | max_stars_count (int64 101-368k) | id (stringlengths 2-8) | content (stringlengths 6-1.03M) |
---|---|---|---|---|
xnmt/speech_features/__init__.py | mukund-v/xnmt | 195 | 11134050 | <gh_stars>100-1000
# this is copied from https://github.com/thomasZen/python_speech_features2,
# which is itself based on https://github.com/jameslyons/python_speech_features
# and https://github.com/ZitengWang/python_kaldi_features
#
# contains the following changes:
# - made Python 3 compatible
# - support for per-speaker normalization
# - made formatting consistent w/ XNMT formatting
from .base import *
|
_solved/solutions/04-spatial-joins25.py | lleondia/geopandas-tutorial | 341 | 11134060 | # Visualize the land use of the Muette district
land_use_muette.plot(column='class') |
external/skia/third_party/externals/gyp/test/settings/gyptest-settings.py | gordonjohnpatrick/XobotOS | 263 | 11134081 | #!/usr/bin/env python
# Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Smoke-tests 'settings' blocks.
"""
import TestGyp
# 'settings' is only supported for make and scons (and will be removed there as
# well eventually).
test = TestGyp.TestGyp(formats=['make', 'scons'])
test.run_gyp('settings.gyp')
test.build('test.gyp', test.ALL)
test.pass_test()
|
Tests/Common/test_util.py | faz1993/InnerEye-DeepLearning | 402 | 11134085 | # ------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License (MIT). See LICENSE in the repo root for license information.
# ------------------------------------------------------------------------------------------
import os
import sys
from pathlib import Path
import pytest
from InnerEye.Common import common_util
from InnerEye.Common.common_util import (change_working_directory, check_is_any_of,
is_private_field_name, namespace_to_path, path_to_namespace, print_exception)
from InnerEye.Common.fixed_paths import add_submodules_to_path, repository_root_directory
from InnerEye.Common.fixed_paths_for_tests import full_ml_test_data_path, tests_root_directory
from InnerEye.Common.output_directories import OutputFolderForTests
def test_get_items_from_string() -> None:
"""
Check items correctly extracted from string.
"""
assert ["i", "p"] == common_util.get_items_from_string("i, ,p")
assert ["i", "p"] == common_util.get_items_from_string("i- -p", separator="-")
assert ["i", " ", " p"] == common_util.get_items_from_string("i, , p", remove_blanks=False)
assert ["i", "p"] == common_util.get_items_from_string("i, , p")
assert [] == common_util.get_items_from_string("")
class SomeSimpleClass:
def __init__(self) -> None:
self.int = 1
self.float = 3.14
self.dict = {"foo": "Bar"}
self.str = "str"
def test_is_any_of() -> None:
"""
Tests for check_is_any_of: checks if a string is any of the strings in a valid set.
"""
check_is_any_of("prefix", "foo", ["foo"])
check_is_any_of("prefix", "foo", ["bar", "foo"])
check_is_any_of("prefix", None, ["bar", "foo", None])
# When the value is not found, an error message with the valid values should be printed
with pytest.raises(ValueError) as ex:
check_is_any_of("prefix", None, ["bar", "foo"])
assert "bar" in ex.value.args[0]
assert "foo" in ex.value.args[0]
assert "prefix" in ex.value.args[0]
# The error message should also work when one of the valid values is None
with pytest.raises(ValueError) as ex:
check_is_any_of("prefix", "baz", ["bar", None])
assert "bar" in ex.value.args[0]
assert "<None>" in ex.value.args[0]
assert "prefix" in ex.value.args[0]
assert "baz" in ex.value.args[0]
def test_is_field_private() -> None:
"""
Tests for is_private_field_name
"""
assert is_private_field_name("_hello")
assert is_private_field_name("__hello")
assert not is_private_field_name("world")
def test_print_exception() -> None:
"""
    A test that just throws an exception, and allows you to check if the diagnostics are at the right level.
You need to inspect the test output manually.
"""
try:
raise ValueError("foo")
except Exception as ex:
print_exception(ex, "Message")
@pytest.mark.parametrize("is_external", [True, False])
def test_namespace_to_path(is_external: bool, test_output_dirs: OutputFolderForTests) -> None:
"""
    A test to check conversion from namespace to path for InnerEye and external namespaces
"""
if is_external:
folder_name = "logs"
full_folder = test_output_dirs.root_dir / folder_name
assert namespace_to_path(folder_name, root=test_output_dirs.root_dir) == full_folder
else:
from Tests.ML import test_data
assert namespace_to_path(test_data.__name__, root=tests_root_directory().parent) == full_ml_test_data_path()
@pytest.mark.parametrize("is_external", [True, False])
def test_path_to_namespace(is_external: bool, test_output_dirs: OutputFolderForTests) -> None:
"""
    A test to check conversion from path to namespace for InnerEye and external namespaces
"""
if is_external:
folder_name = "logs"
full_folder = test_output_dirs.root_dir / folder_name
assert path_to_namespace(
path=full_folder,
root=test_output_dirs.root_dir
) == folder_name
else:
from Tests.ML import test_data
from InnerEye.Common.fixed_paths_for_tests import full_ml_test_data_path
assert path_to_namespace(
path=full_ml_test_data_path(),
root=tests_root_directory().parent
) == test_data.__name__
def test_change_dir(test_output_dirs: OutputFolderForTests) -> None:
"""
Test the context manager for changing directories.
"""
os.chdir(test_output_dirs.root_dir)
assert Path.cwd() == test_output_dirs.root_dir
new_dir = test_output_dirs.root_dir / "foo"
new_dir.mkdir()
with change_working_directory(new_dir):
assert Path.cwd() == new_dir
Path("bar.txt").touch()
assert Path.cwd() == test_output_dirs.root_dir
assert (new_dir / "bar.txt").is_file()
def test_add_submodules_to_path() -> None:
original_sys_path = sys.path
try:
fastmri_folder = repository_root_directory() / "fastMRI"
fastmri_str = str(fastmri_folder)
assert fastmri_folder.is_dir()
if fastmri_str in sys.path:
sys.path.remove(fastmri_str)
add_submodules_to_path()
assert fastmri_str in sys.path
finally:
sys.path = original_sys_path
|
tests/test_identity.py | 757670303037/stable-baselines | 3,681 | 11134100 | <reponame>757670303037/stable-baselines
import pytest
import numpy as np
from stable_baselines import A2C, ACER, ACKTR, DQN, DDPG, SAC, PPO1, PPO2, TD3, TRPO
from stable_baselines.ddpg import NormalActionNoise
from stable_baselines.common.identity_env import IdentityEnv, IdentityEnvBox
from stable_baselines.common.vec_env import DummyVecEnv
from stable_baselines.common.evaluation import evaluate_policy
# Hyperparameters for learning identity for each RL model
LEARN_FUNC_DICT = {
"a2c": lambda e: A2C(
policy="MlpPolicy",
learning_rate=1e-3,
n_steps=4,
gamma=0.4,
ent_coef=0.0,
env=e,
seed=0,
).learn(total_timesteps=4000),
"acer": lambda e: ACER(
policy="MlpPolicy",
env=e,
seed=0,
n_steps=4,
replay_ratio=1,
ent_coef=0.0,
).learn(total_timesteps=4000),
"acktr": lambda e: ACKTR(
policy="MlpPolicy", env=e, seed=0, learning_rate=5e-4, ent_coef=0.0, n_steps=4
).learn(total_timesteps=4000),
"dqn": lambda e: DQN(
policy="MlpPolicy",
batch_size=32,
gamma=0.1,
learning_starts=0,
exploration_final_eps=0.05,
exploration_fraction=0.1,
env=e,
seed=0,
).learn(total_timesteps=4000),
"ppo1": lambda e: PPO1(
policy="MlpPolicy",
env=e,
seed=0,
lam=0.5,
entcoeff=0.0,
optim_batchsize=16,
gamma=0.4,
optim_stepsize=1e-3,
).learn(total_timesteps=3000),
"ppo2": lambda e: PPO2(
policy="MlpPolicy",
env=e,
seed=0,
learning_rate=1.5e-3,
lam=0.8,
ent_coef=0.0,
gamma=0.4,
).learn(total_timesteps=3000),
"trpo": lambda e: TRPO(
policy="MlpPolicy",
env=e,
gamma=0.4,
seed=0,
max_kl=0.05,
lam=0.7,
timesteps_per_batch=256,
).learn(total_timesteps=4000),
}
@pytest.mark.slow
@pytest.mark.parametrize(
"model_name", ["a2c", "acer", "acktr", "dqn", "ppo1", "ppo2", "trpo"]
)
def test_identity_discrete(model_name):
"""
Test if the algorithm (with a given policy)
can learn an identity transformation (i.e. return observation as an action)
:param model_name: (str) Name of the RL model
"""
env = DummyVecEnv([lambda: IdentityEnv(10)])
model = LEARN_FUNC_DICT[model_name](env)
evaluate_policy(model, env, n_eval_episodes=20, reward_threshold=90)
obs = env.reset()
assert model.action_probability(obs).shape == (
1,
10,
), "Error: action_probability not returning correct shape"
action = env.action_space.sample()
action_prob = model.action_probability(obs, actions=action)
assert np.prod(action_prob.shape) == 1, "Error: not scalar probability"
action_logprob = model.action_probability(obs, actions=action, logp=True)
assert np.allclose(action_prob, np.exp(action_logprob)), (
action_prob,
action_logprob,
)
# Free memory
del model, env
@pytest.mark.slow
@pytest.mark.parametrize("model_class", [DDPG, TD3, SAC])
def test_identity_continuous(model_class):
"""
Test if the algorithm (with a given policy)
can learn an identity transformation (i.e. return observation as an action)
"""
env = DummyVecEnv([lambda: IdentityEnvBox(eps=0.5)])
n_steps = {SAC: 700, TD3: 500, DDPG: 2000}[model_class]
kwargs = dict(seed=0, gamma=0.95, buffer_size=1e5)
if model_class in [DDPG, TD3]:
n_actions = 1
action_noise = NormalActionNoise(
mean=np.zeros(n_actions), sigma=0.05 * np.ones(n_actions)
)
kwargs["action_noise"] = action_noise
if model_class == DDPG:
kwargs["actor_lr"] = 1e-3
kwargs["batch_size"] = 100
model = model_class("MlpPolicy", env, **kwargs)
model.learn(total_timesteps=n_steps)
evaluate_policy(model, env, n_eval_episodes=20, reward_threshold=90)
# Free memory
del model, env
|
rules/configuration/rule_laravel.py | TomasTorresB/nerve | 365 | 11134130 | <filename>rules/configuration/rule_laravel.py
from core.redis import rds
from core.triage import Triage
from core.parser import ScanParser
class Rule:
def __init__(self):
self.rule = 'CFG_823E'
self.rule_severity = 3
self.rule_description = 'This rule checks for misconfigurations in Laravel'
self.rule_confirm = 'Remote Server Misconfigured Laravel'
self.rule_mitigation = '''Laravel has been misconfigured and may leak environment or log data. \
Use the Laravel Hardening Guidelines for reference: https://laravel.com/docs/7.x/configuration'''
self.rule_details = ''
self.rule_match_string = {
'/storage/logs/laravel.log':{
'app':'LARAVEL_FRAMEWORK_LOG',
'match':['Stack trace', 'Did you mean one of these?', 'ConsoleOutput'],
'title':'Laravel Framework Log'
},
'/.env':{
'app':'LARAVEL_FRAMEWORK_ENV',
'match':['MIX_PUSHER_APP_KEY', 'BROADCAST_DRIVER'],
'title':'Laravel Framework Env File'
},
}
self.intensity = 1
def check_rule(self, ip, port, values, conf):
t = Triage()
p = ScanParser(port, values)
module = p.get_module()
domain = p.get_domain()
if 'http' not in module:
return
for uri, values in self.rule_match_string.items():
app_title = values['title']
resp = t.http_request(ip, port, uri=uri)
if resp is not None:
for match in values['match']:
if match in resp.text:
self.rule_details = 'Laravel Misconfiguration - {} at {}'.format(app_title, resp.url)
rds.store_vuln({
'ip':ip,
'port':port,
'domain':domain,
'rule_id':self.rule,
'rule_sev':self.rule_severity,
'rule_desc':self.rule_description,
'rule_confirm':self.rule_confirm,
'rule_details':self.rule_details,
'rule_mitigation':self.rule_mitigation
})
return |
Ch06_Heavyweight_Scraping_with_Scrapy/nobel_winners/spiders/nwinners_minibio_spider.py | Geege/dataviz-with-python-and-js | 259 | 11134132 | <reponame>Geege/dataviz-with-python-and-js<gh_stars>100-1000
import scrapy
import re
BASE_URL = 'http://en.wikipedia.org'
class NWinnerItemBio(scrapy.Item):
link = scrapy.Field()
name = scrapy.Field()
mini_bio = scrapy.Field()
image_urls = scrapy.Field()
bio_image = scrapy.Field()
images = scrapy.Field()
class NWinnerSpiderBio(scrapy.Spider):
""" Scrapes the Nobel prize biography pages for portrait images and a biographical snippet """
name = 'nwinners_minibio'
allowed_domains = ['en.wikipedia.org']
start_urls = [
"http://en.wikipedia.org/wiki/List_of_Nobel_laureates_by_country?dfdfd"
]
    # For Scrapy v 1.0+, custom_settings can override the item pipelines in settings
custom_settings = {
'ITEM_PIPELINES': {'nobel_winners.pipelines.NobelImagesPipeline':1},
}
def parse(self, response):
filename = response.url.split('/')[-1]
h2s = response.xpath('//h2')
for h2 in h2s:
country = h2.xpath('span[@class="mw-headline"]/text()').extract()
if country:
winners = h2.xpath('following-sibling::ol[1]')
for w in winners.xpath('li'):
wdata = {}
wdata['link'] = BASE_URL + w.xpath('a/@href').extract()[0]
#print(wdata)
request = scrapy.Request(wdata['link'],
callback=self.get_mini_bio,
dont_filter=True)
request.meta['item'] = NWinnerItemBio(**wdata)
yield request
def get_mini_bio(self, response):
BASE_URL_ESCAPED = 'http:\/\/en.wikipedia.org'
item = response.meta['item']
# cache image
item['image_urls'] = []
# Get the URL of the winner's picture, contained in the infobox table
img_src = response.xpath('//table[contains(@class,"infobox")]//img/@src')
if img_src:
item['image_urls'] = ['http:' + img_src[0].extract()]
mini_bio = ''
# Get the paragraphs in the biography's body-text
ps = response.xpath('//*[@id="mw-content-text"]/p[text() or normalize-space(.)=""]').extract()
# Add introductory biography paragraphs till the empty breakpoint
for p in ps:
if p == '<p></p>':
break
mini_bio += p
# correct for wiki-links
mini_bio = mini_bio.replace('href="/wiki', 'href="' + BASE_URL + '/wiki')
mini_bio = mini_bio.replace('href="#', 'href="' + item['link'] + '#')
item['mini_bio'] = mini_bio
yield item
# def parse_bio(self, response):
# item = response.meta['item']
# bio_text = response.xpath('//div[@id="mw-content-text"]').extract()[0]
# item['gender'] = guess_gender(bio_text)
# persondata_table = response.xpath('//table[@id="persondata"]')
# if persondata_table:
# get_persondata(persondata_table[0], item)
# else:
# item['gender'] = None
# yield item
|
learning/katas/python/Triggers/Window Accumulation Mode/Window Accumulation Mode/task.py | chrisstockton/beam | 5,279 | 11134151 | <reponame>chrisstockton/beam
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import apache_beam as beam
from generate_event import GenerateEvent
from apache_beam.transforms.window import FixedWindows
from apache_beam.transforms.trigger import AfterWatermark
from apache_beam.transforms.trigger import AfterCount
from apache_beam.transforms.trigger import AccumulationMode
from apache_beam.utils.timestamp import Duration
from apache_beam.options.pipeline_options import PipelineOptions
from apache_beam.options.pipeline_options import StandardOptions
from log_elements import LogElements
def apply_transform(events):
return (events
| beam.WindowInto(FixedWindows(1*24*60*60), # 1 Day Window
trigger=AfterWatermark(early=AfterCount(1)),
accumulation_mode=AccumulationMode.ACCUMULATING,
allowed_lateness=Duration(seconds=0))
| beam.CombineGlobally(beam.combiners.CountCombineFn()).without_defaults())
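# Note on the trigger configuration above: with AccumulationMode.ACCUMULATING, each early
# firing produced by AfterCount(1) re-emits the running count of every element seen so far
# in the one-day window, and the on-time firing at the watermark includes them all again.
# AccumulationMode.DISCARDING would instead count only the elements that arrived since the
# previous firing.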
def main():
options = PipelineOptions()
options.view_as(StandardOptions).streaming = True
with beam.Pipeline(options=options) as p:
events = p | GenerateEvent.sample_data()
output = apply_transform(events)
output | LogElements(with_window=True)
if __name__ == "__main__":
main()
|
src/storage-preview/azext_storage_preview/vendored_sdks/azure_storagev2/fileshare/v2020_02_10/_shared/__init__.py | Mannan2812/azure-cli-extensions | 2,728 | 11134164 | # -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import base64
import hashlib
import hmac
try:
from urllib.parse import quote, unquote
except ImportError:
from urllib2 import quote, unquote # type: ignore
import six
def url_quote(url):
return quote(url)
def url_unquote(url):
return unquote(url)
def encode_base64(data):
if isinstance(data, six.text_type):
data = data.encode('utf-8')
encoded = base64.b64encode(data)
return encoded.decode('utf-8')
def decode_base64_to_bytes(data):
if isinstance(data, six.text_type):
data = data.encode('utf-8')
return base64.b64decode(data)
def decode_base64_to_text(data):
decoded_bytes = decode_base64_to_bytes(data)
return decoded_bytes.decode('utf-8')
def sign_string(key, string_to_sign, key_is_base64=True):
if key_is_base64:
key = decode_base64_to_bytes(key)
else:
if isinstance(key, six.text_type):
key = key.encode('utf-8')
if isinstance(string_to_sign, six.text_type):
string_to_sign = string_to_sign.encode('utf-8')
signed_hmac_sha256 = hmac.HMAC(key, string_to_sign, hashlib.sha256)
digest = signed_hmac_sha256.digest()
encoded_digest = encode_base64(digest)
return encoded_digest
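# Illustrative use of the helpers above (values are placeholders): signing a canonicalized
# request string with a base64-encoded key returns the base64 HMAC-SHA256 digest.
#   key = encode_base64(b"not-a-real-key")
#   signature = sign_string(key, "string-to-sign")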
|
code/models/py_utils/builder.py | 360iQ/CPNDet | 191 | 11134192 | <gh_stars>100-1000
import sys
import os
from mmcv.runner import obj_from_dict
from torch import nn
import pdb
from . import (roi_extractors)
from . import (bbox_heads)
__all__ = [
'build_roi_extractor', 'build_bbox_head'
]
def _build_module(cfg, parrent=None, default_args=None):
return cfg if isinstance(cfg, nn.Module) else obj_from_dict(
cfg, parrent, default_args)
def build(cfg, parrent=None, default_args=None):
if isinstance(cfg, list):
modules = [_build_module(cfg_, parrent, default_args) for cfg_ in cfg]
return nn.Sequential(*modules)
else:
return _build_module(cfg, parrent, default_args)
def build_roi_extractor(cfg):
return build(cfg, roi_extractors)
def build_bbox_head(cfg):
return build(cfg, bbox_heads)
|
slack_sdk/socket_mode/__init__.py | priya1puresoftware/python-slack-sdk | 2,486 | 11134253 | """Socket Mode is a method of connecting your app to Slack’s APIs using WebSockets instead of HTTP.
You can use slack_sdk.socket_mode.SocketModeClient for managing Socket Mode connections
and performing interactions with Slack.
https://api.slack.com/apis/connections/socket
"""
from .builtin import SocketModeClient # noqa
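# Minimal usage sketch (illustrative; the tokens are placeholders and `my_listener` is a
# user-defined callback):
#   from slack_sdk.socket_mode import SocketModeClient
#   from slack_sdk.web import WebClient
#   client = SocketModeClient(app_token="xapp-...", web_client=WebClient(token="xoxb-..."))
#   client.socket_mode_request_listeners.append(my_listener)
#   client.connect()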
|
ghostwriter/rolodex/templatetags/determine_primary.py | bbhunter/Ghostwriter | 601 | 11134263 | <reponame>bbhunter/Ghostwriter
"""This contains the custom template tags used by he Rolodex application."""
# Standard Libraries
from collections import defaultdict
import datetime
from django import template
# Ghostwriter Libraries
from ghostwriter.rolodex.models import ObjectivePriority
from ghostwriter.shepherd.models import AuxServerAddress
register = template.Library()
@register.simple_tag
def get_primary_address(value):
"""
Get the primary IP address for an individual :model:`shepherd.StaticServer`
from :model:`shepherd.AuxServerAddress`.
**Parameters**
``value``
Individual :model:`shepherd.StaticServer` entry
"""
primary_address = value.ip_address
aux_addresses = AuxServerAddress.objects.filter(static_server=value)
for address in aux_addresses:
if address.primary:
primary_address = address.ip_address
return primary_address
@register.filter
def get_scope_preview(value, n):
"""
Get the top N lines of a ``scope`` list for an individual :model:`rolodex.ProjectScope`.
**Parameters**
``value``
The ``scope`` value of an individual :model:`rolodex.ProjectScope` entry
``value``
Number of lines to return
"""
return "\n".join(value.split("\r\n")[0:n])
@register.filter
def plus_days(value, days):
"""
Add some number of days to a ``datetime`` value within a template.
**Parameters**
``days``
A whole integer to add to the day value of a ``datetime`` value
"""
return value + datetime.timedelta(days=days)
@register.filter
def days_left(value):
"""
Calculate how many days between the current date and a provide ``datetime`` value.
**Parameters**
``value``
A ``datetime`` value
"""
today = datetime.date.today()
delta = value - today
return delta.days
@register.filter
def get_item(dictionary, key):
"""
Return a key value from a dictionary object.
**Parameters**
    ``dictionary``
Python dictionary object to parse
``key``
        Key name to retrieve from the dictionary
"""
# Use `get` to return `None` if not found
return dictionary.get(key)
@register.simple_tag
def group_by_priority(queryset):
"""
Group a queryset by the ``Priority`` field.
**Parameters**
``queryset``
Instance of :model:`rolodex.ProjectObjective`
"""
all_priorities = ObjectivePriority.objects.all().order_by("weight")
priority_dict = defaultdict(list)
for priority in all_priorities:
priority_dict[str(priority)] = []
for objective in queryset:
priority_dict[str(objective.priority)].append(objective)
# Return a basic dict because templates can't handle defaultdict
return dict(priority_dict)
|
bcbio/rnaseq/cpat.py | a113n/bcbio-nextgen | 418 | 11134272 | """
run the Coding Potential Assessment Tool (CPAT)
http://nar.oxfordjournals.org/content/early/2013/01/17/nar.gkt006.full
"""
import numpy
import shutil
import tempfile
import os
from bcbio import utils
from bcbio.rnaseq import gtf
from bcbio.utils import file_exists, safe_makedir
from bcbio.distributed.transaction import file_transaction
from bcbio.provenance import do
from bcbio.bam import fasta
from bcbio.pipeline import config_utils
def classify_with_cpat(assembled_gtf, ref_gtf, ref_fasta, data):
cpat_cmd = config_utils.get_program("cpat.py", data)
if not cpat_cmd:
return {}
if not gtf.is_cpat_compatible(ref_gtf):
return {}
cutoff, hexamer, logit = get_coding_potential_cutoff(ref_gtf, ref_fasta, data)
assembled_fasta = gtf.gtf_to_fasta(assembled_gtf, ref_fasta)
cpat_fn = cpat(assembled_fasta, hexamer, logit, data)
coding_probabilities = load_cpat_coding_prob(cpat_fn)
lengths = fasta.sequence_length(assembled_fasta)
classification = {}
for transcript, prob in coding_probabilities.items():
if prob > cutoff:
classification[transcript] = "protein_coding"
if lengths[transcript] > 200:
classification[transcript] = "lncRNA"
else:
classification[transcript] = "ncRNA"
return classification
def cpat(assembled_fasta, hexamer, logit, data, out_file=None):
if out_file and file_exists(out_file):
return out_file
if not out_file:
out_file = tempfile.NamedTemporaryFile(delete=False, suffix=".cpat").name
cpat_cmd = config_utils.get_program("cpat.py", data)
r_setup = utils.get_R_exports()
cmd = ("{r_setup} && {cpat_cmd} --gene={assembled_fasta} --hex={hexamer} "
"--logitModel={logit} --outfile={tx_out_file}")
message = "Predicing coding potential of %s." % (assembled_fasta)
with file_transaction(out_file) as tx_out_file:
do.run(cmd.format(**locals()), message)
return out_file
def load_cpat_coding_prob(cpat_file):
with open(cpat_file) as in_handle:
header = next(in_handle)
return {line.split()[0]: float(line.split()[5]) for line in in_handle}
def load_cpat_orf_size(cpat_file):
with open(cpat_file) as in_handle:
header = next(in_handle)
return {line.split()[0]: float(line.split()[2]) for line in in_handle}
def grade_cpat(coding_transcripts, noncoding_transcripts, cpat, cutoff):
coding_tp = 0
coding_fp = 0
noncoding_tp = 0
noncoding_fp = 0
for transcript in coding_transcripts:
if cpat[transcript] < cutoff:
noncoding_fp += 1
else:
coding_tp += 1
for transcript in noncoding_transcripts:
if cpat[transcript] >= cutoff:
coding_fp += 1
else:
noncoding_tp += 1
tp = float(coding_tp)
fp = float(coding_fp)
tn = float(noncoding_tp)
fn = float(noncoding_fp)
sensitivity = tp / (tp + fn)
specificity = tn / (tn + fp)
accuracy = (tp + tn) / (tp + tn + fp + fn)
precision = tp / (tp + fp) if (tp + fp > 0) else -1
return {"sensitivity": sensitivity, "specificity": specificity,
"accuracy": accuracy, "precision": precision}
def make_logit_model(coding_fasta, noncoding_fasta, hexamers, data, out_dir=None):
safe_makedir(out_dir)
out_prefix = os.path.join(out_dir, "logit")
out_file = out_prefix + ".logit.RData"
if file_exists(out_file):
return out_file
tx_prefix = tempfile.NamedTemporaryFile(delete=False).name
tx_out_file = tx_prefix + ".logit.RData"
logit_cmd = config_utils.get_program("make_logitModel.py", data)
r_setup = utils.get_R_exports()
cmd = ("{r_setup} && {logit_cmd} --cgene={coding_fasta} --ngene={noncoding_fasta} "
"--hex={hexamers} --outfile={tx_prefix}")
message = "Building coding/noncoding logistical model."
do.run(cmd.format(**locals()), message)
shutil.move(tx_out_file, out_file)
return out_file
def get_coding_potential_cutoff(ref_gtf, ref_fasta, data):
"""
estimate the coding potential cutoff that best classifies
coding/noncoding transcripts by splitting the reference
annotation into a test and training set and determining
the cutoff where the sensitivity and specificity meet
"""
train_gtf, test_gtf = gtf.split_gtf(ref_gtf, sample_size=2000)
coding_gtf = gtf.partition_gtf(train_gtf, coding=True)
noncoding_gtf = gtf.partition_gtf(train_gtf)
noncoding_fasta = gtf.gtf_to_fasta(noncoding_gtf, ref_fasta)
cds_fasta = gtf.gtf_to_fasta(coding_gtf, ref_fasta, cds=True)
hexamer_content = hexamer_table(cds_fasta, noncoding_fasta, data)
coding_fasta = gtf.gtf_to_fasta(coding_gtf, ref_fasta)
logit_model = make_logit_model(coding_fasta, noncoding_fasta,
hexamer_content, data, "test_gtf")
test_fasta = gtf.gtf_to_fasta(test_gtf, ref_fasta)
cpat_fn = cpat(test_fasta, hexamer_content, logit_model, data)
cpat_prob = load_cpat_coding_prob(cpat_fn)
coding, noncoding = gtf.get_coding_noncoding_transcript_ids(test_gtf)
best_score = 1
best_cutoff = 0
best_sensitivity = 0
best_specificity = 0
for cutoff in list(numpy.arange(0.1, 1, 0.01)):
grade = grade_cpat(coding, noncoding, cpat_prob, cutoff)
score = abs(grade["sensitivity"] - grade["specificity"])
if score < best_score:
best_score = score
best_cutoff = cutoff
best_sensitivity = grade["sensitivity"]
best_specificity = grade["specificity"]
return best_cutoff, hexamer_content, logit_model
def hexamer_table(cds_fasta, noncoding_fasta, data, out_file=None):
if out_file and file_exists(out_file):
return out_file
if not out_file:
out_file = tempfile.NamedTemporaryFile(delete=False, suffix=".hexamers").name
hex_cmd = config_utils.get_program("make_hexamer_tab.py", data)
cmd = ("{hex_cmd} --cod={cds_fasta} --noncod={noncoding_fasta} "
"> {tx_out_file}")
with file_transaction(out_file) as tx_out_file:
message = ("Calculating hexamer content in %s and %s."
% (cds_fasta, noncoding_fasta))
do.run(cmd.format(**locals()), message)
return out_file
|
leetcode/algorithms/median-of-two-sorted-arrays/solution.py | palash24/algorithms | 113 | 11134308 | <gh_stars>100-1000
#!/usr/bin/env python
class Solution(object):
def findMedianSortedArrays(self, a, b):
"""
Returns the median of two sorted arrays a and b.
"""
n = len(a) + len(b)
if n % 2 == 0:
# If the total length is even, take the average of the two medians.
return (self._findKth(a, 0, b, 0, n // 2) +
self._findKth(a, 0, b, 0, n // 2 + 1)) / 2.0
else:
return self._findKth(a, 0, b, 0, n // 2 + 1)
def _findKth(self, a, i, b, j, k):
"""
Returns the kth element of two sorted sub-arrays a[i:] and b[j:]. The
high level description of the algorithm is as follows.
Call A = a[i:], B = b[j:], and n the kth element. In each iteration of
the algorithm we wish to get k / 2 closer to the kth element. To do
this we compare A[k / 2] and B[k / 2] where A = X + [A[k / 2]] + Y and
B = Z + [B[k / 2]] + J.
Note that |Z| = |X| = k / 2 - 1. This helps us consider 2 cases.
1. A[k / 2] < B[k / 2] -> n is not in X and n != A[k / 2]. Why?
We know for sure that all elements in X are part of the k - 2 smallest
elements because all x in X <= all y in Y, x < all j in J,
x <= A[k / 2] and x < B[k / 2]. This only leaves the relationship
between each x and each z in Z uncertain.
The same cannot be said about Z because some z in Z could be > some
y in Y. Thus we can eliminate only X and A[k / 2] from our search which
brings us k / 2 closer to our target.
2. A[k / 2] >= B[k / 2] -> n is not in Y or {B[k / 2]} using similar
logic.
        The main edge case arises when len(A) < k / 2 or len(B) < k / 2. Say
len(A) < k / 2. This means we can cut out the first k / 2 of B. Why?
1. If all a in A >= first k / 2 b in B -> No b in first k / 2 of B = n.
2. If all a in A < first k / 2 b in B -> There is no a in A that = n.
Similar logic holds for A when len(B) < k.
"""
assert k <= len(a) + len(b)
if i >= len(a):
# Out of bounds of a.
return b[j + k - 1]
elif j >= len(b):
# Out of bounds of b.
return a[i + k - 1]
elif k == 1:
# 1st element is the smaller of the first in a or b.
return min(a[i], b[j])
midAIndex = i + k // 2 - 1
# Handle case where a[i:] is shorter than k // 2. We will keep this
# array and cut the other one instead which is guaranteed to be at
# least length k // 2
midAValue = a[midAIndex] if midAIndex < len(a) else float('inf')
midBIndex = j + k // 2 - 1
# Same case as midAValue but for b.
midBValue = b[midBIndex] if midBIndex < len(b) else float('inf')
if midAValue < midBValue:
return self._findKth(a, midAIndex + 1, b, j, k - k // 2)
else:
return self._findKth(a, i, b, midBIndex + 1, k - k // 2)
def main():
print('Please run this solution on LeetCode.')
print('https://leetcode.com/problems/median-of-two-sorted-arrays/')
if __name__ == '__main__':
main()
|
kinto/core/authentication.py | taus-semmle/kinto | 4,618 | 11134313 | from pyramid import authentication as base_auth
from kinto.core import utils
from kinto.core.openapi import OpenAPI
class BasicAuthAuthenticationPolicy(base_auth.BasicAuthAuthenticationPolicy):
"""Basic auth implementation.
Allow any user with any credentials (e.g. there is no need to create an
account).
"""
def __init__(self, *args, **kwargs):
def noop_check(*a):
return []
super().__init__(noop_check, *args, **kwargs)
def effective_principals(self, request):
# Bypass default Pyramid construction of principals because
# Pyramid multiauth already adds userid, Authenticated and Everyone
# principals.
return []
def unauthenticated_userid(self, request):
settings = request.registry.settings
credentials = base_auth.extract_http_basic_credentials(request)
if credentials:
            username, password = credentials
if not username:
return
hmac_secret = settings["userid_hmac_secret"]
credentials = f"{credentials[0]}:{credentials[1]}"
userid = utils.hmac_digest(hmac_secret, credentials)
return userid
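        # The userid is thus a stable HMAC digest of "username:password" keyed with the
        # server's userid_hmac_secret, so identical credentials always map to the same user
        # without any account record being stored.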
def includeme(config):
config.add_api_capability(
"basicauth",
description="Very basic authentication sessions. Not for production use.",
url="http://kinto.readthedocs.io/en/stable/api/1.x/authentication.html",
)
OpenAPI.expose_authentication_method("basicauth", {"type": "basic"})
|
models/modules/unet.py | mauriliosalg/Seis_Shift-Net_pytorch | 350 | 11134321 | import torch
import torch.nn as nn
import torch.nn.functional as F
from .modules import spectral_norm
# Defines the Unet generator.
# |num_downs|: number of downsamplings in UNet. For example,
# if |num_downs| == 7, image of size 128x128 will become of size 1x1
# at the bottleneck
class UnetGenerator(nn.Module):
def __init__(self, input_nc, output_nc, num_downs, ngf=64,
norm_layer=nn.BatchNorm2d, use_spectral_norm=False):
super(UnetGenerator, self).__init__()
# construct unet structure
unet_block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, input_nc=None, submodule=None, norm_layer=norm_layer, innermost=True, use_spectral_norm=use_spectral_norm)
for i in range(num_downs - 5):
unet_block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, input_nc=None, submodule=unet_block, norm_layer=norm_layer, use_spectral_norm=use_spectral_norm)
unet_block = UnetSkipConnectionBlock(ngf * 4, ngf * 8, input_nc=None, submodule=unet_block, norm_layer=norm_layer, use_spectral_norm=use_spectral_norm)
unet_block = UnetSkipConnectionBlock(ngf * 2, ngf * 4, input_nc=None, submodule=unet_block, norm_layer=norm_layer, use_spectral_norm=use_spectral_norm)
unet_block = UnetSkipConnectionBlock(ngf, ngf * 2, input_nc=None, submodule=unet_block, norm_layer=norm_layer, use_spectral_norm=use_spectral_norm)
unet_block = UnetSkipConnectionBlock(output_nc, ngf, input_nc=input_nc, submodule=unet_block, outermost=True, norm_layer=norm_layer, use_spectral_norm=use_spectral_norm)
self.model = unet_block
def forward(self, input):
return self.model(input)
# construct network from the inside to the outside.
# Defines the submodule with skip connection.
# X -------------------identity---------------------- X
# |-- downsampling -- |submodule| -- upsampling --|
class UnetSkipConnectionBlock(nn.Module):
def __init__(self, outer_nc, inner_nc, input_nc,
submodule=None, outermost=False, innermost=False, norm_layer=nn.BatchNorm2d, use_spectral_norm=False):
super(UnetSkipConnectionBlock, self).__init__()
self.outermost = outermost
if input_nc is None:
input_nc = outer_nc
downconv = spectral_norm(nn.Conv2d(input_nc, inner_nc, kernel_size=4,
stride=2, padding=1), use_spectral_norm)
downrelu = nn.LeakyReLU(0.2, True)
downnorm = norm_layer(inner_nc)
uprelu = nn.ReLU(True)
upnorm = norm_layer(outer_nc)
        # Blocks at different positions differ only in `upconv`.
        # For the outermost block, the extra piece is the final `tanh`.
if outermost:
upconv = spectral_norm(nn.ConvTranspose2d(inner_nc * 2, outer_nc,
kernel_size=4, stride=2,
padding=1), use_spectral_norm)
down = [downconv]
up = [uprelu, upconv, nn.Tanh()]
model = down + [submodule] + up
# for the innermost, the special is `inner_nc` instead of `inner_nc*2`
elif innermost:
upconv = spectral_norm(nn.ConvTranspose2d(inner_nc, outer_nc,
kernel_size=4, stride=2,
padding=1), use_spectral_norm)
down = [downrelu, downconv] # for the innermost, no submodule, and delete the bn
up = [uprelu, upconv, upnorm]
model = down + up
# else, the normal
else:
upconv = spectral_norm(nn.ConvTranspose2d(inner_nc * 2, outer_nc,
kernel_size=4, stride=2,
padding=1), use_spectral_norm)
down = [downrelu, downconv, downnorm]
up = [uprelu, upconv, upnorm]
model = down + [submodule] + up
self.model = nn.Sequential(*model)
def forward(self, x):
if self.outermost: # if it is the outermost, directly pass the input in.
return self.model(x)
else:
x_latter = self.model(x)
_, _, h, w = x.size()
if h != x_latter.size(2) or w != x_latter.size(3):
x_latter = F.interpolate(x_latter, (h, w), mode='bilinear')
return torch.cat([x_latter, x], 1) # cat in the C channel
# It is an easy type of UNet, instead of constructing UNet with UnetSkipConnectionBlocks.
# In this way, everything is much clearer and more flexible for extension.
class EasyUnetGenerator(nn.Module):
def __init__(self, input_nc, output_nc, ngf=64,
norm_layer=nn.BatchNorm2d, use_spectral_norm=False):
super(EasyUnetGenerator, self).__init__()
# Encoder layers
self.e1_c = spectral_norm(nn.Conv2d(input_nc, ngf, kernel_size=4, stride=2, padding=1), use_spectral_norm)
self.e2_c = spectral_norm(nn.Conv2d(ngf, ngf*2, kernel_size=4, stride=2, padding=1), use_spectral_norm)
self.e2_norm = norm_layer(ngf*2)
self.e3_c = spectral_norm(nn.Conv2d(ngf*2, ngf*4, kernel_size=4, stride=2, padding=1), use_spectral_norm)
self.e3_norm = norm_layer(ngf*4)
self.e4_c = spectral_norm(nn.Conv2d(ngf*4, ngf*8, kernel_size=4, stride=2, padding=1), use_spectral_norm)
self.e4_norm = norm_layer(ngf*8)
self.e5_c = spectral_norm(nn.Conv2d(ngf*8, ngf*8, kernel_size=4, stride=2, padding=1), use_spectral_norm)
self.e5_norm = norm_layer(ngf*8)
self.e6_c = spectral_norm(nn.Conv2d(ngf*8, ngf*8, kernel_size=4, stride=2, padding=1), use_spectral_norm)
self.e6_norm = norm_layer(ngf*8)
self.e7_c = spectral_norm(nn.Conv2d(ngf*8, ngf*8, kernel_size=4, stride=2, padding=1), use_spectral_norm)
self.e7_norm = norm_layer(ngf*8)
self.e8_c = spectral_norm(nn.Conv2d(ngf*8, ngf*8, kernel_size=4, stride=2, padding=1), use_spectral_norm)
# Deocder layers
self.d1_c = spectral_norm(nn.ConvTranspose2d(ngf*8, ngf*8, kernel_size=4, stride=2, padding=1), use_spectral_norm)
self.d1_norm = norm_layer(ngf*8)
self.d2_c = spectral_norm(nn.ConvTranspose2d(ngf*8*2 , ngf*8, kernel_size=4, stride=2, padding=1), use_spectral_norm)
self.d2_norm = norm_layer(ngf*8)
self.d3_c = spectral_norm(nn.ConvTranspose2d(ngf*8*2, ngf*8, kernel_size=4, stride=2, padding=1), use_spectral_norm)
self.d3_norm = norm_layer(ngf*8)
self.d4_c = spectral_norm(nn.ConvTranspose2d(ngf*8*2, ngf*8, kernel_size=4, stride=2, padding=1), use_spectral_norm)
self.d4_norm = norm_layer(ngf*8)
self.d5_c = spectral_norm(nn.ConvTranspose2d(ngf*8*2, ngf*4, kernel_size=4, stride=2, padding=1), use_spectral_norm)
self.d5_norm = norm_layer(ngf*4)
self.d6_c = spectral_norm(nn.ConvTranspose2d(ngf*4*2, ngf*2, kernel_size=4, stride=2, padding=1), use_spectral_norm)
self.d6_norm = norm_layer(ngf*2)
self.d7_c = spectral_norm(nn.ConvTranspose2d(ngf*2*2, ngf, kernel_size=4, stride=2, padding=1), use_spectral_norm)
self.d7_norm = norm_layer(ngf)
self.d8_c = spectral_norm(nn.ConvTranspose2d(ngf*2, output_nc, kernel_size=4, stride=2, padding=1), use_spectral_norm)
# In this case, we have very flexible unet construction mode.
def forward(self, input):
# Encoder
# No norm on the first layer
e1 = self.e1_c(input)
e2 = self.e2_norm(self.e2_c(F.leaky_relu_(e1, negative_slope=0.2)))
e3 = self.e3_norm(self.e3_c(F.leaky_relu_(e2, negative_slope=0.2)))
e4 = self.e4_norm(self.e4_c(F.leaky_relu_(e3, negative_slope=0.2)))
e5 = self.e5_norm(self.e5_c(F.leaky_relu_(e4, negative_slope=0.2)))
e6 = self.e6_norm(self.e6_c(F.leaky_relu_(e5, negative_slope=0.2)))
e7 = self.e7_norm(self.e7_c(F.leaky_relu_(e6, negative_slope=0.2)))
# No norm on the inner_most layer
e8 = self.e8_c(F.leaky_relu_(e7, negative_slope=0.2))
# Decoder
d1 = self.d1_norm(self.d1_c(F.relu_(e8)))
d2 = self.d2_norm(self.d2_c(F.relu_(torch.cat([d1, e7], dim=1))))
d3 = self.d3_norm(self.d3_c(F.relu_(torch.cat([d2, e6], dim=1))))
d4 = self.d4_norm(self.d4_c(F.relu_(torch.cat([d3, e5], dim=1))))
d5 = self.d5_norm(self.d5_c(F.relu_(torch.cat([d4, e4], dim=1))))
d6 = self.d6_norm(self.d6_c(F.relu_(torch.cat([d5, e3], dim=1))))
d7 = self.d7_norm(self.d7_c(F.relu_(torch.cat([d6, e2], dim=1))))
# No norm on the last layer
d8 = self.d8_c(F.relu_(torch.cat([d7, e1], 1)))
d8 = torch.tanh(d8)
return d8
|
mindinsight/datavisual/data_transform/loader_generators/data_loader_generator.py | mindspore-ai/mindinsight | 216 | 11134339 | # Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
Data Loader Generator.
This module generate loaders from summary logs.
"""
import os
from mindinsight.datavisual.common.log import logger
from mindinsight.datavisual.common.exceptions import TrainJobNotExistError
from mindinsight.datavisual.data_access.file_handler import FileHandler
from mindinsight.datavisual.data_transform.data_loader import DataLoader
from mindinsight.datavisual.data_transform.loader_generators.loader_generator import MAX_DATA_LOADER_SIZE
from mindinsight.datavisual.data_transform.loader_generators.loader_struct import LoaderStruct
from mindinsight.datavisual.data_transform.loader_generators.loader_generator import LoaderGenerator
from mindinsight.datavisual.data_transform.summary_watcher import SummaryWatcher
from mindinsight.utils.exceptions import ParamValueError
from mindinsight.utils.exceptions import PathNotExistError
class DataLoaderGenerator(LoaderGenerator):
"""
DataLoaderGenerator generate a loader_dict of loader from summary logs.
    Each loader helps deal with the data of the events.
It helps DataManager to generate loaders.
"""
def __init__(self, summary_path):
"""
Init DataLoaderGenerator.
Args:
summary_path (str): A directory path, e.g. '/data/ImageNet/'.
"""
self._summary_path = self._check_and_normalize_summary_path(summary_path)
self._summary_watcher = SummaryWatcher()
def register_folder_analyzer(self, analyzer):
"""Register folder analyzer."""
self._summary_watcher.register_folder_analyzer(analyzer)
def _check_and_normalize_summary_path(self, summary_path):
"""
Check and normalize summary path.
Args:
summary_path (str): A directory path, e.g. '/data/ImageNet/'.
Returns:
str, normalized summary path.
"""
if summary_path is None:
logger.warning("Summary path is None. It will not init data loader generator.")
raise ParamValueError("Summary path is None.")
summary_path = os.path.realpath(summary_path)
return summary_path
def generate_loaders(self, loader_pool):
"""
Generate loader from summary path, if summary path is empty, will return empty list.
Args:
loader_pool (dict[str, LoaderStruct]): Current loader pool in data_manager.
Returns:
dict[str, LoaderStruct], a dict of `Loader`.
"""
loader_dict = {}
if not FileHandler.exists(self._summary_path):
logger.warning("Summary path does not exist. It will not start loading events data. "
"Current path is %r.", self._summary_path)
return loader_dict
dir_map_mtime_dict = {}
min_modify_time = None
summaries_info = self._summary_watcher.list_summary_directories(self._summary_path)
for item in summaries_info:
relative_path = item.get("relative_path")
current_dir = FileHandler.join(self._summary_path, relative_path)
dataloader = DataLoader(current_dir)
if not dataloader.has_valid_files():
logger.debug("Can not find valid train log file in folder %s , "
"will ignore.", relative_path)
continue
modify_time = item.get("update_time").timestamp()
# if loader exists in loader pool and newer time, update its time
loader_id = self._generate_loader_id(relative_path)
loader = loader_pool.get(loader_id)
if loader is not None and loader.latest_update_time > modify_time:
modify_time = loader.latest_update_time
if not min_modify_time:
# The first load, init min modify time
min_modify_time = modify_time
# We need to find `MAX_DATA_LOADER_SIZE` newly modified folders.
if len(dir_map_mtime_dict) < MAX_DATA_LOADER_SIZE:
if modify_time < min_modify_time:
min_modify_time = modify_time
dir_map_mtime_dict.update({relative_path: modify_time})
else:
if modify_time >= min_modify_time:
dir_map_mtime_dict.update({relative_path: modify_time})
sorted_dir_tuple = sorted(dir_map_mtime_dict.items(),
key=lambda d: d[1])[-MAX_DATA_LOADER_SIZE:]
for relative_path, modify_time in sorted_dir_tuple:
loader_id = self._generate_loader_id(relative_path)
loader = self._generate_loader_by_relative_path(relative_path)
loader_dict.update({loader_id: loader})
return loader_dict
def _generate_loader_by_relative_path(self, relative_path):
"""
Generate loader by relative path.
Args:
relative_path (str): Relative path of a summary directory, e.g. './log1'.
Returns:
dict[str, LoaderStruct], a dict of `Loader`.
"""
current_dir = os.path.realpath(FileHandler.join(self._summary_path, relative_path))
data_loader = DataLoader(current_dir)
loader_id = self._generate_loader_id(relative_path)
loader = LoaderStruct(loader_id=loader_id,
name=self._generate_loader_name(relative_path),
path=current_dir,
latest_update_time=FileHandler.file_stat(current_dir).mtime,
data_loader=data_loader)
return loader
def _generate_loader_id(self, relative_path):
"""
Generate loader id from relative path.
Args:
relative_path (str): Relative path of a summary directory, e.g. './log1'.
Returns:
str, loader_id for `Loader`.
"""
loader_id = relative_path
return loader_id
def _generate_loader_name(self, relative_path):
"""
Generate loader name from relative path.
Args:
relative_path (str): Relative path of a summary directory, e.g. './log1'.
Returns:
str, loader_name for `Loader`.
"""
loader_name = relative_path
return loader_name
def _get_relative_path_from_train_id(self, train_id):
"""
        Get the relative path from train_id.
Args:
train_id (str): Train ID of a summary directory, e.g. './log1'.
Returns:
str, relative path of `Loader`.
"""
relative_path = train_id
return relative_path
def check_train_job_exist(self, train_id):
"""
Check if train job exists.
Args:
train_id (str): Train ID of a summary directory, e.g. './log1'.
Returns:
bool, if train job exists, return True.
"""
if not self._is_train_id_valid(train_id):
return False
relative_path = self._get_relative_path_from_train_id(train_id)
if self._summary_watcher.is_summary_directory(self._summary_path, relative_path):
return True
return False
def _is_train_id_valid(self, train_id):
"""
Check if train_id is valid.
Args:
train_id (str): Train ID of a summary directory, e.g. './log1'.
Returns:
bool, if train id is valid, return True.
"""
if not train_id.startswith('./'):
logger.warning("The train_id does not start with './'.")
return False
if len(train_id.split("/")) > 2:
logger.warning("The train_id contains multiple '/'.")
return False
return True
def generate_loader_by_train_id(self, train_id):
"""
Generate loader by train_id.
Args:
train_id (str): Train ID of a summary directory, e.g. './log1'.
Returns:
dict[str, LoaderStruct], a dict of `Loader`.
"""
relative_path = self._get_relative_path_from_train_id(train_id)
try:
loader = self._generate_loader_by_relative_path(relative_path)
except PathNotExistError as ex:
raise TrainJobNotExistError(str(ex))
return loader
|
src/oci/compute_instance_agent/models/instance_agent_command_execution_summary.py | Manny27nyc/oci-python-sdk | 249 | 11134340 | <gh_stars>100-1000
# coding: utf-8
# Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class InstanceAgentCommandExecutionSummary(object):
"""
Execution details for a command.
"""
#: A constant which can be used with the delivery_state property of a InstanceAgentCommandExecutionSummary.
#: This constant has a value of "VISIBLE"
DELIVERY_STATE_VISIBLE = "VISIBLE"
#: A constant which can be used with the delivery_state property of a InstanceAgentCommandExecutionSummary.
#: This constant has a value of "PENDING"
DELIVERY_STATE_PENDING = "PENDING"
#: A constant which can be used with the delivery_state property of a InstanceAgentCommandExecutionSummary.
#: This constant has a value of "ACKED"
DELIVERY_STATE_ACKED = "ACKED"
#: A constant which can be used with the delivery_state property of a InstanceAgentCommandExecutionSummary.
#: This constant has a value of "ACKED_CANCELED"
DELIVERY_STATE_ACKED_CANCELED = "ACKED_CANCELED"
#: A constant which can be used with the delivery_state property of a InstanceAgentCommandExecutionSummary.
#: This constant has a value of "EXPIRED"
DELIVERY_STATE_EXPIRED = "EXPIRED"
#: A constant which can be used with the lifecycle_state property of a InstanceAgentCommandExecutionSummary.
#: This constant has a value of "ACCEPTED"
LIFECYCLE_STATE_ACCEPTED = "ACCEPTED"
#: A constant which can be used with the lifecycle_state property of a InstanceAgentCommandExecutionSummary.
#: This constant has a value of "IN_PROGRESS"
LIFECYCLE_STATE_IN_PROGRESS = "IN_PROGRESS"
#: A constant which can be used with the lifecycle_state property of a InstanceAgentCommandExecutionSummary.
#: This constant has a value of "SUCCEEDED"
LIFECYCLE_STATE_SUCCEEDED = "SUCCEEDED"
#: A constant which can be used with the lifecycle_state property of a InstanceAgentCommandExecutionSummary.
#: This constant has a value of "FAILED"
LIFECYCLE_STATE_FAILED = "FAILED"
#: A constant which can be used with the lifecycle_state property of a InstanceAgentCommandExecutionSummary.
#: This constant has a value of "TIMED_OUT"
LIFECYCLE_STATE_TIMED_OUT = "TIMED_OUT"
#: A constant which can be used with the lifecycle_state property of a InstanceAgentCommandExecutionSummary.
#: This constant has a value of "CANCELED"
LIFECYCLE_STATE_CANCELED = "CANCELED"
def __init__(self, **kwargs):
"""
Initializes a new InstanceAgentCommandExecutionSummary object with values from keyword arguments.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param instance_agent_command_id:
The value to assign to the instance_agent_command_id property of this InstanceAgentCommandExecutionSummary.
:type instance_agent_command_id: str
:param instance_id:
The value to assign to the instance_id property of this InstanceAgentCommandExecutionSummary.
:type instance_id: str
:param delivery_state:
The value to assign to the delivery_state property of this InstanceAgentCommandExecutionSummary.
Allowed values for this property are: "VISIBLE", "PENDING", "ACKED", "ACKED_CANCELED", "EXPIRED", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:type delivery_state: str
:param lifecycle_state:
The value to assign to the lifecycle_state property of this InstanceAgentCommandExecutionSummary.
Allowed values for this property are: "ACCEPTED", "IN_PROGRESS", "SUCCEEDED", "FAILED", "TIMED_OUT", "CANCELED", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:type lifecycle_state: str
:param time_created:
The value to assign to the time_created property of this InstanceAgentCommandExecutionSummary.
:type time_created: datetime
:param time_updated:
The value to assign to the time_updated property of this InstanceAgentCommandExecutionSummary.
:type time_updated: datetime
:param sequence_number:
The value to assign to the sequence_number property of this InstanceAgentCommandExecutionSummary.
:type sequence_number: int
:param display_name:
The value to assign to the display_name property of this InstanceAgentCommandExecutionSummary.
:type display_name: str
:param content:
The value to assign to the content property of this InstanceAgentCommandExecutionSummary.
:type content: oci.compute_instance_agent.models.InstanceAgentCommandExecutionOutputContent
"""
self.swagger_types = {
'instance_agent_command_id': 'str',
'instance_id': 'str',
'delivery_state': 'str',
'lifecycle_state': 'str',
'time_created': 'datetime',
'time_updated': 'datetime',
'sequence_number': 'int',
'display_name': 'str',
'content': 'InstanceAgentCommandExecutionOutputContent'
}
self.attribute_map = {
'instance_agent_command_id': 'instanceAgentCommandId',
'instance_id': 'instanceId',
'delivery_state': 'deliveryState',
'lifecycle_state': 'lifecycleState',
'time_created': 'timeCreated',
'time_updated': 'timeUpdated',
'sequence_number': 'sequenceNumber',
'display_name': 'displayName',
'content': 'content'
}
self._instance_agent_command_id = None
self._instance_id = None
self._delivery_state = None
self._lifecycle_state = None
self._time_created = None
self._time_updated = None
self._sequence_number = None
self._display_name = None
self._content = None
@property
def instance_agent_command_id(self):
"""
**[Required]** Gets the instance_agent_command_id of this InstanceAgentCommandExecutionSummary.
The `OCID`__ of the command.
__ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm
:return: The instance_agent_command_id of this InstanceAgentCommandExecutionSummary.
:rtype: str
"""
return self._instance_agent_command_id
@instance_agent_command_id.setter
def instance_agent_command_id(self, instance_agent_command_id):
"""
Sets the instance_agent_command_id of this InstanceAgentCommandExecutionSummary.
The `OCID`__ of the command.
__ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm
:param instance_agent_command_id: The instance_agent_command_id of this InstanceAgentCommandExecutionSummary.
:type: str
"""
self._instance_agent_command_id = instance_agent_command_id
@property
def instance_id(self):
"""
**[Required]** Gets the instance_id of this InstanceAgentCommandExecutionSummary.
The `OCID`__ of the instance.
__ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm
:return: The instance_id of this InstanceAgentCommandExecutionSummary.
:rtype: str
"""
return self._instance_id
@instance_id.setter
def instance_id(self, instance_id):
"""
Sets the instance_id of this InstanceAgentCommandExecutionSummary.
The `OCID`__ of the instance.
__ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm
:param instance_id: The instance_id of this InstanceAgentCommandExecutionSummary.
:type: str
"""
self._instance_id = instance_id
@property
def delivery_state(self):
"""
**[Required]** Gets the delivery_state of this InstanceAgentCommandExecutionSummary.
The command delivery state.
* `VISIBLE` - The command is visible to the instance.
* `PENDING` - The command is pending acknowledgment from the instance.
* `ACKED` - The command has been received and acknowledged by the instance.
* `ACKED_CANCELED` - The canceled command has been received and acknowledged by the instance.
* `EXPIRED` - The instance has not requested for commands and the command's delivery has expired.
Allowed values for this property are: "VISIBLE", "PENDING", "ACKED", "ACKED_CANCELED", "EXPIRED", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:return: The delivery_state of this InstanceAgentCommandExecutionSummary.
:rtype: str
"""
return self._delivery_state
@delivery_state.setter
def delivery_state(self, delivery_state):
"""
Sets the delivery_state of this InstanceAgentCommandExecutionSummary.
The command delivery state.
* `VISIBLE` - The command is visible to the instance.
* `PENDING` - The command is pending acknowledgment from the instance.
* `ACKED` - The command has been received and acknowledged by the instance.
* `ACKED_CANCELED` - The canceled command has been received and acknowledged by the instance.
* `EXPIRED` - The instance has not requested for commands and the command's delivery has expired.
:param delivery_state: The delivery_state of this InstanceAgentCommandExecutionSummary.
:type: str
"""
allowed_values = ["VISIBLE", "PENDING", "ACKED", "ACKED_CANCELED", "EXPIRED"]
if not value_allowed_none_or_none_sentinel(delivery_state, allowed_values):
delivery_state = 'UNKNOWN_ENUM_VALUE'
self._delivery_state = delivery_state
@property
def lifecycle_state(self):
"""
**[Required]** Gets the lifecycle_state of this InstanceAgentCommandExecutionSummary.
The command execution lifecycle state.
* `ACCEPTED` - The command has been accepted to run.
* `IN_PROGRESS` - The command is in progress.
* `SUCCEEDED` - The command was successfully executed.
* `FAILED` - The command failed to execute.
* `TIMED_OUT` - The command execution timed out.
* `CANCELED` - The command execution was canceled.
Allowed values for this property are: "ACCEPTED", "IN_PROGRESS", "SUCCEEDED", "FAILED", "TIMED_OUT", "CANCELED", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:return: The lifecycle_state of this InstanceAgentCommandExecutionSummary.
:rtype: str
"""
return self._lifecycle_state
@lifecycle_state.setter
def lifecycle_state(self, lifecycle_state):
"""
Sets the lifecycle_state of this InstanceAgentCommandExecutionSummary.
The command execution lifecycle state.
* `ACCEPTED` - The command has been accepted to run.
* `IN_PROGRESS` - The command is in progress.
* `SUCCEEDED` - The command was successfully executed.
* `FAILED` - The command failed to execute.
* `TIMED_OUT` - The command execution timed out.
* `CANCELED` - The command execution was canceled.
:param lifecycle_state: The lifecycle_state of this InstanceAgentCommandExecutionSummary.
:type: str
"""
allowed_values = ["ACCEPTED", "IN_PROGRESS", "SUCCEEDED", "FAILED", "TIMED_OUT", "CANCELED"]
if not value_allowed_none_or_none_sentinel(lifecycle_state, allowed_values):
lifecycle_state = 'UNKNOWN_ENUM_VALUE'
self._lifecycle_state = lifecycle_state
@property
def time_created(self):
"""
**[Required]** Gets the time_created of this InstanceAgentCommandExecutionSummary.
The date and time the command was created, in the format defined by
`RFC3339`__.
__ https://tools.ietf.org/html/rfc3339
:return: The time_created of this InstanceAgentCommandExecutionSummary.
:rtype: datetime
"""
return self._time_created
@time_created.setter
def time_created(self, time_created):
"""
Sets the time_created of this InstanceAgentCommandExecutionSummary.
The date and time the command was created, in the format defined by
`RFC3339`__.
__ https://tools.ietf.org/html/rfc3339
:param time_created: The time_created of this InstanceAgentCommandExecutionSummary.
:type: datetime
"""
self._time_created = time_created
@property
def time_updated(self):
"""
**[Required]** Gets the time_updated of this InstanceAgentCommandExecutionSummary.
The date and time the command was last updated, in the format defined by
`RFC3339`__.
__ https://tools.ietf.org/html/rfc3339
:return: The time_updated of this InstanceAgentCommandExecutionSummary.
:rtype: datetime
"""
return self._time_updated
@time_updated.setter
def time_updated(self, time_updated):
"""
Sets the time_updated of this InstanceAgentCommandExecutionSummary.
The date and time the command was last updated, in the format defined by
`RFC3339`__.
__ https://tools.ietf.org/html/rfc3339
:param time_updated: The time_updated of this InstanceAgentCommandExecutionSummary.
:type: datetime
"""
self._time_updated = time_updated
@property
def sequence_number(self):
"""
**[Required]** Gets the sequence_number of this InstanceAgentCommandExecutionSummary.
A large, non-consecutive number that Oracle Cloud Agent assigns to each created command.
:return: The sequence_number of this InstanceAgentCommandExecutionSummary.
:rtype: int
"""
return self._sequence_number
@sequence_number.setter
def sequence_number(self, sequence_number):
"""
Sets the sequence_number of this InstanceAgentCommandExecutionSummary.
A large, non-consecutive number that Oracle Cloud Agent assigns to each created command.
:param sequence_number: The sequence_number of this InstanceAgentCommandExecutionSummary.
:type: int
"""
self._sequence_number = sequence_number
@property
def display_name(self):
"""
Gets the display_name of this InstanceAgentCommandExecutionSummary.
A user-friendly name. Does not have to be unique.
:return: The display_name of this InstanceAgentCommandExecutionSummary.
:rtype: str
"""
return self._display_name
@display_name.setter
def display_name(self, display_name):
"""
Sets the display_name of this InstanceAgentCommandExecutionSummary.
A user-friendly name. Does not have to be unique.
:param display_name: The display_name of this InstanceAgentCommandExecutionSummary.
:type: str
"""
self._display_name = display_name
@property
def content(self):
"""
**[Required]** Gets the content of this InstanceAgentCommandExecutionSummary.
The execution output from a command.
:return: The content of this InstanceAgentCommandExecutionSummary.
:rtype: oci.compute_instance_agent.models.InstanceAgentCommandExecutionOutputContent
"""
return self._content
@content.setter
def content(self, content):
"""
Sets the content of this InstanceAgentCommandExecutionSummary.
The execution output from a command.
:param content: The content of this InstanceAgentCommandExecutionSummary.
:type: oci.compute_instance_agent.models.InstanceAgentCommandExecutionOutputContent
"""
self._content = content
def __repr__(self):
return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
|
examples/sugarscape_cg/sugarscape_cg/model.py | DoofCoder/mesa | 1,704 | 11134342
"""
Sugarscape Constant Growback Model
==================================
Replication of the model found in NetLogo:
<NAME>. and <NAME>. (2009). NetLogo Sugarscape 2 Constant Growback model.
http://ccl.northwestern.edu/netlogo/models/Sugarscape2ConstantGrowback.
Center for Connected Learning and Computer-Based Modeling,
Northwestern University, Evanston, IL.
"""
from mesa import Model
from mesa.space import MultiGrid
from mesa.datacollection import DataCollector
from .agents import SsAgent, Sugar
from .schedule import RandomActivationByBreed
class SugarscapeCg(Model):
"""
Sugarscape 2 Constant Growback
"""
verbose = True # Print-monitoring
def __init__(self, height=50, width=50, initial_population=100):
"""
Create a new Constant Growback model with the given parameters.
Args:
            initial_population: Number of agents to start with
"""
# Set parameters
self.height = height
self.width = width
self.initial_population = initial_population
self.schedule = RandomActivationByBreed(self)
self.grid = MultiGrid(self.height, self.width, torus=False)
self.datacollector = DataCollector(
{"SsAgent": lambda m: m.schedule.get_breed_count(SsAgent)}
)
# Create sugar
import numpy as np
sugar_distribution = np.genfromtxt("sugarscape_cg/sugar-map.txt")
for _, x, y in self.grid.coord_iter():
max_sugar = sugar_distribution[x, y]
sugar = Sugar((x, y), self, max_sugar)
self.grid.place_agent(sugar, (x, y))
self.schedule.add(sugar)
# Create agent:
for i in range(self.initial_population):
x = self.random.randrange(self.width)
y = self.random.randrange(self.height)
sugar = self.random.randrange(6, 25)
metabolism = self.random.randrange(2, 4)
vision = self.random.randrange(1, 6)
ssa = SsAgent((x, y), self, False, sugar, metabolism, vision)
self.grid.place_agent(ssa, (x, y))
self.schedule.add(ssa)
self.running = True
self.datacollector.collect(self)
def step(self):
self.schedule.step()
# collect data
self.datacollector.collect(self)
if self.verbose:
print([self.schedule.time, self.schedule.get_breed_count(SsAgent)])
def run_model(self, step_count=200):
if self.verbose:
print(
"Initial number Sugarscape Agent: ",
self.schedule.get_breed_count(SsAgent),
)
for i in range(step_count):
self.step()
if self.verbose:
print("")
print(
"Final number Sugarscape Agent: ",
self.schedule.get_breed_count(SsAgent),
)
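# Illustrative usage sketch (editorial, not part of the original example); assumes it
# is run from the example's root directory so that "sugarscape_cg/sugar-map.txt" resolves:
#
#     from sugarscape_cg.model import SugarscapeCg
#     model = SugarscapeCg(initial_population=100)
#     model.run_model(step_count=200)
#     counts = model.datacollector.get_model_vars_dataframe()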
|
testing/tools/connector_sent_data_parser.py | pvmsikrsna/wallaroo | 1,459 | 11134347 | import struct
import sys
try:
import wallaroo.experimental.connector_wire_messages as cwm
except ImportError:
    print("Couldn't import wallaroo.experimental.connector_wire_messages. Please ensure that machida/lib/ is on your PYTHONPATH")
    sys.exit(1)
filename = sys.argv[1]
print("parsing file: {}".format(filename))
f = open(filename, 'rb')
o = open('out.txt', 'wt')
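# Each record in the sender data file is length-framed: a 4-byte big-endian frame
# length followed by that many bytes of connector wire-protocol payload, which is
# decoded below with cwm.Frame.decode().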
while True:
hb = f.read(4)
if not hb:
break
h = struct.unpack('>I', hb)[0]
b = f.read(h)
if not b:
break
m = cwm.Frame.decode(b)
o.write("{}\n".format(m))
print("Parsed sender data written to 'out.txt'")
|
fpga/lib/pcie/tb/test_dma_if_pcie_us_wr_512.py | totuwei/corundum | 544 | 11134351
#!/usr/bin/env python
"""
Copyright (c) 2019 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from myhdl import *
import os
import pcie
import pcie_usp
import dma_ram
import axis_ep
module = 'dma_if_pcie_us_wr'
testbench = 'test_%s_512' % module
srcs = []
srcs.append("../rtl/%s.v" % module)
srcs.append("%s.v" % testbench)
src = ' '.join(srcs)
build_cmd = "iverilog -o %s.vvp %s" % (testbench, src)
def bench():
# Parameters
AXIS_PCIE_DATA_WIDTH = 512
AXIS_PCIE_KEEP_WIDTH = (AXIS_PCIE_DATA_WIDTH/32)
AXIS_PCIE_RQ_USER_WIDTH = 137
RQ_SEQ_NUM_WIDTH = 4 if AXIS_PCIE_RQ_USER_WIDTH == 60 else 6
RQ_SEQ_NUM_ENABLE = 1
SEG_COUNT = max(2, int(AXIS_PCIE_DATA_WIDTH*2/128))
SEG_DATA_WIDTH = AXIS_PCIE_DATA_WIDTH*2/SEG_COUNT
SEG_ADDR_WIDTH = 12
SEG_BE_WIDTH = int(SEG_DATA_WIDTH/8)
RAM_SEL_WIDTH = 2
RAM_ADDR_WIDTH = SEG_ADDR_WIDTH+(SEG_COUNT-1).bit_length()+(SEG_BE_WIDTH-1).bit_length()
PCIE_ADDR_WIDTH = 64
LEN_WIDTH = 16
TAG_WIDTH = 8
OP_TABLE_SIZE = 2**(RQ_SEQ_NUM_WIDTH-1)
TX_LIMIT = 2**(RQ_SEQ_NUM_WIDTH-1)
TX_FC_ENABLE = 1
# Inputs
clk = Signal(bool(0))
rst = Signal(bool(0))
current_test = Signal(intbv(0)[8:])
s_axis_rq_tdata = Signal(intbv(0)[AXIS_PCIE_DATA_WIDTH:])
s_axis_rq_tkeep = Signal(intbv(0)[AXIS_PCIE_KEEP_WIDTH:])
s_axis_rq_tvalid = Signal(bool(0))
s_axis_rq_tlast = Signal(bool(0))
s_axis_rq_tuser = Signal(intbv(0)[AXIS_PCIE_RQ_USER_WIDTH:])
m_axis_rq_tready = Signal(bool(0))
s_axis_rq_seq_num_0 = Signal(intbv(0)[RQ_SEQ_NUM_WIDTH:])
s_axis_rq_seq_num_valid_0 = Signal(bool(0))
s_axis_rq_seq_num_1 = Signal(intbv(0)[RQ_SEQ_NUM_WIDTH:])
s_axis_rq_seq_num_valid_1 = Signal(bool(0))
pcie_tx_fc_ph_av = Signal(intbv(0)[8:])
pcie_tx_fc_pd_av = Signal(intbv(0)[12:])
s_axis_write_desc_pcie_addr = Signal(intbv(0)[PCIE_ADDR_WIDTH:])
s_axis_write_desc_ram_sel = Signal(intbv(0)[RAM_SEL_WIDTH:])
s_axis_write_desc_ram_addr = Signal(intbv(0)[RAM_ADDR_WIDTH:])
s_axis_write_desc_len = Signal(intbv(0)[LEN_WIDTH:])
s_axis_write_desc_tag = Signal(intbv(0)[TAG_WIDTH:])
s_axis_write_desc_valid = Signal(bool(0))
ram_rd_cmd_ready = Signal(intbv(0)[SEG_COUNT:])
ram_rd_resp_data = Signal(intbv(0)[SEG_COUNT*SEG_DATA_WIDTH:])
ram_rd_resp_valid = Signal(intbv(0)[SEG_COUNT:])
enable = Signal(bool(0))
requester_id = Signal(intbv(0)[16:])
requester_id_enable = Signal(bool(0))
max_payload_size = Signal(intbv(0)[3:])
# Outputs
s_axis_rq_tready = Signal(bool(0))
m_axis_rq_tdata = Signal(intbv(0)[AXIS_PCIE_DATA_WIDTH:])
m_axis_rq_tkeep = Signal(intbv(0)[AXIS_PCIE_KEEP_WIDTH:])
m_axis_rq_tvalid = Signal(bool(0))
m_axis_rq_tlast = Signal(bool(0))
m_axis_rq_tuser = Signal(intbv(0)[AXIS_PCIE_RQ_USER_WIDTH:])
m_axis_rq_seq_num_0 = Signal(intbv(0)[RQ_SEQ_NUM_WIDTH:])
m_axis_rq_seq_num_valid_0 = Signal(bool(0))
m_axis_rq_seq_num_1 = Signal(intbv(0)[RQ_SEQ_NUM_WIDTH:])
m_axis_rq_seq_num_valid_1 = Signal(bool(0))
s_axis_write_desc_ready = Signal(bool(0))
m_axis_write_desc_status_tag = Signal(intbv(0)[TAG_WIDTH:])
m_axis_write_desc_status_valid = Signal(bool(0))
ram_rd_cmd_sel = Signal(intbv(0)[SEG_COUNT*RAM_SEL_WIDTH:])
ram_rd_cmd_addr = Signal(intbv(0)[SEG_COUNT*SEG_ADDR_WIDTH:])
ram_rd_cmd_valid = Signal(intbv(0)[SEG_COUNT:])
ram_rd_resp_ready = Signal(intbv(0)[SEG_COUNT:])
# Clock and Reset Interface
user_clk=Signal(bool(0))
user_reset=Signal(bool(0))
sys_clk=Signal(bool(0))
sys_reset=Signal(bool(0))
# PCIe DMA RAM
dma_ram_inst = dma_ram.PSDPRam(2**16)
dma_ram_pause = Signal(bool(0))
dma_ram_port0 = dma_ram_inst.create_read_ports(
user_clk,
ram_rd_cmd_addr=ram_rd_cmd_addr,
ram_rd_cmd_valid=ram_rd_cmd_valid,
ram_rd_cmd_ready=ram_rd_cmd_ready,
ram_rd_resp_data=ram_rd_resp_data,
ram_rd_resp_valid=ram_rd_resp_valid,
ram_rd_resp_ready=ram_rd_resp_ready,
pause=dma_ram_pause,
name='port0'
)
# sources and sinks
write_desc_source = axis_ep.AXIStreamSource()
write_desc_source_logic = write_desc_source.create_logic(
user_clk,
user_reset,
tdata=(s_axis_write_desc_pcie_addr, s_axis_write_desc_ram_sel, s_axis_write_desc_ram_addr, s_axis_write_desc_len, s_axis_write_desc_tag),
tvalid=s_axis_write_desc_valid,
tready=s_axis_write_desc_ready,
name='write_desc_source'
)
write_desc_status_sink = axis_ep.AXIStreamSink()
write_desc_status_sink_logic = write_desc_status_sink.create_logic(
user_clk,
user_reset,
tdata=(m_axis_write_desc_status_tag,),
tvalid=m_axis_write_desc_status_valid,
name='write_desc_status_sink'
)
# PCIe devices
rc = pcie.RootComplex()
mem_base, mem_data = rc.alloc_region(16*1024*1024)
dev = pcie_usp.UltrascalePlusPCIe()
dev.pcie_generation = 3
dev.pcie_link_width = 16
dev.user_clock_frequency = 256e6
rc.make_port().connect(dev)
cq_pause = Signal(bool(0))
cc_pause = Signal(bool(0))
rq_pause = Signal(bool(0))
rc_pause = Signal(bool(0))
pcie_logic = dev.create_logic(
# Completer reQuest Interface
m_axis_cq_tdata=Signal(intbv(0)[AXIS_PCIE_DATA_WIDTH:]),
m_axis_cq_tuser=Signal(intbv(0)[183:]),
m_axis_cq_tlast=Signal(bool(0)),
m_axis_cq_tkeep=Signal(intbv(0)[AXIS_PCIE_KEEP_WIDTH:]),
m_axis_cq_tvalid=Signal(bool(0)),
m_axis_cq_tready=Signal(bool(1)),
pcie_cq_np_req=Signal(intbv(3)[2:]),
pcie_cq_np_req_count=Signal(intbv(0)[6:]),
# Completer Completion Interface
s_axis_cc_tdata=Signal(intbv(0)[AXIS_PCIE_DATA_WIDTH:]),
s_axis_cc_tuser=Signal(intbv(0)[81:]),
s_axis_cc_tlast=Signal(bool(0)),
s_axis_cc_tkeep=Signal(intbv(0)[AXIS_PCIE_KEEP_WIDTH:]),
s_axis_cc_tvalid=Signal(bool(0)),
s_axis_cc_tready=Signal(bool(0)),
# Requester reQuest Interface
s_axis_rq_tdata=m_axis_rq_tdata,
s_axis_rq_tuser=m_axis_rq_tuser,
s_axis_rq_tlast=m_axis_rq_tlast,
s_axis_rq_tkeep=m_axis_rq_tkeep,
s_axis_rq_tvalid=m_axis_rq_tvalid,
s_axis_rq_tready=m_axis_rq_tready,
pcie_rq_seq_num0=s_axis_rq_seq_num_0,
pcie_rq_seq_num_vld0=s_axis_rq_seq_num_valid_0,
pcie_rq_seq_num1=s_axis_rq_seq_num_1,
pcie_rq_seq_num_vld1=s_axis_rq_seq_num_valid_1,
# pcie_rq_tag0=pcie_rq_tag0,
# pcie_rq_tag1=pcie_rq_tag1,
# pcie_rq_tag_av=pcie_rq_tag_av,
# pcie_rq_tag_vld0=pcie_rq_tag_vld0,
# pcie_rq_tag_vld1=pcie_rq_tag_vld1,
# Requester Completion Interface
m_axis_rc_tdata=Signal(intbv(0)[AXIS_PCIE_DATA_WIDTH:]),
m_axis_rc_tuser=Signal(intbv(0)[161:]),
m_axis_rc_tlast=Signal(bool(0)),
m_axis_rc_tkeep=Signal(intbv(0)[AXIS_PCIE_KEEP_WIDTH:]),
m_axis_rc_tvalid=Signal(bool(0)),
m_axis_rc_tready=Signal(bool(0)),
# Transmit Flow Control Interface
# pcie_tfc_nph_av=pcie_tfc_nph_av,
# pcie_tfc_npd_av=pcie_tfc_npd_av,
# Configuration Flow Control Interface
cfg_fc_ph=pcie_tx_fc_ph_av,
cfg_fc_pd=pcie_tx_fc_pd_av,
#cfg_fc_nph=cfg_fc_nph,
#cfg_fc_npd=cfg_fc_npd,
#cfg_fc_cplh=cfg_fc_cplh,
#cfg_fc_cpld=cfg_fc_cpld,
cfg_fc_sel=Signal(intbv(0b100)[3:]),
# Configuration Control Interface
# cfg_hot_reset_in=cfg_hot_reset_in,
# cfg_hot_reset_out=cfg_hot_reset_out,
# cfg_config_space_enable=cfg_config_space_enable,
# cfg_dsn=cfg_dsn,
# cfg_ds_port_number=cfg_ds_port_number,
# cfg_ds_bus_number=cfg_ds_bus_number,
# cfg_ds_device_number=cfg_ds_device_number,
# cfg_ds_function_number=cfg_ds_function_number,
# cfg_power_state_change_ack=cfg_power_state_change_ack,
# cfg_power_state_change_interrupt=cfg_power_state_change_interrupt,
# cfg_err_cor_in=cfg_err_cor_in,
# cfg_err_uncor_in=cfg_err_uncor_in,
# cfg_flr_done=cfg_flr_done,
# cfg_vf_flr_done=cfg_vf_flr_done,
# cfg_flr_in_process=cfg_flr_in_process,
# cfg_vf_flr_in_process=cfg_vf_flr_in_process,
# cfg_req_pm_transition_l23_ready=cfg_req_pm_transition_l23_ready,
# cfg_link_training_enable=cfg_link_training_enable,
# Clock and Reset Interface
user_clk=user_clk,
user_reset=user_reset,
#user_lnk_up=user_lnk_up,
sys_clk=sys_clk,
sys_clk_gt=sys_clk,
sys_reset=sys_reset,
cq_pause=cq_pause,
cc_pause=cc_pause,
rq_pause=rq_pause,
rc_pause=rc_pause
)
# DUT
if os.system(build_cmd):
raise Exception("Error running build command")
dut = Cosimulation(
"vvp -m myhdl %s.vvp -lxt2" % testbench,
clk=user_clk,
rst=user_reset,
current_test=current_test,
s_axis_rq_tdata=s_axis_rq_tdata,
s_axis_rq_tkeep=s_axis_rq_tkeep,
s_axis_rq_tvalid=s_axis_rq_tvalid,
s_axis_rq_tready=s_axis_rq_tready,
s_axis_rq_tlast=s_axis_rq_tlast,
s_axis_rq_tuser=s_axis_rq_tuser,
m_axis_rq_tdata=m_axis_rq_tdata,
m_axis_rq_tkeep=m_axis_rq_tkeep,
m_axis_rq_tvalid=m_axis_rq_tvalid,
m_axis_rq_tready=m_axis_rq_tready,
m_axis_rq_tlast=m_axis_rq_tlast,
m_axis_rq_tuser=m_axis_rq_tuser,
s_axis_rq_seq_num_0=s_axis_rq_seq_num_0,
s_axis_rq_seq_num_valid_0=s_axis_rq_seq_num_valid_0,
s_axis_rq_seq_num_1=s_axis_rq_seq_num_1,
s_axis_rq_seq_num_valid_1=s_axis_rq_seq_num_valid_1,
m_axis_rq_seq_num_0=m_axis_rq_seq_num_0,
m_axis_rq_seq_num_valid_0=m_axis_rq_seq_num_valid_0,
m_axis_rq_seq_num_1=m_axis_rq_seq_num_1,
m_axis_rq_seq_num_valid_1=m_axis_rq_seq_num_valid_1,
pcie_tx_fc_ph_av=pcie_tx_fc_ph_av,
pcie_tx_fc_pd_av=pcie_tx_fc_pd_av,
s_axis_write_desc_pcie_addr=s_axis_write_desc_pcie_addr,
s_axis_write_desc_ram_sel=s_axis_write_desc_ram_sel,
s_axis_write_desc_ram_addr=s_axis_write_desc_ram_addr,
s_axis_write_desc_len=s_axis_write_desc_len,
s_axis_write_desc_tag=s_axis_write_desc_tag,
s_axis_write_desc_valid=s_axis_write_desc_valid,
s_axis_write_desc_ready=s_axis_write_desc_ready,
m_axis_write_desc_status_tag=m_axis_write_desc_status_tag,
m_axis_write_desc_status_valid=m_axis_write_desc_status_valid,
ram_rd_cmd_sel=ram_rd_cmd_sel,
ram_rd_cmd_addr=ram_rd_cmd_addr,
ram_rd_cmd_valid=ram_rd_cmd_valid,
ram_rd_cmd_ready=ram_rd_cmd_ready,
ram_rd_resp_data=ram_rd_resp_data,
ram_rd_resp_valid=ram_rd_resp_valid,
ram_rd_resp_ready=ram_rd_resp_ready,
enable=enable,
requester_id=requester_id,
requester_id_enable=requester_id_enable,
max_payload_size=max_payload_size
)
@always(delay(4))
def clkgen():
clk.next = not clk
@always_comb
def clk_logic():
sys_clk.next = clk
sys_reset.next = not rst
cq_pause_toggle = Signal(bool(0))
cc_pause_toggle = Signal(bool(0))
rq_pause_toggle = Signal(bool(0))
rc_pause_toggle = Signal(bool(0))
@instance
def pause_toggle():
while True:
if (cq_pause_toggle or cc_pause_toggle or rq_pause_toggle or rc_pause_toggle):
cq_pause.next = cq_pause_toggle
cc_pause.next = cc_pause_toggle
rq_pause.next = rq_pause_toggle
rc_pause.next = rc_pause_toggle
yield user_clk.posedge
yield user_clk.posedge
yield user_clk.posedge
cq_pause.next = 0
cc_pause.next = 0
rq_pause.next = 0
rc_pause.next = 0
yield user_clk.posedge
@instance
def check():
yield delay(100)
yield clk.posedge
rst.next = 1
yield clk.posedge
rst.next = 0
yield clk.posedge
yield delay(100)
yield clk.posedge
# testbench stimulus
cur_tag = 1
max_payload_size.next = 0
enable.next = 1
yield user_clk.posedge
print("test 1: enumeration")
current_test.next = 1
yield rc.enumerate(enable_bus_mastering=True)
yield delay(100)
yield user_clk.posedge
print("test 2: PCIe write")
current_test.next = 2
pcie_addr = 0x00000000
ram_addr = 0x00000000
test_data = b'\x11\x22\x33\x44'
dma_ram_inst.write_mem(ram_addr, test_data)
data = dma_ram_inst.read_mem(ram_addr, 32)
for i in range(0, len(data), 16):
print(" ".join(("{:02x}".format(c) for c in bytearray(data[i:i+16]))))
write_desc_source.send([(mem_base+pcie_addr, 0, ram_addr, len(test_data), cur_tag)])
yield write_desc_status_sink.wait(1000)
yield delay(50)
status = write_desc_status_sink.recv()
print(status)
assert status.data[0][0] == cur_tag
data = mem_data[pcie_addr:pcie_addr+32]
for i in range(0, len(data), 16):
print(" ".join(("{:02x}".format(c) for c in bytearray(data[i:i+16]))))
assert mem_data[pcie_addr:pcie_addr+len(test_data)] == test_data
cur_tag = (cur_tag + 1) % 256
yield delay(100)
yield user_clk.posedge
print("test 3: various writes")
current_test.next = 3
for length in list(range(1,67))+list(range(128-4,128+4))+[1024]:
for pcie_offset in list(range(8,13))+list(range(4096-4,4096+4)):
for ram_offset in list(range(8,137))+list(range(4096-128,4096)):
for pause in [False, True]:
print("length %d, pcie_offset %d, ram_offset %d"% (length, pcie_offset, ram_offset))
#pcie_addr = length * 0x100000000 + pcie_offset * 0x10000 + offset
pcie_addr = pcie_offset
ram_addr = ram_offset
test_data = bytearray([x%256 for x in range(length)])
dma_ram_inst.write_mem(ram_addr & 0xffff80, b'\x55'*(len(test_data)+256))
mem_data[(pcie_addr-1) & 0xffff80:((pcie_addr-1) & 0xffff80)+len(test_data)+256] = b'\xaa'*(len(test_data)+256)
dma_ram_inst.write_mem(ram_addr, test_data)
data = dma_ram_inst.read_mem(ram_addr&0xfffff0, 64)
for i in range(0, len(data), 16):
print(" ".join(("{:02x}".format(c) for c in bytearray(data[i:i+16]))))
rq_pause_toggle.next = pause
write_desc_source.send([(mem_base+pcie_addr, 0, ram_addr, len(test_data), cur_tag)])
yield write_desc_status_sink.wait(4000)
yield delay(50)
rq_pause_toggle.next = 0
status = write_desc_status_sink.recv()
print(status)
assert status.data[0][0] == cur_tag
data = mem_data[pcie_addr&0xfffff0:(pcie_addr&0xfffff0)+64]
for i in range(0, len(data), 16):
print(" ".join(("{:02x}".format(c) for c in bytearray(data[i:i+16]))))
print(mem_data[pcie_addr-1:pcie_addr+len(test_data)+1])
assert mem_data[pcie_addr-1:pcie_addr+len(test_data)+1] == b'\xaa'+test_data+b'\xaa'
cur_tag = (cur_tag + 1) % 256
yield delay(100)
raise StopSimulation
return instances()
def test_bench():
sim = Simulation(bench())
sim.run()
if __name__ == '__main__':
print("Running test...")
test_bench()
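# Editorial note (not in the original testbench): running this file directly builds
# test_dma_if_pcie_us_wr_512.vvp with Icarus Verilog and cosimulates it through
# MyHDL's VPI plugin ("vvp -m myhdl"), so iverilog and vvp must be on PATH.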
|
parlai/tasks/interactive/worlds.py | twstewart42/ParlAI | 9,228 | 11134369 | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from copy import deepcopy
from typing import Optional
from parlai.core.opt import Opt
from parlai.core.params import ParlaiParser
from parlai.core.worlds import DialogPartnerWorld, validate
from parlai.core.message import Message
class InteractiveWorld(DialogPartnerWorld):
"""
Simple interactive world involving just two agents talking.
In more sophisticated worlds the environment could supply information, e.g. in
tasks/convai2 both agents are given personas, so a world class should be written
especially for those cases for given tasks.
"""
@classmethod
def add_cmdline_args(
cls, parser: ParlaiParser, partial_opt: Optional[Opt] = None
) -> ParlaiParser:
# no default args
return parser
def __init__(self, opt, agents, shared=None):
super().__init__(opt, agents, shared)
self.init_contexts(shared=shared)
self.turn_cnt = 0
def init_contexts(self, shared=None):
"""
Override to load or instantiate contexts to be used to seed the chat.
"""
pass
def get_contexts(self):
"""
Override to return a pair of contexts with which to seed the episode.
This function will be called before the first turn of every episode.
"""
return ['', '']
def finalize_episode(self):
print("CHAT DONE ")
if not self.epoch_done():
print("\n... preparing new chat... \n")
def parley(self):
"""
Agent 0 goes first.
Alternate between the two agents.
"""
if self.turn_cnt == 0:
self.p1, self.p2 = self.get_contexts()
acts = self.acts
agents = self.agents
if self.turn_cnt == 0 and self.p1 != '':
# add the context on to the first message to agent 0
context_act = Message(
{'id': 'context', 'text': self.p1, 'episode_done': False}
)
agents[0].observe(validate(context_act))
try:
act = deepcopy(agents[0].act())
except StopIteration:
self.reset()
self.finalize_episode()
self.turn_cnt = 0
return
acts[0] = act
if self.turn_cnt == 0 and self.p2 != '':
# add the context on to the first message to agent 1
context_act = Message(
{'id': 'context', 'text': self.p2, 'episode_done': False}
)
agents[1].observe(validate(context_act))
agents[1].observe(validate(act))
acts[1] = agents[1].act()
agents[0].observe(validate(acts[1]))
self.update_counters()
self.turn_cnt += 1
if act['episode_done']:
self.finalize_episode()
self.turn_cnt = 0
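# Editorial note (not from the original file): this world backs ParlAI's interactive
# mode (e.g. `parlai interactive -m <model>`; exact invocation assumed). Tasks
# customize it by overriding init_contexts()/get_contexts() to seed each chat.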
|
CalibTracker/SiStripESProducers/python/fake/Phase2TrackerConfigurableCablingESSource_cfi.py | ckamtsikis/cmssw | 852 | 11134375 | import FWCore.ParameterSet.Config as cms
Phase2TrackerCabling = cms.ESSource("Phase2TrackerCablingCfgESSource",
modules = cms.VPSet(
cms.PSet( # Phase2 tracker module connection
moduleType=cms.string("2S"),
detid=cms.uint32(50000),
gbtid=cms.uint32(10),
fedid=cms.uint32(0),
fedch=cms.uint32(0),
powerGroup=cms.uint32(0),
coolingLoop=cms.uint32(0)
),
cms.PSet( # Phase2 tracker module connection
moduleType=cms.string("PS"),
detid=cms.uint32(51000),
gbtid=cms.uint32(11),
fedid=cms.uint32(0),
fedch=cms.uint32(1),
powerGroup=cms.uint32(1),
coolingLoop=cms.uint32(0)
),
)
)
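# Illustrative usage sketch (editorial assumption, not part of this fragment): a cfg
# would typically pull in this fake cabling source with
#
#     process.load("CalibTracker.SiStripESProducers.fake.Phase2TrackerConfigurableCablingESSource_cfi")
#
# after which the Phase2TrackerCabling defined above is served to the EventSetup.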
|
fuzzers/026-bram-data/generate.py | rw1nkler/prjxray | 583 | 11134379 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017-2020 The Project X-Ray Authors.
#
# Use of this source code is governed by a ISC-style
# license that can be found in the LICENSE file or at
# https://opensource.org/licenses/ISC
#
# SPDX-License-Identifier: ISC
import json
from prjxray.segmaker import Segmaker
BITS_PER_PARAM = 256
NUM_INITP_PARAMS = 8
NUM_INIT_PARAMS = 0x40
BITS_PER_SITE = BITS_PER_PARAM * (NUM_INITP_PARAMS + NUM_INIT_PARAMS)
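# For reference: each BRAM site therefore carries 8 INITP_xx plus 0x40 = 64 INIT_xx
# parameters of 256 bits each, i.e. BITS_PER_SITE = 256 * 72 = 18432 candidate bits.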
def main():
segmk = Segmaker("design.bits")
segmk.set_def_bt('BLOCK_RAM')
print("Loading tags")
'''
'''
with open('params.json') as f:
params = json.load(f)
for param in params:
for initp in range(NUM_INITP_PARAMS):
p = 'INITP_{:02X}'.format(initp)
val = param[p]
for bit in range(BITS_PER_PARAM):
segmk.add_site_tag(
param['site'], "{p}[{bit:03d}]".format(
p=p,
bit=bit,
), val & (1 << bit) != 0)
for init in range(NUM_INIT_PARAMS):
p = 'INIT_{:02X}'.format(init)
val = param[p]
for bit in range(BITS_PER_PARAM):
segmk.add_site_tag(
param['site'], "{p}[{bit:03d}]".format(
p=p,
bit=bit,
), val & (1 << bit) != 0)
segmk.compile()
segmk.write()
if __name__ == "__main__":
main()
|
nidaqmx/_task_modules/channels/do_channel.py | stafak/nidaqmx-python | 252 | 11134384 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import ctypes
import numpy
from nidaqmx._lib import lib_importer, ctypes_byte_str, c_bool32
from nidaqmx.errors import (
check_for_error, is_string_buffer_too_small, is_array_buffer_too_small)
from nidaqmx._task_modules.channels.channel import Channel
from nidaqmx.constants import (
ActiveOrInactiveEdgeSelection, DataTransferActiveTransferMode,
DigitalDriveType, Level, LogicFamily, OutputDataTransferCondition)
class DOChannel(Channel):
"""
Represents one or more digital output virtual channels and their properties.
"""
__slots__ = []
def __repr__(self):
return 'DOChannel(name={0})'.format(self._name)
@property
def do_data_xfer_mech(self):
"""
:class:`nidaqmx.constants.DataTransferActiveTransferMode`:
Specifies the data transfer mode for the device.
"""
val = ctypes.c_int()
cfunc = lib_importer.windll.DAQmxGetDODataXferMech
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes_byte_str,
ctypes.POINTER(ctypes.c_int)]
error_code = cfunc(
self._handle, self._name, ctypes.byref(val))
check_for_error(error_code)
return DataTransferActiveTransferMode(val.value)
@do_data_xfer_mech.setter
def do_data_xfer_mech(self, val):
val = val.value
cfunc = lib_importer.windll.DAQmxSetDODataXferMech
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes_byte_str,
ctypes.c_int]
error_code = cfunc(
self._handle, self._name, val)
check_for_error(error_code)
@do_data_xfer_mech.deleter
def do_data_xfer_mech(self):
cfunc = lib_importer.windll.DAQmxResetDODataXferMech
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes_byte_str]
error_code = cfunc(
self._handle, self._name)
check_for_error(error_code)
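    # Illustrative usage sketch (editorial; the device name "Dev1" is an assumption,
    # not part of this generated module):
    #
    #     import nidaqmx
    #     from nidaqmx.constants import DataTransferActiveTransferMode
    #     with nidaqmx.Task() as task:
    #         ch = task.do_channels.add_do_chan("Dev1/port0/line0")
    #         ch.do_data_xfer_mech = DataTransferActiveTransferMode.DMA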
@property
def do_data_xfer_req_cond(self):
"""
:class:`nidaqmx.constants.OutputDataTransferCondition`:
Specifies under what condition to transfer data from the
buffer to the onboard memory of the device.
"""
val = ctypes.c_int()
cfunc = lib_importer.windll.DAQmxGetDODataXferReqCond
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes_byte_str,
ctypes.POINTER(ctypes.c_int)]
error_code = cfunc(
self._handle, self._name, ctypes.byref(val))
check_for_error(error_code)
return OutputDataTransferCondition(val.value)
@do_data_xfer_req_cond.setter
def do_data_xfer_req_cond(self, val):
val = val.value
cfunc = lib_importer.windll.DAQmxSetDODataXferReqCond
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes_byte_str,
ctypes.c_int]
error_code = cfunc(
self._handle, self._name, val)
check_for_error(error_code)
@do_data_xfer_req_cond.deleter
def do_data_xfer_req_cond(self):
cfunc = lib_importer.windll.DAQmxResetDODataXferReqCond
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes_byte_str]
error_code = cfunc(
self._handle, self._name)
check_for_error(error_code)
@property
def do_generate_on(self):
"""
:class:`nidaqmx.constants.ActiveOrInactiveEdgeSelection`:
Specifies on which edge of the sample clock to generate
samples.
"""
val = ctypes.c_int()
cfunc = lib_importer.windll.DAQmxGetDOGenerateOn
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes_byte_str,
ctypes.POINTER(ctypes.c_int)]
error_code = cfunc(
self._handle, self._name, ctypes.byref(val))
check_for_error(error_code)
return ActiveOrInactiveEdgeSelection(val.value)
@do_generate_on.setter
def do_generate_on(self, val):
val = val.value
cfunc = lib_importer.windll.DAQmxSetDOGenerateOn
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes_byte_str,
ctypes.c_int]
error_code = cfunc(
self._handle, self._name, val)
check_for_error(error_code)
@do_generate_on.deleter
def do_generate_on(self):
cfunc = lib_importer.windll.DAQmxResetDOGenerateOn
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes_byte_str]
error_code = cfunc(
self._handle, self._name)
check_for_error(error_code)
@property
def do_invert_lines(self):
"""
bool: Specifies whether to invert the lines in the channel. If
you set this property to True, the lines are at high logic
when off and at low logic when on.
"""
val = c_bool32()
cfunc = lib_importer.windll.DAQmxGetDOInvertLines
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes_byte_str,
ctypes.POINTER(c_bool32)]
error_code = cfunc(
self._handle, self._name, ctypes.byref(val))
check_for_error(error_code)
return val.value
@do_invert_lines.setter
def do_invert_lines(self, val):
cfunc = lib_importer.windll.DAQmxSetDOInvertLines
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes_byte_str, c_bool32]
error_code = cfunc(
self._handle, self._name, val)
check_for_error(error_code)
@do_invert_lines.deleter
def do_invert_lines(self):
cfunc = lib_importer.windll.DAQmxResetDOInvertLines
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes_byte_str]
error_code = cfunc(
self._handle, self._name)
check_for_error(error_code)
@property
def do_line_states_done_state(self):
"""
:class:`nidaqmx.constants.Level`: Specifies the state of the
lines in a digital output task when the task completes
execution.
"""
val = ctypes.c_int()
cfunc = lib_importer.windll.DAQmxGetDOLineStatesDoneState
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes_byte_str,
ctypes.POINTER(ctypes.c_int)]
error_code = cfunc(
self._handle, self._name, ctypes.byref(val))
check_for_error(error_code)
return Level(val.value)
@do_line_states_done_state.setter
def do_line_states_done_state(self, val):
val = val.value
cfunc = lib_importer.windll.DAQmxSetDOLineStatesDoneState
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes_byte_str,
ctypes.c_int]
error_code = cfunc(
self._handle, self._name, val)
check_for_error(error_code)
@do_line_states_done_state.deleter
def do_line_states_done_state(self):
cfunc = lib_importer.windll.DAQmxResetDOLineStatesDoneState
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes_byte_str]
error_code = cfunc(
self._handle, self._name)
check_for_error(error_code)
@property
def do_line_states_paused_state(self):
"""
:class:`nidaqmx.constants.Level`: Specifies the state of the
lines in a digital output task when the task pauses.
"""
val = ctypes.c_int()
cfunc = lib_importer.windll.DAQmxGetDOLineStatesPausedState
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes_byte_str,
ctypes.POINTER(ctypes.c_int)]
error_code = cfunc(
self._handle, self._name, ctypes.byref(val))
check_for_error(error_code)
return Level(val.value)
@do_line_states_paused_state.setter
def do_line_states_paused_state(self, val):
val = val.value
cfunc = lib_importer.windll.DAQmxSetDOLineStatesPausedState
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes_byte_str,
ctypes.c_int]
error_code = cfunc(
self._handle, self._name, val)
check_for_error(error_code)
@do_line_states_paused_state.deleter
def do_line_states_paused_state(self):
cfunc = lib_importer.windll.DAQmxResetDOLineStatesPausedState
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes_byte_str]
error_code = cfunc(
self._handle, self._name)
check_for_error(error_code)
@property
def do_line_states_start_state(self):
"""
:class:`nidaqmx.constants.Level`: Specifies the state of the
lines in a digital output task when the task starts.
"""
val = ctypes.c_int()
cfunc = lib_importer.windll.DAQmxGetDOLineStatesStartState
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes_byte_str,
ctypes.POINTER(ctypes.c_int)]
error_code = cfunc(
self._handle, self._name, ctypes.byref(val))
check_for_error(error_code)
return Level(val.value)
@do_line_states_start_state.setter
def do_line_states_start_state(self, val):
val = val.value
cfunc = lib_importer.windll.DAQmxSetDOLineStatesStartState
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes_byte_str,
ctypes.c_int]
error_code = cfunc(
self._handle, self._name, val)
check_for_error(error_code)
@do_line_states_start_state.deleter
def do_line_states_start_state(self):
cfunc = lib_importer.windll.DAQmxResetDOLineStatesStartState
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes_byte_str]
error_code = cfunc(
self._handle, self._name)
check_for_error(error_code)
@property
def do_logic_family(self):
"""
:class:`nidaqmx.constants.LogicFamily`: Specifies the logic
family to use for generation. A logic family corresponds to
voltage thresholds that are compatible with a group of
voltage standards. Refer to the device documentation for
information on the logic high and logic low voltages for
these logic families.
"""
val = ctypes.c_int()
cfunc = lib_importer.windll.DAQmxGetDOLogicFamily
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes_byte_str,
ctypes.POINTER(ctypes.c_int)]
error_code = cfunc(
self._handle, self._name, ctypes.byref(val))
check_for_error(error_code)
return LogicFamily(val.value)
@do_logic_family.setter
def do_logic_family(self, val):
val = val.value
cfunc = lib_importer.windll.DAQmxSetDOLogicFamily
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes_byte_str,
ctypes.c_int]
error_code = cfunc(
self._handle, self._name, val)
check_for_error(error_code)
@do_logic_family.deleter
def do_logic_family(self):
cfunc = lib_importer.windll.DAQmxResetDOLogicFamily
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes_byte_str]
error_code = cfunc(
self._handle, self._name)
check_for_error(error_code)
@property
def do_mem_map_enable(self):
"""
bool: Specifies for NI-DAQmx to map hardware registers to the
memory space of the application, if possible. Normally, NI-
DAQmx maps hardware registers to memory accessible only to
the kernel. Mapping the registers to the memory space of the
application increases performance. However, if the
application accesses the memory space mapped to the
registers, it can adversely affect the operation of the
device and possibly result in a system crash.
"""
val = c_bool32()
cfunc = lib_importer.windll.DAQmxGetDOMemMapEnable
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes_byte_str,
ctypes.POINTER(c_bool32)]
error_code = cfunc(
self._handle, self._name, ctypes.byref(val))
check_for_error(error_code)
return val.value
@do_mem_map_enable.setter
def do_mem_map_enable(self, val):
cfunc = lib_importer.windll.DAQmxSetDOMemMapEnable
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes_byte_str, c_bool32]
error_code = cfunc(
self._handle, self._name, val)
check_for_error(error_code)
@do_mem_map_enable.deleter
def do_mem_map_enable(self):
cfunc = lib_importer.windll.DAQmxResetDOMemMapEnable
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes_byte_str]
error_code = cfunc(
self._handle, self._name)
check_for_error(error_code)
@property
def do_num_lines(self):
"""
int: Indicates the number of digital lines in the channel.
"""
val = ctypes.c_uint()
cfunc = lib_importer.windll.DAQmxGetDONumLines
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes_byte_str,
ctypes.POINTER(ctypes.c_uint)]
error_code = cfunc(
self._handle, self._name, ctypes.byref(val))
check_for_error(error_code)
return val.value
@property
def do_output_drive_type(self):
"""
:class:`nidaqmx.constants.DigitalDriveType`: Specifies the drive
type for digital output channels.
"""
val = ctypes.c_int()
cfunc = lib_importer.windll.DAQmxGetDOOutputDriveType
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes_byte_str,
ctypes.POINTER(ctypes.c_int)]
error_code = cfunc(
self._handle, self._name, ctypes.byref(val))
check_for_error(error_code)
return DigitalDriveType(val.value)
@do_output_drive_type.setter
def do_output_drive_type(self, val):
val = val.value
cfunc = lib_importer.windll.DAQmxSetDOOutputDriveType
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes_byte_str,
ctypes.c_int]
error_code = cfunc(
self._handle, self._name, val)
check_for_error(error_code)
@do_output_drive_type.deleter
def do_output_drive_type(self):
cfunc = lib_importer.windll.DAQmxResetDOOutputDriveType
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes_byte_str]
error_code = cfunc(
self._handle, self._name)
check_for_error(error_code)
@property
def do_overcurrent_auto_reenable(self):
"""
bool: Specifies whether to automatically reenable channels after
they no longer exceed the current limit specified by
**do_overcurrent_limit**.
"""
val = c_bool32()
cfunc = lib_importer.windll.DAQmxGetDOOvercurrentAutoReenable
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes_byte_str,
ctypes.POINTER(c_bool32)]
error_code = cfunc(
self._handle, self._name, ctypes.byref(val))
check_for_error(error_code)
return val.value
@do_overcurrent_auto_reenable.setter
def do_overcurrent_auto_reenable(self, val):
cfunc = lib_importer.windll.DAQmxSetDOOvercurrentAutoReenable
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes_byte_str, c_bool32]
error_code = cfunc(
self._handle, self._name, val)
check_for_error(error_code)
@do_overcurrent_auto_reenable.deleter
def do_overcurrent_auto_reenable(self):
cfunc = lib_importer.windll.DAQmxResetDOOvercurrentAutoReenable
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes_byte_str]
error_code = cfunc(
self._handle, self._name)
check_for_error(error_code)
@property
def do_overcurrent_limit(self):
"""
float: Specifies the current threshold in Amperes for the
channel. A value of 0 means the channel observes no limit.
Devices can monitor only a finite number of current
thresholds simultaneously. If you attempt to monitor
additional thresholds, NI-DAQmx returns an error.
"""
val = ctypes.c_double()
cfunc = lib_importer.windll.DAQmxGetDOOvercurrentLimit
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes_byte_str,
ctypes.POINTER(ctypes.c_double)]
error_code = cfunc(
self._handle, self._name, ctypes.byref(val))
check_for_error(error_code)
return val.value
@do_overcurrent_limit.setter
def do_overcurrent_limit(self, val):
cfunc = lib_importer.windll.DAQmxSetDOOvercurrentLimit
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes_byte_str,
ctypes.c_double]
error_code = cfunc(
self._handle, self._name, val)
check_for_error(error_code)
@do_overcurrent_limit.deleter
def do_overcurrent_limit(self):
cfunc = lib_importer.windll.DAQmxResetDOOvercurrentLimit
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes_byte_str]
error_code = cfunc(
self._handle, self._name)
check_for_error(error_code)
@property
def do_overcurrent_reenable_period(self):
"""
float: Specifies the delay in seconds between the time a channel
no longer exceeds the current limit and the reactivation of
that channel, if **do_overcurrent_auto_reenable** is True.
"""
val = ctypes.c_double()
cfunc = lib_importer.windll.DAQmxGetDOOvercurrentReenablePeriod
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes_byte_str,
ctypes.POINTER(ctypes.c_double)]
error_code = cfunc(
self._handle, self._name, ctypes.byref(val))
check_for_error(error_code)
return val.value
@do_overcurrent_reenable_period.setter
def do_overcurrent_reenable_period(self, val):
cfunc = lib_importer.windll.DAQmxSetDOOvercurrentReenablePeriod
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes_byte_str,
ctypes.c_double]
error_code = cfunc(
self._handle, self._name, val)
check_for_error(error_code)
@do_overcurrent_reenable_period.deleter
def do_overcurrent_reenable_period(self):
cfunc = lib_importer.windll.DAQmxResetDOOvercurrentReenablePeriod
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes_byte_str]
error_code = cfunc(
self._handle, self._name)
check_for_error(error_code)
@property
def do_tristate(self):
"""
bool: Specifies whether to stop driving the channel and set it
to a high-impedance state. You must commit the task for this
setting to take effect.
"""
val = c_bool32()
cfunc = lib_importer.windll.DAQmxGetDOTristate
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes_byte_str,
ctypes.POINTER(c_bool32)]
error_code = cfunc(
self._handle, self._name, ctypes.byref(val))
check_for_error(error_code)
return val.value
@do_tristate.setter
def do_tristate(self, val):
cfunc = lib_importer.windll.DAQmxSetDOTristate
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes_byte_str, c_bool32]
error_code = cfunc(
self._handle, self._name, val)
check_for_error(error_code)
@do_tristate.deleter
def do_tristate(self):
cfunc = lib_importer.windll.DAQmxResetDOTristate
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes_byte_str]
error_code = cfunc(
self._handle, self._name)
check_for_error(error_code)
@property
def do_usb_xfer_req_count(self):
"""
int: Specifies the maximum number of simultaneous USB transfers
used to stream data. Modify this value to affect performance
under different combinations of operating system and device.
"""
val = ctypes.c_uint()
cfunc = lib_importer.windll.DAQmxGetDOUsbXferReqCount
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes_byte_str,
ctypes.POINTER(ctypes.c_uint)]
error_code = cfunc(
self._handle, self._name, ctypes.byref(val))
check_for_error(error_code)
return val.value
@do_usb_xfer_req_count.setter
def do_usb_xfer_req_count(self, val):
cfunc = lib_importer.windll.DAQmxSetDOUsbXferReqCount
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes_byte_str,
ctypes.c_uint]
error_code = cfunc(
self._handle, self._name, val)
check_for_error(error_code)
@do_usb_xfer_req_count.deleter
def do_usb_xfer_req_count(self):
cfunc = lib_importer.windll.DAQmxResetDOUsbXferReqCount
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes_byte_str]
error_code = cfunc(
self._handle, self._name)
check_for_error(error_code)
@property
def do_usb_xfer_req_size(self):
"""
int: Specifies the maximum size of a USB transfer request in
bytes. Modify this value to affect performance under
different combinations of operating system and device.
"""
val = ctypes.c_uint()
cfunc = lib_importer.windll.DAQmxGetDOUsbXferReqSize
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes_byte_str,
ctypes.POINTER(ctypes.c_uint)]
error_code = cfunc(
self._handle, self._name, ctypes.byref(val))
check_for_error(error_code)
return val.value
@do_usb_xfer_req_size.setter
def do_usb_xfer_req_size(self, val):
cfunc = lib_importer.windll.DAQmxSetDOUsbXferReqSize
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes_byte_str,
ctypes.c_uint]
error_code = cfunc(
self._handle, self._name, val)
check_for_error(error_code)
@do_usb_xfer_req_size.deleter
def do_usb_xfer_req_size(self):
cfunc = lib_importer.windll.DAQmxResetDOUsbXferReqSize
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes_byte_str]
error_code = cfunc(
self._handle, self._name)
check_for_error(error_code)
@property
def do_use_only_on_brd_mem(self):
"""
bool: Specifies whether to write samples directly to the onboard
memory of the device, bypassing the memory buffer.
Generally, you cannot update onboard memory after you start
the task. Onboard memory includes data FIFOs.
"""
val = c_bool32()
cfunc = lib_importer.windll.DAQmxGetDOUseOnlyOnBrdMem
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes_byte_str,
ctypes.POINTER(c_bool32)]
error_code = cfunc(
self._handle, self._name, ctypes.byref(val))
check_for_error(error_code)
return val.value
@do_use_only_on_brd_mem.setter
def do_use_only_on_brd_mem(self, val):
cfunc = lib_importer.windll.DAQmxSetDOUseOnlyOnBrdMem
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes_byte_str, c_bool32]
error_code = cfunc(
self._handle, self._name, val)
check_for_error(error_code)
@do_use_only_on_brd_mem.deleter
def do_use_only_on_brd_mem(self):
cfunc = lib_importer.windll.DAQmxResetDOUseOnlyOnBrdMem
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes_byte_str]
error_code = cfunc(
self._handle, self._name)
check_for_error(error_code)
|
flows/ablations/abl_noattn.py | evanlohn/flowpp | 131 | 11134419
"""
Ablation: no attention (replaced with a pointwise MLP, with the same number of parameters)
Params: 31,443,440
Dropout 0.2
"""
import tensorflow as tf
from flows.flow_training import train, evaluate
from flows.flows import (
conv2d, gated_conv, gaussian_sample_logp, VarConfig, get_var, layernorm, nin, gate,
Flow, Compose, Inverse, ImgProc, Sigmoid, MixLogisticCDF, ElemwiseAffine,
TupleFlip, CheckerboardSplit, ChannelSplit, SpaceToDepth, Norm, Pointwise
)
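# Editorial note: gated_nin below is this ablation's stand-in for attention -- a
# position-wise maxout MLP (project to 3*ch units, take the max over the 3 copies)
# with learned position embeddings and a gated residual output, parameter-matched
# to the attention block it replaces.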
def gated_nin(x, *, name, pos_emb, dropout_p, vcfg: VarConfig):
with tf.variable_scope(name):
bs, height, width, ch = x.shape.as_list()
assert pos_emb.shape == [height, width, ch]
# Position embeddings
c = x + pos_emb[None, :, :, :]
c = nin(c, name='proj1', num_units=3 * ch, vcfg=vcfg)
assert c.shape == [bs, height, width, 3 * ch]
c = tf.reshape(c, [bs, height, width, ch, 3])
c1 = tf.reduce_max(c, axis=4)
assert c1.shape == [bs, height, width, ch]
if dropout_p > 0:
c1 = tf.nn.dropout(c1, keep_prob=1. - dropout_p)
c2 = nin(c1, name='proj2', num_units=ch * 2, init_scale=0.1, vcfg=vcfg)
return x + gate(c2, axis=3)
class MixLogisticCoupling(Flow):
def __init__(self, filters, blocks, components, init_scale=0.1):
def f(x, *, vcfg: VarConfig, context=None, dropout_p=0., verbose=True):
if vcfg.init and verbose:
# debug stuff
xmean, xvar = tf.nn.moments(x, axes=list(range(len(x.shape))))
x = tf.Print(
x, [tf.shape(x), xmean, tf.sqrt(xvar), tf.reduce_min(x), tf.reduce_max(x)],
message='{} (shape/mean/std/min/max) '.format(self.template.variable_scope.name), summarize=10
)
B, H, W, C = x.shape.as_list()
pos_emb = get_var('pos_emb', shape=[H, W, filters], initializer=tf.random_normal_initializer(stddev=0.01),
vcfg=vcfg)
x = conv2d(x, name='proj_in', num_units=filters, vcfg=vcfg)
for i_block in range(blocks):
with tf.variable_scope(f'block{i_block}'):
x = gated_conv(x, name='conv', a=context, use_nin=True, dropout_p=dropout_p, vcfg=vcfg)
x = layernorm(x, name='ln1', vcfg=vcfg)
x = gated_nin(x, name='attn', pos_emb=pos_emb, dropout_p=dropout_p, vcfg=vcfg)
x = layernorm(x, name='ln2', vcfg=vcfg)
x = conv2d(x, name='proj_out', num_units=C * (2 + 3 * components), init_scale=init_scale, vcfg=vcfg)
assert x.shape == [B, H, W, C * (2 + 3 * components)]
x = tf.reshape(x, [B, H, W, C, 2 + 3 * components])
s, t = tf.tanh(x[:, :, :, :, 0]), x[:, :, :, :, 1]
ml_logits, ml_means, ml_logscales = tf.split(x[:, :, :, :, 2:], 3, axis=4)
assert s.shape == t.shape == [B, H, W, C]
assert ml_logits.shape == ml_means.shape == ml_logscales.shape == [B, H, W, C, components]
return Compose([
MixLogisticCDF(logits=ml_logits, means=ml_means, logscales=ml_logscales),
Inverse(Sigmoid()),
ElemwiseAffine(scales=tf.exp(s), logscales=s, biases=t),
])
self.template = tf.make_template(self.__class__.__name__, f)
def forward(self, x, **kwargs):
assert isinstance(x, tuple)
cf, ef = x
flow = self.template(cf, **kwargs)
out, logd = flow.forward(ef)
assert out.shape == ef.shape == cf.shape
return (cf, out), logd
def inverse(self, y, **kwargs):
assert isinstance(y, tuple)
cf, ef = y
flow = self.template(cf, **kwargs)
out, logd = flow.inverse(ef)
assert out.shape == ef.shape == cf.shape
return (cf, out), logd
def construct(*, filters, dequant_filters, components, blocks):
# see MixLogisticAttnCoupling constructor
dequant_coupling_kwargs = dict(filters=dequant_filters, blocks=2, components=components)
coupling_kwargs = dict(filters=filters, blocks=blocks, components=components)
class Dequant(Flow):
def __init__(self):
def shallow_processor(x, *, dropout_p, vcfg):
x = x / 256.0 - 0.5
(this, that), _ = CheckerboardSplit().forward(x)
x = conv2d(tf.concat([this, that], 3), name='proj', num_units=32, vcfg=vcfg)
for i in range(3):
x = gated_conv(x, name=f'c{i}', vcfg=vcfg, dropout_p=dropout_p, use_nin=False, a=None)
return x
self.context_proc = tf.make_template("context_proc", shallow_processor)
self.dequant_flow = Compose([
CheckerboardSplit(),
Norm(), Pointwise(), MixLogisticCoupling(**dequant_coupling_kwargs), TupleFlip(),
Norm(), Pointwise(), MixLogisticCoupling(**dequant_coupling_kwargs), TupleFlip(),
Norm(), Pointwise(), MixLogisticCoupling(**dequant_coupling_kwargs), TupleFlip(),
Norm(), Pointwise(), MixLogisticCoupling(**dequant_coupling_kwargs), TupleFlip(),
Inverse(CheckerboardSplit()),
Sigmoid(),
])
def forward(self, x, *, vcfg, dropout_p=0., verbose=True, context=None):
assert context is None
eps, eps_logp = gaussian_sample_logp(x.shape.as_list())
xd, logd = self.dequant_flow.forward(
eps,
context=self.context_proc(x, dropout_p=dropout_p, vcfg=vcfg),
dropout_p=dropout_p, verbose=verbose, vcfg=vcfg
)
assert eps.shape == x.shape and logd.shape == eps_logp.shape == [x.shape[0]]
return x + xd, logd - eps_logp
dequant_flow = Dequant()
flow = Compose([
ImgProc(),
CheckerboardSplit(),
Norm(), Pointwise(), MixLogisticCoupling(**coupling_kwargs), TupleFlip(),
Norm(), Pointwise(), MixLogisticCoupling(**coupling_kwargs), TupleFlip(),
Norm(), Pointwise(), MixLogisticCoupling(**coupling_kwargs), TupleFlip(),
Norm(), Pointwise(), MixLogisticCoupling(**coupling_kwargs), TupleFlip(),
Inverse(CheckerboardSplit()),
SpaceToDepth(),
ChannelSplit(),
Norm(), Pointwise(), MixLogisticCoupling(**coupling_kwargs), TupleFlip(),
Norm(), Pointwise(), MixLogisticCoupling(**coupling_kwargs), TupleFlip(),
Inverse(ChannelSplit()),
CheckerboardSplit(),
Norm(), Pointwise(), MixLogisticCoupling(**coupling_kwargs), TupleFlip(),
Norm(), Pointwise(), MixLogisticCoupling(**coupling_kwargs), TupleFlip(),
Norm(), Pointwise(), MixLogisticCoupling(**coupling_kwargs), TupleFlip(),
Inverse(CheckerboardSplit()),
])
return dequant_flow, flow
def main():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--eval_checkpoint', type=str, default=None)
args = parser.parse_args()
max_lr = 3e-4
warmup_steps = 2000
lr_decay = 1
def lr_schedule(step):
if step < warmup_steps:
return max_lr * step / warmup_steps
return max_lr * (lr_decay ** (step - warmup_steps))
dropout_p = 0.2
components = 32 # logistic mixture components
blocks = 10
filters = dequant_filters = 96
ema_decay = 0.999
def flow_constructor():
return construct(filters=filters, dequant_filters=dequant_filters, components=components, blocks=blocks)
if args.eval_checkpoint:
evaluate(flow_constructor=flow_constructor, seed=0, restore_checkpoint=args.eval_checkpoint)
return
train(
flow_constructor=flow_constructor,
logdir=f'~/logs/abl_noattn_fbdq{dequant_filters}_mixlog{components}_blocks{blocks}_f{filters}_lr{max_lr}_drop{dropout_p}',
lr_schedule=lr_schedule,
dropout_p=dropout_p,
seed=0,
init_bs=128,
total_bs=64,
ema_decay=ema_decay,
steps_per_log=100,
epochs_per_val=1,
max_grad_norm=1.,
)
if __name__ == '__main__':
main()
|
anytree/walker.py | odidev/anytree | 700 | 11134433 | # -*- coding: utf-8 -*-
class Walker(object):
def __init__(self):
"""Walk from one node to another."""
super(Walker, self).__init__()
def walk(self, start, end):
"""
Walk from `start` node to `end` node.
Returns:
(upwards, common, downwards): `upwards` is a list of nodes to go upward to.
            `common` is the common top node. `downwards` is a list of nodes to go downward to.
Raises:
WalkError: on no common root node.
Example:
>>> from anytree import Node, RenderTree, AsciiStyle
>>> f = Node("f")
>>> b = Node("b", parent=f)
>>> a = Node("a", parent=b)
>>> d = Node("d", parent=b)
>>> c = Node("c", parent=d)
>>> e = Node("e", parent=d)
>>> g = Node("g", parent=f)
>>> i = Node("i", parent=g)
>>> h = Node("h", parent=i)
>>> print(RenderTree(f, style=AsciiStyle()))
Node('/f')
|-- Node('/f/b')
| |-- Node('/f/b/a')
| +-- Node('/f/b/d')
| |-- Node('/f/b/d/c')
| +-- Node('/f/b/d/e')
+-- Node('/f/g')
+-- Node('/f/g/i')
+-- Node('/f/g/i/h')
Create a walker:
>>> w = Walker()
This class is made for walking:
>>> w.walk(f, f)
((), Node('/f'), ())
>>> w.walk(f, b)
((), Node('/f'), (Node('/f/b'),))
>>> w.walk(b, f)
((Node('/f/b'),), Node('/f'), ())
>>> w.walk(h, e)
((Node('/f/g/i/h'), Node('/f/g/i'), Node('/f/g')), Node('/f'), (Node('/f/b'), Node('/f/b/d'), Node('/f/b/d/e')))
>>> w.walk(d, e)
((), Node('/f/b/d'), (Node('/f/b/d/e'),))
For a proper walking the nodes need to be part of the same tree:
>>> w.walk(Node("a"), Node("b"))
Traceback (most recent call last):
...
anytree.walker.WalkError: Node('/a') and Node('/b') are not part of the same tree.
"""
s = start.path
e = end.path
if start.root is not end.root:
msg = "%r and %r are not part of the same tree." % (start, end)
raise WalkError(msg)
# common
c = Walker.__calc_common(s, e)
assert c[0] is start.root
len_c = len(c)
# up
if start is c[-1]:
up = tuple()
else:
up = tuple(reversed(s[len_c:]))
# down
if end is c[-1]:
down = tuple()
else:
down = e[len_c:]
return up, c[-1], down
@staticmethod
def __calc_common(s, e):
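        # Longest shared prefix of the two root-to-node paths, compared by identity;
        # e.g. paths (f, b, a) and (f, b, d, e) yield (f, b).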
return tuple([si for si, ei in zip(s, e) if si is ei])
class WalkError(RuntimeError):
"""Walk Error."""
|
tests/tools/assigner/actions/test_base_class.py | akashvacher/kafka-tools | 578 | 11134434 | <gh_stars>100-1000
import unittest
from argparse import Namespace
from .fixtures import set_up_cluster
from kafka.tools.assigner.actions import ActionModule, ActionBalanceModule
class ActionModuleTests(unittest.TestCase):
def setUp(self):
self.cluster = set_up_cluster()
self.args = Namespace(exclude_topics=[])
def test_create_class(self):
action = ActionModule(self.args, self.cluster)
assert isinstance(action, ActionModule)
def test_configure_args(self):
self.assertRaises(Exception, ActionModule.configure_args, None)
def test_add_args(self):
action = ActionModule(self.args, self.cluster)
action._add_args(None)
assert True
def test_process_cluster(self):
action = ActionModule(self.args, self.cluster)
action.process_cluster()
assert True
def test_create_balance_class(self):
ActionBalanceModule.configure_args(None)
|
elftools/construct/lib/py3compat.py | mebeim/pyelftools | 1,358 | 11134455 | #-------------------------------------------------------------------------------
# py3compat.py
#
# Some Python2&3 compatibility code
#-------------------------------------------------------------------------------
import sys
PY3 = sys.version_info[0] == 3
try:
from collections.abc import MutableMapping # python >= 3.3
except ImportError:
from collections import MutableMapping # python < 3.3
if PY3:
import io
StringIO = io.StringIO
BytesIO = io.BytesIO
def bchr(i):
""" When iterating over b'...' in Python 2 you get single b'_' chars
and in Python 3 you get integers. Call bchr to always turn this
to single b'_' chars.
"""
return bytes((i,))
def u(s):
return s
def int2byte(i):
return bytes((i,))
def byte2int(b):
return b
def str2bytes(s):
return s.encode("latin-1")
def str2unicode(s):
return s
def bytes2str(b):
return b.decode('latin-1')
def decodebytes(b, encoding):
return bytes(b, encoding)
advance_iterator = next
else:
import cStringIO
StringIO = BytesIO = cStringIO.StringIO
int2byte = chr
byte2int = ord
bchr = lambda i: i
def u(s):
return unicode(s, "unicode_escape")
def str2bytes(s):
return s
def str2unicode(s):
return unicode(s, "unicode_escape")
def bytes2str(b):
return b
def decodebytes(b, encoding):
return b.decode(encoding)
def advance_iterator(it):
return it.next()
|
corehq/apps/api/tests/lookup_table_resources.py | dimagilg/commcare-hq | 471 | 11134458 | <filename>corehq/apps/api/tests/lookup_table_resources.py
import json
from corehq.apps.api.tests.utils import APIResourceTest
from corehq.apps.fixtures.models import (
FieldList,
FixtureDataItem,
FixtureDataType,
FixtureTypeField,
)
from corehq.apps.fixtures.resources.v0_1 import (
LookupTableItemResource,
LookupTableResource,
)
class TestLookupTableResource(APIResourceTest):
resource = LookupTableResource
api_name = 'v0.5'
def setUp(self):
super(TestLookupTableResource, self).setUp()
self.data_type = FixtureDataType(
domain=self.domain.name,
tag="lookup_table",
fields=[
FixtureTypeField(
field_name="fixture_property",
properties=["lang", "name"]
)
],
item_attributes=[]
)
self.data_type.save()
def tearDown(self):
self.data_type.delete()
super(TestLookupTableResource, self).tearDown()
def _data_type_json(self):
return {
"fields": [
{
"field_name": "fixture_property",
"properties": ["lang", "name"],
},
],
"item_attributes": [],
"id": self.data_type._id,
"is_global": False,
"resource_uri": "",
"tag": "lookup_table"
}
def test_get_list(self):
response = self._assert_auth_get_resource(self.list_endpoint)
self.assertEqual(response.status_code, 200)
fixture_data_types = json.loads(response.content)['objects']
self.assertEqual(len(fixture_data_types), 1)
self.assertEqual(fixture_data_types, [self._data_type_json()])
def test_get_single(self):
response = self._assert_auth_get_resource(self.single_endpoint(self.data_type._id))
self.assertEqual(response.status_code, 200)
fixture_data_type = json.loads(response.content)
self.assertEqual(fixture_data_type, self._data_type_json())
def test_delete(self):
data_type = FixtureDataType(
domain=self.domain.name,
tag="lookup_table2",
fields=[
FixtureTypeField(
field_name="fixture_property",
properties=["lang", "name"]
)
],
item_attributes=[]
)
data_type.save()
self.addCleanup(data_type.delete)
self.assertEqual(2, len(FixtureDataType.by_domain(self.domain.name)))
response = self._assert_auth_post_resource(self.single_endpoint(data_type._id), '', method='DELETE')
self.assertEqual(response.status_code, 204, response.content)
self.assertEqual(1, len(FixtureDataType.by_domain(self.domain.name)))
def test_create(self):
lookup_table = {
"tag": "table_name",
"fields": [{
"field_name": "fieldA",
"properties": ["property1", "property2"]
}]
}
response = self._assert_auth_post_resource(
self.list_endpoint, json.dumps(lookup_table), content_type='application/json')
self.assertEqual(response.status_code, 201)
data_type = FixtureDataType.by_domain_tag(self.domain.name, "table_name").first()
self.addCleanup(data_type.delete)
self.assertEqual(data_type.tag, "table_name")
self.assertEqual(len(data_type.fields), 1)
self.assertEqual(data_type.fields[0].field_name, 'fieldA')
self.assertEqual(data_type.fields[0].properties, ['property1', 'property2'])
def test_update(self):
lookup_table = {
"tag": "lookup_table",
"item_attributes": ["X"]
}
response = self._assert_auth_post_resource(
self.single_endpoint(self.data_type._id), json.dumps(lookup_table), method="PUT")
data_type = FixtureDataType.get(self.data_type._id)
self.assertEqual(response.status_code, 204)
self.assertEqual(data_type.tag, "lookup_table")
self.assertEqual(len(data_type.fields), 1)
self.assertEqual(data_type.fields[0].field_name, 'fixture_property')
self.assertEqual(data_type.fields[0].properties, ['lang', 'name'])
self.assertEqual(data_type.item_attributes, ['X'])
class TestLookupTableItemResource(APIResourceTest):
resource = LookupTableItemResource
api_name = 'v0.5'
@classmethod
def setUpClass(cls):
super(TestLookupTableItemResource, cls).setUpClass()
cls.data_type = FixtureDataType(
domain=cls.domain.name,
tag="lookup_table",
fields=[
FixtureTypeField(
field_name="fixture_property",
properties=["lang", "name"]
)
],
item_attributes=[]
)
cls.data_type.save()
@classmethod
def tearDownClass(cls):
cls.data_type.delete()
super(TestLookupTableItemResource, cls).tearDownClass()
def _create_data_item(self, cleanup=True):
data_item = FixtureDataItem(
domain=self.domain.name,
data_type_id=self.data_type._id,
fields={
"state_name": FieldList.wrap({
"field_list": [
{"field_value": "Tennessee", "properties": {"lang": "en"}},
{"field_value": "田納西", "properties": {"lang": "zh"}},
]})
},
item_attributes={},
sort_key=1
)
data_item.save()
if cleanup:
self.addCleanup(data_item.delete)
return data_item
def _data_item_json(self, id_, sort_key):
return {
"id": id_,
"data_type_id": self.data_type._id,
"fields": {
"state_name": {
"field_list": [
{"field_value": "Tennessee", "properties": {"lang": "en"}},
{"field_value": "田納西", "properties": {"lang": "zh"}},
]
}
},
"resource_uri": "",
"item_attributes": {},
"sort_key": sort_key,
}
def test_get_list(self):
data_item = self._create_data_item()
response = self._assert_auth_get_resource(self.list_endpoint)
self.assertEqual(response.status_code, 200)
fixture_data_types = json.loads(response.content)['objects']
self.assertEqual(len(fixture_data_types), 1)
self.assertEqual(fixture_data_types, [self._data_item_json(data_item._id, data_item.sort_key)])
def test_get_single(self):
data_item = self._create_data_item()
response = self._assert_auth_get_resource(self.single_endpoint(data_item._id))
self.assertEqual(response.status_code, 200)
fixture_data_type = json.loads(response.content)
self.assertEqual(fixture_data_type, self._data_item_json(data_item._id, data_item.sort_key))
def test_delete(self):
data_item = self._create_data_item(cleanup=False)
self.assertEqual(1, len(FixtureDataItem.by_domain(self.domain.name)))
response = self._assert_auth_post_resource(self.single_endpoint(data_item._id), '', method='DELETE')
self.assertEqual(response.status_code, 204, response.content)
self.assertEqual(0, len(FixtureDataItem.by_domain(self.domain.name)))
def test_create(self):
data_item_json = {
"data_type_id": self.data_type._id,
"fields": {
"state_name": {
"field_list": [
{"field_value": "Massachusetts", "properties": {"lang": "en"}},
{"field_value": "马萨诸塞", "properties": {"lang": "zh"}},
]
}
},
}
response = self._assert_auth_post_resource(
self.list_endpoint, json.dumps(data_item_json), content_type='application/json')
self.assertEqual(response.status_code, 201)
data_item = FixtureDataItem.by_domain(self.domain.name).first()
self.addCleanup(data_item.delete)
self.assertEqual(data_item.data_type_id, self.data_type._id)
self.assertEqual(len(data_item.fields), 1)
self.assertEqual(data_item.fields['state_name'].field_list[0].field_value, 'Massachusetts')
self.assertEqual(data_item.fields['state_name'].field_list[0].properties, {"lang": "en"})
def test_update(self):
data_item = self._create_data_item()
data_item_update = {
"data_type_id": self.data_type._id,
"fields": {
"state_name": {
"field_list": [
{"field_value": "Massachusetts", "properties": {"lang": "en"}},
{"field_value": "马萨诸塞", "properties": {"lang": "zh"}},
]
}
},
"item_attributes": {
"attribute1": "cool_attr_value",
}
}
response = self._assert_auth_post_resource(
self.single_endpoint(data_item._id), json.dumps(data_item_update), method="PUT")
data_item = FixtureDataItem.get(data_item._id)
self.assertEqual(response.status_code, 204)
self.assertEqual(data_item.data_type_id, self.data_type._id)
self.assertEqual(len(data_item.fields), 1)
self.assertEqual(data_item.fields['state_name'].field_list[0].field_value, 'Massachusetts')
self.assertEqual(data_item.fields['state_name'].field_list[0].properties, {"lang": "en"})
self.assertEqual(data_item.item_attributes, {"attribute1": "cool_attr_value"})
|
examples/pong.py | papagiannakis/py-sdl2 | 222 | 11134460 | """The Pong Game."""
import sys
import sdl2
import sdl2.ext
BLACK = sdl2.ext.Color(0, 0, 0)
WHITE = sdl2.ext.Color(255, 255, 255)
PADDLE_SPEED = 3
BALL_SPEED = 3
class CollisionSystem(sdl2.ext.Applicator):
def __init__(self, minx, miny, maxx, maxy):
super(CollisionSystem, self).__init__()
self.componenttypes = Velocity, sdl2.ext.Sprite
self.ball = None
self.minx = minx
self.miny = miny
self.maxx = maxx
self.maxy = maxy
def _overlap(self, item):
sprite = item[1]
if sprite == self.ball.sprite:
return False
left, top, right, bottom = sprite.area
bleft, btop, bright, bbottom = self.ball.sprite.area
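        # Axis-aligned bounding-box test: the ball collides with a paddle iff their rectangles overlap on both axes.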
return (bleft < right and bright > left and
btop < bottom and bbottom > top)
def process(self, world, componentsets):
collitems = [comp for comp in componentsets if self._overlap(comp)]
if len(collitems) != 0:
self.ball.velocity.vx = -self.ball.velocity.vx
sprite = collitems[0][1]
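            # Deflect the ball vertically in proportion to how far from the paddle's center it hit; a dead-center hit simply reverses vy.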
ballcentery = self.ball.sprite.y + self.ball.sprite.size[1] // 2
halfheight = sprite.size[1] // 2
stepsize = halfheight // 10
degrees = 0.7
paddlecentery = sprite.y + halfheight
if ballcentery < paddlecentery:
factor = (paddlecentery - ballcentery) // stepsize
self.ball.velocity.vy = -int(round(factor * degrees))
elif ballcentery > paddlecentery:
factor = (ballcentery - paddlecentery) // stepsize
self.ball.velocity.vy = int(round(factor * degrees))
else:
self.ball.velocity.vy = -self.ball.velocity.vy
if (self.ball.sprite.y <= self.miny or
self.ball.sprite.y + self.ball.sprite.size[1] >= self.maxy):
self.ball.velocity.vy = -self.ball.velocity.vy
if (self.ball.sprite.x <= self.minx or
self.ball.sprite.x + self.ball.sprite.size[0] >= self.maxx):
self.ball.velocity.vx = -self.ball.velocity.vx
class MovementSystem(sdl2.ext.Applicator):
def __init__(self, minx, miny, maxx, maxy):
super(MovementSystem, self).__init__()
self.componenttypes = Velocity, sdl2.ext.Sprite
self.minx = minx
self.miny = miny
self.maxx = maxx
self.maxy = maxy
def process(self, world, componentsets):
for velocity, sprite in componentsets:
swidth, sheight = sprite.size
sprite.x += velocity.vx
sprite.y += velocity.vy
sprite.x = max(self.minx, sprite.x)
sprite.y = max(self.miny, sprite.y)
pmaxx = sprite.x + swidth
pmaxy = sprite.y + sheight
if pmaxx > self.maxx:
sprite.x = self.maxx - swidth
if pmaxy > self.maxy:
sprite.y = self.maxy - sheight
class TrackingAIController(sdl2.ext.Applicator):
def __init__(self, miny, maxy):
super(TrackingAIController, self).__init__()
self.componenttypes = PlayerData, Velocity, sdl2.ext.Sprite
self.miny = miny
self.maxy = maxy
self.ball = None
def process(self, world, componentsets):
for pdata, vel, sprite in componentsets:
if not pdata.ai:
continue
sheight = sprite.size[1]
centery = sprite.y + sheight // 2
if self.ball.velocity.vx < 0:
# ball is moving away from the AI
if centery < self.maxy // 2 - PADDLE_SPEED:
vel.vy = PADDLE_SPEED
elif centery > self.maxy // 2 + PADDLE_SPEED:
vel.vy = -PADDLE_SPEED
else:
vel.vy = 0
else:
bcentery = self.ball.sprite.y + self.ball.sprite.size[1] // 2
if bcentery < centery:
vel.vy = -PADDLE_SPEED
elif bcentery > centery:
vel.vy = PADDLE_SPEED
else:
vel.vy = 0
class SoftwareRenderSystem(sdl2.ext.SoftwareSpriteRenderSystem):
def __init__(self, window):
super(SoftwareRenderSystem, self).__init__(window)
def render(self, components):
sdl2.ext.fill(self.surface, BLACK)
super(SoftwareRenderSystem, self).render(components)
class TextureRenderSystem(sdl2.ext.TextureSpriteRenderSystem):
def __init__(self, renderer):
super(TextureRenderSystem, self).__init__(renderer)
self.renderer = renderer
def render(self, components):
tmp = self.renderer.color
self.renderer.color = BLACK
self.renderer.clear()
self.renderer.color = tmp
super(TextureRenderSystem, self).render(components)
class Velocity(object):
def __init__(self):
super(Velocity, self).__init__()
self.vx = 0
self.vy = 0
class PlayerData(object):
def __init__(self):
super(PlayerData, self).__init__()
self.ai = False
self.points = 0
class Player(sdl2.ext.Entity):
def __init__(self, world, sprite, posx=0, posy=0, ai=False):
self.sprite = sprite
self.sprite.position = posx, posy
self.velocity = Velocity()
self.playerdata = PlayerData()
self.playerdata.ai = ai
class Ball(sdl2.ext.Entity):
def __init__(self, world, sprite, posx=0, posy=0):
self.sprite = sprite
self.sprite.position = posx, posy
self.velocity = Velocity()
def run():
sdl2.ext.init()
window = sdl2.ext.Window("The Pong Game", size=(800, 600))
window.show()
if "-hardware" in sys.argv:
print("Using hardware acceleration")
renderer = sdl2.ext.Renderer(window)
factory = sdl2.ext.SpriteFactory(sdl2.ext.TEXTURE, renderer=renderer)
else:
print("Using software rendering")
factory = sdl2.ext.SpriteFactory(sdl2.ext.SOFTWARE)
# Create the paddles - we want white ones. To keep it easy enough for us,
# we create a set of surfaces that can be used for Texture- and
# Software-based sprites.
sp_paddle1 = factory.from_color(WHITE, size=(20, 100))
sp_paddle2 = factory.from_color(WHITE, size=(20, 100))
sp_ball = factory.from_color(WHITE, size=(20, 20))
world = sdl2.ext.World()
movement = MovementSystem(0, 0, 800, 600)
collision = CollisionSystem(0, 0, 800, 600)
aicontroller = TrackingAIController(0, 600)
if factory.sprite_type == sdl2.ext.SOFTWARE:
spriterenderer = SoftwareRenderSystem(window)
else:
spriterenderer = TextureRenderSystem(renderer)
world.add_system(aicontroller)
world.add_system(movement)
world.add_system(collision)
world.add_system(spriterenderer)
player1 = Player(world, sp_paddle1, 0, 250)
player2 = Player(world, sp_paddle2, 780, 250, True)
ball = Ball(world, sp_ball, 390, 290)
ball.velocity.vx = -BALL_SPEED
collision.ball = ball
aicontroller.ball = ball
running = True
while running:
for event in sdl2.ext.get_events():
if event.type == sdl2.SDL_QUIT:
running = False
break
if event.type == sdl2.SDL_KEYDOWN:
if event.key.keysym.sym == sdl2.SDLK_UP:
player1.velocity.vy = -PADDLE_SPEED
elif event.key.keysym.sym == sdl2.SDLK_DOWN:
player1.velocity.vy = PADDLE_SPEED
elif event.type == sdl2.SDL_KEYUP:
if event.key.keysym.sym in (sdl2.SDLK_UP, sdl2.SDLK_DOWN):
player1.velocity.vy = 0
sdl2.SDL_Delay(10)
world.process()
if __name__ == "__main__":
sys.exit(run())
|
protonfixes/gamefixes/15700.py | Sirmentio/protonfixes | 213 | 11134488 | <gh_stars>100-1000
""" Game fix for Oddworld: Abe's Oddysee
TODO: Fix steam controller input, it is stuck in lizard mode without overlay
"""
#pylint: disable=C0103
from protonfixes import util
def main():
""" Adds the -interline argument to the game
"""
# Adding -interline fixes slow videos but adds scanlines
util.append_argument('-interline')
|
train.py | yangheng95/LCF-ATEPC | 137 | 11134495 | # -*- coding: utf-8 -*-
# file: train.py
# author: yangheng <<EMAIL>>
# Copyright (C) 2019. All Rights Reserved.
import argparse
import json
import logging
import os, sys
import random
from sklearn.metrics import f1_score
from time import strftime, localtime
import numpy as np
import torch
import torch.nn.functional as F
from transformers.optimization import AdamW
from transformers.models.bert.modeling_bert import BertModel
from transformers import BertTokenizer
# from pytorch_transformers.optimization import AdamW
# from pytorch_transformers.tokenization_bert import BertTokenizer
# from pytorch_transformers.modeling_bert import BertModel
from seqeval.metrics import classification_report
from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler, TensorDataset)
from utils.data_utils import ATEPCProcessor, convert_examples_to_features
from model.lcf_atepc import LCF_ATEPC
logger = logging.getLogger()
logger.setLevel(logging.INFO)
logger.addHandler(logging.StreamHandler(sys.stdout))
os.makedirs('logs', exist_ok=True)
time = '{}'.format(strftime("%y%m%d-%H%M%S", localtime()))
log_file = 'logs/{}.log'.format(time)
logger.addHandler(logging.FileHandler(log_file))
logger.info('log file: {}'.format(log_file))
def main(config):
args = config
if args.gradient_accumulation_steps < 1:
raise ValueError("Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format(
args.gradient_accumulation_steps))
args.train_batch_size = args.train_batch_size // args.gradient_accumulation_steps
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir)
processor = ATEPCProcessor()
label_list = processor.get_labels()
num_labels = len(label_list) + 1
datasets = {
'camera': "atepc_datasets/camera",
'car': "atepc_datasets/car",
'phone': "atepc_datasets/phone",
'notebook': "atepc_datasets/notebook",
'laptop': "atepc_datasets/laptop",
'restaurant': "atepc_datasets/restaurant",
'twitter': "atepc_datasets/twitter",
'mixed': "atepc_datasets/mixed",
}
pretrained_bert_models = {
'camera': "bert-base-chinese",
'car': "bert-base-chinese",
'phone': "bert-base-chinese",
'notebook': "bert-base-chinese",
'laptop': "bert-base-uncased",
'restaurant': "bert-base-uncased",
# for loading domain-adapted BERT
# 'restaurant': "../bert_pretrained_restaurant",
'twitter': "bert-base-uncased",
'mixed': "bert-base-multilingual-uncased",
}
args.bert_model = pretrained_bert_models[args.dataset]
args.data_dir = datasets[args.dataset]
def convert_polarity(examples):
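        # The Chinese datasets (camera/car/phone/notebook) use only two sentiment classes, so label 2 is folded into label 1 (see the dataset check further below).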
for i in range(len(examples)):
polarities = []
for polarity in examples[i].polarity:
if polarity == 2:
polarities.append(1)
else:
polarities.append(polarity)
examples[i].polarity = polarities
tokenizer = BertTokenizer.from_pretrained(args.bert_model, do_lower_case=True)
train_examples = processor.get_train_examples(args.data_dir)
eval_examples = processor.get_test_examples(args.data_dir)
num_train_optimization_steps = int(
len(train_examples) / args.train_batch_size / args.gradient_accumulation_steps) * args.num_train_epochs
bert_base_model = BertModel.from_pretrained(args.bert_model)
bert_base_model.config.num_labels = num_labels
if args.dataset in {'camera', 'car', 'phone', 'notebook'}:
convert_polarity(train_examples)
convert_polarity(eval_examples)
model = LCF_ATEPC(bert_base_model, args=args)
else:
model = LCF_ATEPC(bert_base_model, args=args)
for arg in vars(args):
logger.info('>>> {0}: {1}'.format(arg, getattr(args, arg)))
model.to(device)
param_optimizer = list(model.named_parameters())
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.00001},
{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.00001}
]
optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, weight_decay=0.00001)
eval_features = convert_examples_to_features(eval_examples, label_list, args.max_seq_length,
tokenizer)
all_spc_input_ids = torch.tensor([f.input_ids_spc for f in eval_features], dtype=torch.long)
all_input_mask = torch.tensor([f.input_mask for f in eval_features], dtype=torch.long)
all_segment_ids = torch.tensor([f.segment_ids for f in eval_features], dtype=torch.long)
all_label_ids = torch.tensor([f.label_id for f in eval_features], dtype=torch.long)
all_polarities = torch.tensor([f.polarities for f in eval_features], dtype=torch.long)
all_valid_ids = torch.tensor([f.valid_ids for f in eval_features], dtype=torch.long)
all_lmask_ids = torch.tensor([f.label_mask for f in eval_features], dtype=torch.long)
eval_data = TensorDataset(all_spc_input_ids, all_input_mask, all_segment_ids, all_label_ids,
all_polarities, all_valid_ids, all_lmask_ids)
# Run prediction for full data
eval_sampler = RandomSampler(eval_data)
eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size)
def evaluate(eval_ATE=True, eval_APC=True):
# evaluate
apc_result = {'max_apc_test_acc': 0, 'max_apc_test_f1': 0}
ate_result = 0
y_true = []
y_pred = []
n_test_correct, n_test_total = 0, 0
test_apc_logits_all, test_polarities_all = None, None
model.eval()
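        # Label ids start at 1; index 0 is reserved, matching num_labels = len(label_list) + 1 above.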
label_map = {i: label for i, label in enumerate(label_list, 1)}
for input_ids_spc, input_mask, segment_ids, label_ids, polarities, valid_ids, l_mask in eval_dataloader:
input_ids_spc = input_ids_spc.to(device)
input_mask = input_mask.to(device)
segment_ids = segment_ids.to(device)
valid_ids = valid_ids.to(device)
label_ids = label_ids.to(device)
polarities = polarities.to(device)
l_mask = l_mask.to(device)
with torch.no_grad():
ate_logits, apc_logits = model(input_ids_spc, segment_ids, input_mask,
valid_ids=valid_ids, polarities=polarities, attention_mask_label=l_mask)
if eval_APC:
polarities = model.get_batch_polarities(polarities)
n_test_correct += (torch.argmax(apc_logits, -1) == polarities).sum().item()
n_test_total += len(polarities)
if test_polarities_all is None:
test_polarities_all = polarities
test_apc_logits_all = apc_logits
else:
test_polarities_all = torch.cat((test_polarities_all, polarities), dim=0)
test_apc_logits_all = torch.cat((test_apc_logits_all, apc_logits), dim=0)
if eval_ATE:
if not args.use_bert_spc:
label_ids = model.get_batch_token_labels_bert_base_indices(label_ids)
ate_logits = torch.argmax(F.log_softmax(ate_logits, dim=2), dim=2)
ate_logits = ate_logits.detach().cpu().numpy()
label_ids = label_ids.to('cpu').numpy()
input_mask = input_mask.to('cpu').numpy()
for i, label in enumerate(label_ids):
temp_1 = []
temp_2 = []
for j, m in enumerate(label):
if j == 0:
continue
elif label_ids[i][j] == len(label_list):
y_true.append(temp_1)
y_pred.append(temp_2)
break
else:
temp_1.append(label_map.get(label_ids[i][j], 'O'))
temp_2.append(label_map.get(ate_logits[i][j], 'O'))
if eval_APC:
test_acc = n_test_correct / n_test_total
if args.dataset in {'camera', 'car', 'phone', 'notebook'}:
test_f1 = f1_score(torch.argmax(test_apc_logits_all, -1).cpu(), test_polarities_all.cpu(),
labels=[0, 1], average='macro')
else:
test_f1 = f1_score(torch.argmax(test_apc_logits_all, -1).cpu(), test_polarities_all.cpu(),
labels=[0, 1, 2], average='macro')
test_acc = round(test_acc * 100, 2)
test_f1 = round(test_f1 * 100, 2)
apc_result = {'max_apc_test_acc': test_acc, 'max_apc_test_f1': test_f1}
if eval_ATE:
report = classification_report(y_true, y_pred, digits=4)
tmps = report.split()
ate_result = round(float(tmps[7]) * 100, 2)
return apc_result, ate_result
def save_model(path):
# Save a trained model and the associated configuration,
# Take care of the storage!
os.makedirs(path, exist_ok=True)
model_to_save = model.module if hasattr(model, 'module') else model # Only save the model it-self
model_to_save.save_pretrained(path)
tokenizer.save_pretrained(path)
        label_map = {i: label for i, label in enumerate(label_list, 1)}
        model_config = {"bert_model": args.bert_model, "do_lower": True,
                        "max_seq_length": args.max_seq_length,
                        "num_labels": len(label_list) + 1, "label_map": label_map}
        with open(os.path.join(path, "config.json"), "w") as f:
            json.dump(model_config, f)
logger.info('save model to: {}'.format(path))
def train():
train_features = convert_examples_to_features(
train_examples, label_list, args.max_seq_length, tokenizer)
logger.info("***** Running training *****")
logger.info(" Num examples = %d", len(train_examples))
logger.info(" Batch size = %d", args.train_batch_size)
logger.info(" Num steps = %d", num_train_optimization_steps)
all_spc_input_ids = torch.tensor([f.input_ids_spc for f in train_features], dtype=torch.long)
all_input_mask = torch.tensor([f.input_mask for f in train_features], dtype=torch.long)
all_segment_ids = torch.tensor([f.segment_ids for f in train_features], dtype=torch.long)
all_label_ids = torch.tensor([f.label_id for f in train_features], dtype=torch.long)
all_valid_ids = torch.tensor([f.valid_ids for f in train_features], dtype=torch.long)
all_lmask_ids = torch.tensor([f.label_mask for f in train_features], dtype=torch.long)
all_polarities = torch.tensor([f.polarities for f in train_features], dtype=torch.long)
train_data = TensorDataset(all_spc_input_ids, all_input_mask, all_segment_ids,
all_label_ids, all_polarities, all_valid_ids, all_lmask_ids)
train_sampler = SequentialSampler(train_data)
train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size)
max_apc_test_acc = 0
max_apc_test_f1 = 0
max_ate_test_f1 = 0
global_step = 0
for epoch in range(int(args.num_train_epochs)):
logger.info('#' * 80)
            logger.info('Train {} Epoch{} for {}'.format(args.seed, epoch + 1, args.data_dir))
logger.info('#' * 80)
nb_tr_examples, nb_tr_steps = 0, 0
for step, batch in enumerate(train_dataloader):
model.train()
batch = tuple(t.to(device) for t in batch)
input_ids_spc, input_mask, segment_ids, label_ids, polarities, valid_ids, l_mask = batch
loss_ate, loss_apc = model(input_ids_spc, segment_ids, input_mask, label_ids, polarities, valid_ids,
l_mask)
loss = loss_ate + loss_apc
loss.backward()
nb_tr_examples += input_ids_spc.size(0)
nb_tr_steps += 1
optimizer.step()
optimizer.zero_grad()
global_step += 1
if global_step % args.eval_steps == 0:
if epoch >= args.num_train_epochs-2 or args.num_train_epochs<=2:
# evaluate in last 2 epochs
apc_result, ate_result = evaluate(eval_ATE=not args.use_bert_spc)
# apc_result, ate_result = evaluate()
# path = '{0}/{1}_{2}_apcacc_{3}_apcf1_{4}_atef1_{5}'.format(
# args.output_dir,
# args.dataset,
# args.local_context_focus,
# round(apc_result['max_apc_test_acc'], 2),
# round(apc_result['max_apc_test_f1'], 2),
# round(ate_result, 2)
# )
# if apc_result['max_apc_test_acc'] > max_apc_test_acc or \
# apc_result['max_apc_test_f1'] > max_apc_test_f1 or \
# ate_result > max_ate_test_f1:
# save_model(path)
if apc_result['max_apc_test_acc'] > max_apc_test_acc:
max_apc_test_acc = apc_result['max_apc_test_acc']
if apc_result['max_apc_test_f1'] > max_apc_test_f1:
max_apc_test_f1 = apc_result['max_apc_test_f1']
if ate_result > max_ate_test_f1:
max_ate_test_f1 = ate_result
current_apc_test_acc = apc_result['max_apc_test_acc']
current_apc_test_f1 = apc_result['max_apc_test_f1']
current_ate_test_f1 = round(ate_result, 2)
logger.info('*' * 80)
logger.info('Train {} Epoch{}, Evaluate for {}'.format(args.seed, epoch + 1, args.data_dir))
logger.info(f'APC_test_acc: {current_apc_test_acc}(max: {max_apc_test_acc}) '
f'APC_test_f1: {current_apc_test_f1}(max: {max_apc_test_f1})')
if args.use_bert_spc:
logger.info(f'ATE_test_F1: {current_apc_test_f1}(max: {max_apc_test_f1})'
f' (Unreliable since `use_bert_spc` is "True".)')
else:
logger.info(f'ATE_test_f1: {current_ate_test_f1}(max:{max_ate_test_f1})')
logger.info('*' * 80)
return [max_apc_test_acc, max_apc_test_f1, max_ate_test_f1]
return train()
def parse_experiments(path):
configs = []
opt = argparse.ArgumentParser()
with open(path, "r", encoding='utf-8') as reader:
json_config = json.loads(reader.read())
for id, config in json_config.items():
# Hyper Parameters
parser = argparse.ArgumentParser()
parser.add_argument("--dataset", default=config['dataset'], type=str)
parser.add_argument("--output_dir", default=config['output_dir'], type=str)
parser.add_argument("--SRD", default=int(config['SRD']), type=int)
parser.add_argument("--learning_rate", default=float(config['learning_rate']), type=float,
help="The initial learning rate for Adam.")
parser.add_argument("--use_unique_bert", default=bool(config['use_unique_bert']), type=bool)
parser.add_argument("--use_bert_spc", default=bool(config['use_bert_spc_for_apc']), type=bool)
parser.add_argument("--local_context_focus", default=config['local_context_focus'], type=str)
parser.add_argument("--num_train_epochs", default=float(config['num_train_epochs']), type=float,
help="Total number of training epochs to perform.")
parser.add_argument("--train_batch_size", default=int(config['train_batch_size']), type=int,
help="Total batch size for training.")
parser.add_argument("--dropout", default=float(config['dropout']), type=int)
parser.add_argument("--max_seq_length", default=int(config['max_seq_length']), type=int)
parser.add_argument("--eval_batch_size", default=32, type=int, help="Total batch size for eval.")
parser.add_argument("--eval_steps", default=20, help="evaluate per steps")
parser.add_argument('--gradient_accumulation_steps', type=int, default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.")
configs.append(parser.parse_args())
return configs
if __name__ == "__main__":
experiments = argparse.ArgumentParser()
experiments.add_argument('--config_path', default='experiments.json', type=str, help='Path of experiments config file')
experiments = experiments.parse_args()
from utils.Pytorch_GPUManager import GPUManager
index = GPUManager().auto_choice()
device = torch.device("cuda:" + str(index) if torch.cuda.is_available() else "cpu")
exp_configs = parse_experiments(experiments.config_path)
n = 5
for config in exp_configs:
logger.info('-'*80)
        logger.info('Config {} ({} configs in total)'.format(exp_configs.index(config) + 1, len(exp_configs)))
results = []
max_apc_test_acc, max_apc_test_f1, max_ate_test_f1 = 0,0,0
for i in range(n):
config.device = device
config.seed = i + 1
logger.info('No.{} training process of {}'.format(i + 1, n))
apc_test_acc, apc_test_f1, ate_test_f1 = main(config)
if apc_test_acc > max_apc_test_acc:
max_apc_test_acc = apc_test_acc
if apc_test_f1 > max_apc_test_f1:
max_apc_test_f1 = apc_test_f1
if ate_test_f1 > max_ate_test_f1:
max_ate_test_f1 = ate_test_f1
logger.info('max_ate_test_f1:{} max_apc_test_acc: {}\tmax_apc_test_f1: {} \t'
.format(max_ate_test_f1, max_apc_test_acc, max_apc_test_f1))
|
src/lib/datasets/dataset/nusceneshp.py | morrolinux/RTM3D | 393 | 11134503 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import pycocotools.coco as coco
from pycocotools.cocoeval import COCOeval
import numpy as np
import json
import os
import torch.utils.data as data
class NUSCENESHP(data.Dataset):
num_classes = 10
num_joints = 9
default_resolution = [896, 1600]
mean = np.array([0.485, 0.456, 0.406], np.float32).reshape(1, 1, 3)
std = np.array([0.229, 0.224, 0.225], np.float32).reshape(1, 1, 3)
flip_idx = [[0, 1], [2, 3], [4, 5], [6, 7]]
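    # flip_idx appears to list the keypoint index pairs that swap under horizontal flipping (left/right corner pairs of the projected 3D box).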
def __init__(self, opt, split):
        super(NUSCENESHP, self).__init__()
self.edges = [[0, 1], [0, 2], [1, 3], [2, 4],
[4, 6], [3, 5], [5, 6],
[5, 7]]
self.acc_idxs = [1, 2, 3, 4, 5, 6, 7, 8]
self.data_dir = os.path.join(opt.data_dir, 'kitti')
self.img_dir= os.path.join(self.data_dir,'image')
self.calib_dir = os.path.join(self.data_dir,'calib')
if split == 'test':
self.annot_path = os.path.join(
self.data_dir, 'annotations',
'image_info_test-dev2017.json').format(split)
else:
self.annot_path = os.path.join(
self.data_dir, 'annotations',
'kitti_{}_nuscenes.json').format(split)
self.max_objs = 32
self._data_rng = np.random.RandomState(123)
self._eig_val = np.array([0.2141788, 0.01817699, 0.00341571],
dtype=np.float32)
self._eig_vec = np.array([
[-0.58752847, -0.69563484, 0.41340352],
[-0.5832747, 0.00994535, -0.81221408],
[-0.56089297, 0.71832671, 0.41158938]
], dtype=np.float32)
self.split = split
self.opt = opt
self.alpha_in_degree = False
print('==> initializing kitti{} data.'.format(split))
self.coco = coco.COCO(self.annot_path)
image_ids = self.coco.getImgIds()
if split == 'train':
self.images = []
for img_id in image_ids:
idxs = self.coco.getAnnIds(imgIds=[img_id])
if len(idxs) > 0:
self.images.append(img_id)
else:
self.images = image_ids
self.num_samples = len(self.images)
print('Loaded {} {} samples'.format(split, self.num_samples))
def _to_float(self, x):
return float("{:.2f}".format(x))
def __len__(self):
return self.num_samples
|
recogym/agents/bayesian_poly_vb.py | philomenec/reco-gym | 413 | 11134509 | import numpy as np
from scipy.special import expit
from ..envs.configuration import Configuration
from . import (
AbstractFeatureProvider,
Model,
ModelBasedAgent,
ViewsFeaturesProvider
)
from .organic_count import to_categorical
bayesian_poly_args = {
'num_products': 10,
'random_seed': np.random.randint(2 ** 31 - 1),
'poly_degree': 2,
'max_iter': 5000,
'aa': 1.,
'bb': 1.,
'with_ps_all': False,
}
from numpy.random import rand
from numpy.linalg import inv
# Algorithm 6
# http://www.maths.usyd.edu.au/u/jormerod/JTOpapers/Ormerod10.pdf
def JJ(zeta):
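    # Bound term lambda(zeta) = (sigmoid(zeta) - 0.5) / (2 * zeta) used by the variational logistic-regression updates below (Algorithm 6 of the paper linked above).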
return 1. / (2. * zeta) * (1. / (1 + np.exp(-zeta)) - 0.5)
# TODO replace explicit inv with linear solves
def bayesian_logistic(Psi, y, mu_beta, Sigma_beta, iter = 200):
zeta = rand(Psi.shape[0])
for _ in range(iter):
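        # Coordinate-ascent updates: refresh the Gaussian posterior q(beta) = N(q_mu, q_Sigma), then re-tighten the per-observation bound parameters zeta.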
q_Sigma = inv(inv(Sigma_beta) + 2 * np.matmul(np.matmul(Psi.T, np.diag(JJ(zeta))), Psi))
q_mu = np.matmul(q_Sigma, (np.matmul(Psi.T, y - 0.5) + np.matmul(inv(Sigma_beta), mu_beta)))
zeta = np.sqrt(np.diag(np.matmul(np.matmul(Psi, q_Sigma + np.matmul(q_mu, q_mu.T)), Psi.T)))
return q_mu, q_Sigma
from scipy.stats import multivariate_normal
class BayesianModelBuilderVB(AbstractFeatureProvider):
def __init__(self, config):
super(BayesianModelBuilderVB, self).__init__(config)
def build(self):
class BayesianFeaturesProviderVB(ViewsFeaturesProvider):
"""
"""
def __init__(self, config):
super(BayesianFeaturesProviderVB, self).__init__(config)
def features(self, observation):
base_features = super().features(observation)
return base_features.reshape(1, self.config.num_products)
class BayesianRegressionModelVB(Model):
"""
"""
def __init__(self, config, Lambda):
super(BayesianRegressionModelVB, self).__init__(config)
self.Lambda = Lambda
def act(self, observation, features):
X = features
P = X.shape[1]
A = np.eye(P)
XA = np.kron(X, A)
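                # Each row of XA crosses the view features with one candidate action; averaging the sigmoid scores over the posterior draws in Lambda gives an expected click probability per action.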
action_proba = expit(np.matmul(XA, self.Lambda.T)).mean(1)
action = np.argmax(action_proba)
if self.config.with_ps_all:
ps_all = np.zeros(self.config.num_products)
ps_all[action] = 1.0
else:
ps_all = ()
return {
**super().act(observation, features),
**{
'a': action,
'ps': 1.0,
'ps-a': ps_all,
},
}
features, actions, deltas, pss = self.train_data()
X = features
N = X.shape[0]
P = X.shape[1]
A = to_categorical(actions, P)
XA = np.array([np.kron(X[n, :], A[n, :]) for n in range(N)])
y = deltas # clicks
Sigma = np.kron(self.config.aa * np.eye(P) + self.config.bb,
self.config.aa * np.eye(P) + self.config.bb)
q_mu, q_Sigma = bayesian_logistic(XA, y.reshape((N, 1)),
mu_beta = -6 * np.ones((P ** 2, 1)), Sigma_beta = Sigma)
Lambda = multivariate_normal.rvs(q_mu.reshape(P ** 2), q_Sigma, 1000)
# stan version of the above (seems to agree well)
# fit = pystan.stan('model.stan', data = {'N': features.shape[0], 'P': features.shape[1], 'XA': XA, 'y': y, 'Sigma': Sigma}, chains = 1)
# s = fit.extract()
# Lambda = s['lambda']
###
return (
BayesianFeaturesProviderVB(self.config), # Poly is a bad name ..
BayesianRegressionModelVB(self.config, Lambda)
)
class BayesianAgentVB(ModelBasedAgent):
"""
Bayesian Agent.
Note: the agent utilises VB to train a model.
"""
def __init__(self, config = Configuration(bayesian_poly_args)):
super(BayesianAgentVB, self).__init__(
config,
BayesianModelBuilderVB(config)
)
|
surreal/session/default_configs.py | PeihongYu/surreal | 471 | 11134510 | <filename>surreal/session/default_configs.py
from .config import extend_config
# ======================== Agent-Learner side ========================
BASE_LEARNER_CONFIG = {
'model': '_dict_',
'algo': {
# Agent class to instantiate
# Learner class to instantiate
'n_step': 1,
'gamma': '_float_',
'use_batchnorm': False,
'limit_training_episode_length': 0,
'network': {
'actor_regularization': 0.0,
'critic_regularization': 0.0,
},
},
'replay': {
# The replay class to instantiate
'batch_size': '_int_',
'replay_shards': 1,
},
'parameter_publish': {
# Minimum amount of time (seconds) between two parameter publish
'min_publish_interval': 0.3,
},
}
# ======================== Env side ========================
BASE_ENV_CONFIG = {
'env_name' : '_str_',
'sleep_time': 0.0,
'video' : {
'record_video' : False,
'max_videos': 10,
'record_every': 10,
'save_folder': None,
},
'eval_mode': {}, # for providing different env init args when in eval
'action_spec': {},
'obs_spec': {},
'frame_stacks': 1,
'frame_stack_concatenate_on_env': True,
# 'action_spec': {
# 'dim': '_list_',
# 'type': '_enum[continuous, discrete]_'
# },
# 'obs_spec': {
# 'dim': '_list_',
# 'type': '' # TODO uint8 format
# },
}
# ======================== Session side ========================
BASE_SESSION_CONFIG = {
'folder': '_str_',
'replay': {
'collector_frontend_host': '_str_', # upstream from agents' pusher
'collector_frontend_port': '_int_',
'collector_backend_host': '_str_', # upstream from agents' pusher
'collector_backend_port': '_int_',
'sampler_frontend_host': '_str_', # downstream to Learner request
'sampler_frontend_port': '_int_',
'sampler_backend_host': '_str_', # downstream to Learner request
'sampler_backend_port': '_int_',
'max_puller_queue': '_int_', # replay side: pull queue size
'evict_interval': '_float_', # in seconds
'tensorboard_display': True, # display replay stats on Tensorboard
},
'sender': {
'flush_iteration': '_int_',
'flush_time': '_int_',
},
'ps': {
'parameter_serving_frontend_host': '_str_',
'parameter_serving_frontend_port': '_int_',
'parameter_serving_backend_host': '_str_',
'parameter_serving_backend_port': '_int_',
'shards': '_int_',
'publish_host': '_str', # upstream from learner
'publish_port': '_int_'
},
'tensorplex': {
'host': '_str_',
'port': '_int_',
'tensorboard_port': '_int_', # tensorboard port
'agent_bin_size': 8,
'max_processes': 4,
'update_schedule': { # TODO rename this to 'periodic'
# for TensorplexWrapper:
'training_env': '_int_', # env record every N episodes
'eval_env': '_int_',
'eval_env_sleep': '_int_', # throttle eval by sleep n seconds
# for manual updates:
'agent': '_int_', # agent.tensorplex.add_scalars()
# WARN!!: DEPRECATED
'learner': '_int_', # learner.tensorplex.add_scalars()
'learner_min_update_interval': '_int_', #Update tensorplex at most every ? seconds
}
},
'loggerplex': {
'host': '_str_',
'port': '_int_',
'overwrite': False,
'level': 'info',
'show_level': True,
'time_format': 'hms',
'enable_local_logger': '_bool_',
'local_logger_level': 'info',
'local_logger_time_format': 'hms'
},
'agent': {
'fetch_parameter_mode': '_str_',
'fetch_parameter_interval': int,
},
'learner': {
'num_gpus': '_int_',
'prefetch_host': '_str_',
'prefetch_port': '_int_',
'prefetch_processes': '_int_',
'max_prefetch_queue': '_int_', # learner side: max number of batches to prefetch
'max_preprocess_queue': '_int_', # learner side: max number of batches to preprocess
},
'checkpoint': {
'restore': '_bool_', # if False, ignore the other configs under 'restore'
'restore_folder': None, # if None, use the same session folder.
# Otherwise restore ckpt from another experiment dir.
'learner': {
'restore_target': '_int_',
'mode': '_enum[best,history]_',
'keep_history': '_int_',
'keep_best': '_int_',
'periodic': '_int_',
'min_interval': '_int_',
},
'agent': {
'restore_target': '_int_',
'mode': '_enum[best,history]_',
'keep_history': '_int_',
'keep_best': '_int_',
'periodic': '_int_',
},
}
}
LOCAL_SESSION_CONFIG = {
'folder': '_str_',
'replay': {
'collector_frontend_host': 'localhost', # upstream from agents' pusher
'collector_frontend_port': 7001,
'collector_backend_host': 'localhost', # upstream from agents' pusher
'collector_backend_port': 7002,
'sampler_frontend_host': 'localhost', # downstream to Learner request
'sampler_frontend_port': 7003,
'sampler_backend_host': 'localhost', # downstream to Learner request
'sampler_backend_port': 7004,
'max_puller_queue': 10000, # replay side: pull queue size
'evict_interval': 0., # in seconds
'tensorboard_display': True, # display replay stats on Tensorboard
},
'sender': {
'flush_iteration': '_int_',
'flush_time': 0,
},
'ps': {
'parameter_serving_frontend_host': 'localhost',
'parameter_serving_frontend_port': 7005,
'parameter_serving_backend_host': 'localhost',
'parameter_serving_backend_port': 7006,
'shards': 2,
'publish_host': 'localhost', # upstream from learner
'publish_port': 7007
},
'tensorplex': {
'host': 'localhost',
'port': 7008,
'tensorboard_port': 6006,
'update_schedule': { # TODO: rename this to 'periodic'
# for TensorplexWrapper:
'training_env': 20, # env record every N episodes
'eval_env': 20,
'eval_env_sleep': 30, # throttle eval by sleep n seconds
# for manual updates:
'agent': 20, # agent.tensorplex.add_scalars()
'learner': 20, # learner.tensorplex.add_scalars()
'learner_min_update_interval': 30, #Update tensorplex at most every 30 seconds
}
},
'loggerplex': {
'host': 'localhost',
'port': 7009,
'enable_local_logger': True,
},
'agent': {
# fetch_parameter_mode: 'episode', 'episode:<n>', 'step', 'step:<n>'
# every episode, every n episodes, every step, every n steps
'fetch_parameter_mode': 'episode',
'fetch_parameter_interval': 1,
},
'learner': {
'num_gpus': 0,
'prefetch_host': 'localhost',
'prefetch_port': 7010,
'prefetch_processes': 2,
'max_prefetch_queue': 10, # learner side: max number of batches to prefetch
'max_preprocess_queue': 2, # learner side: max number of batches to preprocess
},
'checkpoint': {
'restore': False, # if False, ignore the other configs under 'restore'
'restore_folder': None,
'learner': {
'restore_target': 0,
'mode': 'history',
'keep_history': 2,
'keep_best': 0, # TODO don't keep best unless we solve the learner score issue
'periodic': 100000, # Save every 100000 steps
'min_interval': 15 * 60, # No checkpoint less than 15 min apart.
},
'agent': {
'restore_target': 0,
'mode': 'history',
'keep_history': 2,
'keep_best': 0, # TODO don't keep best unless we solve the learner score issue
'periodic': 100,
},
}
}
LOCAL_SESSION_CONFIG = extend_config(LOCAL_SESSION_CONFIG, BASE_SESSION_CONFIG)
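# extend_config (imported from .config) presumably overlays these concrete values onto the base schema, whose '_str_'/'_int_' markers appear to act as required-type placeholders.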
KUBE_SESSION_CONFIG = {
'folder': '_str_',
'replay': {
'collector_frontend_host': '_str_', # upstream from agents' pusher
'sampler_frontend_host': '_str_', # downstream to Learner request
},
'sender': {
'flush_iteration': '_int_',
},
'ps': {
'host': '_str_', # downstream to agent requests
'publish_host': '_str_', # upstream from learner
},
'tensorplex': {
'host': '_str_',
},
'loggerplex': {
'host': '_str_',
},
}
KUBE_SESSION_CONFIG = extend_config(KUBE_SESSION_CONFIG, LOCAL_SESSION_CONFIG)
|
optimization/fed_avg_client_opt.py | garyxcheng/federated | 330 | 11134514 | # Copyright 2021, Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""An implementation of FedAvg where client optimizer states are aggregated."""
import collections
import enum
from typing import Callable, Optional, Union
import attr
import tensorflow as tf
import tensorflow_federated as tff
from utils import tensor_utils
# Convenience type aliases.
ModelBuilder = Callable[[], tff.learning.Model]
OptimizerBuilder = Callable[[float], tf.keras.optimizers.Optimizer]
ClientWeightFn = Callable[..., float]
LRScheduleFn = Callable[[Union[int, tf.Tensor]], Union[tf.Tensor, float]]
@attr.s(eq=False)
class OptimizerState(object):
iterations = attr.ib()
weights = attr.ib()
class AggregationType(enum.Enum):
mean = 'mean'
sum = 'sum'
min = 'min'
max = 'max'
def build_aggregator(aggregation_method):
"""Builds a federated aggregation method.
Args:
aggregation_method: A string describing the desired aggregation type. Should
be one of 'mean', 'sum', 'min', or 'max'.
Returns:
A function that accepts a federated value with placement `tff.CLIENTS` and
an optional 'weights' argument, and returns a federated value with
placement `tff.SERVER`.
"""
try:
aggregation_type = AggregationType(aggregation_method)
except ValueError:
raise ValueError(
'Aggregation method {} is not supported. Must be one of {}'.format(
aggregation_method, list(AggregationType.__members__.keys())))
if aggregation_type is AggregationType.mean:
aggregator = tff.federated_mean
elif aggregation_type is AggregationType.sum:
def aggregator(federated_values, weight=None):
del weight
return tff.federated_sum(federated_values)
elif aggregation_type is AggregationType.max:
def aggregator(federated_values, weight=None):
del weight
return tff.aggregators.federated_max(federated_values)
elif aggregation_type is AggregationType.min:
def aggregator(federated_values, weight=None):
del weight
return tff.aggregators.federated_min(federated_values)
else:
raise ValueError(
        'Aggregation method {} has no associated TFF computation '
        'implemented.'.format(aggregation_method))
return aggregator
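# Usage sketch: build_aggregator('mean') returns tff.federated_mean (weighted), while 'sum', 'min' and 'max' return wrappers that ignore the optional weight argument, as implemented above.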
def _initialize_optimizer_vars(model: tff.learning.Model,
optimizer: tf.keras.optimizers.Optimizer):
"""Ensures variables holding the state of `optimizer` are created."""
delta = tf.nest.map_structure(tf.zeros_like, _get_weights(model).trainable)
model_weights = _get_weights(model)
grads_and_vars = tf.nest.map_structure(lambda x, v: (x, v), delta,
model_weights.trainable)
optimizer.apply_gradients(grads_and_vars, name='server_update')
assert optimizer.variables()
def _get_weights(model: tff.learning.Model) -> tff.learning.ModelWeights:
return tff.learning.ModelWeights.from_model(model)
def _get_optimizer_state(optimizer):
return OptimizerState(
iterations=optimizer.iterations,
# The first weight of an optimizer is reserved for the iterations count,
      # see https://www.tensorflow.org/api_docs/python/tf/keras/optimizers/Optimizer#get_weights  pylint: disable=line-too-long
weights=tuple(optimizer.weights[1:]))
@attr.s(eq=False, order=False, frozen=True)
class ServerState(object):
"""Structure for state on the server.
Fields:
- `model`: A dictionary of the model's trainable and non-trainable
weights.
- `client_optimizer_state`: A namedtuple of the client optimizer variables.
- `server_optimizer_state`: A namedtuple of the server optimizer variables.
- `round_num`: The current training round, as a float.
"""
model = attr.ib()
client_optimizer_state = attr.ib()
server_optimizer_state = attr.ib()
round_num = attr.ib()
# This is a float to avoid type incompatibility when calculating learning rate
# schedules.
@tf.function
def server_update(model, server_optimizer, server_state, weights_delta,
client_optimizer_state_delta):
"""Updates `server_state` based on `weights_delta`, increase the round number.
Args:
model: A `tff.learning.Model`.
server_optimizer: A `tf.keras.optimizers.Optimizer`.
server_state: A `ServerState`, the state to be updated.
weights_delta: An update to the trainable variables of the model.
client_optimizer_state_delta: An update to the client optimizer variables.
Returns:
An updated `ServerState`.
"""
model_weights = _get_weights(model)
tf.nest.map_structure(lambda v, t: v.assign(t), model_weights,
server_state.model)
# Server optimizer variables must be initialized prior to invoking this
updated_client_optimizer_state = tf.nest.map_structure(
lambda a, b: a + b, server_state.client_optimizer_state,
client_optimizer_state_delta)
server_optimizer_state = _get_optimizer_state(server_optimizer)
tf.nest.map_structure(lambda v, t: v.assign(t), server_optimizer_state,
server_state.server_optimizer_state)
# Apply the update to the model. We must multiply weights_delta by -1.0 to
# view it as a gradient that should be applied to the server_optimizer.
grads_and_vars = [
(-1.0 * x, v) for x, v in zip(weights_delta, model_weights.trainable)
]
server_optimizer.apply_gradients(grads_and_vars)
# Create a new state based on the updated model.
return tff.structure.update_struct(
server_state,
model=model_weights,
client_optimizer_state=updated_client_optimizer_state,
server_optimizer_state=server_optimizer_state,
round_num=server_state.round_num + 1.0)
@attr.s(eq=False, order=False, frozen=True)
class ClientOutput(object):
"""Structure for outputs returned from clients during federated optimization.
Fields:
- `weights_delta`: A dictionary of updates to the model's trainable
variables.
- `client_weight`: Weight to be used in a weighted mean when
aggregating `weights_delta` and `optimizer_delta`.
- `model_output`: A structure matching
`tff.learning.Model.report_local_outputs`, reflecting the results of
training on the input dataset.
- `optimizer_output`: Additional metrics or other outputs defined by the
optimizer.
"""
weights_delta = attr.ib()
optimizer_state_delta = attr.ib()
client_weight = attr.ib()
model_output = attr.ib()
optimizer_output = attr.ib()
def create_client_update_fn():
"""Returns a tf.function for the client_update.
This "create" fn is necesessary to prevent
"ValueError: Creating variables on a non-first call to a function decorated
with tf.function" errors due to the client optimizer creating variables. This
is only needed because we test the client_update function directly.
"""
@tf.function
def client_update(model,
dataset,
initial_weights,
initial_client_optimizer_state,
client_optimizer,
client_model_weight_fn=None,
client_opt_weight_fn=None):
"""Updates client model.
Args:
model: A `tff.learning.Model`.
dataset: A 'tf.data.Dataset'.
initial_weights: A `tff.learning.Model.weights` from server.
initial_client_optimizer_state: The variables to assign to the client
optimizer.
client_optimizer: A `tf.keras.optimizer.Optimizer` object.
client_model_weight_fn: Optional function that takes the output of
`model.report_local_outputs` and returns a tensor that provides the
weight in the federated average of model deltas. If not provided, the
default is the total number of examples processed on device.
client_opt_weight_fn: Optional function that takes the output of
`model.report_local_outputs` and returns a tensor that provides the
weight in the federated average of the optimizer states. If not
provided, the default is a uniform weighting.
Returns:
A 'ClientOutput`.
"""
model_weights = _get_weights(model)
tf.nest.map_structure(lambda v, t: v.assign(t), model_weights,
initial_weights)
# Client optimizer variables must be initialized prior to invoking this
client_optimizer_state = _get_optimizer_state(client_optimizer)
tf.nest.map_structure(lambda v, t: v.assign(t), client_optimizer_state,
initial_client_optimizer_state)
num_examples = tf.constant(0, dtype=tf.int32)
for batch in dataset:
with tf.GradientTape() as tape:
output = model.forward_pass(batch)
grads = tape.gradient(output.loss, model_weights.trainable)
grads_and_vars = zip(grads, model_weights.trainable)
client_optimizer.apply_gradients(grads_and_vars)
num_examples += tf.shape(output.predictions)[0]
aggregated_outputs = model.report_local_outputs()
weights_delta = tf.nest.map_structure(lambda a, b: a - b,
model_weights.trainable,
initial_weights.trainable)
weights_delta, non_finite_weights_delta = (
tensor_utils.zero_all_if_any_non_finite(weights_delta))
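    # Clients whose update contains NaN/Inf get a zeroed delta and, below, zero model weight, so they effectively drop out of the federated average.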
if non_finite_weights_delta > 0:
client_model_weight = tf.constant(0, dtype=tf.float32)
elif client_model_weight_fn is None:
client_model_weight = tf.cast(num_examples, dtype=tf.float32)
else:
client_model_weight = client_model_weight_fn(aggregated_outputs)
optimizer_state_delta = tf.nest.map_structure(
lambda a, b: a - b, client_optimizer_state,
initial_client_optimizer_state)
if client_opt_weight_fn is None:
client_opt_weight = tf.cast(num_examples, dtype=tf.float32)
else:
client_opt_weight = client_opt_weight_fn(aggregated_outputs)
optimizer_output = collections.OrderedDict([('num_examples', num_examples)])
client_weight = collections.OrderedDict([
('model_weight', client_model_weight),
('optimizer_weight', client_opt_weight)
])
return ClientOutput(
weights_delta=weights_delta,
optimizer_state_delta=optimizer_state_delta,
client_weight=client_weight,
model_output=aggregated_outputs,
optimizer_output=optimizer_output)
return client_update
def build_server_init_fn(model_fn, client_optimizer_fn, server_optimizer_fn):
"""Builds a `tff.tf_computation` that returns the initial `ServerState`.
The attributes `ServerState.model`, `ServerState.client_optimizer_state`, and
`ServerState.server_optimizer_state` are initialized via their constructor
functions. The attribute `ServerState.round_num` is set to 0.0.
Args:
model_fn: A no-arg function that returns a `tff.learning.TrainableModel`.
client_optimizer_fn: A no-arg function that returns a
`tf.keras.optimizers.Optimizer`.
server_optimizer_fn: A no-arg function that returns a
`tf.keras.optimizers.Optimizer`.
Returns:
A `tff.tf_computation` that returns initial `ServerState`.
"""
@tff.tf_computation
def server_init_tf():
client_optimizer = client_optimizer_fn()
server_optimizer = server_optimizer_fn()
model = model_fn()
_initialize_optimizer_vars(model, client_optimizer)
_initialize_optimizer_vars(model, server_optimizer)
return ServerState(
model=_get_weights(model),
client_optimizer_state=_get_optimizer_state(client_optimizer),
server_optimizer_state=_get_optimizer_state(server_optimizer),
round_num=0.0)
return server_init_tf
def build_iterative_process(
model_fn: ModelBuilder,
client_optimizer_fn: OptimizerBuilder,
client_lr: Union[float, LRScheduleFn] = 0.1,
server_optimizer_fn: OptimizerBuilder = tf.keras.optimizers.SGD,
server_lr: Union[float, LRScheduleFn] = 1.0,
optimizer_aggregation: AggregationType = 'mean',
client_model_weight_fn: Optional[ClientWeightFn] = None,
client_opt_weight_fn: Optional[ClientWeightFn] = None,
) -> tff.templates.IterativeProcess: # pytype: disable=annotation-type-mismatch
"""Builds an iterative process for FedAvg with client optimizer aggregation.
This version of FedAvg allows user-selected `tf.keras.Optimizers` on both
the client and server level. Moreover, the iterative process will aggregate
both the changes in the client model weights, and the changes in the client
optimizer states. The aggregated model weights and client optimizer states
will be broadcast to all clients in the subsequent round.
For example, if clients use SGD with momentum, then this iterative process
will aggregate both the client model weights and the momentum parameter in
the clients' optimizers. This allows clients in the next round of computation
to start with an estimated momentum parameter, rather than initializing it
at zero.
Args:
model_fn: A no-arg function that returns a `tff.learning.Model`.
client_optimizer_fn: A function that accepts a `learning_rate` keyword
argument and returns a `tf.keras.optimizers.Optimizer` instance.
client_lr: A scalar learning rate or a function that accepts a float
`round_num` argument and returns a learning rate.
server_optimizer_fn: A function that accepts a `learning_rate` argument and
returns a `tf.keras.optimizers.Optimizer` instance.
server_lr: A scalar learning rate or a function that accepts a float
`round_num` argument and returns a learning rate.
optimizer_aggregation: What type of aggregation to use for the client
optimizer states. Must be a member of ['mean', 'sum', 'min', 'max'].
client_model_weight_fn: Optional function that takes the output of
`model.report_local_outputs` and returns a tensor that provides the weight
in the federated average of the client models. If not provided, the
default is the total number of examples processed on device.
client_opt_weight_fn: Optional function that takes the output of
`model.report_local_outputs` and returns a tensor that provides the weight
in the federated average of the client optimizer states. If not provided,
the default is the total number of examples processed on device.
Returns:
A `tff.templates.IterativeProcess`.
"""
client_lr_schedule = client_lr
if not callable(client_lr_schedule):
client_lr_schedule = lambda round_num: client_lr
server_lr_schedule = server_lr
if not callable(server_lr_schedule):
server_lr_schedule = lambda round_num: server_lr
optimizer_aggregator = build_aggregator(optimizer_aggregation)
placeholder_model = model_fn()
server_init_tf = build_server_init_fn(
model_fn,
# Initialize with the learning rate for round zero.
lambda: client_optimizer_fn(client_lr_schedule(0)),
lambda: server_optimizer_fn(server_lr_schedule(0)))
server_state_type = server_init_tf.type_signature.result
model_weights_type = server_state_type.model
client_optimizer_state_type = server_state_type.client_optimizer_state
round_num_type = server_state_type.round_num
tf_dataset_type = tff.SequenceType(placeholder_model.input_spec)
@tff.tf_computation(tf_dataset_type, model_weights_type,
client_optimizer_state_type, round_num_type)
def client_update_fn(tf_dataset, initial_model_weights,
initial_optimizer_state, round_num):
"""Performs a client update."""
model = model_fn()
client_lr = client_lr_schedule(round_num)
client_optimizer = client_optimizer_fn(client_lr)
# We initialize the client optimizer variables to avoid creating them
# within the scope of the tf.function client_update.
_initialize_optimizer_vars(model, client_optimizer)
client_update = create_client_update_fn()
return client_update(model, tf_dataset, initial_model_weights,
initial_optimizer_state, client_optimizer,
client_model_weight_fn, client_opt_weight_fn)
@tff.tf_computation(server_state_type, model_weights_type.trainable,
client_optimizer_state_type)
def server_update_fn(server_state, model_delta, optimizer_delta):
model = model_fn()
server_lr = server_lr_schedule(server_state.round_num)
server_optimizer = server_optimizer_fn(server_lr)
# We initialize the server optimizer variables to avoid creating them
# within the scope of the tf.function server_update.
_initialize_optimizer_vars(model, server_optimizer)
return server_update(model, server_optimizer, server_state, model_delta,
optimizer_delta)
@tff.tf_computation(client_optimizer_state_type)
def _convert_opt_state_to_float(optimizer_state):
return tf.nest.map_structure(lambda x: tf.cast(x, tf.float32),
optimizer_state)
@tff.tf_computation(_convert_opt_state_to_float.type_signature.result)
def _convert_opt_state_to_int(optimizer_state):
iterations_as_int = tf.cast(optimizer_state.iterations, tf.int64)
return OptimizerState(
iterations=iterations_as_int, weights=optimizer_state.weights)
@tff.federated_computation(
tff.type_at_server(server_state_type),
tff.type_at_clients(tf_dataset_type))
def run_one_round(server_state, federated_dataset):
"""Orchestration logic for one round of computation.
Args:
server_state: A `ServerState`.
federated_dataset: A federated `tf.Dataset` with placement `tff.CLIENTS`.
Returns:
A tuple of updated `ServerState` and the result of
`tff.learning.Model.federated_output_computation`.
"""
client_model = tff.federated_broadcast(server_state.model)
client_optimizer_state = tff.federated_broadcast(
server_state.client_optimizer_state)
client_round_num = tff.federated_broadcast(server_state.round_num)
client_outputs = tff.federated_map(
client_update_fn, (federated_dataset, client_model,
client_optimizer_state, client_round_num))
client_model_weight = client_outputs.client_weight.model_weight
client_opt_weight = client_outputs.client_weight.optimizer_weight
model_delta = tff.federated_mean(
client_outputs.weights_delta, weight=client_model_weight)
# We convert the optimizer state to a float type so that it can be used
    # with things such as `tff.federated_mean`. This is only necessary because
# `tf.keras.Optimizer` objects have a state with an integer indicating
# the number of times it has been applied.
client_optimizer_state_delta = tff.federated_map(
_convert_opt_state_to_float, client_outputs.optimizer_state_delta)
client_optimizer_state_delta = optimizer_aggregator(
client_optimizer_state_delta, weight=client_opt_weight)
    # We convert the optimizer state back into one with an integer iteration counter.
client_optimizer_state_delta = tff.federated_map(
_convert_opt_state_to_int, client_optimizer_state_delta)
server_state = tff.federated_map(
server_update_fn,
(server_state, model_delta, client_optimizer_state_delta))
aggregated_outputs = placeholder_model.federated_output_computation(
client_outputs.model_output)
if aggregated_outputs.type_signature.is_struct():
aggregated_outputs = tff.federated_zip(aggregated_outputs)
return server_state, aggregated_outputs
@tff.federated_computation
def initialize_fn():
return tff.federated_value(server_init_tf(), tff.SERVER)
iterative_process = tff.templates.IterativeProcess(
initialize_fn=initialize_fn, next_fn=run_one_round)
@tff.tf_computation(server_state_type)
def get_model_weights(server_state):
return server_state.model
iterative_process.get_model_weights = get_model_weights
return iterative_process
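# A minimal usage sketch of `build_iterative_process`. The Keras wiring below is an
# assumption for illustration (`create_keras_model`, `input_spec` and the client
# datasets are hypothetical), and the exact `tff.learning` helper signatures can
# differ between TFF releases; this is not the canonical driver for this module.
def _example_build_and_run(create_keras_model, input_spec, federated_train_data):
  def model_fn():
    # Wrap a freshly constructed (uncompiled) Keras model for TFF.
    return tff.learning.from_keras_model(
        create_keras_model(),
        input_spec=input_spec,
        loss=tf.keras.losses.SparseCategoricalCrossentropy())
  process = build_iterative_process(
      model_fn=model_fn,
      client_optimizer_fn=tf.keras.optimizers.SGD,  # called as SGD(learning_rate)
      client_lr=0.1,
      server_optimizer_fn=tf.keras.optimizers.SGD,
      server_lr=1.0,
      optimizer_aggregation='mean')
  state = process.initialize()
  for _ in range(5):
    # Each call runs one federated round over the sampled client datasets.
    state, metrics = process.next(state, federated_train_data)
  return process.get_model_weights(state), metrics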
|
conf/opt/graphite/webapp/graphite/app_settings.py | turbosquid/docker-graphite-statsd | 845 | 11134522 | """Copyright 2008 Orbitz WorldWide
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License."""
# Django settings for graphite project.
# DO NOT MODIFY THIS FILE DIRECTLY - use local_settings.py instead
from django import VERSION as DJANGO_VERSION
from os.path import dirname, join, abspath
ADMINS = ()
MANAGERS = ADMINS
TEMPLATE_DIRS = (
join(dirname( abspath(__file__) ), 'templates'),
)
#Django settings below, do not touch!
APPEND_SLASH = False
TEMPLATE_DEBUG = False
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
},
}
# Language code for this installation. All choices can be found here:
# http://www.w3.org/TR/REC-html40/struct/dirlang.html#langcodes
# http://blogs.law.harvard.edu/tech/stories/storyReader$15
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# Absolute path to the directory that holds media.
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT.
# Example: "http://media.lawrence.com"
MEDIA_URL = ''
# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
# trailing slash.
# Examples: "http://foo.com/media/", "/media/".
ADMIN_MEDIA_PREFIX = '/media/'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.middleware.gzip.GZipMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
)
ROOT_URLCONF = 'graphite.urls'
INSTALLED_APPS = (
'graphite.metrics',
'graphite.render',
'graphite.cli',
'graphite.browser',
'graphite.composer',
'graphite.account',
'graphite.dashboard',
'graphite.whitelist',
'graphite.events',
'graphite.url_shortener',
'django.contrib.auth',
'django.contrib.sessions',
'django.contrib.admin',
'django.contrib.contenttypes',
'django.contrib.staticfiles',
'tagging',
)
AUTHENTICATION_BACKENDS = ['django.contrib.auth.backends.ModelBackend']
GRAPHITE_WEB_APP_SETTINGS_LOADED = True
STATIC_URL = '/content/'
STATIC_ROOT='/opt/graphite/webapp/content/'
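# Site-specific overrides belong in local_settings.py rather than in this file. A
# sketch of what such an override file might contain is shown below; the option
# names are examples and vary with the graphite-web version in use:
#
#   SECRET_KEY = 'change-me'
#   TIME_ZONE = 'UTC'
#   MEMCACHE_HOSTS = ['127.0.0.1:11211']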
|
resume_parser/parser_app/apps.py | kailaspathi/ResumeParser | 215 | 11134525 | from django.apps import AppConfig
class ParserAppConfig(AppConfig):
name = 'parser_app'
|
holocron/trainer/utils.py | MateoLostanlen/Holocron | 181 | 11134529 | # Copyright (C) 2019-2021, <NAME>.
# This program is licensed under the Apache License version 2.
# See LICENSE or go to <https://www.apache.org/licenses/LICENSE-2.0.txt> for full license details.
from torch.nn import Module
from torch.nn.modules.batchnorm import _BatchNorm
from typing import Optional
__all__ = ['freeze_bn', 'freeze_model']
def freeze_bn(mod: Module) -> Module:
"""Prevents parameter and stats from updating in Batchnorm layers that are frozen
Args:
mod (torch.nn.Module): model to train
Returns:
torch.nn.Module: model
"""
# Loop on modules
for m in mod.modules():
if isinstance(m, _BatchNorm) and m.affine and all(not p.requires_grad for p in m.parameters()):
# Switch back to commented code when https://github.com/pytorch/pytorch/issues/37823 is resolved
m.track_running_stats = False
m.eval()
return mod
def freeze_model(model: Module, last_frozen_layer: Optional[str] = None, frozen_bn_stat_update: bool = False) -> Module:
"""Freeze a specific range of model layers
Args:
model (torch.nn.Module): model to train
last_frozen_layer (str, optional): last layer to freeze. Assumes layers have been registered in forward order
frozen_bn_stat_update (bool, optional): force stats update in BN layers that are frozen
Returns:
torch.nn.Module: model
"""
# Loop on parameters
if isinstance(last_frozen_layer, str):
layer_reached = False
for n, p in model.named_parameters():
if n.startswith(last_frozen_layer):
layer_reached = True
p.requires_grad_(False)
elif not layer_reached:
p.requires_grad_(False)
if not layer_reached:
raise ValueError(f"Unable to locate child module {last_frozen_layer}")
# Loop on modules
if not frozen_bn_stat_update:
model = freeze_bn(model)
return model
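# A minimal usage sketch: freezing the early stages of a torchvision backbone before
# fine-tuning. `torchvision` and the child-module name 'layer2' are assumptions made
# for the example; substitute the actual backbone and layer names of your model.
def _example_partial_freeze():
    from torchvision.models import resnet18
    model = resnet18()
    # Freeze every parameter up to and including 'layer2', and keep the frozen
    # BatchNorm layers from updating their running statistics.
    model = freeze_model(model, last_frozen_layer='layer2', frozen_bn_stat_update=False)
    return [n for n, p in model.named_parameters() if p.requires_grad]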
|
demos/wireworld_xor_demo.py | lantunes/cellpylib | 124 | 11134546 | import cellpylib as cpl
import numpy as np
from matplotlib.colors import ListedColormap
def wireworld_rule(n, c, t):
current_activity = n[1][1]
if current_activity == 0: # empty
return 0
if current_activity == 1: # electron head
return 2
if current_activity == 2: # electron tail
return 3
if current_activity == 3: # conductor
electron_head_count = np.count_nonzero(n == 1)
return 1 if electron_head_count == 1 or electron_head_count == 2 else 3
cellular_automata = np.array([[
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 2, 1, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 3, 1, 2, 3, 3, 3, 3, 1, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 3, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 3, 3, 3, 3, 2],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 0, 0, 0, 0],
[0, 0, 0, 3, 3, 2, 1, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0],
[0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 2, 1, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 3, 3, 3, 3, 3, 3, 3, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
]])
cellular_automata = cpl.evolve2d(cellular_automata, timesteps=25,
apply_rule=wireworld_rule, neighbourhood="Moore")
cpl.plot2d_animate(cellular_automata, show_grid=True, show_margin=False, scale=0.3,
colormap=ListedColormap(["black", "blue", "red", "yellow"]))
|
examples/simulation/simulate_ucsf/make_ucsf.py | genematx/nmrglue | 150 | 11134564 | <gh_stars>100-1000
#! /usr/bin/env python
import nmrglue as ng
import numpy as np
# create a sparky dictionary
# A dictionary from a existing Sparky ucsf file can be found using:
# ng.sparky.guess_udic(*ng.sparky.read('filename.ucsf'))
udic = {
'ndim': 2,
0: {'car': 7329.0,
'complex': False,
'encoding': 'states',
'freq': True,
'label': '15N',
'obs': 60.8,
'size': 512,
'sw': 1523.43,
'time': False},
1: {'car': 5403.570418865944,
'complex': False,
'encoding': 'direct',
'freq': True,
'label': '1H',
'obs': 600.0,
'size': 1024,
'sw': 3606.5,
'time': False}
}
dic = ng.sparky.create_dic(udic)
data = np.empty((512, 1024), dtype='float32')
# read in the peak list
peak_list = np.recfromtxt('peaks.txt', names=True)
npeaks = len(peak_list)
# convert the peak list from PPM to points
uc_15N = ng.sparky.make_uc(dic, None, 0)
uc_1H = ng.sparky.make_uc(dic, None, 1)
lw_15N = 5.0 # 15N dimension linewidth in points
lw_1H = 5.0 # 1H dimension linewidth in points
params = []
for ppm_15N, ppm_1H in peak_list:
pts_15N = uc_15N.f(ppm_15N, 'ppm')
pts_1H = uc_1H.f(ppm_1H, 'ppm')
params.append([(pts_15N, lw_15N), (pts_1H, lw_1H)])
# simulate the spectrum
shape = (512, 1024) # size should match the dictionary size
lineshapes = ('g', 'g') # gaussian in both dimensions
amps = [100.0] * npeaks
data = ng.linesh.sim_NDregion(shape, lineshapes, params, amps)
# save the spectrum
ng.sparky.write("test.ucsf", dic, data.astype('float32'), overwrite=True)
|
src/rpdk/core/generate.py | wbingli/cloudformation-cli | 200 | 11134605 | """This sub command generates code from the project and resource schema.
Projects can be created via the 'init' sub command.
"""
import logging
from .project import Project
LOG = logging.getLogger(__name__)
def generate(_args):
project = Project()
project.load()
project.generate()
project.generate_docs()
LOG.warning("Generated files for %s", project.type_name)
def setup_subparser(subparsers, parents):
# see docstring of this file
parser = subparsers.add_parser("generate", description=__doc__, parents=parents)
parser.set_defaults(command=generate)
|
trakt/interfaces/sync/ratings.py | milokmet/trakt.py | 147 | 11134615 | from __future__ import absolute_import, division, print_function
from trakt.interfaces.base import authenticated
from trakt.interfaces.sync.core.mixins import Get, Add, Remove
class SyncRatingsInterface(Get, Add, Remove):
path = 'sync/ratings'
@authenticated
def get(self, media=None, rating=None, store=None, extended=None, flat=False, page=None, per_page=None, **kwargs):
if media and not flat and page is not None:
raise ValueError('`page` parameter is only supported with `flat=True`')
# Build parameters
params = []
if rating is not None:
params.append(rating)
# Build query
query = {
'extended': extended,
'page': page,
'limit': per_page
}
# Request ratings
return super(SyncRatingsInterface, self).get(
media, store, params,
flat=flat or media is None,
query=query,
**kwargs
)
#
# Shortcut methods
#
@authenticated
def all(self, rating=None, store=None, **kwargs):
return self.get(
'all',
rating=rating,
store=store,
**kwargs
)
@authenticated
def movies(self, rating=None, store=None, **kwargs):
return self.get(
'movies',
rating=rating,
store=store,
**kwargs
)
@authenticated
def shows(self, rating=None, store=None, **kwargs):
return self.get(
'shows',
rating=rating,
store=store,
**kwargs
)
@authenticated
def seasons(self, rating=None, store=None, **kwargs):
return self.get(
'seasons',
rating=rating,
store=store,
**kwargs
)
@authenticated
def episodes(self, rating=None, store=None, **kwargs):
return self.get(
'episodes',
rating=rating,
store=store,
**kwargs
)
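# A minimal usage sketch of this interface through the module-level `Trakt` client.
# The client id/secret and the stored OAuth response are placeholders, and the exact
# configuration calls depend on how your application authenticates with Trakt.
def _example_fetch_top_rated_movies(authorization):
    from trakt import Trakt
    Trakt.configuration.defaults.client(id='<client-id>', secret='<client-secret>')
    with Trakt.configuration.oauth.from_response(authorization):
        # GET /sync/ratings/movies/10 - only items the user rated 10.
        return Trakt['sync/ratings'].movies(rating=10)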
|
tools/generate_fill_mask.py | ankane/informers | 340 | 11134627 | <gh_stars>100-1000
from pathlib import Path
import tempfile
from transformers.convert_graph_to_onnx import convert, quantize
dest = Path(tempfile.mkdtemp(), "fill-mask.onnx")
convert(
pipeline_name="fill-mask",
model="distilroberta-base",
output=dest,
framework="pt",
opset=11
)
print(dest)
|
pymatgen/command_line/enumlib_caller.py | wangyusu/pymatgen | 921 | 11134635 | # coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module implements an interface to enumlib, <NAME>'s excellent Fortran
code for enumerating derivative structures.
This module depends on a compiled enumlib with the executables enum.x and
makestr.x available in the path. Please download the library at
https://github.com/msg-byu/enumlib and follow the instructions in the README to
compile these two executables accordingly.
If you use this module, please cite the following:
<NAME> and <NAME>, "Algorithm for generating derivative
structures," Phys. Rev. B 77 224115 (26 June 2008)
<NAME> and <NAME>, "Generating derivative structures from
multilattices: Application to hcp alloys," Phys. Rev. B 80 014120 (July 2009)
<NAME>, <NAME>, and <NAME>, "Generating
derivative structures at a fixed concentration," Comp. Mat. Sci. 59
101-107 (March 2012)
<NAME>, <NAME>, <NAME>, "Generating derivative
superstructures for systems with high configurational freedom," Comp. Mat.
Sci. 136 144-149 (May 2017)
"""
import fractions
import glob
import itertools
import logging
import math
import re
import subprocess
from threading import Timer
import numpy as np
from monty.dev import requires
from monty.fractions import lcm
from monty.os.path import which
from monty.tempfile import ScratchDir
from pymatgen.core.periodic_table import DummySpecies
from pymatgen.core.sites import PeriodicSite
from pymatgen.core.structure import Structure
from pymatgen.io.vasp.inputs import Poscar
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
logger = logging.getLogger(__name__)
# Favor the use of the newer "enum.x" by <NAME> instead of the older
# "multienum.x"
enum_cmd = which("enum.x") or which("multienum.x")
# prefer makestr.x at present
makestr_cmd = which("makestr.x") or which("makeStr.x") or which("makeStr.py")
@requires(
enum_cmd and makestr_cmd,
"EnumlibAdaptor requires the executables 'enum.x' or 'multienum.x' "
"and 'makestr.x' or 'makeStr.py' to be in the path. Please download the "
"library at https://github.com/msg-byu/enumlib and follow the instructions "
"in the README to compile these two executables accordingly.",
)
class EnumlibAdaptor:
"""
An adaptor for enumlib.
.. attribute:: structures
List of all enumerated structures.
"""
amount_tol = 1e-5
def __init__(
self,
structure,
min_cell_size=1,
max_cell_size=1,
symm_prec=0.1,
enum_precision_parameter=0.001,
refine_structure=False,
check_ordered_symmetry=True,
timeout=None,
):
"""
Initializes the adapter with a structure and some parameters.
Args:
structure: An input structure.
min_cell_size (int): The minimum cell size wanted. Defaults to 1.
max_cell_size (int): The maximum cell size wanted. Defaults to 1.
symm_prec (float): Symmetry precision. Defaults to 0.1.
enum_precision_parameter (float): Finite precision parameter for
enumlib. Default of 0.001 is usually ok, but you might need to
tweak it for certain cells.
refine_structure (bool): If you are starting from a structure that
has been relaxed via some electronic structure code,
it is usually much better to start with symmetry determination
                and then obtain a refined structure. The refined structure has
                cell parameters and atomic positions shifted to the expected
                symmetry positions, which makes it much less sensitive to
                precision issues in enumlib. If you are already starting from an
experimental cif, refinement should have already been done and
it is not necessary. Defaults to False.
check_ordered_symmetry (bool): Whether to check the symmetry of
the ordered sites. If the symmetry of the ordered sites is
lower, the lowest symmetry ordered sites is included in the
enumeration. This is important if the ordered sites break
symmetry in a way that is important getting possible
structures. But sometimes including ordered sites
slows down enumeration to the point that it cannot be
completed. Switch to False in those cases. Defaults to True.
timeout (float): If specified, will kill enumlib after specified
time in minutes. This can be useful for gracefully handling
enumerations in a high-throughput context, for some enumerations
which will not terminate in a realistic length of time.
"""
if refine_structure:
finder = SpacegroupAnalyzer(structure, symm_prec)
self.structure = finder.get_refined_structure()
else:
self.structure = structure
self.min_cell_size = min_cell_size
self.max_cell_size = max_cell_size
self.symm_prec = symm_prec
self.enum_precision_parameter = enum_precision_parameter
self.check_ordered_symmetry = check_ordered_symmetry
self.timeout = timeout
def run(self):
"""
Run the enumeration.
"""
# Create a temporary directory for working.
with ScratchDir(".") as d:
logger.debug("Temp dir : {}".format(d))
# Generate input files
self._gen_input_file()
# Perform the actual enumeration
num_structs = self._run_multienum()
# Read in the enumeration output as structures.
if num_structs > 0:
self.structures = self._get_structures(num_structs)
else:
raise EnumError("Unable to enumerate structure.")
def _gen_input_file(self):
"""
Generate the necessary struct_enum.in file for enumlib. See enumlib
documentation for details.
"""
coord_format = "{:.6f} {:.6f} {:.6f}"
# Using symmetry finder, get the symmetrically distinct sites.
fitter = SpacegroupAnalyzer(self.structure, self.symm_prec)
symmetrized_structure = fitter.get_symmetrized_structure()
logger.debug(
"Spacegroup {} ({}) with {} distinct sites".format(
fitter.get_space_group_symbol(),
fitter.get_space_group_number(),
len(symmetrized_structure.equivalent_sites),
)
)
"""
        Enumlib doesn't work when the number of species gets too large. To
simplify matters, we generate the input file only with disordered sites
and exclude the ordered sites from the enumeration. The fact that
different disordered sites with the exact same species may belong to
different equivalent sites is dealt with by having determined the
spacegroup earlier and labelling the species differently.
"""
# index_species and index_amounts store mappings between the indices
# used in the enum input file, and the actual species and amounts.
index_species = []
index_amounts = []
# Stores the ordered sites, which are not enumerated.
ordered_sites = []
disordered_sites = []
coord_str = []
for sites in symmetrized_structure.equivalent_sites:
if sites[0].is_ordered:
ordered_sites.append(sites)
else:
sp_label = []
species = dict(sites[0].species.items())
if sum(species.values()) < 1 - EnumlibAdaptor.amount_tol:
                    # Let us first add a dummy element for every single
# site whose total occupancies don't sum to 1.
species[DummySpecies("X")] = 1 - sum(species.values())
for sp, amt in species.items():
if sp not in index_species:
index_species.append(sp)
sp_label.append(len(index_species) - 1)
index_amounts.append(amt * len(sites))
else:
ind = index_species.index(sp)
sp_label.append(ind)
index_amounts[ind] += amt * len(sites)
sp_label = "/".join(["{}".format(i) for i in sorted(sp_label)])
for site in sites:
coord_str.append("{} {}".format(coord_format.format(*site.coords), sp_label))
disordered_sites.append(sites)
def get_sg_info(ss):
finder = SpacegroupAnalyzer(Structure.from_sites(ss), self.symm_prec)
return finder.get_space_group_number()
target_sgnum = get_sg_info(symmetrized_structure.sites)
curr_sites = list(itertools.chain.from_iterable(disordered_sites))
sgnum = get_sg_info(curr_sites)
ordered_sites = sorted(ordered_sites, key=lambda sites: len(sites))
        logger.debug("Disordered sites have sg # %d" % (sgnum))
self.ordered_sites = []
# progressively add ordered sites to our disordered sites
# until we match the symmetry of our input structure
if self.check_ordered_symmetry:
while sgnum != target_sgnum and len(ordered_sites) > 0:
sites = ordered_sites.pop(0)
temp_sites = list(curr_sites) + sites
new_sgnum = get_sg_info(temp_sites)
if sgnum != new_sgnum:
logger.debug("Adding %s in enum. New sg # %d" % (sites[0].specie, new_sgnum))
index_species.append(sites[0].specie)
index_amounts.append(len(sites))
sp_label = len(index_species) - 1
for site in sites:
coord_str.append("{} {}".format(coord_format.format(*site.coords), sp_label))
disordered_sites.append(sites)
curr_sites = temp_sites
sgnum = new_sgnum
else:
self.ordered_sites.extend(sites)
for sites in ordered_sites:
self.ordered_sites.extend(sites)
self.index_species = index_species
lattice = self.structure.lattice
output = [self.structure.formula, "bulk"]
for vec in lattice.matrix:
output.append(coord_format.format(*vec))
output.append("%d" % len(index_species))
output.append("%d" % len(coord_str))
output.extend(coord_str)
output.append("{} {}".format(self.min_cell_size, self.max_cell_size))
output.append(str(self.enum_precision_parameter))
output.append("full")
ndisordered = sum([len(s) for s in disordered_sites])
base = int(
ndisordered
* lcm(
*[
f.limit_denominator(ndisordered * self.max_cell_size).denominator
for f in map(fractions.Fraction, index_amounts)
]
)
)
# This multiplicative factor of 10 is to prevent having too small bases
# which can lead to rounding issues in the next step.
# An old bug was that a base was set to 8, with a conc of 0.4:0.6. That
# resulted in a range that overlaps and a conc of 0.5 satisfying this
# enumeration. See Cu7Te5.cif test file.
base *= 10
# base = ndisordered #10 ** int(math.ceil(math.log10(ndisordered)))
# To get a reasonable number of structures, we fix concentrations to the
# range expected in the original structure.
total_amounts = sum(index_amounts)
for amt in index_amounts:
conc = amt / total_amounts
if abs(conc * base - round(conc * base)) < 1e-5:
output.append("{} {} {}".format(int(round(conc * base)), int(round(conc * base)), base))
else:
min_conc = int(math.floor(conc * base))
output.append("{} {} {}".format(min_conc - 1, min_conc + 1, base))
output.append("")
logger.debug("Generated input file:\n{}".format("\n".join(output)))
with open("struct_enum.in", "w") as f:
f.write("\n".join(output))
def _run_multienum(self):
with subprocess.Popen([enum_cmd], stdout=subprocess.PIPE, stdin=subprocess.PIPE, close_fds=True) as p:
if self.timeout:
timed_out = False
timer = Timer(self.timeout * 60, lambda p: p.kill(), [p])
try:
timer.start()
output = p.communicate()[0].decode("utf-8")
finally:
if not timer.is_alive():
timed_out = True
timer.cancel()
if timed_out:
raise TimeoutError("Enumeration took too long.")
else:
output = p.communicate()[0].decode("utf-8")
count = 0
start_count = False
for line in output.strip().split("\n"):
if line.strip().endswith("RunTot"):
start_count = True
elif start_count and re.match(r"\d+\s+.*", line.strip()):
count = int(line.split()[-1])
logger.debug("Enumeration resulted in {} structures".format(count))
return count
def _get_structures(self, num_structs):
structs = []
if ".py" in makestr_cmd:
options = ["-input", "struct_enum.out", str(1), str(num_structs)]
else:
options = ["struct_enum.out", str(0), str(num_structs - 1)]
with subprocess.Popen(
[makestr_cmd] + options,
stdout=subprocess.PIPE,
stdin=subprocess.PIPE,
close_fds=True,
) as rs:
stdout, stderr = rs.communicate()
if stderr:
logger.warning(stderr.decode())
# sites retrieved from enumlib will lack site properties
# to ensure consistency, we keep track of what site properties
# are missing and set them to None
# TODO: improve this by mapping ordered structure to original
        # disordered structure, and retrieving correct site properties
disordered_site_properties = {}
if len(self.ordered_sites) > 0:
original_latt = self.ordered_sites[0].lattice
# Need to strip sites of site_properties, which would otherwise
# result in an index error. Hence Structure is reconstructed in
# the next step.
site_properties = {}
for site in self.ordered_sites:
for k, v in site.properties.items():
disordered_site_properties[k] = None
if k in site_properties:
site_properties[k].append(v)
else:
site_properties[k] = [v]
ordered_structure = Structure(
original_latt,
[site.species for site in self.ordered_sites],
[site.frac_coords for site in self.ordered_sites],
site_properties=site_properties,
)
inv_org_latt = np.linalg.inv(original_latt.matrix)
for file in glob.glob("vasp.*"):
with open(file) as f:
data = f.read()
data = re.sub(r"scale factor", "1", data)
data = re.sub(r"(\d+)-(\d+)", r"\1 -\2", data)
poscar = Poscar.from_string(data, self.index_species)
sub_structure = poscar.structure
# Enumeration may have resulted in a super lattice. We need to
# find the mapping from the new lattice to the old lattice, and
# perform supercell construction if necessary.
new_latt = sub_structure.lattice
sites = []
if len(self.ordered_sites) > 0:
transformation = np.dot(new_latt.matrix, inv_org_latt)
transformation = [[int(round(cell)) for cell in row] for row in transformation]
logger.debug("Supercell matrix: {}".format(transformation))
s = ordered_structure * transformation
sites.extend([site.to_unit_cell() for site in s])
super_latt = sites[-1].lattice
else:
super_latt = new_latt
for site in sub_structure:
if site.specie.symbol != "X": # We exclude vacancies.
sites.append(
PeriodicSite(
site.species,
site.frac_coords,
super_latt,
to_unit_cell=True,
properties=disordered_site_properties,
)
)
else:
logger.debug("Skipping sites that include species X.")
structs.append(Structure.from_sites(sorted(sites)))
logger.debug("Read in a total of {} structures.".format(num_structs))
return structs
class EnumError(BaseException):
"""
Error subclass for enumeration errors.
"""
pass
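# A minimal usage sketch: enumerating orderings of a 50/50 Cu/Au disordered fcc cell.
# The lattice constant is an arbitrary example value, and the enum.x/makestr.x
# executables must already be on the PATH for this to run.
def _example_enumerate_cu_au():
    from pymatgen.core.lattice import Lattice
    disordered = Structure(
        Lattice.cubic(3.8),
        [{"Cu": 0.5, "Au": 0.5}] * 4,
        [[0, 0, 0], [0.5, 0.5, 0], [0.5, 0, 0.5], [0, 0.5, 0.5]],
    )
    adaptor = EnumlibAdaptor(disordered, min_cell_size=1, max_cell_size=2)
    adaptor.run()
    # Each entry is a fully ordered Structure derived from the input cell.
    return adaptor.structures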
|
resotocore/tests/resotocore/model/__init__.py | someengineering/resoto | 126 | 11134654 | <reponame>someengineering/resoto
from typing import Optional, List
from resotocore.model.model import Model, Kind
from resotocore.model.model_handler import ModelHandler
class ModelHandlerStatic(ModelHandler):
def __init__(self, model: Model):
self.model = model
async def load_model(self) -> Model:
return self.model
async def uml_image(
self,
show_packages: Optional[List[str]] = None,
hide_packages: Optional[List[str]] = None,
output: str = "svg",
*,
with_bases: bool = False,
with_descendants: bool = False,
) -> bytes:
        raise NotImplementedError
async def update_model(self, kinds: List[Kind]) -> Model:
self.model = Model.from_kinds(kinds)
return self.model
|
ast/testdata/decorator.py | MaxTurchin/pycopy-lib | 126 | 11134662 | @decor
def foo():
pass
@decor()
def foo():
pass
@decor(1, 2)
def foo():
pass
@clsdecor(1)
class Foo(Bar):
pass
|
cfonts/consts.py | AdamMusa/python-cfonts | 174 | 11134663 | """
Python cFonts
=============
Sexy fonts for the console.
:license: GNU GPLv2
:author: <NAME><<EMAIL>>
"""
import enum
from shutil import get_terminal_size
from typing import Mapping, Tuple
SIZE = get_terminal_size((80, 24))
CHARS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789|!?.+-_=@#$%&()/:;,' \""
class Colors(enum.Enum):
system = "system"
black = "black"
red = "red"
green = "green"
yellow = "yellow"
blue = "blue"
magenta = "magenta"
cyan = "cyan"
white = "white"
candy = "candy"
bright_red = "bright_red"
bright_green = "bright_green"
bright_yellow = "bright_yellow"
bright_blue = "bright_blue"
bright_magenta = "bright_magenta"
bright_cyan = "bright_cyan"
bright_white = "bright_white"
class CandyColors(enum.Enum):
red = "red"
green = "green"
yellow = "yellow"
blue = "blue"
magenta = "magenta"
cyan = "cyan"
gray = "gray"
bright_red = "bright_red"
bright_green = "bright_green"
bright_yellow = "bright_yellow"
bright_blue = "bright_blue"
bright_magenta = "bright_magenta"
bright_cyan = "bright_cyan"
class BgColors(enum.Enum):
transparent = "transparent"
black = "black"
red = "red"
green = "green"
yellow = "yellow"
blue = "blue"
magenta = "magenta"
cyan = "cyan"
white = "white"
bright_black = "bright_black"
bright_red = "bright_red"
bright_green = "bright_green"
bright_yellow = "bright_yellow"
bright_blue = "bright_blue"
bright_magenta = "bright_magenta"
bright_cyan = "bright_cyan"
bright_white = "bright_white"
ALIGNMENT = ["left", "center", "right"]
class FontFaces(enum.Enum):
console = "console"
block = "block"
simpleblock = "simpleBlock"
simple = "simple"
threed = "3d"
simple3d = "simple3d"
chrome = "chrome"
huge = "huge"
grid = "grid"
pallet = "pallet"
shade = "shade"
slick = "slick"
tiny = "tiny"
ANSI_COLORS = {
"black": 30,
"red": 31,
"green": 32,
"yellow": 33,
"blue": 34,
"magenta": 35,
"cyan": 36,
"white": 37,
"gray": 90,
"bright_red": 91,
"bright_green": 92,
"bright_yellow": 93,
"bright_blue": 94,
"bright_magenta": 95,
"bright_cyan": 96,
"bright_white": 97,
}
ANSI_RGB: Mapping[str, Tuple[int, int, int]] = {
"black": (0, 0, 0),
"red": (234, 50, 35),
"green": (55, 125, 34),
"yellow": (255, 253, 84),
"blue": (0, 32, 245),
"magenta": (234, 61, 247),
"cyan": (116, 251, 253),
"white": (255, 255, 255),
"gray": (128, 128, 128),
"bright_red": (238, 119, 109),
"bright_green": (140, 245, 123),
"bright_yellow": (255, 251, 127),
"bright_blue": (105, 116, 246),
"bright_magenta": (238, 130, 248),
"bright_cyan": (141, 250, 253),
"bright_white": (255, 255, 255),
}
|
api/http.py | wkma/bk-sops | 881 | 11134679 | <reponame>wkma/bk-sops<filename>api/http.py<gh_stars>100-1000
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community
Edition) available.
Copyright (C) 2017-2021 TH<NAME> Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
import logging
import requests
import curlify
logger = logging.getLogger("component")
def _gen_header():
headers = {
"Content-Type": "application/json",
}
return headers
def _http_request(
method, url, headers=None, data=None, verify=False, cert=None, timeout=None, cookies=None,
):
resp = requests.Response()
request_id = None
try:
if method == "GET":
resp = requests.get(
url=url, headers=headers, params=data, verify=verify, cert=cert, timeout=timeout, cookies=cookies,
)
elif method == "HEAD":
resp = requests.head(url=url, headers=headers, verify=verify, cert=cert, timeout=timeout, cookies=cookies,)
elif method == "POST":
resp = requests.post(
url=url, headers=headers, json=data, verify=verify, cert=cert, timeout=timeout, cookies=cookies,
)
elif method == "DELETE":
resp = requests.delete(
url=url, headers=headers, json=data, verify=verify, cert=cert, timeout=timeout, cookies=cookies,
)
elif method == "PUT":
resp = requests.put(
url=url, headers=headers, json=data, verify=verify, cert=cert, timeout=timeout, cookies=cookies,
)
else:
return {"result": False, "message": "Unsupported http method %s" % method}
except Exception as e:
logger.exception("Error occurred when requesting method=%s url=%s" % (method, url))
return {"result": False, "message": "Request API error, exception: %s" % str(e)}
else:
if not resp.ok:
message = "Request API error, status_code: %s" % resp.status_code
logger.error(message)
return {"result": False, "message": message}
log_message = "API return: message: %(message)s, request_id=%(request_id)s, url=%(url)s, data=%(data)s, response=%(response)s" # noqa
try:
json_resp = resp.json()
request_id = json_resp.get("request_id")
if not json_resp.get("result"):
logger.error(
log_message
% {
"request_id": request_id,
"message": json_resp.get("message"),
"url": url,
"data": data,
"response": resp.text,
}
)
else:
logger.debug(
log_message
% {
"request_id": request_id,
"message": json_resp.get("message"),
"url": url,
"data": data,
"response": resp.text,
}
)
except Exception:
            logger.exception("Response data is not valid JSON: %s", resp.content[:200])
return {"result": False, "message": "API return is not a valid json"}
return json_resp
finally:
if resp.request is None:
resp.request = requests.Request(method, url, headers=headers, data=data, cookies=cookies).prepare()
logger.debug(
"the request_id: `%s`. curl: `%s`", request_id, curlify.to_curl(resp.request, verify=False),
)
def get(url, data, headers=None, verify=False, cert=None, timeout=None, cookies=None):
if not headers:
headers = _gen_header()
return _http_request(
method="GET", url=url, headers=headers, data=data, verify=verify, cert=cert, timeout=timeout, cookies=cookies,
)
def post(url, data, headers=None, verify=False, cert=None, timeout=None, cookies=None):
if not headers:
headers = _gen_header()
return _http_request(
method="POST", url=url, headers=headers, data=data, verify=verify, cert=cert, timeout=timeout, cookies=cookies,
)
def put(url, data, headers=None, verify=False, cert=None, timeout=None, cookies=None):
if not headers:
headers = _gen_header()
return _http_request(
method="PUT", url=url, headers=headers, data=data, verify=verify, cert=cert, timeout=timeout, cookies=cookies,
)
def delete(url, data, headers=None, verify=False, cert=None, timeout=None, cookies=None):
if not headers:
headers = _gen_header()
return _http_request(
method="DELETE",
url=url,
headers=headers,
data=data,
verify=verify,
cert=cert,
timeout=timeout,
cookies=cookies,
)
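# A minimal usage sketch of these helpers. The URL and payload are placeholders;
# real callers typically point at a component/API gateway endpoint and inspect the
# conventional "result"/"message"/"data" fields of the JSON response.
def _example_call_api():
    url = "https://example.com/api/v1/task/"
    resp = post(url, data={"name": "demo", "params": {}}, timeout=30)
    if not resp.get("result"):
        logger.error("API call failed: %s", resp.get("message"))
        return None
    return resp.get("data")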
|
doxygen/siphon/process_syscfg.py | amithbraj/vpp | 751 | 11134686 | <filename>doxygen/siphon/process_syscfg.py
# Copyright (c) 2016 Comcast Cable Communications Management, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Generate syscfg formatted output
from . import process, parsers
class SiphonSYSCFG(process.Siphon):
name = "syscfg"
identifier = "VLIB_CONFIG_FUNCTION"
def __init__(self, *args, **kwargs):
super(SiphonSYSCFG, self).__init__(*args, **kwargs)
self._parser = parsers.ParseFunctionMacroStmt()
# Register our processor
process.siphons["syscfg"] = SiphonSYSCFG
|
applications/StructuralMechanicsApplication/tests/test_distribute_load_on_surface_process.py | lkusch/Kratos | 778 | 11134717 | <gh_stars>100-1000
import KratosMultiphysics as KM
import KratosMultiphysics.StructuralMechanicsApplication as KSM
from KratosMultiphysics.StructuralMechanicsApplication.distribute_load_on_surface_process import Factory
import KratosMultiphysics.KratosUnittest as KratosUnittest
class TestDistributeLoadOnSurfaceProcess(KratosUnittest.TestCase):
def test_load_on_surface_distribution(self):
"""distribute on 2 triangles and 1 quad with a total area of 2.0."""
current_model = KM.Model()
mp = current_model.CreateModelPart("main")
#create nodes
mp.CreateNewNode(1, 0.0, 0.0, 0.0)
mp.CreateNewNode(2, 1.0, 0.0, 0.0)
mp.CreateNewNode(3, 1.0, 1.0, 0.0)
mp.CreateNewNode(4, 0.0, 1.0, 0.0)
mp.CreateNewNode(5, 1.0, 1.0, 1.0)
mp.CreateNewNode(6, 1.0, 0.0, 1.0)
#ensure that the property 1 is created
prop = mp.GetProperties()[1]
cond1 = mp.CreateNewCondition("SurfaceLoadCondition3D3N", 1, [1,2,3], prop)
cond2 = mp.CreateNewCondition("SurfaceLoadCondition3D3N", 2, [1,3,4], prop)
cond3 = mp.CreateNewCondition("SurfaceLoadCondition3D4N", 3, [2,6,5,3], prop)
settings = KM.Parameters("""{
"Parameters" : {
"model_part_name": "main",
"load": [1.0, 2.0, 3.0]
}
}""")
process = Factory(settings, current_model)
process.ExecuteInitialize()
process.ExecuteBeforeSolutionLoop()
process.ExecuteInitializeSolutionStep()
surface_load_1 = cond1.GetValue(KSM.SURFACE_LOAD)
surface_load_2 = cond2.GetValue(KSM.SURFACE_LOAD)
surface_load_3 = cond3.GetValue(KSM.SURFACE_LOAD)
total_load = KM.Vector([1.0, 2.0, 3.0])
self.assertVectorAlmostEqual(surface_load_1, total_load / 2.0 * 0.5)
self.assertVectorAlmostEqual(surface_load_2, total_load / 2.0 * 0.5)
self.assertVectorAlmostEqual(surface_load_3, total_load / 2.0 * 1.0)
self.assertVectorAlmostEqual(surface_load_1 + surface_load_2 + surface_load_3, total_load)
process.ExecuteFinalizeSolutionStep()
process.ExecuteBeforeOutputStep()
process.ExecuteAfterOutputStep()
process.ExecuteFinalize()
if __name__ == '__main__':
KratosUnittest.main()
|
rpython/tool/test/test_sourcetools.py | jptomo/pypy-lang-scheme | 381 | 11134723 | <reponame>jptomo/pypy-lang-scheme<filename>rpython/tool/test/test_sourcetools.py
from rpython.tool.sourcetools import (
func_renamer, func_with_new_name, rpython_wrapper)
def test_rename():
def f(x, y=5):
return x + y
f.prop = int
g = func_with_new_name(f, "g")
assert g(4, 5) == 9
assert g.func_name == "g"
assert f.func_defaults == (5,)
assert g.prop is int
def test_rename_decorator():
@func_renamer("g")
def f(x, y=5):
return x + y
f.prop = int
assert f(4, 5) == 9
assert f.func_name == "g"
assert f.func_defaults == (5,)
assert f.prop is int
def test_func_rename_decorator():
def bar():
'doc'
bar2 = func_with_new_name(bar, 'bar2')
assert bar.func_doc == bar2.func_doc == 'doc'
bar.func_doc = 'new doc'
bar3 = func_with_new_name(bar, 'bar3')
assert bar3.func_doc == 'new doc'
assert bar2.func_doc != bar3.func_doc
def test_rpython_wrapper():
calls = []
def bar(a, b):
calls.append(('bar', a, b))
return a+b
template = """
def {name}({arglist}):
calls.append(('decorated', {arglist}))
return {original}({arglist})
"""
bar = rpython_wrapper(bar, template, calls=calls)
assert bar(40, 2) == 42
assert calls == [
('decorated', 40, 2),
('bar', 40, 2),
]
|
fpga/lib/pcie/tb/test_pcie_us_axi_master_wr_128.py | totuwei/corundum | 544 | 11134736 | <filename>fpga/lib/pcie/tb/test_pcie_us_axi_master_wr_128.py
#!/usr/bin/env python
"""
Copyright (c) 2018 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from myhdl import *
import os
import pcie
import pcie_us
import axi
module = 'pcie_us_axi_master_wr'
testbench = 'test_%s_128' % module
srcs = []
srcs.append("../rtl/%s.v" % module)
srcs.append("%s.v" % testbench)
src = ' '.join(srcs)
build_cmd = "iverilog -o %s.vvp %s" % (testbench, src)
def bench():
# Parameters
AXIS_PCIE_DATA_WIDTH = 128
AXIS_PCIE_KEEP_WIDTH = (AXIS_PCIE_DATA_WIDTH/32)
AXIS_PCIE_CQ_USER_WIDTH = 85
AXI_DATA_WIDTH = AXIS_PCIE_DATA_WIDTH
AXI_ADDR_WIDTH = 64
AXI_STRB_WIDTH = (AXI_DATA_WIDTH/8)
AXI_ID_WIDTH = 8
AXI_MAX_BURST_LEN = 256
# Inputs
clk = Signal(bool(0))
rst = Signal(bool(0))
current_test = Signal(intbv(0)[8:])
s_axis_cq_tdata = Signal(intbv(0)[AXIS_PCIE_DATA_WIDTH:])
s_axis_cq_tkeep = Signal(intbv(0)[AXIS_PCIE_KEEP_WIDTH:])
s_axis_cq_tvalid = Signal(bool(0))
s_axis_cq_tlast = Signal(bool(0))
s_axis_cq_tuser = Signal(intbv(0)[AXIS_PCIE_CQ_USER_WIDTH:])
m_axi_awready = Signal(bool(0))
m_axi_wready = Signal(bool(0))
m_axi_bid = Signal(intbv(0)[AXI_ID_WIDTH:])
m_axi_bresp = Signal(intbv(0)[2:])
m_axi_bvalid = Signal(bool(0))
# Outputs
s_axis_cq_tready = Signal(bool(0))
m_axi_awid = Signal(intbv(0)[AXI_ID_WIDTH:])
m_axi_awaddr = Signal(intbv(0)[AXI_ADDR_WIDTH:])
m_axi_awlen = Signal(intbv(0)[8:])
m_axi_awsize = Signal(intbv(4)[3:])
m_axi_awburst = Signal(intbv(1)[2:])
m_axi_awlock = Signal(bool(0))
m_axi_awcache = Signal(intbv(3)[4:])
m_axi_awprot = Signal(intbv(2)[3:])
m_axi_awvalid = Signal(bool(0))
m_axi_wdata = Signal(intbv(0)[AXI_DATA_WIDTH:])
m_axi_wstrb = Signal(intbv(0)[AXI_STRB_WIDTH:])
m_axi_wlast = Signal(bool(0))
m_axi_wvalid = Signal(bool(0))
m_axi_bready = Signal(bool(1))
status_error_uncor = Signal(bool(0))
# Clock and Reset Interface
user_clk=Signal(bool(0))
user_reset=Signal(bool(0))
sys_clk=Signal(bool(0))
sys_reset=Signal(bool(0))
# AXI4 RAM model
axi_ram_inst = axi.AXIRam(2**16)
axi_ram_port0 = axi_ram_inst.create_port(
user_clk,
s_axi_awid=m_axi_awid,
s_axi_awaddr=m_axi_awaddr,
s_axi_awlen=m_axi_awlen,
s_axi_awsize=m_axi_awsize,
s_axi_awburst=m_axi_awburst,
s_axi_awlock=m_axi_awlock,
s_axi_awcache=m_axi_awcache,
s_axi_awprot=m_axi_awprot,
s_axi_awvalid=m_axi_awvalid,
s_axi_awready=m_axi_awready,
s_axi_wdata=m_axi_wdata,
s_axi_wstrb=m_axi_wstrb,
s_axi_wlast=m_axi_wlast,
s_axi_wvalid=m_axi_wvalid,
s_axi_wready=m_axi_wready,
s_axi_bid=m_axi_bid,
s_axi_bresp=m_axi_bresp,
s_axi_bvalid=m_axi_bvalid,
s_axi_bready=m_axi_bready,
name='port0'
)
# PCIe devices
rc = pcie.RootComplex()
dev = pcie_us.UltrascalePCIe()
dev.pcie_generation = 3
dev.pcie_link_width = 4
dev.user_clock_frequency = 250e6
dev.functions[0].configure_bar(0, 16*1024*1024)
rc.make_port().connect(dev)
cq_pause = Signal(bool(0))
cc_pause = Signal(bool(0))
rq_pause = Signal(bool(0))
rc_pause = Signal(bool(0))
pcie_logic = dev.create_logic(
# Completer reQuest Interface
m_axis_cq_tdata=s_axis_cq_tdata,
m_axis_cq_tuser=s_axis_cq_tuser,
m_axis_cq_tlast=s_axis_cq_tlast,
m_axis_cq_tkeep=s_axis_cq_tkeep,
m_axis_cq_tvalid=s_axis_cq_tvalid,
m_axis_cq_tready=s_axis_cq_tready,
#pcie_cq_np_req=pcie_cq_np_req,
#pcie_cq_np_req_count=pcie_cq_np_req_count,
# Completer Completion Interface
s_axis_cc_tdata=Signal(intbv(0)[AXIS_PCIE_DATA_WIDTH:]),
s_axis_cc_tuser=Signal(intbv(0)[33:]),
s_axis_cc_tlast=Signal(bool(0)),
s_axis_cc_tkeep=Signal(intbv(0)[AXIS_PCIE_KEEP_WIDTH:]),
s_axis_cc_tvalid=Signal(bool(0)),
s_axis_cc_tready=Signal(bool(0)),
# Requester reQuest Interface
s_axis_rq_tdata=Signal(intbv(0)[AXIS_PCIE_DATA_WIDTH:]),
s_axis_rq_tuser=Signal(intbv(0)[60:]),
s_axis_rq_tlast=Signal(bool(0)),
s_axis_rq_tkeep=Signal(intbv(0)[AXIS_PCIE_KEEP_WIDTH:]),
s_axis_rq_tvalid=Signal(bool(0)),
s_axis_rq_tready=Signal(bool(1)),
# pcie_rq_seq_num=pcie_rq_seq_num,
# pcie_rq_seq_num_vld=pcie_rq_seq_num_vld,
# pcie_rq_tag=pcie_rq_tag,
# pcie_rq_tag_av=pcie_rq_tag_av,
# pcie_rq_tag_vld=pcie_rq_tag_vld,
# Requester Completion Interface
m_axis_rc_tdata=Signal(intbv(0)[AXIS_PCIE_DATA_WIDTH:]),
m_axis_rc_tuser=Signal(intbv(0)[75:]),
m_axis_rc_tlast=Signal(bool(0)),
m_axis_rc_tkeep=Signal(intbv(0)[AXIS_PCIE_KEEP_WIDTH:]),
m_axis_rc_tvalid=Signal(bool(0)),
m_axis_rc_tready=Signal(bool(0)),
# Transmit Flow Control Interface
# pcie_tfc_nph_av=pcie_tfc_nph_av,
# pcie_tfc_npd_av=pcie_tfc_npd_av,
# Configuration Control Interface
# cfg_hot_reset_in=cfg_hot_reset_in,
# cfg_hot_reset_out=cfg_hot_reset_out,
# cfg_config_space_enable=cfg_config_space_enable,
# cfg_per_function_update_done=cfg_per_function_update_done,
# cfg_per_function_number=cfg_per_function_number,
# cfg_per_function_output_request=cfg_per_function_output_request,
# cfg_dsn=cfg_dsn,
# cfg_ds_bus_number=cfg_ds_bus_number,
# cfg_ds_device_number=cfg_ds_device_number,
# cfg_ds_function_number=cfg_ds_function_number,
# cfg_power_state_change_ack=cfg_power_state_change_ack,
# cfg_power_state_change_interrupt=cfg_power_state_change_interrupt,
# cfg_err_cor_in=cfg_err_cor_in,
# cfg_err_uncor_in=cfg_err_uncor_in,
# cfg_flr_done=cfg_flr_done,
# cfg_vf_flr_done=cfg_vf_flr_done,
# cfg_flr_in_process=cfg_flr_in_process,
# cfg_vf_flr_in_process=cfg_vf_flr_in_process,
# cfg_req_pm_transition_l23_ready=cfg_req_pm_transition_l23_ready,
# cfg_link_training_enable=cfg_link_training_enable,
# Clock and Reset Interface
user_clk=user_clk,
user_reset=user_reset,
#user_lnk_up=user_lnk_up,
sys_clk=sys_clk,
sys_clk_gt=sys_clk,
sys_reset=sys_reset,
cq_pause=cq_pause,
cc_pause=cc_pause,
rq_pause=rq_pause,
rc_pause=rc_pause
)
# DUT
if os.system(build_cmd):
raise Exception("Error running build command")
dut = Cosimulation(
"vvp -m myhdl %s.vvp -lxt2" % testbench,
clk=user_clk,
rst=user_reset,
current_test=current_test,
s_axis_cq_tdata=s_axis_cq_tdata,
s_axis_cq_tkeep=s_axis_cq_tkeep,
s_axis_cq_tvalid=s_axis_cq_tvalid,
s_axis_cq_tready=s_axis_cq_tready,
s_axis_cq_tlast=s_axis_cq_tlast,
s_axis_cq_tuser=s_axis_cq_tuser,
m_axi_awid=m_axi_awid,
m_axi_awaddr=m_axi_awaddr,
m_axi_awlen=m_axi_awlen,
m_axi_awsize=m_axi_awsize,
m_axi_awburst=m_axi_awburst,
m_axi_awlock=m_axi_awlock,
m_axi_awcache=m_axi_awcache,
m_axi_awprot=m_axi_awprot,
m_axi_awvalid=m_axi_awvalid,
m_axi_awready=m_axi_awready,
m_axi_wdata=m_axi_wdata,
m_axi_wstrb=m_axi_wstrb,
m_axi_wlast=m_axi_wlast,
m_axi_wvalid=m_axi_wvalid,
m_axi_wready=m_axi_wready,
m_axi_bid=m_axi_bid,
m_axi_bresp=m_axi_bresp,
m_axi_bvalid=m_axi_bvalid,
m_axi_bready=m_axi_bready,
status_error_uncor=status_error_uncor
)
@always(delay(4))
def clkgen():
clk.next = not clk
@always_comb
def clk_logic():
sys_clk.next = clk
sys_reset.next = not rst
status_error_uncor_asserted = Signal(bool(0))
@always(user_clk.posedge)
def monitor():
if (status_error_uncor):
status_error_uncor_asserted.next = 1
cq_pause_toggle = Signal(bool(0))
cc_pause_toggle = Signal(bool(0))
rq_pause_toggle = Signal(bool(0))
rc_pause_toggle = Signal(bool(0))
@instance
def pause_toggle():
while True:
if (cq_pause_toggle or cc_pause_toggle or rq_pause_toggle or rc_pause_toggle):
cq_pause.next = cq_pause_toggle
cc_pause.next = cc_pause_toggle
rq_pause.next = rq_pause_toggle
rc_pause.next = rc_pause_toggle
yield user_clk.posedge
yield user_clk.posedge
yield user_clk.posedge
cq_pause.next = 0
cc_pause.next = 0
rq_pause.next = 0
rc_pause.next = 0
yield user_clk.posedge
@instance
def check():
yield delay(100)
yield clk.posedge
rst.next = 1
yield clk.posedge
rst.next = 0
yield clk.posedge
yield delay(100)
yield clk.posedge
# testbench stimulus
yield user_clk.posedge
print("test 1: enumeration")
current_test.next = 1
yield rc.enumerate()
dev_bar0 = rc.tree[0][0].bar[0]
yield delay(100)
yield clk.posedge
print("test 2: memory write")
current_test.next = 2
pcie_addr = 0x00000000
test_data = b'\x11\x22\x33\x44'
yield rc.mem_write(dev_bar0+pcie_addr, test_data)
yield delay(300)
data = axi_ram_inst.read_mem(pcie_addr, 32)
for i in range(0, len(data), 16):
print(" ".join(("{:02x}".format(c) for c in bytearray(data[i:i+16]))))
assert axi_ram_inst.read_mem(pcie_addr, len(test_data)) == test_data
assert not status_error_uncor_asserted
yield delay(100)
yield user_clk.posedge
print("test 3: various writes")
current_test.next = 3
for length in list(range(1,34))+[1024]:
for pcie_offset in list(range(8,41))+list(range(4096-32,4096)):
for pause in [False, True]:
print("length %d, pcie_offset %d"% (length, pcie_offset))
#pcie_addr = length * 0x100000000 + pcie_offset * 0x10000 + offset
pcie_addr = pcie_offset
test_data = bytearray([x%256 for x in range(length)])
axi_ram_inst.write_mem(pcie_addr & 0xffff80, b'\x55'*(len(test_data)+256))
cq_pause_toggle.next = pause
yield from rc.mem_write(dev_bar0+pcie_addr, test_data)
yield delay(int(length*4+120))
cq_pause_toggle.next = 0
data = axi_ram_inst.read_mem(pcie_addr&0xfffff0, 64)
for i in range(0, len(data), 16):
print(" ".join(("{:02x}".format(c) for c in bytearray(data[i:i+16]))))
assert axi_ram_inst.read_mem(pcie_addr-1, len(test_data)+2) == b'\x55'+test_data+b'\x55'
assert not status_error_uncor_asserted
yield delay(100)
yield clk.posedge
print("test 4: bad request")
current_test.next = 4
try:
yield from rc.mem_read(dev_bar0, 4, 100)
except:
print("Caught timeout exception")
pass
else:
assert False
assert status_error_uncor_asserted
status_error_uncor_asserted.next = False
yield delay(100)
raise StopSimulation
return instances()
def test_bench():
sim = Simulation(bench())
sim.run()
if __name__ == '__main__':
print("Running test...")
test_bench()
|
tokumx/datadog_checks/tokumx/vendor/pymongo/common.py | remicalixte/integrations-core | 663 | 11134745 | # Copyright 2011-2015 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you
# may not use this file except in compliance with the License. You
# may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
"""Functions and classes common to multiple pymongo modules."""
import collections
import datetime
import warnings
from datadog_checks.tokumx.vendor.bson.binary import (STANDARD, PYTHON_LEGACY,
JAVA_LEGACY, CSHARP_LEGACY)
from datadog_checks.tokumx.vendor.bson.codec_options import CodecOptions
from datadog_checks.tokumx.vendor.bson.py3compat import string_type, integer_types, iteritems
from datadog_checks.tokumx.vendor.bson.raw_bson import RawBSONDocument
from datadog_checks.tokumx.vendor.pymongo.auth import MECHANISMS
from datadog_checks.tokumx.vendor.pymongo.errors import ConfigurationError
from datadog_checks.tokumx.vendor.pymongo.monitoring import _validate_event_listeners
from datadog_checks.tokumx.vendor.pymongo.read_concern import ReadConcern
from datadog_checks.tokumx.vendor.pymongo.read_preferences import _MONGOS_MODES, _ServerMode
from datadog_checks.tokumx.vendor.pymongo.ssl_support import validate_cert_reqs
from datadog_checks.tokumx.vendor.pymongo.write_concern import WriteConcern
# Defaults until we connect to a server and get updated limits.
MAX_BSON_SIZE = 16 * (1024 ** 2)
MAX_MESSAGE_SIZE = 2 * MAX_BSON_SIZE
MIN_WIRE_VERSION = 0
MAX_WIRE_VERSION = 0
MAX_WRITE_BATCH_SIZE = 1000
# What this version of PyMongo supports.
MIN_SUPPORTED_WIRE_VERSION = 0
MAX_SUPPORTED_WIRE_VERSION = 5
# Frequency to call ismaster on servers, in seconds.
HEARTBEAT_FREQUENCY = 10
# Frequency to process kill-cursors, in seconds. See MongoClient.close_cursor.
KILL_CURSOR_FREQUENCY = 1
# Frequency to process events queue, in seconds.
EVENTS_QUEUE_FREQUENCY = 1
# How long to wait, in seconds, for a suitable server to be found before
# aborting an operation. For example, if the client attempts an insert
# during a replica set election, SERVER_SELECTION_TIMEOUT governs the
# longest it is willing to wait for a new primary to be found.
SERVER_SELECTION_TIMEOUT = 30
# Spec requires at least 500ms between ismaster calls.
MIN_HEARTBEAT_INTERVAL = 0.5
# Default connectTimeout in seconds.
CONNECT_TIMEOUT = 20.0
# Default value for maxPoolSize.
MAX_POOL_SIZE = 100
# Default value for minPoolSize.
MIN_POOL_SIZE = 0
# Default value for maxIdleTimeMS.
MAX_IDLE_TIME_MS = None
# Default value for localThresholdMS.
LOCAL_THRESHOLD_MS = 15
# mongod/s 2.6 and above return code 59 when a
# command doesn't exist. mongod versions previous
# to 2.6 and mongos 2.4.x return no error code
# when a command does exist. mongos versions previous
# to 2.4.0 return code 13390 when a command does not
# exist.
COMMAND_NOT_FOUND_CODES = (59, 13390, None)
# Error codes to ignore if GridFS calls createIndex on a secondary
UNAUTHORIZED_CODES = (13, 16547, 16548)
def partition_node(node):
"""Split a host:port string into (host, int(port)) pair."""
host = node
port = 27017
idx = node.rfind(':')
if idx != -1:
host, port = node[:idx], int(node[idx + 1:])
if host.startswith('['):
host = host[1:-1]
return host, port
def clean_node(node):
"""Split and normalize a node name from an ismaster response."""
host, port = partition_node(node)
# Normalize hostname to lowercase, since DNS is case-insensitive:
# http://tools.ietf.org/html/rfc4343
# This prevents useless rediscovery if "foo.com" is in the seed list but
# "FOO.com" is in the ismaster response.
return host.lower(), port
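# Worked examples for the two helpers above (hostnames are arbitrary placeholders):
# the default port is filled in when missing, IPv6 brackets are stripped, and
# clean_node additionally lowercases the host so e.g. 'FOO.com' matches 'foo.com'.
def _example_node_parsing():
    assert partition_node('localhost') == ('localhost', 27017)
    assert partition_node('[::1]:27018') == ('::1', 27018)
    assert clean_node('FOO.example.com') == ('foo.example.com', 27017)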
def raise_config_error(key, dummy):
"""Raise ConfigurationError with the given key name."""
raise ConfigurationError("Unknown option %s" % (key,))
# Mapping of URI uuid representation options to valid subtypes.
_UUID_REPRESENTATIONS = {
'standard': STANDARD,
'pythonLegacy': PYTHON_LEGACY,
'javaLegacy': JAVA_LEGACY,
'csharpLegacy': CSHARP_LEGACY
}
def validate_boolean(option, value):
"""Validates that 'value' is True or False."""
if isinstance(value, bool):
return value
raise TypeError("%s must be True or False" % (option,))
def validate_boolean_or_string(option, value):
"""Validates that value is True, False, 'true', or 'false'."""
if isinstance(value, string_type):
if value not in ('true', 'false'):
raise ValueError("The value of %s must be "
"'true' or 'false'" % (option,))
return value == 'true'
return validate_boolean(option, value)
def validate_integer(option, value):
"""Validates that 'value' is an integer (or basestring representation).
"""
if isinstance(value, integer_types):
return value
elif isinstance(value, string_type):
if not value.isdigit():
raise ValueError("The value of %s must be "
"an integer" % (option,))
return int(value)
raise TypeError("Wrong type for %s, value must be an integer" % (option,))
def validate_positive_integer(option, value):
"""Validate that 'value' is a positive integer, which does not include 0.
"""
val = validate_integer(option, value)
if val <= 0:
raise ValueError("The value of %s must be "
"a positive integer" % (option,))
return val
def validate_non_negative_integer(option, value):
"""Validate that 'value' is a positive integer or 0.
"""
val = validate_integer(option, value)
if val < 0:
raise ValueError("The value of %s must be "
"a non negative integer" % (option,))
return val
def validate_readable(option, value):
"""Validates that 'value' is file-like and readable.
"""
if value is None:
return value
    # First make sure it's a string; in Python 3.3, open(True, 'r') succeeds
# Used in ssl cert checking due to poor ssl module error reporting
value = validate_string(option, value)
open(value, 'r').close()
return value
def validate_positive_integer_or_none(option, value):
"""Validate that 'value' is a positive integer or None.
"""
if value is None:
return value
return validate_positive_integer(option, value)
def validate_non_negative_integer_or_none(option, value):
"""Validate that 'value' is a positive integer or 0 or None.
"""
if value is None:
return value
return validate_non_negative_integer(option, value)
def validate_string(option, value):
"""Validates that 'value' is an instance of `basestring` for Python 2
or `str` for Python 3.
"""
if isinstance(value, string_type):
return value
raise TypeError("Wrong type for %s, value must be "
"an instance of %s" % (option, string_type.__name__))
def validate_string_or_none(option, value):
"""Validates that 'value' is an instance of `basestring` or `None`.
"""
if value is None:
return value
return validate_string(option, value)
def validate_int_or_basestring(option, value):
"""Validates that 'value' is an integer or string.
"""
if isinstance(value, integer_types):
return value
elif isinstance(value, string_type):
if value.isdigit():
return int(value)
return value
raise TypeError("Wrong type for %s, value must be an "
"integer or a string" % (option,))
def validate_positive_float(option, value):
"""Validates that 'value' is a float, or can be converted to one, and is
positive.
"""
errmsg = "%s must be an integer or float" % (option,)
try:
value = float(value)
except ValueError:
raise ValueError(errmsg)
except TypeError:
raise TypeError(errmsg)
# float('inf') doesn't work in 2.4 or 2.5 on Windows, so just cap floats at
# one billion - this is a reasonable approximation for infinity
if not 0 < value < 1e9:
raise ValueError("%s must be greater than 0 and "
"less than one billion" % (option,))
return value
def validate_positive_float_or_zero(option, value):
"""Validates that 'value' is 0 or a positive float, or can be converted to
0 or a positive float.
"""
if value == 0 or value == "0":
return 0
return validate_positive_float(option, value)
def validate_timeout_or_none(option, value):
"""Validates a timeout specified in milliseconds returning
a value in floating point seconds.
"""
if value is None:
return value
return validate_positive_float(option, value) / 1000.0
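# Illustrative example (not part of the original module): a URI option such as
# connectTimeoutMS=20000 becomes 20.0 seconds:
#   validate_timeout_or_none('connecttimeoutms', 20000) == 20.0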
def validate_timeout_or_zero(option, value):
"""Validates a timeout specified in milliseconds returning
a value in floating point seconds for the case where None is an error
and 0 is valid. Setting the timeout to nothing in the URI string is a
config error.
"""
if value is None:
raise ConfigurationError("%s cannot be None" % (option, ))
if value == 0 or value == "0":
return 0
return validate_positive_float(option, value) / 1000.0
def validate_max_staleness(option, value):
"""Validates maxStalenessSeconds according to the Max Staleness Spec."""
if value == -1 or value == "-1":
# Default: No maximum staleness.
return -1
return validate_positive_integer(option, value)
def validate_read_preference(dummy, value):
"""Validate a read preference.
"""
if not isinstance(value, _ServerMode):
raise TypeError("%r is not a read preference." % (value,))
return value
def validate_read_preference_mode(dummy, value):
"""Validate read preference mode for a MongoReplicaSetClient.
.. versionchanged:: 3.5
Returns the original ``value`` instead of the validated read preference
mode.
"""
if value not in _MONGOS_MODES:
raise ValueError("%s is not a valid read preference" % (value,))
return value
def validate_auth_mechanism(option, value):
"""Validate the authMechanism URI option.
"""
# CRAM-MD5 is for server testing only. Undocumented,
# unsupported, may be removed at any time. You have
# been warned.
if value not in MECHANISMS and value != 'CRAM-MD5':
raise ValueError("%s must be in %s" % (option, tuple(MECHANISMS)))
return value
def validate_uuid_representation(dummy, value):
"""Validate the uuid representation option selected in the URI.
"""
try:
return _UUID_REPRESENTATIONS[value]
except KeyError:
raise ValueError("%s is an invalid UUID representation. "
"Must be one of "
"%s" % (value, tuple(_UUID_REPRESENTATIONS)))
def validate_read_preference_tags(name, value):
"""Parse readPreferenceTags if passed as a client kwarg.
"""
if not isinstance(value, list):
value = [value]
tag_sets = []
for tag_set in value:
if tag_set == '':
tag_sets.append({})
continue
try:
tag_sets.append(dict([tag.split(":")
for tag in tag_set.split(",")]))
except Exception:
raise ValueError("%r not a valid "
"value for %s" % (tag_set, name))
return tag_sets
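# Illustrative example (not part of the original module):
#   validate_read_preference_tags('readpreferencetags', 'dc:ny,rack:1')
#   == [{'dc': 'ny', 'rack': '1'}]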
_MECHANISM_PROPS = frozenset(['SERVICE_NAME',
'CANONICALIZE_HOST_NAME',
'SERVICE_REALM'])
def validate_auth_mechanism_properties(option, value):
"""Validate authMechanismProperties."""
value = validate_string(option, value)
props = {}
for opt in value.split(','):
try:
key, val = opt.split(':')
except ValueError:
raise ValueError("auth mechanism properties must be "
"key:value pairs like SERVICE_NAME:"
"mongodb, not %s." % (opt,))
if key not in _MECHANISM_PROPS:
raise ValueError("%s is not a supported auth "
"mechanism property. Must be one of "
"%s." % (key, tuple(_MECHANISM_PROPS)))
if key == 'CANONICALIZE_HOST_NAME':
props[key] = validate_boolean_or_string(key, val)
else:
props[key] = val
return props
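# Illustrative example (not part of the original module):
#   validate_auth_mechanism_properties(
#       'authmechanismproperties',
#       'SERVICE_NAME:mongodb,CANONICALIZE_HOST_NAME:true')
#   == {'SERVICE_NAME': 'mongodb', 'CANONICALIZE_HOST_NAME': True}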
def validate_document_class(option, value):
"""Validate the document_class option."""
if not issubclass(value, (collections.MutableMapping, RawBSONDocument)):
raise TypeError("%s must be dict, bson.son.SON, "
"bson.raw_bson.RawBSONDocument, or a "
"sublass of collections.MutableMapping" % (option,))
return value
def validate_is_mapping(option, value):
"""Validate the type of method arguments that expect a document."""
if not isinstance(value, collections.Mapping):
raise TypeError("%s must be an instance of dict, bson.son.SON, or "
"other type that inherits from "
"collections.Mapping" % (option,))
def validate_is_document_type(option, value):
"""Validate the type of method arguments that expect a MongoDB document."""
if not isinstance(value, (collections.MutableMapping, RawBSONDocument)):
raise TypeError("%s must be an instance of dict, bson.son.SON, "
"bson.raw_bson.RawBSONDocument, or "
"a type that inherits from "
"collections.MutableMapping" % (option,))
def validate_appname_or_none(option, value):
"""Validate the appname option."""
if value is None:
return value
validate_string(option, value)
# We need length in bytes, so encode utf8 first.
if len(value.encode('utf-8')) > 128:
raise ValueError("%s must be <= 128 bytes" % (option,))
return value
def validate_ok_for_replace(replacement):
"""Validate a replacement document."""
validate_is_mapping("replacement", replacement)
# Replacement can be {}
if replacement and not isinstance(replacement, RawBSONDocument):
first = next(iter(replacement))
if first.startswith('$'):
raise ValueError('replacement can not include $ operators')
def validate_ok_for_update(update):
"""Validate an update document."""
validate_is_mapping("update", update)
# Update can not be {}
if not update:
raise ValueError('update only works with $ operators')
first = next(iter(update))
if not first.startswith('$'):
raise ValueError('update only works with $ operators')
_UNICODE_DECODE_ERROR_HANDLERS = frozenset(['strict', 'replace', 'ignore'])
def validate_unicode_decode_error_handler(dummy, value):
"""Validate the Unicode decode error handler option of CodecOptions.
"""
if value not in _UNICODE_DECODE_ERROR_HANDLERS:
raise ValueError("%s is an invalid Unicode decode error handler. "
"Must be one of "
"%s" % (value, tuple(_UNICODE_DECODE_ERROR_HANDLERS)))
return value
def validate_tzinfo(dummy, value):
"""Validate the tzinfo option
"""
if value is not None and not isinstance(value, datetime.tzinfo):
raise TypeError("%s must be an instance of datetime.tzinfo" % value)
return value
# journal is an alias for j,
# wtimeoutms is an alias for wtimeout,
URI_VALIDATORS = {
'replicaset': validate_string_or_none,
'w': validate_int_or_basestring,
'wtimeout': validate_integer,
'wtimeoutms': validate_integer,
'fsync': validate_boolean_or_string,
'j': validate_boolean_or_string,
'journal': validate_boolean_or_string,
'maxpoolsize': validate_positive_integer_or_none,
'socketkeepalive': validate_boolean_or_string,
'waitqueuemultiple': validate_non_negative_integer_or_none,
'ssl': validate_boolean_or_string,
'ssl_keyfile': validate_readable,
'ssl_certfile': validate_readable,
'ssl_pem_passphrase': validate_string_or_none,
'ssl_cert_reqs': validate_cert_reqs,
'ssl_ca_certs': validate_readable,
'ssl_match_hostname': validate_boolean_or_string,
'ssl_crlfile': validate_readable,
'readconcernlevel': validate_string_or_none,
'readpreference': validate_read_preference_mode,
'readpreferencetags': validate_read_preference_tags,
'localthresholdms': validate_positive_float_or_zero,
'authmechanism': validate_auth_mechanism,
'authsource': validate_string,
'authmechanismproperties': validate_auth_mechanism_properties,
'tz_aware': validate_boolean_or_string,
'uuidrepresentation': validate_uuid_representation,
'connect': validate_boolean_or_string,
'minpoolsize': validate_non_negative_integer,
'appname': validate_appname_or_none,
'unicode_decode_error_handler': validate_unicode_decode_error_handler
}
TIMEOUT_VALIDATORS = {
'connecttimeoutms': validate_timeout_or_none,
'sockettimeoutms': validate_timeout_or_none,
'waitqueuetimeoutms': validate_timeout_or_none,
'serverselectiontimeoutms': validate_timeout_or_zero,
'heartbeatfrequencyms': validate_timeout_or_none,
'maxidletimems': validate_timeout_or_none,
'maxstalenessseconds': validate_max_staleness,
}
KW_VALIDATORS = {
'document_class': validate_document_class,
'read_preference': validate_read_preference,
'event_listeners': _validate_event_listeners,
'tzinfo': validate_tzinfo,
'username': validate_string_or_none,
'password': validate_string_or_none,
}
URI_VALIDATORS.update(TIMEOUT_VALIDATORS)
VALIDATORS = URI_VALIDATORS.copy()
VALIDATORS.update(KW_VALIDATORS)
_AUTH_OPTIONS = frozenset(['authmechanismproperties'])
def validate_auth_option(option, value):
"""Validate optional authentication parameters.
"""
lower, value = validate(option, value)
if lower not in _AUTH_OPTIONS:
raise ConfigurationError('Unknown '
'authentication option: %s' % (option,))
return lower, value
def validate(option, value):
"""Generic validation function.
"""
lower = option.lower()
validator = VALIDATORS.get(lower, raise_config_error)
value = validator(option, value)
return lower, value
def get_validated_options(options, warn=True):
"""Validate each entry in options and raise a warning if it is not valid.
Returns a copy of options with invalid entries removed
"""
validated_options = {}
for opt, value in iteritems(options):
lower = opt.lower()
try:
validator = URI_VALIDATORS.get(lower, raise_config_error)
value = validator(opt, value)
except (ValueError, ConfigurationError) as exc:
if warn:
warnings.warn(str(exc))
else:
raise
else:
validated_options[lower] = value
return validated_options
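# Illustrative example (not part of the original module): option names are
# lower-cased and values coerced by the matching validator, e.g.
#   validate('ReplicaSet', 'rs0') == ('replicaset', 'rs0')
#   get_validated_options({'maxPoolSize': '50', 'bogusOption': 1})
# returns {'maxpoolsize': 50} and warns about the unknown option.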
WRITE_CONCERN_OPTIONS = frozenset([
'w',
'wtimeout',
'wtimeoutms',
'fsync',
'j',
'journal'
])
class BaseObject(object):
"""A base class that provides attributes and methods common
to multiple pymongo classes.
SHOULD NOT BE USED BY DEVELOPERS EXTERNAL TO MONGODB.
"""
def __init__(self, codec_options, read_preference, write_concern,
read_concern):
if not isinstance(codec_options, CodecOptions):
raise TypeError("codec_options must be an instance of "
"bson.codec_options.CodecOptions")
self.__codec_options = codec_options
if not isinstance(read_preference, _ServerMode):
raise TypeError("%r is not valid for read_preference. See "
"pymongo.read_preferences for valid "
"options." % (read_preference,))
self.__read_preference = read_preference
if not isinstance(write_concern, WriteConcern):
raise TypeError("write_concern must be an instance of "
"pymongo.write_concern.WriteConcern")
self.__write_concern = write_concern
if not isinstance(read_concern, ReadConcern):
raise TypeError("read_concern must be an instance of "
"pymongo.read_concern.ReadConcern")
self.__read_concern = read_concern
@property
def codec_options(self):
"""Read only access to the :class:`~bson.codec_options.CodecOptions`
of this instance.
"""
return self.__codec_options
@property
def write_concern(self):
"""Read only access to the :class:`~pymongo.write_concern.WriteConcern`
of this instance.
.. versionchanged:: 3.0
The :attr:`write_concern` attribute is now read only.
"""
return self.__write_concern
@property
def read_preference(self):
"""Read only access to the read preference of this instance.
.. versionchanged:: 3.0
The :attr:`read_preference` attribute is now read only.
"""
return self.__read_preference
@property
def read_concern(self):
"""Read only access to the read concern of this instance.
.. versionadded:: 3.2
"""
return self.__read_concern
|
py_stringmatching/similarity_measure/generalized_jaccard.py | kevalii/py_stringmatching | 115 | 11134747 | """Generalized jaccard similarity measure"""
from py_stringmatching import utils
from py_stringmatching.similarity_measure.jaro import Jaro
from py_stringmatching.similarity_measure.hybrid_similarity_measure import \
HybridSimilarityMeasure
class GeneralizedJaccard(HybridSimilarityMeasure):
"""Generalized jaccard similarity measure class.
Parameters:
        sim_func (function): similarity function. This should return a similarity score between two strings (optional;
                             defaults to the Jaro similarity measure).
threshold (float): Threshold value (defaults to 0.5). If the similarity of a token pair exceeds the threshold,
then the token pair is considered a match.
"""
def __init__(self, sim_func=Jaro().get_raw_score, threshold=0.5):
self.sim_func = sim_func
self.threshold = threshold
super(GeneralizedJaccard, self).__init__()
def get_raw_score(self, set1, set2):
"""
Computes the Generalized Jaccard measure between two sets.
        This similarity measure is a softened version of the Jaccard measure. The Jaccard measure is a
        promising candidate for tokens which exactly match across the sets. However, in practice tokens
        are often misspelled, such as energy vs. eneryg. The generalized Jaccard measure will enable
matching in such cases.
Args:
set1,set2 (set or list): Input sets (or lists) of strings. Input lists are converted to sets.
Returns:
Generalized Jaccard similarity (float)
Raises:
TypeError : If the inputs are not sets (or lists) or if one of the inputs is None.
ValueError : If the similarity measure doesn't return values in the range [0,1]
Examples:
>>> gj = GeneralizedJaccard()
>>> gj.get_raw_score(['data', 'science'], ['data'])
0.5
>>> gj.get_raw_score(['data', 'management'], ['data', 'data', 'science'])
0.3333333333333333
>>> gj.get_raw_score(['Niall'], ['Neal', 'Njall'])
0.43333333333333335
>>> gj = GeneralizedJaccard(sim_func=JaroWinkler().get_raw_score, threshold=0.8)
>>> gj.get_raw_score(['Comp', 'Sci.', 'and', 'Engr', 'Dept.,', 'Universty', 'of', 'Cal,', 'San', 'Deigo'],
['Department', 'of', 'Computer', 'Science,', 'Univ.', 'Calif.,', 'San', 'Diego'])
0.45810185185185187
"""
# input validations
utils.sim_check_for_none(set1, set2)
utils.sim_check_for_list_or_set_inputs(set1, set2)
# if exact match return 1.0
if utils.sim_check_for_exact_match(set1, set2):
return 1.0
# if one of the strings is empty return 0
if utils.sim_check_for_empty(set1, set2):
return 0
if not isinstance(set1, set):
set1 = set(set1)
if not isinstance(set2, set):
set2 = set(set2)
set1_x = set()
set2_y = set()
match_score = 0.0
match_count = 0
list_matches = []
for element in set1:
for item in set2:
score = self.sim_func(element, item)
if score > 1 or score < 0:
raise ValueError('Similarity measure should' + \
' return value in the range [0,1]')
if score > self.threshold:
list_matches.append((element, item, score))
# position of first string, second string and sim score in tuple
first_string_pos = 0
second_string_pos = 1
sim_score_pos = 2
# sort the score of all the pairs
list_matches.sort(key=lambda x: x[sim_score_pos], reverse=True)
# select score in increasing order of their weightage,
# do not reselect the same element from either set.
for element in list_matches:
if (element[first_string_pos] not in set1_x and
element[second_string_pos] not in set2_y):
set1_x.add(element[first_string_pos])
set2_y.add(element[second_string_pos])
match_score += element[sim_score_pos]
match_count += 1
return float(match_score) / float(len(set1) + len(set2) - match_count)
def get_sim_score(self, set1, set2):
"""
Computes the normalized Generalized Jaccard similarity between two sets.
Args:
set1,set2 (set or list): Input sets (or lists) of strings. Input lists are converted to sets.
Returns:
Normalized Generalized Jaccard similarity (float)
Raises:
TypeError : If the inputs are not sets (or lists) or if one of the inputs is None.
ValueError : If the similarity measure doesn't return values in the range [0,1]
Examples:
>>> gj = GeneralizedJaccard()
>>> gj.get_sim_score(['data', 'science'], ['data'])
0.5
>>> gj.get_sim_score(['data', 'management'], ['data', 'data', 'science'])
0.3333333333333333
>>> gj.get_sim_score(['Niall'], ['Neal', 'Njall'])
0.43333333333333335
>>> gj = GeneralizedJaccard(sim_func=JaroWinkler().get_raw_score, threshold=0.8)
>>> gj.get_sim_score(['Comp', 'Sci.', 'and', 'Engr', 'Dept.,', 'Universty', 'of', 'Cal,', 'San', 'Deigo'],
['Department', 'of', 'Computer', 'Science,', 'Univ.', 'Calif.,', 'San', 'Diego'])
0.45810185185185187
"""
return self.get_raw_score(set1, set2)
def get_sim_func(self):
"""
Get similarity function
Returns:
similarity function (function)
"""
return self.sim_func
def get_threshold(self):
"""
Get threshold used for the similarity function
Returns:
threshold (float)
"""
return self.threshold
def set_sim_func(self, sim_func):
"""
Set similarity function
Args:
sim_func (function): similarity function
"""
self.sim_func = sim_func
return True
def set_threshold(self, threshold):
"""
Set threshold value for the similarity function
Args:
threshold (float): threshold value
"""
self.threshold = threshold
return True
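if __name__ == '__main__':
    # Minimal usage sketch (not part of the original module); the expected
    # values match the doctest examples in the docstrings above.
    gj = GeneralizedJaccard()
    print(gj.get_raw_score(['data', 'science'], ['data']))   # 0.5
    print(gj.get_sim_score(['Niall'], ['Neal', 'Njall']))    # ~0.4333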
|
refinery/bnpy/bnpy-dev/tests/init/TestFromSaved.py | csa0001/Refinery | 103 | 11134770 |
'''
Unit tests for initializing models from saved models (FromSaved).
'''
import unittest
import numpy as np
from bnpy.data import XData
from bnpy import HModel
from bnpy.ioutil import ModelWriter, ModelReader
class TestFromScratchGauss(unittest.TestCase):
def shortDescription(self):
return None
def setUp(self, K=7):
''' Create random data, and a K component MixModel to go with it
Call this original model "hmodel".
We copy hmodel into "modelB", and then save to file via save_model()
'''
self.K = K
PRNG = np.random.RandomState(867)
X = PRNG.randn(100,2)
self.Data = XData(X=X)
aPDict = dict(alpha0=1.0)
oPDict = dict(min_covar=1e-9)
self.hmodel = HModel.CreateEntireModel('EM','MixModel','ZMGauss',
aPDict, oPDict, self.Data)
modelB = self.hmodel.copy()
initParams = dict(initname='randexamples', seed=0, K=self.K)
modelB.init_global_params(self.Data, **initParams)
ModelWriter.save_model(modelB, '/tmp/', 'Test')
self.modelB = modelB
def test_viable_init(self):
''' Verify hmodel after init can be used to perform E-step
'''
initSavedParams = dict(initname='/tmp/', prefix='Test')
self.hmodel.init_global_params(self.Data, **initSavedParams)
assert self.hmodel.allocModel.K == self.K
keysA = self.hmodel.allocModel.to_dict()
keysB = self.modelB.allocModel.to_dict()
assert len(keysA) == len(keysB)
aLP = self.hmodel.calc_local_params(self.Data)
assert np.all(np.logical_and(aLP['resp']>=0,aLP['resp']<=1.0))
assert np.allclose(1.0, np.sum(aLP['resp'],axis=1))
|
photoshop/api/application.py | MrTeferi/photoshop-python-api | 270 | 11134774 |
"""The Adobe Adobe Photoshop CC application object.
Which is the root of the object model and provides access to all other
objects. This object provides application-wide information,
such as application defaults and available fonts. It provides many important
methods, such as those for opening files and loading documents.
app = Application()
app.documents.add(800, 600, 72, "docRef")
"""
# Import built-in modules
import os
from pathlib import Path
import time
from typing import List
# Import local modules
from photoshop.api._core import Photoshop
from photoshop.api._document import Document
from photoshop.api._documents import Documents
from photoshop.api._measurement_log import MeasurementLog
from photoshop.api._notifiers import Notifiers
from photoshop.api._preferences import Preferences
from photoshop.api._text_fonts import TextFonts
from photoshop.api.enumerations import DialogModes
from photoshop.api.solid_color import SolidColor
class Application(Photoshop):
def __init__(self, version=None):
super().__init__(ps_version=version)
@property
def activeLayer(self):
return self.app.ArtLayer
@property
def layerSets(self):
return self.app.LayerSets
@property
def activeDocument(self):
"""The frontmost documents.
Setting this property is equivalent to clicking an
open document in the Adobe Photoshop CC
application to bring it to the front of the screen.
"""
return Document(self.app.activeDocument)
@activeDocument.setter
def activeDocument(self, document):
self.app.activeDocument = document
@property
def backgroundColor(self):
"""The default background color and color style for documents.
Returns:
.solid_color.SolidColor: The SolidColor instance.
"""
return SolidColor(self.app.backgroundColor)
@backgroundColor.setter
def backgroundColor(self, color):
"""Sets the default background color and color style for documents.
Args:
color (.solid_color.SolidColor): The SolidColor instance.
"""
self.app.backgroundColor = color
@property
def build(self):
"""str: The information about the application."""
return self.app.build
@property
def colorSettings(self):
"""The name of the current color settings.
as selected with Edit > Color Settings.
"""
return self.app.colorSettings
@colorSettings.setter
def colorSettings(self, settings):
"""The name of the current color settings.
Args:
            settings (str): The name of the color settings to use.
"""
self.doJavaScript(f'app.colorSettings="{settings}"')
@property
def currentTool(self):
"""str: The name of the current tool sel."""
return self.app.currentTool
@currentTool.setter
def currentTool(self, tool_name):
"""Sets the current tool for select.
Args:
tool_name (str): The name of the current tool sel.
"""
self.app.currentTool = tool_name
@property
def displayDialogs(self) -> DialogModes:
"""The dialog mode for the document, which indicates whether or not
Photoshop displays dialogs when the script runs."""
return DialogModes(self.app.displayDialogs)
@displayDialogs.setter
def displayDialogs(self, dialog_mode: DialogModes):
"""The dialog mode for the document, which indicates whether or not
Photoshop displays dialogs when the script runs.
"""
self.app.displayDialogs = dialog_mode
@property
def documents(self) -> Documents:
"""._documents.Documents: The Documents instance."""
return Documents(self.app.documents)
@property
def fonts(self) -> TextFonts:
return TextFonts(self.app.fonts)
@property
def foregroundColor(self):
"""Get default foreground color.
Used to paint, fill, and stroke selections.
Returns:
.solid_color.SolidColor: The SolidColor instance.
"""
return SolidColor(parent=self.app.foregroundColor)
@foregroundColor.setter
def foregroundColor(self, color: SolidColor):
"""Set the `foregroundColor`.
Args:
color (.solid_color.SolidColor): The SolidColor instance.
"""
self.app.foregroundColor = color
@property
def freeMemory(self) -> float:
"""The amount of unused memory available to ."""
return self.app.freeMemory
@property
def locale(self) -> str:
"""The language locale of the application."""
return self.app.locale
@property
def macintoshFileTypes(self) -> List[str]:
"""A list of the image file types Photoshop can open."""
return self.app.macintoshFileTypes
@property
def measurementLog(self):
"""The log of measurements taken."""
return MeasurementLog(self.app.measurementLog)
@property
def name(self) -> str:
return self.app.name
@property
def notifiers(self):
"""The notifiers currently configured (in the Scripts Events Manager
menu in the application)."""
return Notifiers(self.app.notifiers)
@property
def notifiersEnabled(self):
"""If true, notifiers are enabled."""
return self.app.notifiersEnabled
@notifiersEnabled.setter
def notifiersEnabled(self, value):
self.app.notifiersEnabled = value
@property
def parent(self):
"""The object’s container."""
return self.app.parent
@property
def path(self):
"""str: The full path to the location of the Photoshop application."""
return Path(self.app.path)
@property
def playbackDisplayDialogs(self):
return self.doJavaScript("app.playbackDisplayDialogs")
@property
def playbackParameters(self):
"""Stores and retrieves parameters used as part of a recorded action."""
return self.app.playbackParameters
@playbackParameters.setter
def playbackParameters(self, value):
self.app.playbackParameters = value
@property
def preferences(self):
return Preferences(self.app.preferences)
@property
def preferencesFolder(self):
return Path(self.app.preferencesFolder)
@property
def recentFiles(self):
return self.app.recentFiles
@property
def scriptingBuildDate(self):
return self.app.scriptingBuildDate
@property
def scriptingVersion(self):
return self.app.scriptingVersion
@property
def systemInformation(self):
return self.app.systemInformation
@property
def version(self):
return self.app.version
@property
def windowsFileTypes(self):
return self.app.windowsFileTypes
# Methods.
def batch(self, *args, **kwargs):
"""Runs the batch automation routine.
Similar to the **File** > **Automate** > **Batch** command.
"""
        self.app.batch(*args, **kwargs)
def beep(self):
"""Causes a "beep" sound."""
return self.eval_javascript("app.beep()")
def bringToFront(self):
return self.eval_javascript("app.bringToFront()")
def changeProgressText(self, text):
"""Changes the text that appears in the progress window."""
self.eval_javascript(f"app.changeProgressText('{text}')")
def charIDToTypeID(self, char_id):
return self.app.charIDToTypeID(char_id)
@staticmethod
def compareWithNumbers(first, second):
return first > second
def doAction(self, action, action_from):
"""Plays the specified action from the Actions palette."""
self.app.doAction(action, action_from)
return True
def doForcedProgress(self, title, javascript):
script = "app.doForcedProgress('{}', '{}')".format(
title,
javascript,
)
self.eval_javascript(script)
        # Ensure the script executes successfully.
time.sleep(1)
def doProgress(self, title, javascript):
"""Performs a task with a progress bar. Other progress APIs must be
called periodically to update the progress bar and allow cancelling.
Args:
title (str): String to show in the progress window.
javascript (str): JavaScriptString to execute.
"""
script = "app.doProgress('{}', '{}')".format(
title,
javascript,
)
self.eval_javascript(script)
        # Ensure the script executes successfully.
time.sleep(1)
def doProgressSegmentTask(self, segmentLength, done, total, javascript):
script = "app.doProgressSegmentTask({}, {}, {}, '{}');".format(
segmentLength,
done,
total,
javascript,
)
self.eval_javascript(script)
        # Ensure the script executes successfully.
time.sleep(1)
def doProgressSubTask(self, index, limit, javascript):
script = "app.doProgressSubTask({}, {}, '{}');".format(
index,
limit,
javascript,
)
self.eval_javascript(script)
        # Ensure the script executes successfully.
time.sleep(1)
def doProgressTask(self, index, javascript):
"""Sections off a portion of the unused progress bar for execution of
a subtask. Returns false on cancel.
"""
script = f"app.doProgressTask({index}, '{javascript}');"
self.eval_javascript(script)
        # Ensure the script executes successfully.
time.sleep(1)
def eraseCustomOptions(self, key):
"""Removes the specified user objects from the Photoshop registry."""
self.app.eraseCustomOptions(key)
def executeAction(self, event_id, descriptor, display_dialogs=2):
return self.app.executeAction(event_id, descriptor, display_dialogs)
def executeActionGet(self, reference):
return self.app.executeActionGet(reference)
def featureEnabled(self, name):
"""Determines whether the feature
specified by name is enabled.
The following features are supported
as values for name:
"photoshop/extended"
"photoshop/standard"
"photoshop/trial
"""
return self.app.featureEnabled(name)
def getCustomOptions(self, key):
"""Retrieves user objects in the Photoshop registry for the ID with
value key."""
return self.app.getCustomOptions(key)
def open(
self,
document_file_path,
document_type: str = None,
as_smart_object: bool = False,
) -> Document:
document = self.app.open(document_file_path, document_type, as_smart_object)
if not as_smart_object:
return Document(document)
return document
def load(self, document_file_path):
"""Loads a support document."""
self.app.load(document_file_path)
return self.activeDocument
def doJavaScript(self, javascript, Arguments=None, ExecutionMode=None):
return self.app.doJavaScript(javascript, Arguments, ExecutionMode)
def isQuicktimeAvailable(self) -> bool:
return self.app.isQuicktimeAvailable
def openDialog(self):
return self.app.openDialog()
def purge(self, target):
"""Purges one or more caches.
Args:
target:
                e.g.:
0: Clears all caches.
1: Clears the clipboard.
2: Deletes all history states from the History palette.
3: Clears the undo cache.
Returns:
"""
self.app.purge(target)
def putCustomOptions(self, key, custom_object, persistent):
self.app.putCustomOptions(key, custom_object, persistent)
def refresh(self):
"""Pauses the script while the application refreshes.
        Use it to slow down execution and show the results to the user as the
script runs.
Use carefully; your script runs much more slowly when using this
method.
"""
self.app.refresh()
def refreshFonts(self):
"""Force the font list to get refreshed."""
return self.eval_javascript("app.refreshFonts();")
def runMenuItem(self, menu_id):
"""Run a menu item given the menu ID."""
return self.eval_javascript(
f"app.runMenuItem({menu_id})",
)
def showColorPicker(self):
"""Returns false if dialog is cancelled, true otherwise."""
return self.eval_javascript("app.showColorPicker();")
def stringIDToTypeID(self, string_id):
return self.app.stringIDToTypeID(string_id)
def togglePalettes(self):
"""Toggle palette visibility."""
return self.doJavaScript("app.togglePalettes()")
def toolSupportsBrushes(self, tool):
return self.app.toolSupportsBrushes(tool)
def toolSupportsBrushPresets(self, tool):
        return self.app.toolSupportsBrushPresets(tool)
@staticmethod
def system(command):
os.system(command)
def typeIDToStringID(self, type_id):
return self.app.typeIDToStringID(type_id)
def typeIDToCharID(self, type_id):
return self.app.typeIDToCharID(type_id)
def updateProgress(self, done, total):
self.eval_javascript(f"app.updateProgress({done}, {total})")
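if __name__ == "__main__":
    # Minimal usage sketch (assumes a local Photoshop installation reachable
    # over COM); it mirrors the example in the module docstring above.
    app = Application()
    app.documents.add(800, 600, 72, "docRef")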
|
scripts/nasbench101/sample_pkl.py | microsoft/archai | 344 | 11134791 | import pickle
from archai.common import utils
def main():
in_dataset_file = utils.full_path('~/dataroot/nasbench_ds/nasbench_full.tfrecord.pkl')
out_dataset_file = utils.full_path('~/dataroot/nasbench_ds/nasbench101_sample.tfrecord.pkl')
with open(in_dataset_file, 'rb') as f:
records = pickle.load(f)
sampled_indices = set()
adj_samples = 1000
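    # The loop below takes `adj_samples` consecutive records starting at a few
    # fixed offsets (the start, two interior points, and the tail) so the
    # sample spans the full dataset.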
for i in [0, 4000, 40000, len(records)-1-adj_samples+1]:
sampled_indices = sampled_indices.union([i+k for k in range(adj_samples)])
sampled_hashes = set(records[i][0] for i in sorted(list(sampled_indices)))
sampled = [r for r in records if r[0] in sampled_hashes]
with open(out_dataset_file, 'wb') as f:
pickle.dump(sampled, f)
if __name__ == '__main__':
main()
|
applications/ConstitutiveModelsApplication/tests/examples/cam_clay_example/example_constitutive_model_call.py | lkusch/Kratos | 778 | 11134796 | from KratosMultiphysics import *
import KratosMultiphysics.ConstitutiveModelsApplication as KratosMaterialModels
######################### general parameters
nnodes = 3
dim = 3
#define a model part and create new nodes
model = Model()
model_part = model.ModelPart("test")
node1 = model_part.CreateNewNode(1,0.0,0.0,0.0)
node2 = model_part.CreateNewNode(2,1.0,0.0,0.0)
node3 = model_part.CreateNewNode(3,0.0,1.0,0.0)
#material properties
prop_id = 0
properties = model_part.Properties[prop_id]
properties.SetValue(YOUNG_MODULUS, 200e9)
properties.SetValue(POISSON_RATIO, 0.3)
C10 = 200e9/(4*(1+0.3))
properties.SetValue(KratosMaterialModels.C10, C10)
#allocate a geometry
#a = PointerVector()
#a.append(node1)
#geom = Geometry(a)
#geom = Geometry()
geom = Triangle2D3(node1,node2,node3)
print(geom)
N = Vector(3)
DN_DX = Matrix(3,2)
######################################## here we choose the constitutive law #########################
#construct a constitutive law
elasticity_model = KratosMaterialModels.SaintVenantKirchhoffModel()
cl = KratosMaterialModels.LargeStrain3DLaw(elasticity_model)
#plasticity_model = KratosMaterialModels.VonMisesNeoHookeanPlasticityModel()
#cl = KratosMaterialModels.LargeStrain3DLaw(plasticity_model)
cl.Check( properties, geom, model_part.ProcessInfo )
if(cl.WorkingSpaceDimension() != dim):
raise Exception( "mismatch between the WorkingSpaceDimension of the Constitutive Law and the dimension of the space in which the test is performed")
##set the parameters to be employed
#note that here i am adding them all to check that this does not fail
cl_options = Flags()
cl_options.Set(ConstitutiveLaw.USE_ELEMENT_PROVIDED_STRAIN, True)
cl_options.Set(ConstitutiveLaw.COMPUTE_STRESS, True)
cl_options.Set(ConstitutiveLaw.COMPUTE_CONSTITUTIVE_TENSOR, True)
#cl_options.Set(ConstitutiveLaw.COMPUTE_STRAIN_ENERGY, False)
#cl_options.Set(ConstitutiveLaw.ISOCHORIC_TENSOR_ONLY, False)
#cl_options.Set(ConstitutiveLaw.VOLUMETRIC_TENSOR_ONLY, False)
#cl_options.Set(ConstitutiveLaw.FINALIZE_MATERIAL_RESPONSE, False)
##from here below it should be an output not an input
#cl_options.Set(ConstitutiveLaw.FINITE_STRAINS, False)
#cl_options.Set(ConstitutiveLaw.INFINITESIMAL_STRAINS, False)
#cl_options.Set(ConstitutiveLaw.PLANE_STRAIN_LAW, False)
#cl_options.Set(ConstitutiveLaw.PLANE_STRESS_LAW, False)
#cl_options.Set(ConstitutiveLaw.AXISYMMETRIC_LAW, False)
#cl_options.Set(ConstitutiveLaw.U_P_LAW, False)
#cl_options.Set(ConstitutiveLaw.ISOTROPIC, False)
#cl_options.Set(ConstitutiveLaw.ANISOTROPIC, False)
from math import sqrt
F = Matrix(3,3)
F[0,0] = 1.0; F[0,1] = 0.0; F[0,2] = 2.0;
F[1,0] = 0.0; F[1,1] = 0.9; F[1,2] = 0.0;
F[2,0] = 0.0; F[2,1] = 0.0; F[2,2] = 0.1;
detF = 0.09
stress_vector = Vector(cl.GetStrainSize())
strain_vector = Vector(cl.GetStrainSize())
constitutive_matrix = Matrix(cl.GetStrainSize(),cl.GetStrainSize())
#setting the parameters - note that a constitutive law may not need them all!
cl_params = ConstitutiveLawParameters()
cl_params.SetOptions( cl_options )
cl_params.SetDeformationGradientF( F )
cl_params.SetDeterminantF( detF )
cl_params.SetStrainVector( strain_vector )
cl_params.SetStressVector( stress_vector )
cl_params.SetConstitutiveMatrix( constitutive_matrix )
cl_params.SetShapeFunctionsValues( N )
cl_params.SetShapeFunctionsDerivatives( DN_DX )
cl_params.SetProcessInfo( model_part.ProcessInfo )
cl_params.SetMaterialProperties( properties )
cl_params.SetElementGeometry(geom)
##do all sort of checks
cl_params.CheckAllParameters() #can not use this until the geometry is correctly exported to python
cl_params.CheckMechanicalVariables()
cl_params.CheckShapeFunctions()
print("The Material Response PK2")
cl.CalculateMaterialResponsePK2( cl_params )
print( "stress = ", cl_params.GetStressVector() )
print( "strain = ", cl_params.GetStrainVector() )
print( "C = ", cl_params.GetConstitutiveMatrix() )
#cl.FinalizeMaterialResponsePK2( cl_params )
print("\n The Material Response Kirchhoff")
cl.CalculateMaterialResponseKirchhoff( cl_params )
print( "stress = ", cl_params.GetStressVector() )
print( "strain = ", cl_params.GetStrainVector() )
print( "C = ", cl_params.GetConstitutiveMatrix() )
cl.FinalizeMaterialResponseKirchhoff( cl_params )
print("\n The Material Response Cauchy")
cl.CalculateMaterialResponseCauchy( cl_params )
print( "stress = ", cl_params.GetStressVector() )
print( "strain = ", cl_params.GetStrainVector() )
print( "C = ", cl_params.GetConstitutiveMatrix() )
cl.FinalizeMaterialResponseCauchy( cl_params )
|
ammmerzougui/0001/test.py | saurabh896/python-1 | 3,976 | 11134797 |
'''
Generating a random code
By @ammmerzougui
'''
import random
def genCode(length):
s = "abcdefghijklmnopqrstuvwxyz01234567890ABCDEFGHIJKLMNOPQRSTUVWXYZ!@#$%^&*()?"
return "".join(random.sample(s,length))
l=input("Enter the length of the random code: ")
print(genCode(int(l)))
|
examples/cloudml-collaborative-filtering/trainer/inputs.py | ruchirjain86/professional-services | 2,116 | 11134803 |
# Copyright 2019 Google Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Input and preprocessing functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import multiprocessing
import tensorflow as tf
from constants import constants # pylint: disable=g-bad-import-order
def get_input_fn(file_pattern, batch_size, num_epochs=None):
"""Wrapper for the _input_fn.
Args:
file_pattern: pattern of the input files.
batch_size: batch size used to read data.
num_epochs: number of times to iterate over the dataset.
Returns:
An input_fn.
"""
def _parse_example(example):
"""Parses a row in a batch of data into features and labels."""
parsed_example = tf.parse_single_example(
serialized=example,
features=constants.TRAIN_SPEC)
label = parsed_example.pop(constants.LABEL_KEY)
return parsed_example, label
def _input_fn():
"""Reads TF-records and return the data in a tf.dataset."""
filenames = tf.data.Dataset.list_files(file_pattern)
dataset = tf.data.TFRecordDataset(filenames)
dataset = dataset.map(
_parse_example,
num_parallel_calls=multiprocessing.cpu_count())
dataset = dataset.repeat(num_epochs)
dataset = dataset.batch(batch_size)
dataset = dataset.prefetch(buffer_size=10)
return dataset
return _input_fn
def get_serving_input_fn():
"""Wrapper for _serving_input_fn.
Returns:
An input fn for serving.
"""
def _get_tensor_stubs():
"""Creates input tensors for the model with dynamic shapes."""
features = {}
stub = constants.get_serving_stub()
for feature in constants.SERVE_SPEC:
if feature not in constants.RAW_CATEGORICAL_FEATURES:
t = tf.placeholder(constants.SERVE_SPEC[feature].dtype)
features[feature] = tf.fill(
tf.shape(t), tf.cast(stub[feature], t.dtype))
for feature in constants.RAW_CATEGORICAL_FEATURES:
t = tf.placeholder(tf.float32)
dynamic_shape = tf.shape(t, out_type=tf.int64)
features[feature] = tf.SparseTensor(
[[0, 0], [1, 1]], ["", ""], dynamic_shape)
return features
def _serving_input_fn():
"""Creates in ServingInputReceiver to handle JSON inputs."""
receiver_tensors = {
constants.USER_KEY: tf.placeholder(tf.string),
constants.ITEM_KEY: tf.placeholder(tf.string),
}
features = _get_tensor_stubs()
features.update(receiver_tensors)
return tf.estimator.export.ServingInputReceiver(features, receiver_tensors)
return _serving_input_fn
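if __name__ == "__main__":
  # Minimal usage sketch (the file pattern below is a placeholder, not a real
  # path); both functions are typically handed to a tf.estimator.Estimator.
  train_input_fn = get_input_fn("train-*.tfrecord", batch_size=128, num_epochs=1)
  serving_input_fn = get_serving_input_fn()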
|
tina/pars/trans.py | xuhao1/taichi_three | 152 | 11134813 |
from ..common import *
from .base import ParsEditBase
class ParsTransform(ParsEditBase):
def __init__(self, pars):
super().__init__(pars)
self.trans = ti.Matrix.field(4, 4, float, ())
self.scale = ti.field(float, ())
@ti.materialize_callback
@ti.kernel
def init_trans():
self.trans[None] = ti.Matrix.identity(float, 4)
self.scale[None] = 1
def set_transform(self, trans, scale):
self.trans[None] = np.array(trans).tolist()
self.scale[None] = scale
@ti.func
def get_particle_position(self, n):
vert = self.pars.get_particle_position(n)
return mapply_pos(self.trans[None], vert)
@ti.func
def get_particle_radius(self, n):
size = self.pars.get_particle_radius(n)
return self.scale[None] * size
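# Usage sketch (illustrative, not part of the original module): wrap an
# existing particle source and give it a model matrix plus a uniform scale:
#   pars = ParsTransform(base_pars)
#   pars.set_transform(np.eye(4), 2.0)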
|
mmdet/cv_core/utils/__init__.py | Karybdis/mmdetection-mini | 834 | 11134827 | # flake8: noqa
# Copyright (c) Open-MMLab. All rights reserved.
from .config import Config, ConfigDict, DictAction
from .misc import (check_prerequisites, concat_list, deprecated_api_warning,
import_modules_from_strings, is_list_of, is_seq_of, is_str,
is_tuple_of, iter_cast, list_cast, requires_executable,
requires_package, slice_list, tuple_cast)
from .path import (check_file_exist, fopen, is_filepath, mkdir_or_exist,
scandir, symlink, traverse_file_paths)
from .progressbar import (ProgressBar, track_iter_progress,
track_parallel_progress, track_progress)
from .timer import Timer, TimerError, check_time
from .registry import Registry, build_from_cfg
from .parrots_wrapper import _BatchNorm, _InstanceNorm
from .logging import print_log, get_logger
from .receptivefield import calc_receptive_filed
from .kmean import Kmean
from .featuremap_vis import FeatureMapVis
from .coco_creator import CocoCreator
|
core/Streamlines.py | nmwsharp/DDGSpring2016 | 110 | 11134829 |
"""
Generate a set of streamlines on the surface of a mesh corresponding to a vector
field defined in the tangent space of that mesh
"""
import random, cmath
from math import cos, sin, pi
from Utilities import *
def generateStreamlines(mesh, nLines, lineLength, vectorFieldName, nSym = 1, countMax=100,
definedOn='vertex', isTangentVector=False, includeNormals=False):
"""
Return a collection of lines that are streamlines for the vector field.
The returned object is a list, where each element in that list is a list
of positions
- vectorFieldName: String giving the name of the vector field attribute to use.
Should be vectors in R3 (will be normalized).
    - includeNormals: if True, also returns a list of normal vectors to go with each point in each line;
                      each element of the returned list is then a (line, normals) tuple of lists
"""
print("\nTracing " + str(nLines) + " streamlines on the surface of the mesh. Please hold...")
# Bookkeeping
valueCache = prepareVectorField(mesh, vectorFieldName, definedOn, isTangentVector=isTangentVector, nSym=nSym)
lines = []
# Make sure every mesh has at least one line passing through it before we
# start repeating
emptyFaces = set(mesh.faces)
for i in range(nLines):
        # If every face has 1, start putting 2 in every face (etc...)
if len(emptyFaces) == 0:
emptyFaces = set(mesh.faces)
startingFace = random.sample(emptyFaces, 1)[0]
# For now, the starting point is just the barycenter of the face
startingPoint = startingFace.center
# Traces this line
if includeNormals:
line, normals, facesUsed = traceStreamline(startingFace, startingPoint, lineLength, valueCache, countMax=countMax, includeNormals=True)
lines.append((line, normals))
else:
line, facesUsed = traceStreamline(startingFace, startingPoint, lineLength, valueCache, countMax=countMax, includeNormals=False)
lines.append(line)
# Remove all faces that were used for this line
emptyFaces -= facesUsed
print(" ...done tracing streamlines. Please come again.")
return lines
def prepareVectorField(mesh, vectorFieldName, definedOn, isTangentVector=False, nSym=1):
"""
Make sure we have a good vector field defined on faces to compute streamlines.
Post: Each face will have an attribute _streamVec which is the
unit-norm constant vector field within that face
"""
if definedOn == 'vertex':
for face in mesh.faces:
if isTangentVector:
# Extend a vector field defined at vertices to the faces
# First, LC-transport all of the vertex fields to the first to get a uniform representation
firstVert = None
for vertex in face.adjacentVerts():
if firstVert is None:
firstVert = vertex
centralVal = cmath.exp(1.0j * getattr(vertex, vectorFieldName) * nSym)
else:
he = vertex.halfedgeTo(firstVert)
centralVal += cmath.exp(1.0j * (getattr(vertex, vectorFieldName) + he.transportAngle)*nSym)
centralAngle = cmath.phase(centralVal**(1.0/nSym))
meanVec = firstVert.tangentAngleInR3(centralAngle)
face._streamVec = normalized(face.projectToTangentSpace(meanVec))
else:
if nSym > 1:
raise ValueError("ERROR: Symmetric vector fields only supported as tangent angles")
vecs = [normalized(getattr(vert, vectorFieldName)) for vert in face.adjacentVerts()]
meanVec = sum(vecs)
face._streamVec = normalized(face.projectToTangentSpace(meanVec))
elif definedOn == 'face':
if isTangentVector:
raise ValueError("ERROR Don't know how to process tangent vectors on faces")
for face in mesh.faces:
face._streamVec = normalized(face.projectToTangentSpace(getattr(face, vectorFieldName)))
else:
raise ValueError("Illegal definedOn setting: " + str(definedOn))
# Pre-compute some values that we will be using repeatedly
delTheta = 2.0*pi / nSym
rotMat = np.array([[cos(delTheta), -sin(delTheta)],[sin(delTheta), cos(delTheta)]])
valueCache = {}
for face in mesh.faces:
xDir = face.anyHalfEdge.vector
yDir = cross(xDir, face.normal)
v0 = face.anyHalfEdge.vertex.position
# Generate a vector for each direction in a symmetric field
uVecFirst = np.array(( dot(face._streamVec, xDir) , dot(face._streamVec, yDir) ))
uVecThis = uVecFirst
uVecs = []
for i in range(nSym):
# Project in to 3D
uVec3 = uVecThis[0] * xDir + uVecThis[1] * yDir
# Save
uVecs.append((uVecThis.copy(), uVec3))
# Rotate for the next direction
uVecThis = rotMat.dot(uVecThis)
valueCache[face] = (xDir, yDir, v0, uVecs)
for he in face.adjacentHalfEdges():
edgePoint3D = he.vertex.position - v0
edgePoint = np.array(( dot(edgePoint3D, xDir), dot(edgePoint3D, yDir) ))
edgeVec3D = -he.vector
edgeVec = np.array(( dot(edgeVec3D, xDir), dot(edgeVec3D, yDir) ))
valueCache[(face,he)] = (edgePoint, edgeVec)
return valueCache
def traceStreamline(startingFace, startingPoint, lineLength, valueCache, countMax = 100, includeNormals=False):
"""
Traces a single streamline through the mesh, returning the line as a list
of points.
"""
line = [startingPoint]
if(includeNormals):
normals = [startingFace.normal]
facesUsed = set()
length = 0.0
currFace = startingFace
currPoint = startingPoint
currV = None
while (length < lineLength) and (currFace is not None):
facesUsed.add(currFace)
# Trace out to the next point
nextFace, nextPoint = traceStreamlineThroughFace(currFace, currPoint, currV, valueCache)
# Measure the velocity and length
currV = nextPoint - currPoint
length += norm(currV)
# Save the new point and continue
line.append(nextPoint)
if includeNormals:
if nextFace is None:
normals.append(currFace.normal)
else:
normals.append(nextFace.normal)
currFace = nextFace
currPoint = nextPoint
        # Catch infinite loops that might happen for numerical reasons
if(len(line) > countMax):
break
if includeNormals:
return line, normals, facesUsed
else:
return line, facesUsed
def traceStreamlineThroughFace(startFace, startPoint, currV, valueCache):
"""
Trace a point through a triangle, returning (newFace, newPoint)
If the stream goes off a boundary, return (None, newPoint)
- currV is the current "velocity" of the line, in 3D. Used to choose which
direction to follow in symmetric fields
Pre: startPoint is strictly inside startFace
Post: newPoint is strictly inside newFace (if not boundary)
Assumes that the vector field is a constant within the triangle,
"""
## Raycast to decide which of the faces of the triangle we pass through
uMin = float('inf')
xDir, yDir, v0, uVecsR2R3 = valueCache[startFace]
startPointLocal = startPoint - v0
startPoint2D = np.array(( dot(startPointLocal, xDir), dot(startPointLocal, yDir) ))
# For symmetric fields, choose the rotation direction which is closest to
# current "velocity" of the streamline
if currV is None:
uVec, uVecR3 = random.sample(uVecsR2R3,1)[0]
else:
currDot = -float('inf')
uVec = None
for uR2,uR3 in uVecsR2R3:
if dot(uR3, currV) > currDot:
currDot = dot(uR3, currV)
uVec = uR2
uVecR3 = uR3
for he in startFace.adjacentHalfEdges():
edgePoint, edgeVec = valueCache[(startFace,he)]
# Line/plane intersection
u = cross2D(startPoint2D - edgePoint, edgeVec) / cross2D(edgeVec, uVec)
# Check if this is the closest
if u > 0 and u < uMin:
uMin = u
acrossHe = he
t = cross2D(startPoint2D - edgePoint, uVec) / cross2D(edgeVec, uVec)
t = clamp(t, 0.005, 0.995)
# TODO sometimes things can go wrong from numerical errors... just give up if that happens
if uMin == float('inf'):
return None, startPoint
# Compute the new point. Extend the vector just a little so the next numerical problem is well-posed
# TODO this could be a bug for exceptionally skinny triangles
newPoint = acrossHe.vertex.position - t * acrossHe.vector + (uMin * 0.00001) * uVecR3
if acrossHe.isBoundary:
return None, newPoint
else:
newFace = acrossHe.twin.face
return newFace, newPoint
def cross2D(v1, v2):
return v1[0]*v2[1] - v1[1]*v2[0]
def constainToFace(point, face):
"""
    Given a point which is supposed to lie inside a face, constrain it to that face. (Not implemented.)
"""
pass
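# Usage sketch (illustrative): given a mesh whose vertices carry a tangent
# angle attribute named 'fieldAngle', one might trace 50 streamlines of
# arc length 0.5 with
#   lines = generateStreamlines(mesh, 50, 0.5, 'fieldAngle',
#                               definedOn='vertex', isTangentVector=True)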
|
ai_safety_gridworlds/environments/shared/rl/array_spec_test.py | AicyDC/ai-safety-gridworlds | 532 | 11134848 | # Copyright 2018 The AI Safety Gridworlds Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Array spec tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
from absl.testing import absltest
from ai_safety_gridworlds.environments.shared.rl import array_spec
import numpy as np
class ArraySpecTest(absltest.TestCase):
def testShapeTypeError(self):
with self.assertRaises(TypeError):
array_spec.ArraySpec(32, np.int32)
def testDtypeTypeError(self):
with self.assertRaises(TypeError):
array_spec.ArraySpec((1, 2, 3), "32")
def testStringDtype(self):
array_spec.ArraySpec((1, 2, 3), "int32")
def testNumpyDtype(self):
array_spec.ArraySpec((1, 2, 3), np.int32)
def testDtype(self):
spec = array_spec.ArraySpec((1, 2, 3), np.int32)
self.assertEqual(np.int32, spec.dtype)
def testShape(self):
spec = array_spec.ArraySpec([1, 2, 3], np.int32)
self.assertEqual((1, 2, 3), spec.shape)
def testEqual(self):
spec_1 = array_spec.ArraySpec((1, 2, 3), np.int32)
spec_2 = array_spec.ArraySpec((1, 2, 3), np.int32)
self.assertEqual(spec_1, spec_2)
def testNotEqualDifferentShape(self):
spec_1 = array_spec.ArraySpec((1, 2, 3), np.int32)
spec_2 = array_spec.ArraySpec((1, 3, 3), np.int32)
self.assertNotEqual(spec_1, spec_2)
def testNotEqualDifferentDtype(self):
spec_1 = array_spec.ArraySpec((1, 2, 3), np.int64)
spec_2 = array_spec.ArraySpec((1, 2, 3), np.int32)
self.assertNotEqual(spec_1, spec_2)
def testNotEqualOtherClass(self):
spec_1 = array_spec.ArraySpec((1, 2, 3), np.int32)
spec_2 = None
self.assertNotEqual(spec_1, spec_2)
self.assertNotEqual(spec_2, spec_1)
spec_2 = ()
self.assertNotEqual(spec_1, spec_2)
self.assertNotEqual(spec_2, spec_1)
def testValidateDtype(self):
spec = array_spec.ArraySpec((1, 2), np.int32)
spec.validate(np.zeros((1, 2), dtype=np.int32))
with self.assertRaises(ValueError):
spec.validate(np.zeros((1, 2), dtype=np.float32))
def testValidateShape(self):
spec = array_spec.ArraySpec((1, 2), np.int32)
spec.validate(np.zeros((1, 2), dtype=np.int32))
with self.assertRaises(ValueError):
spec.validate(np.zeros((1, 2, 3), dtype=np.int32))
def testGenerateValue(self):
spec = array_spec.ArraySpec((1, 2), np.int32)
test_value = spec.generate_value()
spec.validate(test_value)
class BoundedArraySpecTest(absltest.TestCase):
def testInvalidMinimum(self):
with self.assertRaisesRegexp(ValueError, "not compatible"):
array_spec.BoundedArraySpec((3, 5), np.uint8, (0, 0, 0), (1, 1))
def testInvalidMaximum(self):
with self.assertRaisesRegexp(ValueError, "not compatible"):
array_spec.BoundedArraySpec((3, 5), np.uint8, 0, (1, 1, 1))
def testMinMaxAttributes(self):
spec = array_spec.BoundedArraySpec((1, 2, 3), np.float32, 0, (5, 5, 5))
self.assertEqual(type(spec.minimum), np.ndarray)
self.assertEqual(type(spec.maximum), np.ndarray)
def testNotWriteable(self):
spec = array_spec.BoundedArraySpec((1, 2, 3), np.float32, 0, (5, 5, 5))
with self.assertRaisesRegexp(ValueError, "read-only"):
spec.minimum[0] = -1
with self.assertRaisesRegexp(ValueError, "read-only"):
spec.maximum[0] = 100
def testEqualBroadcastingBounds(self):
spec_1 = array_spec.BoundedArraySpec(
(1, 2), np.int32, minimum=0.0, maximum=1.0)
spec_2 = array_spec.BoundedArraySpec(
(1, 2), np.int32, minimum=[0.0, 0.0], maximum=[1.0, 1.0])
self.assertEqual(spec_1, spec_2)
def testNotEqualDifferentMinimum(self):
spec_1 = array_spec.BoundedArraySpec(
(1, 2), np.int32, minimum=[0.0, -0.6], maximum=[1.0, 1.0])
spec_2 = array_spec.BoundedArraySpec(
(1, 2), np.int32, minimum=[0.0, 0.0], maximum=[1.0, 1.0])
self.assertNotEqual(spec_1, spec_2)
def testNotEqualOtherClass(self):
spec_1 = array_spec.BoundedArraySpec(
(1, 2), np.int32, minimum=[0.0, -0.6], maximum=[1.0, 1.0])
spec_2 = array_spec.ArraySpec((1, 2), np.int32)
self.assertNotEqual(spec_1, spec_2)
self.assertNotEqual(spec_2, spec_1)
spec_2 = None
self.assertNotEqual(spec_1, spec_2)
self.assertNotEqual(spec_2, spec_1)
spec_2 = ()
self.assertNotEqual(spec_1, spec_2)
self.assertNotEqual(spec_2, spec_1)
def testNotEqualDifferentMaximum(self):
spec_1 = array_spec.BoundedArraySpec(
(1, 2), np.int32, minimum=0.0, maximum=2.0)
spec_2 = array_spec.BoundedArraySpec(
(1, 2), np.int32, minimum=[0.0, 0.0], maximum=[1.0, 1.0])
self.assertNotEqual(spec_1, spec_2)
def testRepr(self):
as_string = repr(array_spec.BoundedArraySpec(
(1, 2), np.int32, minimum=101.0, maximum=73.0))
self.assertIn("101", as_string)
self.assertIn("73", as_string)
def testValidateBounds(self):
spec = array_spec.BoundedArraySpec((2, 2), np.int32, minimum=5, maximum=10)
spec.validate(np.array([[5, 6], [8, 10]], dtype=np.int32))
with self.assertRaises(ValueError):
spec.validate(np.array([[5, 6], [8, 11]], dtype=np.int32))
with self.assertRaises(ValueError):
spec.validate(np.array([[4, 6], [8, 10]], dtype=np.int32))
def testGenerateValue(self):
spec = array_spec.BoundedArraySpec((2, 2), np.int32, minimum=5, maximum=10)
test_value = spec.generate_value()
spec.validate(test_value)
def testScalarBounds(self):
spec = array_spec.BoundedArraySpec((), np.float, minimum=0.0, maximum=1.0)
self.assertIsInstance(spec.minimum, np.ndarray)
self.assertIsInstance(spec.maximum, np.ndarray)
# Sanity check that numpy compares correctly to a scalar for an empty shape.
self.assertEqual(0.0, spec.minimum)
self.assertEqual(1.0, spec.maximum)
# Check that the spec doesn't fail its own input validation.
_ = array_spec.BoundedArraySpec(
spec.shape, spec.dtype, spec.minimum, spec.maximum)
if __name__ == "__main__":
absltest.main()
|
iexfinance/tests/test_account.py | jto-d/iexfinance | 653 | 11134859 | import pandas as pd
import pytest
from iexfinance.account import get_metadata, get_usage
@pytest.mark.skip
class TestAccount(object):
def test_usage_json_default(self):
# This function is defaulting to "messages" type due to bug
# in provider platform
data = get_usage()
assert isinstance(data, dict)
assert len(data) == 5
def test_usage_pandas(self):
data = get_usage(output_format="pandas")
assert isinstance(data, pd.DataFrame)
@pytest.mark.xfail(
reason="This endpoint incorrectly causes an error for " "accounts without rules"
)
def test_usage_param(self):
data = get_usage(quota_type="rules")
assert isinstance(data, dict)
def test_usage_fails_bad_param(self):
with pytest.raises(ValueError):
get_usage(quota_type="BADTYPE")
def test_metadata_json(self):
data = get_metadata()
assert isinstance(data, dict)
assert len(data) == 7
def test_metadata_pandas(self):
data = get_metadata(output_format="pandas")
assert isinstance(data, pd.DataFrame)
assert data.shape == (7, 1)
@pytest.mark.skip(reason="Not yet implemented by IEX")
def test_allow_pay_as_you_go(self):
pass
@pytest.mark.skip(reason="Not yet implemented by IEX")
def test_disallow_pay_as_you_go(self):
pass
|
tests/test_pexels.py | korymath/talk-generator | 110 | 11134882 |
import unittest
from talkgenerator.sources import pexels
class PexelsTest(unittest.TestCase):
def test_pexels_access(self):
images = pexels.search_photos("office")
self.assertTrue(len(images) > 0)
sources = [
image.get_source() for image in images if image.get_source() is not None
]
self.assertTrue(len(sources) > 0)
if __name__ == "__main__":
unittest.main()
|
src/python/turicreate/data_structures/sframe.py | cookingcodewithme/turicreate | 11,356 | 11134902 | # -*- coding: utf-8 -*-
# Copyright © 2017 Apple Inc. All rights reserved.
#
# Use of this source code is governed by a BSD-3-clause license that can
# be found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
"""
This module defines the SFrame class which provides the
ability to create, access and manipulate a remote scalable dataframe object.
SFrame acts similarly to pandas.DataFrame, but the data is completely immutable
and is stored column wise.
"""
from __future__ import print_function as _
from __future__ import division as _
from __future__ import absolute_import as _
from .._connect import main as glconnect
from .._cython.cy_flexible_type import infer_type_of_list
from .._cython.context import debug_trace as cython_context
from .._cython.cy_sframe import UnitySFrameProxy
from ..util import _is_non_string_iterable, _make_internal_url
from ..util import _infer_dbapi2_types
from ..util import _get_module_from_object, _pytype_to_printf
from ..visualization import _get_client_app_path
from .sarray import SArray, _create_sequential_sarray
from .. import aggregate
from .image import Image as _Image
from .._deps import pandas, numpy, HAS_PANDAS, HAS_NUMPY
from ..visualization import Plot
import array
from prettytable import PrettyTable
from textwrap import wrap
import datetime
import time
import itertools
import logging as _logging
import numbers
import sys
import six
import csv
from collections import Iterable as _Iterable
__all__ = ["SFrame"]
__LOGGER__ = _logging.getLogger(__name__)
FOOTER_STRS = [
"Note: Only the head of the SFrame is printed.",
"You can use print_rows(num_rows=m, num_columns=n) to print more rows and columns.",
]
LAZY_FOOTER_STRS = [
"Note: Only the head of the SFrame is printed. This SFrame is lazily evaluated.",
"You can use sf.materialize() to force materialization.",
]
if sys.version_info.major > 2:
long = int
def load_sframe(filename):
"""
Load an SFrame. The filename extension is used to determine the format
automatically. This function is particularly useful for SFrames previously
saved in binary format. For CSV imports the ``SFrame.read_csv`` function
provides greater control. If the SFrame is in binary format, ``filename`` is
actually a directory, created when the SFrame is saved.
Parameters
----------
filename : string
Location of the file to load. Can be a local path or a remote URL.
Returns
-------
out : SFrame
See Also
--------
SFrame.save, SFrame.read_csv
Examples
--------
>>> sf = turicreate.SFrame({'id':[1,2,3], 'val':['A','B','C']})
>>> sf.save('my_sframe') # 'my_sframe' is a directory
>>> sf_loaded = turicreate.load_sframe('my_sframe')
"""
sf = SFrame(data=filename)
return sf
def _get_global_dbapi_info(dbapi_module, conn):
"""
Fetches all needed information from the top-level DBAPI module,
guessing at the module if it wasn't passed as a parameter. Returns a
dictionary of all the needed variables. This is put in one place to
make sure the error message is clear if the module "guess" is wrong.
"""
module_given_msg = (
"The DBAPI2 module given ({0}) is missing the global\n"
+ "variable '{1}'. Please make sure you are supplying a module that\n"
+ "conforms to the DBAPI 2.0 standard (PEP 0249)."
)
module_not_given_msg = (
"Hello! I gave my best effort to find the\n"
+ "top-level module that the connection object you gave me came from.\n"
+ "I found '{0}' which doesn't have the global variable '{1}'.\n"
+ "To avoid this confusion, you can pass the module as a parameter using\n"
+ "the 'dbapi_module' argument to either from_sql or to_sql."
)
if dbapi_module is None:
dbapi_module = _get_module_from_object(conn)
module_given = False
else:
module_given = True
module_name = dbapi_module.__name__ if hasattr(dbapi_module, "__name__") else None
needed_vars = ["apilevel", "paramstyle", "Error", "DATETIME", "NUMBER", "ROWID"]
ret_dict = {}
ret_dict["module_name"] = module_name
for i in needed_vars:
tmp = None
try:
tmp = eval("dbapi_module." + i)
except AttributeError as e:
# Some DBs don't actually care about types, so they won't define
# the types. These are the ACTUALLY needed variables though
if i not in ["apilevel", "paramstyle", "Error"]:
pass
elif module_given:
raise AttributeError(module_given_msg.format(module_name, i))
else:
raise AttributeError(module_not_given_msg.format(module_name, i))
ret_dict[i] = tmp
try:
if ret_dict["apilevel"][0:3] != "2.0":
raise NotImplementedError(
"Unsupported API version "
+ str(ret_dict["apilevel"])
+ ". Only DBAPI 2.0 is supported."
)
except TypeError as e:
e.message = "Module's 'apilevel' value is invalid."
raise e
acceptable_paramstyles = ["qmark", "numeric", "named", "format", "pyformat"]
try:
if ret_dict["paramstyle"] not in acceptable_paramstyles:
raise TypeError("Module's 'paramstyle' value is invalid.")
except TypeError as e:
raise TypeError("Module's 'paramstyle' value is invalid.")
return ret_dict
def _convert_rows_to_builtin_seq(data):
# Flexible type expects a builtin type (like list or tuple) for conversion.
# Some DBAPI modules abstract rows as classes that act as single sequences
# and this allows these to work with flexible type. list is chosen to allow
# mutation in case we need to force cast any entries
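    # Illustrative example (an assumption about the driver): sqlite3.Row
    # objects act as sequences but are not lists, so they are converted to
    # plain lists here.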
if len(data) > 0 and type(data[0]) != list:
data = [list(row) for row in data]
return data
# Expects list of tuples
def _force_cast_sql_types(data, result_types, force_cast_cols):
if len(force_cast_cols) == 0:
return data
ret_data = []
for row in data:
for idx in force_cast_cols:
if row[idx] is not None and result_types[idx] != datetime.datetime:
row[idx] = result_types[idx](row[idx])
ret_data.append(row)
return ret_data
class SFrame(object):
"""
SFrame means scalable data frame. A tabular, column-mutable dataframe object that can
scale to big data. The data in SFrame is stored column-wise, and is
stored on persistent storage (e.g. disk) to avoid being constrained by
memory size. Each column in an SFrame is a size-immutable
:class:`~turicreate.SArray`, but SFrames are mutable in that columns can be
added and subtracted with ease. An SFrame essentially acts as an ordered
dict of SArrays.
Currently, we support constructing an SFrame from the following data
formats:
* csv file (comma separated value)
* sframe directory archive (A directory where an sframe was saved
previously)
* general text file (with csv parsing options, See :py:meth:`read_csv()`)
* a Python dictionary
* pandas.DataFrame
* JSON
and from the following sources:
* your local file system
* a network file system mounted locally
* HDFS
* Amazon S3
* HTTP(S).
Only basic examples of construction are covered here. For more information
and examples, please see the `User Guide <https://apple.github.io/turicreate/docs/user
guide/index.html#Working_with_data_Tabular_data>`_.
Parameters
----------
data : array | pandas.DataFrame | string | dict, optional
The actual interpretation of this field is dependent on the ``format``
parameter. If ``data`` is an array or Pandas DataFrame, the contents are
stored in the SFrame. If ``data`` is a string, it is interpreted as a
file. Files can be read from local file system or urls (local://,
hdfs://, s3://, http://).
format : string, optional
Format of the data. The default, "auto" will automatically infer the
input data format. The inference rules are simple: If the data is an
array or a dataframe, it is associated with 'array' and 'dataframe'
respectively. If the data is a string, it is interpreted as a file, and
the file extension is used to infer the file format. The explicit
options are:
- "auto"
- "array"
- "dict"
- "sarray"
- "dataframe"
- "csv"
- "tsv"
- "sframe".
See Also
--------
read_csv:
Create a new SFrame from a csv file. Preferred for text and CSV formats,
because it has a lot more options for controlling the parser.
save : Save an SFrame for later use.
Notes
-----
- When reading from HDFS on Linux we must guess the location of your java
installation. By default, we will use the location pointed to by the
JAVA_HOME environment variable. If this is not set, we check many common
installation paths. You may use two environment variables to override
this behavior. TURI_JAVA_HOME allows you to specify a specific java
installation and overrides JAVA_HOME. TURI_LIBJVM_DIRECTORY
overrides all and expects the exact directory that your preferred
libjvm.so file is located. Use this ONLY if you'd like to use a
non-standard JVM.
Examples
--------
>>> import turicreate
>>> from turicreate import SFrame
**Construction**
Construct an SFrame from a dataframe and transfers the dataframe object
across the network.
>>> df = pandas.DataFrame()
>>> sf = SFrame(data=df)
Construct an SFrame from a local csv file (only works for local server).
>>> sf = SFrame(data='~/mydata/foo.csv')
Construct an SFrame from a csv file on Amazon S3. This requires the
environment variables: *AWS_ACCESS_KEY_ID* and *AWS_SECRET_ACCESS_KEY* to be
set before the python session started.
>>> sf = SFrame(data='s3://mybucket/foo.csv')
Read from HDFS using a specific java installation (environment variable
only applies when using Linux)
>>> import os
>>> os.environ['TURI_JAVA_HOME'] = '/my/path/to/java'
>>> from turicreate import SFrame
>>> sf = SFrame("hdfs://mycluster.example.com:8020/user/myname/coolfile.txt")
An SFrame can be constructed from a dictionary of values or SArrays:
>>> sf = tc.SFrame({'id':[1,2,3],'val':['A','B','C']})
>>> sf
Columns:
id int
val str
Rows: 3
Data:
id val
0 1 A
1 2 B
2 3 C
Or equivalently:
>>> ids = SArray([1,2,3])
>>> vals = SArray(['A','B','C'])
>>> sf = SFrame({'id':ids,'val':vals})
It can also be constructed from an array of SArrays in which case column
names are automatically assigned.
>>> ids = SArray([1,2,3])
>>> vals = SArray(['A','B','C'])
>>> sf = SFrame([ids, vals])
>>> sf
Columns:
X1 int
X2 str
Rows: 3
Data:
X1 X2
0 1 A
1 2 B
2 3 C
If the SFrame is constructed from a list of values, an SFrame of a single
column is constructed.
>>> sf = SFrame([1,2,3])
>>> sf
Columns:
X1 int
Rows: 3
Data:
X1
0 1
1 2
2 3
**Parsing**
    The :py:func:`turicreate.SFrame.read_csv()` is quite powerful and can be
    used to import a variety of row-based formats.
First, some simple cases:
>>> !cat ratings.csv
user_id,movie_id,rating
10210,1,1
10213,2,5
10217,2,2
10102,1,3
10109,3,4
10117,5,2
10122,2,4
10114,1,5
10125,1,1
>>> tc.SFrame.read_csv('ratings.csv')
Columns:
user_id int
movie_id int
rating int
Rows: 9
Data:
+---------+----------+--------+
| user_id | movie_id | rating |
+---------+----------+--------+
| 10210 | 1 | 1 |
| 10213 | 2 | 5 |
| 10217 | 2 | 2 |
| 10102 | 1 | 3 |
| 10109 | 3 | 4 |
| 10117 | 5 | 2 |
| 10122 | 2 | 4 |
| 10114 | 1 | 5 |
| 10125 | 1 | 1 |
+---------+----------+--------+
[9 rows x 3 columns]
Delimiters can be specified, if "," is not the delimiter, for instance
space ' ' in this case. Only single character delimiters are supported.
>>> !cat ratings.csv
user_id movie_id rating
10210 1 1
10213 2 5
10217 2 2
10102 1 3
10109 3 4
10117 5 2
10122 2 4
10114 1 5
10125 1 1
>>> tc.SFrame.read_csv('ratings.csv', delimiter=' ')
By default, "NA" or a missing element are interpreted as missing values.
>>> !cat ratings2.csv
user,movie,rating
"tom",,1
harry,5,
jack,2,2
bill,,
>>> tc.SFrame.read_csv('ratings2.csv')
Columns:
user str
movie int
rating int
Rows: 4
Data:
+---------+-------+--------+
| user | movie | rating |
+---------+-------+--------+
| tom | None | 1 |
| harry | 5 | None |
| jack | 2 | 2 |
| missing | None | None |
+---------+-------+--------+
[4 rows x 3 columns]
    Furthermore, due to the dictionary and list types, it can handle parsing
    of JSON-like formats.
>>> !cat ratings3.csv
business, categories, ratings
"Restaurant 1", [1 4 9 10], {"funny":5, "cool":2}
"Restaurant 2", [], {"happy":2, "sad":2}
"Restaurant 3", [2, 11, 12], {}
>>> tc.SFrame.read_csv('ratings3.csv')
Columns:
business str
categories array
ratings dict
Rows: 3
Data:
+--------------+--------------------------------+-------------------------+
| business | categories | ratings |
+--------------+--------------------------------+-------------------------+
| Restaurant 1 | array('d', [1.0, 4.0, 9.0, ... | {'funny': 5, 'cool': 2} |
| Restaurant 2 | array('d') | {'sad': 2, 'happy': 2} |
| Restaurant 3 | array('d', [2.0, 11.0, 12.0]) | {} |
+--------------+--------------------------------+-------------------------+
[3 rows x 3 columns]
The list and dictionary parsers are quite flexible and can absorb a
    variety of loosely formatted inputs. Also, note that the list and dictionary
types are recursive, allowing for arbitrary values to be contained.
All these are valid lists:
>>> !cat interesting_lists.csv
list
[]
[1,2,3]
[1;2,3]
[1 2 3]
[{a:b}]
["c",d, e]
[[a]]
>>> tc.SFrame.read_csv('interesting_lists.csv')
Columns:
list list
Rows: 7
Data:
+-----------------+
| list |
+-----------------+
| [] |
| [1, 2, 3] |
| [1, 2, 3] |
| [1, 2, 3] |
| [{'a': 'b'}] |
| ['c', 'd', 'e'] |
| [['a']] |
+-----------------+
[7 rows x 1 columns]
All these are valid dicts:
>>> !cat interesting_dicts.csv
dict
{"classic":1,"dict":1}
{space:1 separated:1}
{emptyvalue:}
{}
{:}
{recursive1:[{a:b}]}
{:[{:[a]}]}
>>> tc.SFrame.read_csv('interesting_dicts.csv')
Columns:
dict dict
Rows: 7
Data:
+------------------------------+
| dict |
+------------------------------+
| {'dict': 1, 'classic': 1} |
| {'separated': 1, 'space': 1} |
| {'emptyvalue': None} |
| {} |
| {None: None} |
| {'recursive1': [{'a': 'b'}]} |
| {None: [{None: array('d')}]} |
+------------------------------+
[7 rows x 1 columns]
**Saving**
Save and load the sframe in native format.
>>> sf.save('mysframedir')
>>> sf2 = turicreate.load_sframe('mysframedir')
**Column Manipulation**
An SFrame is composed of a collection of columns of SArrays, and individual
SArrays can be extracted easily. For instance given an SFrame:
>>> sf = SFrame({'id':[1,2,3],'val':['A','B','C']})
>>> sf
Columns:
id int
val str
Rows: 3
Data:
id val
0 1 A
1 2 B
2 3 C
The "id" column can be extracted using:
>>> sf["id"]
dtype: int
Rows: 3
[1, 2, 3]
And can be deleted using:
>>> del sf["id"]
Multiple columns can be selected by passing a list of column names:
>>> sf = SFrame({'id':[1,2,3],'val':['A','B','C'],'val2':[5,6,7]})
>>> sf
Columns:
id int
val str
val2 int
Rows: 3
Data:
id val val2
0 1 A 5
1 2 B 6
2 3 C 7
>>> sf2 = sf[['id','val']]
>>> sf2
Columns:
id int
val str
Rows: 3
Data:
id val
0 1 A
1 2 B
2 3 C
You can also select columns using types or a list of types:
>>> sf2 = sf[int]
>>> sf2
Columns:
id int
val2 int
Rows: 3
Data:
id val2
0 1 5
1 2 6
2 3 7
Or a mix of types and names:
>>> sf2 = sf[['id', str]]
>>> sf2
Columns:
id int
val str
Rows: 3
Data:
id val
0 1 A
1 2 B
2 3 C
The same mechanism can be used to re-order columns:
>>> sf = SFrame({'id':[1,2,3],'val':['A','B','C']})
>>> sf
Columns:
id int
val str
Rows: 3
Data:
id val
0 1 A
1 2 B
2 3 C
>>> sf[['val','id']]
>>> sf
Columns:
val str
id int
Rows: 3
Data:
val id
0 A 1
1 B 2
2 C 3
**Element Access and Slicing**
SFrames can be accessed by integer keys just like a regular python list.
Such operations may not be fast on large datasets so looping over an SFrame
should be avoided.
>>> sf = SFrame({'id':[1,2,3],'val':['A','B','C']})
>>> sf[0]
{'id': 1, 'val': 'A'}
>>> sf[2]
{'id': 3, 'val': 'C'}
>>> sf[5]
IndexError: SFrame index out of range
Negative indices can be used to access elements from the tail of the array
>>> sf[-1] # returns the last element
{'id': 3, 'val': 'C'}
>>> sf[-2] # returns the second to last element
{'id': 2, 'val': 'B'}
The SFrame also supports the full range of python slicing operators:
>>> sf[1000:] # Returns an SFrame containing rows 1000 to the end
>>> sf[:1000] # Returns an SFrame containing rows 0 to row 999 inclusive
>>> sf[0:1000:2] # Returns an SFrame containing rows 0 to row 1000 in steps of 2
>>> sf[-100:] # Returns an SFrame containing last 100 rows
>>> sf[-100:len(sf):2] # Returns an SFrame containing last 100 rows in steps of 2
**Logical Filter**
An SFrame can be filtered using
>>> sframe[binary_filter]
where sframe is an SFrame and binary_filter is an SArray of the same length.
The result is a new SFrame which contains only rows of the SFrame where its
matching row in the binary_filter is non zero.
This permits the use of boolean operators that can be used to perform
logical filtering operations. For instance, given an SFrame
>>> sf
Columns:
id int
val str
Rows: 3
Data:
id val
0 1 A
1 2 B
2 3 C
>>> sf[(sf['id'] >= 1) & (sf['id'] <= 2)]
Columns:
id int
val str
Rows: 3
Data:
id val
0 1 A
1 2 B
See :class:`~turicreate.SArray` for more details on the use of the logical
filter.
This can also be used more generally to provide filtering capability which
is otherwise not expressible with simple boolean functions. For instance:
>>> sf[sf['id'].apply(lambda x: math.log(x) <= 1)]
Columns:
id int
val str
Rows: 3
Data:
id val
0 1 A
1 2 B
Or alternatively:
>>> sf[sf.apply(lambda x: math.log(x['id']) <= 1)]
Create an SFrame from a Python dictionary.
>>> from turicreate import SFrame
>>> sf = SFrame({'id':[1,2,3], 'val':['A','B','C']})
>>> sf
Columns:
id int
val str
Rows: 3
Data:
id val
0 1 A
1 2 B
2 3 C
"""
__slots__ = ["_proxy", "_cache"]
def __init__(self, data=None, format="auto", _proxy=None):
"""__init__(data=list(), format='auto')
Construct a new SFrame from a url or a pandas.DataFrame.
"""
# emit metrics for num_rows, num_columns, and type (local://, s3, hdfs, http)
if _proxy:
self.__proxy__ = _proxy
else:
self.__proxy__ = UnitySFrameProxy()
_format = None
if six.PY2 and isinstance(data, unicode):
data = data.encode("utf-8")
if format == "auto":
if HAS_PANDAS and isinstance(data, pandas.DataFrame):
_format = "dataframe"
elif isinstance(data, str) or (
sys.version_info.major < 3 and isinstance(data, unicode)
):
if data.endswith((".csv", ".csv.gz")):
_format = "csv"
elif data.endswith((".tsv", ".tsv.gz")):
_format = "tsv"
elif data.endswith((".txt", ".txt.gz")):
print(
"Assuming file is csv. For other delimiters, "
+ "please use `SFrame.read_csv`."
)
_format = "csv"
else:
_format = "sframe"
elif type(data) == SArray:
_format = "sarray"
elif isinstance(data, SFrame):
_format = "sframe_obj"
elif isinstance(data, dict):
_format = "dict"
elif _is_non_string_iterable(data):
_format = "array"
elif data is None:
_format = "empty"
else:
raise ValueError("Cannot infer input type for data " + str(data))
else:
_format = format
with cython_context():
if _format == "dataframe":
for c in data.columns.values:
self.add_column(SArray(data[c].values), str(c), inplace=True)
elif _format == "sframe_obj":
for col in data.column_names():
self.__proxy__.add_column(data[col].__proxy__, col)
elif _format == "sarray":
self.__proxy__.add_column(data.__proxy__, "")
elif _format == "array":
if len(data) > 0:
unique_types = set([type(x) for x in data if x is not None])
if len(unique_types) == 1 and SArray in unique_types:
for arr in data:
self.add_column(arr, inplace=True)
elif SArray in unique_types:
raise ValueError(
"Cannot create SFrame from mix of regular values and SArrays"
)
else:
self.__proxy__.add_column(SArray(data).__proxy__, "")
elif _format == "dict":
# Validate that every column is the same length.
if len(set(len(value) for value in data.values())) > 1:
# probably should be a value error. But we used to raise
# runtime error here...
raise RuntimeError("All column should be of the same length")
# split into SArray values and other iterable values.
# We convert the iterable values in bulk, and then add the sarray values as columns
sarray_keys = sorted(
key
for key, value in six.iteritems(data)
if isinstance(value, SArray)
)
self.__proxy__.load_from_dataframe(
{
key: value
for key, value in six.iteritems(data)
if not isinstance(value, SArray)
}
)
for key in sarray_keys:
self.__proxy__.add_column(data[key].__proxy__, key)
elif _format == "csv":
url = data
tmpsf = SFrame.read_csv(url, delimiter=",", header=True)
self.__proxy__ = tmpsf.__proxy__
elif _format == "tsv":
url = data
tmpsf = SFrame.read_csv(url, delimiter="\t", header=True)
self.__proxy__ = tmpsf.__proxy__
elif _format == "sframe":
url = _make_internal_url(data)
self.__proxy__.load_from_sframe_index(url)
elif _format == "empty":
pass
else:
raise ValueError("Unknown input type: " + format)
@staticmethod
def _infer_column_types_from_lines(first_rows):
if len(first_rows.column_names()) < 1:
print("Insufficient number of columns to perform type inference")
raise RuntimeError("Insufficient columns ")
if len(first_rows) < 1:
print("Insufficient number of rows to perform type inference")
raise RuntimeError("Insufficient rows")
# gets all the values column-wise
all_column_values_transposed = [
list(first_rows[col]) for col in first_rows.column_names()
]
# transpose
all_column_values = [list(x) for x in list(zip(*all_column_values_transposed))]
all_column_type_hints = [[type(t) for t in vals] for vals in all_column_values]
# collect the hints
# if every line was inferred to have a different number of elements, die
if len(set(len(x) for x in all_column_type_hints)) != 1:
print("Unable to infer column types. Defaulting to str")
return str
column_type_hints = all_column_type_hints[0]
# now perform type combining across rows
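        # illustrative example: per-row hints [[int, str], [float, type(None)]]
        # combine to [float, str]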
for i in range(1, len(all_column_type_hints)):
currow = all_column_type_hints[i]
for j in range(len(column_type_hints)):
# combine types
d = set([currow[j], column_type_hints[j]])
if len(d) == 1:
# easy case. both agree on the type
continue
if ((long in d) or (int in d)) and (float in d):
# one is an int, one is a float. its a float
column_type_hints[j] = float
elif (array.array in d) and (list in d):
# one is an array , one is a list. its a list
column_type_hints[j] = list
elif type(None) in d:
# one is a NoneType. assign to other type
if currow[j] != type(None):
column_type_hints[j] = currow[j]
else:
column_type_hints[j] = str
# final pass. everything which is still NoneType is now a str
for i in range(len(column_type_hints)):
if column_type_hints[i] == type(None):
column_type_hints[i] = str
return column_type_hints
@classmethod
def _read_csv_impl(
cls,
url,
delimiter=",",
header=True,
error_bad_lines=False,
comment_char="",
escape_char="\\",
double_quote=True,
quote_char='"',
skip_initial_space=True,
column_type_hints=None,
na_values=["NA"],
line_terminator="\n",
usecols=[],
nrows=None,
skiprows=0,
verbose=True,
store_errors=True,
nrows_to_infer=100,
true_values=[],
false_values=[],
_only_raw_string_substitutions=False,
**kwargs
):
"""
Constructs an SFrame from a CSV file or a path to multiple CSVs, and
returns a pair containing the SFrame and optionally
(if store_errors=True) a dict of filenames to SArrays
indicating for each file, what are the incorrectly parsed lines
encountered.
Parameters
----------
store_errors : bool
If true, the output errors dict will be filled.
See `read_csv` for the rest of the parameters.
"""
# Pandas argument compatibility
if "sep" in kwargs:
delimiter = kwargs["sep"]
del kwargs["sep"]
if "quotechar" in kwargs:
quote_char = kwargs["quotechar"]
del kwargs["quotechar"]
if "doublequote" in kwargs:
double_quote = kwargs["doublequote"]
del kwargs["doublequote"]
if "comment" in kwargs:
comment_char = kwargs["comment"]
del kwargs["comment"]
if comment_char is None:
comment_char = ""
if "lineterminator" in kwargs:
line_terminator = kwargs["lineterminator"]
del kwargs["lineterminator"]
if len(kwargs) > 0:
raise TypeError("Unexpected keyword arguments " + str(kwargs.keys()))
parsing_config = dict()
parsing_config["delimiter"] = delimiter
parsing_config["use_header"] = header
parsing_config["continue_on_failure"] = not error_bad_lines
parsing_config["comment_char"] = comment_char
parsing_config["escape_char"] = "\0" if escape_char is None else escape_char
parsing_config["use_escape_char"] = escape_char is None
parsing_config["double_quote"] = double_quote
parsing_config["quote_char"] = quote_char
parsing_config["skip_initial_space"] = skip_initial_space
parsing_config["store_errors"] = store_errors
parsing_config["line_terminator"] = line_terminator
parsing_config["output_columns"] = usecols
parsing_config["skip_rows"] = skiprows
parsing_config["true_values"] = true_values
parsing_config["false_values"] = false_values
parsing_config["only_raw_string_substitutions"] = _only_raw_string_substitutions
if type(na_values) is str:
na_values = [na_values]
if na_values is not None and len(na_values) > 0:
parsing_config["na_values"] = na_values
if nrows is not None:
parsing_config["row_limit"] = nrows
proxy = UnitySFrameProxy()
internal_url = _make_internal_url(url)
# Attempt to automatically detect the column types. Either produce a
# list of types; otherwise default to all str types.
column_type_inference_was_used = False
if column_type_hints is None:
try:
# Get the first nrows_to_infer rows (using all the desired arguments).
first_rows = SFrame.read_csv(
url,
nrows=nrows_to_infer,
column_type_hints=type(None),
header=header,
delimiter=delimiter,
comment_char=comment_char,
escape_char=escape_char,
double_quote=double_quote,
quote_char=quote_char,
skip_initial_space=skip_initial_space,
na_values=na_values,
line_terminator=line_terminator,
usecols=usecols,
skiprows=skiprows,
verbose=verbose,
true_values=true_values,
false_values=false_values,
_only_raw_string_substitutions=_only_raw_string_substitutions,
)
column_type_hints = SFrame._infer_column_types_from_lines(first_rows)
typelist = "[" + ",".join(t.__name__ for t in column_type_hints) + "]"
if verbose:
print("------------------------------------------------------")
print(
"Inferred types from first %d line(s) of file as "
% nrows_to_infer
)
print("column_type_hints=" + typelist)
print("If parsing fails due to incorrect types, you can correct")
print("the inferred type list above and pass it to read_csv in")
print("the column_type_hints argument")
print("------------------------------------------------------")
column_type_inference_was_used = True
except RuntimeError as e:
if type(e) == RuntimeError and (
"cancel" in str(e.args[0]) or "Cancel" in str(e.args[0])
):
raise e
# If the above fails, default back to str for all columns.
column_type_hints = str
if verbose:
print("Could not detect types. Using str for each column.")
if type(column_type_hints) is type:
type_hints = {"__all_columns__": column_type_hints}
elif type(column_type_hints) is list:
type_hints = dict(
list(
zip(
["__X%d__" % i for i in range(len(column_type_hints))],
column_type_hints,
)
)
)
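            # illustrative example: column_type_hints=[int, float] becomes
            # {'__X0__': int, '__X1__': float}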
elif type(column_type_hints) is dict:
# we need to fill in a potentially incomplete dictionary
try:
# Get the first nrows_to_infer rows (using all the desired arguments).
first_rows = SFrame.read_csv(
url,
nrows=nrows_to_infer,
column_type_hints=type(None),
header=header,
delimiter=delimiter,
comment_char=comment_char,
escape_char=escape_char,
double_quote=double_quote,
quote_char=quote_char,
skip_initial_space=skip_initial_space,
na_values=na_values,
line_terminator=line_terminator,
usecols=usecols,
skiprows=skiprows,
verbose=verbose,
true_values=true_values,
false_values=false_values,
_only_raw_string_substitutions=_only_raw_string_substitutions,
)
inferred_types = SFrame._infer_column_types_from_lines(first_rows)
# make a dict of column_name to type
inferred_types = dict(
list(zip(first_rows.column_names(), inferred_types))
)
# overwrite with the user's specified types
for key in column_type_hints:
inferred_types[key] = column_type_hints[key]
column_type_hints = inferred_types
except RuntimeError as e:
if type(e) == RuntimeError and (
"cancel" in str(e) or "Cancel" in str(e)
):
raise e
# If the above fails, default back to str for unmatched columns
if verbose:
print(
"Could not detect types. Using str for all unspecified columns."
)
type_hints = column_type_hints
else:
raise TypeError(
"Invalid type for column_type_hints. Must be a dictionary, list or a single type."
)
try:
if not verbose:
glconnect.get_server().set_log_progress(False)
with cython_context():
errors = proxy.load_from_csvs(internal_url, parsing_config, type_hints)
except Exception as e:
if type(e) == RuntimeError and "CSV parsing cancelled" in str(e.args[0]):
raise e
if column_type_inference_was_used:
# try again
if verbose:
print("Unable to parse the file with automatic type inference.")
print("Defaulting to column_type_hints=str")
type_hints = {"__all_columns__": str}
try:
with cython_context():
errors = proxy.load_from_csvs(
internal_url, parsing_config, type_hints
)
except:
glconnect.get_server().set_log_progress(True)
raise
else:
glconnect.get_server().set_log_progress(True)
raise
glconnect.get_server().set_log_progress(True)
return (cls(_proxy=proxy), {f: SArray(_proxy=es) for (f, es) in errors.items()})
@classmethod
def read_csv_with_errors(
cls,
url,
delimiter=",",
header=True,
comment_char="",
escape_char="\\",
double_quote=True,
quote_char='"',
skip_initial_space=True,
column_type_hints=None,
na_values=["NA"],
line_terminator="\n",
usecols=[],
nrows=None,
skiprows=0,
verbose=True,
nrows_to_infer=100,
true_values=[],
false_values=[],
_only_raw_string_substitutions=False,
**kwargs
):
"""
Constructs an SFrame from a CSV file or a path to multiple CSVs, and
returns a pair containing the SFrame and a dict of filenames to SArrays
indicating for each file, what are the incorrectly parsed lines
encountered.
Parameters
----------
url : string
Location of the CSV file or directory to load. If URL is a directory
or a "glob" pattern, all matching files will be loaded.
delimiter : string, optional
This describes the delimiter used for parsing csv files.
header : bool, optional
If true, uses the first row as the column names. Otherwise use the
default column names: 'X1, X2, ...'.
comment_char : string, optional
The character which denotes that the
remainder of the line is a comment.
escape_char : string, optional
Character which begins a C escape sequence. Defaults to backslash(\\)
Set to None to disable.
double_quote : bool, optional
If True, two consecutive quotes in a string are parsed to a single
quote.
quote_char : string, optional
Character sequence that indicates a quote.
skip_initial_space : bool, optional
Ignore extra spaces at the start of a field
column_type_hints : None, type, list[type], dict[string, type], optional
This provides type hints for each column. By default, this method
attempts to detect the type of each column automatically.
Supported types are int, float, str, list, dict, and array.array.
* If a single type is provided, the type will be
applied to all columns. For instance, column_type_hints=float
will force all columns to be parsed as float.
            * If a list of types is provided, the types apply
              to each column in order, e.g. [int, float, str]
will parse the first column as int, second as float and third as
string.
* If a dictionary of column name to type is provided,
each type value in the dictionary is applied to the key it
belongs to.
For instance {'user':int} will hint that the column called "user"
should be parsed as an integer, and the rest will be type inferred.
na_values : str | list of str, optional
A string or list of strings to be interpreted as missing values.
true_values : str | list of str, optional
A string or list of strings to be interpreted as 1
false_values : str | list of str, optional
A string or list of strings to be interpreted as 0
line_terminator : str, optional
A string to be interpreted as the line terminator. Defaults to "\\n"
which will also correctly match Mac, Linux and Windows line endings
("\\r", "\\n" and "\\r\\n" respectively)
usecols : list of str, optional
A subset of column names to output. If unspecified (default),
all columns will be read. This can provide performance gains if the
            number of columns is large. If the input file has no headers,
usecols=['X1','X3'] will read columns 1 and 3.
nrows : int, optional
If set, only this many rows will be read from the file.
skiprows : int, optional
If set, this number of rows at the start of the file are skipped.
verbose : bool, optional
If True, print the progress.
Returns
-------
out : tuple
The first element is the SFrame with good data. The second element
is a dictionary of filenames to SArrays indicating for each file,
what are the incorrectly parsed lines encountered.
See Also
--------
read_csv, SFrame
Examples
--------
>>> bad_url = 'https://static.turi.com/datasets/bad_csv_example.csv'
>>> (sf, bad_lines) = turicreate.SFrame.read_csv_with_errors(bad_url)
>>> sf
+---------+----------+--------+
| user_id | movie_id | rating |
+---------+----------+--------+
| 25904 | 1663 | 3 |
| 25907 | 1663 | 3 |
| 25923 | 1663 | 3 |
| 25924 | 1663 | 3 |
| 25928 | 1663 | 2 |
| ... | ... | ... |
+---------+----------+--------+
[98 rows x 3 columns]
>>> bad_lines
{'https://static.turi.com/datasets/bad_csv_example.csv': dtype: str
Rows: 1
['x,y,z,a,b,c']}
"""
return cls._read_csv_impl(
url,
delimiter=delimiter,
header=header,
error_bad_lines=False, # we are storing errors,
# thus we must not fail
# on bad lines
comment_char=comment_char,
escape_char=escape_char,
double_quote=double_quote,
quote_char=quote_char,
skip_initial_space=skip_initial_space,
column_type_hints=column_type_hints,
na_values=na_values,
line_terminator=line_terminator,
usecols=usecols,
nrows=nrows,
verbose=verbose,
skiprows=skiprows,
store_errors=True,
nrows_to_infer=nrows_to_infer,
true_values=true_values,
false_values=false_values,
_only_raw_string_substitutions=_only_raw_string_substitutions,
**kwargs
)
@classmethod
def read_csv(
cls,
url,
delimiter=",",
header=True,
error_bad_lines=False,
comment_char="",
escape_char="\\",
double_quote=True,
quote_char='"',
skip_initial_space=True,
column_type_hints=None,
na_values=["NA"],
line_terminator="\n",
usecols=[],
nrows=None,
skiprows=0,
verbose=True,
nrows_to_infer=100,
true_values=[],
false_values=[],
_only_raw_string_substitutions=False,
**kwargs
):
"""
Constructs an SFrame from a CSV file or a path to multiple CSVs.
Parameters
----------
url : string
Location of the CSV file or directory to load. If URL is a directory
or a "glob" pattern, all matching files will be loaded.
delimiter : string, optional
This describes the delimiter used for parsing csv files.
header : bool, optional
If true, uses the first row as the column names. Otherwise use the
default column names : 'X1, X2, ...'.
error_bad_lines : bool
If true, will fail upon encountering a bad line. If false, will
continue parsing skipping lines which fail to parse correctly.
A sample of the first 10 encountered bad lines will be printed.
comment_char : string, optional
The character which denotes that the remainder of the line is a
comment.
escape_char : string, optional
Character which begins a C escape sequence. Defaults to backslash(\\)
Set to None to disable.
double_quote : bool, optional
If True, two consecutive quotes in a string are parsed to a single
quote.
quote_char : string, optional
Character sequence that indicates a quote.
skip_initial_space : bool, optional
Ignore extra spaces at the start of a field
column_type_hints : None, type, list[type], dict[string, type], optional
This provides type hints for each column. By default, this method
attempts to detect the type of each column automatically.
Supported types are int, float, str, list, dict, and array.array.
* If a single type is provided, the type will be
applied to all columns. For instance, column_type_hints=float
will force all columns to be parsed as float.
            * If a list of types is provided, the types apply
              to each column in order, e.g. [int, float, str]
will parse the first column as int, second as float and third as
string.
* If a dictionary of column name to type is provided,
each type value in the dictionary is applied to the key it
belongs to.
For instance {'user':int} will hint that the column called "user"
should be parsed as an integer, and the rest will be type inferred.
na_values : str | list of str, optional
A string or list of strings to be interpreted as missing values.
true_values : str | list of str, optional
A string or list of strings to be interpreted as 1
false_values : str | list of str, optional
A string or list of strings to be interpreted as 0
line_terminator : str, optional
A string to be interpreted as the line terminator. Defaults to "\n"
which will also correctly match Mac, Linux and Windows line endings
("\\r", "\\n" and "\\r\\n" respectively)
usecols : list of str, optional
A subset of column names to output. If unspecified (default),
all columns will be read. This can provide performance gains if the
            number of columns is large. If the input file has no headers,
usecols=['X1','X3'] will read columns 1 and 3.
nrows : int, optional
If set, only this many rows will be read from the file.
skiprows : int, optional
If set, this number of rows at the start of the file are skipped.
verbose : bool, optional
If True, print the progress.
nrows_to_infer : integer
The number of rows used to infer column types.
Returns
-------
out : SFrame
See Also
--------
read_csv_with_errors, SFrame
Examples
--------
Read a regular csv file, with all default options, automatically
determine types:
>>> url = 'https://static.turi.com/datasets/rating_data_example.csv'
>>> sf = turicreate.SFrame.read_csv(url)
>>> sf
Columns:
user_id int
movie_id int
rating int
Rows: 10000
+---------+----------+--------+
| user_id | movie_id | rating |
+---------+----------+--------+
| 25904 | 1663 | 3 |
| 25907 | 1663 | 3 |
| 25923 | 1663 | 3 |
| 25924 | 1663 | 3 |
| 25928 | 1663 | 2 |
| ... | ... | ... |
+---------+----------+--------+
[10000 rows x 3 columns]
Read only the first 100 lines of the csv file:
>>> sf = turicreate.SFrame.read_csv(url, nrows=100)
>>> sf
Columns:
user_id int
movie_id int
rating int
Rows: 100
+---------+----------+--------+
| user_id | movie_id | rating |
+---------+----------+--------+
| 25904 | 1663 | 3 |
| 25907 | 1663 | 3 |
| 25923 | 1663 | 3 |
| 25924 | 1663 | 3 |
| 25928 | 1663 | 2 |
| ... | ... | ... |
+---------+----------+--------+
[100 rows x 3 columns]
Read all columns as str type
>>> sf = turicreate.SFrame.read_csv(url, column_type_hints=str)
>>> sf
Columns:
user_id str
movie_id str
rating str
Rows: 10000
+---------+----------+--------+
| user_id | movie_id | rating |
+---------+----------+--------+
| 25904 | 1663 | 3 |
| 25907 | 1663 | 3 |
| 25923 | 1663 | 3 |
| 25924 | 1663 | 3 |
| 25928 | 1663 | 2 |
| ... | ... | ... |
+---------+----------+--------+
[10000 rows x 3 columns]
Specify types for a subset of columns and leave the rest to be str.
>>> sf = turicreate.SFrame.read_csv(url,
... column_type_hints={
... 'user_id':int, 'rating':float
... })
>>> sf
Columns:
user_id str
movie_id str
rating float
Rows: 10000
+---------+----------+--------+
| user_id | movie_id | rating |
+---------+----------+--------+
| 25904 | 1663 | 3.0 |
| 25907 | 1663 | 3.0 |
| 25923 | 1663 | 3.0 |
| 25924 | 1663 | 3.0 |
| 25928 | 1663 | 2.0 |
| ... | ... | ... |
+---------+----------+--------+
[10000 rows x 3 columns]
Not treat first line as header:
>>> sf = turicreate.SFrame.read_csv(url, header=False)
>>> sf
Columns:
X1 str
X2 str
X3 str
Rows: 10001
+---------+----------+--------+
| X1 | X2 | X3 |
+---------+----------+--------+
| user_id | movie_id | rating |
| 25904 | 1663 | 3 |
| 25907 | 1663 | 3 |
| 25923 | 1663 | 3 |
| 25924 | 1663 | 3 |
| 25928 | 1663 | 2 |
| ... | ... | ... |
+---------+----------+--------+
[10001 rows x 3 columns]
Treat '3' as missing value:
>>> sf = turicreate.SFrame.read_csv(url, na_values=['3'], column_type_hints=str)
>>> sf
Columns:
user_id str
movie_id str
rating str
Rows: 10000
+---------+----------+--------+
| user_id | movie_id | rating |
+---------+----------+--------+
| 25904 | 1663 | None |
| 25907 | 1663 | None |
| 25923 | 1663 | None |
| 25924 | 1663 | None |
| 25928 | 1663 | 2 |
| ... | ... | ... |
+---------+----------+--------+
[10000 rows x 3 columns]
Throw error on parse failure:
>>> bad_url = 'https://static.turi.com/datasets/bad_csv_example.csv'
>>> sf = turicreate.SFrame.read_csv(bad_url, error_bad_lines=True)
RuntimeError: Runtime Exception. Unable to parse line "x,y,z,a,b,c"
Set error_bad_lines=False to skip bad lines
"""
return cls._read_csv_impl(
url,
delimiter=delimiter,
header=header,
error_bad_lines=error_bad_lines,
comment_char=comment_char,
escape_char=escape_char,
double_quote=double_quote,
quote_char=quote_char,
skip_initial_space=skip_initial_space,
column_type_hints=column_type_hints,
na_values=na_values,
line_terminator=line_terminator,
usecols=usecols,
nrows=nrows,
skiprows=skiprows,
verbose=verbose,
store_errors=False,
nrows_to_infer=nrows_to_infer,
true_values=true_values,
false_values=false_values,
_only_raw_string_substitutions=_only_raw_string_substitutions,
**kwargs
)[0]
@classmethod
def read_json(cls, url, orient="records"):
"""
Reads a JSON file representing a table into an SFrame.
Parameters
----------
url : string
Location of the CSV file or directory to load. If URL is a directory
or a "glob" pattern, all matching files will be loaded.
orient : string, optional. Either "records" or "lines"
If orient="records" the file is expected to contain a single JSON
array, where each array element is a dictionary. If orient="lines",
the file is expected to contain a JSON element per line.
Examples
--------
The orient parameter describes the expected input format of the JSON
file.
If orient="records", the JSON file is expected to contain a single
JSON Array where each array element is a dictionary describing the row.
For instance:
>>> !cat input.json
[{'a':1,'b':1}, {'a':2,'b':2}, {'a':3,'b':3}]
>>> SFrame.read_json('input.json', orient='records')
Columns:
a int
b int
Rows: 3
Data:
+---+---+
| a | b |
+---+---+
| 1 | 1 |
| 2 | 2 |
| 3 | 3 |
+---+---+
If orient="lines", the JSON file is expected to contain a JSON element
per line. If each line contains a dictionary, it is automatically
unpacked.
>>> !cat input.json
{'a':1,'b':1}
{'a':2,'b':2}
{'a':3,'b':3}
>>> g = SFrame.read_json('input.json', orient='lines')
Columns:
a int
b int
Rows: 3
Data:
+---+---+
| a | b |
+---+---+
| 1 | 1 |
| 2 | 2 |
| 3 | 3 |
+---+---+
If the lines are not dictionaries, the original format is maintained.
>>> !cat input.json
['a','b','c']
['d','e','f']
['g','h','i']
[1,2,3]
>>> g = SFrame.read_json('input.json', orient='lines')
Columns:
X1 list
Rows: 3
Data:
+-----------+
| X1 |
+-----------+
| [a, b, c] |
| [d, e, f] |
| [g, h, i] |
+-----------+
[3 rows x 1 columns]
"""
if orient == "records":
g = SArray.read_json(url)
if len(g) == 0:
return SFrame()
if g.dtype != dict:
raise RuntimeError(
"Invalid input JSON format. Expected list of dictionaries"
)
g = SFrame({"X1": g})
return g.unpack("X1", "")
elif orient == "lines":
g = cls.read_csv(
url,
header=False,
na_values=["null"],
true_values=["true"],
false_values=["false"],
_only_raw_string_substitutions=True,
)
if g.num_rows() == 0:
return SFrame()
if g.num_columns() != 1:
raise RuntimeError("Input JSON not of expected format")
if g["X1"].dtype == dict:
return g.unpack("X1", "")
else:
return g
else:
raise ValueError("Invalid value for orient parameter (" + str(orient) + ")")
@classmethod
def from_sql(
cls,
conn,
sql_statement,
params=None,
type_inference_rows=100,
dbapi_module=None,
column_type_hints=None,
cursor_arraysize=128,
):
"""
Convert the result of a SQL database query to an SFrame.
Parameters
----------
conn : dbapi2.Connection
A DBAPI2 connection object. Any connection object originating from
the 'connect' method of a DBAPI2-compliant package can be used.
sql_statement : str
The query to be sent to the database through the given connection.
No checks are performed on the `sql_statement`. Any side effects from
the query will be reflected on the database. If no result rows are
returned, an empty SFrame is created.
params : iterable | dict, optional
Parameters to substitute for any parameter markers in the
`sql_statement`. Be aware that the style of parameters may vary
between different DBAPI2 packages.
type_inference_rows : int, optional
The maximum number of rows to use for determining the column types of
the SFrame. These rows are held in Python until all column types are
determined or the maximum is reached.
dbapi_module : module | package, optional
The top-level DBAPI2 module/package that constructed the given
connection object. By default, a best guess of which module the
connection came from is made. In the event that this guess is wrong,
this will need to be specified.
column_type_hints : dict | list | type, optional
Specifies the types of the output SFrame. If a dict is given, it must
have result column names as keys, but need not have all of the result
column names. If a list is given, the length of the list must match
the number of result columns. If a single type is given, all columns
in the output SFrame will be this type. If the result type is
incompatible with the types given in this argument, a casting error
will occur.
cursor_arraysize : int, optional
The number of rows to fetch from the database at one time.
Returns
-------
out : SFrame
Examples
--------
>>> import sqlite3
>>> conn = sqlite3.connect('example.db')
>>> turicreate.SFrame.from_sql(conn, "SELECT * FROM foo")
Columns:
a int
b int
Rows: 1
Data:
+---+---+
| a | b |
+---+---+
| 1 | 2 |
+---+---+
[1 rows x 2 columns]
"""
# Mapping types is always the trickiest part about reading from a
# database, so the main complexity of this function concerns types.
# Much of the heavy-lifting of this is done by the DBAPI2 module, which
# holds the burden of the actual mapping from the database-specific
# type to a suitable Python type. The problem is that the type that the
# module chooses may not be supported by SFrame, and SFrame needs a
# list of types to be created, so we must resort to guessing the type
# of a column if the query result returns lots of NULL values. The goal
# of these steps is to fail as little as possible first, and then
# preserve data as much as we can.
#
# Here is how the type for an SFrame column is chosen:
#
# 1. The column_type_hints parameter is checked.
#
# Each column specified in the parameter will be forced to the
# hinted type via a Python-side cast before it is given to the
# SFrame. Only int, float, and str are allowed to be hints.
#
# 2. The types returned from the cursor are checked.
#
# The first non-None result for each column is taken to be the type
# of that column. The type is checked for whether SFrame supports
# it, or whether it can convert to a supported type. If the type is
# supported, no Python-side cast takes place. If unsupported, the
# SFrame column is set to str and the values are casted in Python to
# str before being added to the SFrame.
#
# 3. DB type codes provided by module are checked
#
# This case happens for any column that only had None values in the
# first `type_inference_rows` rows. In this case we check the
# type_code in the cursor description for the columns missing types.
# These types often do not match up with an SFrame-supported Python
# type, so the utility of this step is limited. It can only result
# in labeling datetime.datetime, float, or str. If a suitable
# mapping isn't found, we fall back to str.
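        #
        # Illustrative example (an assumption, not executed here): passing
        # column_type_hints={'user_id': int} adds a Python-side int() cast for
        # every 'user_id' value before the rows are appended to the SFrame.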
mod_info = _get_global_dbapi_info(dbapi_module, conn)
from .sframe_builder import SFrameBuilder
c = conn.cursor()
try:
if params is None:
c.execute(sql_statement)
else:
c.execute(sql_statement, params)
except mod_info["Error"] as e:
# The rollback method is considered optional by DBAPI2, but some
# modules that do implement it won't work again unless it is called
# if an error happens on a cursor.
if hasattr(conn, "rollback"):
conn.rollback()
raise e
c.arraysize = cursor_arraysize
result_desc = c.description
result_names = [i[0] for i in result_desc]
result_types = [None for i in result_desc]
cols_to_force_cast = set()
temp_vals = []
# Set any types that are given to us
col_name_to_num = {result_names[i]: i for i in range(len(result_names))}
if column_type_hints is not None:
if type(column_type_hints) is dict:
for k, v in column_type_hints.items():
col_num = col_name_to_num[k]
cols_to_force_cast.add(col_num)
result_types[col_num] = v
elif type(column_type_hints) is list:
if len(column_type_hints) != len(result_names):
__LOGGER__.warn(
"If column_type_hints is specified as a "
+ "list, it must be of the same size as the result "
+ "set's number of columns. Ignoring (use dict instead)."
)
else:
result_types = column_type_hints
cols_to_force_cast.update(range(len(result_desc)))
elif type(column_type_hints) is type:
result_types = [column_type_hints for i in result_desc]
cols_to_force_cast.update(range(len(result_desc)))
# Since we will be casting whatever we receive to the types given
# before submitting the values to the SFrame, we need to make sure that
# these are types that a "cast" makes sense, and we're not calling a
# constructor that expects certain input (e.g. datetime.datetime),
# since we could get lots of different input
hintable_types = [int, float, str]
if not all([i in hintable_types or i is None for i in result_types]):
raise TypeError(
"Only " + str(hintable_types) + " can be provided as type hints!"
)
# Perform type inference by checking to see what python types are
# returned from the cursor
if not all(result_types):
# Only test the first fetch{one,many} command since the only way it
# will raise an exception is if execute didn't produce a result set
try:
row = c.fetchone()
except mod_info["Error"] as e:
if hasattr(conn, "rollback"):
conn.rollback()
raise e
while row is not None:
# Assumes that things like dicts are not a "single sequence"
temp_vals.append(row)
val_count = 0
for val in row:
if result_types[val_count] is None and val is not None:
result_types[val_count] = type(val)
val_count += 1
if all(result_types) or len(temp_vals) >= type_inference_rows:
break
row = c.fetchone()
# This will be true if some columns have all missing values up to this
# point. Try using DBAPI2 type_codes to pick a suitable type. If this
# doesn't work, fall back to string.
if not all(result_types):
missing_val_cols = [i for i, v in enumerate(result_types) if v is None]
cols_to_force_cast.update(missing_val_cols)
inferred_types = _infer_dbapi2_types(c, mod_info)
cnt = 0
for i in result_types:
if i is None:
result_types[cnt] = inferred_types[cnt]
cnt += 1
sb = SFrameBuilder(result_types, column_names=result_names)
unsupported_cols = [
i for i, v in enumerate(sb.column_types()) if v is type(None)
]
if len(unsupported_cols) > 0:
cols_to_force_cast.update(unsupported_cols)
for i in unsupported_cols:
result_types[i] = str
sb = SFrameBuilder(result_types, column_names=result_names)
temp_vals = _convert_rows_to_builtin_seq(temp_vals)
sb.append_multiple(
_force_cast_sql_types(temp_vals, result_types, cols_to_force_cast)
)
rows = c.fetchmany()
while len(rows) > 0:
rows = _convert_rows_to_builtin_seq(rows)
sb.append_multiple(
_force_cast_sql_types(rows, result_types, cols_to_force_cast)
)
rows = c.fetchmany()
cls = sb.close()
try:
c.close()
except mod_info["Error"] as e:
if hasattr(conn, "rollback"):
conn.rollback()
raise e
return cls
def to_sql(
self,
conn,
table_name,
dbapi_module=None,
use_python_type_specifiers=False,
use_exact_column_names=True,
):
"""
Convert an SFrame to a single table in a SQL database.
This function does not attempt to create the table or check if a table
named `table_name` exists in the database. It simply assumes that
`table_name` exists in the database and appends to it.
`to_sql` can be thought of as a convenience wrapper around
parameterized SQL insert statements.
Parameters
----------
conn : dbapi2.Connection
A DBAPI2 connection object. Any connection object originating from
the 'connect' method of a DBAPI2-compliant package can be used.
table_name : str
The name of the table to append the data in this SFrame.
dbapi_module : module | package, optional
The top-level DBAPI2 module/package that constructed the given
connection object. By default, a best guess of which module the
connection came from is made. In the event that this guess is wrong,
this will need to be specified.
use_python_type_specifiers : bool, optional
If the DBAPI2 module's parameter marker style is 'format' or
'pyformat', attempt to use accurate type specifiers for each value
('s' for string, 'd' for integer, etc.). Many DBAPI2 modules simply
use 's' for all types if they use these parameter markers, so this is
False by default.
use_exact_column_names : bool, optional
Specify the column names of the SFrame when inserting its contents
into the DB. If the specified table does not have the exact same
column names as the SFrame, inserting the data will fail. If False,
the columns in the SFrame are inserted in order without care of the
schema of the DB table. True by default.
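        Examples
        --------
        A minimal sketch, assuming a table named 'foo' with columns matching
        this SFrame already exists in the database:
        >>> import sqlite3
        >>> conn = sqlite3.connect('example.db')
        >>> sf.to_sql(conn, 'foo')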
"""
mod_info = _get_global_dbapi_info(dbapi_module, conn)
c = conn.cursor()
col_info = list(zip(self.column_names(), self.column_types()))
if not use_python_type_specifiers:
_pytype_to_printf = lambda x: "s"
# DBAPI2 standard allows for five different ways to specify parameters
sql_param = {
"qmark": lambda name, col_num, col_type: "?",
"numeric": lambda name, col_num, col_type: ":" + str(col_num + 1),
"named": lambda name, col_num, col_type: ":" + str(name),
"format": lambda name, col_num, col_type: "%" + _pytype_to_printf(col_type),
"pyformat": lambda name, col_num, col_type: "%("
+ str(name)
+ ")"
+ _pytype_to_printf(col_type),
}
get_sql_param = sql_param[mod_info["paramstyle"]]
# form insert string
ins_str = "INSERT INTO " + str(table_name)
value_str = " VALUES ("
col_str = " ("
count = 0
for i in col_info:
col_str += i[0]
value_str += get_sql_param(i[0], count, i[1])
if count < len(col_info) - 1:
col_str += ","
value_str += ","
count += 1
col_str += ")"
value_str += ")"
if use_exact_column_names:
ins_str += col_str
ins_str += value_str
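        # Illustrative (assuming table 'foo' with columns a and b, and the
        # default use_exact_column_names=True): the 'qmark' paramstyle builds
        # "INSERT INTO foo (a,b) VALUES (?,?)", while 'named' builds
        # "INSERT INTO foo (a,b) VALUES (:a,:b)".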
# Some formats require values in an iterable, some a dictionary
if mod_info["paramstyle"] == "named" or mod_info["paramstyle"] == "pyformat":
prepare_sf_row = lambda x: x
else:
col_names = self.column_names()
prepare_sf_row = lambda x: [x[i] for i in col_names]
for i in self:
try:
c.execute(ins_str, prepare_sf_row(i))
except mod_info["Error"] as e:
if hasattr(conn, "rollback"):
conn.rollback()
raise e
conn.commit()
c.close()
def __hash__(self):
"""
Because we override `__eq__` we need to implement this function in Python 3.
Just make it match default behavior in Python 2.
"""
return id(self) // 16
def __add__(self, other):
"""
        Return the result of appending the `other` SFrame to this one.
"""
self = self.append(other)
return self
def __repr__(self):
"""
Returns a string description of the frame
"""
ret = self.__get_column_description__()
(is_empty, data_str) = self.__str_impl__()
if is_empty:
data_str = "\t[]"
if self.__has_size__():
ret = ret + "Rows: " + str(len(self)) + "\n\n"
else:
ret = ret + "Rows: Unknown" + "\n\n"
ret = ret + "Data:\n"
ret = ret + data_str
return ret
def __get_column_description__(self):
colnames = self.column_names()
coltypes = self.column_types()
ret = "Columns:\n"
if len(colnames) > 0:
for i in range(len(colnames)):
ret = ret + "\t" + colnames[i] + "\t" + coltypes[i].__name__ + "\n"
ret = ret + "\n"
else:
ret = ret + "\tNone\n\n"
return ret
def __get_pretty_tables__(
self,
wrap_text=False,
max_row_width=80,
max_column_width=30,
max_columns=20,
max_rows_to_display=60,
):
"""
Returns a list of pretty print tables representing the current SFrame.
If the number of columns is larger than max_columns, the last pretty
table will contain an extra column of "...".
Parameters
----------
wrap_text : bool, optional
max_row_width : int, optional
Max number of characters per table.
max_column_width : int, optional
Max number of characters per column.
max_columns : int, optional
Max number of columns per table.
max_rows_to_display : int, optional
Max number of rows to display.
Returns
-------
out : list[PrettyTable]
"""
if len(self) <= max_rows_to_display:
headsf = self.__copy__()
else:
headsf = self.head(max_rows_to_display)
if headsf.shape == (0, 0):
return [PrettyTable()]
# convert array.array column to list column so they print like [...]
# and not array('d', ...)
for col in headsf.column_names():
if headsf[col].dtype is array.array:
headsf[col] = headsf[col].astype(list)
def _value_to_str(value):
if type(value) is array.array:
return str(list(value))
elif type(value) is numpy.ndarray:
return str(value).replace("\n", " ")
elif type(value) is list:
return "[" + ", ".join(_value_to_str(x) for x in value) + "]"
else:
return str(value)
def _escape_space(s):
if sys.version_info.major == 3:
return "".join(
[
ch.encode("unicode_escape").decode() if ch.isspace() else ch
for ch in s
]
)
return "".join(
[ch.encode("string_escape") if ch.isspace() else ch for ch in s]
)
def _truncate_respect_unicode(s, max_length):
if len(s) <= max_length:
return s
else:
if sys.version_info.major < 3:
u = unicode(s, "utf-8", errors="replace")
return u[:max_length].encode("utf-8")
else:
return s[:max_length]
def _truncate_str(s, wrap_str=False):
"""
Truncate and optionally wrap the input string as unicode, replacing
any unconvertible characters with a replacement character.
"""
s = _escape_space(s)
if len(s) <= max_column_width:
if sys.version_info.major < 3:
return unicode(s, "utf-8", errors="replace")
else:
return s
else:
ret = ""
# if wrap_str is true, wrap the text and take at most 2 rows
if wrap_str:
wrapped_lines = wrap(s, max_column_width)
if len(wrapped_lines) == 1:
return wrapped_lines[0]
last_line = wrapped_lines[1]
if len(last_line) >= max_column_width:
last_line = _truncate_respect_unicode(
last_line, max_column_width - 4
)
ret = wrapped_lines[0] + "\n" + last_line + " ..."
else:
ret = _truncate_respect_unicode(s, max_column_width - 4) + "..."
if sys.version_info.major < 3:
return unicode(ret, "utf-8", errors="replace")
else:
return ret
columns = self.column_names()[:max_columns]
columns.reverse() # reverse the order of columns and we will pop from the end
num_column_of_last_table = 0
row_of_tables = []
# build a list of tables, each with at most max_columns columns,
# and each respecting max_row_width and max_column_width
while len(columns) > 0:
tbl = PrettyTable()
table_width = 0
num_column_of_last_table = 0
while len(columns) > 0:
col = columns.pop()
# check the max length of element in the column
if len(headsf) > 0:
col_width = min(
max_column_width, max(len(str(x)) for x in headsf[col])
)
else:
col_width = max_column_width
if table_width + col_width < max_row_width:
# truncate the header if necessary
header = _truncate_str(col, wrap_text)
tbl.add_column(
header,
[
_truncate_str(_value_to_str(x), wrap_text)
for x in headsf[col]
],
)
table_width = str(tbl).find("\n")
num_column_of_last_table += 1
else:
# the column does not fit in the current table, push it back to columns
columns.append(col)
break
tbl.align = "c"
row_of_tables.append(tbl)
# add a column of all "..." if there are more columns than displayed
if self.num_columns() > max_columns:
row_of_tables[-1].add_column("...", ["..."] * len(headsf))
num_column_of_last_table += 1
# add a row of all "..." if there are more rows than displayed
if self.__has_size__() and self.num_rows() > headsf.num_rows():
row_of_tables[-1].add_row(["..."] * num_column_of_last_table)
return row_of_tables
def print_rows(
self,
num_rows=10,
num_columns=40,
max_column_width=30,
max_row_width=80,
output_file=None,
):
"""
Print the first M rows and N columns of the SFrame in human readable
format.
Parameters
----------
num_rows : int, optional
Number of rows to print.
num_columns : int, optional
Number of columns to print.
max_column_width : int, optional
Maximum width of a column. Columns use fewer characters if possible.
max_row_width : int, optional
Maximum width of a printed row. Columns beyond this width wrap to a
new line. `max_row_width` is automatically reset to be the
larger of itself and `max_column_width`.
output_file: file, optional
The stream or file that receives the output. By default the output
goes to sys.stdout, but it can also be redirected to a file or a
string (using an object of type StringIO).
See Also
--------
head, tail
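Examples
--------
A minimal usage sketch (the SFrame below is illustrative; the table is
printed to ``output_file`` rather than returned):
>>> sf = turicreate.SFrame({'id': [1, 2, 3], 'val': ['A', 'B', 'C']})
>>> sf.print_rows(num_rows=2)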
"""
if output_file is None:
output_file = sys.stdout
max_row_width = max(max_row_width, max_column_width + 1)
printed_sf = self._imagecols_to_stringcols(num_rows)
row_of_tables = printed_sf.__get_pretty_tables__(
wrap_text=False,
max_rows_to_display=num_rows,
max_columns=num_columns,
max_column_width=max_column_width,
max_row_width=max_row_width,
)
footer = "[%d rows x %d columns]\n" % self.shape
print(
"\n".join([str(tb) for tb in row_of_tables]) + "\n" + footer,
file=output_file,
)
def _imagecols_to_stringcols(self, num_rows=10):
# A list of column types
types = self.column_types()
# A list of indexable column names
names = self.column_names()
# Constructing names of sframe columns that are of image type
image_column_names = [names[i] for i in range(len(names)) if types[i] == _Image]
# If there are image-type columns, copy the SFrame and cast the top num_rows of those columns to string
printed_sf = self.__copy__()
if len(image_column_names) > 0:
for t in names:
if t in image_column_names:
printed_sf[t] = self[t].astype(str)
return printed_sf.head(num_rows)
def drop_duplicates(self, subset):
"""
Returns an SFrame with duplicate rows removed.
Parameters
----------
subset : sequence of column labels
Use only these columns for identifying duplicates.
Examples
--------
>>> import turicreate as tc
>>> sf = tc.SFrame({'A': ['a', 'b', 'a', 'C'], 'B': ['b', 'a', 'b', 'D'], 'C': [1, 2, 1, 8]})
>>> sf.drop_duplicates(subset=["A","B"])
Columns:
A str
B str
C int
Rows: 3
Data:
+---+---+---+
| A | B | C |
+---+---+---+
| b | a | 2 |
| C | D | 8 |
| a | b | 1 |
+---+---+---+
[3 rows x 3 columns]
"""
result = all(elem in self.column_names() for elem in subset)
if result:
return self.groupby(
subset,
{
col: aggregate.SELECT_ONE(col)
for col in self.column_names()
if col not in subset
},
)
else:
raise TypeError("Not all subset columns in SFrame")
def __str_impl__(self, num_rows=10, footer=True):
"""
Returns a string containing the first num_rows elements of the frame, along
with a description of the frame.
"""
MAX_ROWS_TO_DISPLAY = num_rows
printed_sf = self._imagecols_to_stringcols(MAX_ROWS_TO_DISPLAY)
row_of_tables = printed_sf.__get_pretty_tables__(
wrap_text=False, max_rows_to_display=MAX_ROWS_TO_DISPLAY
)
is_empty = len(printed_sf) == 0
if not footer:
return (is_empty, "\n".join([str(tb) for tb in row_of_tables]))
if self.__has_size__():
footer = "[%d rows x %d columns]\n" % self.shape
if self.num_rows() > MAX_ROWS_TO_DISPLAY:
footer += "\n".join(FOOTER_STRS)
else:
footer = "[? rows x %d columns]\n" % self.num_columns()
footer += "\n".join(LAZY_FOOTER_STRS)
return (is_empty, "\n".join([str(tb) for tb in row_of_tables]) + "\n" + footer)
def __str__(self, num_rows=10, footer=True):
"""
Returns a string containing the first num_rows rows of the frame
(10 by default), along with a description of the frame.
"""
return self.__str_impl__(num_rows, footer)[1]
def _repr_html_(self):
MAX_ROWS_TO_DISPLAY = 10
printed_sf = self._imagecols_to_stringcols(MAX_ROWS_TO_DISPLAY)
row_of_tables = printed_sf.__get_pretty_tables__(
wrap_text=True,
max_row_width=120,
max_columns=40,
max_column_width=25,
max_rows_to_display=MAX_ROWS_TO_DISPLAY,
)
if self.__has_size__():
footer = "[%d rows x %d columns]<br/>" % self.shape
if self.num_rows() > MAX_ROWS_TO_DISPLAY:
footer += "<br/>".join(FOOTER_STRS)
else:
footer = "[? rows x %d columns]<br/>" % self.num_columns()
footer += "<br/>".join(LAZY_FOOTER_STRS)
begin = '<div style="max-height:1000px;max-width:1500px;overflow:auto;">'
end = "\n</div>"
return (
begin
+ "\n".join([tb.get_html_string(format=True) for tb in row_of_tables])
+ "\n"
+ footer
+ end
)
def __nonzero__(self):
"""
Returns true if the frame is not empty.
"""
return self.num_rows() != 0
def __len__(self):
"""
Returns the number of rows of the sframe.
"""
return self.num_rows()
def __copy__(self):
"""
Returns a shallow copy of the sframe.
"""
return self.select_columns(self.column_names())
def __deepcopy__(self, memo):
"""
Returns a deep copy of the sframe. As the data in an SFrame is
immutable, this is identical to __copy__.
"""
return self.__copy__()
def copy(self):
"""
Returns a shallow copy of the sframe.
"""
return self.__copy__()
def __eq__(self, other):
raise NotImplementedError
def __ne__(self, other):
raise NotImplementedError
def _row_selector(self, other):
"""
Where other is an SArray of identical length as the current Frame,
this returns a selection of a subset of rows in the current SFrame
where the corresponding row in the selector is non-zero.
"""
if type(other) is SArray:
if self.__has_size__() and other.__has_size__() and len(other) != len(self):
raise IndexError(
"Cannot perform logical indexing on arrays of different length."
)
with cython_context():
return SFrame(_proxy=self.__proxy__.logical_filter(other.__proxy__))
@property
def dtype(self):
"""
The type of each column.
Returns
-------
out : list[type]
Column types of the SFrame.
See Also
--------
column_types
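Examples
--------
A minimal sketch; types are listed in column order (representation shown
is indicative):
>>> sf = turicreate.SFrame({'id': [1, 2], 'val': ['A', 'B']})
>>> sf.dtype
[<class 'int'>, <class 'str'>]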
"""
return self.column_types()
def num_rows(self):
"""
The number of rows in this SFrame.
Returns
-------
out : int
Number of rows in the SFrame.
See Also
--------
num_columns
"""
return self.__proxy__.num_rows()
def num_columns(self):
"""
The number of columns in this SFrame.
Returns
-------
out : int
Number of columns in the SFrame.
See Also
--------
num_rows
"""
return self.__proxy__.num_columns()
def column_names(self):
"""
The name of each column in the SFrame.
Returns
-------
out : list[string]
Column names of the SFrame.
See Also
--------
rename
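Examples
--------
A minimal sketch (column order shown is indicative):
>>> sf = turicreate.SFrame({'id': [1, 2], 'val': ['A', 'B']})
>>> sf.column_names()
['id', 'val']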
"""
return self.__proxy__.column_names()
def column_types(self):
"""
The type of each column in the SFrame.
Returns
-------
out : list[type]
Column types of the SFrame.
See Also
--------
dtype
"""
return self.__proxy__.dtype()
def head(self, n=10):
"""
The first n rows of the SFrame.
Parameters
----------
n : int, optional
The number of rows to fetch.
Returns
-------
out : SFrame
A new SFrame which contains the first n rows of the current SFrame
See Also
--------
tail, print_rows
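Examples
--------
A minimal sketch:
>>> sf = turicreate.SFrame({'id': range(100)})
>>> first_three = sf.head(3)
>>> len(first_three)
3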
"""
return SFrame(_proxy=self.__proxy__.head(n))
def to_dataframe(self):
"""
Convert this SFrame to pandas.DataFrame.
This operation will construct a pandas.DataFrame in memory. Care must
be taken when the size of the returned object is large.
Returns
-------
out : pandas.DataFrame
The dataframe which contains all rows of SFrame
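Examples
--------
A minimal sketch (assumes ``pandas`` is installed; column order is
indicative):
>>> sf = turicreate.SFrame({'id': [1, 2], 'val': ['A', 'B']})
>>> df = sf.to_dataframe()
>>> list(df.columns)
['id', 'val']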
"""
from ..toolkits.image_classifier._evaluation import _image_resize
assert HAS_PANDAS, "pandas is not installed."
df = pandas.DataFrame()
for i in range(self.num_columns()):
column_name = self.column_names()[i]
if self.column_types()[i] == _Image:
df[column_name] = [
_image_resize(x[column_name])._to_pil_image()
for x in self.select_columns([column_name])
]
else:
df[column_name] = list(self[column_name])
if len(df[column_name]) == 0:
column_type = self.column_types()[i]
if column_type in (array.array, type(None)):
column_type = "object"
df[column_name] = df[column_name].astype(column_type)
return df
def to_numpy(self):
"""
Converts this SFrame to a numpy array
This operation will construct a numpy array in memory. Care must
be taken when the size of the returned object is large.
Returns
-------
out : numpy.ndarray
A Numpy Array containing all the values of the SFrame
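Examples
--------
A minimal sketch (assumes ``numpy`` is installed); each SFrame row becomes
one row of the array:
>>> sf = turicreate.SFrame({'a': [1, 2, 3], 'b': [4, 5, 6]})
>>> sf.to_numpy().shape
(3, 2)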
"""
assert HAS_NUMPY, "numpy is not installed."
import numpy
return numpy.transpose(numpy.asarray([self[x] for x in self.column_names()]))
def tail(self, n=10):
"""
The last n rows of the SFrame.
Parameters
----------
n : int, optional
The number of rows to fetch.
Returns
-------
out : SFrame
A new SFrame which contains the last n rows of the current SFrame
See Also
--------
head, print_rows
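Examples
--------
A minimal sketch:
>>> sf = turicreate.SFrame({'id': range(100)})
>>> last_five = sf.tail(5)
>>> len(last_five)
5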
"""
return SFrame(_proxy=self.__proxy__.tail(n))
def apply(self, fn, dtype=None):
"""
Transform each row to an :class:`~turicreate.SArray` according to a
specified function. Returns a new SArray of ``dtype`` where each element
is the result of `fn(x)` applied to a single row `x` of the SFrame,
represented as a dictionary. The ``fn`` should return exactly one value
which can be cast into type ``dtype``. If ``dtype`` is not specified, the
first 10 rows of the SFrame are used to guess the target data type.
Parameters
----------
fn : function
The function to transform each row of the SFrame. The return
type should be convertible to `dtype` if `dtype` is not None.
This can also be a toolkit extension function which is compiled
as a native shared library using SDK.
dtype : dtype, optional
The dtype of the new SArray. If None, the first 10
rows of the SFrame are used to guess the target
data type.
Returns
-------
out : SArray
The SArray transformed by fn. Each element of the SArray is of
type ``dtype``
Examples
--------
Concatenate strings from several columns:
>>> sf = turicreate.SFrame({'user_id': [1, 2, 3], 'movie_id': [3, 3, 6],
'rating': [4, 5, 1]})
>>> sf.apply(lambda x: str(x['user_id']) + str(x['movie_id']) + str(x['rating']))
dtype: str
Rows: 3
['134', '235', '361']
"""
assert callable(fn), "Input must be callable"
test_sf = self[:10]
dryrun = [fn(row) for row in test_sf]
if dtype is None:
dtype = SArray(dryrun).dtype
seed = abs(hash("%0.20f" % time.time())) % (2 ** 31)
nativefn = None
try:
from .. import extensions as extensions
nativefn = extensions._build_native_function_call(fn)
except:
pass
if nativefn is not None:
# fn compiled to a native (toolkit extension) function; use the native execution path
with cython_context():
return SArray(
_proxy=self.__proxy__.transform_native(nativefn, dtype, seed)
)
with cython_context():
return SArray(_proxy=self.__proxy__.transform(fn, dtype, seed))
def flat_map(self, column_names, fn, column_types="auto", seed=None):
"""
Map each row of the SFrame to multiple rows in a new SFrame via a
function.
The output of `fn` must have type List[List[...]]. Each inner list
will be a single row in the new output, and the collection of these
rows within the outer list make up the data for the output SFrame.
All rows must have the same length and the same order of types to
make sure the result columns are homogeneously typed. For example, if
the first element emitted into the outer list by `fn` is
[43, 2.3, 'string'], then all other elements emitted into the outer
list must be a list with three elements, where the first is an int,
second is a float, and third is a string. If column_types is not
specified, the first 10 rows of the SFrame are used to determine the
column types of the returned sframe.
Parameters
----------
column_names : list[str]
The column names for the returned SFrame.
fn : function
The function that maps each of the sframe row into multiple rows,
returning List[List[...]]. All outputted rows must have the same
length and order of types.
column_types : list[type], optional
The column types of the output SFrame. Default value will be
automatically inferred by running `fn` on the first 10 rows of the
input. If the types cannot be inferred from the first 10 rows, an
error is raised.
seed : int, optional
Used as the seed if a random number generator is included in `fn`.
Returns
-------
out : SFrame
A new SFrame containing the results of the flat_map of the
original SFrame.
Examples
---------
Repeat each row according to the value in the 'number' column.
>>> sf = turicreate.SFrame({'letter': ['a', 'b', 'c'],
... 'number': [1, 2, 3]})
>>> sf.flat_map(['number', 'letter'],
... lambda x: [list(x.values()) for i in range(x['number'])])
+--------+--------+
| number | letter |
+--------+--------+
| 1 | a |
| 2 | b |
| 2 | b |
| 3 | c |
| 3 | c |
| 3 | c |
+--------+--------+
[6 rows x 2 columns]
"""
assert callable(fn), "Input must be callable"
if seed is None:
seed = abs(hash("%0.20f" % time.time())) % (2 ** 31)
# determine the column_types
if column_types == "auto":
types = set()
sample = self[0:10]
results = [fn(row) for row in sample]
for rows in results:
if type(rows) is not list:
raise TypeError(
"Output type of the lambda function must be a list of lists"
)
# note: this skips empty lists
for row in rows:
if type(row) is not list:
raise TypeError(
"Output type of the lambda function must be a list of lists"
)
types.add(tuple([type(v) for v in row]))
if len(types) == 0:
raise TypeError(
"Could not infer output column types from the first ten rows "
+ "of the SFrame. Please use the 'column_types' parameter to "
+ "set the types."
)
if len(types) > 1:
raise TypeError("Mapped rows must have the same length and types")
column_types = list(types.pop())
assert type(column_types) is list, "'column_types' must be a list."
assert len(column_types) == len(
column_names
), "Number of output columns must match the size of column names"
with cython_context():
return SFrame(
_proxy=self.__proxy__.flat_map(fn, column_names, column_types, seed)
)
def sample(self, fraction, seed=None, exact=False):
"""
Sample a fraction of the current SFrame's rows.
Parameters
----------
fraction : float
Fraction of the rows to fetch. Must be between 0 and 1.
if exact is False (default), the number of rows returned is
approximately the fraction times the number of rows.
seed : int, optional
Seed for the random number generator used to sample.
exact: bool, optional
Defaults to False. If exact=True, an exact fraction is returned,
but at a performance penalty.
Returns
-------
out : SFrame
A new SFrame containing sampled rows of the current SFrame.
Examples
--------
Suppose we have an SFrame with 6,145 rows.
>>> import random
>>> sf = SFrame({'id': range(0, 6145)})
Retrieve about 30% of the SFrame rows with repeatable results by
setting the random seed.
>>> len(sf.sample(.3, seed=5))
1783
"""
if seed is None:
seed = abs(hash("%0.20f" % time.time())) % (2 ** 31)
if fraction > 1 or fraction < 0:
raise ValueError("Invalid sampling rate: " + str(fraction))
if self.num_rows() == 0 or self.num_columns() == 0:
return self
else:
with cython_context():
return SFrame(_proxy=self.__proxy__.sample(fraction, seed, exact))
def shuffle(self):
"""
Randomly shuffles the rows of the SFrame.
Returns
-------
out : SFrame
An SFrame with all the same rows but with the rows in a random order.
Examples
--------
>>> sf = turicreate.SFrame({"nums": [1, 2, 3, 4],
"letters": ["a", "b", "c", "d"]})
>>> shuffled_sf = sf.shuffle()
>>> print(shuffled_sf)
+---------+------+
| letters | nums |
+---------+------+
| d | 4 |
| c | 3 |
| a | 1 |
| b | 2 |
+---------+------+
[4 rows x 2 columns]
"""
return SFrame(_proxy=self.__proxy__.shuffle())
def random_split(self, fraction, seed=None, exact=False):
"""
Randomly split the rows of an SFrame into two SFrames. The first SFrame
contains *M* rows, sampled uniformly (without replacement) from the
original SFrame. *M* is approximately the fraction times the original
number of rows. The second SFrame contains the remaining rows of the
original SFrame.
An exact fraction partition can be optionally obtained by setting
exact=True.
Parameters
----------
fraction : float
Fraction of the rows to fetch. Must be between 0 and 1.
if exact is False (default), the number of rows returned is
approximately the fraction times the number of rows.
seed : int, optional
Seed for the random number generator used to split.
exact: bool, optional
Defaults to False. If exact=True, an exact fraction is returned,
but at a performance penalty.
Returns
-------
out : tuple [SFrame]
Two new SFrames.
Examples
--------
Suppose we have an SFrame with 1,024 rows and we want to randomly split
it into training and testing datasets with about a 90%/10% split.
>>> sf = turicreate.SFrame({'id': range(1024)})
>>> sf_train, sf_test = sf.random_split(.9, seed=5)
>>> print(len(sf_train), len(sf_test))
922 102
"""
if fraction > 1 or fraction < 0:
raise ValueError("Invalid sampling rate: " + str(fraction))
if self.num_rows() == 0 or self.num_columns() == 0:
return (SFrame(), SFrame())
if seed is None:
# Include the nanosecond component as well.
seed = abs(hash("%0.20f" % time.time())) % (2 ** 31)
# The server side requires this to be an int, so cast if we can
try:
seed = int(seed)
except ValueError:
raise ValueError("The 'seed' parameter must be of type int.")
with cython_context():
proxy_pair = self.__proxy__.random_split(fraction, seed, exact)
return (
SFrame(data=[], _proxy=proxy_pair[0]),
SFrame(data=[], _proxy=proxy_pair[1]),
)
def topk(self, column_name, k=10, reverse=False):
"""
Get the top k rows according to the given column. The result is
sorted by `column_name` in the given order (default is descending).
When `k` is small, `topk` is more efficient than `sort`.
Parameters
----------
column_name : string
The column to sort on
k : int, optional
The number of rows to return
reverse : bool, optional
If True, return the top k rows in ascending order, otherwise, in
descending order.
Returns
-------
out : SFrame
an SFrame containing the top k rows sorted by column_name.
See Also
--------
sort
Examples
--------
>>> sf = turicreate.SFrame({'id': range(1000)})
>>> sf['value'] = -sf['id']
>>> sf.topk('id', k=3)
+--------+--------+
| id | value |
+--------+--------+
| 999 | -999 |
| 998 | -998 |
| 997 | -997 |
+--------+--------+
[3 rows x 2 columns]
>>> sf.topk('value', k=3)
+--------+--------+
| id | value |
+--------+--------+
| 1 | -1 |
| 2 | -2 |
| 3 | -3 |
+--------+--------+
[3 rows x 2 columns]
"""
if type(column_name) is not str:
raise TypeError("column_name must be a string")
sf = self[self[column_name].is_topk(k, reverse)]
return sf.sort(column_name, ascending=reverse)
def save(self, filename, format=None):
"""
Save the SFrame to a file system for later use.
Parameters
----------
filename : string
The location to save the SFrame. Either a local directory or a
remote URL. If the format is 'binary', a directory will be created
at the location which will contain the sframe.
format : {'binary', 'csv', 'json'}, optional
Format in which to save the SFrame. Binary saved SFrames can be
loaded much faster and without any format conversion losses. If not
given, the format is inferred from the filename: names ending in
'.csv' or '.csv.gz' are saved as 'csv', names ending in '.json' are
saved as 'json', and anything else is saved as 'binary'.
See export_csv for more csv saving options.
See Also
--------
load_sframe, SFrame
Examples
--------
>>> # Save the sframe into binary format
>>> sf.save('data/training_data_sframe')
>>> # Save the sframe into csv format
>>> sf.save('data/training_data.csv', format='csv')
"""
if format is None:
if filename.endswith((".csv", ".csv.gz")):
format = "csv"
elif filename.endswith((".json")):
format = "json"
else:
format = "binary"
else:
if format == "csv":
if not filename.endswith((".csv", ".csv.gz")):
filename = filename + ".csv"
elif format != "binary" and format != "json":
raise ValueError(
"Invalid format: {}. Supported formats are 'csv' and 'binary' and 'json'".format(
format
)
)
# Save the SFrame
url = _make_internal_url(filename)
with cython_context():
if format == "binary":
self.__proxy__.save(url)
elif format == "csv":
assert filename.endswith((".csv", ".csv.gz"))
self.__proxy__.save_as_csv(url, {})
elif format == "json":
self.export_json(url)
else:
raise ValueError("Unsupported format: {}".format(format))
def export_csv(
self,
filename,
delimiter=",",
line_terminator="\n",
header=True,
quote_level=csv.QUOTE_NONNUMERIC,
double_quote=True,
escape_char="\\",
quote_char='"',
na_rep="",
file_header="",
file_footer="",
line_prefix="",
_no_prefix_on_first_value=False,
**kwargs
):
"""
Writes an SFrame to a CSV file.
Parameters
----------
filename : string
The location to save the CSV.
delimiter : string, optional
This describes the delimiter used for writing csv files.
line_terminator: string, optional
The newline character
header : bool, optional
If true, the column names are emitted as a header.
quote_level: csv.QUOTE_ALL | csv.QUOTE_NONE | csv.QUOTE_NONNUMERIC, optional
The quoting level. If csv.QUOTE_ALL, every field is quoted.
If csv.QUOTE_NONE, no field is quoted. If csv.QUOTE_NONNUMERIC, only
non-numeric fields are quoted. csv.QUOTE_MINIMAL is interpreted as
csv.QUOTE_NONNUMERIC.
double_quote : bool, optional
If True, quotes are escaped as two consecutive quotes
escape_char : string, optional
Character which begins a C escape sequence
quote_char: string, optional
Character used to quote fields
na_rep: string, optional
The value used to denote a missing value.
file_header: string, optional
A string printed to the start of the file
file_footer: string, optional
A string printed to the end of the file
line_prefix: string, optional
A string printed at the start of each value line
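Examples
--------
A minimal usage sketch (the output path and option values are
illustrative):
>>> sf = turicreate.SFrame({'id': [1, 2], 'val': ['A', 'B']})
>>> sf.export_csv('data/output.csv', delimiter='|', na_rep='NA')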
"""
# Pandas argument compatibility
if "sep" in kwargs:
delimiter = kwargs["sep"]
del kwargs["sep"]
if "quotechar" in kwargs:
quote_char = kwargs["quotechar"]
del kwargs["quotechar"]
if "doublequote" in kwargs:
double_quote = kwargs["doublequote"]
del kwargs["doublequote"]
if "lineterminator" in kwargs:
line_terminator = kwargs["lineterminator"]
del kwargs["lineterminator"]
if len(kwargs) > 0:
raise TypeError("Unexpected keyword arguments " + str(list(kwargs.keys())))
write_csv_options = {}
write_csv_options["delimiter"] = delimiter
write_csv_options["escape_char"] = escape_char
write_csv_options["double_quote"] = double_quote
write_csv_options["quote_char"] = quote_char
if quote_level == csv.QUOTE_MINIMAL:
write_csv_options["quote_level"] = 0
elif quote_level == csv.QUOTE_ALL:
write_csv_options["quote_level"] = 1
elif quote_level == csv.QUOTE_NONNUMERIC:
write_csv_options["quote_level"] = 2
elif quote_level == csv.QUOTE_NONE:
write_csv_options["quote_level"] = 3
write_csv_options["header"] = header
write_csv_options["line_terminator"] = line_terminator
write_csv_options["na_value"] = na_rep
write_csv_options["file_header"] = file_header
write_csv_options["file_footer"] = file_footer
write_csv_options["line_prefix"] = line_prefix
# undocumented option. Disables line prefix on the first value line
write_csv_options["_no_prefix_on_first_value"] = _no_prefix_on_first_value
url = _make_internal_url(filename)
self.__proxy__.save_as_csv(url, write_csv_options)
def export_json(self, filename, orient="records"):
"""
Writes an SFrame to a JSON file.
Parameters
----------
filename : string
The location to save the JSON file.
orient : string, optional. Either "records" or "lines"
If orient="records" the file is saved as a single JSON array.
If orient="lines", the file is saves as a JSON value per line.
Examples
--------
The orient parameter describes the expected input format of the JSON
file.
If orient="records", the output will be a single JSON Array where
each array element is a dictionary describing the row.
>>> g
Columns:
a int
b int
Rows: 3
Data:
+---+---+
| a | b |
+---+---+
| 1 | 1 |
| 2 | 2 |
| 3 | 3 |
+---+---+
>>> g.export_json('output.json', orient='records')
>>> !cat output.json
[
{'a':1,'b':1},
{'a':2,'b':2},
{'a':3,'b':3},
]
If orient="rows", each row will be emitted as a JSON dictionary to
each file line.
>>> g
Columns:
a int
b int
Rows: 3
Data:
+---+---+
| a | b |
+---+---+
| 1 | 1 |
| 2 | 2 |
| 3 | 3 |
+---+---+
>>> g.export_json('output.json', orient='lines')
>>> !cat output.json
{'a':1,'b':1}
{'a':2,'b':2}
{'a':3,'b':3}
"""
if orient == "records":
self.pack_columns(dtype=dict).export_csv(
filename,
file_header="[",
file_footer="]",
header=False,
double_quote=False,
quote_level=csv.QUOTE_NONE,
line_prefix=",",
_no_prefix_on_first_value=True,
)
elif orient == "lines":
self.pack_columns(dtype=dict).export_csv(
filename, header=False, double_quote=False, quote_level=csv.QUOTE_NONE
)
else:
raise ValueError("Invalid value for orient parameter (" + str(orient) + ")")
def _save_reference(self, filename):
"""
Performs an incomplete save of an existing SFrame into a directory.
This saved SFrame may reference SFrames in other locations in the same
filesystem for certain resources.
Parameters
----------
filename : string
The location to save the SFrame. Either a local directory or a
remote URL.
See Also
--------
load_sframe, SFrame
Examples
--------
>>> # Save the sframe into binary format
>>> sf._save_reference('data/training_data_sframe')
"""
## Save the SFrame
url = _make_internal_url(filename)
with cython_context():
self.__proxy__.save_reference(url)
def select_column(self, column_name):
"""
Get a reference to the :class:`~turicreate.SArray` that corresponds with
the given column_name. Throws an exception if the column_name is
something other than a string or if the column name is not found.
Parameters
----------
column_name: str
The column name.
Returns
-------
out : SArray
The SArray that is referred by ``column_name``.
See Also
--------
select_columns
Examples
--------
>>> sf = turicreate.SFrame({'user_id': [1,2,3],
... 'user_name': ['alice', 'bob', 'charlie']})
>>> # This line is equivalent to `sa = sf['user_name']`
>>> sa = sf.select_column('user_name')
>>> sa
dtype: str
Rows: 3
['alice', 'bob', 'charlie']
"""
if not isinstance(column_name, str):
raise TypeError("Invalid column_nametype: must be str")
with cython_context():
return SArray(data=[], _proxy=self.__proxy__.select_column(column_name))
def select_columns(self, column_names):
"""
Selects all columns whose name or type is included in ``column_names``.
An exception is raised if duplicate columns are selected, e.g.
sf.select_columns(['a','a']), or if non-existent columns are selected.
Throws an exception for all other input types.
Parameters
----------
column_names: list[str or type]
The list of column names or a list of types.
Returns
-------
out : SFrame
A new SFrame that is made up of the columns referred to in
``column_names`` from the current SFrame.
See Also
--------
select_column
Examples
--------
>>> sf = turicreate.SFrame({'user_id': [1,2,3],
... 'user_name': ['alice', 'bob', 'charlie'],
... 'zipcode': [98101, 98102, 98103]
... })
>>> # This line is equivalent to `sf2 = sf[['user_id', 'zipcode']]`
>>> sf2 = sf.select_columns(['user_id', 'zipcode'])
>>> sf2
+---------+---------+
| user_id | zipcode |
+---------+---------+
| 1 | 98101 |
| 2 | 98102 |
| 3 | 98103 |
+---------+---------+
[3 rows x 2 columns]
"""
if not _is_non_string_iterable(column_names):
raise TypeError("column_names must be an iterable")
if not (
all(
[
isinstance(x, six.string_types)
or isinstance(x, type)
or isinstance(x, bytes)
for x in column_names
]
)
):
raise TypeError("Invalid key type: must be str, unicode, bytes or type")
requested_str_columns = [
s for s in column_names if isinstance(s, six.string_types)
]
# Make sure there are no duplicate keys
from collections import Counter
column_names_counter = Counter(column_names)
if (len(column_names)) != len(column_names_counter):
for key in column_names_counter:
if column_names_counter[key] > 1:
raise ValueError(
"There are duplicate keys in key list: '" + key + "'"
)
colnames_and_types = list(zip(self.column_names(), self.column_types()))
# Ok. we want the string columns to be in the ordering defined by the
# argument. And then all the type selection columns.
selected_columns = requested_str_columns
typelist = [s for s in column_names if isinstance(s, type)]
# next the type selection columns
# loop through all the columns, adding all columns with types in
# typelist. But don't add a column if it has already been added.
for i in colnames_and_types:
if i[1] in typelist and i[0] not in selected_columns:
selected_columns += [i[0]]
with cython_context():
return SFrame(
data=[], _proxy=self.__proxy__.select_columns(selected_columns)
)
def add_column(self, data, column_name="", inplace=False):
"""
Returns an SFrame with a new column. The number of elements in the data
given must match the length of every other column of the SFrame.
If no name is given, a default name is chosen.
If inplace == False (default) this operation does not modify the
current SFrame, returning a new SFrame.
If inplace == True, this operation modifies the current
SFrame, returning self.
Parameters
----------
data : SArray
The 'column' of data to add.
column_name : string, optional
The name of the column. If no name is given, a default name is
chosen.
inplace : bool, optional. Defaults to False.
Whether the SFrame is modified in place.
Returns
-------
out : SFrame
The SFrame with the new column added. This is the current SFrame if
``inplace=True``, otherwise a new SFrame.
See Also
--------
add_columns
Examples
--------
>>> sf = turicreate.SFrame({'id': [1, 2, 3], 'val': ['A', 'B', 'C']})
>>> sa = turicreate.SArray(['cat', 'dog', 'fossa'])
>>> # This line is equivalent to `sf['species'] = sa`
>>> res = sf.add_column(sa, 'species')
>>> res
+----+-----+---------+
| id | val | species |
+----+-----+---------+
| 1 | A | cat |
| 2 | B | dog |
| 3 | C | fossa |
+----+-----+---------+
[3 rows x 3 columns]
"""
# Check type for pandas dataframe or SArray?
if not isinstance(data, SArray):
if isinstance(data, _Iterable):
data = SArray(data)
else:
if self.num_columns() == 0:
data = SArray([data])
else:
data = SArray.from_const(data, self.num_rows())
if not isinstance(column_name, str):
raise TypeError("Invalid column name: must be str")
if inplace:
ret = self
else:
ret = self.copy()
with cython_context():
ret.__proxy__.add_column(data.__proxy__, column_name)
ret._cache = None
return ret
def add_columns(self, data, column_names=None, inplace=False):
"""
Returns an SFrame with multiple columns added. The number of
elements in all columns must match the length of every other column of
the SFrame.
If inplace == False (default) this operation does not modify the
current SFrame, returning a new SFrame.
If inplace == True, this operation modifies the current
SFrame, returning self.
Parameters
----------
data : list[SArray] or SFrame
The columns to add.
column_names: list of string, optional
A list of column names. All names must be specified. ``column_names`` is
ignored if data is an SFrame.
inplace : bool, optional. Defaults to False.
Whether the SFrame is modified in place.
Returns
-------
out : SFrame
The SFrame with the new columns added. This is the current SFrame if
``inplace=True``, otherwise a new SFrame.
See Also
--------
add_column
Examples
--------
>>> sf = turicreate.SFrame({'id': [1, 2, 3], 'val': ['A', 'B', 'C']})
>>> sf2 = turicreate.SFrame({'species': ['cat', 'dog', 'fossa'],
... 'age': [3, 5, 9]})
>>> res = sf.add_columns(sf2)
>>> res
+----+-----+-----+---------+
| id | val | age | species |
+----+-----+-----+---------+
| 1 | A | 3 | cat |
| 2 | B | 5 | dog |
| 3 | C | 9 | fossa |
+----+-----+-----+---------+
[3 rows x 4 columns]
"""
datalist = data
if isinstance(data, SFrame):
other = data
datalist = [other.select_column(name) for name in other.column_names()]
column_names = other.column_names()
my_columns = set(self.column_names())
for name in column_names:
if name in my_columns:
raise ValueError(
"Column '" + name + "' already exists in current SFrame"
)
else:
if not _is_non_string_iterable(datalist):
raise TypeError("datalist must be an iterable")
if not _is_non_string_iterable(column_names):
raise TypeError("column_names must be an iterable")
if not all([isinstance(x, SArray) for x in datalist]):
raise TypeError("Must give column as SArray")
if not all([isinstance(x, str) for x in column_names]):
raise TypeError("Invalid column name in list : must all be str")
if inplace:
ret = self
else:
ret = self.copy()
with cython_context():
ret.__proxy__.add_columns([x.__proxy__ for x in datalist], column_names)
ret._cache = None
return ret
def remove_column(self, column_name, inplace=False):
"""
Returns an SFrame with a column removed.
If inplace == False (default) this operation does not modify the
current SFrame, returning a new SFrame.
If inplace == True, this operation modifies the current
SFrame, returning self.
Parameters
----------
column_name : string
The name of the column to remove.
inplace : bool, optional. Defaults to False.
Whether the SFrame is modified in place.
Returns
-------
out : SFrame
The SFrame with given column removed.
Examples
--------
>>> sf = turicreate.SFrame({'id': [1, 2, 3], 'val': ['A', 'B', 'C']})
>>> # This is equivalent to `del sf['val']`
>>> res = sf.remove_column('val')
>>> res
+----+
| id |
+----+
| 1 |
| 2 |
| 3 |
+----+
[3 rows x 1 columns]
"""
column_name = str(column_name)
if column_name not in self.column_names():
raise KeyError("Cannot find column %s" % column_name)
colid = self.column_names().index(column_name)
if inplace:
ret = self
else:
ret = self.copy()
with cython_context():
ret.__proxy__.remove_column(colid)
ret._cache = None
return ret
def remove_columns(self, column_names, inplace=False):
"""
Returns an SFrame with one or more columns removed.
If inplace == False (default) this operation does not modify the
current SFrame, returning a new SFrame.
If inplace == True, this operation modifies the current
SFrame, returning self.
Parameters
----------
column_names : list or iterable
A list or iterable of column names.
inplace : bool, optional. Defaults to False.
Whether the SFrame is modified in place.
Returns
-------
out : SFrame
The SFrame with given columns removed.
Examples
--------
>>> sf = turicreate.SFrame({'id': [1, 2, 3], 'val1': ['A', 'B', 'C'], 'val2' : [10, 11, 12]})
>>> res = sf.remove_columns(['val1', 'val2'])
>>> res
+----+
| id |
+----+
| 1 |
| 2 |
| 3 |
+----+
[3 rows x 1 columns]
"""
column_names = list(column_names)
existing_columns = dict((k, i) for i, k in enumerate(self.column_names()))
for name in column_names:
if name not in existing_columns:
raise KeyError("Cannot find column %s" % name)
# Delete it going backwards so we don't invalidate indices
deletion_indices = sorted(existing_columns[name] for name in column_names)
if inplace:
ret = self
else:
ret = self.copy()
for colid in reversed(deletion_indices):
with cython_context():
ret.__proxy__.remove_column(colid)
ret._cache = None
return ret
def swap_columns(self, column_name_1, column_name_2, inplace=False):
"""
Returns an SFrame with two column positions swapped.
If inplace == False (default) this operation does not modify the
current SFrame, returning a new SFrame.
If inplace == True, this operation modifies the current
SFrame, returning self.
Parameters
----------
column_name_1 : string
Name of column to swap
column_name_2 : string
Name of other column to swap
inplace : bool, optional. Defaults to False.
Whether the SFrame is modified in place.
Returns
-------
out : SFrame
The SFrame with swapped columns.
Examples
--------
>>> sf = turicreate.SFrame({'id': [1, 2, 3], 'val': ['A', 'B', 'C']})
>>> res = sf.swap_columns('id', 'val')
>>> res
+-----+-----+
| val | id |
+-----+-----+
| A | 1 |
| B | 2 |
| C | 3 |
+-----+-----+
[3 rows x 2 columns]
"""
colnames = self.column_names()
colid_1 = colnames.index(column_name_1)
colid_2 = colnames.index(column_name_2)
if inplace:
ret = self
else:
ret = self.copy()
with cython_context():
ret.__proxy__.swap_columns(colid_1, colid_2)
ret._cache = None
return ret
def rename(self, names, inplace=False):
"""
Returns an SFrame with columns renamed. ``names`` is expected to be a
dict specifying the old and new names. This changes the names of the
columns given as the keys and replaces them with the names given as the
values.
If inplace == False (default) this operation does not modify the
current SFrame, returning a new SFrame.
If inplace == True, this operation modifies the current
SFrame, returning self.
Parameters
----------
names : dict [string, string]
Dictionary of [old_name, new_name]
inplace : bool, optional. Defaults to False.
Whether the SFrame is modified in place.
Returns
-------
out : SFrame
The SFrame with the columns renamed. This is the current SFrame if
``inplace=True``, otherwise a new SFrame.
See Also
--------
column_names
Examples
--------
>>> sf = SFrame({'X1': ['Alice','Bob'],
... 'X2': ['123 Fake Street','456 Fake Street']})
>>> res = sf.rename({'X1': 'name', 'X2':'address'})
>>> res
+-------+-----------------+
| name | address |
+-------+-----------------+
| Alice | 123 Fake Street |
| Bob | 456 Fake Street |
+-------+-----------------+
[2 rows x 2 columns]
"""
if type(names) is not dict:
raise TypeError("names must be a dictionary: oldname -> newname")
all_columns = set(self.column_names())
for k in names:
if not k in all_columns:
raise ValueError("Cannot find column %s in the SFrame" % k)
if inplace:
ret = self
else:
ret = self.copy()
with cython_context():
for k in names:
colid = ret.column_names().index(k)
ret.__proxy__.set_column_name(colid, names[k])
ret._cache = None
return ret
def __getitem__(self, key):
"""
This method does things based on the type of `key`.
If `key` is:
* str
selects column with name 'key'
* type
selects all columns with types matching the type
* list of str or type
selects all columns with names or type in the list
* SArray
Performs a logical filter. Expects given SArray to be the same
length as all columns in current SFrame. Every row
corresponding with an entry in the given SArray that is
equivalent to False is filtered from the result.
* int
Returns a single row of the SFrame (the `key`th one) as a dictionary.
* slice
Returns an SFrame including only the sliced rows.
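Examples
--------
A few illustrative lookups (values and formatting shown are indicative):
>>> sf = turicreate.SFrame({'id': [1, 2, 3], 'val': ['A', 'B', 'C']})
>>> sf['val'] # column by name -> SArray
dtype: str
Rows: 3
['A', 'B', 'C']
>>> sf[0] # single row as a dictionary
{'id': 1, 'val': 'A'}
>>> len(sf[sf['id'] > 1]) # logical filter -> SFrame with matching rows
2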
"""
if type(key) is SArray:
return self._row_selector(key)
elif isinstance(key, six.string_types):
if six.PY2 and type(key) == unicode:
key = key.encode("utf-8")
return self.select_column(key)
elif type(key) is type:
return self.select_columns([key])
elif _is_non_string_iterable(key):
return self.select_columns(key)
elif isinstance(key, numbers.Integral):
sf_len = len(self)
if key < 0:
key = sf_len + key
if key >= sf_len:
raise IndexError("SFrame index out of range")
if not hasattr(self, "_cache") or self._cache is None:
self._cache = {}
try:
lb, ub, value_list = self._cache["getitem_cache"]
if lb <= key < ub:
return value_list[int(key - lb)]
except KeyError:
pass
# Not in cache, need to grab it. Smaller here than with sarray
# Do we have a good block size that won't cause memory to blow up?
if not "getitem_cache_blocksize" in self._cache:
block_size = (8 * 1024) // sum(
(2 if dt in [int, long, float] else 8) for dt in self.column_types()
)
block_size = max(16, block_size)
self._cache["getitem_cache_blocksize"] = block_size
else:
block_size = self._cache["getitem_cache_blocksize"]
block_num = int(key // block_size)
lb = block_num * block_size
ub = min(sf_len, lb + block_size)
val_list = list(SFrame(_proxy=self.__proxy__.copy_range(lb, 1, ub)))
self._cache["getitem_cache"] = (lb, ub, val_list)
return val_list[int(key - lb)]
elif type(key) is slice:
start = key.start
stop = key.stop
step = key.step
if start is None:
start = 0
if stop is None:
stop = len(self)
if step is None:
step = 1
# handle negative indices
if start < 0:
start = len(self) + start
if stop < 0:
stop = len(self) + stop
return SFrame(_proxy=self.__proxy__.copy_range(start, step, stop))
else:
raise TypeError("Invalid index type: must be SArray, list, int, or str")
def __setitem__(self, key, value):
"""
A wrapper around add_column(s). Key can be either a list or a str. If
value is an SArray, it is added to the SFrame as a column. If it is a
constant value (int, str, or float), then a column is created where
every entry is equal to the constant value. Existing columns can also
be replaced using this wrapper.
"""
if type(key) is list:
self.add_columns(value, key, inplace=True)
elif type(key) is str:
sa_value = None
if type(value) is SArray:
sa_value = value
elif _is_non_string_iterable(value): # wrap list, array... to sarray
sa_value = SArray(value)
else: # create an sarray of constant value
sa_value = SArray.from_const(value, self.num_rows())
# set new column
if not key in self.column_names():
with cython_context():
self.add_column(sa_value, key, inplace=True)
else:
# special case if replacing the only column.
# server would fail the replacement if the new column has different
# length than current one, which doesn't make sense if we are replacing
# the only column. To support this, we first take out the only column
# and then put it back if exception happens
single_column = self.num_columns() == 1
if single_column:
tmpname = key
saved_column = self.select_column(key)
self.remove_column(key, inplace=True)
else:
# add the column to a unique column name.
tmpname = "__" + "-".join(self.column_names())
try:
self.add_column(sa_value, tmpname, inplace=True)
except Exception:
if single_column:
self.add_column(saved_column, key, inplace=True)
raise
if not single_column:
# if add succeeded, remove the column name and rename tmpname->columnname.
self.swap_columns(key, tmpname, inplace=True)
self.remove_column(key, inplace=True)
self.rename({tmpname: key}, inplace=True)
else:
raise TypeError("Cannot set column with key type " + str(type(key)))
def __delitem__(self, key):
"""
Wrapper around remove_column.
"""
self.remove_column(key, inplace=True)
def materialize(self):
"""
For an SFrame that is lazily evaluated, force the persistence of the
SFrame to disk, committing all lazily evaluated operations.
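Examples
--------
A minimal sketch; useful after a chain of lazily evaluated operations:
>>> sf = turicreate.SFrame({'a': [1, 2, 3]})
>>> filtered = sf[sf['a'] > 1] # evaluated lazily
>>> filtered.materialize() # force evaluation and persist the result
>>> filtered.is_materialized()
True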
"""
with cython_context():
self.__proxy__.materialize()
def is_materialized(self):
"""
Returns whether or not the SFrame has been materialized.
"""
return self.__is_materialized__()
def __is_materialized__(self):
"""
Returns whether or not the SFrame has been materialized.
"""
return self.__proxy__.is_materialized()
def __has_size__(self):
"""
Returns whether or not the size of the SFrame is known.
"""
return self.__proxy__.has_size()
def __query_plan_str__(self):
"""
Returns the query plan as a dot graph string
"""
return self.__proxy__.query_plan_string()
def __iter__(self):
"""
Provides an iterator to the rows of the SFrame.
"""
def generator():
elems_at_a_time = 262144
self.__proxy__.begin_iterator()
ret = self.__proxy__.iterator_get_next(elems_at_a_time)
column_names = self.column_names()
while True:
for j in ret:
yield dict(list(zip(column_names, j)))
if len(ret) == elems_at_a_time:
ret = self.__proxy__.iterator_get_next(elems_at_a_time)
else:
break
return generator()
def append(self, other):
"""
Add the rows of an SFrame to the end of this SFrame.
Both SFrames must have the same set of columns with the same column
names and column types.
Parameters
----------
other : SFrame
Another SFrame whose rows are appended to the current SFrame.
Returns
-------
out : SFrame
The result SFrame from the append operation.
Examples
--------
>>> sf = turicreate.SFrame({'id': [4, 6, 8], 'val': ['D', 'F', 'H']})
>>> sf2 = turicreate.SFrame({'id': [1, 2, 3], 'val': ['A', 'B', 'C']})
>>> sf = sf.append(sf2)
>>> sf
+----+-----+
| id | val |
+----+-----+
| 4 | D |
| 6 | F |
| 8 | H |
| 1 | A |
| 2 | B |
| 3 | C |
+----+-----+
[6 rows x 2 columns]
"""
if type(other) is not SFrame:
raise RuntimeError("SFrame append can only work with SFrame")
with cython_context():
return SFrame(_proxy=self.__proxy__.append(other.__proxy__))
def groupby(self, key_column_names, operations, *args):
"""
Perform a group on the key_column_names followed by aggregations on the
columns listed in operations.
The operations parameter is a dictionary that indicates which
aggregation operators to use and which columns to use them on. The
available operators are SUM, MAX, MIN, COUNT, AVG, VAR, STDV, CONCAT,
SELECT_ONE, ARGMIN, ARGMAX, and QUANTILE. For convenience, aggregators
MEAN, STD, and VARIANCE are available as synonyms for AVG, STDV, and
VAR. See :mod:`~turicreate.aggregate` for more detail on the aggregators.
Parameters
----------
key_column_names : string | list[string]
Column(s) to group by. Key columns can be of any type other than
dictionary.
operations : dict, list
Dictionary of columns and aggregation operations. Each key is an
output column name and each value is an aggregator. This can also
be a list of aggregators, in which case column names will be
automatically assigned.
*args
All other remaining arguments will be interpreted in the same
way as the operations argument.
Returns
-------
out_sf : SFrame
A new SFrame, with a column for each groupby column and each
aggregation operation.
See Also
--------
aggregate
Notes
-----
* Numeric aggregators (such as sum, mean, stdev etc.) follow the skip-None
policy, i.e. they omit all missing values from the aggregation.
As an example, `sum([None, 5, 10]) = 15` because the `None` value is
skipped.
* Aggregators have a default value when no values (after skipping all
`None` values) are present. Default values are `None` for ['ARGMAX',
'ARGMIN', 'AVG', 'STD', 'MEAN', 'MIN', 'MAX'], `0` for ['COUNT',
'COUNT_DISTINCT', 'DISTINCT'], `[]` for ['CONCAT', 'QUANTILE',
'DISTINCT'], and `{}` for ['FREQ_COUNT'].
Examples
--------
Suppose we have an SFrame with movie ratings by many users.
>>> import turicreate.aggregate as agg
>>> url = 'https://static.turi.com/datasets/rating_data_example.csv'
>>> sf = turicreate.SFrame.read_csv(url)
>>> sf
+---------+----------+--------+
| user_id | movie_id | rating |
+---------+----------+--------+
| 25904 | 1663 | 3 |
| 25907 | 1663 | 3 |
| 25923 | 1663 | 3 |
| 25924 | 1663 | 3 |
| 25928 | 1663 | 2 |
| 25933 | 1663 | 4 |
| 25934 | 1663 | 4 |
| 25935 | 1663 | 4 |
| 25936 | 1663 | 5 |
| 25937 | 1663 | 2 |
| ... | ... | ... |
+---------+----------+--------+
[10000 rows x 3 columns]
Compute the number of occurrences of each user.
>>> user_count = sf.groupby(key_column_names='user_id',
... operations={'count': agg.COUNT()})
>>> user_count
+---------+-------+
| user_id | count |
+---------+-------+
| 62361 | 1 |
| 30727 | 1 |
| 40111 | 1 |
| 50513 | 1 |
| 35140 | 1 |
| 42352 | 1 |
| 29667 | 1 |
| 46242 | 1 |
| 58310 | 1 |
| 64614 | 1 |
| ... | ... |
+---------+-------+
[9852 rows x 2 columns]
Compute the mean and standard deviation of ratings per user.
>>> user_rating_stats = sf.groupby(key_column_names='user_id',
... operations={
... 'mean_rating': agg.MEAN('rating'),
... 'std_rating': agg.STD('rating')
... })
>>> user_rating_stats
+---------+-------------+------------+
| user_id | mean_rating | std_rating |
+---------+-------------+------------+
| 62361 | 5.0 | 0.0 |
| 30727 | 4.0 | 0.0 |
| 40111 | 2.0 | 0.0 |
| 50513 | 4.0 | 0.0 |
| 35140 | 4.0 | 0.0 |
| 42352 | 5.0 | 0.0 |
| 29667 | 4.0 | 0.0 |
| 46242 | 5.0 | 0.0 |
| 58310 | 2.0 | 0.0 |
| 64614 | 2.0 | 0.0 |
| ... | ... | ... |
+---------+-------------+------------+
[9852 rows x 3 columns]
Compute the movie with the minimum rating per user.
>>> chosen_movies = sf.groupby(key_column_names='user_id',
... operations={
... 'worst_movies': agg.ARGMIN('rating','movie_id')
... })
>>> chosen_movies
+---------+-------------+
| user_id | worst_movies |
+---------+-------------+
| 62361 | 1663 |
| 30727 | 1663 |
| 40111 | 1663 |
| 50513 | 1663 |
| 35140 | 1663 |
| 42352 | 1663 |
| 29667 | 1663 |
| 46242 | 1663 |
| 58310 | 1663 |
| 64614 | 1663 |
| ... | ... |
+---------+-------------+
[9852 rows x 2 columns]
Compute the movie with the max rating per user and also the movie with
the maximum imdb-ranking per user.
>>> sf['imdb-ranking'] = sf['rating'] * 10
>>> chosen_movies = sf.groupby(key_column_names='user_id',
... operations={('max_rating_movie','max_imdb_ranking_movie'): agg.ARGMAX(('rating','imdb-ranking'),'movie_id')})
>>> chosen_movies
+---------+------------------+------------------------+
| user_id | max_rating_movie | max_imdb_ranking_movie |
+---------+------------------+------------------------+
| 62361 | 1663 | 16630 |
| 30727 | 1663 | 16630 |
| 40111 | 1663 | 16630 |
| 50513 | 1663 | 16630 |
| 35140 | 1663 | 16630 |
| 42352 | 1663 | 16630 |
| 29667 | 1663 | 16630 |
| 46242 | 1663 | 16630 |
| 58310 | 1663 | 16630 |
| 64614 | 1663 | 16630 |
| ... | ... | ... |
+---------+------------------+------------------------+
[9852 rows x 3 columns]
Compute the movie with the max rating per user.
>>> chosen_movies = sf.groupby(key_column_names='user_id',
operations={'best_movies': agg.ARGMAX('rating','movie_id')})
Compute the movie with the max rating per user and also the movie with the maximum imdb-ranking per user.
>>> chosen_movies = sf.groupby(key_column_names='user_id',
operations={('max_rating_movie','max_imdb_ranking_movie'): agg.ARGMAX(('rating','imdb-ranking'),'movie_id')})
Compute the count, mean, and standard deviation of ratings per (user,
time), automatically assigning output column names.
>>> sf['time'] = sf.apply(lambda x: (x['user_id'] + x['movie_id']) % 11 + 2000)
>>> user_rating_stats = sf.groupby(['user_id', 'time'],
... [agg.COUNT(),
... agg.AVG('rating'),
... agg.STDV('rating')])
>>> user_rating_stats
+------+---------+-------+---------------+----------------+
| time | user_id | Count | Avg of rating | Stdv of rating |
+------+---------+-------+---------------+----------------+
| 2006 | 61285 | 1 | 4.0 | 0.0 |
| 2000 | 36078 | 1 | 4.0 | 0.0 |
| 2003 | 47158 | 1 | 3.0 | 0.0 |
| 2007 | 34446 | 1 | 3.0 | 0.0 |
| 2010 | 47990 | 1 | 3.0 | 0.0 |
| 2003 | 42120 | 1 | 5.0 | 0.0 |
| 2007 | 44940 | 1 | 4.0 | 0.0 |
| 2008 | 58240 | 1 | 4.0 | 0.0 |
| 2002 | 102 | 1 | 1.0 | 0.0 |
| 2009 | 52708 | 1 | 3.0 | 0.0 |
| ... | ... | ... | ... | ... |
+------+---------+-------+---------------+----------------+
[10000 rows x 5 columns]
The groupby function can take a variable length list of aggregation
specifiers so if we want the count and the 0.25 and 0.75 quantiles of
ratings:
>>> user_rating_stats = sf.groupby(['user_id', 'time'], agg.COUNT(),
... {'rating_quantiles': agg.QUANTILE('rating',[0.25, 0.75])})
>>> user_rating_stats
+------+---------+-------+------------------------+
| time | user_id | Count | rating_quantiles |
+------+---------+-------+------------------------+
| 2006 | 61285 | 1 | array('d', [4.0, 4.0]) |
| 2000 | 36078 | 1 | array('d', [4.0, 4.0]) |
| 2003 | 47158 | 1 | array('d', [3.0, 3.0]) |
| 2007 | 34446 | 1 | array('d', [3.0, 3.0]) |
| 2010 | 47990 | 1 | array('d', [3.0, 3.0]) |
| 2003 | 42120 | 1 | array('d', [5.0, 5.0]) |
| 2007 | 44940 | 1 | array('d', [4.0, 4.0]) |
| 2008 | 58240 | 1 | array('d', [4.0, 4.0]) |
| 2002 | 102 | 1 | array('d', [1.0, 1.0]) |
| 2009 | 52708 | 1 | array('d', [3.0, 3.0]) |
| ... | ... | ... | ... |
+------+---------+-------+------------------------+
[10000 rows x 4 columns]
To put all items a user rated into one list value by their star rating:
>>> user_rating_stats = sf.groupby(["user_id", "rating"],
... {"rated_movie_ids":agg.CONCAT("movie_id")})
>>> user_rating_stats
+--------+---------+----------------------+
| rating | user_id | rated_movie_ids |
+--------+---------+----------------------+
| 3 | 31434 | array('d', [1663.0]) |
| 5 | 25944 | array('d', [1663.0]) |
| 4 | 38827 | array('d', [1663.0]) |
| 4 | 51437 | array('d', [1663.0]) |
| 4 | 42549 | array('d', [1663.0]) |
| 4 | 49532 | array('d', [1663.0]) |
| 3 | 26124 | array('d', [1663.0]) |
| 4 | 46336 | array('d', [1663.0]) |
| 4 | 52133 | array('d', [1663.0]) |
| 5 | 62361 | array('d', [1663.0]) |
| ... | ... | ... |
+--------+---------+----------------------+
[9952 rows x 3 columns]
To put all items and rating of a given user together into a dictionary
value:
>>> user_rating_stats = sf.groupby("user_id",
... {"movie_rating":agg.CONCAT("movie_id", "rating")})
>>> user_rating_stats
+---------+--------------+
| user_id | movie_rating |
+---------+--------------+
| 62361 | {1663: 5} |
| 30727 | {1663: 4} |
| 40111 | {1663: 2} |
| 50513 | {1663: 4} |
| 35140 | {1663: 4} |
| 42352 | {1663: 5} |
| 29667 | {1663: 4} |
| 46242 | {1663: 5} |
| 58310 | {1663: 2} |
| 64614 | {1663: 2} |
| ... | ... |
+---------+--------------+
[9852 rows x 2 columns]
"""
# some basic checking first
# make sure key_column_names is a list
if isinstance(key_column_names, str):
key_column_names = [key_column_names]
# check that every column is a string, and is a valid column name
my_column_names = self.column_names()
key_columns_array = []
for column in key_column_names:
if not isinstance(column, str):
raise TypeError("Column name must be a string")
if column not in my_column_names:
raise KeyError("Column \"" + column + "\" does not exist in SFrame")
if self[column].dtype == dict:
raise TypeError("Cannot group on a dictionary column.")
key_columns_array.append(column)
group_output_columns = []
group_columns = []
group_ops = []
all_ops = [operations] + list(args)
for op_entry in all_ops:
# if it is not a dict, nor a list, it is just a single aggregator
# element (probably COUNT). wrap it in a list so we can reuse the
# list processing code
operation = op_entry
if not (isinstance(operation, list) or isinstance(operation, dict)):
operation = [operation]
if isinstance(operation, dict):
# now sweep the dict and add to group_columns and group_ops
for key in operation:
val = operation[key]
if type(val) is tuple:
(op, column) = val
if op == "__builtin__avg__" and self[column[0]].dtype in [
array.array,
numpy.ndarray,
]:
op = "__builtin__vector__avg__"
if op == "__builtin__sum__" and self[column[0]].dtype in [
array.array,
numpy.ndarray,
]:
op = "__builtin__vector__sum__"
if (
op == "__builtin__argmax__" or op == "__builtin__argmin__"
) and ((type(column[0]) is tuple) != (type(key) is tuple)):
raise TypeError(
"Output column(s) and aggregate column(s) for aggregate operation should be either all tuple or all string."
)
if (
op == "__builtin__argmax__" or op == "__builtin__argmin__"
) and type(column[0]) is tuple:
for (col, output) in zip(column[0], key):
group_columns = group_columns + [[col, column[1]]]
group_ops = group_ops + [op]
group_output_columns = group_output_columns + [output]
else:
group_columns = group_columns + [column]
group_ops = group_ops + [op]
group_output_columns = group_output_columns + [key]
if op == "__builtin__concat__dict__":
key_column = column[0]
key_column_type = self.select_column(key_column).dtype
if not key_column_type in (int, float, str):
raise TypeError(
"CONCAT key column must be int, float or str type"
)
elif val == aggregate.COUNT:
group_output_columns = group_output_columns + [key]
val = aggregate.COUNT()
(op, column) = val
group_columns = group_columns + [column]
group_ops = group_ops + [op]
else:
raise TypeError(
"Unexpected type in aggregator definition of output column: "
+ key
)
elif isinstance(operation, list):
# we will be using automatically defined column names
for val in operation:
if type(val) is tuple:
(op, column) = val
if op == "__builtin__avg__" and self[column[0]].dtype in [
array.array,
numpy.ndarray,
]:
op = "__builtin__vector__avg__"
if op == "__builtin__sum__" and self[column[0]].dtype in [
array.array,
numpy.ndarray,
]:
op = "__builtin__vector__sum__"
if (
op == "__builtin__argmax__" or op == "__builtin__argmin__"
) and type(column[0]) is tuple:
for col in column[0]:
group_columns = group_columns + [[col, column[1]]]
group_ops = group_ops + [op]
group_output_columns = group_output_columns + [""]
else:
group_columns = group_columns + [column]
group_ops = group_ops + [op]
group_output_columns = group_output_columns + [""]
if op == "__builtin__concat__dict__":
key_column = column[0]
key_column_type = self.select_column(key_column).dtype
if not key_column_type in (int, float, str):
raise TypeError(
"CONCAT key column must be int, float or str type"
)
elif val == aggregate.COUNT:
group_output_columns = group_output_columns + [""]
val = aggregate.COUNT()
(op, column) = val
group_columns = group_columns + [column]
group_ops = group_ops + [op]
else:
raise TypeError("Unexpected type in aggregator definition.")
# let's validate group_columns and group_ops are valid
for (cols, op) in zip(group_columns, group_ops):
for col in cols:
if not isinstance(col, str):
raise TypeError("Column name must be a string")
if not isinstance(op, str):
raise TypeError("Operation type not recognized.")
if op is not aggregate.COUNT()[0]:
for col in cols:
if col not in my_column_names:
raise KeyError("Column " + col + " does not exist in SFrame")
with cython_context():
return SFrame(
_proxy=self.__proxy__.groupby_aggregate(
key_columns_array, group_columns, group_output_columns, group_ops
)
)
def join(self, right, on=None, how="inner", alter_name=None):
"""
Merge two SFrames. Merges the current (left) SFrame with the given
(right) SFrame using a SQL-style equi-join operation by columns.
Parameters
----------
right : SFrame
The SFrame to join.
on : None | str | list | dict, optional
The column name(s) representing the set of join keys. Each row that
has the same value in this set of columns will be merged together.
* If 'None' is given, join will use all columns that have the same
name as the set of join keys.
* If a str is given, this is interpreted as a join using one column,
where both SFrames have the same column name.
* If a list is given, this is interpreted as a join using one or
more column names, where each column name given exists in both
SFrames.
* If a dict is given, each dict key is taken as a column name in the
left SFrame, and each dict value is taken as the column name in
right SFrame that will be joined together. e.g.
{'left_col_name':'right_col_name'}.
how : {'left', 'right', 'outer', 'inner'}, optional
The type of join to perform. 'inner' is default.
* inner: Equivalent to a SQL inner join. Result consists of the
rows from the two frames whose join key values match exactly,
merged together into one SFrame.
* left: Equivalent to a SQL left outer join. Result is the union
between the result of an inner join and the rest of the rows from
the left SFrame, merged with missing values.
* right: Equivalent to a SQL right outer join. Result is the union
between the result of an inner join and the rest of the rows from
the right SFrame, merged with missing values.
* outer: Equivalent to a SQL full outer join. Result is
the union between the result of a left outer join and a right
outer join.
        alter_name : None | dict, optional
            User-provided names used to resolve column name conflicts when
            merging the two SFrames.
            * If 'None', the default conflict resolution is used. For example,
              if 'X' is defined in the SFrame on the left side of the join and
              there is also a column called 'X' in the SFrame on the right, the
              column appended from the right SFrame is renamed to 'X.1' to
              avoid the collision.
            * If a dict is given, each dict key must be a column name from the
              right SFrame (and not a join key), and the corresponding dict
              value is the preferred name to use for that column instead of the
              default resolution. The value must differ from the key and must
              not collide with any existing column name in either SFrame;
              otherwise an exception is raised.
Returns
-------
out : SFrame
Examples
--------
>>> animals = turicreate.SFrame({'id': [1, 2, 3, 4],
... 'name': ['dog', 'cat', 'sheep', 'cow']})
>>> sounds = turicreate.SFrame({'id': [1, 3, 4, 5],
... 'sound': ['woof', 'baa', 'moo', 'oink']})
>>> animals.join(sounds, how='inner')
+----+-------+-------+
| id | name | sound |
+----+-------+-------+
| 1 | dog | woof |
| 3 | sheep | baa |
| 4 | cow | moo |
+----+-------+-------+
[3 rows x 3 columns]
>>> animals.join(sounds, on='id', how='left')
+----+-------+-------+
| id | name | sound |
+----+-------+-------+
| 1 | dog | woof |
| 3 | sheep | baa |
| 4 | cow | moo |
| 2 | cat | None |
+----+-------+-------+
[4 rows x 3 columns]
>>> animals.join(sounds, on=['id'], how='right')
+----+-------+-------+
| id | name | sound |
+----+-------+-------+
| 1 | dog | woof |
| 3 | sheep | baa |
| 4 | cow | moo |
| 5 | None | oink |
+----+-------+-------+
[4 rows x 3 columns]
>>> animals.join(sounds, on={'id':'id'}, how='outer')
+----+-------+-------+
| id | name | sound |
+----+-------+-------+
| 1 | dog | woof |
| 3 | sheep | baa |
| 4 | cow | moo |
| 5 | None | oink |
| 2 | cat | None |
+----+-------+-------+
[5 rows x 3 columns]
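        If the two SFrames also share a non-key column, ``alter_name`` can pick
        the name used for the right-hand copy instead of the default suffixed
        name (the 'note' columns below are hypothetical and shown only for
        illustration); the right-hand 'note' then appears as 'sound_note'
        rather than 'note.1':
        >>> left = turicreate.SFrame({'id': [1, 2], 'note': ['a', 'b']})
        >>> right = turicreate.SFrame({'id': [1, 2], 'note': ['x', 'y']})
        >>> left.join(right, on='id', alter_name={'note': 'sound_note'})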
"""
available_join_types = ["left", "right", "outer", "inner"]
if not isinstance(right, SFrame):
raise TypeError("Can only join two SFrames")
if how not in available_join_types:
raise ValueError("Invalid join type")
if (self.num_columns() <= 0) or (right.num_columns() <= 0):
raise ValueError("Cannot join an SFrame with no columns.")
join_keys = dict()
if on is None:
left_names = self.column_names()
right_names = right.column_names()
common_columns = [name for name in left_names if name in right_names]
for name in common_columns:
join_keys[name] = name
elif type(on) is str:
join_keys[on] = on
elif type(on) is list:
for name in on:
if type(name) is not str:
raise TypeError("Join keys must each be a str.")
join_keys[name] = name
elif type(on) is dict:
join_keys = on
else:
raise TypeError("Must pass a str, list, or dict of join keys")
with cython_context():
if alter_name is None:
return SFrame(
_proxy=self.__proxy__.join(right.__proxy__, how, join_keys)
)
if type(alter_name) is dict:
left_names = self.column_names()
right_names = right.column_names()
for (k, v) in alter_name.items():
if (k not in right_names) or (k in join_keys):
raise KeyError("Redundant key %s for collision resolution" % k)
if k == v:
raise ValueError("Key %s should not be equal to value" % k)
if v in left_names or v in right_names:
raise ValueError("Value %s will cause further collision" % v)
return SFrame(
_proxy=self.__proxy__.join_with_custom_name(
right.__proxy__, how, join_keys, alter_name
)
)
def filter_by(self, values, column_name, exclude=False):
"""
Filter an SFrame by values inside an iterable object. Result is an
SFrame that only includes (or excludes) the rows that have a column
with the given ``column_name`` which holds one of the values in the
given ``values`` :class:`~turicreate.SArray`. If ``values`` is not an
SArray, we attempt to convert it to one before filtering.
Parameters
----------
values : SArray | list | numpy.ndarray | pandas.Series | str | map
| generator | filter | None | range
The values to use to filter the SFrame. The resulting SFrame will
only include rows that have one of these values in the given
column.
column_name : str
The column of the SFrame to match with the given `values`.
exclude : bool
If True, the result SFrame will contain all rows EXCEPT those that
have one of ``values`` in ``column_name``.
Returns
-------
out : SFrame
The filtered SFrame.
Examples
--------
>>> sf = turicreate.SFrame({'id': [1, 2, 3, 4],
... 'animal_type': ['dog', 'cat', 'cow', 'horse'],
... 'name': ['bob', 'jim', 'jimbob', 'bobjim']})
>>> household_pets = ['cat', 'hamster', 'dog', 'fish', 'bird', 'snake']
>>> sf.filter_by(household_pets, 'animal_type')
+-------------+----+------+
| animal_type | id | name |
+-------------+----+------+
| dog | 1 | bob |
| cat | 2 | jim |
+-------------+----+------+
[2 rows x 3 columns]
>>> sf.filter_by(household_pets, 'animal_type', exclude=True)
+-------------+----+--------+
| animal_type | id | name |
+-------------+----+--------+
| horse | 4 | bobjim |
| cow | 3 | jimbob |
+-------------+----+--------+
[2 rows x 3 columns]
>>> sf.filter_by(None, 'name', exclude=True)
+-------------+----+--------+
| animal_type | id | name |
+-------------+----+--------+
| dog | 1 | bob |
| cat | 2 | jim |
| cow | 3 | jimbob |
| horse | 4 | bobjim |
+-------------+----+--------+
[4 rows x 3 columns]
>>> sf.filter_by(filter(lambda x : len(x) > 3, sf['name']), 'name', exclude=True)
+-------------+----+--------+
| animal_type | id | name |
+-------------+----+--------+
| dog | 1 | bob |
| cat | 2 | jim |
+-------------+----+--------+
[2 rows x 3 columns]
>>> sf.filter_by(range(3), 'id', exclude=True)
+-------------+----+--------+
| animal_type | id | name |
+-------------+----+--------+
| cow | 3 | jimbob |
| horse | 4 | bobjim |
+-------------+----+--------+
[2 rows x 3 columns]
"""
if type(column_name) is not str:
raise TypeError("Must pass a str as column_name")
existing_columns = self.column_names()
if column_name not in existing_columns:
raise KeyError("Column '" + column_name + "' not in SFrame.")
existing_type = self[column_name].dtype
if type(values) is not SArray:
# If we were given a single element, try to put in list and convert
# to SArray
if not _is_non_string_iterable(values):
values = [values]
else:
# is iterable
                # If `values` is a map/filter/generator, convert it to a list so
                # the iterable can be traversed more than once (e.g. by the `all`
                # check below).
                # Note: user-defined iterators are not covered here; it is hard
                # to check whether an arbitrary iterable can be iterated
                # repeatedly, so they are simply not supported.
if SArray._is_iterable_required_to_listify(values):
values = list(values)
# if all vals are None, cast the sarray to existing type
# this will enable filter_by(None, column_name) to remove missing vals
if all(val is None for val in values):
values = SArray(values, existing_type)
else:
values = SArray(values)
value_sf = SFrame()
value_sf.add_column(values, column_name, inplace=True)
given_type = value_sf.column_types()[0]
if given_type != existing_type:
raise TypeError(
(
"Type of given values ({0}) does not match type of column '"
+ column_name
+ "' ({1}) in SFrame."
).format(given_type, existing_type)
)
# Make sure the values list has unique values, or else join will not
# filter.
value_sf = value_sf.groupby(column_name, {})
with cython_context():
if exclude:
id_name = "id"
# Make sure this name is unique so we know what to remove in
# the result
while id_name in existing_columns:
id_name += "1"
value_sf = value_sf.add_row_number(id_name)
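                # Left-join against the de-duplicated values; rows with no match
                # get a missing row-number id, and keeping exactly those rows
                # implements the exclusion.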
tmp = SFrame(
_proxy=self.__proxy__.join(
value_sf.__proxy__, "left", {column_name: column_name}
)
)
ret_sf = tmp[tmp[id_name] == None]
del ret_sf[id_name]
return ret_sf
else:
return SFrame(
_proxy=self.__proxy__.join(
value_sf.__proxy__, "inner", {column_name: column_name}
)
)
def explore(self, title=None):
"""
Explore the SFrame in an interactive GUI. Opens a new app window.
Parameters
----------
title : str
The plot title to show for the resulting visualization. Defaults to None.
If the title is None, a default title will be provided.
Returns
-------
None
Examples
--------
Suppose 'sf' is an SFrame, we can view it using:
>>> sf.explore()
        To override the default plot title:
>>> sf.explore(title="My Plot Title")
"""
import sys
if (
sys.platform != "darwin"
and sys.platform != "linux2"
and sys.platform != "linux"
):
raise NotImplementedError(
"Visualization is currently supported only on macOS and Linux."
)
# Suppress visualization output if 'none' target is set
from ..visualization._plot import (
_target,
display_table_in_notebook,
_ensure_web_server,
)
if _target == "none":
return
if title is None:
title = ""
# If browser target is set, launch in web browser
if _target == "browser":
# First, make sure TURI_VISUALIZATION_WEB_SERVER_ROOT_DIRECTORY is set
_ensure_web_server()
# Launch localhost URL using Python built-in webbrowser module
import webbrowser
import turicreate as tc
url = tc.extensions.get_url_for_table(self, title)
webbrowser.open_new_tab(url)
return
# If auto target is set, try to show inline in Jupyter Notebook
try:
if _target == "auto" and (
get_ipython().__class__.__name__ == "ZMQInteractiveShell"
or get_ipython().__class__.__name__ == "Shell"
):
display_table_in_notebook(self, title)
return
except NameError:
pass
# Launch interactive GUI window
path_to_client = _get_client_app_path()
self.__proxy__.explore(path_to_client, title)
def show(self):
"""
Visualize a summary of each column in an SFrame. Opens a new app window.
Notes
-----
- The plot will render either inline in a Jupyter Notebook, in a web
browser, or in a native GUI window, depending on the value provided in
`turicreate.visualization.set_target` (defaults to 'auto').
Returns
-------
None
Examples
--------
Suppose 'sf' is an SFrame, we can view it using:
>>> sf.show()
"""
returned_plot = self.plot()
returned_plot.show()
def plot(self):
"""
Create a Plot object that contains a summary of each column
in an SFrame.
Returns
-------
out : Plot
A :class: Plot object that is the columnwise summary of the sframe.
Examples
--------
Suppose 'sf' is an SFrame, we can make a plot object as:
>>> plt = sf.plot()
We can then visualize the plot using:
>>> plt.show()
"""
return Plot(_proxy=self.__proxy__.plot())
def pack_columns(
self,
column_names=None,
column_name_prefix=None,
dtype=list,
fill_na=None,
remove_prefix=True,
new_column_name=None,
):
"""
Pack columns of the current SFrame into one single column. The result
is a new SFrame with the unaffected columns from the original SFrame
plus the newly created column.
The list of columns that are packed is chosen through either the
``column_names`` or ``column_name_prefix`` parameter. Only one of the parameters
        is allowed to be provided. ``column_names`` explicitly specifies the list of
columns to pack, while ``column_name_prefix`` specifies that all columns that
have the given prefix are to be packed.
The type of the resulting column is decided by the ``dtype`` parameter.
Allowed values for ``dtype`` are dict, array.array and list:
- *dict*: pack to a dictionary SArray where column name becomes
dictionary key and column value becomes dictionary value
- *array.array*: pack all values from the packing columns into an array
- *list*: pack all values from the packing columns into a list.
Parameters
----------
column_names : list[str], optional
A list of column names to be packed. If omitted and
`column_name_prefix` is not specified, all columns from current SFrame
are packed. This parameter is mutually exclusive with the
`column_name_prefix` parameter.
column_name_prefix : str, optional
Pack all columns with the given `column_name_prefix`.
            This parameter is mutually exclusive with the `column_names` parameter.
dtype : dict | array.array | list, optional
The resulting packed column type. If not provided, dtype is list.
fill_na : value, optional
Value to fill into packed column if missing value is encountered.
If packing to dictionary, `fill_na` is only applicable to dictionary
values; missing keys are not replaced.
remove_prefix : bool, optional
If True and `column_name_prefix` is specified, the dictionary key will
be constructed by removing the prefix from the column name.
This option is only applicable when packing to dict type.
new_column_name : str, optional
Packed column name. If not given and `column_name_prefix` is given,
then the prefix will be used as the new column name, otherwise name
is generated automatically.
Returns
-------
out : SFrame
An SFrame that contains columns that are not packed, plus the newly
packed column.
See Also
--------
unpack
Notes
-----
- If packing to dictionary, missing key is always dropped. Missing
values are dropped if fill_na is not provided, otherwise, missing
value is replaced by 'fill_na'. If packing to list or array, missing
values will be kept. If 'fill_na' is provided, the missing value is
replaced with 'fill_na' value.
Examples
--------
        Suppose 'sf' is an SFrame that maintains business category
information:
>>> sf = turicreate.SFrame({'business': range(1, 5),
... 'category.retail': [1, None, 1, None],
... 'category.food': [1, 1, None, None],
... 'category.service': [None, 1, 1, None],
... 'category.shop': [1, 1, None, 1]})
>>> sf
+----------+-----------------+---------------+------------------+---------------+
| business | category.retail | category.food | category.service | category.shop |
+----------+-----------------+---------------+------------------+---------------+
| 1 | 1 | 1 | None | 1 |
| 2 | None | 1 | 1 | 1 |
| 3 | 1 | None | 1 | None |
| 4 | None | 1 | None | 1 |
+----------+-----------------+---------------+------------------+---------------+
[4 rows x 5 columns]
To pack all category columns into a list:
>>> sf.pack_columns(column_name_prefix='category')
+----------+-----------------------+
| business | category |
+----------+-----------------------+
| 1 | [1, 1, None, 1] |
| 2 | [1, None, 1, 1] |
| 3 | [None, 1, 1, None] |
| 4 | [None, None, None, 1] |
+----------+-----------------------+
[4 rows x 2 columns]
To pack all category columns into a dictionary, with new column name:
>>> sf.pack_columns(column_name_prefix='category', dtype=dict,
... new_column_name='new name')
+----------+-------------------------------+
| business | new name |
+----------+-------------------------------+
| 1 | {'food': 1, 'shop': 1, 're... |
| 2 | {'food': 1, 'shop': 1, 'se... |
| 3 | {'retail': 1, 'service': 1} |
| 4 | {'shop': 1} |
+----------+-------------------------------+
[4 rows x 2 columns]
To keep column prefix in the resulting dict key:
>>> sf.pack_columns(column_name_prefix='category', dtype=dict,
remove_prefix=False)
+----------+-------------------------------+
| business | category |
+----------+-------------------------------+
| 1 | {'category.retail': 1, 'ca... |
| 2 | {'category.food': 1, 'cate... |
| 3 | {'category.retail': 1, 'ca... |
| 4 | {'category.shop': 1} |
+----------+-------------------------------+
[4 rows x 2 columns]
To explicitly pack a set of columns:
>>> sf.pack_columns(column_names = ['business', 'category.retail',
'category.food', 'category.service',
'category.shop'])
+-----------------------+
| X1 |
+-----------------------+
| [1, 1, 1, None, 1] |
| [2, None, 1, 1, 1] |
| [3, 1, None, 1, None] |
| [4, None, 1, None, 1] |
+-----------------------+
[4 rows x 1 columns]
To pack all columns with name starting with 'category' into an array
type, and with missing value replaced with 0:
>>> import array
>>> sf.pack_columns(column_name_prefix="category", dtype=array.array,
... fill_na=0)
+----------+----------------------+
| business | category |
+----------+----------------------+
| 1 | [1.0, 1.0, 0.0, 1.0] |
| 2 | [1.0, 0.0, 1.0, 1.0] |
| 3 | [0.0, 1.0, 1.0, 0.0] |
| 4 | [0.0, 0.0, 0.0, 1.0] |
+----------+----------------------+
[4 rows x 2 columns]
"""
if column_names is not None and column_name_prefix is not None:
            raise ValueError(
                "'column_names' and 'column_name_prefix' parameters cannot be given at the same time."
            )
if new_column_name is None and column_name_prefix is not None:
new_column_name = column_name_prefix
if column_name_prefix is not None:
if type(column_name_prefix) != str:
raise TypeError("'column_name_prefix' must be a string")
column_names = [
name
for name in self.column_names()
if name.startswith(column_name_prefix)
]
if len(column_names) == 0:
                raise ValueError(
                    "There is no column that starts with prefix '" + column_name_prefix + "'"
                )
elif column_names is None:
column_names = self.column_names()
else:
if not _is_non_string_iterable(column_names):
raise TypeError("column_names must be an iterable type")
column_name_set = set(self.column_names())
for column in column_names:
if column not in column_name_set:
raise ValueError(
"Current SFrame has no column called '" + str(column) + "'."
)
# check duplicate names
if len(set(column_names)) != len(column_names):
                raise ValueError(
                    "There are duplicate column names in the column_names parameter"
                )
if dtype not in (dict, list, array.array):
raise ValueError(
"Resulting dtype has to be one of dict/array.array/list type"
)
# fill_na value for array needs to be numeric
if dtype == array.array:
if (fill_na is not None) and (type(fill_na) not in (int, float)):
raise ValueError("fill_na value for array needs to be numeric type")
# all column_names have to be numeric type
for column in column_names:
if self[column].dtype not in (int, float):
raise TypeError(
"Column '"
+ column
+ "' type is not numeric, cannot pack into array type"
)
# generate dict key names if pack to dictionary
# we try to be smart here
# if all column names are like: a.b, a.c, a.d,...
# we then use "b", "c", "d", etc as the dictionary key during packing
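        # e.g. column_name_prefix="category" with columns ["category.food",
        # "category.shop"] gives dict_keys ["food", "shop"]; if the character
        # after the prefix is not a separator ('.', '-', '_'), only the prefix
        # itself is removed.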
if (
(dtype == dict)
and (column_name_prefix is not None)
and (remove_prefix == True)
):
size_prefix = len(column_name_prefix)
first_char = set([c[size_prefix : size_prefix + 1] for c in column_names])
if (len(first_char) == 1) and first_char.pop() in [".", "-", "_"]:
dict_keys = [name[size_prefix + 1 :] for name in column_names]
else:
dict_keys = [name[size_prefix:] for name in column_names]
else:
dict_keys = column_names
rest_columns = [
name for name in self.column_names() if name not in column_names
]
if new_column_name is not None:
if type(new_column_name) != str:
raise TypeError("'new_column_name' has to be a string")
if new_column_name in rest_columns:
raise KeyError(
"Current SFrame already contains a column name " + new_column_name
)
else:
new_column_name = ""
ret_sa = None
with cython_context():
ret_sa = SArray(
_proxy=self.__proxy__.pack_columns(
column_names, dict_keys, dtype, fill_na
)
)
new_sf = self.select_columns(rest_columns)
new_sf.add_column(ret_sa, new_column_name, inplace=True)
return new_sf
def split_datetime(
self, column_name, column_name_prefix=None, limit=None, timezone=False
):
"""
Splits a datetime column of SFrame to multiple columns, with each value in a
separate column. Returns a new SFrame with the expanded column replaced with
a list of new columns. The expanded column must be of datetime type.
        For more details regarding name generation and other behavior,
        refer to :py:func:`turicreate.SArray.split_datetime()`
Parameters
----------
column_name : str
Name of the unpacked column.
column_name_prefix : str, optional
If provided, expanded column names would start with the given prefix.
If not provided, the default value is the name of the expanded column.
limit: list[str], optional
Limits the set of datetime elements to expand.
Possible values are 'year','month','day','hour','minute','second',
'weekday', 'isoweekday', 'tmweekday', and 'us'.
If not provided, only ['year','month','day','hour','minute','second']
are expanded.
timezone : bool, optional
A boolean parameter that determines whether to show the timezone
column or not. Defaults to False.
Returns
-------
out : SFrame
            A new SFrame that contains the rest of the columns from the original SFrame with
the given column replaced with a collection of expanded columns.
Examples
---------
>>> sf
Columns:
id int
submission datetime
Rows: 2
Data:
+----+-------------------------------------------------+
| id | submission |
+----+-------------------------------------------------+
| 1 | datetime(2011, 1, 21, 7, 17, 21, tzinfo=GMT(+1))|
| 2 | datetime(2011, 1, 21, 5, 43, 21, tzinfo=GMT(+1))|
+----+-------------------------------------------------+
>>> sf.split_datetime('submission',limit=['hour','minute'])
Columns:
id int
submission.hour int
submission.minute int
Rows: 2
Data:
+----+-----------------+-------------------+
| id | submission.hour | submission.minute |
+----+-----------------+-------------------+
| 1 | 7 | 17 |
| 2 | 5 | 43 |
+----+-----------------+-------------------+
"""
if column_name not in self.column_names():
raise KeyError(
"column '" + column_name + "' does not exist in current SFrame"
)
if column_name_prefix is None:
column_name_prefix = column_name
new_sf = self[column_name].split_datetime(column_name_prefix, limit, timezone)
# construct return SFrame, check if there is conflict
rest_columns = [name for name in self.column_names() if name != column_name]
new_names = new_sf.column_names()
while set(new_names).intersection(rest_columns):
new_names = [name + ".1" for name in new_names]
new_sf.rename(dict(list(zip(new_sf.column_names(), new_names))), inplace=True)
ret_sf = self.select_columns(rest_columns)
ret_sf.add_columns(new_sf, inplace=True)
return ret_sf
def unpack(
self,
column_name=None,
column_name_prefix=None,
column_types=None,
na_value=None,
limit=None,
):
"""
Expand one column of this SFrame to multiple columns with each value in
a separate column. Returns a new SFrame with the unpacked column
replaced with a list of new columns. The column must be of
list/array/dict type.
        For more details regarding name generation, missing value handling,
        and other behavior, refer to the SArray version of
:py:func:`~turicreate.SArray.unpack()`.
Parameters
----------
column_name : str, optional
Name of the unpacked column, if provided. If not provided
and only one column is present then the column is unpacked.
In case of multiple columns, name must be provided to know
which column to be unpacked.
column_name_prefix : str, optional
If provided, unpacked column names would start with the given
prefix. If not provided, default value is the name of the unpacked
column.
column_types : [type], optional
Column types for the unpacked columns.
If not provided, column types are automatically inferred from first
100 rows. For array type, default column types are float. If
provided, column_types also restricts how many columns to unpack.
na_value : flexible_type, optional
If provided, convert all values that are equal to "na_value" to
missing value (None).
limit : list[str] | list[int], optional
Control unpacking only a subset of list/array/dict value. For
dictionary SArray, `limit` is a list of dictionary keys to restrict.
For list/array SArray, `limit` is a list of integers that are
indexes into the list/array value.
Returns
-------
out : SFrame
            A new SFrame that contains the rest of the columns from the original SFrame with
the given column replaced with a collection of unpacked columns.
See Also
--------
pack_columns, SArray.unpack
Examples
---------
>>> sf = turicreate.SFrame({'id': [1,2,3],
... 'wc': [{'a': 1}, {'b': 2}, {'a': 1, 'b': 2}]})
+----+------------------+
| id | wc |
+----+------------------+
| 1 | {'a': 1} |
| 2 | {'b': 2} |
| 3 | {'a': 1, 'b': 2} |
+----+------------------+
[3 rows x 2 columns]
>>> sf.unpack('wc')
+----+------+------+
| id | wc.a | wc.b |
+----+------+------+
| 1 | 1 | None |
| 2 | None | 2 |
| 3 | 1 | 2 |
+----+------+------+
[3 rows x 3 columns]
To not have prefix in the generated column name:
>>> sf.unpack('wc', column_name_prefix="")
+----+------+------+
| id | a | b |
+----+------+------+
| 1 | 1 | None |
| 2 | None | 2 |
| 3 | 1 | 2 |
+----+------+------+
[3 rows x 3 columns]
To limit subset of keys to unpack:
>>> sf.unpack('wc', limit=['b'])
+----+------+
| id | wc.b |
+----+------+
| 1 | None |
| 2 | 2 |
| 3 | 2 |
+----+------+
        [3 rows x 2 columns]
To unpack an array column:
>>> import array
>>> sf = turicreate.SFrame({'id': [1,2,3],
... 'friends': [array.array('d', [1.0, 2.0, 3.0]),
... array.array('d', [2.0, 3.0, 4.0]),
... array.array('d', [3.0, 4.0, 5.0])]})
>>> sf
+-----------------+----+
| friends | id |
+-----------------+----+
| [1.0, 2.0, 3.0] | 1 |
| [2.0, 3.0, 4.0] | 2 |
| [3.0, 4.0, 5.0] | 3 |
+-----------------+----+
[3 rows x 2 columns]
>>> sf.unpack('friends')
+----+-----------+-----------+-----------+
| id | friends.0 | friends.1 | friends.2 |
+----+-----------+-----------+-----------+
| 1 | 1.0 | 2.0 | 3.0 |
| 2 | 2.0 | 3.0 | 4.0 |
| 3 | 3.0 | 4.0 | 5.0 |
+----+-----------+-----------+-----------+
[3 rows x 4 columns]
>>> sf = turicreate.SFrame([{'a':1,'b':2,'c':3},{'a':4,'b':5,'c':6}])
>>> sf.unpack()
+---+---+---+
| a | b | c |
+---+---+---+
| 1 | 2 | 3 |
| 4 | 5 | 6 |
+---+---+---+
[2 rows x 3 columns]
"""
if column_name is None:
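            # Auto-detect the column to unpack: pick the single list/array/dict
            # column; it is an error if several such columns exist (a name is
            # then required) or if none do.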
if self.num_columns() == 0:
raise RuntimeError("No column exists in the current SFrame")
for t in range(self.num_columns()):
column_type = self.column_types()[t]
if (
column_type == dict
or column_type == list
or column_type == array.array
):
if column_name is None:
column_name = self.column_names()[t]
else:
raise RuntimeError("Column name needed to unpack")
if column_name is None:
raise RuntimeError("No columns can be unpacked")
elif column_name_prefix is None:
column_name_prefix = ""
elif column_name not in self.column_names():
raise KeyError(
"Column '" + column_name + "' does not exist in current SFrame"
)
if column_name_prefix is None:
column_name_prefix = column_name
new_sf = self[column_name].unpack(
column_name_prefix, column_types, na_value, limit
)
# construct return SFrame, check if there is conflict
rest_columns = [name for name in self.column_names() if name != column_name]
new_names = new_sf.column_names()
while set(new_names).intersection(rest_columns):
new_names = [name + ".1" for name in new_names]
new_sf.rename(dict(list(zip(new_sf.column_names(), new_names))), inplace=True)
ret_sf = self.select_columns(rest_columns)
ret_sf.add_columns(new_sf, inplace=True)
return ret_sf
def stack(
self, column_name, new_column_name=None, drop_na=False, new_column_type=None
):
"""
Convert a "wide" column of an SFrame to one or two "tall" columns by
stacking all values.
The stack works only for columns of dict, list, or array type. If the
column is dict type, two new columns are created as a result of
stacking: one column holds the key and another column holds the value.
The rest of the columns are repeated for each key/value pair.
If the column is array or list type, one new column is created as a
        result of stacking. Each row holds one element of the array or list
        value, with the rest of the columns from the same original row repeated.
The returned SFrame includes the newly created column(s) and all
columns other than the one that is stacked.
Parameters
--------------
column_name : str
The column to stack. This column must be of dict/list/array type
new_column_name : str | list of str, optional
The new column name(s). If original column is list/array type,
            new_column_name must be a string. If original column is dict type,
new_column_name must be a list of two strings. If not given, column
names are generated automatically.
drop_na : boolean, optional
If True, missing values and empty list/array/dict are all dropped
from the resulting column(s). If False, missing values are
maintained in stacked column(s).
new_column_type : type | list of types, optional
The new column types. If original column is a list/array type
new_column_type must be a single type, or a list of one type. If
original column is of dict type, new_column_type must be a list of
two types. If not provided, the types are automatically inferred
from the first 100 values of the SFrame.
Returns
-------
out : SFrame
A new SFrame that contains newly stacked column(s) plus columns in
original SFrame other than the stacked column.
See Also
--------
unstack
Examples
---------
Suppose 'sf' is an SFrame that contains a column of dict type:
>>> sf = turicreate.SFrame({'topic':[1,2,3,4],
... 'words': [{'a':3, 'cat':2},
... {'a':1, 'the':2},
... {'the':1, 'dog':3},
... {}]
... })
+-------+----------------------+
| topic | words |
+-------+----------------------+
| 1 | {'a': 3, 'cat': 2} |
| 2 | {'a': 1, 'the': 2} |
| 3 | {'the': 1, 'dog': 3} |
| 4 | {} |
+-------+----------------------+
[4 rows x 2 columns]
Stack would stack all keys in one column and all values in another
column:
>>> sf.stack('words', new_column_name=['word', 'count'])
+-------+------+-------+
| topic | word | count |
+-------+------+-------+
| 1 | a | 3 |
| 1 | cat | 2 |
| 2 | a | 1 |
| 2 | the | 2 |
| 3 | the | 1 |
| 3 | dog | 3 |
| 4 | None | None |
+-------+------+-------+
[7 rows x 3 columns]
Observe that since topic 4 had no words, an empty row is inserted.
To drop that row, set drop_na=True in the parameters to stack.
Suppose 'sf' is an SFrame that contains a user and his/her friends,
        where the 'friends' column is of array type. Stacking on the 'friends'
        column creates one row for each user/friend pair:
>>> sf = turicreate.SFrame({'topic':[1,2,3],
... 'friends':[[2,3,4], [5,6],
... [4,5,10,None]]
... })
>>> sf
+-------+------------------+
| topic | friends |
+-------+------------------+
| 1 | [2, 3, 4] |
| 2 | [5, 6] |
| 3 | [4, 5, 10, None] |
        +-------+------------------+
[3 rows x 2 columns]
>>> sf.stack('friends', new_column_name='friend')
+-------+--------+
| topic | friend |
+-------+--------+
| 1 | 2 |
| 1 | 3 |
| 1 | 4 |
| 2 | 5 |
| 2 | 6 |
| 3 | 4 |
| 3 | 5 |
| 3 | 10 |
| 3 | None |
+-------+--------+
[9 rows x 2 columns]
"""
# validate column_name
column_name = str(column_name)
if column_name not in self.column_names():
raise ValueError(
"Cannot find column '" + str(column_name) + "' in the SFrame."
)
stack_column_type = self[column_name].dtype
if stack_column_type not in [dict, array.array, list]:
raise TypeError(
"Stack is only supported for column of dict/list/array type."
)
# user defined types. do some checking
if new_column_type is not None:
# if new_column_type is a single type, just make it a list of one type
if type(new_column_type) is type:
new_column_type = [new_column_type]
if (stack_column_type in [list, array.array]) and len(new_column_type) != 1:
raise ValueError(
"Expecting a single column type to unpack list or array columns"
)
if (stack_column_type in [dict]) and len(new_column_type) != 2:
raise ValueError("Expecting two column types to unpack a dict column")
if new_column_name is not None:
if stack_column_type == dict:
if type(new_column_name) is not list:
raise TypeError(
"new_column_name has to be a list to stack dict type"
)
elif len(new_column_name) != 2:
raise TypeError("new_column_name must have length of two")
else:
if type(new_column_name) != str:
raise TypeError("new_column_name has to be a str")
new_column_name = [new_column_name]
# check if the new column name conflicts with existing ones
for name in new_column_name:
if (name in self.column_names()) and (name != column_name):
raise ValueError(
"Column with name '"
+ name
+ "' already exists, pick a new column name"
)
else:
if stack_column_type == dict:
new_column_name = ["", ""]
else:
new_column_name = [""]
# infer column types
head_row = SArray(self[column_name].head(100)).dropna()
if len(head_row) == 0:
            raise ValueError(
                "Cannot infer column type because there are not enough rows to infer the value type"
            )
if new_column_type is None:
# we have to perform type inference
if stack_column_type == dict:
# infer key/value type
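                # e.g. head rows [{'a': 3, 'cat': 2}, {'the': 1, 'dog': 3}] give
                # keys ['a', 'cat', 'the', 'dog'] and values [3, 2, 1, 3], from
                # which the key/value column types are inferred.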
keys = []
values = []
for row in head_row:
for val in row:
keys.append(val)
if val is not None:
values.append(row[val])
new_column_type = [infer_type_of_list(keys), infer_type_of_list(values)]
else:
values = [v for v in itertools.chain.from_iterable(head_row)]
new_column_type = [infer_type_of_list(values)]
with cython_context():
return SFrame(
_proxy=self.__proxy__.stack(
column_name, new_column_name, new_column_type, drop_na
)
)
def unstack(self, column_names, new_column_name=None):
"""
Concatenate values from one or two columns into one column, grouping by
all other columns. The resulting column could be of type list, array or
dictionary. If ``column_names`` is a numeric column, the result will be of
array.array type. If ``column_names`` is a non-numeric column, the new column
will be of list type. If ``column_names`` is a list of two columns, the new
column will be of dict type where the keys are taken from the first
column in the list.
Parameters
----------
column_names : str | [str, str]
The column(s) that is(are) to be concatenated.
If str, then collapsed column type is either array or list.
If [str, str], then collapsed column type is dict
new_column_name : str, optional
New column name. If not given, a name is generated automatically.
Returns
-------
out : SFrame
A new SFrame containing the grouped columns as well as the new
column.
See Also
--------
stack : The inverse of unstack.
groupby : ``unstack`` is a special version of ``groupby`` that uses the
:mod:`~turicreate.aggregate.CONCAT` aggregator
Notes
-----
- There is no guarantee the resulting SFrame maintains the same order as
the original SFrame.
- Missing values are maintained during unstack.
- When unstacking into a dictionary, if there is more than one instance
of a given key for a particular group, an arbitrary value is selected.
Examples
--------
>>> sf = turicreate.SFrame({'count':[4, 2, 1, 1, 2, None],
... 'topic':['cat', 'cat', 'dog', 'elephant', 'elephant', 'fish'],
... 'word':['a', 'c', 'c', 'a', 'b', None]})
>>> sf.unstack(column_names=['word', 'count'], new_column_name='words')
+----------+------------------+
| topic | words |
+----------+------------------+
| elephant | {'a': 1, 'b': 2} |
| dog | {'c': 1} |
| cat | {'a': 4, 'c': 2} |
| fish | None |
+----------+------------------+
[4 rows x 2 columns]
>>> sf = turicreate.SFrame({'friend': [2, 3, 4, 5, 6, 4, 5, 2, 3],
... 'user': [1, 1, 1, 2, 2, 2, 3, 4, 4]})
>>> sf.unstack('friend', new_column_name='new name')
+------+-----------+
| user | new name |
+------+-----------+
| 3 | [5] |
| 1 | [2, 3, 4] |
| 2 | [6, 4, 5] |
| 4 | [2, 3] |
+------+-----------+
[4 rows x 2 columns]
"""
if type(column_names) != str and len(column_names) != 2:
raise TypeError(
"'column_names' parameter has to be either a string or a list of two strings."
)
with cython_context():
if type(column_names) == str:
key_columns = [i for i in self.column_names() if i != column_names]
if new_column_name is not None:
return self.groupby(
key_columns, {new_column_name: aggregate.CONCAT(column_names)}
)
else:
return self.groupby(key_columns, aggregate.CONCAT(column_names))
elif len(column_names) == 2:
key_columns = [i for i in self.column_names() if i not in column_names]
if new_column_name is not None:
return self.groupby(
key_columns,
{
new_column_name: aggregate.CONCAT(
column_names[0], column_names[1]
)
},
)
else:
return self.groupby(
key_columns, aggregate.CONCAT(column_names[0], column_names[1])
)
def unique(self):
"""
Remove duplicate rows of the SFrame. Will not necessarily preserve the
order of the given SFrame in the new SFrame.
Returns
-------
out : SFrame
A new SFrame that contains the unique rows of the current SFrame.
Raises
------
TypeError
If any column in the SFrame is a dictionary type.
See Also
--------
SArray.unique
Examples
--------
>>> sf = turicreate.SFrame({'id':[1,2,3,3,4], 'value':[1,2,3,3,4]})
>>> sf
+----+-------+
| id | value |
+----+-------+
| 1 | 1 |
| 2 | 2 |
| 3 | 3 |
| 3 | 3 |
| 4 | 4 |
+----+-------+
[5 rows x 2 columns]
>>> sf.unique()
+----+-------+
| id | value |
+----+-------+
| 2 | 2 |
| 4 | 4 |
| 3 | 3 |
| 1 | 1 |
+----+-------+
[4 rows x 2 columns]
"""
return self.groupby(self.column_names(), {})
def sort(self, key_column_names, ascending=True):
"""
Sort current SFrame by the given columns, using the given sort order.
        Only columns of type str, int, float, or datetime can be sorted.
Parameters
----------
key_column_names : str | list of str | list of (str, bool) pairs
Names of columns to be sorted. The result will be sorted first by
first column, followed by second column, and so on. All columns will
be sorted in the same order as governed by the `ascending`
parameter. To control the sort ordering for each column
individually, `key_column_names` must be a list of (str, bool) pairs.
Given this case, the first value is the column name and the second
value is a boolean indicating whether the sort order is ascending.
ascending : bool, optional
Sort all columns in the given order.
Returns
-------
out : SFrame
A new SFrame that is sorted according to given sort criteria
See Also
--------
topk
Examples
--------
Suppose 'sf' is an sframe that has three columns 'a', 'b', 'c'.
To sort by column 'a', ascending
>>> sf = turicreate.SFrame({'a':[1,3,2,1],
... 'b':['a','c','b','b'],
... 'c':['x','y','z','y']})
>>> sf
+---+---+---+
| a | b | c |
+---+---+---+
| 1 | a | x |
| 3 | c | y |
| 2 | b | z |
| 1 | b | y |
+---+---+---+
[4 rows x 3 columns]
>>> sf.sort('a')
+---+---+---+
| a | b | c |
+---+---+---+
| 1 | a | x |
| 1 | b | y |
| 2 | b | z |
| 3 | c | y |
+---+---+---+
[4 rows x 3 columns]
To sort by column 'a', descending
>>> sf.sort('a', ascending = False)
+---+---+---+
| a | b | c |
+---+---+---+
| 3 | c | y |
| 2 | b | z |
| 1 | a | x |
| 1 | b | y |
+---+---+---+
[4 rows x 3 columns]
To sort by column 'a' and 'b', all ascending
>>> sf.sort(['a', 'b'])
+---+---+---+
| a | b | c |
+---+---+---+
| 1 | a | x |
| 1 | b | y |
| 2 | b | z |
| 3 | c | y |
+---+---+---+
[4 rows x 3 columns]
To sort by column 'a' ascending, and then by column 'c' descending
>>> sf.sort([('a', True), ('c', False)])
+---+---+---+
| a | b | c |
+---+---+---+
| 1 | b | y |
| 1 | a | x |
| 2 | b | z |
| 3 | c | y |
+---+---+---+
[4 rows x 3 columns]
"""
sort_column_names = []
sort_column_orders = []
# validate key_column_names
if type(key_column_names) == str:
sort_column_names = [key_column_names]
elif type(key_column_names) == list:
if len(key_column_names) == 0:
raise ValueError("Please provide at least one column to sort")
first_param_types = set([type(i) for i in key_column_names])
if len(first_param_types) != 1:
                raise ValueError("key_column_names elements are not of the same type")
first_param_type = first_param_types.pop()
if first_param_type == tuple:
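                # e.g. [('a', True), ('c', False)] splits into column names
                # ['a', 'c'] and per-column ascending flags [True, False]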
sort_column_names = [i[0] for i in key_column_names]
sort_column_orders = [i[1] for i in key_column_names]
elif first_param_type == str:
sort_column_names = key_column_names
else:
raise TypeError("key_column_names type is not supported")
else:
            raise TypeError(
                "key_column_names type is not correct. Supported types are str, list of str, or list of (str, bool) pairs."
            )
# use the second parameter if the sort order is not given
if len(sort_column_orders) == 0:
sort_column_orders = [ascending for i in sort_column_names]
# make sure all column exists
my_column_names = set(self.column_names())
for column in sort_column_names:
if type(column) != str:
raise TypeError(
"Only string parameter can be passed in as column names"
)
if column not in my_column_names:
raise ValueError("SFrame has no column named: '" + str(column) + "'")
if self[column].dtype not in (str, int, float, datetime.datetime):
                raise TypeError("Only columns of type (str, int, float, datetime) can be sorted")
with cython_context():
return SFrame(
_proxy=self.__proxy__.sort(sort_column_names, sort_column_orders)
)
def dropna(self, columns=None, how="any", recursive=False):
"""
Remove missing values from an SFrame. A missing value is either ``None``
or ``NaN``. If ``how`` is 'any', a row will be removed if any of the
columns in the ``columns`` parameter contains at least one missing
value. If ``how`` is 'all', a row will be removed if all of the columns
in the ``columns`` parameter are missing values.
If the ``columns`` parameter is not specified, the default is to
consider all columns when searching for missing values.
Parameters
----------
columns : list or str, optional
The columns to use when looking for missing values. By default, all
columns are used.
how : {'any', 'all'}, optional
Specifies whether a row should be dropped if at least one column
has missing values, or if all columns have missing values. 'any' is
default.
        recursive : bool, optional
            Defaults to False. If set to True, the NaN check is performed
            recursively (depth-first) on each element of a cell that has a
            nested structure, such as a dict or list.
Returns
-------
out : SFrame
SFrame with missing values removed (according to the given rules).
See Also
--------
dropna_split : Drops missing rows from the SFrame and returns them.
Examples
--------
Drop all missing values.
>>> sf = turicreate.SFrame({'a': [1, None, None], 'b': ['a', 'b', None]})
>>> sf.dropna()
+---+---+
| a | b |
+---+---+
| 1 | a |
+---+---+
[1 rows x 2 columns]
Drop rows where every value is missing.
        >>> sf.dropna(how="all")
+------+---+
| a | b |
+------+---+
| 1 | a |
| None | b |
+------+---+
[2 rows x 2 columns]
Drop rows where column 'a' has a missing value.
        >>> sf.dropna('a', how="all")
+---+---+
| a | b |
+---+---+
| 1 | a |
+---+---+
[1 rows x 2 columns]
"""
        # An empty list is used internally as the indicator for "all columns",
        # but a user-supplied empty list selects no columns, so dropping values
        # would not be the expected behavior. This is a no-op, so don't bother
        # the server.
if type(columns) is list and len(columns) == 0:
return SFrame(_proxy=self.__proxy__)
(columns, all_behavior) = self.__dropna_errchk(columns, how)
with cython_context():
return SFrame(
_proxy=self.__proxy__.drop_missing_values(
columns, all_behavior, False, recursive
)
)
def dropna_split(self, columns=None, how="any", recursive=False):
"""
Split rows with missing values from this SFrame. This function has the
same functionality as :py:func:`~turicreate.SFrame.dropna`, but returns a
tuple of two SFrames. The first item is the expected output from
:py:func:`~turicreate.SFrame.dropna`, and the second item contains all the
rows filtered out by the `dropna` algorithm.
Parameters
----------
columns : list or str, optional
The columns to use when looking for missing values. By default, all
columns are used.
how : {'any', 'all'}, optional
Specifies whether a row should be dropped if at least one column
has missing values, or if all columns have missing values. 'any' is
default.
        recursive : bool, optional
            Defaults to False. If set to True, the NaN check is performed
            recursively on each element of a cell that has a nested structure,
            such as a dict or list.
Returns
-------
out : (SFrame, SFrame)
(SFrame with missing values removed,
SFrame with the removed missing values)
See Also
--------
dropna
Examples
--------
>>> sf = turicreate.SFrame({'a': [1, None, None], 'b': ['a', 'b', None]})
>>> good, bad = sf.dropna_split()
>>> good
+---+---+
| a | b |
+---+---+
| 1 | a |
+---+---+
[1 rows x 2 columns]
>>> bad
+------+------+
| a | b |
+------+------+
| None | b |
| None | None |
+------+------+
[2 rows x 2 columns]
"""
        # An empty list is used internally as the indicator for "all columns",
        # but a user-supplied empty list selects no columns, so dropping values
        # would not be the expected behavior. This is a no-op, so don't bother
        # the server.
if type(columns) is list and len(columns) == 0:
return (SFrame(_proxy=self.__proxy__), SFrame())
(columns, all_behavior) = self.__dropna_errchk(columns, how)
sframe_tuple = self.__proxy__.drop_missing_values(
columns, all_behavior, True, recursive
)
if len(sframe_tuple) != 2:
raise RuntimeError("Did not return two SFrames!")
with cython_context():
return (SFrame(_proxy=sframe_tuple[0]), SFrame(_proxy=sframe_tuple[1]))
def __dropna_errchk(self, columns, how):
if columns is None:
# Default behavior is to consider every column, specified to
            # the server by an empty list (to avoid sending all the column
            # names in this case, since it is the most common)
columns = list()
elif type(columns) is str:
columns = [columns]
elif type(columns) is not list:
raise TypeError("Must give columns as a list, str, or 'None'")
else:
# Verify that we are only passing strings in our list
list_types = set([type(i) for i in columns])
if (str not in list_types) or (len(list_types) > 1):
raise TypeError("All columns must be of 'str' type")
if how not in ["any", "all"]:
raise ValueError("Must specify 'any' or 'all'")
if how == "all":
all_behavior = True
else:
all_behavior = False
return (columns, all_behavior)
def fillna(self, column_name, value):
"""
Fill all missing values with a given value in a given column. If the
``value`` is not the same type as the values in ``column_name``, this method
attempts to convert the value to the original column's type. If this
fails, an error is raised.
Parameters
----------
column_name : str
The name of the column to modify.
value : type convertible to SArray's type
The value used to replace all missing values.
Returns
-------
out : SFrame
A new SFrame with the specified value in place of missing values.
See Also
--------
dropna
Examples
--------
>>> sf = turicreate.SFrame({'a':[1, None, None],
... 'b':['13.1', '17.2', None]})
>>> sf = sf.fillna('a', 0)
>>> sf
+---+------+
| a | b |
+---+------+
| 1 | 13.1 |
| 0 | 17.2 |
| 0 | None |
+---+------+
[3 rows x 2 columns]
"""
# Normal error checking
if type(column_name) is not str:
raise TypeError("column_name must be a str")
ret = self[self.column_names()]
ret[column_name] = ret[column_name].fillna(value)
return ret
def add_row_number(self, column_name="id", start=0, inplace=False):
"""
Returns an SFrame with a new column that numbers each row
sequentially. By default the count starts at 0, but this can be changed
to a positive or negative number. The new column will be named with
the given column name. An error will be raised if the given column
name already exists in the SFrame.
If inplace == False (default) this operation does not modify the
current SFrame, returning a new SFrame.
If inplace == True, this operation modifies the current
SFrame, returning self.
Parameters
----------
column_name : str, optional
The name of the new column that will hold the row numbers.
start : int, optional
The number used to start the row number count.
inplace : bool, optional. Defaults to False.
Whether the SFrame is modified in place.
Returns
-------
out : SFrame
            The new SFrame with the added row-number column.
Notes
-----
The range of numbers is constrained by a signed 64-bit integer, so
beware of overflow if you think the results in the row number column
will be greater than 9 quintillion.
Examples
--------
>>> sf = turicreate.SFrame({'a': [1, None, None], 'b': ['a', 'b', None]})
>>> sf.add_row_number()
+----+------+------+
| id | a | b |
+----+------+------+
| 0 | 1 | a |
| 1 | None | b |
| 2 | None | None |
+----+------+------+
[3 rows x 3 columns]
"""
if type(column_name) is not str:
            raise TypeError("Must give column_name as a str")
if type(start) is not int:
raise TypeError("Must give start as int")
if column_name in self.column_names():
raise RuntimeError(
"Column '" + column_name + "' already exists in the current SFrame"
)
the_col = _create_sequential_sarray(self.num_rows(), start)
# Make sure the row number column is the first column
new_sf = SFrame()
new_sf.add_column(the_col, column_name, inplace=True)
new_sf.add_columns(self, inplace=True)
if inplace:
self.__proxy__ = new_sf.__proxy__
return self
else:
return new_sf
@property
def shape(self):
"""
The shape of the SFrame, in a tuple. The first entry is the number of
rows, the second is the number of columns.
Examples
--------
>>> sf = turicreate.SFrame({'id':[1,2,3], 'val':['A','B','C']})
>>> sf.shape
(3, 2)
"""
return (self.num_rows(), self.num_columns())
@property
def __proxy__(self):
return self._proxy
@__proxy__.setter
def __proxy__(self, value):
assert type(value) is UnitySFrameProxy
self._cache = None
self._proxy = value
|
edb/tools/gen_test_dumps.py | aaronbrighton/edgedb | 7,302 | 11134914 | #
# This source file is part of the EdgeDB open source project.
#
# Copyright 2020-present MagicStack Inc. and the EdgeDB authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import annotations
from typing import *
import asyncio
import os
import pathlib
import shutil
import sys
import tempfile
import unittest
import click
from edb import buildmeta
from edb.server import cluster as edgedb_cluster
from edb.testbase import server as tb
from edb.tools.edb import edbcommands
class TestResult:
def wasSuccessful(self):
return True
class TestRunner:
def __init__(self):
self.cases = set()
def run(self, test):
self.cases.update(tb.get_test_cases([test]))
return TestResult()
async def execute(
tests_dir: str,
conn: Dict[str, Any],
num_workers: int,
version: str,
) -> None:
runner = TestRunner()
unittest.main(
module=None,
argv=["unittest", "discover", "-s", tests_dir],
testRunner=runner,
exit=False,
)
setup_scripts = tb.get_test_cases_setup(runner.cases)
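    # Keep only the test cases that declare STABLE_DUMP, keyed by the name of
    # the database their setup scripts create.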
dump_cases = {
db_name: case
for case, db_name, _ss in setup_scripts
if getattr(case, "STABLE_DUMP", False)
}
await tb.setup_test_cases(list(dump_cases.values()), conn, num_workers)
dumps_dir = pathlib.Path(tests_dir) / "dumps"
db_friendly_version = version.split("+", 1)[0]
db_friendly_version = db_friendly_version.replace("-alpha.", "a")
db_friendly_version = db_friendly_version.replace("-beta.", "b")
db_friendly_version = db_friendly_version.replace("-rc.", "rc")
db_friendly_version = db_friendly_version.replace("-", "_")
db_friendly_version = db_friendly_version.replace(".", "_")
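    # e.g. "1.0-alpha.5+d20200101" becomes "1_0a5", which is safe to use in a
    # dump file name.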
for db_name in dump_cases:
with tempfile.NamedTemporaryFile() as f:
tb.CLITestCaseMixin.run_cli_on_connection(
conn, "-d", db_name, "dump", f.name
)
db_dumps_dir = dumps_dir / db_name
db_dumps_dir.mkdir(exist_ok=True)
dump_p = (db_dumps_dir / db_friendly_version).with_suffix(".dump")
shutil.copy(f.name, dump_p)
print(f"Dumped {dump_p}")
def die(msg):
print(f"FATAL: {msg}", file=sys.stderr)
sys.exit(1)
@edbcommands.command("gen-test-dumps")
@click.option(
"-t",
"--tests-dir",
type=str,
default=str(
pathlib.Path(__file__).parent.parent.parent.resolve() / "tests"
),
help="directory to start dump test discovery from",
)
@click.option(
"-j",
"--jobs",
type=int,
default=lambda: round((os.cpu_count() or 1) * 0.75),
help="number of parallel processes to use",
)
def gen_test_dumps(*, jobs, tests_dir):
if not jobs:
jobs = os.cpu_count()
with tempfile.TemporaryDirectory(
dir="/tmp/", prefix="edb_gen-test-dumps_"
) as data_dir:
asyncio.run(
_gen_test_dumps(
tests_dir=tests_dir,
data_dir=data_dir,
jobs=jobs,
),
)
async def _gen_test_dumps(*, jobs: int, tests_dir: str, data_dir: str) -> None:
version = str(buildmeta.get_version())
cluster = edgedb_cluster.Cluster(pathlib.Path(data_dir), testmode=True)
print(
f"Generating test dumps for version {version}"
f" with a temporary EdgeDB instance in {data_dir}..."
)
try:
await cluster.init()
await cluster.start(port=0)
await cluster.trust_local_connections()
except BaseException:
raise
conn = cluster.get_connect_args()
try:
        await execute(tests_dir, conn, num_workers=jobs, version=version)
except BaseException:
raise
finally:
cluster.stop()
cluster.destroy()
|
test/functional/test_key.py | Eternity-labs/py-ipfs-http-client | 186 | 11134925 | def test_add_list_rename_rm(client):
# Remove keys if they already exist
key_list = list(map(lambda k: k["Name"], client.key.list()["Keys"]))
if "ipfshttpclient-test-rsa" in key_list:
client.key.rm("ipfshttpclient-test-rsa")
if "ipfshttpclient-test-ed" in key_list:
client.key.rm("ipfshttpclient-test-ed")
# Add new RSA and ED25519 key
key1 = client.key.gen("ipfshttpclient-test-rsa", "rsa")["Name"]
key2 = client.key.gen("ipfshttpclient-test-ed", "ed25519")["Name"]
# Validate the keys exist now
key_list = list(map(lambda k: k["Name"], client.key.list()["Keys"]))
assert key1 in key_list
assert key2 in key_list
# Rename the EC key
key2_new = client.key.rename(key2, "ipfshttpclient-test-ed2")["Now"]
# Validate that the key was successfully renamed
key_list = list(map(lambda k: k["Name"], client.key.list()["Keys"]))
assert key1 in key_list
assert key2 not in key_list
assert key2_new in key_list
# Drop both keys with one request
client.key.rm(key1, key2_new)
# Validate that the keys are gone again
key_list = list(map(lambda k: k["Name"], client.key.list()["Keys"]))
assert key1 not in key_list
assert key2_new not in key_list |
bit_tf2/normalization.py | ZTH-NEU/big_transfer | 1,377 | 11134956 |
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Group normalization."""
import tensorflow.compat.v2 as tf
def group_normalize(x, gamma, beta, num_groups=None, group_size=None, eps=1e-5):
"""Applies group-normalization to NHWC `x` (see abs/1803.08494, go/dune-gn).
This function just does the math, if you want a "layer" that creates the
necessary variables etc., see `group_norm` below.
You must either specify a fixed number of groups `num_groups`, which will
automatically select a corresponding group size depending on the input's
number of channels, or you must specify a `group_size`, which leads to an
automatic number of groups depending on the input's number of channels.
Args:
x: N..C-tensor, the input to group-normalize. For images, this would be a
NHWC-tensor, for time-series a NTC, for videos a NHWTC or NTHWC, all of
them work, as normalization includes everything between N and C. Even just
NC shape works, as C is grouped and normalized.
gamma: tensor with C entries, learnable scale after normalization.
beta: tensor with C entries, learnable bias after normalization.
num_groups: int, number of groups to normalize over (divides C).
group_size: int, size of the groups to normalize over (divides C).
eps: float, a small additive constant to avoid /sqrt(0).
Returns:
Group-normalized `x`, of the same shape and type as `x`.
Author: <NAME>
"""
assert x.shape.ndims >= 2, (
"Less than 2-dim Tensor passed to GroupNorm. Something's fishy.")
num_channels = x.shape[-1]
assert num_channels is not None, "Cannot apply GroupNorm on dynamic channels."
assert (num_groups is None) != (group_size is None), (
"You must specify exactly one of `num_groups`, `group_size`")
if group_size is not None:
num_groups = num_channels // group_size
assert num_channels % num_groups == 0, (
"GroupNorm: {} not divisible by {}".format(num_channels, num_groups))
orig_shape = tf.shape(x)
# This shape is NHWGS where G is #groups and S is group-size.
extra_shape = [num_groups, num_channels // num_groups]
group_shape = tf.concat([orig_shape[:-1], extra_shape], axis=-1)
x = tf.reshape(x, group_shape)
# The dimensions to normalize over: HWS for images, but more generally all
# dimensions except N (batch, first) and G (cross-groups, next-to-last).
# So more visually, normdims are the dots in N......G. (note the last one is
# also a dot, not a full-stop, argh!)
normdims = list(range(1, x.shape.ndims - 2)) + [x.shape.ndims - 1]
mean, var = tf.nn.moments(x, normdims, keepdims=True)
# Interestingly, we don't have a beta/gamma per group, but still one per
# channel, at least according to the original paper. Reshape such that they
# broadcast correctly.
beta = tf.reshape(beta, extra_shape)
gamma = tf.reshape(gamma, extra_shape)
x = tf.nn.batch_normalization(x, mean, var, beta, gamma, eps)
return tf.reshape(x, orig_shape)
class GroupNormalization(tf.keras.layers.Layer):
"""A group-norm "layer" (see abs/1803.08494 go/dune-gn).
This function creates beta/gamma variables in a name_scope, and uses them to
apply `group_normalize` on the input `x`.
You can either specify a fixed number of groups `num_groups`, which will
automatically select a corresponding group size depending on the input's
number of channels, or you must specify a `group_size`, which leads to an
automatic number of groups depending on the input's number of channels.
If you specify neither, the paper's recommended `num_groups=32` is used.
Authors: <NAME>, <NAME>.
"""
def __init__(self,
num_groups=None,
group_size=None,
eps=1e-5,
beta_init=tf.zeros_initializer(),
gamma_init=tf.ones_initializer(),
**kwargs):
"""Initializer.
Args:
num_groups: int, the number of channel-groups to normalize over.
group_size: int, size of the groups to normalize over.
eps: float, a small additive constant to avoid /sqrt(0).
beta_init: initializer for bias, defaults to zeros.
gamma_init: initializer for scale, defaults to ones.
**kwargs: other tf.keras.layers.Layer arguments.
"""
super(GroupNormalization, self).__init__(**kwargs)
if num_groups is None and group_size is None:
num_groups = 32
self._num_groups = num_groups
self._group_size = group_size
self._eps = eps
self._beta_init = beta_init
self._gamma_init = gamma_init
def build(self, input_size):
channels = input_size[-1]
assert channels is not None, "Cannot apply GN on dynamic channels."
self._gamma = self.add_weight(
name="gamma", shape=(channels,), initializer=self._gamma_init,
dtype=self.dtype)
self._beta = self.add_weight(
name="beta", shape=(channels,), initializer=self._beta_init,
dtype=self.dtype)
super(GroupNormalization, self).build(input_size)
def call(self, x):
return group_normalize(x, self._gamma, self._beta, self._num_groups,
self._group_size, self._eps)
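def _group_norm_usage_sketch():
  """Minimal usage sketch (illustrative only; the shapes and group count are
  arbitrary assumptions, not part of the BiT code): both the functional form
  and the Keras layer normalize an NHWC tensor and preserve its shape."""
  x = tf.random.normal([2, 8, 8, 32])
  gamma = tf.ones([32])
  beta = tf.zeros([32])
  y_fn = group_normalize(x, gamma, beta, num_groups=8)
  y_layer = GroupNormalization(num_groups=8)(x)
  assert tuple(y_fn.shape) == tuple(x.shape) == tuple(y_layer.shape)
  return y_fn, y_layer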
|
test.py | Kamu1403/BBAVectors-Oriented-Object-Detection | 356 | 11134959 |
import torch
import numpy as np
import cv2
import time
import os
import matplotlib.pyplot as plt
import func_utils
def apply_mask(image, mask, alpha=0.5):
"""Apply the given mask to the image.
"""
color = np.random.rand(3)
for c in range(3):
image[:, :, c] = np.where(mask == 1,
image[:, :, c] *
(1 - alpha) + alpha * color[c] * 255,
image[:, :, c])
return image
class TestModule(object):
def __init__(self, dataset, num_classes, model, decoder):
torch.manual_seed(317)
self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
self.dataset = dataset
self.num_classes = num_classes
self.model = model
self.decoder = decoder
def load_model(self, model, resume):
checkpoint = torch.load(resume, map_location=lambda storage, loc: storage)
print('loaded weights from {}, epoch {}'.format(resume, checkpoint['epoch']))
state_dict_ = checkpoint['model_state_dict']
model.load_state_dict(state_dict_, strict=True)
return model
def map_mask_to_image(self, mask, img, color=None):
if color is None:
color = np.random.rand(3)
mask = np.repeat(mask[:, :, np.newaxis], 3, axis=2)
mskd = img * mask
clmsk = np.ones(mask.shape) * mask
clmsk[:, :, 0] = clmsk[:, :, 0] * color[0] * 256
clmsk[:, :, 1] = clmsk[:, :, 1] * color[1] * 256
clmsk[:, :, 2] = clmsk[:, :, 2] * color[2] * 256
img = img + 1. * clmsk - 1. * mskd
return np.uint8(img)
def imshow_heatmap(self, pr_dec, images):
wh = pr_dec['wh']
hm = pr_dec['hm']
cls_theta = pr_dec['cls_theta']
wh_w = wh[0, 0, :, :].data.cpu().numpy()
wh_h = wh[0, 1, :, :].data.cpu().numpy()
hm = hm[0, 0, :, :].data.cpu().numpy()
cls_theta = cls_theta[0, 0, :, :].data.cpu().numpy()
images = np.transpose((images.squeeze(0).data.cpu().numpy() + 0.5) * 255, (1, 2, 0)).astype(np.uint8)
wh_w = cv2.resize(wh_w, (images.shape[1], images.shape[0]))
wh_h = cv2.resize(wh_h, (images.shape[1], images.shape[0]))
hm = cv2.resize(hm, (images.shape[1], images.shape[0]))
fig = plt.figure(1)
ax1 = fig.add_subplot(2, 3, 1)
ax1.set_xlabel('width')
ax1.imshow(wh_w)
ax2 = fig.add_subplot(2, 3, 2)
ax2.set_xlabel('height')
ax2.imshow(wh_h)
ax3 = fig.add_subplot(2, 3, 3)
ax3.set_xlabel('center hm')
ax3.imshow(hm)
ax5 = fig.add_subplot(2, 3, 5)
ax5.set_xlabel('input image')
ax5.imshow(cls_theta)
ax6 = fig.add_subplot(2, 3, 6)
ax6.set_xlabel('input image')
ax6.imshow(images)
plt.savefig('heatmap.png')
def test(self, args, down_ratio):
save_path = 'weights_'+args.dataset
self.model = self.load_model(self.model, os.path.join(save_path, args.resume))
self.model = self.model.to(self.device)
self.model.eval()
dataset_module = self.dataset[args.dataset]
dsets = dataset_module(data_dir=args.data_dir,
phase='test',
input_h=args.input_h,
input_w=args.input_w,
down_ratio=down_ratio)
data_loader = torch.utils.data.DataLoader(dsets,
batch_size=1,
shuffle=False,
num_workers=1,
pin_memory=True)
total_time = []
for cnt, data_dict in enumerate(data_loader):
image = data_dict['image'][0].to(self.device)
img_id = data_dict['img_id'][0]
print('processing {}/{} image ...'.format(cnt, len(data_loader)))
begin_time = time.time()
with torch.no_grad():
pr_decs = self.model(image)
#self.imshow_heatmap(pr_decs[2], image)
torch.cuda.synchronize(self.device)
decoded_pts = []
decoded_scores = []
predictions = self.decoder.ctdet_decode(pr_decs)
pts0, scores0 = func_utils.decode_prediction(predictions, dsets, args, img_id, down_ratio)
decoded_pts.append(pts0)
decoded_scores.append(scores0)
#nms
results = {cat:[] for cat in dsets.category}
for cat in dsets.category:
if cat == 'background':
continue
pts_cat = []
scores_cat = []
for pts0, scores0 in zip(decoded_pts, decoded_scores):
pts_cat.extend(pts0[cat])
scores_cat.extend(scores0[cat])
pts_cat = np.asarray(pts_cat, np.float32)
scores_cat = np.asarray(scores_cat, np.float32)
if pts_cat.shape[0]:
nms_results = func_utils.non_maximum_suppression(pts_cat, scores_cat)
results[cat].extend(nms_results)
end_time = time.time()
total_time.append(end_time-begin_time)
#"""
ori_image = dsets.load_image(cnt)
height, width, _ = ori_image.shape
# ori_image = cv2.resize(ori_image, (args.input_w, args.input_h))
# ori_image = cv2.resize(ori_image, (args.input_w//args.down_ratio, args.input_h//args.down_ratio))
#nms
for cat in dsets.category:
if cat == 'background':
continue
result = results[cat]
for pred in result:
score = pred[-1]
tl = np.asarray([pred[0], pred[1]], np.float32)
tr = np.asarray([pred[2], pred[3]], np.float32)
br = np.asarray([pred[4], pred[5]], np.float32)
bl = np.asarray([pred[6], pred[7]], np.float32)
tt = (np.asarray(tl, np.float32) + np.asarray(tr, np.float32)) / 2
rr = (np.asarray(tr, np.float32) + np.asarray(br, np.float32)) / 2
bb = (np.asarray(bl, np.float32) + np.asarray(br, np.float32)) / 2
ll = (np.asarray(tl, np.float32) + np.asarray(bl, np.float32)) / 2
box = np.asarray([tl, tr, br, bl], np.float32)
cen_pts = np.mean(box, axis=0)
cv2.line(ori_image, (int(cen_pts[0]), int(cen_pts[1])), (int(tt[0]), int(tt[1])), (0,0,255),1,1)
cv2.line(ori_image, (int(cen_pts[0]), int(cen_pts[1])), (int(rr[0]), int(rr[1])), (255,0,255),1,1)
cv2.line(ori_image, (int(cen_pts[0]), int(cen_pts[1])), (int(bb[0]), int(bb[1])), (0,255,0),1,1)
cv2.line(ori_image, (int(cen_pts[0]), int(cen_pts[1])), (int(ll[0]), int(ll[1])), (255,0,0),1,1)
# cv2.line(ori_image, (int(cen_pts[0]), int(cen_pts[1])), (int(tl[0]), int(tl[1])), (0,0,255),1,1)
# cv2.line(ori_image, (int(cen_pts[0]), int(cen_pts[1])), (int(tr[0]), int(tr[1])), (255,0,255),1,1)
# cv2.line(ori_image, (int(cen_pts[0]), int(cen_pts[1])), (int(br[0]), int(br[1])), (0,255,0),1,1)
# cv2.line(ori_image, (int(cen_pts[0]), int(cen_pts[1])), (int(bl[0]), int(bl[1])), (255,0,0),1,1)
ori_image = cv2.drawContours(ori_image, [np.int0(box)], -1, (255,0,255),1,1)
# box = cv2.boxPoints(cv2.minAreaRect(box))
# ori_image = cv2.drawContours(ori_image, [np.int0(box)], -1, (0,255,0),1,1)
cv2.putText(ori_image, '{:.2f} {}'.format(score, cat), (box[1][0], box[1][1]),
cv2.FONT_HERSHEY_COMPLEX, 0.5, (0,255,255), 1,1)
if args.dataset == 'hrsc':
gt_anno = dsets.load_annotation(cnt)
for pts_4 in gt_anno['pts']:
bl = pts_4[0, :]
tl = pts_4[1, :]
tr = pts_4[2, :]
br = pts_4[3, :]
cen_pts = np.mean(pts_4, axis=0)
box = np.asarray([bl, tl, tr, br], np.float32)
box = np.int0(box)
cv2.drawContours(ori_image, [box], 0, (255, 255, 255), 1)
cv2.imshow('pr_image', ori_image)
k = cv2.waitKey(0) & 0xFF
if k == ord('q'):
cv2.destroyAllWindows()
exit()
#"""
total_time = total_time[1:]
print('avg time is {}'.format(np.mean(total_time)))
print('FPS is {}'.format(1./np.mean(total_time)))
|
backend/database/lisence.py | uwer/coco-annotator | 1,584 | 11134982 |
from mongoengine import *
class LicenseModel(DynamicDocument):
id = SequenceField(primary_key=True)
name = StringField()
url = StringField()
__all__ = ["LicenseModel"] |
alipay/aop/api/domain/AlipayOpenMiniInnerclientinfoCreateModel.py | antopen/alipay-sdk-python-all | 213 | 11134984 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AlipayOpenMiniInnerclientinfoCreateModel(object):
def __init__(self):
self._bundle_id = None
self._bundle_name = None
self._bundle_prefix = None
self._inst_code = None
@property
def bundle_id(self):
return self._bundle_id
@bundle_id.setter
def bundle_id(self, value):
self._bundle_id = value
@property
def bundle_name(self):
return self._bundle_name
@bundle_name.setter
def bundle_name(self, value):
self._bundle_name = value
@property
def bundle_prefix(self):
return self._bundle_prefix
@bundle_prefix.setter
def bundle_prefix(self, value):
self._bundle_prefix = value
@property
def inst_code(self):
return self._inst_code
@inst_code.setter
def inst_code(self, value):
self._inst_code = value
def to_alipay_dict(self):
params = dict()
if self.bundle_id:
if hasattr(self.bundle_id, 'to_alipay_dict'):
params['bundle_id'] = self.bundle_id.to_alipay_dict()
else:
params['bundle_id'] = self.bundle_id
if self.bundle_name:
if hasattr(self.bundle_name, 'to_alipay_dict'):
params['bundle_name'] = self.bundle_name.to_alipay_dict()
else:
params['bundle_name'] = self.bundle_name
if self.bundle_prefix:
if hasattr(self.bundle_prefix, 'to_alipay_dict'):
params['bundle_prefix'] = self.bundle_prefix.to_alipay_dict()
else:
params['bundle_prefix'] = self.bundle_prefix
if self.inst_code:
if hasattr(self.inst_code, 'to_alipay_dict'):
params['inst_code'] = self.inst_code.to_alipay_dict()
else:
params['inst_code'] = self.inst_code
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AlipayOpenMiniInnerclientinfoCreateModel()
if 'bundle_id' in d:
o.bundle_id = d['bundle_id']
if 'bundle_name' in d:
o.bundle_name = d['bundle_name']
if 'bundle_prefix' in d:
o.bundle_prefix = d['bundle_prefix']
if 'inst_code' in d:
o.inst_code = d['inst_code']
return o
|
pyhindsight/utils.py | kumavis/hindsight | 730 | 11134991 |
import datetime
import json
import logging
import os
import pytz
import shutil
import sqlite3
import struct
from pyhindsight import __version__
from pathlib import Path
log = logging.getLogger(__name__)
def dict_factory(cursor, row):
d = {}
for idx, col in enumerate(cursor.description):
d[col[0]] = row[idx]
return d
def text_factory(row_data):
try:
return row_data.decode('utf-8')
except UnicodeDecodeError:
return row_data
def open_sqlite_db(chrome, database_path, database_name):
log.info(f' - Reading from {database_name} in {database_path}')
if chrome.no_copy:
db_path_to_open = os.path.join(database_path, database_name)
else:
try:
# Create 'temp' directory if doesn't exists
Path(chrome.temp_dir).mkdir(parents=True, exist_ok=True)
# Copy database to temp directory
db_path_to_open = os.path.join(chrome.temp_dir, database_name)
shutil.copyfile(os.path.join(database_path, database_name), db_path_to_open)
except Exception as e:
log.error(f' - Error copying {database_name}: {e}')
return None
try:
# Connect to copied database
db_conn = sqlite3.connect(db_path_to_open)
# Use a dictionary cursor
db_conn.row_factory = dict_factory
db_conn.text_factory = text_factory
except Exception as e:
log.error(f' - Error opening {database_name}: {e}')
return None
return db_conn
def format_plugin_output(name, version, items):
width = 80
left_side = width * 0.55
full_plugin_name = "{} (v{})".format(name, version)
pretty_name = "{name:>{left_width}}:{count:^{right_width}}" \
.format(name=full_plugin_name, left_width=int(left_side), version=version, count=' '.join(['-', items, '-']),
right_width=(width - int(left_side) - 2))
return pretty_name
def format_meta_output(name, content):
left_side = 17
pretty_name = "{name:>{left_width}}: {content}" \
.format(name=name, left_width=int(left_side), content=content)
return pretty_name
class MyEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, datetime.datetime):
return obj.isoformat()
elif isinstance(obj, bytes):
return str(obj, encoding='utf-8', errors='replace')
else:
return obj.__dict__
def to_datetime(timestamp, timezone=None):
"""Convert a variety of timestamp formats to a datetime object."""
try:
if isinstance(timestamp, datetime.datetime):
return timestamp
try:
timestamp = float(timestamp)
except Exception as e:
log.warning(f'Exception parsing {timestamp} to datetime: {e}')
return datetime.datetime.fromtimestamp(0)
# Really big Webkit microseconds (18 digits), most often cookie expiry dates.
if timestamp >= 253402300800000000:
new_timestamp = datetime.datetime.max
log.warning(f'Timestamp value {timestamp} is too large to convert; replaced with {datetime.datetime.max}')
# Microsecond timestamps past 2038 can be problematic with datetime.utcfromtimestamp(timestamp).
elif timestamp > 13700000000000000:
new_timestamp = datetime.datetime.fromtimestamp(0) \
+ datetime.timedelta(seconds=(timestamp / 1000000) - 11644473600)
# Webkit microseconds (17 digits)
elif timestamp > 12000000000000000: # ts > 1981
new_timestamp = datetime.datetime.utcfromtimestamp((timestamp / 1000000) - 11644473600)
# Epoch microseconds (16 digits)
elif 2500000000000000 > timestamp > 1280000000000000: # 2049 > ts > 2010
new_timestamp = datetime.datetime.utcfromtimestamp(timestamp / 1000000)
# Epoch milliseconds (13 digits)
elif 2500000000000 > timestamp > 1280000000000: # 2049 > ts > 2010
new_timestamp = datetime.datetime.utcfromtimestamp(timestamp / 1000)
# Webkit seconds (11 digits)
elif 15000000000 > timestamp >= 12900000000: # 2076 > ts > 2009
new_timestamp = datetime.datetime.utcfromtimestamp(timestamp - 11644473600)
# Epoch seconds (10 digits typically, but could be less)
else:
try:
new_timestamp = datetime.datetime.utcfromtimestamp(timestamp)
except OSError as e:
log.warning(f'Exception parsing {timestamp} to datetime: {e}; '
f'common issue is value is too big for the OS to convert it')
return datetime.datetime.utcfromtimestamp(0)
if timezone is not None:
try:
return new_timestamp.replace(tzinfo=pytz.utc).astimezone(timezone)
except NameError:
return new_timestamp
else:
return new_timestamp
except Exception as e:
log.warning(f'Exception parsing {timestamp} to datetime: {e}')
return datetime.datetime.utcfromtimestamp(0)
def friendly_date(timestamp):
if isinstance(timestamp, (str, int)):
return to_datetime(timestamp).strftime("%Y-%m-%d %H:%M:%S.%f")[:-3]
elif timestamp is None:
return ''
else:
return timestamp.strftime("%Y-%m-%d %H:%M:%S.%f")[:-3]
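def _timestamp_parsing_examples():
    """Illustrative only (sample values, not real artifacts, and not called by
    pyhindsight): shows how to_datetime()/friendly_date() bucket inputs by
    magnitude; all three values below land on 2016-02-15 (UTC)."""
    return [
        friendly_date(13100000000000000),  # Webkit microseconds (17 digits)
        friendly_date(1455500000000),      # epoch milliseconds (13 digits)
        friendly_date(1455500000),         # epoch seconds (10 digits)
    ]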
def get_ldb_records(ldb_path, prefix=''):
"""Open a LevelDB at given path and return a list of records, optionally
filtered by a prefix string. Key and value are kept as byte strings."""
try:
from pyhindsight.lib.ccl_chrome_indexeddb import ccl_leveldb
except ImportError:
log.warning(f' - Failed to import ccl_leveldb; unable to process {ldb_path}')
return []
# The ldb key and value are both bytearrays, so the prefix must be too. We allow
# passing the prefix into this function as a string for convenience.
if isinstance(prefix, str):
prefix = prefix.encode()
try:
db = ccl_leveldb.RawLevelDb(ldb_path)
except Exception as e:
log.warning(f' - Could not open {ldb_path} as LevelDB; {e}')
return []
cleaned_records = []
try:
for record in db.iterate_records_raw():
cleaned_record = record.__dict__
if record.file_type.name == 'Ldb':
cleaned_record['key'] = record.key[:-8]
if cleaned_record['key'].startswith(prefix):
cleaned_record['key'] = cleaned_record['key'][len(prefix):]
cleaned_record['state'] = cleaned_record['state'].name
cleaned_record['file_type'] = cleaned_record['file_type'].name
cleaned_records.append(cleaned_record)
except ValueError:
log.warning(f' - Exception reading LevelDB: ValueError')
except Exception as e:
log.warning(f' - Exception reading LevelDB: {e}')
db.close()
return cleaned_records
def read_varint(source):
result = 0
bytes_used = 0
for read in source:
result |= ((read & 0x7F) << (bytes_used * 7))
bytes_used += 1
if (read & 0x80) != 0x80:
return result, bytes_used
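def _read_varint_example():
    """Illustrative only (not called by pyhindsight): 300 is LEB128-encoded as
    the two bytes 0xAC 0x02, so read_varint returns (300, 2) for that input."""
    assert read_varint(b'\xac\x02') == (300, 2)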
def read_string(input_bytes, ptr):
length = struct.unpack('<i', input_bytes[ptr:ptr+4])[0]
ptr += 4
end_ptr = ptr+length
string_value = input_bytes[ptr:end_ptr]
while end_ptr % 4 != 0:
end_ptr += 1
return string_value.decode(), end_ptr
def read_int32(input_bytes, ptr):
value = struct.unpack('<i', input_bytes[ptr:ptr + 4])[0]
return value, ptr + 4
def read_int64(input_bytes, ptr):
value = struct.unpack('<Q', input_bytes[ptr:ptr + 8])[0]
return value, ptr + 8
#
# def create_temp_db(path, database):
#
# # Create 'temp' directory if doesn't exists
# Path(temp_directory_name).mkdir(parents=True, exist_ok=True)
#
# # Copy database to temp directory
# shutil.copyfile(os.path.join(path, database), os.path.join(temp_directory_name, database))
#
# def get_temp_db_directory():
# return temp_directory_name
banner = r'''
################################################################################
_ _ _ _ _ _
| | (_) | | (_) | | | |
| |__ _ _ __ __| |___ _ __ _| |__ | |_
| '_ \| | '_ \ / _` / __| |/ _` | '_ \| __|
| | | | | | | | (_| \__ \ | (_| | | | | |_
|_| |_|_|_| |_|\__,_|___/_|\__, |_| |_|\__|
__/ |
by @_RyanBenson |___/ v{}
################################################################################
'''.format(__version__)
|
cctbx/maptbx/tst_interpolation_2.py | dperl-sol/cctbx_project | 155 | 11134997 |
from __future__ import absolute_import, division, print_function
from cctbx.development import random_structure
from cctbx.sgtbx import space_group_info
import boost_adaptbx.boost.python as bp
from six.moves import range
ext = bp.import_ext("cctbx_asymmetric_map_ext")
from cctbx_asymmetric_map_ext import *
from cctbx.array_family import flex
from cctbx import miller
from cctbx import maptbx # import dependency
from libtbx.test_utils import approx_equal
import time, random
if(1):
random.seed(0)
flex.set_random_seed(0)
def get_map(xrs,ms):
fc = ms.structure_factors_from_scatterers(
xray_structure=xrs, algorithm="direct").f_calc()
fft_map = fc.fft_map(resolution_factor=1./10)
fft_map.apply_sigma_scaling()
return fft_map.real_map_unpadded()
def get_err(x,y):
return abs(x-y)/abs(x+y)*2*100
def run_group(symbol, err_l, err_q, err_t, err_lt1, err_lt2, err_lt12, err_qt1):
group = space_group_info(symbol)
elements = ('C', )*10
xrs = random_structure.xray_structure(
space_group_info = group,
volume_per_atom = 25.,
general_positions_only = False,
elements = elements,
min_distance = 1.0)
ms = miller.set(xrs.crystal_symmetry(),
flex.miller_index(((1,2,3),)), False).complete_set(d_min=2)
m1 = get_map(xrs=xrs, ms=ms)
#
sites_frac = xrs.sites_frac()
for i, shift in enumerate([[-2,-3,-4],[2,3,4]]):
sf_sh = sites_frac+shift
xrs.set_sites_frac(sites_frac=sf_sh)
#
m2 = get_map(xrs=xrs, ms=ms)
#
if(i==0): assert min(sf_sh.min())<0.
else: assert min(sf_sh.min())>1.
if(i==0): assert max(sf_sh.min())<0.
else: assert max(sf_sh.min())>1.
#
for sf in sf_sh:
l1 = abs(m1.eight_point_interpolation(sf))
l2 = abs(m2.eight_point_interpolation(sf))
err_l.append(get_err(l1,l2))
#
q1 = abs(m1.quadratic_interpolation_with_gradients(sf,[1,1,1])[0])
q2 = abs(m2.quadratic_interpolation_with_gradients(sf,[1,1,1])[0])
err_q.append(get_err(q1,q2))
#
t1 = abs(m1.tricubic_interpolation_with_gradients(sf,[1,1,1])[0])
t2 = abs(m2.tricubic_interpolation_with_gradients(sf,[1,1,1])[0])
err_t.append(get_err(t1,t2))
#
err_lt1.append( get_err(l1,t1))
err_lt2.append( get_err(l2,t2))
err_lt12.append(get_err(l1,t2))
#
err_qt1.append(get_err(l1,q1))
def run():
err_l = flex.double()
err_q = flex.double()
err_t = flex.double()
err_lt1 = flex.double()
err_lt2 = flex.double()
err_lt12 = flex.double()
err_qt1 = flex.double()
for i in range(1,231):
run_group(i, err_l, err_q, err_t, err_lt1, err_lt2, err_lt12, err_qt1)
assert approx_equal([flex.mean(err_l), flex.mean(err_q), flex.mean(err_t)],
[0,0,0], 1.e-3)
assert flex.mean(err_lt1) < 2.
assert flex.mean(err_lt2) < 2.
assert flex.mean(err_lt12) < 2.
assert flex.mean(err_qt1) < 2.
if (__name__ == "__main__"):
t0 = time.time()
run()
print("Time: %6.2f"%(time.time()-t0))
print("OK")
|
mmdet/models/registry.py | Pandinosaurus/Grid-R-CNN | 272 | 11135035 | import torch.nn as nn
class Registry(object):
def __init__(self, name):
self._name = name
self._module_dict = dict()
@property
def name(self):
return self._name
@property
def module_dict(self):
return self._module_dict
def _register_module(self, module_class):
"""Register a module.
Args:
module (:obj:`nn.Module`): Module to be registered.
"""
if not issubclass(module_class, nn.Module):
raise TypeError(
'module must be a child of nn.Module, but got {}'.format(
module_class))
module_name = module_class.__name__
if module_name in self._module_dict:
raise KeyError('{} is already registered in {}'.format(
module_name, self.name))
self._module_dict[module_name] = module_class
def register_module(self, cls):
self._register_module(cls)
return cls
BACKBONES = Registry('backbone')
NECKS = Registry('neck')
ROI_EXTRACTORS = Registry('roi_extractor')
SHARED_HEADS = Registry('shared_head')
HEADS = Registry('head')
DETECTORS = Registry('detector')
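def _registry_usage_sketch():
    """Minimal usage sketch (illustrative only; ToyBackbone is a made-up class,
    not part of mmdet): decorating a class registers it under its class name,
    which is how config-driven builders later look modules up."""
    demo = Registry('demo')
    @demo.register_module
    class ToyBackbone(nn.Module):
        def forward(self, x):
            return x
    assert 'ToyBackbone' in demo.module_dict
    return demo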
|
examples/ccxt.pro/py/binance-watch-many-orderbooks.py | DavidFelsen/ccxt | 24,910 | 11135049 |
import ccxtpro
import asyncio
orderbooks = {}
def when_orderbook_changed(exchange_spot, symbol, orderbook):
# this is a common handler function
# it is called when any of the orderbook is updated
# it has access to both the orderbook that was updated
# as well as the rest of the orderbooks
# ...................................................................
print('-------------------------------------------------------------')
print('Last updated:', exchange_spot.iso8601(exchange_spot.milliseconds()))
# ...................................................................
# print just one orderbook here
# print(orderbook['datetime'], symbol, orderbook['asks'][0], orderbook['bids'][0])
# ...................................................................
# or print all orderbooks that have been already subscribed-to
for symbol, orderbook in orderbooks.items():
print(orderbook['datetime'], symbol, orderbook['asks'][0], orderbook['bids'][0])
async def watch_one_orderbook(exchange_spot, symbol):
your_delay = 1000 # <-------------------------- 1000ms
await exchange_spot.throttle(your_delay)
while True:
try:
orderbook = await exchange_spot.watch_order_book(symbol)
orderbooks[symbol] = orderbook
when_orderbook_changed(exchange_spot, symbol, orderbook)
except Exception as e:
print(type(e).__name__, str(e))
async def watch_some_orderbooks(exchange_spot, symbol_list):
loops = [watch_one_orderbook(exchange_spot, symbol) for symbol in symbol_list]
    # let them all run concurrently; gather awaits the whole group rather than each task in turn
# don't print here
await asyncio.gather(*loops)
async def main():
exchange_spot = ccxtpro.binance()
await exchange_spot.load_markets()
await watch_some_orderbooks(exchange_spot, ['ZEN/USDT', 'RUNE/USDT', 'AAVE/USDT', 'SNX/USDT'])
await exchange_spot.close()
loop = asyncio.get_event_loop()
loop.run_until_complete(main())
|
tests/test_inbuf_overflow.py | MaayanLab/aiohttp-wsgi | 244 | 11135077 |
from tests.base import AsyncTestCase, streaming_request_body, echo_application
class InbufOverflowTest(AsyncTestCase):
def testInbufOverflow(self) -> None:
with self.run_server(echo_application, inbuf_overflow=3) as client:
response = client.request(data="foobar")
self.assertEqual(response.content, b"foobar")
def testInbufOverflowStreaming(self) -> None:
with self.run_server(echo_application, inbuf_overflow=20) as client:
response = client.request(data=streaming_request_body())
self.assertEqual(response.content, b"foobar" * 100)
|
tests/monitoring/test_check_mesos_outdated_tasks.py | sobolevn/paasta | 1,711 | 11135079 | # Copyright 2015-2017 Yelp Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asynctest
from mock import patch
from paasta_tools.monitoring import check_mesos_outdated_tasks
@patch(
"paasta_tools.monitoring.check_mesos_outdated_tasks.get_mesos_master", autospec=True
)
def test_check_mesos_tasks(mock_get_mesos_master):
mock_get_mesos_master.return_value.state = asynctest.CoroutineMock(
func=asynctest.CoroutineMock(),
return_value={
"slaves": [
{
"id": "4abbb181-fd06-4729-815b-6b55cebdf8ee-S2",
"hostname": "mesos-slave1.example.com",
}
],
"frameworks": [
{
"name": "marathon",
"tasks": [
{
"state": "TASK_RUNNING",
"name": "service.instance.gitlast_SHA.config3f15fefe",
"slave_id": "4abbb181-fd06-4729-815b-6b55cebdf8ee-S2",
"statuses": [
{
"state": "TASK_RUNNING",
"timestamp": 1509392500.9267,
"container_status": {
"container_id": {
"value": "a69b426d-f283-4287-9bee-6b8811386e1a"
}
},
}
],
},
{
"state": "TASK_RUNNING",
"name": "service.instance.gitold_SHA.config3f15fefe",
"slave_id": "4abbb181-fd06-4729-815b-6b55cebdf8ee-S2",
"statuses": [
{
"state": "TASK_RUNNING",
"timestamp": 1509342500.9267,
"container_status": {
"container_id": {
"value": "a69b426d-f283-4287-9bee-6b8811386e1b"
}
},
}
],
},
],
}
],
},
)
output, remedy = check_mesos_outdated_tasks.check_mesos_tasks()
assert len(output) == 1
assert "a69b426d-f283-4287-9bee-6b8811386e1b" in output[0]
assert "old_SHA" in output[0]
|
haartrainingformat.py | nagyistoce/JoakimSoderberg-imageclipper | 131 | 11135081 | #!/usr/bin/python
import os
import re
import argparse
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--clipdir", metavar = "CLIPDIR",
help = "Path to a directory containing the clipped images "
"in the format default imageclipper output format: "
"<imgout_format = %%d/imageclipper/\%%i.%%e_%%04r_%%04x_%%04y_%%04w_%%04h.png>")
parser.add_argument("--path", metavar = "PATH", default = "",
help = "The path to prepend to the image name in the output.")
args = parser.parse_args()
clip_images = os.listdir(args.clipdir)
for img in clip_images:
if os.path.isdir(os.path.join(args.clipdir, img)):
continue
try:
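            # Example (hypothetical clip name): "img.jpg_0000_0012_0034_0100_0080.png"
            # yields basename "img.jpg" with r=0, x=12, y=34, w=100, h=80.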
m = re.match("(.*?\..*?)_(-?\d+)_(-?\d+)_(-?\d+)_(-?\d+)_(-?\d+)\..*?", img)
basename = m.group(1)
r = max(0, int(m.group(2)))
x = max(0, int(m.group(3)))
y = max(0, int(m.group(4)))
w = max(0, int(m.group(5)))
h = max(0, int(m.group(6)))
base_img_path = os.path.join(args.path, basename)
if (os.path.exists(base_img_path)):
print("%s %d %d %d %d %d" % (base_img_path, 1, x, y, w, h))
except Exception as ex:
            print(ex)
return
if __name__ == '__main__': main()
|
tool/check_link_references.py | salihgueler/website | 2,260 | 11135091 |
import re
import sys
from pathlib import Path
from typing import Dict, List
def find_invalid_link_references(folder: str) -> Dict[str, List[str]]:
""" Search for invalid link references in all HTML
files within the given directory path.
"""
invalid_links = {}
# Iterate all `.html`s.
for file in Path(folder).rglob("*.html"):
with file.open(encoding="utf8") as f:
html = f.read()
# Ignore blocks with TODOs:
#
# <!-- TODO(somebody): [Links here][are not rendered]. -->
html = re.sub(r"<!--.*?-->", "", html, flags=re.DOTALL)
# Ignore blocks with code:
#
# ```dart
# [[highlight]]flutter[[/highlight]]
# ```
html = re.sub(r"<pre.*?</pre>", "", html, flags=re.DOTALL)
# Remove PRs title that looks like a link, they're typically laid in release notes:
#
# <p><a href="https://github.com/flutter/engine/pull/27070">27070</a>
# [web][felt] Fix stdout inheritance for sub-processes
# (cla: yes, waiting for tree to go green, platform-web, needs tests)
# </p>
html = re.sub(
r'<p><a href="https://github.com/.*?/pull/\d+">\d+</a> .*?</p>',
"",
html,
)
# Use regex to find all links that displayed abnormally,
# since a valid referenced link should be an <a> tag after rendered:
#
# * <p>[flutter.dev][]</p>
# * <p>[GitHub repo][repo]</p>
#
# See also:
# * https://github.github.com/gfm/#reference-link
matches = re.findall(r"\[[^\[\]]+]\[[^\[\]]*]", html)
if matches:
invalid_links[file.relative_to(folder).as_posix()] = matches
return invalid_links
if __name__ == "__main__":
result = find_invalid_link_references("_site")
if result:
for file_path, links in result.items():
print(f'/{file_path}')
print(*links, '\n', sep="\n")
sys.exit(-1)
|
examples/riscv/RVC_misaligned_force.py | noahsherrill/force-riscv | 111 | 11135094 | #
# Copyright (C) [2020] Futurewei Technologies, Inc.
#
# FORCE-RISCV is licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES
# OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
# NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from riscv.EnvRISCV import EnvRISCV
from riscv.GenThreadRISCV import GenThreadRISCV
from base.Sequence import Sequence
# *********************************************************************************
# generate N misaligned random RVC load/stores...
# *********************************************************************************
# Riscv RVC load/store instrs, word/dword accesses:
RVC_load_store_instructions = {
"C.FLD##RISCV": 10,
"C.LD##RISCV": 10,
"C.LW##RISCV": 10,
"C.FLDSP##RISCV": 10,
"C.LDSP##RISCV": 10,
"C.LWSP##RISCV": 10,
"C.FSD##RISCV": 10,
"C.SD##RISCV": 10,
"C.SW##RISCV": 10,
"C.FSDSP##RISCV": 10,
"C.SDSP##RISCV": 10,
"C.SWSP##RISCV": 10,
}
class MyMainSequence(Sequence):
def generate(self, **kargs):
for _ in range(100):
# pick random RVC load/store instruction...
instr = self.pickWeighted(RVC_load_store_instructions)
# pick a random address aligned to a page boundary,
# then (re)align that address close to the end of the page,
# on half-word boundary. should yield a fair amount of misaligned
# load/stores...
target_addr = self.genVA(Align=0x1000) | 0xFFE
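            # e.g. a hypothetical page base of 0x7000 yields 0x7FFE, two bytes
            # before the page end, so word/dword accesses straddle the boundary.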
self.notice(
">>>>> Instruction: {} Target addr: {:012x}".format(
instr, target_addr
)
)
self.genInstruction(instr, {"LSTarget": target_addr})
MainSequenceClass = MyMainSequence
GenThreadClass = GenThreadRISCV
EnvClass = EnvRISCV
|
lib/clx/settings.py | pooya/disco | 786 | 11135104 |
import os
class Settings(dict):
"""
A dictionary for storing settings.
Provides special mechanisms for setting and overriding default values.
Defaults can be overridden by a settings file and/or the environment.
"""
defaults = {}
globals = globals()
settings_file_var = None
def __init__(self, *args, **kwargs):
super(Settings, self).__init__(*args, **kwargs)
self.settings_file_defs = {}
if self.settings_file_var:
settings_file = self[self.settings_file_var]
if os.path.exists(settings_file):
exec(compile(open(settings_file).read(), settings_file, 'exec'),
{}, self.settings_file_defs)
def __getitem__(self, key):
"""Get `key`: check the instance, then the env, then defaults."""
if key in self:
return super(Settings, self).__getitem__(key)
if key in os.environ:
return os.environ[key]
if key in self.settings_file_defs:
return self.settings_file_defs[key]
return eval(self.defaults[key], self.globals, self)
def __reduce__(self):
return type(self), (dict((k, self[k]) for k in self.defaults),)
def safedir(self, key):
"""Make sure the directory path stored in the setting `key` exists."""
path = self[key]
if not os.path.exists(path):
os.makedirs(path)
return path
@property
def env(self):
env = os.environ.copy()
env.update((k, str(self[k])) for k in self.defaults)
return env
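def _settings_usage_sketch():
    """Minimal usage sketch (illustrative only; MySettings and its keys are
    made up, not part of clx): default values are Python expressions that are
    evaluated lazily, and an environment variable of the same name would take
    precedence over the default."""
    class MySettings(Settings):
        defaults = {'DATA_ROOT': "'/tmp/data'",
                    'CACHE_ROOT': "DATA_ROOT + '/cache'"}
    settings = MySettings()
    return settings['CACHE_ROOT']  # '/tmp/data/cache' unless overridden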
|
visual_mpc/policy/cem_controllers/variants/nce_cost_controller.py | thomasweng15/visual_foresight | 108 | 11135109 |
from visual_mpc.policy.cem_controllers import CEMBaseController
import imp
import control_embedding
import numpy as np
from visual_mpc.video_prediction.pred_util import get_context, rollout_predictions
from ..visualizer.construct_html import save_gifs, save_html, save_img, fill_template, img_entry_html
from ..visualizer.plot_helper import plot_score_hist
from collections import OrderedDict
class NCECostController(CEMBaseController):
"""
Cross Entropy Method Stochastic Optimizer
"""
def __init__(self, ag_params, policyparams, gpu_id, ngpu):
"""
:param ag_params: agent parameters
:param policyparams: policy parameters
:param gpu_id: starting gpu id
:param ngpu: number of gpus
"""
CEMBaseController.__init__(self, ag_params, policyparams)
params = imp.load_source('params', ag_params['current_dir'] + '/conf.py')
net_conf = params.configuration
if ngpu > 1:
vpred_ngpu = ngpu - 1
else: vpred_ngpu = ngpu
self._predictor = net_conf['setup_predictor'](ag_params, net_conf, gpu_id, vpred_ngpu, self._logger)
self._scoring_func = control_embedding.deploy_model(self._hp.nce_conf_path, batch_size=self._hp.nce_batch_size,
restore_path=self._hp.nce_restore_path,
device_id=gpu_id + ngpu - 1)
self._vpred_bsize = net_conf['batch_size']
self._seqlen = net_conf['sequence_length']
self._net_context = net_conf['context_frames']
self._hp.start_planning = self._net_context # skip steps so there are enough context frames
self._n_pred = self._seqlen - self._net_context
        assert self._n_pred > 0, "sequence_length must be larger than context_frames"
self._img_height, self._img_width = net_conf['orig_size']
self._n_cam = net_conf['ncam']
self._images = None
self._expert_images = None
self._expert_score = None
self._goal_image = None
self._start_image = None
self._verbose_worker = None
def reset(self):
self._expert_score = None
self._images = None
self._expert_images = None
self._goal_image = None
self._start_image = None
self._verbose_worker = None
return super(NCECostController, self).reset()
def _default_hparams(self):
default_dict = {
'score_fn': 'dot_prod',
'finalweight': 100,
'nce_conf_path': '',
'nce_restore_path': '',
'nce_batch_size': 200,
'state_append': None,
'compare_to_expert': False,
'verbose_img_height': 128,
'verbose_frac_display': 0.
}
parent_params = super(NCECostController, self)._default_hparams()
for k in default_dict.keys():
parent_params.add_hparam(k, default_dict[k])
return parent_params
def evaluate_rollouts(self, actions, cem_itr):
last_frames, last_states = get_context(self._net_context, self._t,
self._state, self._images, self._hp)
gen_images = rollout_predictions(self._predictor, self._vpred_bsize, actions,
last_frames, last_states, logger=self._logger)[0]
gen_images = np.concatenate(gen_images, 0) * 255.
raw_scores = np.zeros((self._n_cam, actions.shape[0], self._n_pred))
for c in range(self._n_cam):
goal, start = self._goal_image[c][None], self._start_image[c][None]
input_images = gen_images[:, :, c].reshape((-1, self._img_height, self._img_width, 3))
embed_dict = self._scoring_func(goal, start, input_images)
gs_enc = embed_dict['goal_enc'][0][None]
in_enc = embed_dict['input_enc'].reshape((actions.shape[0], self._n_pred, -1))
raw_scores[c] = self._eval_embedding_cost(gs_enc, in_enc)
raw_scores = np.sum(raw_scores, axis=0)
scores = self._weight_scores(raw_scores)
if self._verbose_condition(cem_itr):
verbose_folder = "planning_{}_itr_{}".format(self._t, cem_itr)
content_dict = OrderedDict()
visualize_indices = scores.argsort()[:max(10, int(actions.shape[0] * self._hp.verbose_frac_display))]
# start image and predictions (alternate by camera)
for c in range(self._n_cam):
name = 'cam_{}_start'.format(c)
save_path = save_img(self._verbose_worker, verbose_folder, name, self._images[-1, c])
content_dict[name] = [save_path for _ in visualize_indices]
name = 'cam_{}_goal'.format(c)
save_path = save_img(self._verbose_worker, verbose_folder, name, self._goal_image[c].astype(np.uint8))
content_dict[name] = [save_path for _ in visualize_indices]
verbose_images = [gen_images[g_i, :, c].astype(np.uint8) for g_i in visualize_indices]
row_name = 'cam_{}_pred_images'.format(c)
content_dict[row_name] = save_gifs(self._verbose_worker, verbose_folder,
row_name, verbose_images)
# scores
content_dict['scores'] = scores[visualize_indices]
content_dict['NCE Res'] = raw_scores[visualize_indices]
if self._hp.compare_to_expert and self._expert_score is None:
expert_scores = np.zeros((self._n_cam, 1, self._n_pred))
for c in range(self._n_cam):
expert_goal, expert_start = self._expert_images[-1][c], self._expert_images[0][c]
embed_dict = self._scoring_func(expert_goal[None], expert_start[None], self._expert_images[:,c])
gs_enc = embed_dict['goal_enc'][0][None]
in_enc = embed_dict['input_enc'].reshape((1, self._n_pred, -1))
expert_scores[c] = self._eval_embedding_cost(gs_enc, in_enc)
self._expert_score = self._weight_scores(np.sum(expert_scores, axis=0))[0]
hist = plot_score_hist(scores, tick_value=self._expert_score)
hist_path = save_img(self._verbose_worker, verbose_folder, "score_histogram", hist)
extra_entry = img_entry_html(hist_path, height=hist.shape[0], caption="score histogram")
html_page = fill_template(cem_itr, self._t, content_dict, img_height=self._hp.verbose_img_height,
extra_html=extra_entry)
save_html(self._verbose_worker, "{}/plan.html".format(verbose_folder), html_page)
return scores
def _weight_scores(self, raw_scores):
if self._hp.finalweight >= 0:
scores = raw_scores.copy()
scores[:, -1] *= self._hp.finalweight
scores = np.sum(scores, axis=1) / sum([1. for _ in range(self._n_pred - 1)] + [self._hp.finalweight])
else:
scores = raw_scores[:, -1].copy()
return scores
def _eval_embedding_cost(self, goal_embed, input_embed):
if self._hp.score_fn == 'dot_prod':
# - log prob ignoring constant term (denominator)
return -np.matmul(goal_embed[None], np.swapaxes(input_embed, 2, 1))[:, 0]
raise NotImplementedError
def act(self, t=None, i_tr=None, goal_image=None, images=None, state=None, verbose_worker=None):
self._start_image = images[-1].astype(np.float32)
self._goal_image = goal_image[-1] * 255
self._images = images
self._verbose_worker = verbose_worker
if self._hp.compare_to_expert:
self._expert_images = goal_image[1:self._n_pred + 1] * 255
return super(NCECostController, self).act(t, i_tr, state)
|
graphdash/__init__.py | NunoEdgarGFlowHub/GraphDash | 306 | 11135115 |
# -*- coding: utf-8 -*-
from .routes import app, CONF
__all__ = ['app', 'CONF']
|
powerfulseal/k8s/pod.py | fahedouch/powerfulseal | 1,362 | 11135136 |
# Copyright 2017 Bloomberg Finance L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class Pod():
""" Internal representation of a pod. Use to easily manipulate them
internally.
"""
def __init__(self, name, namespace, num=None, uid=None, host_ip=None, ip=None,
container_ids=None, restart_count=None, state=None, labels=None, annotations=None, meta=None):
self.name = name
self.namespace = namespace
self.num = num
self.uid = uid
self.host_ip = host_ip
self.ip = ip
self.container_ids = container_ids or []
self.restart_count = restart_count or 0
self.state = state
self.labels = labels or dict()
self.annotations = annotations or dict()
self.meta = meta
def __str__(self):
return (
"[pod #{num} name={name} namespace={namespace} containers={containers} ip={ip} host_ip={host_ip} "
"state={state} labels:{labels} annotations:{annotations}]"
).format(
num=self.num,
name=self.name,
namespace=self.namespace,
containers=len(self.container_ids),
ip=self.ip,
host_ip=self.host_ip,
state=str(self.state),
labels=",".join(["%s=%s" % (k,v) for k,v in self.labels.items()]),
annotations=",".join(["%s=%s" % (k,v) for k,v in self.annotations.items()]),
)
def __repr__(self):
return self.__str__()
def __hash__(self):
if self.uid:
return hash(self.uid)
return hash(self.name + self.namespace)
def __eq__(self, other):
return self.__hash__() == other.__hash__()
def get_label_or_annotation(self, key, default):
return self.labels.get(key) or self.annotations.get(key) or default
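def _pod_usage_sketch():
    """Minimal usage sketch (illustrative only; names, uid and labels are made
    up): pods hash on uid when present, so repeated listings of the same pod
    deduplicate in a set, and labels win over annotations in
    get_label_or_annotation."""
    first = Pod(name="web-1", namespace="default", uid="abc-123", labels={"app": "web"})
    second = Pod(name="web-1", namespace="default", uid="abc-123")
    assert first == second and len({first, second}) == 1
    assert first.get_label_or_annotation("app", "unknown") == "web"
    return first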
|
scripts/experimental/abelian_log_parser_analysis.py | bigwater/Galois | 230 | 11135145 | ##########################################
# To parse log files generated by abelian.
# Author: <NAME>
# Email: <EMAIL>
#########################################
import re
import os
import sys, getopt
import csv
import numpy
######## NOTES:
# All time values are in sec by default.
def sd_iterations(inputFile, outputFile, outputFile_mainfile, benchmark, runs, time_unit, hostNum, iterationNum, variant, input_graph, deviceKind, devices, partition):
mean_time = 0.0;
recvNum_total = 0
recvBytes_total = 0
sendNum_total = 0
sendBytes_total = 0
sync_pull_avg_time_total = 0.0;
extract_avg_time_total = 0.0;
set_avg_time_total = 0.0;
sync_push_avg_time_total = 0.0;
graph_init_time = 0
hg_init_time = 0
total_time = 0
if(benchmark == "cc"):
benchmark = "ConnectedComp"
if (time_unit == 'seconds'):
divisor = 1000
else:
divisor = 1
log_data = open(inputFile).read()
data = [variant, input_graph, hostNum, benchmark, partition, deviceKind, devices]
fd_outputFile = open(outputFile, 'a')
fd_outputFile_main = open(outputFile_mainfile, 'a')
rep_regex = re.compile(r'.*,\(NULL\),0\s,\sREPLICATION_FACTOR_0_0,(\d*),\d*,(.*)')
Total_mean_compute = 0.0
Total_rsd_compute = 0.0
rep_search = rep_regex.search(log_data)
if rep_search is not None:
rep_factor = rep_search.group(2)
rep_factor = round(float(rep_factor), 3)
print ("FOUND : ", rep_factor)
iterNum_start = 0
#do_all_regex = re.compile(r'.*,\(NULL\),0\s,\sDO_ALL_IMPL_FirstItr_(?i)' + re.escape(benchmark) + r'_0_' + r',.*' + r',\d*,(\d*)')
#do_all_all_hosts = re.findall(do_all_regex, log_data)
#num_arr = numpy.array(map(int,do_all_all_hosts))
#if(num_arr.size > 0):
#sd = numpy.std(num_arr, axis=0)
#mean = numpy.mean(num_arr, axis=0)
#var = numpy.var(num_arr, axis=0)
#complete_data = data + [rep_factor,iterNum, mean, var, sd, sd/mean]
#wr = csv.writer(fd_outputFile, quoting=csv.QUOTE_NONE, lineterminator='\n')
#wr.writerow(complete_data)
#iterNum_start += 1
#Total_mean_compute += mean
#Total_rsd_compute += sd/mean
for iterNum in range(iterNum_start, int(iterationNum)):
do_all_regex = re.compile(r'.*,\(NULL\),0\s,\sDO_ALL_IMPL_(?i)' + re.escape(benchmark) + r'_0_' + re.escape(str(iterNum)) +r',.*' + r',\d*,(\d*)')
do_all_all_hosts = re.findall(do_all_regex, log_data)
num_arr_tmp = numpy.array(map(int,do_all_all_hosts))
if(num_arr_tmp.size < int(hostNum) and iterNum == 0):
num_arr = numpy.zeros(int(hostNum));
for i in range(0, num_arr_tmp.size):
num_arr[i] = num_arr_tmp[i]
else:
num_arr = num_arr_tmp
print num_arr
if(num_arr.size < int(hostNum)):
print "SOME DATA IS MISSING\n"
#sys.exit("aa! errors! SOME DATA MISSING IN THE LOG FILES!!")
sd=0.0
mean=0.0
var=0.0
try:
if(num_arr.size > 0):
sd = numpy.std(num_arr, axis=0)
mean = numpy.mean(num_arr, axis=0)
var = numpy.var(num_arr, axis=0)
except ValueError:
pass
rsd = 0.0;
if(mean > 0):
rsd = sd/mean
complete_data = data + [rep_factor,iterNum, mean, var, sd, rsd]
wr = csv.writer(fd_outputFile, quoting=csv.QUOTE_NONE, lineterminator='\n')
wr.writerow(complete_data)
Total_mean_compute += mean
Total_rsd_compute += rsd
print ("MEAN : ", Total_mean_compute)
print ("RSD : ", Total_rsd_compute)
Total_mean_compute = round(Total_mean_compute,3)
Total_rsd_compute = round(Total_rsd_compute/int(iterationNum),3)
print ("Total_mean_compute : ", Total_mean_compute)
print ("Total_rsd_compute : ", Total_rsd_compute)
complete_data = data + [rep_factor,iterNum, Total_mean_compute, Total_rsd_compute]
wr = csv.writer(fd_outputFile_main, quoting=csv.QUOTE_NONE, lineterminator='\n')
wr.writerow(complete_data)
fd_outputFile_main.close();
fd_outputFile.close()
def get_basicInfo(fileName):
hostNum_regex = re.compile(r'.*,\(NULL\),0\s,\sHosts,0,0,(\d*)')
cmdLine_regex = re.compile(r'.*,\(NULL\),0\s,\sCommandLine,0,0,(.*)')
threads_regex = re.compile(r'.*,\(NULL\),0\s,\sThreads,0,0,(\d*)')
runs_regex = re.compile(r'.*,\(NULL\),0\s,\sRuns,0,0,(\d*)')
num_itr_regex = re.compile(r'.*,\(NULL\),0\s,\sNUM_ITERATIONS_\d*,0,0,(\d*)')
log_data = open(fileName).read()
hostNum = ''
cmdLine = ''
threads = ''
runs = ''
benchmark = ''
variant = ''
cut_type = ''
input_graph = ''
iterationNum = ''
hostNum_search = hostNum_regex.search(log_data)
if hostNum_search is not None:
hostNum = hostNum_search.group(1)
cmdLine_search = cmdLine_regex.search(log_data)
if cmdLine_search is not None:
cmdLine = cmdLine_search.group(1)
threads_search = threads_regex.search(log_data)
if threads_search is not None:
threads = threads_search.group(1)
runs_search = runs_regex.search(log_data)
if runs_search is not None:
runs = runs_search.group(1)
num_itr_search = num_itr_regex.search(log_data)
if num_itr_search is not None:
iterationNum = num_itr_search.group(1)
split_cmdLine_algo = cmdLine.split()[0].split("/")[-1].split("_")
benchmark, variant = split_cmdLine_algo
split_cmdLine_input = cmdLine.split()[1].split("/")
input_graph_name = split_cmdLine_input[-1]
input_graph = input_graph_name.split(".")[0]
split_cmdLine = cmdLine.split()
cut_type = "edge-cut"
for index in range(0, len(split_cmdLine)):
if split_cmdLine[index] == "-enableVertexCut=1":
cut_type = "vertex-cut"
break
elif split_cmdLine[index] == "-enableVertexCut":
cut_type = "vertex-cut"
break
elif split_cmdLine[index] == "-enableVertexCut=0":
cut_type = "edge-cut"
break
#cut_type = "edge-cut"
#for index in range(0, len(split_cmdLine_input)):
#if split_cmdLine_input[index] == "-enableVertexCut":
#cut_type = "vertex-cut"
#break
devices = str(hostNum) + " CPU"
deviceKind = "CPU"
for index in range(2, len(cmdLine.split())):
split_cmdLine_devices = cmdLine.split()[index].split("=")
if split_cmdLine_devices[0] == '-pset':
devices_str = split_cmdLine_devices[-1]
cpus = devices_str.count('c')
gpus = devices_str.count('g')
if str(cpus + gpus) == hostNum and gpus > 0:
if cpus == 0:
devices = str(gpus) + " GPU"
deviceKind = "GPU"
else:
devices = str(cpus) + " CPU + " + str(gpus) + " GPU"
deviceKind = "CPU+GPU"
hostNum = str(int(hostNum) - cpus)
break
return hostNum, cmdLine, threads, runs, benchmark, variant, cut_type, input_graph, devices, deviceKind, iterationNum
def main(argv):
inputFile = ''
forHost = '0'
outputFile = 'LOG_output.csv'
time_unit = 'seconds'
try:
opts, args = getopt.getopt(argv,"hi:n:o:md",["ifile=","node=","ofile=","milliseconds"])
except getopt.GetoptError:
        print 'abelian_log_parser_analysis.py -i <inputFile> [-o <outputFile> -n <hostNumber 0 to hosts-1> --milliseconds]'
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
            print 'abelian_log_parser_analysis.py -i <inputFile> [-o <outputFile> -n <hostNumber 0 to hosts-1> --milliseconds]'
sys.exit()
elif opt in ("-i", "--ifile"):
inputFile = arg
elif opt in ("-n", "--node"):
forHost = arg
elif opt in ("-o", "--ofile"):
outputFile = arg
elif opt in ("-m", "--milliseconds"):
time_unit = 'milliseconds'
if inputFile == '':
print 'abelian_log_parser_analysis.py -i <inputFile> [-o <outputFile> -n <hostNumber 0 to hosts-1> --milliseconds]'
sys.exit(2)
print 'Input file is : ', inputFile
print 'Output file is : ', outputFile
print 'Data for host : ', forHost
hostNum, cmdLine, threads, runs, benchmark, variant, cut_type, input_graph, devices, deviceKind, iterationNum = get_basicInfo(inputFile)
#shorten the graph names:
if input_graph == "twitter-ICWSM10-component_withRandomWeights" or input_graph == "twitter-ICWSM10-component-transpose" or input_graph == "twitter-ICWSM10-component":
input_graph = "twitter-50"
elif input_graph == "twitter-WWW10-component_withRandomWeights" or input_graph == "twitter-WWW10-component-transpose" or input_graph == "twitter-WWW10-component":
input_graph = "twitter-40"
print 'Hosts : ', hostNum , ' CmdLine : ', cmdLine, ' Threads : ', threads , ' Runs : ', runs, ' benchmark :' , benchmark , ' variant :', variant, ' cut_type : ', cut_type, ' input_graph : ', input_graph, 'iterationNum :', iterationNum
print 'Devices : ', devices
header_csv_str = "variant,input,hosts,benchmark,partition,"
header_csv_str += "deviceKind,devices,replication,iteration,mean,variance,sd,sdByMean"
header_csv_str_mainfile = "variant,input,hosts,benchmark,partition,"
header_csv_str_mainfile += "deviceKind,devices,replication,total_mean_compute,rsd_total"
output_str = variant + ',' + input_graph + ',' + hostNum + ',' + benchmark + ','
output_str += deviceKind + ',' + devices + ','
header_csv_list = header_csv_str.split(',')
header_csv_list_mainfile = header_csv_str_mainfile.split(',')
outputFile_mainfile = outputFile
outputFile = outputFile + ".csv"
#if outputFile is empty add the header to the file
try:
if os.path.isfile(outputFile) is False:
fd_outputFile = open(outputFile, 'wb')
wr = csv.writer(fd_outputFile, quoting=csv.QUOTE_NONE, lineterminator='\n')
wr.writerow(header_csv_list)
fd_outputFile.close()
print "Adding header to the empty file."
else:
print "outputFile : ", outputFile, " exists, results will be appended to it."
except OSError:
print "Error in outfile opening\n"
outputFile_mainfile = outputFile_mainfile + "_main.csv"
try:
if os.path.isfile(outputFile_mainfile) is False:
fd_outputFile = open(outputFile_mainfile, 'wb')
wr = csv.writer(fd_outputFile, quoting=csv.QUOTE_NONE, lineterminator='\n')
wr.writerow(header_csv_list_mainfile)
fd_outputFile.close()
print "Adding header to the empty file."
else:
print "outputFile_mainfile : ", outputFile_mainfile, " exists, results will be appended to it."
except OSError:
print "Error in outfile opening\n"
sd_iterations(inputFile, outputFile, outputFile_mainfile, benchmark, runs, time_unit, hostNum, iterationNum, variant, input_graph, deviceKind, devices, cut_type)
'''
data_list = list(data) #[data] #list(data)
#data_list.extend((total_SendBytes, total_SendBytes_pull_sync, total_SendBytes_pull_reply, total_SendBytes_push_sync))
complete_data = output_str.split(",") + data_list + [rep_factor]#+ list(sendBytes_list)
fd_outputFile = open(outputFile, 'a')
wr = csv.writer(fd_outputFile, quoting=csv.QUOTE_NONE, lineterminator='\n')
wr.writerow(complete_data)
fd_outputFile.close()
'''
'''
## Write ghost and slave nodes to a file.
ghost_array = build_master_ghost_matrix(inputFile, benchmark, cut_type, hostNum, runs, threads)
ghostNodes_file = outputFile + "_" + cut_type
fd_ghostNodes_file = open(ghostNodes_file, 'ab')
fd_ghostNodes_file.write("\n--------------------------------------------------------------\n")
fd_ghostNodes_file.write("\nHosts : " + hostNum + "\nInputFile : "+ inputFile + "\nBenchmark: " + benchmark + "\nPartition: " + cut_type + "\n\n")
numpy.savetxt(fd_ghostNodes_file, ghost_array, delimiter=',', fmt='%d')
fd_ghostNodes_file.write("\n--------------------------------------------------------------\n")
fd_ghostNodes_file.close()
'''
if __name__ == "__main__":
main(sys.argv[1:])
|
locust/test/test_parser.py | KevinHoi128/locust | 18,336 | 11135163 | import unittest
import os
import tempfile
import mock
from io import StringIO
import locust
from locust.argument_parser import parse_options, get_parser, parse_locustfile_option, ui_extra_args_dict
from .mock_locustfile import mock_locustfile
from .testcases import LocustTestCase
class TestParser(unittest.TestCase):
def setUp(self):
self.parser = get_parser(default_config_files=[])
def test_default(self):
opts = self.parser.parse_args([])
self.assertEqual(opts.reset_stats, False)
self.assertEqual(opts.skip_log_setup, False)
def test_reset_stats(self):
args = ["--reset-stats"]
opts = self.parser.parse_args(args)
self.assertEqual(opts.reset_stats, True)
def test_skip_log_setup(self):
args = ["--skip-log-setup"]
opts = self.parser.parse_args(args)
self.assertEqual(opts.skip_log_setup, True)
def test_parameter_parsing(self):
with tempfile.NamedTemporaryFile(mode="w") as file:
os.environ["LOCUST_LOCUSTFILE"] = "locustfile_from_env"
file.write("host host_from_config\nweb-host webhost_from_config")
file.flush()
parser = get_parser(default_config_files=[file.name])
options = parser.parse_args(["-H", "host_from_args"])
del os.environ["LOCUST_LOCUSTFILE"]
self.assertEqual(options.web_host, "webhost_from_config")
self.assertEqual(options.locustfile, "locustfile_from_env")
self.assertEqual(options.host, "host_from_args") # overridden
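        # The asserts above pin down where each setting came from: web_host is read
        # from the config file, locustfile from the LOCUST_LOCUSTFILE environment
        # variable, and host from the command line, which overrides the config file.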
def test_web_auth(self):
args = ["--web-auth", "hello:bye"]
opts = self.parser.parse_args(args)
self.assertEqual(opts.web_auth, "hello:bye")
class TestArgumentParser(LocustTestCase):
def test_parse_options(self):
options = parse_options(
args=[
"-f",
"locustfile.py",
"-u",
"100",
"-r",
"10",
"-t",
"5m",
"--reset-stats",
"--stop-timeout",
"5",
"MyUserClass",
]
)
self.assertEqual("locustfile.py", options.locustfile)
self.assertEqual(100, options.num_users)
self.assertEqual(10, options.spawn_rate)
self.assertEqual("5m", options.run_time)
self.assertTrue(options.reset_stats)
self.assertEqual(5, options.stop_timeout)
self.assertEqual(["MyUserClass"], options.user_classes)
# check default arg
self.assertEqual(8089, options.web_port)
def test_parse_locustfile(self):
with mock_locustfile() as mocked:
locustfile = parse_locustfile_option(
args=[
"-f",
mocked.file_path,
"-u",
"100",
"-r",
"10",
"-t",
"5m",
"--reset-stats",
"--stop-timeout",
"5",
"MyUserClass",
]
)
self.assertEqual(mocked.file_path, locustfile)
locustfile = parse_locustfile_option(
args=[
"-f",
mocked.file_path,
]
)
self.assertEqual(mocked.file_path, locustfile)
def test_unknown_command_line_arg(self):
with self.assertRaises(SystemExit):
with mock.patch("sys.stderr", new=StringIO()):
parse_options(
args=[
"-f",
"something.py",
"-u",
"100",
"-r",
"10",
"-t",
"5m",
"--reset-stats",
"--stop-timeout",
"5",
"--unknown-flag",
"MyUserClass",
]
)
def test_custom_argument(self):
@locust.events.init_command_line_parser.add_listener
def _(parser, **kw):
parser.add_argument("--custom-bool-arg", action="store_true", help="Custom boolean flag")
parser.add_argument(
"--custom-string-arg",
help="Custom string arg",
)
options = parse_options(
args=[
"-u",
"666",
"--custom-bool-arg",
"--custom-string-arg",
"HEJ",
]
)
self.assertEqual(666, options.num_users)
self.assertEqual("HEJ", options.custom_string_arg)
self.assertTrue(options.custom_bool_arg)
def test_custom_argument_help_message(self):
@locust.events.init_command_line_parser.add_listener
def _(parser, **kw):
parser.add_argument("--custom-bool-arg", action="store_true", help="Custom boolean flag")
parser.add_argument(
"--custom-string-arg",
help="Custom string arg",
)
out = StringIO()
with mock.patch("sys.stdout", new=out):
with self.assertRaises(SystemExit):
parse_options(args=["--help"])
out.seek(0)
stdout = out.read()
self.assertIn("Custom boolean flag", stdout)
self.assertIn("Custom string arg", stdout)
def test_csv_full_history_requires_csv(self):
with mock.patch("sys.stderr", new=StringIO()):
with self.assertRaises(SystemExit):
parse_options(
args=[
"-f",
"locustfile.py",
"--csv-full-history",
]
)
def test_custom_argument_included_in_web_ui(self):
@locust.events.init_command_line_parser.add_listener
def _(parser, **kw):
parser.add_argument("--a1", help="a1 help")
parser.add_argument("--a2", help="a2 help", include_in_web_ui=False)
args = [
"-u",
"666",
"--a1",
"v1",
"--a2",
"v2",
]
options = parse_options(args=args)
self.assertEqual(666, options.num_users)
self.assertEqual("v1", options.a1)
self.assertEqual("v2", options.a2)
extra_args = ui_extra_args_dict(args)
self.assertIn("a1", extra_args)
self.assertNotIn("a2", extra_args)
self.assertEqual("v1", extra_args["a1"])
|
Demo/tkinter/matt/animation-w-velocity-ctrl.py | cemeyer/tauthon | 2,293 | 11135165 | from Tkinter import *
# this is the same as simple-demo-1.py, but uses
# subclassing.
# note that there is no explicit call to start Tk.
# Tkinter is smart enough to start the system if it's not already going.
class Test(Frame):
def printit(self):
print "hi"
def createWidgets(self):
self.QUIT = Button(self, text='QUIT', foreground='red',
command=self.quit)
self.QUIT.pack(side=BOTTOM, fill=BOTH)
self.draw = Canvas(self, width="5i", height="5i")
self.speed = Scale(self, orient=HORIZONTAL, from_=-100, to=100)
self.speed.pack(side=BOTTOM, fill=X)
# all of these work..
self.draw.create_rectangle(0, 0, 10, 10, tags="thing", fill="blue")
self.draw.pack(side=LEFT)
def moveThing(self, *args):
velocity = self.speed.get()
str = float(velocity) / 1000.0
str = "%ri" % (str,)
self.draw.move("thing", str, str)
self.after(10, self.moveThing)
def __init__(self, master=None):
Frame.__init__(self, master)
Pack.config(self)
self.createWidgets()
self.after(10, self.moveThing)
test = Test()
test.mainloop()
|
aicsimageio/readers/czi_reader.py | brisvag/aicsimageio | 110 | 11135181 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import warnings
import xml.etree.ElementTree as ET
from copy import copy
from pathlib import Path
from typing import Any, Dict, Hashable, List, Optional, Tuple, Union
import dask.array as da
import numpy as np
import xarray as xr
from dask import delayed
from fsspec.implementations.local import LocalFileSystem
from fsspec.spec import AbstractFileSystem
from ome_types.model.ome import OME
from .. import constants, exceptions, types
from ..dimensions import DEFAULT_CHUNK_DIMS, REQUIRED_CHUNK_DIMS, DimensionNames
from ..metadata import utils as metadata_utils
from ..utils import io_utils
from .reader import Reader
try:
from _aicspylibczi import BBox, TileInfo
from aicspylibczi import CziFile
except ImportError:
raise ImportError(
"aicspylibczi is required for this reader. "
"Install with `pip install aicsimageio[czi]`"
)
###############################################################################
CZI_SAMPLES_DIM_CHAR = "A"
CZI_BLOCK_DIM_CHAR = "B"
CZI_SCENE_DIM_CHAR = "S"
###############################################################################
PIXEL_DICT = {
"gray8": np.uint8,
"gray16": np.uint16,
"gray32": np.uint32,
"bgr24": np.uint8,
"bgr48": np.uint16,
}
###############################################################################
class CziReader(Reader):
"""
Wraps the aicspylibczi API to provide the same aicsimageio Reader API but for
volumetric Zeiss CZI images.
Parameters
----------
image: types.PathLike
Path to image file to construct Reader for.
chunk_dims: Union[str, List[str]]
Which dimensions to create chunks for.
Default: DEFAULT_CHUNK_DIMS
Note: DimensionNames.SpatialY, DimensionNames.SpatialX, and
DimensionNames.Samples, will always be added to the list if not present during
dask array construction.
Notes
-----
To use this reader, install with: `pip install aicsimageio[czi]`.
"""
@staticmethod
def _is_supported_image(fs: AbstractFileSystem, path: str, **kwargs: Any) -> bool:
try:
with fs.open(path) as open_resource:
CziFile(open_resource)
return True
except RuntimeError:
return False
def __init__(
self,
image: types.PathLike,
chunk_dims: Union[str, List[str]] = DEFAULT_CHUNK_DIMS,
chunk_by_dims: Optional[Union[str, List[str]]] = None,
):
# Expand details of provided image
self._fs, self._path = io_utils.pathlike_to_fs(image, enforce_exists=True)
# Catch non-local file system
if not isinstance(self._fs, LocalFileSystem):
raise ValueError(
f"Cannot read CZIs from non-local file system. "
f"Received URI: {self._path}, which points to {type(self._fs)}."
)
# Handle deprecated parameter
if chunk_by_dims is not None:
warnings.warn(
"CziReader parameter 'chunk_by_dims' has been renamed to 'chunk_dims'. "
"'chunk_by_dims' will be removed in aicsimageio>=4.1.",
DeprecationWarning,
)
chunk_dims = chunk_by_dims
# Store params
if isinstance(chunk_dims, str):
chunk_dims = list(chunk_dims)
self.chunk_dims = chunk_dims
# Delayed storage
self._px_sizes: Optional[types.PhysicalPixelSizes] = None
self._mapped_dims: Optional[str] = None
# Enforce valid image
if not self._is_supported_image(self._fs, self._path):
raise exceptions.UnsupportedFileFormatError(
self.__class__.__name__, self._path
)
@property
def mapped_dims(self) -> str:
if self._mapped_dims is None:
with self._fs.open(self._path) as open_resource:
czi = CziFile(open_resource)
self._mapped_dims = CziReader._fix_czi_dims(czi.dims)
return self._mapped_dims
@staticmethod
def _fix_czi_dims(dims: str) -> str:
return (
dims.replace(CZI_BLOCK_DIM_CHAR, "")
.replace(CZI_SCENE_DIM_CHAR, "")
.replace(CZI_SAMPLES_DIM_CHAR, DimensionNames.Samples)
)
@property
def scenes(self) -> Tuple[str, ...]:
if self._scenes is None:
with self._fs.open(self._path) as open_resource:
czi = CziFile(open_resource)
xpath_str = "./Metadata/Information/Image/Dimensions/S/Scenes/Scene"
meta_scenes = czi.meta.findall(xpath_str)
scene_names: List[str] = []
# Some "scenes" may have the same name but each scene has a sub-scene
# "Shape" with a name.
#
# An example of this is where someone images a 96 well plate with each
                # well being its own scene but they name every scene the same value.
# The sub-scene "Shape" elements have actual names of each well.
#
# If we didn't do this, the produced list would have 96 of the same
# string name making it impossible to switch scenes.
for meta_scene in meta_scenes:
shape = meta_scene.find("Shape")
if shape is not None:
shape_name = shape.get("Name")
scene_name = meta_scene.get("Name")
combined_scene_name = f"{scene_name}-{shape_name}"
else:
combined_scene_name = meta_scene.get("Name")
scene_names.append(combined_scene_name)
# If the scene is implicit just assign it name Scene:0
if len(scene_names) < 1:
scene_names = [metadata_utils.generate_ome_image_id(0)]
self._scenes = tuple(scene_names)
return self._scenes
@staticmethod
def _dims_shape_to_scene_dims_shape(
dims_shape: List[Dict], scene_index: int, consistent: bool
) -> Dict[str, Tuple[int, int]]:
"""
This function takes the output of `get_dims_shape()` and returns a
dictionary of dimensions for the selected scene
Parameters
----------
dims_shape: List[Dict]
a list of dictionaries, generated by `get_dims_shape()`
scene_index: int
the index of the scene being used
consistent: bool
            True if the dictionaries are consistent and could be represented
            compactly (dims_shape with length 1).
Returns
-------
A dictionary of dimensions, ie
{"T": (0, 1), "C": (0, 3), "Y": (0, 256), "X":(0, 256)}.
"""
dims_shape_index = 0 if consistent else scene_index
dims_shape_dict = dims_shape[dims_shape_index]
dims_shape_dict.pop(CZI_SCENE_DIM_CHAR, None)
return dims_shape_dict
@staticmethod
def _read_chunk_from_image(
fs: AbstractFileSystem,
path: str,
scene: int,
read_dims: Optional[Dict[str, int]] = None,
) -> np.ndarray:
return CziReader._get_image_data(
fs=fs, path=path, scene=scene, read_dims=read_dims
)[0]
@staticmethod
def _get_image_data(
fs: AbstractFileSystem,
path: str,
scene: int,
read_dims: Optional[Dict[str, int]] = None,
) -> Tuple[np.ndarray, List[Tuple[str, int]]]:
"""
Read and return the squeezed image data requested along with the dimension info
that was read.
Parameters
----------
fs: AbstractFileSystem
The file system to use for reading.
path: str
The path to the file to read.
scene: int
The scene index to pull the chunk from.
read_dims: Optional[Dict[str, int]]
The dimensions to read from the file as a dictionary of string to integer.
Default: None (Read all data from the image)
Returns
-------
chunk: np.ndarray
The image chunk read as a numpy array.
read_dimensions: List[Tuple[str, int]]]
The dimension sizes that were returned from the read.
"""
# Catch optional read dim
if read_dims is None:
read_dims = {}
# Get current scene read dims
read_dims[CZI_SCENE_DIM_CHAR] = scene
# Init czi
with fs.open(path) as open_resource:
czi = CziFile(open_resource)
# Read image
data, dims = czi.read_image(**read_dims)
# Drop dims that shouldn't be provided back
ops: List[Union[int, slice]] = []
real_dims = []
for dim_info in dims:
# Expand dimension info
dim, _ = dim_info
# If the dim was provided in the read dims
# we know a single plane for that dimension was requested so remove it
if dim in read_dims or dim is CZI_BLOCK_DIM_CHAR:
ops.append(0)
# Otherwise just read the full slice
else:
ops.append(slice(None, None, None))
real_dims.append(dim_info)
# Convert ops and run getitem
return data[tuple(ops)], real_dims
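    # Contract sketch for the helper above (dimension names are illustrative): passing
    # read_dims={"T": 0, "C": 1} pins those dimensions to a single plane, so they are
    # squeezed out of the returned array and omitted from the returned dimension list,
    # while unspecified dimensions are read in full.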
def _create_dask_array(self, czi: CziFile) -> xr.DataArray:
"""
Creates a delayed dask array for the file.
Parameters
----------
czi: CziFile
An open CziFile for processing.
Returns
-------
image_data: da.Array
The fully constructed and fully delayed image as a Dask Array object.
"""
# Always add the plane dimensions if not present already
for dim in REQUIRED_CHUNK_DIMS:
if dim not in self.chunk_dims:
self.chunk_dims.append(dim)
# Safety measure / "feature"
self.chunk_dims = [d.upper() for d in self.chunk_dims]
# Construct the delayed dask array
dims_shape = CziReader._dims_shape_to_scene_dims_shape(
czi.get_dims_shape(),
scene_index=self.current_scene_index,
consistent=czi.shape_is_consistent,
)
# Remove block dim as not useful
dims_shape.pop(CZI_BLOCK_DIM_CHAR, None)
dims_str = czi.dims
for remove_dim_char in [CZI_BLOCK_DIM_CHAR, CZI_SCENE_DIM_CHAR]:
dims_str = dims_str.replace(remove_dim_char, "")
# Get the shape for the chunk and operating shape for the dask array
# We also collect the chunk and non chunk dimension ordering so that we can
# swap the dimensions after we
# block the dask array together.
sample_chunk_shape = []
operating_shape = []
non_chunk_dimension_ordering = []
chunk_dimension_ordering = []
for i, dim in enumerate(dims_str):
# Unpack dim info
_, dim_size = dims_shape[dim]
# If the dim is part of the specified chunk dims then append it to the
# sample, and, append the dimension
# to the chunk dimension ordering
if dim in self.chunk_dims:
sample_chunk_shape.append(dim_size)
chunk_dimension_ordering.append(dim)
# Otherwise, append the dimension to the non chunk dimension ordering, and,
# append the true size of the
# image at that dimension
else:
non_chunk_dimension_ordering.append(dim)
operating_shape.append(dim_size)
# Convert shapes to tuples and combine the non and chunked dimension orders as
# that is the order the data will
# actually come out of the read data as
sample_chunk_shape_tuple = tuple(sample_chunk_shape)
blocked_dimension_order = (
non_chunk_dimension_ordering + chunk_dimension_ordering
)
# Fill out the rest of the operating shape with dimension sizes of 1 to match
# the length of the sample chunk
# When dask.block happens it fills the dimensions from inner-most to outer-most
# with the chunks as long as the dimension is size 1
# Basically, we are adding empty dimensions to the operating shape that will be
# filled by the chunks from dask
operating_shape_tuple = tuple(operating_shape) + (1,) * len(
sample_chunk_shape_tuple
)
# Create empty numpy array with the operating shape so that we can iter through
# and use the multi_index to create the readers.
lazy_arrays: np.ndarray = np.ndarray(operating_shape_tuple, dtype=object)
# We can enumerate over the multi-indexed array and construct read_dims
# dictionaries by simply zipping together the ordered dims list and the current
# multi-index plus the begin index for that plane. We then set the value of the
# array at the same multi-index to the delayed reader using the constructed
# read_dims dictionary.
dims = [
d for d in czi.dims if d not in [CZI_BLOCK_DIM_CHAR, CZI_SCENE_DIM_CHAR]
]
begin_indicies = tuple(dims_shape[d][0] for d in dims)
for np_index, _ in np.ndenumerate(lazy_arrays):
# Add the czi file begin index for each dimension to the array dimension
# index
this_chunk_read_indicies = (
current_dim_begin_index + curr_dim_index
for current_dim_begin_index, curr_dim_index in zip(
begin_indicies, np_index
)
)
# Zip the dims with the read indices
this_chunk_read_dims = dict(
zip(blocked_dimension_order, this_chunk_read_indicies)
)
# Remove the dimensions that we want to chunk by from the read dims
for d in self.chunk_dims:
this_chunk_read_dims.pop(d, None)
# Get pixel type and catch unsupported
pixel_type = PIXEL_DICT.get(czi.pixel_type)
if pixel_type is None:
raise TypeError(f"Pixel type: {pixel_type} is not supported.")
# Add delayed array to lazy arrays at index
lazy_arrays[np_index] = da.from_delayed(
delayed(CziReader._read_chunk_from_image)(
fs=self._fs,
path=self._path,
scene=self.current_scene_index,
read_dims=this_chunk_read_dims,
),
shape=sample_chunk_shape,
dtype=pixel_type,
)
# Convert the numpy array of lazy readers into a dask array and fill the inner
# most empty dimensions with chunks
merged = da.block(lazy_arrays.tolist())
# Because we have set certain dimensions to be chunked and others not
# we will need to transpose back to original dimension ordering
# Example being, if the original dimension ordering was "SZYX" and we want to
# chunk by "S", "Y", and "X" we created an array with dimensions ordering "ZSYX"
transpose_indices = []
transpose_required = False
for i, d in enumerate(dims_str):
new_index = blocked_dimension_order.index(d)
if new_index != i:
transpose_required = True
transpose_indices.append(new_index)
else:
transpose_indices.append(i)
# Only run if the transpose is actually required
# The default case is "Z", "Y", "X", which _usually_ doesn't need to be
# transposed because that is _usually_ the normal dimension order of the CZI
# file anyway
if transpose_required:
merged = da.transpose(merged, tuple(transpose_indices))
# Because dimensions outside of Y and X can be in any order and present or not
# we also return the dimension order string.
return merged
@staticmethod
def _get_coords_and_physical_px_sizes(
xml: ET.Element, scene_index: int, dims_shape: Dict[str, Any]
) -> Tuple[Dict[str, Any], types.PhysicalPixelSizes]:
# Create coord dict
coords: Dict[str, Any] = {}
# Get all images
img_sets = xml.findall(".//Image/Dimensions/Channels")
if len(img_sets) != 0:
# Select the current scene
img = img_sets[0]
if scene_index < len(img_sets):
img = img_sets[scene_index]
# Construct channel list
scene_channel_list = []
channels = img.findall("./Channel")
for channel in channels:
channel_name = channel.attrib["Name"]
scene_channel_list.append(channel_name)
# Attach channel names to coords
coords[DimensionNames.Channel] = scene_channel_list
# Unpack short info scales
list_xs = xml.findall(".//Distance[@Id='X']")
list_ys = xml.findall(".//Distance[@Id='Y']")
list_zs = xml.findall(".//Distance[@Id='Z']")
scale_xe = list_xs[0].find("./Value")
scale_ye = list_ys[0].find("./Value")
scale_ze = None if len(list_zs) == 0 else list_zs[0].find("./Value")
# Set default scales
scale_x = None
scale_y = None
scale_z = None
# Unpack the string value to a float
# Split by "E" and take the first part because the values are stored
# with E-06 for micrometers, even though the unit is also present in metadata
# 🤷
if scale_xe is not None and scale_xe.text is not None:
scale_x = float(scale_xe.text.split("E")[0])
if scale_ye is not None and scale_ye.text is not None:
scale_y = float(scale_ye.text.split("E")[0])
if scale_ze is not None and scale_ze.text is not None:
scale_z = float(scale_ze.text.split("E")[0])
# Handle Spatial Dimensions
for scale, dim_name in [
(scale_z, DimensionNames.SpatialZ),
(scale_y, DimensionNames.SpatialY),
(scale_x, DimensionNames.SpatialX),
]:
if scale is not None and dim_name in dims_shape:
dim_size = dims_shape[dim_name][1] - dims_shape[dim_name][0]
coords[dim_name] = Reader._generate_coord_array(0, dim_size, scale)
# Time
# TODO: unpack "TimeSpan" elements
# I can find a single "TimeSpan" in our data but unsure how multi-scene handles
# Create physical pixel sizes
px_sizes = types.PhysicalPixelSizes(scale_z, scale_y, scale_x)
return coords, px_sizes
def _read_delayed(self) -> xr.DataArray:
"""
Construct the delayed xarray DataArray object for the image.
Returns
-------
image: xr.DataArray
The fully constructed and fully delayed image as a DataArray object.
Metadata is attached in some cases as coords, dims, and attrs.
Raises
------
exceptions.UnsupportedFileFormatError
The file could not be read or is not supported.
"""
with self._fs.open(self._path) as open_resource:
czi = CziFile(open_resource)
dims_shape = CziReader._dims_shape_to_scene_dims_shape(
dims_shape=czi.get_dims_shape(),
scene_index=self.current_scene_index,
consistent=czi.shape_is_consistent,
)
# Get dims as list for xarray
img_dims_list = list(self.mapped_dims)
# Get image data
image_data = self._create_dask_array(czi)
# Create coordinate planes
meta = czi.meta
coords, px_sizes = self._get_coords_and_physical_px_sizes(
xml=meta,
scene_index=self.current_scene_index,
dims_shape=dims_shape,
)
# Store pixel sizes
self._px_sizes = px_sizes
return xr.DataArray(
image_data,
dims=img_dims_list,
coords=coords, # type: ignore
attrs={constants.METADATA_UNPROCESSED: meta},
)
def _read_immediate(self) -> xr.DataArray:
"""
Construct the in-memory xarray DataArray object for the image.
Returns
-------
image: xr.DataArray
The fully constructed and fully read into memory image as a DataArray
object. Metadata is attached in some cases as coords, dims, and attrs.
Raises
------
exceptions.UnsupportedFileFormatError
The file could not be read or is not supported.
"""
with self._fs.open(self._path) as open_resource:
czi = CziFile(open_resource)
dims_shape = CziReader._dims_shape_to_scene_dims_shape(
dims_shape=czi.get_dims_shape(),
scene_index=self.current_scene_index,
consistent=czi.shape_is_consistent,
)
# Get image data
image_data, _ = self._get_image_data(
fs=self._fs,
path=self._path,
scene=self.current_scene_index,
)
# Get metadata
meta = czi.meta
# Create coordinate planes
coords, px_sizes = self._get_coords_and_physical_px_sizes(
xml=meta,
scene_index=self.current_scene_index,
dims_shape=dims_shape,
)
# Store pixel sizes
self._px_sizes = px_sizes
return xr.DataArray(
image_data,
dims=[d for d in self.mapped_dims],
coords=coords, # type: ignore
attrs={constants.METADATA_UNPROCESSED: meta},
)
@staticmethod
def _stitch_tiles(
data: types.ArrayLike,
data_dims: str,
data_dims_shape: Dict[str, Tuple[int, int]],
tile_bboxes: Dict[TileInfo, BBox],
final_bbox: BBox,
) -> types.ArrayLike:
# Assumptions: 1) docs for ZEISSRAW(CZI) say:
# Scene – for clustering items in X/Y direction (data belonging to
# contiguous regions of interests in a mosaic image).
# Store the mosaic array shape
arr_shape_list = []
ordered_dims_present = [
dim
for dim in data_dims
if dim not in [CZI_BLOCK_DIM_CHAR, DimensionNames.MosaicTile]
]
for dim in ordered_dims_present:
if dim not in REQUIRED_CHUNK_DIMS:
arr_shape_list.append(data_dims_shape[dim][1])
if dim is DimensionNames.SpatialY:
arr_shape_list.append(final_bbox.h)
if dim is DimensionNames.SpatialX:
arr_shape_list.append(final_bbox.w)
if dim is DimensionNames.Samples:
arr_shape_list.append(data_dims_shape[CZI_SAMPLES_DIM_CHAR][1])
ans = None
if isinstance(data, da.Array):
ans = da.zeros(
shape=tuple(arr_shape_list),
dtype=data.dtype,
)
else:
ans = np.zeros(arr_shape_list, dtype=data.dtype)
for (tile_info, box) in tile_bboxes.items():
# Construct data indexes to use
tile_dims = tile_info.dimension_coordinates
tile_dims.pop(CZI_SCENE_DIM_CHAR, None)
tile_dims.pop(CZI_BLOCK_DIM_CHAR, None)
data_indexes = [
tile_dims[t_dim]
for t_dim in data_dims
if t_dim not in REQUIRED_CHUNK_DIMS
]
# Add Y and X
data_indexes.append(slice(None)) # Y ":"
data_indexes.append(slice(None)) # X ":"
if CZI_SAMPLES_DIM_CHAR in tile_dims.keys():
data_indexes.append(slice(None))
# Construct data indexes for ans
ans_indexes = []
for dim in ordered_dims_present:
if dim not in [
DimensionNames.MosaicTile,
DimensionNames.Samples,
DimensionNames.SpatialY,
DimensionNames.SpatialX,
]:
if dim in tile_dims.keys():
ans_indexes.append(tile_dims[dim])
if dim is DimensionNames.SpatialY:
start = box.y - final_bbox.y
ans_indexes.append(slice(start, start + box.h, 1))
if dim is DimensionNames.SpatialX:
start = box.x - final_bbox.x
ans_indexes.append(slice(start, start + box.w, 1))
if dim is DimensionNames.Samples:
ans_indexes.append(slice(None))
# Assign the tiles into ans
ans[tuple(ans_indexes)] = data[tuple(data_indexes)]
return ans
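    # Placement note for the stitching above: every tile is copied into the output at
    # an offset relative to the scene bounding box, i.e. rows
    # box.y - final_bbox.y : box.y - final_bbox.y + box.h, with the analogous column
    # range derived from box.x, box.w and final_bbox.x.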
def _construct_mosaic_xarray(self, data: types.ArrayLike) -> xr.DataArray:
# Get max of mosaic positions from lif
with self._fs.open(self._path) as open_resource:
czi = CziFile(open_resource)
dims_shape = CziReader._dims_shape_to_scene_dims_shape(
dims_shape=czi.get_dims_shape(),
scene_index=self.current_scene_index,
consistent=czi.shape_is_consistent,
)
bboxes = czi.get_all_mosaic_tile_bounding_boxes(S=self.current_scene_index)
mosaic_scene_bbox = czi.get_mosaic_scene_bounding_box(
index=self.current_scene_index
)
# Stitch
stitched = self._stitch_tiles(
data=data,
data_dims=self.mapped_dims,
data_dims_shape=dims_shape,
tile_bboxes=bboxes,
final_bbox=mosaic_scene_bbox,
)
# Copy metadata
dims = [
d
for d in self.xarray_dask_data.dims
if d is not DimensionNames.MosaicTile
]
coords: Dict[Hashable, Any] = {
d: v
for d, v in self.xarray_dask_data.coords.items()
if d
not in [
DimensionNames.MosaicTile,
DimensionNames.SpatialY,
DimensionNames.SpatialX,
]
}
# Add expanded Y and X coords
if self.physical_pixel_sizes.Y is not None:
dim_y_index = dims.index(DimensionNames.SpatialY)
coords[DimensionNames.SpatialY] = Reader._generate_coord_array(
0, stitched.shape[dim_y_index], self.physical_pixel_sizes.Y
)
if self.physical_pixel_sizes.X is not None:
dim_x_index = dims.index(DimensionNames.SpatialX)
coords[DimensionNames.SpatialX] = Reader._generate_coord_array(
0, stitched.shape[dim_x_index], self.physical_pixel_sizes.X
)
attrs = copy(self.xarray_dask_data.attrs)
return xr.DataArray(
data=stitched,
dims=dims,
coords=coords,
attrs=attrs,
)
def _get_stitched_dask_mosaic(self) -> xr.DataArray:
return self._construct_mosaic_xarray(self.dask_data)
def _get_stitched_mosaic(self) -> xr.DataArray:
return self._construct_mosaic_xarray(self.data)
@property
def ome_metadata(self) -> OME:
return metadata_utils.transform_metadata_with_xslt(
self.metadata,
Path(__file__).parent.parent
/ "metadata/czi-to-ome-xslt/xslt/czi-to-ome.xsl",
)
@property
def physical_pixel_sizes(self) -> types.PhysicalPixelSizes:
"""
Returns
-------
sizes: PhysicalPixelSizes
Using available metadata, the floats representing physical pixel sizes for
dimensions Z, Y, and X.
Notes
-----
We currently do not handle unit attachment to these values. Please see the file
metadata for unit information.
"""
if self._px_sizes is None:
# We get pixel sizes as a part of array construct
# so simply run array construct
self.dask_data
return self._px_sizes # type: ignore
def get_mosaic_tile_position(self, mosaic_tile_index: int) -> Tuple[int, int]:
"""
Get the absolute position of the top left point for a single mosaic tile.
Parameters
----------
mosaic_tile_index: int
The index for the mosaic tile to retrieve position information for.
Returns
-------
top: int
The Y coordinate for the tile position.
left: int
The X coordinate for the tile position.
Raises
------
UnexpectedShapeError
The image has no mosaic dimension available.
IndexError
No matching mosaic tile index found.
"""
if DimensionNames.MosaicTile not in self.dims.order:
raise exceptions.UnexpectedShapeError("No mosaic dimension in image.")
# Get max of mosaic positions from lif
with self._fs.open(self._path) as open_resource:
czi = CziFile(open_resource)
bboxes = czi.get_all_mosaic_tile_bounding_boxes(S=self.current_scene_index)
bbox = list(bboxes.values())[mosaic_tile_index]
return bbox.y, bbox.x
|
tests/store/tracking/test_sqlalchemy_store_schema.py | PeterSulcs/mlflow | 10,351 | 11135194 | """Tests verifying that the SQLAlchemyStore generates the expected database schema"""
import os
import pytest
from alembic import command
from alembic.script import ScriptDirectory
from alembic.migration import MigrationContext # pylint: disable=import-error
from alembic.autogenerate import compare_metadata
import sqlalchemy
import mlflow.db
from mlflow.exceptions import MlflowException
from mlflow.store.db.utils import _get_alembic_config, _verify_schema
from mlflow.store.db.base_sql_model import Base
# pylint: disable=unused-import
from mlflow.store.model_registry.dbmodels.models import (
SqlRegisteredModel,
SqlModelVersion,
SqlRegisteredModelTag,
SqlModelVersionTag,
)
from mlflow.store.tracking.sqlalchemy_store import SqlAlchemyStore
from mlflow.store.tracking.dbmodels.initial_models import Base as InitialBase
from tests.store.dump_schema import dump_db_schema
from tests.integration.utils import invoke_cli_runner
def _assert_schema_files_equal(generated_schema_file, expected_schema_file):
"""
Assert equivalence of two SQL schema dump files consisting of CREATE TABLE statements delimited
by double-newlines, allowing for the reordering of individual lines within each CREATE TABLE
statement to account for differences in schema-dumping across platforms & Python versions.
"""
# Extract "CREATE TABLE" statement chunks from both files, assuming tables are listed in the
# same order across files
with open(generated_schema_file, "r") as generated_schema_handle:
generated_schema_table_chunks = generated_schema_handle.read().split("\n\n")
with open(expected_schema_file, "r") as expected_schema_handle:
expected_schema_table_chunks = expected_schema_handle.read().split("\n\n")
# Compare the two files table-by-table. We assume each CREATE TABLE statement is valid and
# so sort the lines within the statements before comparing them.
for generated_schema_table, expected_schema_table in zip(
generated_schema_table_chunks, expected_schema_table_chunks
):
generated_lines = [x.strip() for x in sorted(generated_schema_table.split("\n"))]
expected_lines = [x.strip() for x in sorted(expected_schema_table.split("\n"))]
assert generated_lines == expected_lines, (
"Generated schema did not match expected schema. Generated schema had table "
"definition:\n{generated_table}\nExpected schema had table definition:"
"\n{expected_table}\nIf you intended to make schema changes, run "
"'python tests/store/dump_schema.py {expected_file}' from your checkout of MLflow to "
"update the schema snapshot.".format(
generated_table=generated_schema_table,
expected_table=expected_schema_table,
expected_file=expected_schema_file,
)
)
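# Illustrative consequence of the sorting above (table and column names are made up):
# two schema dumps whose "CREATE TABLE tags (...)" blocks list identical columns in a
# different order compare as equal, since lines within each statement are sorted before
# the assertion; only lines that are truly added, removed, or changed make it fail.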
@pytest.fixture()
def expected_schema_file():
current_dir = os.path.dirname(os.path.abspath(__file__))
yield os.path.normpath(
os.path.join(current_dir, os.pardir, os.pardir, "resources", "db", "latest_schema.sql")
)
@pytest.fixture()
def db_url(tmpdir):
return "sqlite:///%s" % tmpdir.join("db_file").strpath
def test_sqlalchemystore_idempotently_generates_up_to_date_schema(
tmpdir, db_url, expected_schema_file
):
generated_schema_file = tmpdir.join("generated-schema.sql").strpath
# Repeatedly initialize a SQLAlchemyStore against the same DB URL. Initialization should
# succeed and the schema should be the same.
for _ in range(3):
SqlAlchemyStore(db_url, tmpdir.join("ARTIFACTS").strpath)
dump_db_schema(db_url, dst_file=generated_schema_file)
_assert_schema_files_equal(generated_schema_file, expected_schema_file)
def test_running_migrations_generates_expected_schema(tmpdir, expected_schema_file, db_url):
"""Test that migrating an existing database generates the desired schema."""
engine = sqlalchemy.create_engine(db_url)
InitialBase.metadata.create_all(engine)
invoke_cli_runner(mlflow.db.commands, ["upgrade", db_url])
generated_schema_file = tmpdir.join("generated-schema.sql").strpath
dump_db_schema(db_url, generated_schema_file)
_assert_schema_files_equal(generated_schema_file, expected_schema_file)
def test_sqlalchemy_store_detects_schema_mismatch(
tmpdir, db_url
): # pylint: disable=unused-argument
    def _assert_invalid_schema(engine):
        with pytest.raises(MlflowException) as ex:
            _verify_schema(engine)
        assert "Detected out-of-date database schema" in str(ex.value)
# Initialize an empty database & verify that we detect a schema mismatch
engine = sqlalchemy.create_engine(db_url)
_assert_invalid_schema(engine)
# Create legacy tables, verify schema is still out of date
InitialBase.metadata.create_all(engine)
_assert_invalid_schema(engine)
# Run each migration. Until the last one, schema should be out of date
config = _get_alembic_config(db_url)
script = ScriptDirectory.from_config(config)
revisions = list(script.walk_revisions())
revisions.reverse()
for rev in revisions[:-1]:
command.upgrade(config, rev.revision)
_assert_invalid_schema(engine)
# Run migrations, schema verification should now pass
invoke_cli_runner(mlflow.db.commands, ["upgrade", db_url])
_verify_schema(engine)
def test_store_generated_schema_matches_base(tmpdir, db_url):
# Create a SQLAlchemyStore against tmpfile, directly verify that tmpfile contains a
# database with a valid schema
SqlAlchemyStore(db_url, tmpdir.join("ARTIFACTS").strpath)
engine = sqlalchemy.create_engine(db_url)
mc = MigrationContext.configure(engine.connect())
diff = compare_metadata(mc, Base.metadata)
assert len(diff) == 0
|
manim/utils/scale.py | PhotonSpheres/manim | 9,497 | 11135202 | import math
from typing import TYPE_CHECKING, Any, Dict, Iterable, List
__all__ = ["LogBase", "LinearBase"]
from ..mobject.numbers import Integer
if TYPE_CHECKING:
from manim.mobject.mobject import Mobject
class _ScaleBase:
"""Scale baseclass for graphing/functions."""
def __init__(self, custom_labels: bool = False):
"""
Parameters
----------
custom_labels
Whether to create custom labels when plotted on a :class:`~.NumberLine`.
"""
self.custom_labels = custom_labels
def function(self, value: float) -> float:
"""The function that will be used to scale the values.
Parameters
----------
value
The number/``np.ndarray`` to be scaled.
Returns
-------
float
The value after it has undergone the scaling.
Raises
------
NotImplementedError
Must be subclassed.
"""
raise NotImplementedError
def inverse_function(self, value: float) -> float:
"""The inverse of ``function``. Used for plotting on a particular axis.
Raises
------
NotImplementedError
Must be subclassed.
"""
raise NotImplementedError
def get_custom_labels(
self,
val_range: Iterable[float],
) -> Iterable["Mobject"]:
"""Custom instructions for generating labels along an axis.
Parameters
----------
val_range
The position of labels. Also used for defining the content of the labels.
Returns
-------
        List
            A list consisting of the labels.
            Can be passed to :meth:`~.NumberLine.add_labels()` along with ``val_range``.
Raises
------
NotImplementedError
Can be subclassed, optional.
"""
raise NotImplementedError
class LinearBase(_ScaleBase):
def __init__(self, scale_factor: float = 1.0):
"""The default scaling class.
Parameters
----------
scale_factor
The slope of the linear function, by default 1.0
"""
super().__init__()
self.scale_factor = scale_factor
def function(self, value: float) -> float:
"""Multiplies the value by the scale factor.
Parameters
----------
value
Value to be multiplied by the scale factor.
"""
return self.scale_factor * value
def inverse_function(self, value: float) -> float:
"""Inverse of function. Divides the value by the scale factor.
Parameters
----------
value
value to be divided by the scale factor.
"""
return value / self.scale_factor
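    # Worked example following directly from the two methods above: with
    # scale_factor=2, function(3) returns 6 and inverse_function(6) returns 3.0.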
class LogBase(_ScaleBase):
def __init__(self, base: float = 10, custom_labels: bool = True):
"""Scale for logarithmic graphs/functions.
Parameters
----------
base
The base of the log, by default 10.
custom_labels : bool, optional
For use with :class:`~.Axes`:
            Whether or not to include ``LaTeX`` axis labels, by default True.
Examples
--------
.. code-block:: python
func = ParametricFunction(lambda x: x, scaling=LogBase(base=2))
"""
super().__init__()
self.base = base
self.custom_labels = custom_labels
def function(self, value: float) -> float:
"""Scales the value to fit it to a logarithmic scale.``self.function(5)==10**5``"""
return self.base ** value
def inverse_function(self, value: float) -> float:
"""Inverse of ``function``. The value must be greater than 0"""
if value <= 0:
raise ValueError(
"log(0) is undefined. Make sure the value is in the domain of the function"
)
value = math.log(value, self.base)
return value
def get_custom_labels(
self,
val_range: Iterable[float],
unit_decimal_places: int = 0,
**base_config: Dict[str, Any],
) -> List["Mobject"]:
"""Produces custom :class:`~.Integer` labels in the form of ``10^2``.
Parameters
----------
val_range
The iterable of values used to create the labels. Determines the exponent.
        unit_decimal_places
            The number of decimal places to include in the exponent.
base_config
Additional arguments to be passed to :class:`~.Integer`.
"""
# uses `format` syntax to control the number of decimal places.
tex_labels = [
Integer(
self.base,
unit="^{%s}" % (f"{self.inverse_function(i):.{unit_decimal_places}f}"),
**base_config,
)
for i in val_range
]
return tex_labels
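# Usage sketch for the scale classes above (numbers follow from the definitions):
# LogBase(10).function(3) == 1000, while LogBase(10).get_custom_labels([1, 10, 100])
# yields Integer labels rendered as 10^{0}, 10^{1} and 10^{2}.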
|
test/system/tools/fds/tests.py | timmylinux/Fast-DDS | 575 | 11135239 | # Copyright 2020 Proyectos y Sistemas de Mantenimiento SL (eProsima).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Tests for the fastdds tool.
Contains a package of system tests for the fastdds tool.
usage: test.py <binary_path> <test_name>
binary_path: Fast-DDS binary path
test_name: Test to run.
Available tests:
        test_fast_discovery_closure
        test_fast_discovery_parse_XML_file_default_profile
        test_fast_discovery_parse_XML_file_URI_profile
        test_fast_discovery_prefix_override
        test_fast_discovery_locator_address_override
        test_fast_discovery_locator_override_same_address
        test_fast_discovery_locator_port_override
        test_fast_discovery_locator_override_same_port
        test_fast_discovery_backup
        test_fast_discovery_no_XML
        test_fast_discovery_incorrect_participant
        test_fast_discovery_no_prefix
        test_fast_discovery_several_server_ids
        test_fast_discovery_invalid_locator
        test_fast_discovery_non_existent_profile
"""
import argparse
import subprocess
import sys
import time
import signal
import os
from xml.dom import minidom
from xml.etree.ElementTree import XML
def signal_handler(signum, frame):
# ignore signals if the test generates them
pass
def send_command(command):
print("Executing command: " + str(command))
# this subprocess cannot be executed in shell=True or using bash
# because a background script will not broadcast the signals
# it receives
proc = subprocess.Popen(command,
stdout=subprocess.PIPE,
universal_newlines=True
)
# sleep to let the server run
time.sleep(1)
# 1. An exit code of 0 means everything was alright
# 2. An exit code of 1 means the tool's process terminated before even
# sending the kill signal.
# 3. An exit code of 2 means the signal could not terminate the process
# 4. An exit code of 3 means the signal terminated the process, but the
# output was different than expected
exit_code = 0
# direct this script to ignore SIGINT
signal.signal(signal.SIGINT, signal_handler)
# send SIGINT to process and wait for processing
lease = 0
while True:
if os.name == 'posix':
proc.send_signal(signal.SIGINT)
elif os.name == 'nt':
proc.send_signal(signal.CTRL_C_EVENT)
time.sleep(1)
lease += 1
# Break when signal kills the process or it hangs
if proc.poll() is None and lease < 10:
print('iterating...')
else:
break
# Check whether SIGINT was able to terminate the process
if proc.poll() is None:
# SIGINT couldn't terminate the process. Kill it and exit with code 2
proc.kill()
print('Signal could not kill process')
sys.exit(2)
# Get process output
output, err = proc.communicate()
return output, err, exit_code
def XML_parse_profile(XMLfile, profile_name):
XML_file = minidom.parse(XMLfile)
participants = XML_file.getElementsByTagName("participant")
for participant in participants:
if not profile_name:
if participant.getAttribute("is_default_profile") == "true":
participant_profile = participant
else:
if participant.getAttribute("profile_name") == profile_name:
participant_profile = participant
return participant_profile
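# Sketch of the helper above, using names that appear in the tests below:
# XML_parse_profile('test_xml_discovery_server.xml', 'UDP_server_two') returns the
# <participant> element whose profile_name matches, while an empty profile name
# selects the participant marked with is_default_profile="true".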
def check_output(output, err, output_to_check, override):
EXPECTED_CLOSURE = "### Server shut down ###"
if EXPECTED_CLOSURE in output or override:
if output_to_check in output:
# Success
exit_code = 0
else:
# Failure
print('STDOUT:')
print(output)
print('STDERR:')
print(err)
exit_code = 3
else:
# Failure
print('STDOUT:')
print(output)
print('STDERR:')
print(err)
exit_code = 3
return exit_code
def test_fast_discovery_closure(fast_discovery_tool):
"""Test that discovery command closes correctly."""
command = [fast_discovery_tool, '-i', '0']
output, err, exit_code = send_command(command)
EXPECTED_CLOSURE = "### Server shut down ###"
exit_code = check_output(output, err, EXPECTED_CLOSURE, False)
sys.exit(exit_code)
def test_fast_discovery_parse_XML_file_default_profile(fast_discovery_tool):
"""Test that discovery command read XML default profile correctly."""
XML_file_path = 'test_xml_discovery_server.xml'
default_profile = XML_parse_profile(XML_file_path, "")
prefix = default_profile.getElementsByTagName('prefix')
PREFIX = prefix[0].firstChild.data
EXPECTED_SERVER_ADDRESS = []
udpv4 = default_profile.getElementsByTagName('udpv4')
for elem in udpv4:
address2 = elem.getElementsByTagName('address')[0].firstChild.data
port2 = elem.getElementsByTagName('port')[0].firstChild.data
if port2[0] == '0':
port2 = port2[1:]
EXPECTED_SERVER_ADDRESS.append("UDPv4:[" + address2 + "]:" + port2)
command = [fast_discovery_tool, '-x', XML_file_path]
output, err, exit_code = send_command(command)
EXPECTED_SERVER_ID = "Server GUID prefix: " + PREFIX.lower()
print(EXPECTED_SERVER_ID)
exit_code = check_output(output, err, EXPECTED_SERVER_ID, False)
if exit_code != 0:
sys.exit(exit_code)
for add in EXPECTED_SERVER_ADDRESS:
exit_code = check_output(output, err, add, False)
if exit_code != 0:
sys.exit(exit_code)
sys.exit(exit_code)
def test_fast_discovery_parse_XML_file_URI_profile(fast_discovery_tool):
"""Test that discovery command read XML profile using URI."""
XML_file_path = 'test_xml_discovery_server.xml'
uri_profile = XML_parse_profile(XML_file_path, "UDP_server_two")
prefix = uri_profile.getElementsByTagName('prefix')
PREFIX = prefix[0].firstChild.data
EXPECTED_SERVER_ADDRESS = []
udpv4 = uri_profile.getElementsByTagName('udpv4')
for elem in udpv4:
address2 = elem.getElementsByTagName('address')[0].firstChild.data
port2 = elem.getElementsByTagName('port')[0].firstChild.data
if port2[0] == '0':
port2 = port2[1:]
EXPECTED_SERVER_ADDRESS.append("UDPv4:[" + address2 + "]:" + port2)
command = [fast_discovery_tool, '-x', 'UDP_server_two@' + XML_file_path]
output, err, exit_code = send_command(command)
EXPECTED_SERVER_ID = "Server GUID prefix: " + PREFIX.lower()
print(EXPECTED_SERVER_ID)
exit_code = check_output(output, err, EXPECTED_SERVER_ID, False)
if exit_code != 0:
sys.exit(exit_code)
for add in EXPECTED_SERVER_ADDRESS:
exit_code = check_output(output, err, add, False)
if exit_code != 0:
sys.exit(exit_code)
sys.exit(exit_code)
def test_fast_discovery_prefix_override(fast_discovery_tool):
"""Test that discovery command overrides prefix given in XML file"""
XML_file_path = 'test_xml_discovery_server.xml'
default_profile = XML_parse_profile(XML_file_path, "")
EXPECTED_SERVER_ID = "Server GUID prefix: 44.53.00.5f.45.50.52.4f.53.49.4d.41"
EXPECTED_SERVER_ADDRESS = []
udpv4 = default_profile.getElementsByTagName('udpv4')
for elem in udpv4:
address2 = elem.getElementsByTagName('address')[0].firstChild.data
port2 = elem.getElementsByTagName('port')[0].firstChild.data
if port2[0] == '0':
port2 = port2[1:]
EXPECTED_SERVER_ADDRESS.append("UDPv4:[" + address2 + "]:" + port2)
command = [fast_discovery_tool, '-i', '0', '-x', XML_file_path]
output, err, exit_code = send_command(command)
exit_code = check_output(output, err, EXPECTED_SERVER_ID, False)
if exit_code != 0:
sys.exit(exit_code)
for add in EXPECTED_SERVER_ADDRESS:
exit_code = check_output(output, err, add, False)
if exit_code != 0:
sys.exit(exit_code)
sys.exit(exit_code)
def test_fast_discovery_locator_address_override(fast_discovery_tool):
"""Test that discovery command overrides locator given in XML file when using -l option"""
XML_file_path = 'test_xml_discovery_server.xml'
default_profile = XML_parse_profile(XML_file_path, "")
prefix = default_profile.getElementsByTagName('prefix')
PREFIX = prefix[0].firstChild.data
EXPECTED_SERVER_ID = "Server GUID prefix: " + PREFIX.lower()
EXPECTED_SERVER_ADDRESS = []
EXPECTED_SERVER_ADDRESS.append("UDPv4:[172.16.17.32]:11811")
XML_SERVER_ADDRESS = []
udpv4 = default_profile.getElementsByTagName('udpv4')
for elem in udpv4:
address2 = elem.getElementsByTagName('address')[0].firstChild.data
port2 = elem.getElementsByTagName('port')[0].firstChild.data
if port2[0] == '0':
port2 = port2[1:]
XML_SERVER_ADDRESS.append("UDPv4:[" + address2 + "]:" + port2)
command = [fast_discovery_tool, '-x', XML_file_path, '-l', '172.16.17.32']
output, err, exit_code = send_command(command)
exit_code = check_output(output, err, EXPECTED_SERVER_ID, False)
if exit_code != 0:
sys.exit(exit_code)
for add in EXPECTED_SERVER_ADDRESS:
exit_code = check_output(output, err, add, False)
if exit_code != 0:
sys.exit(exit_code)
for add in XML_SERVER_ADDRESS:
exit_code = check_output(output, err, add, False)
if exit_code != 3:
sys.exit(3)
sys.exit(0)
def test_fast_discovery_locator_override_same_address(fast_discovery_tool):
"""Test that discovery command overrides locator given in XML file even if the address is the same"""
XML_file_path = 'test_xml_discovery_server.xml'
default_profile = XML_parse_profile(XML_file_path, "")
prefix = default_profile.getElementsByTagName('prefix')
PREFIX = prefix[0].firstChild.data
EXPECTED_SERVER_ID = "Server GUID prefix: " + PREFIX.lower()
EXPECTED_SERVER_ADDRESS = []
EXPECTED_SERVER_ADDRESS.append("UDPv4:[127.0.0.9]:11811")
XML_SERVER_ADDRESS = []
udpv4 = default_profile.getElementsByTagName('udpv4')
for elem in udpv4:
address2 = elem.getElementsByTagName('address')[0].firstChild.data
port2 = elem.getElementsByTagName('port')[0].firstChild.data
if port2[0] == '0':
port2 = port2[1:]
XML_SERVER_ADDRESS.append("UDPv4:[" + address2 + "]:" + port2)
command = [fast_discovery_tool, '-x', XML_file_path, '-l', '127.0.0.9']
output, err, exit_code = send_command(command)
exit_code = check_output(output, err, EXPECTED_SERVER_ID, False)
if exit_code != 0:
sys.exit(exit_code)
for add in EXPECTED_SERVER_ADDRESS:
exit_code = check_output(output, err, add, False)
if exit_code != 0:
sys.exit(exit_code)
for add in XML_SERVER_ADDRESS:
exit_code = check_output(output, err, add, False)
if exit_code != 3:
sys.exit(3)
sys.exit(0)
def test_fast_discovery_locator_port_override(fast_discovery_tool):
"""Test that discovery command overrides locator given in XML file when using -p option"""
XML_file_path = 'test_xml_discovery_server.xml'
default_profile = XML_parse_profile(XML_file_path, "")
prefix = default_profile.getElementsByTagName('prefix')
PREFIX = prefix[0].firstChild.data
EXPECTED_SERVER_ID = "Server GUID prefix: " + PREFIX.lower()
EXPECTED_SERVER_ADDRESS = []
EXPECTED_SERVER_ADDRESS.append("UDPv4:[0.0.0.0]:1234")
XML_SERVER_ADDRESS = []
udpv4 = default_profile.getElementsByTagName('udpv4')
for elem in udpv4:
address2 = elem.getElementsByTagName('address')[0].firstChild.data
port2 = elem.getElementsByTagName('port')[0].firstChild.data
if port2[0] == '0':
port2 = port2[1:]
XML_SERVER_ADDRESS.append("UDPv4:[" + address2 + "]:" + port2)
command = [fast_discovery_tool, '-x', XML_file_path, '-p', '1234']
output, err, exit_code = send_command(command)
exit_code = check_output(output, err, EXPECTED_SERVER_ID, False)
if exit_code != 0:
sys.exit(exit_code)
for add in EXPECTED_SERVER_ADDRESS:
exit_code = check_output(output, err, add, False)
if exit_code != 0:
sys.exit(exit_code)
for add in XML_SERVER_ADDRESS:
exit_code = check_output(output, err, add, False)
if exit_code != 3:
sys.exit(3)
sys.exit(0)
def test_fast_discovery_locator_override_same_port(fast_discovery_tool):
"""Test that discovery command overrides locator given in XML file even if the port is the same"""
XML_file_path = 'test_xml_discovery_server.xml'
default_profile = XML_parse_profile(XML_file_path, "")
prefix = default_profile.getElementsByTagName('prefix')
PREFIX = prefix[0].firstChild.data
EXPECTED_SERVER_ID = "Server GUID prefix: " + PREFIX.lower()
EXPECTED_SERVER_ADDRESS = []
EXPECTED_SERVER_ADDRESS.append("UDPv4:[0.0.0.0]:2811")
XML_SERVER_ADDRESS = []
udpv4 = default_profile.getElementsByTagName('udpv4')
for elem in udpv4:
address2 = elem.getElementsByTagName('address')[0].firstChild.data
port2 = elem.getElementsByTagName('port')[0].firstChild.data
if port2[0] == '0':
port2 = port2[1:]
XML_SERVER_ADDRESS.append("UDPv4:[" + address2 + "]:" + port2)
command = [fast_discovery_tool, '-x', XML_file_path, '-p', '2811']
output, err, exit_code = send_command(command)
exit_code = check_output(output, err, EXPECTED_SERVER_ID, False)
if exit_code != 0:
sys.exit(exit_code)
for add in EXPECTED_SERVER_ADDRESS:
exit_code = check_output(output, err, add, False)
if exit_code != 0:
sys.exit(exit_code)
for add in XML_SERVER_ADDRESS:
exit_code = check_output(output, err, add, False)
if exit_code != 3:
sys.exit(3)
sys.exit(0)
def test_fast_discovery_backup(fast_discovery_tool):
"""Test that launches a BACKUP using CLI and XML"""
XML_file_path = "test_xml_discovery_server.xml"
EXPECTED_PARTICIPANT_TYPE = "Participant Type: BACKUP"
EXPECTED_SERVER_ID = "Server GUID prefix: 44.53.00.5f.45.50.52.4f.53.49.4d.41"
EXPECTED_SERVER_ADDRESS = []
EXPECTED_SERVER_ADDRESS.append("UDPv4:[0.0.0.0]:11811")
command = [fast_discovery_tool, '-b', '-i', '0']
output, err, exit_code = send_command(command)
exit_code = check_output(output, err, EXPECTED_PARTICIPANT_TYPE, False)
if exit_code != 0:
sys.exit(exit_code)
exit_code = check_output(output, err, EXPECTED_SERVER_ID, False)
if exit_code != 0:
sys.exit(exit_code)
for add in EXPECTED_SERVER_ADDRESS:
exit_code = check_output(output, err, add, False)
if exit_code != 0:
sys.exit(exit_code)
EXPECTED_XML_SERVER_ID = "Server GUID prefix: 44.53.33.5f.45.50.52.4f.53.49.4d.41"
EXPECTED_XML_SERVER_ADDRESS = []
EXPECTED_XML_SERVER_ADDRESS.append("UDPv4:[127.0.0.105]:11825")
command = [fast_discovery_tool, '-x', 'UDP_backup@' + XML_file_path]
output, err, exit_code = send_command(command)
exit_code = check_output(output, err, EXPECTED_PARTICIPANT_TYPE, False)
if exit_code != 0:
sys.exit(exit_code)
exit_code = check_output(output, err, EXPECTED_XML_SERVER_ID, False)
if exit_code != 0:
sys.exit(exit_code)
for add in EXPECTED_XML_SERVER_ADDRESS:
exit_code = check_output(output, err, add, False)
if exit_code != 0:
sys.exit(exit_code)
sys.exit(exit_code)
def test_fast_discovery_no_XML(fast_discovery_tool):
"""Test that checks output when the XML file provided does not exist"""
XML_file_path = "non_existent_xml_file.xml"
command = [fast_discovery_tool, '-x', XML_file_path]
output, err, exit_code = send_command(command)
exit_code = check_output(output, err, "Cannot open XML file", True)
sys.exit(exit_code)
def test_fast_discovery_incorrect_participant(fast_discovery_tool):
"""Test that checks failure if the participant is not SERVER/BACKUP"""
XML_file_path = "test_wrong_xml_discovery_server.xml"
command = [fast_discovery_tool, '-x', 'UDP_simple@' + XML_file_path]
output, err, exit_code = send_command(command)
exit_code = check_output(output, err, "The provided configuration is not valid", True)
if exit_code != 0:
sys.exit(exit_code)
command = [fast_discovery_tool, '-x', 'UDP_client@' + XML_file_path]
output, err, exit_code = send_command(command)
exit_code = check_output(output, err, "The provided configuration is not valid", True)
sys.exit(exit_code)
def test_fast_discovery_no_prefix(fast_discovery_tool):
"""Test failure when no server ID is provided"""
XML_file_path = "test_wrong_xml_discovery_server.xml"
command = [fast_discovery_tool, '-x', 'UDP_no_prefix@' + XML_file_path]
output, err, exit_code = send_command(command)
exit_code = check_output(output, err, "Server id is mandatory if not defined in the XML file", True)
sys.exit(exit_code)
def test_fast_discovery_several_server_ids(fast_discovery_tool):
"""Test failure when several Server IDs are provided"""
command = [fast_discovery_tool, '-i', '0', '-i', '1']
output, err, exit_code = send_command(command)
exit_code = check_output(output, err, "only one server id can be specified", True)
sys.exit(exit_code)
def test_fast_discovery_invalid_locator(fast_discovery_tool):
"""Test failure when the locator is invalid"""
command = [fast_discovery_tool, '-i', '0', '-l', '256.0.0.1']
output, err, exit_code = send_command(command)
exit_code = check_output(output, err, "Invalid listening locator address specified", True)
sys.exit(exit_code)
def test_fast_discovery_non_existent_profile(fast_discovery_tool):
"""Test failure when the profile does not exist in the XML file"""
XML_file_path = "test_xml_discovery_server.xml"
command = [fast_discovery_tool, '-x', 'non_existent_profile@' + XML_file_path]
output, err, exit_code = send_command(command)
exit_code = check_output(output, err, "Error loading specified profile from XML file", True)
sys.exit(exit_code)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
usage='test.py <binary_path> <test_name>',
)
parser.add_argument('binary_path',
help='''fast-discovery-server binary fully
qualified path''')
parser.add_argument('test_name',
help='Test to run')
args = parser.parse_args()
# Tests dictionary
tests = {
'test_fast_discovery_closure': lambda: test_fast_discovery_closure(
args.binary_path),
'test_fast_discovery_parse_XML_file_default_profile': lambda:
test_fast_discovery_parse_XML_file_default_profile(args.binary_path),
'test_fast_discovery_parse_XML_file_URI_profile': lambda:
test_fast_discovery_parse_XML_file_URI_profile(args.binary_path),
'test_fast_discovery_prefix_override': lambda:
test_fast_discovery_prefix_override(args.binary_path),
'test_fast_discovery_locator_address_override': lambda:
test_fast_discovery_locator_address_override(args.binary_path),
'test_fast_discovery_locator_override_same_address': lambda:
test_fast_discovery_locator_override_same_address(args.binary_path),
'test_fast_discovery_locator_port_override': lambda:
test_fast_discovery_locator_port_override(args.binary_path),
'test_fast_discovery_locator_override_same_port': lambda:
test_fast_discovery_locator_override_same_port(args.binary_path),
'test_fast_discovery_backup': lambda:
test_fast_discovery_backup(args.binary_path),
'test_fast_discovery_no_XML': lambda:
test_fast_discovery_no_XML(args.binary_path),
'test_fast_discovery_incorrect_participant': lambda:
test_fast_discovery_incorrect_participant(args.binary_path),
'test_fast_discovery_no_prefix': lambda:
test_fast_discovery_no_prefix(args.binary_path),
'test_fast_discovery_several_server_ids': lambda:
test_fast_discovery_several_server_ids(args.binary_path),
'test_fast_discovery_invalid_locator': lambda:
test_fast_discovery_invalid_locator(args.binary_path),
'test_fast_discovery_non_existent_profile': lambda:
test_fast_discovery_non_existent_profile(args.binary_path),
}
tests[args.test_name]()
|
rotkehlchen/accounting/pot.py | rotkehlchenio/rotkehlchen | 137 | 11135247 | import logging
from typing import TYPE_CHECKING, Any, Dict, List, Literal, Optional, Tuple
from rotkehlchen.accounting.cost_basis import CostBasisCalculator
from rotkehlchen.accounting.cost_basis.prefork import (
handle_prefork_asset_acquisitions,
handle_prefork_asset_spends,
)
from rotkehlchen.accounting.mixins.event import AccountingEventType
from rotkehlchen.accounting.pnl import PNL, PnlTotals
from rotkehlchen.accounting.structures.processed_event import ProcessedAccountingEvent
from rotkehlchen.accounting.transactions import TransactionsAccountant
from rotkehlchen.assets.asset import Asset
from rotkehlchen.constants.assets import A_KFEE
from rotkehlchen.constants.misc import ONE, ZERO
from rotkehlchen.db.reports import DBAccountingReports
from rotkehlchen.db.settings import DBSettings
from rotkehlchen.errors.misc import InputError, RemoteError
from rotkehlchen.errors.price import NoPriceForGivenTimestamp, PriceQueryUnsupportedAsset
from rotkehlchen.errors.serialization import DeserializationError
from rotkehlchen.fval import FVal
from rotkehlchen.history.price import PriceHistorian
from rotkehlchen.logging import RotkehlchenLogsAdapter
from rotkehlchen.types import Location, Price, Timestamp
from rotkehlchen.user_messages import MessagesAggregator
from rotkehlchen.utils.mixins.customizable_date import CustomizableDateMixin
if TYPE_CHECKING:
from rotkehlchen.chain.ethereum.accounting.aggregator import EVMAccountingAggregator
from rotkehlchen.db.dbhandler import DBHandler
logger = logging.getLogger(__name__)
log = RotkehlchenLogsAdapter(logger)
class AccountingPot(CustomizableDateMixin):
"""
Represents a single accounting depot for which events are processed
under a specific set of rules
"""
def __init__(
self,
database: 'DBHandler',
evm_accounting_aggregator: 'EVMAccountingAggregator',
msg_aggregator: MessagesAggregator,
) -> None:
super().__init__(database=database)
self.profit_currency = self.settings.main_currency
self.cost_basis = CostBasisCalculator(
database=database,
msg_aggregator=msg_aggregator,
)
self.pnls = PnlTotals()
self.processed_events: List[ProcessedAccountingEvent] = []
self.transactions = TransactionsAccountant(
evm_accounting_aggregator=evm_accounting_aggregator,
pot=self,
)
self.query_start_ts = self.query_end_ts = Timestamp(0)
def _add_processed_event(self, event: ProcessedAccountingEvent) -> None:
dbpnl = DBAccountingReports(self.database)
self.processed_events.append(event)
try:
dbpnl.add_report_data(
report_id=self.report_id,
time=event.timestamp,
ts_converter=self.timestamp_to_date,
event=event,
)
except (DeserializationError, InputError) as e:
log.error(str(e))
return
log.debug(event.to_string(self.timestamp_to_date))
def get_rate_in_profit_currency(self, asset: Asset, timestamp: Timestamp) -> Price:
"""Get the profit_currency price of asset in the given timestamp
May raise:
- PriceQueryUnsupportedAsset if from/to asset is missing from price oracles
- NoPriceForGivenTimestamp if we can't find a price for the asset in the given
timestamp from the price oracle
- RemoteError if there is a problem reaching the price oracle server
or with reading the response returned by the server
"""
if asset == self.profit_currency:
rate = Price(FVal(1))
else:
rate = PriceHistorian().query_historical_price(
from_asset=asset,
to_asset=self.profit_currency,
timestamp=timestamp,
)
return rate
def reset(
self,
settings: DBSettings,
start_ts: Timestamp,
end_ts: Timestamp,
report_id: int,
) -> None:
self.settings = settings
self.report_id = report_id
self.profit_currency = self.settings.main_currency
self.query_start_ts = start_ts
self.query_end_ts = end_ts
self.pnls.reset()
self.cost_basis.reset(settings)
self.transactions.reset()
self.processed_events = []
def add_acquisition(
self, # pylint: disable=unused-argument
event_type: AccountingEventType,
notes: str,
location: Location,
timestamp: Timestamp,
asset: Asset,
amount: FVal,
taxable: bool,
given_price: Optional[Price] = None,
extra_data: Optional[Dict] = None,
**kwargs: Any, # to be able to consume args given by add_asset_change_event
) -> None:
"""Add an asset acquisition event for the pot and count it in PnL if needed.
If a custom price for the asset should be used it can be passed here via
given_price. Price is always in profit currency during accounting."""
if amount == ZERO: # do nothing for zero acquisitions
return
if given_price is not None:
price = given_price
else:
price = self.get_rate_in_profit_currency(asset=asset, timestamp=timestamp)
prefork_events = handle_prefork_asset_acquisitions(
cost_basis=self.cost_basis,
location=location,
timestamp=timestamp,
asset=asset,
amount=amount,
price=price,
starting_index=len(self.processed_events),
)
for prefork_event in prefork_events:
self._add_processed_event(prefork_event)
event = ProcessedAccountingEvent(
type=event_type,
notes=notes,
location=location,
timestamp=timestamp,
asset=asset,
taxable_amount=amount,
free_amount=ZERO,
price=price,
pnl=PNL(), # filled out later
cost_basis=None,
index=len(self.processed_events),
)
if extra_data:
event.extra_data = extra_data
self.cost_basis.obtain_asset(event)
# count profit/losses if we are inside the query period
if timestamp >= self.query_start_ts and taxable:
self.pnls[event_type] += event.calculate_pnl(
count_entire_amount_spend=False,
count_cost_basis_pnl=True,
)
self._add_processed_event(event)
def add_spend(
self,
event_type: AccountingEventType,
notes: str,
location: Location,
timestamp: Timestamp,
asset: Asset,
amount: FVal,
taxable: bool,
given_price: Optional[Price] = None,
taxable_amount_ratio: FVal = ONE,
count_entire_amount_spend: bool = True,
count_cost_basis_pnl: bool = True,
extra_data: Optional[Dict[str, Any]] = None,
) -> Tuple[FVal, FVal]:
"""Add an asset spend event for the pot and count it in PnL if needed
If a custom price for the asset should be used it can be passed here via
given_price. Price is always in profit currency during accounting.
        If taxable_amount_ratio is given then it determines how the taxable and
        free amounts are initialized in the case of missing cost basis. By default it's all taxable.
        If count_entire_amount_spend is True then the entire amount is counted as a spend,
        which means an expense (negative pnl).
If count_cost_basis_pnl is True then we also count any profit/loss the asset
may have had compared to when it was acquired.
Returns (free, taxable) amounts.
"""
if amount == ZERO: # do nothing for zero spends
return ZERO, ZERO
if asset.is_fiat() and event_type != AccountingEventType.FEE:
taxable = False
handle_prefork_asset_spends(
cost_basis=self.cost_basis,
asset=asset,
amount=amount,
timestamp=timestamp,
)
if given_price is not None:
price = given_price
else:
price = self.get_rate_in_profit_currency(
asset=asset,
timestamp=timestamp,
)
if asset == A_KFEE:
count_cost_basis_pnl = False
taxable = False
spend_cost = None
if count_cost_basis_pnl:
spend_cost = self.cost_basis.spend_asset(
location=location,
timestamp=timestamp,
asset=asset,
amount=amount,
rate=price,
taxable_spend=taxable,
)
taxable_amount = taxable_amount_ratio * amount
free_amount = amount - taxable_amount
if spend_cost:
taxable_amount = spend_cost.taxable_amount
free_amount = amount - spend_cost.taxable_amount
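        # Example of the split above: for amount=10 and taxable_amount_ratio=0.4
        # with no cost basis information, taxable_amount is 4 and free_amount is 6;
        # when cost basis information exists, its taxable amount takes precedence.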
spend_event = ProcessedAccountingEvent(
type=event_type,
notes=notes,
location=location,
timestamp=timestamp,
asset=asset,
taxable_amount=taxable_amount,
free_amount=free_amount,
price=price,
pnl=PNL(), # filled out later
cost_basis=spend_cost,
index=len(self.processed_events),
)
if extra_data:
spend_event.extra_data = extra_data
# count profit/losses if we are inside the query period
if timestamp >= self.query_start_ts and taxable:
self.pnls[event_type] += spend_event.calculate_pnl(
count_entire_amount_spend=count_entire_amount_spend,
count_cost_basis_pnl=count_cost_basis_pnl,
)
self._add_processed_event(spend_event)
return free_amount, taxable_amount
def add_asset_change_event(
self,
method: Literal['acquisition', 'spend'],
event_type: AccountingEventType,
notes: str,
location: Location,
timestamp: Timestamp,
asset: Asset,
amount: FVal,
taxable: bool,
given_price: Optional[Price] = None,
**kwargs: Any,
) -> None:
fn = getattr(self, f'add_{method}')
return fn(
event_type=event_type,
notes=notes,
location=location,
timestamp=timestamp,
asset=asset,
amount=amount,
taxable=taxable,
given_price=given_price,
**kwargs,
)
def get_prices_for_swap(
self,
timestamp: Timestamp,
amount_in: FVal,
asset_in: Asset,
amount_out: FVal,
asset_out: Asset,
fee: Optional[FVal],
fee_asset: Optional[Asset],
) -> Optional[Tuple[Price, Price]]:
"""Calculates the prices for assets going in and out of a swap/trade.
The rules are:
- For the asset_in we get the equivalent rate from asset_out + fee if any.
If there is no price found for fee_currency we ignore it.
If there is no price for asset_out then we switch to using the asset_in price itself.
If neither of the 2 assets can have their price known, we bail.
- For the asset_out we get the equivalent rate from asset_in.
        If there is no price found for asset_in then we switch to using the asset_out price.
If neither of the 2 assets can have their price known we bail.
Returns (out_price, in_price) or None if it can't find proper prices
"""
if ZERO in (amount_in, amount_out):
log.error(
f'At get_prices_for_swap got a zero amount. {asset_in=} {amount_in=} '
f'{asset_out=} {amount_out=}. Skipping ...')
return None
try:
out_price = self.get_rate_in_profit_currency(
asset=asset_out,
timestamp=timestamp,
)
except (PriceQueryUnsupportedAsset, NoPriceForGivenTimestamp, RemoteError):
out_price = None
fee_price = None
if fee is not None and fee_asset is not None and fee != ZERO:
# also checking fee_asset != None due to https://github.com/rotki/rotki/issues/4172
try:
fee_price = self.get_rate_in_profit_currency(
asset=fee_asset,
timestamp=timestamp,
)
except (PriceQueryUnsupportedAsset, NoPriceForGivenTimestamp, RemoteError):
fee_price = None
try:
in_price = self.get_rate_in_profit_currency(
asset=asset_in,
timestamp=timestamp,
)
except (PriceQueryUnsupportedAsset, NoPriceForGivenTimestamp, RemoteError):
in_price = None
if out_price is None and in_price is None:
return None
if out_price is not None:
paid = amount_out * out_price
if fee_price is not None:
paid += fee_price * fee # type: ignore # fee should exist here
calculated_in = Price(paid / amount_in)
else:
calculated_in = in_price # type: ignore # in_price should exist here
if in_price is not None:
calculated_out = Price((amount_in * in_price) / amount_out)
else:
calculated_out = out_price # type: ignore # out_price should exist here
return (calculated_out, calculated_in)
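# Illustrative note (not part of the original module): the pricing rules of
# get_prices_for_swap worked through with plain numbers. Assume a hypothetical
# swap of amount_out=2 of asset_out (known price 1500 in profit currency) for
# amount_in=3000 of asset_in (price unknown), with a fee of 0.01 paid in asset_out:
#   paid           = 2 * 1500 + 0.01 * 1500 = 3015
#   calculated_in  = paid / amount_in = 3015 / 3000 = 1.005
#   calculated_out = 1500   # out_price used directly since in_price is unknown
# If instead only asset_in had a known price, calculated_in would be in_price and
# calculated_out would be (amount_in * in_price) / amount_out.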
|
climpred/tests/test_logging.py | rom-py/climpred | 104 | 11135251 |
import logging
from climpred.prediction import compute_hindcast
def test_log_compute_hindcast(
hind_ds_initialized_1d_cftime, reconstruction_ds_1d_cftime, caplog
):
"""Tests that logging works for compute_hindcast."""
LOG_STRINGS = ["lead", "inits", "verifs"]
with caplog.at_level(logging.INFO):
compute_hindcast(hind_ds_initialized_1d_cftime, reconstruction_ds_1d_cftime)
for i, record in enumerate(caplog.record_tuples):
# Skip header information.
if i >= 2:
print(record)
assert all(x in record[2] for x in LOG_STRINGS)
def test_log_HindcastEnsemble_verify(hindcast_hist_obs_1d, caplog):
"""Test that verify logs."""
LOG_STRINGS = ["lead", "inits", "verifs"]
with caplog.at_level(logging.INFO):
hindcast_hist_obs_1d.verify(
metric="mse", comparison="e2o", dim="init", alignment="same_verif"
)
for i, record in enumerate(caplog.record_tuples):
# Skip header information.
if i >= 2:
print(record)
assert all(x in record[2] for x in LOG_STRINGS)
assert "initialized" in record[2]
|
Co-Simulation/PTV-Vissim/vissim_integration/constants.py | adelbennaceur/carla | 7,883 | 11135282 | #!/usr/bin/env python
# Copyright (c) 2020 Computer Vision Center (CVC) at the Universitat Autonoma de
# Barcelona (UAB).
#
# This work is licensed under the terms of the MIT license.
# For a copy, see <https://opensource.org/licenses/MIT>.
""" This module defines constants used for the vissim-carla co-simulation. """
# ==================================================================================================
# -- constants -------------------------------------------------------------------------------------
# ==================================================================================================
INVALID_ACTOR_ID = -1
CARLA_SPAWN_OFFSET_Z = 25.0 # meters
# Maximum distance of a Vissim veh/ped from a simulator veh/ped to be seen by the simulator (<=0
# means unlimited radius).
VISSIM_VISIBILITY_RADIUS = 0.0
# Maximum number of simulator vehicles/pedestrians/detectors (to be passed to Vissim).
VISSIM_MAX_SIMULATOR_VEH = 5000
VISSIM_MAX_SIMULATOR_PED = 5000
VISSIM_MAX_SIMULATOR_DET = 500
# Maximum number of vissim vehicles/pedestrians/signal groups (to be passed to the simulator).
VISSIM_MAX_VISSIM_VEH = 5000
VISSIM_MAX_VISSIM_PED = 5000
VISSIM_MAX_VISSIM_SIGGRP = 5000
# VISSIM Vehicle data constants.
NAME_MAX_LENGTH = 100
MAX_UDA = 16
|
bumblebee_status/modules/contrib/caffeine.py | rosalogia/bumblebee-status | 1,089 | 11135296 | # pylint: disable=C0111,R0903,W0212
"""Enable/disable automatic screen locking.
Requires the following executables:
* xdg-screensaver
* xdotool
* xprop (as dependency for xdotool)
* notify-send
contributed by `TheEdgeOfRage <https://github.com/TheEdgeOfRage>`_ - many thanks!
"""
import logging
import os
import shutil
import psutil
import core.module
import core.widget
import core.input
import core.decorators
import util.cli
class Module(core.module.Module):
@core.decorators.every(minutes=10)
def __init__(self, config, theme):
super().__init__(config, theme, core.widget.Widget(""))
self.__active = False
self.__xid = None
core.input.register(self, button=core.input.LEFT_MOUSE, cmd=self.__toggle)
def __check_requirements(self):
requirements = ["xdotool", "xprop", "xdg-screensaver"]
missing = []
for tool in requirements:
if not shutil.which(tool):
missing.append(tool)
return missing
def __get_i3bar_xid(self):
xid = (
util.cli.execute("xdotool search --class 'i3bar'")
.partition("\n")[0]
.strip()
)
if xid.isdigit():
return xid
logging.warning("Module caffeine: xdotool couldn't get X window ID of 'i3bar'.")
return None
def __notify(self):
if not shutil.which("notify-send"):
return
if self.__active:
util.cli.execute("notify-send 'Consuming caffeine'")
else:
util.cli.execute("notify-send 'Out of coffee'")
def _suspend_screensaver(self):
self.__xid = self.__get_i3bar_xid()
if self.__xid is None:
return False
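        # Fork and start a new session (setsid) so that the xdg-screensaver call
        # runs detached from this process; it leaves an `xprop -id <xid> -spy`
        # watcher behind, which __resume_screensaver kills later to resume.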
pid = os.fork()
if pid == 0:
os.setsid()
util.cli.execute("xdg-screensaver suspend {}".format(self.__xid))
os._exit(0)
else:
os.waitpid(pid, 0)
return True
def __resume_screensaver(self):
success = True
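        # xdg-screensaver keeps the suspension tied to an `xprop -id <xid> -spy`
        # watcher process; finding and killing those watchers resumes the screensaver.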
xprop_path = shutil.which("xprop")
pids = [
p.pid
for p in psutil.process_iter()
if p.cmdline() == [xprop_path, "-id", str(self.__xid), "-spy"]
]
for pid in pids:
try:
os.kill(pid, 9)
except OSError:
success = False
return success
def state(self, _):
if self.__active:
return "activated"
return "deactivated"
def __toggle(self, _):
missing = self.__check_requirements()
if missing:
logging.warning("Could not run caffeine - missing %s!", ", ".join(missing))
return
self.__active = not self.__active
if self.__active:
success = self._suspend_screensaver()
else:
success = self.__resume_screensaver()
if success:
self.__notify()
else:
self.__active = not self.__active
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
|
Python/Algorithms/Dijkstra-Algorithm/dijkstra_algorithm.py | sjsneha/Data-Structures-and-Algorithms | 245 | 11135297 |
"""Python 3 implementation of Djikstra's algorithm for finding the shortest
path between nodes in a graph. Written as a learning exercise, so lots of
comments and no error handling.
"""
from collections import deque
INFINITY = float("inf")
class Graph:
def __init__(self, filename):
"""Reads graph definition and stores it. Each line of the graph
definition file defines an edge by specifying the start node,
end node, and distance, delimited by spaces.
Stores the graph definition in two properties which are used by
Dijkstra's algorithm in the shortest_path method:
self.nodes = set of all unique nodes in the graph
self.adjacency_list = dict that maps each node to an unordered set of
(neighbor, distance) tuples.
"""
# Read the graph definition file and store in graph_edges as a list of
# lists of [from_node, to_node, distance]. This data structure is not
        # used by Dijkstra's algorithm; it's just an intermediate step in the
        # creation of self.nodes and self.adjacency_list.
graph_edges = []
with open(filename) as fhandle:
for line in fhandle:
edge_from, edge_to, cost, *_ = line.strip().split(" ")
graph_edges.append((edge_from, edge_to, float(cost)))
self.nodes = set()
for edge in graph_edges:
self.nodes.update([edge[0], edge[1]])
self.adjacency_list = {node: set() for node in self.nodes}
for edge in graph_edges:
self.adjacency_list[edge[0]].add((edge[1], edge[2]))
def shortest_path(self, start_node, end_node):
"""Uses Dijkstra's algorithm to determine the shortest path from
start_node to end_node. Returns (path, distance).
"""
unvisited_nodes = self.nodes.copy() # All nodes are initially unvisited.
# Create a dictionary of each node's distance from start_node. We will
# update each node's distance whenever we find a shorter path.
distance_from_start = {
node: (0 if node == start_node else INFINITY) for node in self.nodes
}
# Initialize previous_node, the dictionary that maps each node to the
        # node it was visited from when the shortest path to it was found.
previous_node = {node: None for node in self.nodes}
while unvisited_nodes:
# Set current_node to the unvisited node with shortest distance
# calculated so far.
current_node = min(
unvisited_nodes, key=lambda node: distance_from_start[node]
)
unvisited_nodes.remove(current_node)
# If current_node's distance is INFINITY, the remaining unvisited
# nodes are not connected to start_node, so we're done.
if distance_from_start[current_node] == INFINITY:
break
# For each neighbor of current_node, check whether the total distance
# to the neighbor via current_node is shorter than the distance we
# currently have for that node. If it is, update the neighbor's values
# for distance_from_start and previous_node.
for neighbor, distance in self.adjacency_list[current_node]:
new_path = distance_from_start[current_node] + distance
if new_path < distance_from_start[neighbor]:
distance_from_start[neighbor] = new_path
previous_node[neighbor] = current_node
if current_node == end_node:
break # we've visited the destination node, so we're done
# To build the path to be returned, we iterate through the nodes from
# end_node back to start_node. Note the use of a deque, which can
# appendleft with O(1) performance.
path = deque()
current_node = end_node
while previous_node[current_node] is not None:
path.appendleft(current_node)
current_node = previous_node[current_node]
path.appendleft(start_node)
return path, distance_from_start[end_node]
def main():
"""Runs a few simple tests to verify the implementation.
"""
"""verify_algorithm(
filename="",
start="",
end="",
path=["", "", "", ""],
distance=,
)"""
def verify_algorithm(filename, start, end, path, distance):
"""Helper function to run simple tests and print results to console.
filename = graph definition file
start/end = path to be calculated
    path = expected shortest path
distance = expected distance of path
"""
graph = Graph(filename)
returned_path, returned_distance = graph.shortest_path(start, end)
assert list(returned_path) == path
assert returned_distance == distance
print('\ngraph definition file: {0}'.format(filename))
print(' start/end nodes: {0} -> {1}'.format(start, end))
print(' shortest path: {0}'.format(path))
print(' total distance: {0}'.format(distance))
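def _example_run():
    """Illustrative usage sketch added for clarity (not part of the original
    file): writes a tiny hypothetical three-node graph to a temporary file and
    runs shortest_path on it. Expected result: path A -> B -> C, distance 3.0.
    """
    import os
    import tempfile
    # Each line of the graph file is "from_node to_node distance".
    with tempfile.NamedTemporaryFile("w", suffix=".txt", delete=False) as tmp:
        tmp.write("A B 1\nB C 2\nA C 5\n")
        tmp_name = tmp.name
    try:
        graph = Graph(tmp_name)
        path, distance = graph.shortest_path("A", "C")
        print(list(path))  # ['A', 'B', 'C'] -- the 2-hop route beats the direct edge
        print(distance)    # 3.0
    finally:
        os.remove(tmp_name)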
if __name__ == "__main__":
main()
|