ext (stringclasses, 9 values) | sha (stringlengths, 40-40) | content (stringlengths, 3-1.04M)
---|---|---|
py | 1a39553831913b0d4e53c5100a132ccae9fc13f2 | # Test of a feature of PSet validation:
# messageSummaryToJobReport
import FWCore.ParameterSet.Config as cms
process = cms.Process("TEST")
import FWCore.Framework.test.cmsExceptionsFatal_cff
process.options = FWCore.Framework.test.cmsExceptionsFatal_cff.options
process.load("FWCore.MessageService.test.Services_cff")
process.MessageLogger = cms.Service("MessageLogger",
#enable one of the following -- the first should pass, the rest fail
messageSummaryToJobReport = cms.untracked.bool(True),
# messageSummaryToJobReport = cms.bool(True),
# messageSummaryToJobReport = cms.untracked.int32(2),
u1_infos = cms.untracked.PSet(
threshold = cms.untracked.string('INFO'),
noTimeStamps = cms.untracked.bool(True),
FwkJob = cms.untracked.PSet(
limit = cms.untracked.int32(0)
),
preEventProcessing = cms.untracked.PSet(
limit = cms.untracked.int32(0)
)
),
u1_warnings = cms.untracked.PSet(
threshold = cms.untracked.string('WARNING'),
noTimeStamps = cms.untracked.bool(True)
),
u1_debugs = cms.untracked.PSet(
threshold = cms.untracked.string('DEBUG'),
noTimeStamps = cms.untracked.bool(True),
FwkJob = cms.untracked.PSet(
limit = cms.untracked.int32(0)
),
preEventProcessing = cms.untracked.PSet(
limit = cms.untracked.int32(0)
)
),
u1_default = cms.untracked.PSet(
noTimeStamps = cms.untracked.bool(True),
FwkJob = cms.untracked.PSet(
limit = cms.untracked.int32(0)
),
preEventProcessing = cms.untracked.PSet(
limit = cms.untracked.int32(0)
)
),
u1_errors = cms.untracked.PSet(
threshold = cms.untracked.string('ERROR'),
noTimeStamps = cms.untracked.bool(True)
),
fwkJobReports = cms.untracked.vstring('u1_job_report.mxml'),
debugModules = cms.untracked.vstring('*'),
categories = cms.untracked.vstring('preEventProcessing',
'FwkJob'),
destinations = cms.untracked.vstring('u1_warnings',
'u1_errors',
'u1_infos',
'u1_debugs',
'u1_default')
)
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(2)
)
process.source = cms.Source("EmptySource")
process.sendSomeMessages = cms.EDAnalyzer("UnitTestClient_A")
process.p = cms.Path(process.sendSomeMessages)
|
py | 1a39558631be0705303de35e25c64ab9d435d1e9 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Copyright (c) 2014, Lars Asplund [email protected]
from __future__ import print_function
from os.path import join, exists
from os import makedirs
from shutil import rmtree
import traceback
import vunit.ostools as ostools
from vunit.test_report import TestResult, PASSED, FAILED
import sys
class TestRunner:
def __init__(self, report, output_path, verbose=False):
self._report = report
self._output_path = output_path
self._verbose = verbose
def _run_test_suite(self, test_suite, num_tests):
def add_and_print_results(results, runtime):
time = runtime/len(test_suite.test_cases)
for test_name in test_suite.test_cases:
self._report.add_result(test_name,
results[test_name],
time,
output_file_name)
self._report.print_latest_status(total_tests=num_tests)
print()
for test_name in test_suite.test_cases:
self._print_test_case_banner(test_name)
start = ostools.get_time()
old_stdout = sys.stdout
old_stderr = sys.stderr
output_path = join(self._output_path, self._encode_path(test_suite.name))
output_file_name = join(output_path, "output.txt")
try:
# If we could not clean output path, fail all tests
if exists(output_path):
rmtree(output_path)
makedirs(output_path)
output_file = open(output_file_name, "w")
except:
traceback.print_exc()
results = self._fail_suite(test_suite)
add_and_print_results(results, 0.0)
return
try:
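# Capture test output in the per-test output file; in verbose mode, also echo it to the console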
if self._verbose:
sys.stdout = TeeToFile([old_stdout, output_file])
sys.stderr = TeeToFile([old_stderr, output_file])
else:
sys.stdout = TeeToFile([output_file])
sys.stderr = TeeToFile([output_file])
results = test_suite.run(output_path)
except:
traceback.print_exc()
results = self._fail_suite(test_suite)
finally:
sys.stdout = old_stdout
sys.stderr = old_stderr
output_file.close()
any_not_passed = any(value != PASSED for value in results.values())
if (not self._verbose) and any_not_passed:
with open(output_file_name, "r") as fread:
for line in fread:
print(line, end="")
runtime = ostools.get_time() - start
add_and_print_results(results, runtime)
def _fail_suite(self, test_suite):
" Return failure for all tests in suite "
results = {}
for test_name in test_suite.test_cases:
results[test_name] = FAILED
return results
def _print_test_case_banner(self, test_case_name):
" Print a banner before running each testcase "
print("running %s" % test_case_name)
def _encode_path(self, path):
" @TODO what if two tests named 'Test 1' and 'Test_1' ? "
return path.replace(" ", "_")
def run(self, test_suites):
num_tests = 0
for test_suite in test_suites:
for test_name in test_suite.test_cases:
num_tests += 1
if self._verbose:
print("Running test: " + test_name)
if self._verbose:
print("Running %i tests" % num_tests)
for test_suite in test_suites:
self._run_test_suite(test_suite, num_tests)
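# File-like helper that duplicates every write() and flush() call to several underlying
# streams; used above to tee test output to the console and the output file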
class TeeToFile:
def __init__(self, files):
self._files = files
def write(self, txt):
for ofile in self._files:
ofile.write(txt)
def flush(self):
for ofile in self._files:
ofile.flush()
|
py | 1a3957f520d61c3edada34c318336255b59c1a62 | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import math
from typing import Dict, List, Optional, Tuple, Union
import torch
import torch.nn as nn
import torch.nn.functional as F
from pytext.config import ConfigBase
from pytext.config.module_config import ModuleConfig
from pytext.models.module import create_module
from pytext.models.representations.transformer.positional_embedding import (
PositionalEmbedding,
)
from pytext.models.seq_models.base import (
PlaceholderAttentionIdentity,
PlaceholderIdentity,
)
from pytext.models.seq_models.positional import (
PostionalEmbedCombine,
PostionalEmbedType,
SinusoidalPositionalEmbedding,
)
from pytext.models.seq_models.utils import Linear
from torch import Tensor
from torch.nn import LayerNorm
from .attention import MultiheadAttention
from .base import (
PyTextIncrementalDecoderComponent,
PyTextSeq2SeqModule,
PlaceholderIdentity,
)
from .light_conv import LightweightConv
from .projection_layers import (
DecoderWithLinearOutputProjection,
DecoupledDecoderHead,
)
from .utils import extract_ontology_vocab
class LightConvDecoderLayer(PyTextSeq2SeqModule):
class Config(ConfigBase):
attention_dropout: float = 0.0
decoder_attention_heads: int = 1
self_attention_heads: int = 1
decoder_conv_dim: int = 128
decoder_conv_type: Union[
LightweightConv.Config, PlaceholderIdentity.Config
] = LightweightConv.Config()
attention_type: Union[
MultiheadAttention.Config, None
] = MultiheadAttention.Config()
self_attention_type: Optional[MultiheadAttention.Config] = None
decoder_embed_dim: int = 128
decoder_ffn_embed_dim: int = 512
decoder_glu: bool = True
decoder_normalize_before: bool = False
dropout: float = 0.1
input_dropout: float = 0.1
relu_dropout: float = 0.0
need_attention: bool = True
convolution_type: str = "causal"
@classmethod
def from_config(cls, config, kernel_size):
conv = create_module(
config.decoder_conv_type,
input_size=config.decoder_conv_dim,
kernel_size=kernel_size,
convolution_type=config.convolution_type,
)
if config.attention_type is not None:
attention = create_module(
config.attention_type,
config.decoder_embed_dim,
config.decoder_attention_heads,
)
else:
attention = None
if config.self_attention_type is not None:
self_attention = create_module(
config.self_attention_type,
config.decoder_embed_dim,
config.self_attention_heads,
)
else:
self_attention = None
return cls(
**config._asdict(),
conv=conv,
self_attention=self_attention,
attention=attention
)
def __init__(
self,
attention_dropout,
decoder_attention_heads,
self_attention_heads,
decoder_conv_dim,
# ARBABU: need to remove these two type parameters
decoder_conv_type,
attention_type,
self_attention_type,
decoder_embed_dim,
decoder_ffn_embed_dim,
decoder_glu,
decoder_normalize_before,
dropout,
input_dropout,
relu_dropout,
need_attention,
convolution_type,
conv=None,
self_attention=None,
attention=None,
):
super().__init__()
self.embed_dim = decoder_embed_dim
self.conv_dim = decoder_conv_dim
if decoder_glu:
self.linear1 = Linear(self.embed_dim, 2 * self.conv_dim)
self.act = nn.GLU()
else:
self.linear1 = Linear(self.embed_dim, self.conv_dim)
self.act = PlaceholderIdentity()
self.conv = conv
self.linear2 = Linear(self.conv_dim, self.embed_dim)
self.dropout = dropout
self.relu_dropout = relu_dropout
self.input_dropout = input_dropout
self.normalize_before = decoder_normalize_before
self.conv_layer_norm = LayerNorm(self.embed_dim)
if attention is None:
self.no_encoder_attn = True
self.encoder_attn = PlaceholderAttentionIdentity()
self.encoder_attn_layer_norm = PlaceholderIdentity()
else:
self.no_encoder_attn = False
self.encoder_attn = attention
self.encoder_attn_layer_norm = LayerNorm(self.embed_dim)
if self_attention is None:
self.has_self_attn = False
self.self_attn = PlaceholderAttentionIdentity()
else:
self.has_self_attn = True
self.self_attn = self_attention
self.fc1 = Linear(self.embed_dim, decoder_ffn_embed_dim)
self.fc2 = Linear(decoder_ffn_embed_dim, self.embed_dim)
self.final_layer_norm = LayerNorm(self.embed_dim)
self.need_attn = need_attention
def forward(
self,
x,
encoder_out: Tensor,
encoder_padding_mask: Optional[Tensor],
decoder_padding_mask: Optional[Tensor],
incremental_state: Optional[Dict[str, Tensor]],
):
"""
Args:
x (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)`
encoder_padding_mask (ByteTensor): binary ByteTensor of shape
`(batch, src_len)` where padding elements are indicated by ``1``.
Returns:
tuple of the layer output of shape `(seq_len, batch, embed_dim)` and the
optional encoder attention weights
"""
residual = x
normalize = self.maybe_layer_norm(before=True)
if normalize:
x = self.conv_layer_norm(x)
if self.has_self_attn:
x, _ = self.self_attn(
x,
key=x,
value=x,
key_padding_mask=decoder_padding_mask,
need_weights=False,
incremental_state=incremental_state,
)
x = residual + x
residual = x
x = F.dropout(x, p=self.input_dropout, training=self.training)
x = self.linear1(x)
x = self.act(x)
if decoder_padding_mask is not None:
x = x.masked_fill(decoder_padding_mask.transpose(0, 1).unsqueeze(2), 0)
x = self.conv(x, incremental_state=incremental_state)
x = self.linear2(x)
x = F.dropout(x, p=self.dropout, training=self.training)
x = residual + x
normalize = self.maybe_layer_norm(after=True)
if normalize:
x = self.conv_layer_norm(x)
attn: Optional[Tensor] = None
if not self.no_encoder_attn:
residual = x
normalize = self.maybe_layer_norm(before=True)
if normalize:
x = self.encoder_attn_layer_norm(x)
x, attn = self.encoder_attn(
query=x,
key=encoder_out,
value=encoder_out,
key_padding_mask=encoder_padding_mask,
incremental_state=incremental_state,
need_weights=(not self.training and self.need_attn),
)
x = F.dropout(x, p=self.dropout, training=self.training)
x = residual + x
normalize = self.maybe_layer_norm(after=True)
if normalize:
x = self.encoder_attn_layer_norm(x)
residual = x
normalize = self.maybe_layer_norm(before=True)
if normalize:
x = self.final_layer_norm(x)
x = F.relu(self.fc1(x))
x = F.dropout(x, p=self.relu_dropout, training=self.training)
x = self.fc2(x)
x = F.dropout(x, p=self.dropout, training=self.training)
x = residual + x
normalize = self.maybe_layer_norm(after=True)
if normalize:
x = self.final_layer_norm(x)
return x, attn
def maybe_layer_norm(self, before: bool = False, after: bool = False):
"""This a utility function which helps to control the layer norm behavior
`before` and `after` specific components using one variable in config.
If self.normalize_before is set to True, output is true only when `before`
is True
"""
assert before ^ after, "Incorrect usage"
return after ^ self.normalize_before
def reorder_incremental_state(
self, incremental_state: Dict[str, Tensor], new_order: Tensor
):
self.self_attn.reorder_incremental_state(incremental_state, new_order)
self.encoder_attn.reorder_incremental_state(incremental_state, new_order)
self.conv.reorder_incremental_state(incremental_state, new_order)
def extra_repr(self):
return (
"dropout={}, relu_dropout={}, input_dropout={}, normalize_before={}".format(
self.dropout,
self.relu_dropout,
self.input_dropout,
self.normalize_before,
)
)
class ConvDecoderConfig(ConfigBase):
dropout: float = 0.1
decoder_embed_dim: int = 128
decoder_input_dim: int = 128
decoder_output_dim: int = 128
max_target_positions: int = 128
decoder_learned_pos: bool = False
no_token_positional_embeddings: bool = False
positional_embedding_type: PostionalEmbedType = PostionalEmbedType.LEARNED
combine_pos_embed: PostionalEmbedCombine = PostionalEmbedCombine.CONCAT
decoder_normalize_before: bool = False
class LightConvDecoderBase(PyTextIncrementalDecoderComponent):
class Config(ModuleConfig):
decoder_config: ConvDecoderConfig = ConvDecoderConfig()
layer_config: LightConvDecoderLayer.Config = LightConvDecoderLayer.Config()
decoder_kernel_size_list: List[int] = [3, 7, 15]
@classmethod
def from_config(cls, config, tgt_dict, tgt_embedding):
kernel_size_list = config.decoder_kernel_size_list
layers = []
for size in kernel_size_list:
assert (
config.decoder_config.decoder_embed_dim
== config.layer_config.decoder_embed_dim
)
layers.append(create_module(config.layer_config, kernel_size=size))
return cls(tgt_dict, tgt_embedding, layers, config.decoder_config)
def __init__(self, target_dict, embed_tokens, layers, decoder_config):
super().__init__()
self.dropout = decoder_config.dropout
input_embed_dim = embed_tokens.embedding_dim
embed_dim = decoder_config.decoder_embed_dim
output_embed_dim = decoder_config.decoder_output_dim
padding_idx = target_dict.get_pad_index()
self.max_target_positions = decoder_config.max_target_positions
self.embed_tokens = embed_tokens
self.embed_scale = math.sqrt(embed_dim) # todo: try with input_embed_dim
self.padding_idx = padding_idx
self.no_token_positional_embeddings = (
decoder_config.no_token_positional_embeddings
)
# creating this is also conditional
self.project_in_dim = (
Linear(input_embed_dim, embed_dim)
if embed_dim != input_embed_dim
else PlaceholderIdentity()
)
self.embed_layer_norm = LayerNorm(embed_dim)
self.combine_pos_embed = decoder_config.combine_pos_embed.value
if decoder_config.combine_pos_embed == PostionalEmbedCombine.SUM:
pos_embed_dim = embed_dim
elif decoder_config.combine_pos_embed == PostionalEmbedCombine.CONCAT:
pos_embed_dim = embed_dim - input_embed_dim
else:
raise NotImplementedError
if not decoder_config.no_token_positional_embeddings:
if decoder_config.positional_embedding_type == PostionalEmbedType.LEARNED:
self.embed_positions = PositionalEmbedding(
decoder_config.max_target_positions,
pos_embed_dim,
padding_idx,
)
elif (
decoder_config.positional_embedding_type
== PostionalEmbedType.SINUSOIDAL
or decoder_config.positional_embedding_type == PostionalEmbedType.HYBRID
):
self.embed_positions = SinusoidalPositionalEmbedding(
pos_embed_dim,
padding_idx,
init_size=decoder_config.max_target_positions,
learned_embed=decoder_config.positional_embedding_type
== PostionalEmbedType.HYBRID,
)
else:
raise NotImplementedError("Positional embedding type not supported")
else:
self.embed_positions = PlaceholderIdentity()
self.layers = nn.ModuleList(layers)
self.project_out_dim = (
Linear(embed_dim, output_embed_dim, bias=False)
if embed_dim != output_embed_dim
else PlaceholderIdentity()
)
self.normalize = decoder_config.decoder_normalize_before
if self.normalize:
self.layer_norm = LayerNorm(embed_dim)
else:
self.layer_norm = PlaceholderIdentity()
def forward_unprojected(
self,
prev_output_tokens: Tensor,
encoder_out: Dict[str, Tensor],
incremental_state: Optional[Dict[str, Tensor]] = None,
timestep: Optional[int] = None,
) -> Tuple[Tensor, Dict[str, Tensor]]:
output_dict: Dict[str, Tensor] = {}
if incremental_state is not None:
prev_output_tokens = prev_output_tokens[:, -1:]
# embed tokens and positions
x = self.embed_scale * self.embed_tokens([[prev_output_tokens]])
if not self.no_token_positional_embeddings:
# TODO : Verify incremental generation for AR mode
x = self.pos_embed(x, prev_output_tokens)
else:
x = self.project_in_dim(x)
x = self.embed_layer_norm(x)
x = F.dropout(x, p=self.dropout, training=self.training)
output_dict["decoder_layer_0"] = x.clone()
# B x T x C -> T x B x C
x = x.transpose(0, 1)
last_layer_attn: Optional[Tensor] = None
decoder_padding_mask = prev_output_tokens.eq(self.padding_idx)
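# Number of non-padding tokens in each target sequence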
target_lengths = (~decoder_padding_mask).sum(dim=1)
if not decoder_padding_mask.any():
decoder_mask = None
else:
decoder_mask = decoder_padding_mask
encoder = encoder_out["encoder_out"]
encoder_mask: Optional[Tensor] = None
if "encoder_mask" in encoder_out:
encoder_mask = encoder_out["encoder_mask"]
# decoder layers
for idx, layer in enumerate(self.layers):
encoder = encoder_out["encoder_out"]
encoder_mask: Optional[Tensor] = None
if "encoder_mask" in encoder_out:
encoder_mask = encoder_out["encoder_mask"]
x, last_layer_attn = layer(
x, encoder, encoder_mask, decoder_mask, incremental_state
)
output_dict["decoder_layer_" + str(idx + 1)] = x.transpose(0, 1).clone()
if self.normalize:
x = self.layer_norm(x)
# T x B x C -> B x T x C
x = x.transpose(0, 1)
x = self.project_out_dim(x)
if last_layer_attn is not None:
output_dict["attn_scores"] = last_layer_attn
output_dict["target_lengths"] = target_lengths
output_dict["decoder_mask"] = decoder_padding_mask
for key in encoder_out.keys():
output_dict[key] = encoder_out[key]
return x, output_dict
def pos_embed(self, x, src_tokens):
# TODO : Positional embeddings needs to be tested in AR mode
if self.combine_pos_embed == PostionalEmbedCombine.SUM.value:
x = self.project_in_dim(x)
return self._vanilla_transformer(x, src_tokens)
elif self.combine_pos_embed == PostionalEmbedCombine.CONCAT.value:
return self._concat_pos_embed(x, src_tokens)
else:
raise NotImplementedError("Method not supported")
def _vanilla_transformer(self, x, src_tokens):
x += self.embed_positions(src_tokens)
return x
def _concat_pos_embed(self, x, src_tokens):
pos_embed = self.embed_positions(src_tokens)
return torch.cat([x, pos_embed], dim=2)
def max_positions(self):
"""Maximum output length supported by the decoder."""
if self.no_token_positional_embeddings:
return self.max_target_positions
return min(self.max_target_positions, self.embed_positions.max_positions())
def reorder_incremental_state(
self, incremental_state: Dict[str, Tensor], new_order: Tensor
):
for layer in self.layers:
layer.reorder_incremental_state(incremental_state, new_order)
def get_probs(
self, decoder_out: Tuple[Tensor, Dict[str, Tensor]]
) -> Tuple[Tensor, Tensor, Tensor]:
return self.projection_layer.get_probs(decoder_out)
class LightConvDecoder(LightConvDecoderBase):
def __init__(self, target_dict, embed_tokens, layers, decoder_config):
super().__init__(target_dict, embed_tokens, layers, decoder_config)
self.projection_layer = DecoderWithLinearOutputProjection(
target_dict, target_dict, decoder_config.decoder_output_dim
)
def forward(
self,
prev_output_tokens: Tensor,
encoder_out: Dict[str, Tensor],
incremental_state: Optional[Dict[str, Tensor]] = None,
timestep: Optional[int] = None,
) -> Tuple[Tensor, Dict[str, Tensor]]:
hidden_decoder_output = self.forward_unprojected(
prev_output_tokens, encoder_out, incremental_state, timestep
)
return self.projection_layer(
encoder_out=encoder_out,
decoder_out=hidden_decoder_output,
incremental_state=incremental_state,
)
def get_probs(
self, decoder_out: Tuple[Tensor, Dict[str, Tensor]]
) -> Tuple[Tensor, Tensor, Tensor]:
return self.projection_layer.get_probs(decoder_out)
class LightConvDecoupledDecoder(LightConvDecoderBase):
class Config(ModuleConfig):
decoder_config: ConvDecoderConfig = ConvDecoderConfig()
layer_config: LightConvDecoderLayer.Config = LightConvDecoderLayer.Config()
decoder_kernel_size_list: List[int] = [3, 7, 15]
decoder_layers: int = 3
decoupled_attention_heads: int = 1
ontology_generation_only: bool = False
model_output_logprob: bool = True
def __init__(
self,
target_dict,
embed_tokens,
layers,
decoder_config,
ontology_generation_only,
decoupled_attention_heads,
model_output_logprob,
):
super().__init__(target_dict, embed_tokens, layers, decoder_config)
fixed_generation_vocab = None
if ontology_generation_only:
fixed_generation_vocab = extract_ontology_vocab(target_dict)
self.projection_layer = DecoupledDecoderHead(
target_dict,
target_dict,
out_embed_dim=decoder_config.decoder_output_dim,
encoder_hidden_dim=decoder_config.decoder_input_dim,
pointer_attention_heads=decoupled_attention_heads,
fixed_generation_vocab=fixed_generation_vocab,
model_output_logprob=model_output_logprob,
)
def forward(
self,
prev_output_tokens: Tensor,
encoder_out: Dict[str, Tensor],
incremental_state: Optional[Dict[str, Tensor]] = None,
timestep: Optional[int] = None,
) -> Tuple[Tensor, Dict[str, Tensor]]:
hidden_decoder_output = self.forward_unprojected(
prev_output_tokens, encoder_out, incremental_state, timestep
)
return self.projection_layer(
encoder_out=encoder_out,
decoder_out=hidden_decoder_output,
incremental_state=incremental_state,
)
@classmethod
def from_config(cls, config, tgt_dict, tgt_embedding):
kernel_size_list = config.decoder_kernel_size_list
layers = []
for size in kernel_size_list:
assert (
config.decoder_config.decoder_embed_dim
== config.layer_config.decoder_embed_dim
)
layers.append(create_module(config.layer_config, kernel_size=size))
return cls(
tgt_dict,
tgt_embedding,
layers,
config.decoder_config,
config.ontology_generation_only,
config.decoupled_attention_heads,
config.model_output_logprob,
)
|
py | 1a39587cdaf24bac1011db1a093c8bef7549f081 | # -*- coding: utf-8 -*-
"""Unit test package for virfac."""
|
py | 1a3958e410634c60197941a388f30973996a8816 | # original: https://gist.github.com/karpitsky/29b49c3ae759a606b7db39ad3c3315ca
# This code was taken from karpitsky's gist.
# Modifications:
# --------------
# Takes a collection id obtained from a public collection
# on 'translate.yandex.ru'. Writes the collection to `dict/newdict.txt`
# under the current folder.
import sys
import string
import random
import requests
from pathlib import Path
collection_id = sys.argv[1]
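# Use a random 18-digit value as the yandexuid cookie for the anonymous request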
uid = ''.join(random.choices(string.digits, k=18))
cookies = {
'first_visit_src': 'collection_share_desktop',
'yandexuid': uid
}
url = f'https://translate.yandex.ru/props/api/collections/{collection_id}?srv=tr-text&uid'
response = requests.get(url, cookies=cookies).json()
Path('dict').mkdir(exist_ok=True)
with open('dict/newdict.txt', 'w') as fp:
for pair in response['collection']['records']:
fp.write(f'{pair["text"]} - {pair["translation"]}\n')
|
py | 1a3959d02099b17cd92a47fde2e07803a86fb7e7 | import pandas as pd
from ggindex.IndexViz.IndexReport import IndexReport
from ggindex.IndexViz.IndexComparator import IndexComparator
import plotly.graph_objects as go
from plotly.subplots import make_subplots
import numpy as np
from ggindex.GreenGrowthStuff import GreenGrowthStuff
class IndexCrossReport(GreenGrowthStuff):
'''
A class to compare two GreenGrowthIndex AND the data used to compute them.
It is more general than the IndexComparator, which only compares the two GreenGrowthIndex without looking at the underlying data.
Attributes
----------
Report_1: IndexReport
The IndexReport of the first GreenGrowthIndex, built from the first dataset
name_1: str
Name of the first report
Report_2: IndexReport
The IndexReport of the second GreenGrowthIndex, built from the second dataset
name_2: str
Name of the second report
IndexComparator: IndexComparator
The comparison of first and second GreenGrowthIndex
data: pd.DataFrame
The full data of the first and second Index
'''
def __init__(self, data_1, ST_1, name_1, data_2, ST_2, name_2):
'''
Initialization
Parameters
----------
data_1: pd.DataFrame
The data to compute the first GreenGrowthIndex
ST_1: DataFrame
The sustainable targets to compute the first GreenGrowthIndex
name_1: str
Name of the first GreenGrowthIndex and Data
data_2: pd.DataFrame
The data to compute the second GreenGrowthIndex
ST_2: DataFrame
The sustainable targets to compute the second GreenGrowthIndex
name_2: str
Name of the second GreenGrowthIndex and Data
'''
super(IndexCrossReport, self).__init__()
self.Report_1 = IndexReport(data_1, ST_1)
self.Report_2 = IndexReport(data_2, ST_2)
self.name_1 = name_1
self.name_2 = name_2
self.IndexComparator = IndexComparator(self.Report_1.GGI,
self.Report_2.GGI,
name_GGI_1=name_1,
name_GGI_2=name_2)
self.data = self.merge_data(data_1, data_2, name_1, name_2)
def add_normalized_to_data(self, data, GGI):
'''
Add the normalized value to the dataframe
'''
data = data.copy().set_index(['ISO', 'Indicator'])
value_normed = GGI.to_long()
value_normed = value_normed[value_normed.Aggregation == 'Indicator_normed'].drop(columns=['Aggregation']).dropna().set_index('Variable', append=True)
data['Value_normalized'] = value_normed['Value']
return data.reset_index()
def merge_data(self, data_1, data_2, name_1, name_2):
'''
Tag each dataset with its report name, attach the normalized indicator
values from the corresponding GreenGrowthIndex and concatenate the two
datasets into a single long dataframe.
'''
data_1['name'] = name_1
data_2['name'] = name_2
data_1 = self.add_normalized_to_data(data_1, self.Report_1.GGI)
data_2 = self.add_normalized_to_data(data_2, self.Report_2.GGI)
df = pd.concat([data_1, data_2], axis=0)
return df
def cross_indicators_dimension_continent(self, dimension, continent, normalized=True, save=None):
'''
Compare the two reports indicator by indicator for one dimension and one
continent: one horizontal bar subplot per indicator, with one bar per
report for every country, using raw or normalized values. Optionally
saves the figure as an HTML file.
'''
if normalized:
value = 'Value_normalized'
title = f"{dimension} indicators normalized {continent}: {self.name_1} and {self.name_2}"
save_name = f'CrossReport_indicators_normalized_{dimension}_{continent}'
else:
value = 'Value'
title = f"{dimension} indicators {continent}: {self.name_1} and {self.name_2}"
save_name = f'CrossReport_indicators_{dimension}_{continent}'
indicator_names = self.IND_CAT_DIM[self.IND_CAT_DIM.Dimension ==
dimension]['Indicator'].values
df = self.data[(self.data.Indicator.isin(indicator_names))
& (self.data.Continent == continent)]
hover_text = "%{text} <br>%{x}"
fig = make_subplots(rows=1,
cols=len(indicator_names),
subplot_titles=indicator_names,
y_title='ISO')
for k, ind in enumerate(indicator_names):
tmp_df = df[df.Indicator == ind].set_index('name')
fig.add_trace(go.Bar(y=tmp_df.loc[self.name_1]['ISO'],
x=tmp_df.loc[self.name_1][value],
orientation='h',
marker=dict(opacity=0.5),
marker_color='red',
hovertemplate=hover_text,
text=tmp_df.loc[self.name_1]['Text'],
name=self.name_1,
width=0.4,
),
row=1,
col=k + 1)
fig.add_trace(go.Bar(y=tmp_df.loc[self.name_2]['ISO'],
x=tmp_df.loc[self.name_2][value],
orientation='h',
marker=dict(opacity=0.5),
hovertemplate=hover_text,
text=tmp_df.loc[self.name_2]['Text'],
marker_color='blue',
width=0.4,
name=self.name_2,
),
row=1,
col=k + 1)
fig.update_layout(height=1000, width=len(indicator_names) * 200,
title_text=title,
hoverlabel_align='right',
showlegend=False,
barmode='group')
if save:
fig.write_html(f"{save}/{save_name}.html")
return fig
def cross_indicators_ISO(self, ISO, normalized=True, save=None):
'''
Compare the two reports for a single country (ISO): one small subplot per
indicator with one bar and one marker per report, using raw or normalized
values. Optionally saves the figure as an HTML file.
'''
if normalized:
value = 'Value_normalized'
else:
value = 'Value'
df = self.data[(self.data.ISO == ISO)]
country = df['Country'].unique()[0]
indicator_names = self.IND_CAT_DIM['Indicator'].to_numpy().reshape(18, 2)
hover_text = "%{text} <br>%{x}"
fig = make_subplots(rows=18, cols=2,
subplot_titles=indicator_names.flatten())
for (x, y), ind in np.ndenumerate(indicator_names):
row = x + 1
col = y + 1
tmp_df = df[df.Indicator == ind]
fig.add_trace(go.Bar(x=tmp_df[value],
y=tmp_df['name'],
width=0.1,
marker=dict(opacity=0.5),
orientation='h',
marker_color=['red', 'blue'],
),
row=row,
col=col)
fig.add_trace(go.Scatter(x=tmp_df[value],
y=tmp_df['name'],
marker=dict(opacity=0.99, size=10),
marker_color=['red', 'blue'],
mode='markers',
text=tmp_df['Text'],
hovertemplate=hover_text,
),
row=row,
col=col)
# hover text goes here
fig.update_layout(height=100 * 18, width=2 * 400,
title_text=f"indicators {country} {ISO}",
showlegend=False,
hoverlabel_align='right', barmode='group')
if save:
fig.write_html(f"{save}/CrossReport_indicators_{ISO}.html")
return fig
|
py | 1a395ab42314424376329959b8cf785c57fb543a | #!/usr/bin/env python
import cv2
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.parallel
import numpy as np
import time
import open3d
import models
from utils.kitti_util import Calibration
from matplotlib import cm
from utilities import disp_to_depth, read_calib_file, dynamic_baseline, cv2_image_to_tensor, \
load_test_data, depth_to_cloud, visualize_point_cloud, run_test
if __name__ == "__main__":
print("loading model")
model = models.__dict__["SDNet"](maxdepth=80, maxdisp=192, down=2)
model = nn.DataParallel(model).cuda()
torch.backends.cudnn.benchmark = True
checkpoint = torch.load("weights/sdn_kitti_object.pth")
model.load_state_dict(checkpoint["state_dict"])
model.eval()
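# Run stereo depth inference on each KITTI object test sample and visualize the resulting point cloud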
for sample_index in range(7517):
left_image_path = f"data/images/left/testing/image_2/{sample_index:06}.png"
right_image_path = f"data/images/right/testing/image_3/{sample_index:06}.png"
calib_path = f"data/calib/testing/calib/{sample_index:06}.txt"
left_img, right_img, calib = load_test_data(left_image_path, right_image_path, calib_path)
start_time = time.time()
depth = run_test(left_img, right_img, calib, model)[0]
print(f"inference took {time.time() - start_time} seconds")
calibration = Calibration(calib_path)
cloud = depth_to_cloud(calibration, depth, 1)
visualize_point_cloud(cloud)
print("done")
|
py | 1a395ab42ca208d8429baea123f730d3d1593388 | from rest_framework import serializers
from api.app.views.core import UserSerializer
from api.app.models import Character
class CharacterSerializer(serializers.ModelSerializer):
owner = UserSerializer(read_only=True)
class Meta:
model = Character
fields = ('name', 'owner', 'image', 'public_profile')
|
py | 1a395b6e12f0ef64f0b60693502a48bda8519001 | # coding: utf-8
from __future__ import unicode_literals
import itertools
import json
import os.path
import random
import re
import time
import traceback
from .common import InfoExtractor, SearchInfoExtractor
from ..jsinterp import JSInterpreter
from ..swfinterp import SWFInterpreter
from ..compat import (
compat_chr,
compat_parse_qs,
compat_urllib_parse_unquote,
compat_urllib_parse_unquote_plus,
compat_urllib_parse_urlencode,
compat_urllib_parse_urlparse,
compat_urlparse,
compat_str,
)
from ..utils import (
bool_or_none,
clean_html,
error_to_compat_str,
ExtractorError,
float_or_none,
get_element_by_id,
int_or_none,
mimetype2ext,
orderedSet,
parse_codecs,
parse_duration,
remove_quotes,
remove_start,
smuggle_url,
str_or_none,
str_to_int,
try_get,
unescapeHTML,
unified_strdate,
unsmuggle_url,
uppercase_escape,
url_or_none,
urlencode_postdata,
urljoin,
)
class YoutubeBaseInfoExtractor(InfoExtractor):
"""Provide base functions for Youtube extractors"""
_LOGIN_URL = 'https://accounts.google.com/ServiceLogin'
_TWOFACTOR_URL = 'https://accounts.google.com/signin/challenge'
_LOOKUP_URL = 'https://accounts.google.com/_/signin/sl/lookup'
_CHALLENGE_URL = 'https://accounts.google.com/_/signin/sl/challenge'
_TFA_URL = 'https://accounts.google.com/_/signin/challenge?hl=en&TL={0}'
_NETRC_MACHINE = 'youtube'
# If True it will raise an error if no login info is provided
_LOGIN_REQUIRED = False
_PLAYLIST_ID_RE = r'(?:PL|LL|EC|UU|FL|RD|UL|TL|PU|OLAK5uy_)[0-9A-Za-z-_]{10,}'
_YOUTUBE_CLIENT_HEADERS = {
'x-youtube-client-name': '1',
'x-youtube-client-version': '1.20200609.04.02',
}
def _set_language(self):
self._set_cookie(
'.youtube.com', 'PREF', 'f1=50000000&f6=8&hl=en',
# YouTube sets the expire time to about two months
expire_time=time.time() + 2 * 30 * 24 * 3600)
def _ids_to_results(self, ids):
return [
self.url_result(vid_id, 'Youtube', video_id=vid_id)
for vid_id in ids]
def _login(self):
"""
Attempt to log in to YouTube.
True is returned if successful or skipped.
False is returned if login failed.
If _LOGIN_REQUIRED is set and no authentication was provided, an error is raised.
"""
username, password = self._get_login_info()
# No authentication to be performed
if username is None:
if self._LOGIN_REQUIRED and self._downloader.params.get('cookiefile') is None:
raise ExtractorError('No login info available, needed for using %s.' % self.IE_NAME, expected=True)
return True
login_page = self._download_webpage(
self._LOGIN_URL, None,
note='Downloading login page',
errnote='unable to fetch login page', fatal=False)
if login_page is False:
return
login_form = self._hidden_inputs(login_page)
def req(url, f_req, note, errnote):
data = login_form.copy()
data.update({
'pstMsg': 1,
'checkConnection': 'youtube',
'checkedDomains': 'youtube',
'hl': 'en',
'deviceinfo': '[null,null,null,[],null,"US",null,null,[],"GlifWebSignIn",null,[null,null,[]]]',
'f.req': json.dumps(f_req),
'flowName': 'GlifWebSignIn',
'flowEntry': 'ServiceLogin',
# TODO: reverse actual botguard identifier generation algo
'bgRequest': '["identifier",""]',
})
return self._download_json(
url, None, note=note, errnote=errnote,
transform_source=lambda s: re.sub(r'^[^[]*', '', s),
fatal=False,
data=urlencode_postdata(data), headers={
'Content-Type': 'application/x-www-form-urlencoded;charset=utf-8',
'Google-Accounts-XSRF': 1,
})
def warn(message):
self._downloader.report_warning(message)
lookup_req = [
username,
None, [], None, 'US', None, None, 2, False, True,
[
None, None,
[2, 1, None, 1,
'https://accounts.google.com/ServiceLogin?passive=true&continue=https%3A%2F%2Fwww.youtube.com%2Fsignin%3Fnext%3D%252F%26action_handle_signin%3Dtrue%26hl%3Den%26app%3Ddesktop%26feature%3Dsign_in_button&hl=en&service=youtube&uilel=3&requestPath=%2FServiceLogin&Page=PasswordSeparationSignIn',
None, [], 4],
1, [None, None, []], None, None, None, True
],
username,
]
lookup_results = req(
self._LOOKUP_URL, lookup_req,
'Looking up account info', 'Unable to look up account info')
if lookup_results is False:
return False
user_hash = try_get(lookup_results, lambda x: x[0][2], compat_str)
if not user_hash:
warn('Unable to extract user hash')
return False
challenge_req = [
user_hash,
None, 1, None, [1, None, None, None, [password, None, True]],
[
None, None, [2, 1, None, 1, 'https://accounts.google.com/ServiceLogin?passive=true&continue=https%3A%2F%2Fwww.youtube.com%2Fsignin%3Fnext%3D%252F%26action_handle_signin%3Dtrue%26hl%3Den%26app%3Ddesktop%26feature%3Dsign_in_button&hl=en&service=youtube&uilel=3&requestPath=%2FServiceLogin&Page=PasswordSeparationSignIn', None, [], 4],
1, [None, None, []], None, None, None, True
]]
challenge_results = req(
self._CHALLENGE_URL, challenge_req,
'Logging in', 'Unable to log in')
if challenge_results is False:
return
login_res = try_get(challenge_results, lambda x: x[0][5], list)
if login_res:
login_msg = try_get(login_res, lambda x: x[5], compat_str)
warn(
'Unable to login: %s' % 'Invalid password'
if login_msg == 'INCORRECT_ANSWER_ENTERED' else login_msg)
return False
res = try_get(challenge_results, lambda x: x[0][-1], list)
if not res:
warn('Unable to extract result entry')
return False
login_challenge = try_get(res, lambda x: x[0][0], list)
if login_challenge:
challenge_str = try_get(login_challenge, lambda x: x[2], compat_str)
if challenge_str == 'TWO_STEP_VERIFICATION':
# SEND_SUCCESS - TFA code has been successfully sent to phone
# QUOTA_EXCEEDED - reached the limit of TFA codes
status = try_get(login_challenge, lambda x: x[5], compat_str)
if status == 'QUOTA_EXCEEDED':
warn('Exceeded the limit of TFA codes, try later')
return False
tl = try_get(challenge_results, lambda x: x[1][2], compat_str)
if not tl:
warn('Unable to extract TL')
return False
tfa_code = self._get_tfa_info('2-step verification code')
if not tfa_code:
warn(
'Two-factor authentication required. Provide it either interactively or with --twofactor <code>'
'(Note that only TOTP (Google Authenticator App) codes work at this time.)')
return False
tfa_code = remove_start(tfa_code, 'G-')
tfa_req = [
user_hash, None, 2, None,
[
9, None, None, None, None, None, None, None,
[None, tfa_code, True, 2]
]]
tfa_results = req(
self._TFA_URL.format(tl), tfa_req,
'Submitting TFA code', 'Unable to submit TFA code')
if tfa_results is False:
return False
tfa_res = try_get(tfa_results, lambda x: x[0][5], list)
if tfa_res:
tfa_msg = try_get(tfa_res, lambda x: x[5], compat_str)
warn(
'Unable to finish TFA: %s' % 'Invalid TFA code'
if tfa_msg == 'INCORRECT_ANSWER_ENTERED' else tfa_msg)
return False
check_cookie_url = try_get(
tfa_results, lambda x: x[0][-1][2], compat_str)
else:
CHALLENGES = {
'LOGIN_CHALLENGE': "This device isn't recognized. For your security, Google wants to make sure it's really you.",
'USERNAME_RECOVERY': 'Please provide additional information to aid in the recovery process.',
'REAUTH': "There is something unusual about your activity. For your security, Google wants to make sure it's really you.",
}
challenge = CHALLENGES.get(
challenge_str,
'%s returned error %s.' % (self.IE_NAME, challenge_str))
warn('%s\nGo to https://accounts.google.com/, login and solve a challenge.' % challenge)
return False
else:
check_cookie_url = try_get(res, lambda x: x[2], compat_str)
if not check_cookie_url:
warn('Unable to extract CheckCookie URL')
return False
check_cookie_results = self._download_webpage(
check_cookie_url, None, 'Checking cookie', fatal=False)
if check_cookie_results is False:
return False
if 'https://myaccount.google.com/' not in check_cookie_results:
warn('Unable to log in')
return False
return True
def _real_initialize(self):
if self._downloader is None:
return
self._set_language()
if not self._login():
return
_DEFAULT_API_DATA = {
'context': {
'client': {
'clientName': 'WEB',
'clientVersion': '2.20201021.03.00',
}
},
}
def _call_api(self, ep, query, video_id):
data = self._DEFAULT_API_DATA.copy()
data.update(query)
response = self._download_json(
'https://www.youtube.com/youtubei/v1/%s' % ep, video_id=video_id,
note='Downloading API JSON', errnote='Unable to download API page',
data=json.dumps(data).encode('utf8'),
headers={'content-type': 'application/json'},
query={'key': 'AIzaSyAO_FJ2SlqU8Q4STEHLGCilw_Y9_11qcW8'})
return response
def _extract_yt_initial_data(self, video_id, webpage):
return self._parse_json(
self._search_regex(
r'(?:window\s*\[\s*["\']ytInitialData["\']\s*\]|ytInitialData)\s*=\s*({.+?})\s*;',
webpage, 'yt initial data'),
video_id)
class YoutubeIE(YoutubeBaseInfoExtractor):
IE_DESC = 'YouTube.com'
_VALID_URL = r"""(?x)^
(
(?:https?://|//) # http(s):// or protocol-independent URL
(?:(?:(?:(?:\w+\.)?[yY][oO][uU][tT][uU][bB][eE](?:-nocookie|kids)?\.com/|
(?:www\.)?deturl\.com/www\.youtube\.com/|
(?:www\.)?pwnyoutube\.com/|
(?:www\.)?hooktube\.com/|
(?:www\.)?yourepeat\.com/|
tube\.majestyc\.net/|
# Invidious instances taken from https://github.com/omarroth/invidious/wiki/Invidious-Instances
(?:(?:www|dev)\.)?invidio\.us/|
(?:(?:www|no)\.)?invidiou\.sh/|
(?:(?:www|fi|de)\.)?invidious\.snopyta\.org/|
(?:www\.)?invidious\.kabi\.tk/|
(?:www\.)?invidious\.13ad\.de/|
(?:www\.)?invidious\.mastodon\.host/|
(?:www\.)?invidious\.nixnet\.xyz/|
(?:www\.)?invidious\.drycat\.fr/|
(?:www\.)?tube\.poal\.co/|
(?:www\.)?vid\.wxzm\.sx/|
(?:www\.)?yewtu\.be/|
(?:www\.)?yt\.elukerio\.org/|
(?:www\.)?yt\.lelux\.fi/|
(?:www\.)?invidious\.ggc-project\.de/|
(?:www\.)?yt\.maisputain\.ovh/|
(?:www\.)?invidious\.13ad\.de/|
(?:www\.)?invidious\.toot\.koeln/|
(?:www\.)?invidious\.fdn\.fr/|
(?:www\.)?watch\.nettohikari\.com/|
(?:www\.)?kgg2m7yk5aybusll\.onion/|
(?:www\.)?qklhadlycap4cnod\.onion/|
(?:www\.)?axqzx4s6s54s32yentfqojs3x5i7faxza6xo3ehd4bzzsg2ii4fv2iid\.onion/|
(?:www\.)?c7hqkpkpemu6e7emz5b4vyz7idjgdvgaaa3dyimmeojqbgpea3xqjoid\.onion/|
(?:www\.)?fz253lmuao3strwbfbmx46yu7acac2jz27iwtorgmbqlkurlclmancad\.onion/|
(?:www\.)?invidious\.l4qlywnpwqsluw65ts7md3khrivpirse744un3x7mlskqauz5pyuzgqd\.onion/|
(?:www\.)?owxfohz4kjyv25fvlqilyxast7inivgiktls3th44jhk3ej3i7ya\.b32\.i2p/|
(?:www\.)?4l2dgddgsrkf2ous66i6seeyi6etzfgrue332grh2n7madpwopotugyd\.onion/|
youtube\.googleapis\.com/) # the various hostnames, with wildcard subdomains
(?:.*?\#/)? # handle anchor (#/) redirect urls
(?: # the various things that can precede the ID:
(?:(?:v|embed|e)/(?!videoseries)) # v/ or embed/ or e/
|(?: # or the v= param in all its forms
(?:(?:watch|movie)(?:_popup)?(?:\.php)?/?)? # preceding watch(_popup|.php) or nothing (like /?v=xxxx)
(?:\?|\#!?) # the params delimiter ? or # or #!
(?:.*?[&;])?? # any other preceding param (like /?s=tuff&v=xxxx or ?s=tuff&v=V36LpHqtcDY)
v=
)
))
|(?:
youtu\.be| # just youtu.be/xxxx
vid\.plus| # or vid.plus/xxxx
zwearz\.com/watch| # or zwearz.com/watch/xxxx
)/
|(?:www\.)?cleanvideosearch\.com/media/action/yt/watch\?videoId=
)
)? # all until now is optional -> you can pass the naked ID
(?P<id>[0-9A-Za-z_-]{11}) # here is it! the YouTube video ID
(?!.*?\blist=
(?:
%(playlist_id)s| # combined list/video URLs are handled by the playlist IE
WL # WL are handled by the watch later IE
)
)
(?(1).+)? # if we found the ID, everything can follow
$""" % {'playlist_id': YoutubeBaseInfoExtractor._PLAYLIST_ID_RE}
_NEXT_URL_RE = r'[\?&]next_url=([^&]+)'
_PLAYER_INFO_RE = (
r'/(?P<id>[a-zA-Z0-9_-]{8,})/player_ias\.vflset(?:/[a-zA-Z]{2,3}_[a-zA-Z]{2,3})?/base\.(?P<ext>[a-z]+)$',
r'\b(?P<id>vfl[a-zA-Z0-9_-]+)\b.*?\.(?P<ext>[a-z]+)$',
)
_formats = {
'5': {'ext': 'flv', 'width': 400, 'height': 240, 'acodec': 'mp3', 'abr': 64, 'vcodec': 'h263'},
'6': {'ext': 'flv', 'width': 450, 'height': 270, 'acodec': 'mp3', 'abr': 64, 'vcodec': 'h263'},
'13': {'ext': '3gp', 'acodec': 'aac', 'vcodec': 'mp4v'},
'17': {'ext': '3gp', 'width': 176, 'height': 144, 'acodec': 'aac', 'abr': 24, 'vcodec': 'mp4v'},
'18': {'ext': 'mp4', 'width': 640, 'height': 360, 'acodec': 'aac', 'abr': 96, 'vcodec': 'h264'},
'22': {'ext': 'mp4', 'width': 1280, 'height': 720, 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264'},
'34': {'ext': 'flv', 'width': 640, 'height': 360, 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264'},
'35': {'ext': 'flv', 'width': 854, 'height': 480, 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264'},
# itag 36 videos are either 320x180 (BaW_jenozKc) or 320x240 (__2ABJjxzNo), abr varies as well
'36': {'ext': '3gp', 'width': 320, 'acodec': 'aac', 'vcodec': 'mp4v'},
'37': {'ext': 'mp4', 'width': 1920, 'height': 1080, 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264'},
'38': {'ext': 'mp4', 'width': 4096, 'height': 3072, 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264'},
'43': {'ext': 'webm', 'width': 640, 'height': 360, 'acodec': 'vorbis', 'abr': 128, 'vcodec': 'vp8'},
'44': {'ext': 'webm', 'width': 854, 'height': 480, 'acodec': 'vorbis', 'abr': 128, 'vcodec': 'vp8'},
'45': {'ext': 'webm', 'width': 1280, 'height': 720, 'acodec': 'vorbis', 'abr': 192, 'vcodec': 'vp8'},
'46': {'ext': 'webm', 'width': 1920, 'height': 1080, 'acodec': 'vorbis', 'abr': 192, 'vcodec': 'vp8'},
'59': {'ext': 'mp4', 'width': 854, 'height': 480, 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264'},
'78': {'ext': 'mp4', 'width': 854, 'height': 480, 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264'},
# 3D videos
'82': {'ext': 'mp4', 'height': 360, 'format_note': '3D', 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264', 'preference': -20},
'83': {'ext': 'mp4', 'height': 480, 'format_note': '3D', 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264', 'preference': -20},
'84': {'ext': 'mp4', 'height': 720, 'format_note': '3D', 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264', 'preference': -20},
'85': {'ext': 'mp4', 'height': 1080, 'format_note': '3D', 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264', 'preference': -20},
'100': {'ext': 'webm', 'height': 360, 'format_note': '3D', 'acodec': 'vorbis', 'abr': 128, 'vcodec': 'vp8', 'preference': -20},
'101': {'ext': 'webm', 'height': 480, 'format_note': '3D', 'acodec': 'vorbis', 'abr': 192, 'vcodec': 'vp8', 'preference': -20},
'102': {'ext': 'webm', 'height': 720, 'format_note': '3D', 'acodec': 'vorbis', 'abr': 192, 'vcodec': 'vp8', 'preference': -20},
# Apple HTTP Live Streaming
'91': {'ext': 'mp4', 'height': 144, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 48, 'vcodec': 'h264', 'preference': -10},
'92': {'ext': 'mp4', 'height': 240, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 48, 'vcodec': 'h264', 'preference': -10},
'93': {'ext': 'mp4', 'height': 360, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264', 'preference': -10},
'94': {'ext': 'mp4', 'height': 480, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264', 'preference': -10},
'95': {'ext': 'mp4', 'height': 720, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 256, 'vcodec': 'h264', 'preference': -10},
'96': {'ext': 'mp4', 'height': 1080, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 256, 'vcodec': 'h264', 'preference': -10},
'132': {'ext': 'mp4', 'height': 240, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 48, 'vcodec': 'h264', 'preference': -10},
'151': {'ext': 'mp4', 'height': 72, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 24, 'vcodec': 'h264', 'preference': -10},
# DASH mp4 video
'133': {'ext': 'mp4', 'height': 240, 'format_note': 'DASH video', 'vcodec': 'h264'},
'134': {'ext': 'mp4', 'height': 360, 'format_note': 'DASH video', 'vcodec': 'h264'},
'135': {'ext': 'mp4', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'h264'},
'136': {'ext': 'mp4', 'height': 720, 'format_note': 'DASH video', 'vcodec': 'h264'},
'137': {'ext': 'mp4', 'height': 1080, 'format_note': 'DASH video', 'vcodec': 'h264'},
'138': {'ext': 'mp4', 'format_note': 'DASH video', 'vcodec': 'h264'}, # Height can vary (https://github.com/ytdl-org/youtube-dl/issues/4559)
'160': {'ext': 'mp4', 'height': 144, 'format_note': 'DASH video', 'vcodec': 'h264'},
'212': {'ext': 'mp4', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'h264'},
'264': {'ext': 'mp4', 'height': 1440, 'format_note': 'DASH video', 'vcodec': 'h264'},
'298': {'ext': 'mp4', 'height': 720, 'format_note': 'DASH video', 'vcodec': 'h264', 'fps': 60},
'299': {'ext': 'mp4', 'height': 1080, 'format_note': 'DASH video', 'vcodec': 'h264', 'fps': 60},
'266': {'ext': 'mp4', 'height': 2160, 'format_note': 'DASH video', 'vcodec': 'h264'},
# Dash mp4 audio
'139': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'abr': 48, 'container': 'm4a_dash'},
'140': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'abr': 128, 'container': 'm4a_dash'},
'141': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'abr': 256, 'container': 'm4a_dash'},
'256': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'container': 'm4a_dash'},
'258': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'container': 'm4a_dash'},
'325': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'dtse', 'container': 'm4a_dash'},
'328': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'ec-3', 'container': 'm4a_dash'},
# Dash webm
'167': {'ext': 'webm', 'height': 360, 'width': 640, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'},
'168': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'},
'169': {'ext': 'webm', 'height': 720, 'width': 1280, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'},
'170': {'ext': 'webm', 'height': 1080, 'width': 1920, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'},
'218': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'},
'219': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'},
'278': {'ext': 'webm', 'height': 144, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp9'},
'242': {'ext': 'webm', 'height': 240, 'format_note': 'DASH video', 'vcodec': 'vp9'},
'243': {'ext': 'webm', 'height': 360, 'format_note': 'DASH video', 'vcodec': 'vp9'},
'244': {'ext': 'webm', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'vp9'},
'245': {'ext': 'webm', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'vp9'},
'246': {'ext': 'webm', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'vp9'},
'247': {'ext': 'webm', 'height': 720, 'format_note': 'DASH video', 'vcodec': 'vp9'},
'248': {'ext': 'webm', 'height': 1080, 'format_note': 'DASH video', 'vcodec': 'vp9'},
'271': {'ext': 'webm', 'height': 1440, 'format_note': 'DASH video', 'vcodec': 'vp9'},
# itag 272 videos are either 3840x2160 (e.g. RtoitU2A-3E) or 7680x4320 (sLprVF6d7Ug)
'272': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'vcodec': 'vp9'},
'302': {'ext': 'webm', 'height': 720, 'format_note': 'DASH video', 'vcodec': 'vp9', 'fps': 60},
'303': {'ext': 'webm', 'height': 1080, 'format_note': 'DASH video', 'vcodec': 'vp9', 'fps': 60},
'308': {'ext': 'webm', 'height': 1440, 'format_note': 'DASH video', 'vcodec': 'vp9', 'fps': 60},
'313': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'vcodec': 'vp9'},
'315': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'vcodec': 'vp9', 'fps': 60},
# Dash webm audio
'171': {'ext': 'webm', 'acodec': 'vorbis', 'format_note': 'DASH audio', 'abr': 128},
'172': {'ext': 'webm', 'acodec': 'vorbis', 'format_note': 'DASH audio', 'abr': 256},
# Dash webm audio with opus inside
'249': {'ext': 'webm', 'format_note': 'DASH audio', 'acodec': 'opus', 'abr': 50},
'250': {'ext': 'webm', 'format_note': 'DASH audio', 'acodec': 'opus', 'abr': 70},
'251': {'ext': 'webm', 'format_note': 'DASH audio', 'acodec': 'opus', 'abr': 160},
# RTMP (unnamed)
'_rtmp': {'protocol': 'rtmp'},
# av01 video only formats sometimes served with "unknown" codecs
'394': {'acodec': 'none', 'vcodec': 'av01.0.05M.08'},
'395': {'acodec': 'none', 'vcodec': 'av01.0.05M.08'},
'396': {'acodec': 'none', 'vcodec': 'av01.0.05M.08'},
'397': {'acodec': 'none', 'vcodec': 'av01.0.05M.08'},
}
_SUBTITLE_FORMATS = ('srv1', 'srv2', 'srv3', 'ttml', 'vtt')
_GEO_BYPASS = False
IE_NAME = 'youtube'
_TESTS = [
{
'url': 'https://www.youtube.com/watch?v=BaW_jenozKc&t=1s&end=9',
'info_dict': {
'id': 'BaW_jenozKc',
'ext': 'mp4',
'title': 'youtube-dl test video "\'/\\ä↭𝕐',
'uploader': 'Philipp Hagemeister',
'uploader_id': 'phihag',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/phihag',
'channel_id': 'UCLqxVugv74EIW3VWh2NOa3Q',
'channel_url': r're:https?://(?:www\.)?youtube\.com/channel/UCLqxVugv74EIW3VWh2NOa3Q',
'upload_date': '20121002',
'description': 'test chars: "\'/\\ä↭𝕐\ntest URL: https://github.com/rg3/youtube-dl/issues/1892\n\nThis is a test video for youtube-dl.\n\nFor more information, contact [email protected] .',
'categories': ['Science & Technology'],
'tags': ['youtube-dl'],
'duration': 10,
'view_count': int,
'like_count': int,
'dislike_count': int,
'start_time': 1,
'end_time': 9,
}
},
{
'url': '//www.YouTube.com/watch?v=yZIXLfi8CZQ',
'note': 'Embed-only video (#1746)',
'info_dict': {
'id': 'yZIXLfi8CZQ',
'ext': 'mp4',
'upload_date': '20120608',
'title': 'Principal Sexually Assaults A Teacher - Episode 117 - 8th June 2012',
'description': 'md5:09b78bd971f1e3e289601dfba15ca4f7',
'uploader': 'SET India',
'uploader_id': 'setindia',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/setindia',
'age_limit': 18,
}
},
{
'url': 'https://www.youtube.com/watch?v=BaW_jenozKc&v=yZIXLfi8CZQ',
'note': 'Use the first video ID in the URL',
'info_dict': {
'id': 'BaW_jenozKc',
'ext': 'mp4',
'title': 'youtube-dl test video "\'/\\ä↭𝕐',
'uploader': 'Philipp Hagemeister',
'uploader_id': 'phihag',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/phihag',
'upload_date': '20121002',
'description': 'test chars: "\'/\\ä↭𝕐\ntest URL: https://github.com/rg3/youtube-dl/issues/1892\n\nThis is a test video for youtube-dl.\n\nFor more information, contact [email protected] .',
'categories': ['Science & Technology'],
'tags': ['youtube-dl'],
'duration': 10,
'view_count': int,
'like_count': int,
'dislike_count': int,
},
'params': {
'skip_download': True,
},
},
{
'url': 'https://www.youtube.com/watch?v=a9LDPn-MO4I',
'note': '256k DASH audio (format 141) via DASH manifest',
'info_dict': {
'id': 'a9LDPn-MO4I',
'ext': 'm4a',
'upload_date': '20121002',
'uploader_id': '8KVIDEO',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/8KVIDEO',
'description': '',
'uploader': '8KVIDEO',
'title': 'UHDTV TEST 8K VIDEO.mp4'
},
'params': {
'youtube_include_dash_manifest': True,
'format': '141',
},
'skip': 'format 141 not served anymore',
},
# DASH manifest with encrypted signature
{
'url': 'https://www.youtube.com/watch?v=IB3lcPjvWLA',
'info_dict': {
'id': 'IB3lcPjvWLA',
'ext': 'm4a',
'title': 'Afrojack, Spree Wilson - The Spark (Official Music Video) ft. Spree Wilson',
'description': 'md5:8f5e2b82460520b619ccac1f509d43bf',
'duration': 244,
'uploader': 'AfrojackVEVO',
'uploader_id': 'AfrojackVEVO',
'upload_date': '20131011',
},
'params': {
'youtube_include_dash_manifest': True,
'format': '141/bestaudio[ext=m4a]',
},
},
# Controversy video
{
'url': 'https://www.youtube.com/watch?v=T4XJQO3qol8',
'info_dict': {
'id': 'T4XJQO3qol8',
'ext': 'mp4',
'duration': 219,
'upload_date': '20100909',
'uploader': 'Amazing Atheist',
'uploader_id': 'TheAmazingAtheist',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/TheAmazingAtheist',
'title': 'Burning Everyone\'s Koran',
'description': 'SUBSCRIBE: http://www.youtube.com/saturninefilms\n\nEven Obama has taken a stand against freedom on this issue: http://www.huffingtonpost.com/2010/09/09/obama-gma-interview-quran_n_710282.html',
}
},
# Normal age-gate video (No vevo, embed allowed)
{
'url': 'https://youtube.com/watch?v=HtVdAasjOgU',
'info_dict': {
'id': 'HtVdAasjOgU',
'ext': 'mp4',
'title': 'The Witcher 3: Wild Hunt - The Sword Of Destiny Trailer',
'description': r're:(?s).{100,}About the Game\n.*?The Witcher 3: Wild Hunt.{100,}',
'duration': 142,
'uploader': 'The Witcher',
'uploader_id': 'WitcherGame',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/WitcherGame',
'upload_date': '20140605',
'age_limit': 18,
},
},
# video_info is None (https://github.com/ytdl-org/youtube-dl/issues/4421)
# YouTube Red ad is not captured for creator
{
'url': '__2ABJjxzNo',
'info_dict': {
'id': '__2ABJjxzNo',
'ext': 'mp4',
'duration': 266,
'upload_date': '20100430',
'uploader_id': 'deadmau5',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/deadmau5',
'creator': 'Dada Life, deadmau5',
'description': 'md5:12c56784b8032162bb936a5f76d55360',
'uploader': 'deadmau5',
'title': 'Deadmau5 - Some Chords (HD)',
'alt_title': 'This Machine Kills Some Chords',
},
'expected_warnings': [
'DASH manifest missing',
]
},
# Olympics (https://github.com/ytdl-org/youtube-dl/issues/4431)
{
'url': 'lqQg6PlCWgI',
'info_dict': {
'id': 'lqQg6PlCWgI',
'ext': 'mp4',
'duration': 6085,
'upload_date': '20150827',
'uploader_id': 'olympic',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/olympic',
'description': 'HO09 - Women - GER-AUS - Hockey - 31 July 2012 - London 2012 Olympic Games',
'uploader': 'Olympic',
'title': 'Hockey - Women - GER-AUS - London 2012 Olympic Games',
},
'params': {
'skip_download': 'requires avconv',
}
},
# Non-square pixels
{
'url': 'https://www.youtube.com/watch?v=_b-2C3KPAM0',
'info_dict': {
'id': '_b-2C3KPAM0',
'ext': 'mp4',
'stretched_ratio': 16 / 9.,
'duration': 85,
'upload_date': '20110310',
'uploader_id': 'AllenMeow',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/AllenMeow',
'description': 'made by Wacom from Korea | 字幕&加油添醋 by TY\'s Allen | 感謝heylisa00cavey1001同學熱情提供梗及翻譯',
'uploader': '孫ᄋᄅ',
'title': '[A-made] 變態妍字幕版 太妍 我就是這樣的人',
},
},
# url_encoded_fmt_stream_map is empty string
{
'url': 'qEJwOuvDf7I',
'info_dict': {
'id': 'qEJwOuvDf7I',
'ext': 'webm',
'title': 'Обсуждение судебной практики по выборам 14 сентября 2014 года в Санкт-Петербурге',
'description': '',
'upload_date': '20150404',
'uploader_id': 'spbelect',
'uploader': 'Наблюдатели Петербурга',
},
'params': {
'skip_download': 'requires avconv',
},
'skip': 'This live event has ended.',
},
# Extraction from multiple DASH manifests (https://github.com/ytdl-org/youtube-dl/pull/6097)
{
'url': 'https://www.youtube.com/watch?v=FIl7x6_3R5Y',
'info_dict': {
'id': 'FIl7x6_3R5Y',
'ext': 'webm',
'title': 'md5:7b81415841e02ecd4313668cde88737a',
'description': 'md5:116377fd2963b81ec4ce64b542173306',
'duration': 220,
'upload_date': '20150625',
'uploader_id': 'dorappi2000',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/dorappi2000',
'uploader': 'dorappi2000',
'formats': 'mincount:31',
},
'skip': 'not actual anymore',
},
# DASH manifest with segment_list
{
'url': 'https://www.youtube.com/embed/CsmdDsKjzN8',
'md5': '8ce563a1d667b599d21064e982ab9e31',
'info_dict': {
'id': 'CsmdDsKjzN8',
'ext': 'mp4',
'upload_date': '20150501', # According to '<meta itemprop="datePublished"', but in other places it's 20150510
'uploader': 'Airtek',
'description': 'Retransmisión en directo de la XVIII media maratón de Zaragoza.',
'uploader_id': 'UCzTzUmjXxxacNnL8I3m4LnQ',
'title': 'Retransmisión XVIII Media maratón Zaragoza 2015',
},
'params': {
'youtube_include_dash_manifest': True,
'format': '135', # bestvideo
},
'skip': 'This live event has ended.',
},
{
# Multifeed videos (multiple cameras), URL is for Main Camera
'url': 'https://www.youtube.com/watch?v=jqWvoWXjCVs',
'info_dict': {
'id': 'jqWvoWXjCVs',
'title': 'teamPGP: Rocket League Noob Stream',
'description': 'md5:dc7872fb300e143831327f1bae3af010',
},
'playlist': [{
'info_dict': {
'id': 'jqWvoWXjCVs',
'ext': 'mp4',
'title': 'teamPGP: Rocket League Noob Stream (Main Camera)',
'description': 'md5:dc7872fb300e143831327f1bae3af010',
'duration': 7335,
'upload_date': '20150721',
'uploader': 'Beer Games Beer',
'uploader_id': 'beergamesbeer',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/beergamesbeer',
'license': 'Standard YouTube License',
},
}, {
'info_dict': {
'id': '6h8e8xoXJzg',
'ext': 'mp4',
'title': 'teamPGP: Rocket League Noob Stream (kreestuh)',
'description': 'md5:dc7872fb300e143831327f1bae3af010',
'duration': 7337,
'upload_date': '20150721',
'uploader': 'Beer Games Beer',
'uploader_id': 'beergamesbeer',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/beergamesbeer',
'license': 'Standard YouTube License',
},
}, {
'info_dict': {
'id': 'PUOgX5z9xZw',
'ext': 'mp4',
'title': 'teamPGP: Rocket League Noob Stream (grizzle)',
'description': 'md5:dc7872fb300e143831327f1bae3af010',
'duration': 7337,
'upload_date': '20150721',
'uploader': 'Beer Games Beer',
'uploader_id': 'beergamesbeer',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/beergamesbeer',
'license': 'Standard YouTube License',
},
}, {
'info_dict': {
'id': 'teuwxikvS5k',
'ext': 'mp4',
'title': 'teamPGP: Rocket League Noob Stream (zim)',
'description': 'md5:dc7872fb300e143831327f1bae3af010',
'duration': 7334,
'upload_date': '20150721',
'uploader': 'Beer Games Beer',
'uploader_id': 'beergamesbeer',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/beergamesbeer',
'license': 'Standard YouTube License',
},
}],
'params': {
'skip_download': True,
},
'skip': 'This video is not available.',
},
{
# Multifeed video with comma in title (see https://github.com/ytdl-org/youtube-dl/issues/8536)
'url': 'https://www.youtube.com/watch?v=gVfLd0zydlo',
'info_dict': {
'id': 'gVfLd0zydlo',
'title': 'DevConf.cz 2016 Day 2 Workshops 1 14:00 - 15:30',
},
'playlist_count': 2,
'skip': 'Not multifeed anymore',
},
{
'url': 'https://vid.plus/FlRa-iH7PGw',
'only_matching': True,
},
{
'url': 'https://zwearz.com/watch/9lWxNJF-ufM/electra-woman-dyna-girl-official-trailer-grace-helbig.html',
'only_matching': True,
},
{
# Title with JS-like syntax "};" (see https://github.com/ytdl-org/youtube-dl/issues/7468)
# Also tests cut-off URL expansion in video description (see
# https://github.com/ytdl-org/youtube-dl/issues/1892,
# https://github.com/ytdl-org/youtube-dl/issues/8164)
'url': 'https://www.youtube.com/watch?v=lsguqyKfVQg',
'info_dict': {
'id': 'lsguqyKfVQg',
'ext': 'mp4',
'title': '{dark walk}; Loki/AC/Dishonored; collab w/Elflover21',
'alt_title': 'Dark Walk - Position Music',
'description': 'md5:8085699c11dc3f597ce0410b0dcbb34a',
'duration': 133,
'upload_date': '20151119',
'uploader_id': 'IronSoulElf',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/IronSoulElf',
'uploader': 'IronSoulElf',
'creator': 'Todd Haberman, Daniel Law Heath and Aaron Kaplan',
'track': 'Dark Walk - Position Music',
'artist': 'Todd Haberman, Daniel Law Heath and Aaron Kaplan',
'album': 'Position Music - Production Music Vol. 143 - Dark Walk',
},
'params': {
'skip_download': True,
},
},
{
# Tags with '};' (see https://github.com/ytdl-org/youtube-dl/issues/7468)
'url': 'https://www.youtube.com/watch?v=Ms7iBXnlUO8',
'only_matching': True,
},
{
# Video with yt:stretch=17:0
'url': 'https://www.youtube.com/watch?v=Q39EVAstoRM',
'info_dict': {
'id': 'Q39EVAstoRM',
'ext': 'mp4',
'title': 'Clash Of Clans#14 Dicas De Ataque Para CV 4',
'description': 'md5:ee18a25c350637c8faff806845bddee9',
'upload_date': '20151107',
'uploader_id': 'UCCr7TALkRbo3EtFzETQF1LA',
'uploader': 'CH GAMER DROID',
},
'params': {
'skip_download': True,
},
'skip': 'This video does not exist.',
},
{
# Video licensed under Creative Commons
'url': 'https://www.youtube.com/watch?v=M4gD1WSo5mA',
'info_dict': {
'id': 'M4gD1WSo5mA',
'ext': 'mp4',
'title': 'md5:e41008789470fc2533a3252216f1c1d1',
'description': 'md5:a677553cf0840649b731a3024aeff4cc',
'duration': 721,
'upload_date': '20150127',
'uploader_id': 'BerkmanCenter',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/BerkmanCenter',
'uploader': 'The Berkman Klein Center for Internet & Society',
'license': 'Creative Commons Attribution license (reuse allowed)',
},
'params': {
'skip_download': True,
},
},
{
# Channel-like uploader_url
'url': 'https://www.youtube.com/watch?v=eQcmzGIKrzg',
'info_dict': {
'id': 'eQcmzGIKrzg',
'ext': 'mp4',
'title': 'Democratic Socialism and Foreign Policy | Bernie Sanders',
'description': 'md5:dda0d780d5a6e120758d1711d062a867',
'duration': 4060,
'upload_date': '20151119',
'uploader': 'Bernie Sanders',
'uploader_id': 'UCH1dpzjCEiGAt8CXkryhkZg',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UCH1dpzjCEiGAt8CXkryhkZg',
'license': 'Creative Commons Attribution license (reuse allowed)',
},
'params': {
'skip_download': True,
},
},
{
'url': 'https://www.youtube.com/watch?feature=player_embedded&amp;v=V36LpHqtcDY',
'only_matching': True,
},
{
# YouTube Red paid video (https://github.com/ytdl-org/youtube-dl/issues/10059)
'url': 'https://www.youtube.com/watch?v=i1Ko8UG-Tdo',
'only_matching': True,
},
{
# Rental video preview
'url': 'https://www.youtube.com/watch?v=yYr8q0y5Jfg',
'info_dict': {
'id': 'uGpuVWrhIzE',
'ext': 'mp4',
'title': 'Piku - Trailer',
'description': 'md5:c36bd60c3fd6f1954086c083c72092eb',
'upload_date': '20150811',
'uploader': 'FlixMatrix',
'uploader_id': 'FlixMatrixKaravan',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/FlixMatrixKaravan',
'license': 'Standard YouTube License',
},
'params': {
'skip_download': True,
},
'skip': 'This video is not available.',
},
{
# YouTube Red video with episode data
'url': 'https://www.youtube.com/watch?v=iqKdEhx-dD4',
'info_dict': {
'id': 'iqKdEhx-dD4',
'ext': 'mp4',
'title': 'Isolation - Mind Field (Ep 1)',
'description': 'md5:46a29be4ceffa65b92d277b93f463c0f',
'duration': 2085,
'upload_date': '20170118',
'uploader': 'Vsauce',
'uploader_id': 'Vsauce',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/Vsauce',
'series': 'Mind Field',
'season_number': 1,
'episode_number': 1,
},
'params': {
'skip_download': True,
},
'expected_warnings': [
'Skipping DASH manifest',
],
},
{
# The following content has been identified by the YouTube community
# as inappropriate or offensive to some audiences.
'url': 'https://www.youtube.com/watch?v=6SJNVb0GnPI',
'info_dict': {
'id': '6SJNVb0GnPI',
'ext': 'mp4',
'title': 'Race Differences in Intelligence',
'description': 'md5:5d161533167390427a1f8ee89a1fc6f1',
'duration': 965,
'upload_date': '20140124',
'uploader': 'New Century Foundation',
'uploader_id': 'UCEJYpZGqgUob0zVVEaLhvVg',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UCEJYpZGqgUob0zVVEaLhvVg',
},
'params': {
'skip_download': True,
},
},
{
# itag 212
'url': '1t24XAntNCY',
'only_matching': True,
},
{
# geo restricted to JP
'url': 'sJL6WA-aGkQ',
'only_matching': True,
},
{
'url': 'https://www.youtube.com/watch?v=MuAGGZNfUkU&list=RDMM',
'only_matching': True,
},
{
'url': 'https://invidio.us/watch?v=BaW_jenozKc',
'only_matching': True,
},
{
# DRM protected
'url': 'https://www.youtube.com/watch?v=s7_qI6_mIXc',
'only_matching': True,
},
{
# Video with unsupported adaptive stream type formats
'url': 'https://www.youtube.com/watch?v=Z4Vy8R84T1U',
'info_dict': {
'id': 'Z4Vy8R84T1U',
'ext': 'mp4',
'title': 'saman SMAN 53 Jakarta(Sancety) opening COFFEE4th at SMAN 53 Jakarta',
'description': 'md5:d41d8cd98f00b204e9800998ecf8427e',
'duration': 433,
'upload_date': '20130923',
'uploader': 'Amelia Putri Harwita',
'uploader_id': 'UCpOxM49HJxmC1qCalXyB3_Q',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UCpOxM49HJxmC1qCalXyB3_Q',
'formats': 'maxcount:10',
},
'params': {
'skip_download': True,
'youtube_include_dash_manifest': False,
},
'skip': 'not actual anymore',
},
{
# Youtube Music Auto-generated description
'url': 'https://music.youtube.com/watch?v=MgNrAu2pzNs',
'info_dict': {
'id': 'MgNrAu2pzNs',
'ext': 'mp4',
'title': 'Voyeur Girl',
'description': 'md5:7ae382a65843d6df2685993e90a8628f',
'upload_date': '20190312',
'uploader': 'Stephen - Topic',
'uploader_id': 'UC-pWHpBjdGG69N9mM2auIAA',
'artist': 'Stephen',
'track': 'Voyeur Girl',
'album': 'it\'s too much love to know my dear',
'release_date': '20190313',
'release_year': 2019,
},
'params': {
'skip_download': True,
},
},
{
'url': 'https://www.youtubekids.com/watch?v=3b8nCWDgZ6Q',
'only_matching': True,
},
{
# invalid -> valid video id redirection
'url': 'DJztXj2GPfl',
'info_dict': {
'id': 'DJztXj2GPfk',
'ext': 'mp4',
'title': 'Panjabi MC - Mundian To Bach Ke (The Dictator Soundtrack)',
'description': 'md5:bf577a41da97918e94fa9798d9228825',
'upload_date': '20090125',
'uploader': 'Prochorowka',
'uploader_id': 'Prochorowka',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/Prochorowka',
'artist': 'Panjabi MC',
'track': 'Beware of the Boys (Mundian to Bach Ke) - Motivo Hi-Lectro Remix',
'album': 'Beware of the Boys (Mundian To Bach Ke)',
},
'params': {
'skip_download': True,
},
},
{
# empty description results in an empty string
'url': 'https://www.youtube.com/watch?v=x41yOUIvK2k',
'info_dict': {
'id': 'x41yOUIvK2k',
'ext': 'mp4',
'title': 'IMG 3456',
'description': '',
'upload_date': '20170613',
'uploader_id': 'ElevageOrVert',
'uploader': 'ElevageOrVert',
},
'params': {
'skip_download': True,
},
},
]
def __init__(self, *args, **kwargs):
super(YoutubeIE, self).__init__(*args, **kwargs)
self._player_cache = {}
def report_video_info_webpage_download(self, video_id):
"""Report attempt to download video info webpage."""
self.to_screen('%s: Downloading video info webpage' % video_id)
def report_information_extraction(self, video_id):
"""Report attempt to extract video information."""
self.to_screen('%s: Extracting video information' % video_id)
def report_unavailable_format(self, video_id, format):
"""Report extracted video URL."""
self.to_screen('%s: Format %s not available' % (video_id, format))
def report_rtmp_download(self):
"""Indicate the download will use the RTMP protocol."""
self.to_screen('RTMP download detected')
def _signature_cache_id(self, example_sig):
""" Return a string representation of a signature """
return '.'.join(compat_str(len(part)) for part in example_sig.split('.'))
@classmethod
def _extract_player_info(cls, player_url):
for player_re in cls._PLAYER_INFO_RE:
id_m = re.search(player_re, player_url)
if id_m:
break
else:
raise ExtractorError('Cannot identify player %r' % player_url)
return id_m.group('ext'), id_m.group('id')
def _extract_signature_function(self, video_id, player_url, example_sig):
player_type, player_id = self._extract_player_info(player_url)
# Read from filesystem cache
func_id = '%s_%s_%s' % (
player_type, player_id, self._signature_cache_id(example_sig))
assert os.path.basename(func_id) == func_id
cache_spec = self._downloader.cache.load('youtube-sigfuncs', func_id)
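# cache_spec, when present, is the list of source indices to pick, so the
# signature can be deciphered without re-downloading and re-parsing the player code.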
if cache_spec is not None:
return lambda s: ''.join(s[i] for i in cache_spec)
download_note = (
'Downloading player %s' % player_url
if self._downloader.params.get('verbose') else
'Downloading %s player %s' % (player_type, player_id)
)
if player_type == 'js':
code = self._download_webpage(
player_url, video_id,
note=download_note,
errnote='Download of %s failed' % player_url)
res = self._parse_sig_js(code)
elif player_type == 'swf':
urlh = self._request_webpage(
player_url, video_id,
note=download_note,
errnote='Download of %s failed' % player_url)
code = urlh.read()
res = self._parse_sig_swf(code)
else:
assert False, 'Invalid player type %r' % player_type
test_string = ''.join(map(compat_chr, range(len(example_sig))))
cache_res = res(test_string)
cache_spec = [ord(c) for c in cache_res]
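# test_string maps position i to chr(i), so taking ord() of each output
# character recovers which source index each output character came from.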
self._downloader.cache.store('youtube-sigfuncs', func_id, cache_spec)
return res
def _print_sig_code(self, func, example_sig):
def gen_sig_code(idxs):
def _genslice(start, end, step):
starts = '' if start == 0 else str(start)
ends = (':%d' % (end + step)) if end + step >= 0 else ':'
steps = '' if step == 1 else (':%d' % step)
return 's[%s%s%s]' % (starts, ends, steps)
step = None
# Quell pyflakes warnings - start will be set when step is set
start = '(Never used)'
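# Runs of consecutive indices (step of +/-1) are collapsed into Python
# slice syntax so the printed code stays compact for long signatures.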
for i, prev in zip(idxs[1:], idxs[:-1]):
if step is not None:
if i - prev == step:
continue
yield _genslice(start, prev, step)
step = None
continue
if i - prev in [-1, 1]:
step = i - prev
start = prev
continue
else:
yield 's[%d]' % prev
if step is None:
yield 's[%d]' % i
else:
yield _genslice(start, i, step)
test_string = ''.join(map(compat_chr, range(len(example_sig))))
cache_res = func(test_string)
cache_spec = [ord(c) for c in cache_res]
expr_code = ' + '.join(gen_sig_code(cache_spec))
signature_id_tuple = '(%s)' % (
', '.join(compat_str(len(p)) for p in example_sig.split('.')))
code = ('if tuple(len(p) for p in s.split(\'.\')) == %s:\n'
' return %s\n') % (signature_id_tuple, expr_code)
self.to_screen('Extracted signature function:\n' + code)
def _parse_sig_js(self, jscode):
funcname = self._search_regex(
(r'\b[cs]\s*&&\s*[adf]\.set\([^,]+\s*,\s*encodeURIComponent\s*\(\s*(?P<sig>[a-zA-Z0-9$]+)\(',
r'\b[a-zA-Z0-9]+\s*&&\s*[a-zA-Z0-9]+\.set\([^,]+\s*,\s*encodeURIComponent\s*\(\s*(?P<sig>[a-zA-Z0-9$]+)\(',
r'(?:\b|[^a-zA-Z0-9$])(?P<sig>[a-zA-Z0-9$]{2})\s*=\s*function\(\s*a\s*\)\s*{\s*a\s*=\s*a\.split\(\s*""\s*\)',
r'(?P<sig>[a-zA-Z0-9$]+)\s*=\s*function\(\s*a\s*\)\s*{\s*a\s*=\s*a\.split\(\s*""\s*\)',
# Obsolete patterns
r'(["\'])signature\1\s*,\s*(?P<sig>[a-zA-Z0-9$]+)\(',
r'\.sig\|\|(?P<sig>[a-zA-Z0-9$]+)\(',
r'yt\.akamaized\.net/\)\s*\|\|\s*.*?\s*[cs]\s*&&\s*[adf]\.set\([^,]+\s*,\s*(?:encodeURIComponent\s*\()?\s*(?P<sig>[a-zA-Z0-9$]+)\(',
r'\b[cs]\s*&&\s*[adf]\.set\([^,]+\s*,\s*(?P<sig>[a-zA-Z0-9$]+)\(',
r'\b[a-zA-Z0-9]+\s*&&\s*[a-zA-Z0-9]+\.set\([^,]+\s*,\s*(?P<sig>[a-zA-Z0-9$]+)\(',
r'\bc\s*&&\s*a\.set\([^,]+\s*,\s*\([^)]*\)\s*\(\s*(?P<sig>[a-zA-Z0-9$]+)\(',
r'\bc\s*&&\s*[a-zA-Z0-9]+\.set\([^,]+\s*,\s*\([^)]*\)\s*\(\s*(?P<sig>[a-zA-Z0-9$]+)\(',
r'\bc\s*&&\s*[a-zA-Z0-9]+\.set\([^,]+\s*,\s*\([^)]*\)\s*\(\s*(?P<sig>[a-zA-Z0-9$]+)\('),
jscode, 'Initial JS player signature function name', group='sig')
jsi = JSInterpreter(jscode)
initial_function = jsi.extract_function(funcname)
return lambda s: initial_function([s])
def _parse_sig_swf(self, file_contents):
swfi = SWFInterpreter(file_contents)
TARGET_CLASSNAME = 'SignatureDecipher'
searched_class = swfi.extract_class(TARGET_CLASSNAME)
initial_function = swfi.extract_function(searched_class, 'decipher')
return lambda s: initial_function([s])
def _decrypt_signature(self, s, video_id, player_url, age_gate=False):
"""Turn the encrypted s field into a working signature"""
if player_url is None:
raise ExtractorError('Cannot decrypt signature without player_url')
if player_url.startswith('//'):
player_url = 'https:' + player_url
elif not re.match(r'https?://', player_url):
player_url = compat_urlparse.urljoin(
'https://www.youtube.com', player_url)
try:
player_id = (player_url, self._signature_cache_id(s))
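# Signature functions are cached per (player URL, signature length pattern),
# so each distinct player layout is downloaded and parsed at most once.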
if player_id not in self._player_cache:
func = self._extract_signature_function(
video_id, player_url, s
)
self._player_cache[player_id] = func
func = self._player_cache[player_id]
if self._downloader.params.get('youtube_print_sig_code'):
self._print_sig_code(func, s)
return func(s)
except Exception as e:
tb = traceback.format_exc()
raise ExtractorError(
'Signature extraction failed: ' + tb, cause=e)
def _get_subtitles(self, video_id, webpage):
try:
subs_doc = self._download_xml(
'https://video.google.com/timedtext?hl=en&type=list&v=%s' % video_id,
video_id, note=False)
except ExtractorError as err:
self._downloader.report_warning('unable to download video subtitles: %s' % error_to_compat_str(err))
return {}
sub_lang_list = {}
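# Map each language code to its list of subtitle format dicts (one per
# extension in self._SUBTITLE_FORMATS).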
for track in subs_doc.findall('track'):
lang = track.attrib['lang_code']
if lang in sub_lang_list:
continue
sub_formats = []
for ext in self._SUBTITLE_FORMATS:
params = compat_urllib_parse_urlencode({
'lang': lang,
'v': video_id,
'fmt': ext,
'name': track.attrib['name'].encode('utf-8'),
})
sub_formats.append({
'url': 'https://www.youtube.com/api/timedtext?' + params,
'ext': ext,
})
sub_lang_list[lang] = sub_formats
if not sub_lang_list:
self._downloader.report_warning('video doesn\'t have subtitles')
return {}
return sub_lang_list
def _get_ytplayer_config(self, video_id, webpage):
patterns = (
# User data may contain arbitrary character sequences that break
# regex-based JSON extraction, e.g. when '};' is present the second
# regex won't capture the whole JSON. Work around this by trying the
# more specific regex first; proper quoted-string handling should
# eventually replace this workaround (see
# https://github.com/ytdl-org/youtube-dl/issues/7468,
# https://github.com/ytdl-org/youtube-dl/pull/7599)
r';ytplayer\.config\s*=\s*({.+?});ytplayer',
r';ytplayer\.config\s*=\s*({.+?});',
)
config = self._search_regex(
patterns, webpage, 'ytplayer.config', default=None)
if config:
return self._parse_json(
uppercase_escape(config), video_id, fatal=False)
def _get_automatic_captions(self, video_id, webpage):
"""We need the webpage for getting the captions url, pass it as an
argument to speed up the process."""
self.to_screen('%s: Looking for automatic captions' % video_id)
player_config = self._get_ytplayer_config(video_id, webpage)
err_msg = 'Couldn\'t find automatic captions for %s' % video_id
if not player_config:
self._downloader.report_warning(err_msg)
return {}
try:
args = player_config['args']
caption_url = args.get('ttsurl')
if caption_url:
timestamp = args['timestamp']
# We get the available subtitles
list_params = compat_urllib_parse_urlencode({
'type': 'list',
'tlangs': 1,
'asrs': 1,
})
list_url = caption_url + '&' + list_params
caption_list = self._download_xml(list_url, video_id)
original_lang_node = caption_list.find('track')
if original_lang_node is None:
self._downloader.report_warning('Video doesn\'t have automatic captions')
return {}
original_lang = original_lang_node.attrib['lang_code']
caption_kind = original_lang_node.attrib.get('kind', '')
sub_lang_list = {}
for lang_node in caption_list.findall('target'):
sub_lang = lang_node.attrib['lang_code']
sub_formats = []
for ext in self._SUBTITLE_FORMATS:
params = compat_urllib_parse_urlencode({
'lang': original_lang,
'tlang': sub_lang,
'fmt': ext,
'ts': timestamp,
'kind': caption_kind,
})
sub_formats.append({
'url': caption_url + '&' + params,
'ext': ext,
})
sub_lang_list[sub_lang] = sub_formats
return sub_lang_list
def make_captions(sub_url, sub_langs):
parsed_sub_url = compat_urllib_parse_urlparse(sub_url)
caption_qs = compat_parse_qs(parsed_sub_url.query)
captions = {}
for sub_lang in sub_langs:
sub_formats = []
for ext in self._SUBTITLE_FORMATS:
caption_qs.update({
'tlang': [sub_lang],
'fmt': [ext],
})
sub_url = compat_urlparse.urlunparse(parsed_sub_url._replace(
query=compat_urllib_parse_urlencode(caption_qs, True)))
sub_formats.append({
'url': sub_url,
'ext': ext,
})
captions[sub_lang] = sub_formats
return captions
# New captions format as of 22.06.2017
player_response = args.get('player_response')
if player_response and isinstance(player_response, compat_str):
player_response = self._parse_json(
player_response, video_id, fatal=False)
if player_response:
renderer = player_response['captions']['playerCaptionsTracklistRenderer']
base_url = renderer['captionTracks'][0]['baseUrl']
sub_lang_list = []
for lang in renderer['translationLanguages']:
lang_code = lang.get('languageCode')
if lang_code:
sub_lang_list.append(lang_code)
return make_captions(base_url, sub_lang_list)
# Some videos don't provide ttsurl but rather caption_tracks and
# caption_translation_languages (e.g. 20LmZk1hakA)
# No longer used as of 22.06.2017
caption_tracks = args['caption_tracks']
caption_translation_languages = args['caption_translation_languages']
caption_url = compat_parse_qs(caption_tracks.split(',')[0])['u'][0]
sub_lang_list = []
for lang in caption_translation_languages.split(','):
lang_qs = compat_parse_qs(compat_urllib_parse_unquote_plus(lang))
sub_lang = lang_qs.get('lc', [None])[0]
if sub_lang:
sub_lang_list.append(sub_lang)
return make_captions(caption_url, sub_lang_list)
# An extractor error can be raised by the download process if there are
# no automatic captions but there are subtitles
except (KeyError, IndexError, ExtractorError):
self._downloader.report_warning(err_msg)
return {}
def _mark_watched(self, video_id, video_info, player_response):
playback_url = url_or_none(try_get(
player_response,
lambda x: x['playbackTracking']['videostatsPlaybackUrl']['baseUrl']) or try_get(
video_info, lambda x: x['videostats_playback_base_url'][0]))
if not playback_url:
return
parsed_playback_url = compat_urlparse.urlparse(playback_url)
qs = compat_urlparse.parse_qs(parsed_playback_url.query)
# The cpn generation algorithm is reverse engineered from base.js;
# in practice it works even with a dummy cpn.
CPN_ALPHABET = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789-_'
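# Each of the 16 characters is picked with random.randint(0, 256) & 63,
# i.e. a (roughly uniform) index into the 64-symbol alphabet above.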
cpn = ''.join((CPN_ALPHABET[random.randint(0, 256) & 63] for _ in range(0, 16)))
qs.update({
'ver': ['2'],
'cpn': [cpn],
})
playback_url = compat_urlparse.urlunparse(
parsed_playback_url._replace(query=compat_urllib_parse_urlencode(qs, True)))
self._download_webpage(
playback_url, video_id, 'Marking watched',
'Unable to mark watched', fatal=False)
@staticmethod
def _extract_urls(webpage):
# Embedded YouTube player
entries = [
unescapeHTML(mobj.group('url'))
for mobj in re.finditer(r'''(?x)
(?:
<iframe[^>]+?src=|
data-video-url=|
<embed[^>]+?src=|
embedSWF\(?:\s*|
<object[^>]+data=|
new\s+SWFObject\(
)
(["\'])
(?P<url>(?:https?:)?//(?:www\.)?youtube(?:-nocookie)?\.com/
(?:embed|v|p)/[0-9A-Za-z_-]{11}.*?)
\1''', webpage)]
# lazyYT YouTube embed
entries.extend(list(map(
unescapeHTML,
re.findall(r'class="lazyYT" data-youtube-id="([^"]+)"', webpage))))
# Wordpress "YouTube Video Importer" plugin
matches = re.findall(r'''(?x)<div[^>]+
class=(?P<q1>[\'"])[^\'"]*\byvii_single_video_player\b[^\'"]*(?P=q1)[^>]+
data-video_id=(?P<q2>[\'"])([^\'"]+)(?P=q2)''', webpage)
entries.extend(m[-1] for m in matches)
return entries
@staticmethod
def _extract_url(webpage):
urls = YoutubeIE._extract_urls(webpage)
return urls[0] if urls else None
@classmethod
def extract_id(cls, url):
mobj = re.match(cls._VALID_URL, url, re.VERBOSE)
if mobj is None:
raise ExtractorError('Invalid URL: %s' % url)
video_id = mobj.group(2)
return video_id
def _extract_chapters_from_json(self, webpage, video_id, duration):
if not webpage:
return
data = self._extract_yt_initial_data(video_id, webpage)
if not data or not isinstance(data, dict):
return
chapters_list = try_get(
data,
lambda x: x['playerOverlays']
['playerOverlayRenderer']
['decoratedPlayerBarRenderer']
['decoratedPlayerBarRenderer']
['playerBar']
['chapteredPlayerBarRenderer']
['chapters'],
list)
if not chapters_list:
return
def chapter_time(chapter):
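# timeRangeStartMillis is in milliseconds; scale=1000 converts to seconds.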
return float_or_none(
try_get(
chapter,
lambda x: x['chapterRenderer']['timeRangeStartMillis'],
int),
scale=1000)
chapters = []
for next_num, chapter in enumerate(chapters_list, start=1):
start_time = chapter_time(chapter)
if start_time is None:
continue
end_time = (chapter_time(chapters_list[next_num])
if next_num < len(chapters_list) else duration)
if end_time is None:
continue
title = try_get(
chapter, lambda x: x['chapterRenderer']['title']['simpleText'],
compat_str)
chapters.append({
'start_time': start_time,
'end_time': end_time,
'title': title,
})
return chapters
@staticmethod
def _extract_chapters_from_description(description, duration):
if not description:
return None
chapter_lines = re.findall(
r'(?:^|<br\s*/>)([^<]*<a[^>]+onclick=["\']yt\.www\.watch\.player\.seekTo[^>]+>(\d{1,2}:\d{1,2}(?::\d{1,2})?)</a>[^>]*)(?=$|<br\s*/>)',
description)
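# Each match is (chapter line, timestamp); the timestamps come from the
# yt.www.watch.player.seekTo links YouTube adds to chapter markers in the
# description.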
if not chapter_lines:
return None
chapters = []
for next_num, (chapter_line, time_point) in enumerate(
chapter_lines, start=1):
start_time = parse_duration(time_point)
if start_time is None:
continue
if start_time > duration:
break
end_time = (duration if next_num == len(chapter_lines)
else parse_duration(chapter_lines[next_num][1]))
if end_time is None:
continue
if end_time > duration:
end_time = duration
if start_time > end_time:
break
chapter_title = re.sub(
r'<a[^>]+>[^<]+</a>', '', chapter_line).strip(' \t-')
chapter_title = re.sub(r'\s+', ' ', chapter_title)
chapters.append({
'start_time': start_time,
'end_time': end_time,
'title': chapter_title,
})
return chapters
def _extract_chapters(self, webpage, description, video_id, duration):
return (self._extract_chapters_from_json(webpage, video_id, duration)
or self._extract_chapters_from_description(description, duration))
def _real_extract(self, url):
url, smuggled_data = unsmuggle_url(url, {})
proto = (
'http' if self._downloader.params.get('prefer_insecure', False)
else 'https')
start_time = None
end_time = None
parsed_url = compat_urllib_parse_urlparse(url)
for component in [parsed_url.fragment, parsed_url.query]:
query = compat_parse_qs(component)
if start_time is None and 't' in query:
start_time = parse_duration(query['t'][0])
if start_time is None and 'start' in query:
start_time = parse_duration(query['start'][0])
if end_time is None and 'end' in query:
end_time = parse_duration(query['end'][0])
# Extract the original video URL from a redirecting URL (e.g. age verification) using the next_url parameter
mobj = re.search(self._NEXT_URL_RE, url)
if mobj:
url = proto + '://www.youtube.com/' + compat_urllib_parse_unquote(mobj.group(1)).lstrip('/')
video_id = self.extract_id(url)
# Get video webpage
url = proto + '://www.youtube.com/watch?v=%s&gl=US&hl=en&has_verified=1&bpctr=9999999999' % video_id
video_webpage, urlh = self._download_webpage_handle(url, video_id)
qs = compat_parse_qs(compat_urllib_parse_urlparse(urlh.geturl()).query)
video_id = qs.get('v', [None])[0] or video_id
# Attempt to extract SWF player URL
mobj = re.search(r'swfConfig.*?"(https?:\\/\\/.*?watch.*?-.*?\.swf)"', video_webpage)
if mobj is not None:
player_url = re.sub(r'\\(.)', r'\1', mobj.group(1))
else:
player_url = None
dash_mpds = []
def add_dash_mpd(video_info):
dash_mpd = video_info.get('dashmpd')
if dash_mpd and dash_mpd[0] not in dash_mpds:
dash_mpds.append(dash_mpd[0])
def add_dash_mpd_pr(pl_response):
dash_mpd = url_or_none(try_get(
pl_response, lambda x: x['streamingData']['dashManifestUrl'],
compat_str))
if dash_mpd and dash_mpd not in dash_mpds:
dash_mpds.append(dash_mpd)
is_live = None
view_count = None
def extract_view_count(v_info):
return int_or_none(try_get(v_info, lambda x: x['view_count'][0]))
def extract_player_response(player_response, video_id):
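# player_response arrives as a JSON string; parse it (non-fatally) and
# record any DASH manifest URL it advertises.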
pl_response = str_or_none(player_response)
if not pl_response:
return
pl_response = self._parse_json(pl_response, video_id, fatal=False)
if isinstance(pl_response, dict):
add_dash_mpd_pr(pl_response)
return pl_response
player_response = {}
# Get video info
video_info = {}
embed_webpage = None
if (self._og_search_property('restrictions:age', video_webpage, default=None) == '18+'
or re.search(r'player-age-gate-content">', video_webpage) is not None):
age_gate = True
# We simulate access to the video from www.youtube.com/v/{video_id};
# this can be viewed without logging into YouTube
url = proto + '://www.youtube.com/embed/%s' % video_id
embed_webpage = self._download_webpage(url, video_id, 'Downloading embed webpage')
data = compat_urllib_parse_urlencode({
'video_id': video_id,
'eurl': 'https://youtube.googleapis.com/v/' + video_id,
'sts': self._search_regex(
r'"sts"\s*:\s*(\d+)', embed_webpage, 'sts', default=''),
})
video_info_url = proto + '://www.youtube.com/get_video_info?' + data
try:
video_info_webpage = self._download_webpage(
video_info_url, video_id,
note='Refetching age-gated info webpage',
errnote='unable to download video info webpage')
except ExtractorError:
video_info_webpage = None
if video_info_webpage:
video_info = compat_parse_qs(video_info_webpage)
pl_response = video_info.get('player_response', [None])[0]
player_response = extract_player_response(pl_response, video_id)
add_dash_mpd(video_info)
view_count = extract_view_count(video_info)
else:
age_gate = False
# Try looking directly into the video webpage
ytplayer_config = self._get_ytplayer_config(video_id, video_webpage)
if ytplayer_config:
args = ytplayer_config['args']
if args.get('url_encoded_fmt_stream_map') or args.get('hlsvp'):
# Convert to the same format returned by compat_parse_qs
video_info = dict((k, [v]) for k, v in args.items())
add_dash_mpd(video_info)
# Rental video is not rented but preview is available (e.g.
# https://www.youtube.com/watch?v=yYr8q0y5Jfg,
# https://github.com/ytdl-org/youtube-dl/issues/10532)
if not video_info and args.get('ypc_vid'):
return self.url_result(
args['ypc_vid'], YoutubeIE.ie_key(), video_id=args['ypc_vid'])
if args.get('livestream') == '1' or args.get('live_playback') == 1:
is_live = True
if not player_response:
player_response = extract_player_response(args.get('player_response'), video_id)
if not video_info or self._downloader.params.get('youtube_include_dash_manifest', True):
add_dash_mpd_pr(player_response)
if not video_info and not player_response:
player_response = extract_player_response(
self._search_regex(
r'ytInitialPlayerResponse\s*=\s*({.+?})\s*;', video_webpage,
'initial player response', default='{}'),
video_id)
def extract_unavailable_message():
messages = []
for tag, kind in (('h1', 'message'), ('div', 'submessage')):
msg = self._html_search_regex(
r'(?s)<{tag}[^>]+id=["\']unavailable-{kind}["\'][^>]*>(.+?)</{tag}>'.format(tag=tag, kind=kind),
video_webpage, 'unavailable %s' % kind, default=None)
if msg:
messages.append(msg)
if messages:
return '\n'.join(messages)
if not video_info and not player_response:
unavailable_message = extract_unavailable_message()
if not unavailable_message:
unavailable_message = 'Unable to extract video data'
raise ExtractorError(
'YouTube said: %s' % unavailable_message, expected=True, video_id=video_id)
if not isinstance(video_info, dict):
video_info = {}
video_details = try_get(
player_response, lambda x: x['videoDetails'], dict) or {}
microformat = try_get(
player_response, lambda x: x['microformat']['playerMicroformatRenderer'], dict) or {}
video_title = video_info.get('title', [None])[0] or video_details.get('title')
if not video_title:
self._downloader.report_warning('Unable to extract video title')
video_title = '_'
description_original = video_description = get_element_by_id("eow-description", video_webpage)
if video_description:
def replace_url(m):
redir_url = compat_urlparse.urljoin(url, m.group(1))
parsed_redir_url = compat_urllib_parse_urlparse(redir_url)
if re.search(r'^(?:www\.)?(?:youtube(?:-nocookie)?\.com|youtu\.be)$', parsed_redir_url.netloc) and parsed_redir_url.path == '/redirect':
qs = compat_parse_qs(parsed_redir_url.query)
q = qs.get('q')
if q and q[0]:
return q[0]
return redir_url
description_original = video_description = re.sub(r'''(?x)
<a\s+
(?:[a-zA-Z-]+="[^"]*"\s+)*?
(?:title|href)="([^"]+)"\s+
(?:[a-zA-Z-]+="[^"]*"\s+)*?
class="[^"]*"[^>]*>
[^<]+\.{3}\s*
</a>
''', replace_url, video_description)
video_description = clean_html(video_description)
else:
video_description = video_details.get('shortDescription')
if video_description is None:
video_description = self._html_search_meta('description', video_webpage)
if not smuggled_data.get('force_singlefeed', False):
if not self._downloader.params.get('noplaylist'):
multifeed_metadata_list = try_get(
player_response,
lambda x: x['multicamera']['playerLegacyMulticameraRenderer']['metadataList'],
compat_str) or try_get(
video_info, lambda x: x['multifeed_metadata_list'][0], compat_str)
if multifeed_metadata_list:
entries = []
feed_ids = []
for feed in multifeed_metadata_list.split(','):
# Unquote should take place before split on comma (,) since textual
# fields may contain comma as well (see
# https://github.com/ytdl-org/youtube-dl/issues/8536)
feed_data = compat_parse_qs(compat_urllib_parse_unquote_plus(feed))
def feed_entry(name):
return try_get(feed_data, lambda x: x[name][0], compat_str)
feed_id = feed_entry('id')
if not feed_id:
continue
feed_title = feed_entry('title')
title = video_title
if feed_title:
title += ' (%s)' % feed_title
entries.append({
'_type': 'url_transparent',
'ie_key': 'Youtube',
'url': smuggle_url(
'%s://www.youtube.com/watch?v=%s' % (proto, feed_data['id'][0]),
{'force_singlefeed': True}),
'title': title,
})
feed_ids.append(feed_id)
self.to_screen(
'Downloading multifeed video (%s) - add --no-playlist to just download video %s'
% (', '.join(feed_ids), video_id))
return self.playlist_result(entries, video_id, video_title, video_description)
else:
self.to_screen('Downloading just video %s because of --no-playlist' % video_id)
if view_count is None:
view_count = extract_view_count(video_info)
if view_count is None and video_details:
view_count = int_or_none(video_details.get('viewCount'))
if view_count is None and microformat:
view_count = int_or_none(microformat.get('viewCount'))
if is_live is None:
is_live = bool_or_none(video_details.get('isLive'))
# Check for "rental" videos
if 'ypc_video_rental_bar_text' in video_info and 'author' not in video_info:
raise ExtractorError('"rental" videos not supported. See https://github.com/ytdl-org/youtube-dl/issues/359 for more information.', expected=True)
def _extract_filesize(media_url):
return int_or_none(self._search_regex(
r'\bclen[=/](\d+)', media_url, 'filesize', default=None))
streaming_formats = try_get(player_response, lambda x: x['streamingData']['formats'], list) or []
streaming_formats.extend(try_get(player_response, lambda x: x['streamingData']['adaptiveFormats'], list) or [])
if 'conn' in video_info and video_info['conn'][0].startswith('rtmp'):
self.report_rtmp_download()
formats = [{
'format_id': '_rtmp',
'protocol': 'rtmp',
'url': video_info['conn'][0],
'player_url': player_url,
}]
elif not is_live and (streaming_formats or len(video_info.get('url_encoded_fmt_stream_map', [''])[0]) >= 1 or len(video_info.get('adaptive_fmts', [''])[0]) >= 1):
encoded_url_map = video_info.get('url_encoded_fmt_stream_map', [''])[0] + ',' + video_info.get('adaptive_fmts', [''])[0]
if 'rtmpe%3Dyes' in encoded_url_map:
raise ExtractorError('rtmpe downloads are not supported, see https://github.com/ytdl-org/youtube-dl/issues/343 for more information.', expected=True)
formats = []
formats_spec = {}
fmt_list = video_info.get('fmt_list', [''])[0]
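# Each fmt_list entry is '/'-separated; field 0 is the itag and field 1
# the 'WIDTHxHEIGHT' resolution. Only these two fields are used here.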
if fmt_list:
for fmt in fmt_list.split(','):
spec = fmt.split('/')
if len(spec) > 1:
width_height = spec[1].split('x')
if len(width_height) == 2:
formats_spec[spec[0]] = {
'resolution': spec[1],
'width': int_or_none(width_height[0]),
'height': int_or_none(width_height[1]),
}
for fmt in streaming_formats:
itag = str_or_none(fmt.get('itag'))
if not itag:
continue
quality = fmt.get('quality')
quality_label = fmt.get('qualityLabel') or quality
formats_spec[itag] = {
'asr': int_or_none(fmt.get('audioSampleRate')),
'filesize': int_or_none(fmt.get('contentLength')),
'format_note': quality_label,
'fps': int_or_none(fmt.get('fps')),
'height': int_or_none(fmt.get('height')),
# bitrate for itag 43 is always 2147483647
'tbr': float_or_none(fmt.get('averageBitrate') or fmt.get('bitrate'), 1000) if itag != '43' else None,
'width': int_or_none(fmt.get('width')),
}
for fmt in streaming_formats:
if fmt.get('drmFamilies') or fmt.get('drm_families'):
continue
url = url_or_none(fmt.get('url'))
if not url:
cipher = fmt.get('cipher') or fmt.get('signatureCipher')
if not cipher:
continue
url_data = compat_parse_qs(cipher)
url = url_or_none(try_get(url_data, lambda x: x['url'][0], compat_str))
if not url:
continue
else:
cipher = None
url_data = compat_parse_qs(compat_urllib_parse_urlparse(url).query)
stream_type = int_or_none(try_get(url_data, lambda x: x['stream_type'][0]))
# Unsupported FORMAT_STREAM_TYPE_OTF
if stream_type == 3:
continue
format_id = fmt.get('itag') or url_data['itag'][0]
if not format_id:
continue
format_id = compat_str(format_id)
if cipher:
if 's' in url_data or self._downloader.params.get('youtube_include_dash_manifest', True):
ASSETS_RE = (
r'<script[^>]+\bsrc=("[^"]+")[^>]+\bname=["\']player_ias/base',
r'"jsUrl"\s*:\s*("[^"]+")',
r'"assets":.+?"js":\s*("[^"]+")')
jsplayer_url_json = self._search_regex(
ASSETS_RE,
embed_webpage if age_gate else video_webpage,
'JS player URL (1)', default=None)
if not jsplayer_url_json and not age_gate:
# We need the embed website after all
if embed_webpage is None:
embed_url = proto + '://www.youtube.com/embed/%s' % video_id
embed_webpage = self._download_webpage(
embed_url, video_id, 'Downloading embed webpage')
jsplayer_url_json = self._search_regex(
ASSETS_RE, embed_webpage, 'JS player URL')
player_url = json.loads(jsplayer_url_json)
if player_url is None:
player_url_json = self._search_regex(
r'ytplayer\.config.*?"url"\s*:\s*("[^"]+")',
video_webpage, 'age gate player URL')
player_url = json.loads(player_url_json)
if 'sig' in url_data:
url += '&signature=' + url_data['sig'][0]
elif 's' in url_data:
encrypted_sig = url_data['s'][0]
if self._downloader.params.get('verbose'):
if player_url is None:
player_desc = 'unknown'
else:
player_type, player_version = self._extract_player_info(player_url)
player_desc = '%s player %s' % ('flash' if player_type == 'swf' else 'html5', player_version)
parts_sizes = self._signature_cache_id(encrypted_sig)
self.to_screen('{%s} signature length %s, %s' %
(format_id, parts_sizes, player_desc))
signature = self._decrypt_signature(
encrypted_sig, video_id, player_url, age_gate)
sp = try_get(url_data, lambda x: x['sp'][0], compat_str) or 'signature'
url += '&%s=%s' % (sp, signature)
if 'ratebypass' not in url:
url += '&ratebypass=yes'
dct = {
'format_id': format_id,
'url': url,
'player_url': player_url,
}
if format_id in self._formats:
dct.update(self._formats[format_id])
if format_id in formats_spec:
dct.update(formats_spec[format_id])
# Some itags are not included in DASH manifest thus corresponding formats will
# lack metadata (see https://github.com/ytdl-org/youtube-dl/pull/5993).
# Trying to extract metadata from url_encoded_fmt_stream_map entry.
mobj = re.search(r'^(?P<width>\d+)[xX](?P<height>\d+)$', url_data.get('size', [''])[0])
width, height = (int(mobj.group('width')), int(mobj.group('height'))) if mobj else (None, None)
if width is None:
width = int_or_none(fmt.get('width'))
if height is None:
height = int_or_none(fmt.get('height'))
filesize = int_or_none(url_data.get(
'clen', [None])[0]) or _extract_filesize(url)
quality = url_data.get('quality', [None])[0] or fmt.get('quality')
quality_label = url_data.get('quality_label', [None])[0] or fmt.get('qualityLabel')
tbr = (float_or_none(url_data.get('bitrate', [None])[0], 1000)
or float_or_none(fmt.get('bitrate'), 1000)) if format_id != '43' else None
fps = int_or_none(url_data.get('fps', [None])[0]) or int_or_none(fmt.get('fps'))
more_fields = {
'filesize': filesize,
'tbr': tbr,
'width': width,
'height': height,
'fps': fps,
'format_note': quality_label or quality,
}
for key, value in more_fields.items():
if value:
dct[key] = value
type_ = url_data.get('type', [None])[0] or fmt.get('mimeType')
if type_:
type_split = type_.split(';')
kind_ext = type_split[0].split('/')
if len(kind_ext) == 2:
kind, _ = kind_ext
dct['ext'] = mimetype2ext(type_split[0])
if kind in ('audio', 'video'):
codecs = None
for mobj in re.finditer(
r'(?P<key>[a-zA-Z_-]+)=(?P<quote>["\']?)(?P<val>.+?)(?P=quote)(?:;|$)', type_):
if mobj.group('key') == 'codecs':
codecs = mobj.group('val')
break
if codecs:
dct.update(parse_codecs(codecs))
if dct.get('acodec') == 'none' or dct.get('vcodec') == 'none':
dct['downloader_options'] = {
# Youtube throttles chunks >~10M
'http_chunk_size': 10485760,
}
formats.append(dct)
else:
manifest_url = (
url_or_none(try_get(
player_response,
lambda x: x['streamingData']['hlsManifestUrl'],
compat_str))
or url_or_none(try_get(
video_info, lambda x: x['hlsvp'][0], compat_str)))
if manifest_url:
formats = []
m3u8_formats = self._extract_m3u8_formats(
manifest_url, video_id, 'mp4', fatal=False)
for a_format in m3u8_formats:
itag = self._search_regex(
r'/itag/(\d+)/', a_format['url'], 'itag', default=None)
if itag:
a_format['format_id'] = itag
if itag in self._formats:
dct = self._formats[itag].copy()
dct.update(a_format)
a_format = dct
a_format['player_url'] = player_url
# Accept-Encoding header causes failures in live streams on Youtube and Youtube Gaming
a_format.setdefault('http_headers', {})['Youtubedl-no-compression'] = 'True'
formats.append(a_format)
else:
error_message = extract_unavailable_message()
if not error_message:
error_message = clean_html(try_get(
player_response, lambda x: x['playabilityStatus']['reason'],
compat_str))
if not error_message:
error_message = clean_html(
try_get(video_info, lambda x: x['reason'][0], compat_str))
if error_message:
raise ExtractorError(error_message, expected=True)
raise ExtractorError('no conn, hlsvp, hlsManifestUrl or url_encoded_fmt_stream_map information found in video info')
# uploader
video_uploader = try_get(
video_info, lambda x: x['author'][0],
compat_str) or str_or_none(video_details.get('author'))
if video_uploader:
video_uploader = compat_urllib_parse_unquote_plus(video_uploader)
else:
self._downloader.report_warning('unable to extract uploader name')
# uploader_id
video_uploader_id = None
video_uploader_url = None
mobj = re.search(
r'<link itemprop="url" href="(?P<uploader_url>https?://www\.youtube\.com/(?:user|channel)/(?P<uploader_id>[^"]+))">',
video_webpage)
if mobj is not None:
video_uploader_id = mobj.group('uploader_id')
video_uploader_url = mobj.group('uploader_url')
else:
owner_profile_url = url_or_none(microformat.get('ownerProfileUrl'))
if owner_profile_url:
video_uploader_id = self._search_regex(
r'(?:user|channel)/([^/]+)', owner_profile_url, 'uploader id',
default=None)
video_uploader_url = owner_profile_url
channel_id = (
str_or_none(video_details.get('channelId'))
or self._html_search_meta(
'channelId', video_webpage, 'channel id', default=None)
or self._search_regex(
r'data-channel-external-id=(["\'])(?P<id>(?:(?!\1).)+)\1',
video_webpage, 'channel id', default=None, group='id'))
channel_url = 'http://www.youtube.com/channel/%s' % channel_id if channel_id else None
thumbnails = []
thumbnails_list = try_get(
video_details, lambda x: x['thumbnail']['thumbnails'], list) or []
for t in thumbnails_list:
if not isinstance(t, dict):
continue
thumbnail_url = url_or_none(t.get('url'))
if not thumbnail_url:
continue
thumbnails.append({
'url': thumbnail_url,
'width': int_or_none(t.get('width')),
'height': int_or_none(t.get('height')),
})
if not thumbnails:
video_thumbnail = None
# We try first to get a high quality image:
m_thumb = re.search(r'<span itemprop="thumbnail".*?href="(.*?)">',
video_webpage, re.DOTALL)
if m_thumb is not None:
video_thumbnail = m_thumb.group(1)
thumbnail_url = try_get(video_info, lambda x: x['thumbnail_url'][0], compat_str)
if thumbnail_url:
video_thumbnail = compat_urllib_parse_unquote_plus(thumbnail_url)
if video_thumbnail:
thumbnails.append({'url': video_thumbnail})
# upload date
upload_date = self._html_search_meta(
'datePublished', video_webpage, 'upload date', default=None)
if not upload_date:
upload_date = self._search_regex(
[r'(?s)id="eow-date.*?>(.*?)</span>',
r'(?:id="watch-uploader-info".*?>.*?|["\']simpleText["\']\s*:\s*["\'])(?:Published|Uploaded|Streamed live|Started) on (.+?)[<"\']'],
video_webpage, 'upload date', default=None)
if not upload_date:
upload_date = microformat.get('publishDate') or microformat.get('uploadDate')
upload_date = unified_strdate(upload_date)
video_license = self._html_search_regex(
r'<h4[^>]+class="title"[^>]*>\s*License\s*</h4>\s*<ul[^>]*>\s*<li>(.+?)</li',
video_webpage, 'license', default=None)
m_music = re.search(
r'''(?x)
<h4[^>]+class="title"[^>]*>\s*Music\s*</h4>\s*
<ul[^>]*>\s*
<li>(?P<title>.+?)
by (?P<creator>.+?)
(?:
\(.+?\)|
<a[^>]*
(?:
\bhref=["\']/red[^>]*>| # drop possible
>\s*Listen ad-free with YouTube Red # YouTube Red ad
)
.*?
)?</li
''',
video_webpage)
if m_music:
video_alt_title = remove_quotes(unescapeHTML(m_music.group('title')))
video_creator = clean_html(m_music.group('creator'))
else:
video_alt_title = video_creator = None
def extract_meta(field):
return self._html_search_regex(
r'<h4[^>]+class="title"[^>]*>\s*%s\s*</h4>\s*<ul[^>]*>\s*<li>(.+?)</li>\s*' % field,
video_webpage, field, default=None)
track = extract_meta('Song')
artist = extract_meta('Artist')
album = extract_meta('Album')
# Youtube Music Auto-generated description
release_date = release_year = None
if video_description:
mobj = re.search(r'(?s)Provided to YouTube by [^\n]+\n+(?P<track>[^·]+)·(?P<artist>[^\n]+)\n+(?P<album>[^\n]+)(?:.+?℗\s*(?P<release_year>\d{4})(?!\d))?(?:.+?Released on\s*:\s*(?P<release_date>\d{4}-\d{2}-\d{2}))?(.+?\nArtist\s*:\s*(?P<clean_artist>[^\n]+))?', video_description)
if mobj:
if not track:
track = mobj.group('track').strip()
if not artist:
artist = mobj.group('clean_artist') or ', '.join(a.strip() for a in mobj.group('artist').split('·'))
if not album:
album = mobj.group('album').strip()
release_year = mobj.group('release_year')
release_date = mobj.group('release_date')
if release_date:
release_date = release_date.replace('-', '')
if not release_year:
release_year = int(release_date[:4])
if release_year:
release_year = int(release_year)
m_episode = re.search(
r'<div[^>]+id="watch7-headline"[^>]*>\s*<span[^>]*>.*?>(?P<series>[^<]+)</a></b>\s*S(?P<season>\d+)\s*•\s*E(?P<episode>\d+)</span>',
video_webpage)
if m_episode:
series = unescapeHTML(m_episode.group('series'))
season_number = int(m_episode.group('season'))
episode_number = int(m_episode.group('episode'))
else:
series = season_number = episode_number = None
m_cat_container = self._search_regex(
r'(?s)<h4[^>]*>\s*Category\s*</h4>\s*<ul[^>]*>(.*?)</ul>',
video_webpage, 'categories', default=None)
category = None
if m_cat_container:
category = self._html_search_regex(
r'(?s)<a[^<]+>(.*?)</a>', m_cat_container, 'category',
default=None)
if not category:
category = try_get(
microformat, lambda x: x['category'], compat_str)
video_categories = None if category is None else [category]
video_tags = [
unescapeHTML(m.group('content'))
for m in re.finditer(self._meta_regex('og:video:tag'), video_webpage)]
if not video_tags:
video_tags = try_get(video_details, lambda x: x['keywords'], list)
def _extract_count(count_name):
return str_to_int(self._search_regex(
r'-%s-button[^>]+><span[^>]+class="yt-uix-button-content"[^>]*>([\d,]+)</span>'
% re.escape(count_name),
video_webpage, count_name, default=None))
like_count = _extract_count('like')
dislike_count = _extract_count('dislike')
if view_count is None:
view_count = str_to_int(self._search_regex(
r'<[^>]+class=["\']watch-view-count[^>]+>\s*([\d,\s]+)', video_webpage,
'view count', default=None))
average_rating = (
float_or_none(video_details.get('averageRating'))
or try_get(video_info, lambda x: float_or_none(x['avg_rating'][0])))
# subtitles
video_subtitles = self.extract_subtitles(video_id, video_webpage)
automatic_captions = self.extract_automatic_captions(video_id, video_webpage)
video_duration = try_get(
video_info, lambda x: int_or_none(x['length_seconds'][0]))
if not video_duration:
video_duration = int_or_none(video_details.get('lengthSeconds'))
if not video_duration:
video_duration = parse_duration(self._html_search_meta(
'duration', video_webpage, 'video duration'))
# annotations
video_annotations = None
if self._downloader.params.get('writeannotations', False):
xsrf_token = self._search_regex(
r'([\'"])XSRF_TOKEN\1\s*:\s*([\'"])(?P<xsrf_token>[A-Za-z0-9+/=]+)\2',
video_webpage, 'xsrf token', group='xsrf_token', fatal=False)
invideo_url = try_get(
player_response, lambda x: x['annotations'][0]['playerAnnotationsUrlsRenderer']['invideoUrl'], compat_str)
if xsrf_token and invideo_url:
xsrf_field_name = self._search_regex(
r'([\'"])XSRF_FIELD_NAME\1\s*:\s*([\'"])(?P<xsrf_field_name>\w+)\2',
video_webpage, 'xsrf field name',
group='xsrf_field_name', default='session_token')
video_annotations = self._download_webpage(
self._proto_relative_url(invideo_url),
video_id, note='Downloading annotations',
errnote='Unable to download video annotations', fatal=False,
data=urlencode_postdata({xsrf_field_name: xsrf_token}))
chapters = self._extract_chapters(video_webpage, description_original, video_id, video_duration)
# Look for the DASH manifest
if self._downloader.params.get('youtube_include_dash_manifest', True):
dash_mpd_fatal = True
for mpd_url in dash_mpds:
dash_formats = {}
try:
def decrypt_sig(mobj):
s = mobj.group(1)
dec_s = self._decrypt_signature(s, video_id, player_url, age_gate)
return '/signature/%s' % dec_s
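# Encrypted signatures embedded in the manifest URL path as /s/<sig> are
# replaced with their decrypted /signature/<sig> counterpart before the
# manifest is downloaded.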
mpd_url = re.sub(r'/s/([a-fA-F0-9\.]+)', decrypt_sig, mpd_url)
for df in self._extract_mpd_formats(
mpd_url, video_id, fatal=dash_mpd_fatal,
formats_dict=self._formats):
if not df.get('filesize'):
df['filesize'] = _extract_filesize(df['url'])
# Do not overwrite DASH format found in some previous DASH manifest
if df['format_id'] not in dash_formats:
dash_formats[df['format_id']] = df
# Additional DASH manifests may result in HTTP Error 403, so allow
# them to fail without a bug report message if at least one DASH
# manifest has already succeeded. This is a temporary workaround to
# reduce the burst of bug reports until we figure out the reason and
# whether it can be fixed at all.
dash_mpd_fatal = False
except (ExtractorError, KeyError) as e:
self.report_warning(
'Skipping DASH manifest: %r' % e, video_id)
if dash_formats:
# Remove the formats we found through non-DASH, they
# contain less info and it can be wrong, because we use
# fixed values (for example the resolution). See
# https://github.com/ytdl-org/youtube-dl/issues/5774 for an
# example.
formats = [f for f in formats if f['format_id'] not in dash_formats.keys()]
formats.extend(dash_formats.values())
# Check for malformed aspect ratio
stretched_m = re.search(
r'<meta\s+property="og:video:tag".*?content="yt:stretch=(?P<w>[0-9]+):(?P<h>[0-9]+)">',
video_webpage)
if stretched_m:
w = float(stretched_m.group('w'))
h = float(stretched_m.group('h'))
# yt:stretch may hold invalid ratio data (e.g. for Q39EVAstoRM ratio is 17:0).
# We will only process correct ratios.
if w > 0 and h > 0:
ratio = w / h
for f in formats:
if f.get('vcodec') != 'none':
f['stretched_ratio'] = ratio
if not formats:
if 'reason' in video_info:
if 'The uploader has not made this video available in your country.' in video_info['reason']:
regions_allowed = self._html_search_meta(
'regionsAllowed', video_webpage, default=None)
countries = regions_allowed.split(',') if regions_allowed else None
self.raise_geo_restricted(
msg=video_info['reason'][0], countries=countries)
reason = video_info['reason'][0]
if 'Invalid parameters' in reason:
unavailable_message = extract_unavailable_message()
if unavailable_message:
reason = unavailable_message
raise ExtractorError(
'YouTube said: %s' % reason,
expected=True, video_id=video_id)
if video_info.get('license_info') or try_get(player_response, lambda x: x['streamingData']['licenseInfos']):
raise ExtractorError('This video is DRM protected.', expected=True)
self._sort_formats(formats)
self.mark_watched(video_id, video_info, player_response)
return {
'id': video_id,
'uploader': video_uploader,
'uploader_id': video_uploader_id,
'uploader_url': video_uploader_url,
'channel_id': channel_id,
'channel_url': channel_url,
'upload_date': upload_date,
'license': video_license,
'creator': video_creator or artist,
'title': video_title,
'alt_title': video_alt_title or track,
'thumbnails': thumbnails,
'description': video_description,
'categories': video_categories,
'tags': video_tags,
'subtitles': video_subtitles,
'automatic_captions': automatic_captions,
'duration': video_duration,
'age_limit': 18 if age_gate else 0,
'annotations': video_annotations,
'chapters': chapters,
'webpage_url': proto + '://www.youtube.com/watch?v=%s' % video_id,
'view_count': view_count,
'like_count': like_count,
'dislike_count': dislike_count,
'average_rating': average_rating,
'formats': formats,
'is_live': is_live,
'start_time': start_time,
'end_time': end_time,
'series': series,
'season_number': season_number,
'episode_number': episode_number,
'track': track,
'artist': artist,
'album': album,
'release_date': release_date,
'release_year': release_year,
}
class YoutubeTabIE(YoutubeBaseInfoExtractor):
IE_DESC = 'YouTube.com tab'
_VALID_URL = r'https?://(?:\w+\.)?(?:youtube(?:kids)?\.com|invidio\.us)/(?:(?:channel|c|user)/|playlist\?.*?\blist=)(?P<id>[^/?#&]+)'
IE_NAME = 'youtube:tab'
_TESTS = [{
# playlists, multipage
'url': 'https://www.youtube.com/c/ИгорьКлейнер/playlists?view=1&flow=grid',
'playlist_mincount': 94,
'info_dict': {
'id': 'UCqj7Cz7revf5maW9g5pgNcg',
'title': 'Игорь Клейнер - Playlists',
},
}, {
# playlists, multipage, different order
'url': 'https://www.youtube.com/user/igorkle1/playlists?view=1&sort=dd',
'playlist_mincount': 94,
'info_dict': {
'id': 'UCqj7Cz7revf5maW9g5pgNcg',
'title': 'Игорь Клейнер - Playlists',
},
}, {
# playlists, singlepage
'url': 'https://www.youtube.com/user/ThirstForScience/playlists',
'playlist_mincount': 4,
'info_dict': {
'id': 'ThirstForScience',
'title': 'ThirstForScience',
}
}, {
'url': 'https://www.youtube.com/c/ChristophLaimer/playlists',
'only_matching': True,
}, {
# basic, single video playlist
'url': 'https://www.youtube.com/playlist?list=PL4lCao7KL_QFVb7Iudeipvc2BCavECqzc',
'info_dict': {
'uploader_id': 'UCmlqkdCBesrv2Lak1mF_MxA',
'uploader': 'Sergey M.',
'id': 'PL4lCao7KL_QFVb7Iudeipvc2BCavECqzc',
'title': 'youtube-dl public playlist',
},
'playlist_count': 1,
}, {
# empty playlist
'url': 'https://www.youtube.com/playlist?list=PL4lCao7KL_QFodcLWhDpGCYnngnHtQ-Xf',
'info_dict': {
'uploader_id': 'UCmlqkdCBesrv2Lak1mF_MxA',
'uploader': 'Sergey M.',
'id': 'PL4lCao7KL_QFodcLWhDpGCYnngnHtQ-Xf',
'title': 'youtube-dl empty playlist',
},
'playlist_count': 0,
}, {
# Home tab
'url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w/featured',
'info_dict': {
'id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
'title': 'lex will - Home',
},
'playlist_mincount': 2,
}, {
# Videos tab
'url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w/videos',
'info_dict': {
'id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
'title': 'lex will - Videos',
},
'playlist_mincount': 975,
}, {
# Videos tab, sorted by popular
'url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w/videos?view=0&sort=p&flow=grid',
'info_dict': {
'id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
'title': 'lex will - Videos',
},
'playlist_mincount': 199,
}, {
# Playlists tab
'url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w/playlists',
'info_dict': {
'id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
'title': 'lex will - Playlists',
},
'playlist_mincount': 17,
}, {
# Community tab
'url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w/community',
'info_dict': {
'id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
'title': 'lex will - Community',
},
'playlist_mincount': 18,
}, {
# Channels tab
'url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w/channels',
'info_dict': {
'id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
'title': 'lex will - Channels',
},
'playlist_mincount': 138,
}, {
'url': 'https://invidio.us/channel/UC23qupoDRn9YOAVzeoxjOQA',
'only_matching': True,
}, {
'url': 'https://www.youtubekids.com/channel/UCyu8StPfZWapR6rfW_JgqcA',
'only_matching': True,
}, {
'url': 'https://music.youtube.com/channel/UCT-K0qO8z6NzWrywqefBPBQ',
'only_matching': True,
}, {
'note': 'Playlist with deleted videos (#651). As a bonus, the video #51 is also twice in this list.',
'url': 'https://www.youtube.com/playlist?list=PLwP_SiAcdui0KVebT0mU9Apz359a4ubsC',
'info_dict': {
'title': '29C3: Not my department',
'id': 'PLwP_SiAcdui0KVebT0mU9Apz359a4ubsC',
'uploader': 'Christiaan008',
'uploader_id': 'ChRiStIaAn008',
},
'playlist_count': 96,
}, {
'note': 'Large playlist',
'url': 'https://www.youtube.com/playlist?list=UUBABnxM4Ar9ten8Mdjj1j0Q',
'info_dict': {
'title': 'Uploads from Cauchemar',
'id': 'UUBABnxM4Ar9ten8Mdjj1j0Q',
'uploader': 'Cauchemar',
'uploader_id': 'Cauchemar89',
},
'playlist_mincount': 1123,
}, {
# even larger playlist, 8832 videos
'url': 'http://www.youtube.com/user/NASAgovVideo/videos',
'only_matching': True,
}, {
'note': 'Buggy playlist: the webpage has a "Load more" button but it doesn\'t have more videos',
'url': 'https://www.youtube.com/playlist?list=UUXw-G3eDE9trcvY2sBMM_aA',
'info_dict': {
'title': 'Uploads from Interstellar Movie',
'id': 'UUXw-G3eDE9trcvY2sBMM_aA',
'uploader': 'Interstellar Movie',
'uploader_id': 'InterstellarMovie1',
},
'playlist_mincount': 21,
}, {
# https://github.com/ytdl-org/youtube-dl/issues/21844
'url': 'https://www.youtube.com/playlist?list=PLzH6n4zXuckpfMu_4Ff8E7Z1behQks5ba',
'info_dict': {
'title': 'Data Analysis with Dr Mike Pound',
'id': 'PLzH6n4zXuckpfMu_4Ff8E7Z1behQks5ba',
'uploader_id': 'Computerphile',
'uploader': 'Computerphile',
},
'playlist_mincount': 11,
}, {
'url': 'https://invidio.us/playlist?list=PLDIoUOhQQPlXr63I_vwF9GD8sAKh77dWU',
'only_matching': True,
}]
@classmethod
def suitable(cls, url):
return False if YoutubeLiveIE.suitable(url) else super(
YoutubeTabIE, cls).suitable(url)
def _extract_channel_id(self, webpage):
channel_id = self._html_search_meta(
'channelId', webpage, 'channel id', default=None)
if channel_id:
return channel_id
channel_url = self._html_search_meta(
('og:url', 'al:ios:url', 'al:android:url', 'al:web:url',
'twitter:url', 'twitter:app:url:iphone', 'twitter:app:url:ipad',
'twitter:app:url:googleplay'), webpage, 'channel url')
return self._search_regex(
            r'https?://(?:www\.)?youtube\.com/channel/([^/?#&]+)',
channel_url, 'channel id')
@staticmethod
def _extract_grid_item_renderer(item):
for item_kind in ('Playlist', 'Video', 'Channel'):
renderer = item.get('grid%sRenderer' % item_kind)
if renderer:
return renderer
def _extract_video(self, renderer):
video_id = renderer.get('videoId')
title = try_get(
renderer, lambda x: x['title']['runs'][0]['text'], compat_str)
description = try_get(
renderer, lambda x: x['descriptionSnippet']['runs'][0]['text'],
compat_str)
duration = parse_duration(try_get(
renderer, lambda x: x['lengthText']['simpleText'], compat_str))
view_count_text = try_get(
renderer, lambda x: x['viewCountText']['simpleText'], compat_str) or ''
view_count = int_or_none(self._search_regex(
r'^(\d+)', re.sub(r'\s', '', view_count_text),
'view count', default=None))
uploader = try_get(
renderer, lambda x: x['ownerText']['runs'][0]['text'], compat_str)
return {
'_type': 'url_transparent',
'ie_key': YoutubeIE.ie_key(),
'id': video_id,
'url': video_id,
'title': title,
'description': description,
'duration': duration,
'view_count': view_count,
'uploader': uploader,
}
def _grid_entries(self, grid_renderer):
for item in grid_renderer['items']:
if not isinstance(item, dict):
continue
renderer = self._extract_grid_item_renderer(item)
if not isinstance(renderer, dict):
continue
title = try_get(
renderer, lambda x: x['title']['runs'][0]['text'], compat_str)
# playlist
playlist_id = renderer.get('playlistId')
if playlist_id:
yield self.url_result(
'https://www.youtube.com/playlist?list=%s' % playlist_id,
ie=YoutubeTabIE.ie_key(), video_id=playlist_id,
video_title=title)
# video
video_id = renderer.get('videoId')
if video_id:
yield self._extract_video(renderer)
# channel
channel_id = renderer.get('channelId')
if channel_id:
title = try_get(
renderer, lambda x: x['title']['simpleText'], compat_str)
yield self.url_result(
'https://www.youtube.com/channel/%s' % channel_id,
ie=YoutubeTabIE.ie_key(), video_title=title)
def _shelf_entries_trimmed(self, shelf_renderer):
renderer = try_get(
shelf_renderer, lambda x: x['content']['horizontalListRenderer'], dict)
if not renderer:
return
# TODO: add support for nested playlists so each shelf is processed
# as separate playlist
# TODO: this includes only first N items
for entry in self._grid_entries(renderer):
yield entry
def _shelf_entries(self, shelf_renderer):
ep = try_get(
shelf_renderer, lambda x: x['endpoint']['commandMetadata']['webCommandMetadata']['url'],
compat_str)
shelf_url = urljoin('https://www.youtube.com', ep)
if not shelf_url:
return
title = try_get(
shelf_renderer, lambda x: x['title']['runs'][0]['text'], compat_str)
yield self.url_result(shelf_url, video_title=title)
def _playlist_entries(self, video_list_renderer):
for content in video_list_renderer['contents']:
if not isinstance(content, dict):
continue
renderer = content.get('playlistVideoRenderer')
if not isinstance(renderer, dict):
continue
video_id = renderer.get('videoId')
if not video_id:
continue
yield self._extract_video(renderer)
def _video_entry(self, video_renderer):
video_id = video_renderer.get('videoId')
if video_id:
return self._extract_video(video_renderer)
def _post_thread_entries(self, post_thread_renderer):
post_renderer = try_get(
post_thread_renderer, lambda x: x['post']['backstagePostRenderer'], dict)
if not post_renderer:
return
# video attachment
video_renderer = try_get(
post_renderer, lambda x: x['backstageAttachment']['videoRenderer'], dict)
video_id = None
if video_renderer:
entry = self._video_entry(video_renderer)
            if entry:
                video_id = entry['id']
                yield entry
# inline video links
runs = try_get(post_renderer, lambda x: x['contentText']['runs'], list) or []
for run in runs:
if not isinstance(run, dict):
continue
ep_url = try_get(
run, lambda x: x['navigationEndpoint']['urlEndpoint']['url'], compat_str)
if not ep_url:
continue
if not YoutubeIE.suitable(ep_url):
continue
ep_video_id = YoutubeIE._match_id(ep_url)
if video_id == ep_video_id:
continue
            yield self.url_result(ep_url, ie=YoutubeIE.ie_key(), video_id=ep_video_id)
def _post_thread_continuation_entries(self, post_thread_continuation):
contents = post_thread_continuation.get('contents')
if not isinstance(contents, list):
return
for content in contents:
renderer = content.get('backstagePostThreadRenderer')
if not isinstance(renderer, dict):
continue
for entry in self._post_thread_entries(renderer):
yield entry
@staticmethod
def _extract_next_continuation_data(renderer):
next_continuation = try_get(
renderer, lambda x: x['continuations'][0]['nextContinuationData'], dict)
if not next_continuation:
return
continuation = next_continuation.get('continuation')
if not continuation:
return
ctp = next_continuation.get('clickTrackingParams')
return {
'ctoken': continuation,
'continuation': continuation,
'itct': ctp,
}
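    # Illustrative only (not from the extractor itself): the nested structure the
    # helper above expects, inferred from its try_get lookups.
    #   renderer = {'continuations': [{'nextContinuationData': {
    #       'continuation': '<token>', 'clickTrackingParams': '<itct>'}}]}
    #   _extract_next_continuation_data(renderer)
    #   -> {'ctoken': '<token>', 'continuation': '<token>', 'itct': '<itct>'}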
@classmethod
def _extract_continuation(cls, renderer):
next_continuation = cls._extract_next_continuation_data(renderer)
if next_continuation:
return next_continuation
contents = renderer.get('contents')
if not isinstance(contents, list):
return
for content in contents:
if not isinstance(content, dict):
continue
continuation_ep = try_get(
content, lambda x: x['continuationItemRenderer']['continuationEndpoint'],
dict)
if not continuation_ep:
continue
continuation = try_get(
continuation_ep, lambda x: x['continuationCommand']['token'], compat_str)
if not continuation:
continue
ctp = continuation_ep.get('clickTrackingParams')
if not ctp:
continue
return {
'ctoken': continuation,
'continuation': continuation,
'itct': ctp,
}
def _entries(self, tab, identity_token):
continuation = None
slr_contents = tab['sectionListRenderer']['contents']
for slr_content in slr_contents:
if not isinstance(slr_content, dict):
continue
is_renderer = try_get(slr_content, lambda x: x['itemSectionRenderer'], dict)
if not is_renderer:
continue
isr_contents = try_get(is_renderer, lambda x: x['contents'], list) or []
for isr_content in isr_contents:
if not isinstance(isr_content, dict):
continue
renderer = isr_content.get('playlistVideoListRenderer')
if renderer:
for entry in self._playlist_entries(renderer):
yield entry
continuation = self._extract_continuation(renderer)
continue
renderer = isr_content.get('gridRenderer')
if renderer:
for entry in self._grid_entries(renderer):
yield entry
continuation = self._extract_continuation(renderer)
continue
renderer = isr_content.get('shelfRenderer')
if renderer:
for entry in self._shelf_entries(renderer):
yield entry
continue
renderer = isr_content.get('backstagePostThreadRenderer')
if renderer:
for entry in self._post_thread_entries(renderer):
yield entry
continuation = self._extract_continuation(renderer)
continue
renderer = isr_content.get('videoRenderer')
if renderer:
entry = self._video_entry(renderer)
if entry:
yield entry
if not continuation:
continuation = self._extract_continuation(is_renderer)
headers = {
'x-youtube-client-name': '1',
'x-youtube-client-version': '2.20201112.04.01',
}
if identity_token:
headers['x-youtube-identity-token'] = identity_token
for page_num in itertools.count(1):
if not continuation:
break
browse = self._download_json(
'https://www.youtube.com/browse_ajax', None,
'Downloading page %d' % page_num,
headers=headers, query=continuation, fatal=False)
if not browse:
break
response = try_get(browse, lambda x: x[1]['response'], dict)
if not response:
break
continuation_contents = try_get(
response, lambda x: x['continuationContents'], dict)
if continuation_contents:
continuation_renderer = continuation_contents.get('playlistVideoListContinuation')
if continuation_renderer:
for entry in self._playlist_entries(continuation_renderer):
yield entry
continuation = self._extract_continuation(continuation_renderer)
continue
continuation_renderer = continuation_contents.get('gridContinuation')
if continuation_renderer:
for entry in self._grid_entries(continuation_renderer):
yield entry
continuation = self._extract_continuation(continuation_renderer)
continue
continuation_renderer = continuation_contents.get('itemSectionContinuation')
if continuation_renderer:
for entry in self._post_thread_continuation_entries(continuation_renderer):
yield entry
continuation = self._extract_continuation(continuation_renderer)
continue
continuation_items = try_get(
response, lambda x: x['onResponseReceivedActions'][0]['appendContinuationItemsAction']['continuationItems'], list)
if continuation_items:
continuation_item = continuation_items[0]
if not isinstance(continuation_item, dict):
continue
renderer = continuation_item.get('playlistVideoRenderer')
if renderer:
video_list_renderer = {'contents': continuation_items}
for entry in self._playlist_entries(video_list_renderer):
yield entry
continuation = self._extract_continuation(video_list_renderer)
continue
break
@staticmethod
def _extract_selected_tab(tabs):
for tab in tabs:
if try_get(tab, lambda x: x['tabRenderer']['selected'], bool):
return tab['tabRenderer']
else:
raise ExtractorError('Unable to find selected tab')
def _real_extract(self, url):
channel_id = self._match_id(url)
url = compat_urlparse.urlunparse(
compat_urlparse.urlparse(url)._replace(netloc='www.youtube.com'))
webpage = self._download_webpage(url, channel_id)
data = self._extract_yt_initial_data(channel_id, webpage)
tabs = data['contents']['twoColumnBrowseResultsRenderer']['tabs']
selected_tab = self._extract_selected_tab(tabs)
channel_title = try_get(
data, lambda x: x['metadata']['channelMetadataRenderer']['title'],
compat_str)
channel_external_id = try_get(
data, lambda x: x['metadata']['channelMetadataRenderer']['externalId'],
compat_str)
tab_title = selected_tab.get('title')
title = channel_title or channel_id
if tab_title:
title += ' - %s' % tab_title
identity_token = self._search_regex(
r'\bID_TOKEN["\']\s*:\s*["\'](.+?)["\']', webpage,
'identity token', default=None)
return self.playlist_result(
self._entries(selected_tab['content'], identity_token),
playlist_id=channel_external_id or channel_id,
playlist_title=title)
class YoutubePlaylistIE(InfoExtractor):
IE_DESC = 'YouTube.com playlists'
_VALID_URL = r"""(?x)(?:
(?:https?://)?
(?:\w+\.)?
(?:
(?:
youtube(?:kids)?\.com|
invidio\.us
)
/
(?:
(?:course|view_play_list|my_playlists|artist|playlist|watch|embed/(?:videoseries|[0-9A-Za-z_-]{11}))
\? (?:.*?[&;])*? (?:p|a|list)=
| p/
)|
youtu\.be/[0-9A-Za-z_-]{11}\?.*?\blist=
)
(
(?:PL|LL|EC|UU|FL|RD|UL|TL|PU|OLAK5uy_)?[0-9A-Za-z-_]{10,}
# Top tracks, they can also include dots
|(?:MC)[\w\.]*
)
.*
|
(%(playlist_id)s)
)""" % {'playlist_id': YoutubeBaseInfoExtractor._PLAYLIST_ID_RE}
IE_NAME = 'youtube:playlist'
_TESTS = [{
'note': 'issue #673',
'url': 'PLBB231211A4F62143',
'info_dict': {
'title': '[OLD]Team Fortress 2 (Class-based LP)',
'id': 'PLBB231211A4F62143',
'uploader': 'Wickydoo',
'uploader_id': 'Wickydoo',
},
'playlist_mincount': 29,
}, {
'url': 'PLtPgu7CB4gbY9oDN3drwC3cMbJggS7dKl',
'info_dict': {
'title': 'YDL_safe_search',
'id': 'PLtPgu7CB4gbY9oDN3drwC3cMbJggS7dKl',
},
'playlist_count': 2,
'skip': 'This playlist is private',
}, {
'note': 'embedded',
'url': 'https://www.youtube.com/embed/videoseries?list=PL6IaIsEjSbf96XFRuNccS_RuEXwNdsoEu',
'playlist_count': 4,
'info_dict': {
'title': 'JODA15',
'id': 'PL6IaIsEjSbf96XFRuNccS_RuEXwNdsoEu',
'uploader': 'milan',
'uploader_id': 'UCEI1-PVPcYXjB73Hfelbmaw',
}
}, {
'url': 'http://www.youtube.com/embed/_xDOZElKyNU?list=PLsyOSbh5bs16vubvKePAQ1x3PhKavfBIl',
'playlist_mincount': 982,
'info_dict': {
'title': '2018 Chinese New Singles (11/6 updated)',
'id': 'PLsyOSbh5bs16vubvKePAQ1x3PhKavfBIl',
'uploader': 'LBK',
'uploader_id': 'sdragonfang',
}
}, {
'note': 'Embedded SWF player',
'url': 'https://www.youtube.com/p/YN5VISEtHet5D4NEvfTd0zcgFk84NqFZ?hl=en_US&fs=1&rel=0',
'playlist_count': 4,
'info_dict': {
'title': 'JODA7',
'id': 'YN5VISEtHet5D4NEvfTd0zcgFk84NqFZ',
},
'skip': 'This playlist does not exist',
}, {
# Playlist URL that does not actually serve a playlist
'url': 'https://www.youtube.com/watch?v=FqZTN594JQw&list=PLMYEtVRpaqY00V9W81Cwmzp6N6vZqfUKD4',
'info_dict': {
'id': 'FqZTN594JQw',
'ext': 'webm',
'title': "Smiley's People 01 detective, Adventure Series, Action",
'uploader': 'STREEM',
'uploader_id': 'UCyPhqAZgwYWZfxElWVbVJng',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UCyPhqAZgwYWZfxElWVbVJng',
'upload_date': '20150526',
'license': 'Standard YouTube License',
'description': 'md5:507cdcb5a49ac0da37a920ece610be80',
'categories': ['People & Blogs'],
'tags': list,
'view_count': int,
'like_count': int,
'dislike_count': int,
},
'params': {
'skip_download': True,
},
'skip': 'This video is not available.',
'add_ie': [YoutubeIE.ie_key()],
}, {
'url': 'https://youtu.be/yeWKywCrFtk?list=PL2qgrgXsNUG5ig9cat4ohreBjYLAPC0J5',
'info_dict': {
'id': 'yeWKywCrFtk',
'ext': 'mp4',
'title': 'Small Scale Baler and Braiding Rugs',
'uploader': 'Backus-Page House Museum',
'uploader_id': 'backuspagemuseum',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/backuspagemuseum',
'upload_date': '20161008',
'description': 'md5:800c0c78d5eb128500bffd4f0b4f2e8a',
'categories': ['Nonprofits & Activism'],
'tags': list,
'like_count': int,
'dislike_count': int,
},
'params': {
'noplaylist': True,
'skip_download': True,
},
}, {
'url': 'https://youtu.be/uWyaPkt-VOI?list=PL9D9FC436B881BA21',
'only_matching': True,
}, {
'url': 'TLGGrESM50VT6acwMjAyMjAxNw',
'only_matching': True,
}, {
# music album playlist
'url': 'OLAK5uy_m4xAFdmMC5rX3Ji3g93pQe3hqLZw_9LhM',
'only_matching': True,
}, {
'url': 'https://www.youtubekids.com/watch?v=Agk7R8I8o5U&list=PUZ6jURNr1WQZCNHF0ao-c0g',
'only_matching': True,
}]
@classmethod
def suitable(cls, url):
return False if YoutubeTabIE.suitable(url) else super(
YoutubePlaylistIE, cls).suitable(url)
def _real_extract(self, url):
# Extract playlist id
mobj = re.match(self._VALID_URL, url)
if mobj is None:
raise ExtractorError('Invalid URL: %s' % url)
playlist_id = mobj.group(1) or mobj.group(2)
return self.url_result(
'https://www.youtube.com/playlist?list=%s' % playlist_id,
ie=YoutubeTabIE.ie_key(), video_id=playlist_id)
class YoutubeYtUserIE(InfoExtractor):
_VALID_URL = r'ytuser:(?P<id>.+)'
_TESTS = [{
'url': 'ytuser:phihag',
'only_matching': True,
}]
def _real_extract(self, url):
user_id = self._match_id(url)
return self.url_result(
'https://www.youtube.com/user/%s' % user_id,
ie=YoutubeTabIE.ie_key(), video_id=user_id)
class YoutubeLiveIE(YoutubeBaseInfoExtractor):
IE_DESC = 'YouTube.com live streams'
_VALID_URL = r'(?P<base_url>https?://(?:\w+\.)?youtube\.com/(?:(?:user|channel|c)/)?(?P<id>[^/]+))/live'
IE_NAME = 'youtube:live'
_TESTS = [{
'url': 'https://www.youtube.com/user/TheYoungTurks/live',
'info_dict': {
'id': 'a48o2S1cPoo',
'ext': 'mp4',
'title': 'The Young Turks - Live Main Show',
'uploader': 'The Young Turks',
'uploader_id': 'TheYoungTurks',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/TheYoungTurks',
'upload_date': '20150715',
'license': 'Standard YouTube License',
'description': 'md5:438179573adcdff3c97ebb1ee632b891',
'categories': ['News & Politics'],
'tags': ['Cenk Uygur (TV Program Creator)', 'The Young Turks (Award-Winning Work)', 'Talk Show (TV Genre)'],
'like_count': int,
'dislike_count': int,
},
'params': {
'skip_download': True,
},
}, {
'url': 'https://www.youtube.com/channel/UC1yBKRuGpC1tSM73A0ZjYjQ/live',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/c/CommanderVideoHq/live',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/TheYoungTurks/live',
'only_matching': True,
}]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
channel_id = mobj.group('id')
base_url = mobj.group('base_url')
webpage = self._download_webpage(url, channel_id, fatal=False)
if webpage:
page_type = self._og_search_property(
'type', webpage, 'page type', default='')
video_id = self._html_search_meta(
'videoId', webpage, 'video id', default=None)
if page_type.startswith('video') and video_id and re.match(
r'^[0-9A-Za-z_-]{11}$', video_id):
return self.url_result(video_id, YoutubeIE.ie_key())
return self.url_result(base_url)
class YoutubeSearchIE(SearchInfoExtractor, YoutubeBaseInfoExtractor):
IE_DESC = 'YouTube.com searches'
# there doesn't appear to be a real limit, for example if you search for
# 'python' you get more than 8.000.000 results
_MAX_RESULTS = float('inf')
IE_NAME = 'youtube:search'
_SEARCH_KEY = 'ytsearch'
_SEARCH_PARAMS = None
_TESTS = []
def _entries(self, query, n):
data = {
'context': {
'client': {
'clientName': 'WEB',
'clientVersion': '2.20201021.03.00',
}
},
'query': query,
}
if self._SEARCH_PARAMS:
data['params'] = self._SEARCH_PARAMS
total = 0
for page_num in itertools.count(1):
search = self._download_json(
'https://www.youtube.com/youtubei/v1/search?key=AIzaSyAO_FJ2SlqU8Q4STEHLGCilw_Y9_11qcW8',
video_id='query "%s"' % query,
note='Downloading page %s' % page_num,
errnote='Unable to download API page', fatal=False,
data=json.dumps(data).encode('utf8'),
headers={'content-type': 'application/json'})
if not search:
break
slr_contents = try_get(
search,
(lambda x: x['contents']['twoColumnSearchResultsRenderer']['primaryContents']['sectionListRenderer']['contents'],
lambda x: x['onResponseReceivedCommands'][0]['appendContinuationItemsAction']['continuationItems']),
list)
if not slr_contents:
break
isr_contents = try_get(
slr_contents,
lambda x: x[0]['itemSectionRenderer']['contents'],
list)
if not isr_contents:
break
for content in isr_contents:
if not isinstance(content, dict):
continue
video = content.get('videoRenderer')
if not isinstance(video, dict):
continue
video_id = video.get('videoId')
if not video_id:
continue
title = try_get(video, lambda x: x['title']['runs'][0]['text'], compat_str)
description = try_get(video, lambda x: x['descriptionSnippet']['runs'][0]['text'], compat_str)
duration = parse_duration(try_get(video, lambda x: x['lengthText']['simpleText'], compat_str))
view_count_text = try_get(video, lambda x: x['viewCountText']['simpleText'], compat_str) or ''
view_count = int_or_none(self._search_regex(
r'^(\d+)', re.sub(r'\s', '', view_count_text),
'view count', default=None))
uploader = try_get(video, lambda x: x['ownerText']['runs'][0]['text'], compat_str)
total += 1
yield {
'_type': 'url_transparent',
'ie_key': YoutubeIE.ie_key(),
'id': video_id,
'url': video_id,
'title': title,
'description': description,
'duration': duration,
'view_count': view_count,
'uploader': uploader,
}
if total == n:
return
token = try_get(
slr_contents,
lambda x: x[1]['continuationItemRenderer']['continuationEndpoint']['continuationCommand']['token'],
compat_str)
if not token:
break
data['continuation'] = token
def _get_n_results(self, query, n):
"""Get a specified number of results for a query"""
return self.playlist_result(self._entries(query, n), query)
class YoutubeSearchDateIE(YoutubeSearchIE):
IE_NAME = YoutubeSearchIE.IE_NAME + ':date'
_SEARCH_KEY = 'ytsearchdate'
IE_DESC = 'YouTube.com searches, newest videos first'
_SEARCH_PARAMS = 'CAI%3D'
r"""
class YoutubeSearchURLIE(YoutubeSearchIE):
IE_DESC = 'YouTube.com search URLs'
IE_NAME = 'youtube:search_url'
_VALID_URL = r'https?://(?:www\.)?youtube\.com/results\?(.*?&)?(?:search_query|q)=(?P<query>[^&]+)(?:[&]|$)'
_TESTS = [{
'url': 'https://www.youtube.com/results?baz=bar&search_query=youtube-dl+test+video&filters=video&lclk=video',
'playlist_mincount': 5,
'info_dict': {
'title': 'youtube-dl test video',
}
}, {
'url': 'https://www.youtube.com/results?q=test&sp=EgQIBBgB',
'only_matching': True,
}]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
query = compat_urllib_parse_unquote_plus(mobj.group('query'))
webpage = self._download_webpage(url, query)
return self.playlist_result(self._process_page(webpage), playlist_title=query)
"""
class YoutubeFeedsInfoExtractor(YoutubeBaseInfoExtractor):
"""
Base class for feed extractors
Subclasses must define the _FEED_NAME and _PLAYLIST_TITLE properties.
"""
_LOGIN_REQUIRED = True
@property
def IE_NAME(self):
return 'youtube:%s' % self._FEED_NAME
def _real_initialize(self):
self._login()
def _entries(self, page):
# The extraction process is the same as for playlists, but the regex
# for the video ids doesn't contain an index
ids = []
more_widget_html = content_html = page
for page_num in itertools.count(1):
matches = re.findall(r'href="\s*/watch\?v=([0-9A-Za-z_-]{11})', content_html)
# 'recommended' feed has infinite 'load more' and each new portion spins
# the same videos in (sometimes) slightly different order, so we'll check
# for unicity and break when portion has no new videos
new_ids = list(filter(lambda video_id: video_id not in ids, orderedSet(matches)))
if not new_ids:
break
ids.extend(new_ids)
for entry in self._ids_to_results(new_ids):
yield entry
mobj = re.search(r'data-uix-load-more-href="/?(?P<more>[^"]+)"', more_widget_html)
if not mobj:
break
more = self._download_json(
'https://www.youtube.com/%s' % mobj.group('more'), self._PLAYLIST_TITLE,
'Downloading page #%s' % page_num,
transform_source=uppercase_escape,
headers=self._YOUTUBE_CLIENT_HEADERS)
content_html = more['content_html']
more_widget_html = more['load_more_widget_html']
def _real_extract(self, url):
page = self._download_webpage(
'https://www.youtube.com/feed/%s' % self._FEED_NAME,
self._PLAYLIST_TITLE)
return self.playlist_result(
self._entries(page), playlist_title=self._PLAYLIST_TITLE)
class YoutubeWatchLaterIE(InfoExtractor):
IE_NAME = 'youtube:watchlater'
IE_DESC = 'Youtube watch later list, ":ytwatchlater" for short (requires authentication)'
_VALID_URL = r'https?://(?:www\.)?youtube\.com/(?:feed/watch_later|(?:playlist|watch)\?(?:.+&)?list=WL)|:ytwatchlater'
_TESTS = [{
'url': 'https://www.youtube.com/watch?v=bCNU9TrbiRk&index=1&list=WL',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/feed/watch_later',
'only_matching': True,
}]
def _real_extract(self, url):
return self.url_result(
'https://www.youtube.com/playlist?list=WL', ie=YoutubeTabIE.ie_key())
class YoutubeRecommendedIE(YoutubeFeedsInfoExtractor):
IE_DESC = 'YouTube.com recommended videos, ":ytrec" for short (requires authentication)'
_VALID_URL = r'https?://(?:www\.)?youtube\.com/feed/recommended|:ytrec(?:ommended)?'
_FEED_NAME = 'recommended'
_PLAYLIST_TITLE = 'Youtube Recommended videos'
class YoutubeSubscriptionsIE(YoutubeFeedsInfoExtractor):
IE_DESC = 'YouTube.com subscriptions feed, "ytsubs" keyword (requires authentication)'
_VALID_URL = r'https?://(?:www\.)?youtube\.com/feed/subscriptions|:ytsubs(?:criptions)?'
_FEED_NAME = 'subscriptions'
_PLAYLIST_TITLE = 'Youtube Subscriptions'
class YoutubeHistoryIE(YoutubeFeedsInfoExtractor):
IE_DESC = 'Youtube watch history, ":ythistory" for short (requires authentication)'
_VALID_URL = r'https?://(?:www\.)?youtube\.com/feed/history|:ythistory'
_FEED_NAME = 'history'
_PLAYLIST_TITLE = 'Youtube History'
class YoutubeTruncatedURLIE(InfoExtractor):
IE_NAME = 'youtube:truncated_url'
IE_DESC = False # Do not list
_VALID_URL = r'''(?x)
(?:https?://)?
(?:\w+\.)?[yY][oO][uU][tT][uU][bB][eE](?:-nocookie)?\.com/
(?:watch\?(?:
feature=[a-z_]+|
annotation_id=annotation_[^&]+|
x-yt-cl=[0-9]+|
hl=[^&]*|
t=[0-9]+
)?
|
attribution_link\?a=[^&]+
)
$
'''
_TESTS = [{
'url': 'https://www.youtube.com/watch?annotation_id=annotation_3951667041',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/watch?',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/watch?x-yt-cl=84503534',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/watch?feature=foo',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/watch?hl=en-GB',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/watch?t=2372',
'only_matching': True,
}]
def _real_extract(self, url):
raise ExtractorError(
'Did you forget to quote the URL? Remember that & is a meta '
'character in most shells, so you want to put the URL in quotes, '
'like youtube-dl '
'"https://www.youtube.com/watch?feature=foo&v=BaW_jenozKc" '
' or simply youtube-dl BaW_jenozKc .',
expected=True)
class YoutubeTruncatedIDIE(InfoExtractor):
IE_NAME = 'youtube:truncated_id'
IE_DESC = False # Do not list
_VALID_URL = r'https?://(?:www\.)?youtube\.com/watch\?v=(?P<id>[0-9A-Za-z_-]{1,10})$'
_TESTS = [{
'url': 'https://www.youtube.com/watch?v=N_708QY7Ob',
'only_matching': True,
}]
def _real_extract(self, url):
video_id = self._match_id(url)
raise ExtractorError(
'Incomplete YouTube ID %s. URL %s looks truncated.' % (video_id, url),
expected=True)
|
py | 1a395b9f9b884395a5ac3bf46b14dbcc14e04b0b | from . import encode
import numpy
def pygame_play(data, rate=44100):
''' Send audio array to pygame for playback
'''
import pygame
pygame.mixer.init(rate, -16, 1, 1024)
    sound = pygame.sndarray.make_sound(encode.as_int16(data))
length = sound.get_length()
sound.play()
pygame.time.wait(int(length * 1000))
pygame.mixer.quit()
def pygame_supported():
    ''' Return True if pygame playback is supported
'''
try:
import pygame
    except ImportError:
return False
return True
def oss_play(data, rate=44100):
''' Send audio array to oss for playback
'''
import ossaudiodev
audio = ossaudiodev.open('/dev/audio','w')
formats = audio.getfmts()
if ossaudiodev.AFMT_S16_LE & formats:
# Use 16 bit if available
audio.setfmt(ossaudiodev.AFMT_S16_LE)
data = encode.as_int16(data)
elif ossaudiodev.AFMT_U8 & formats:
# Otherwise use 8 bit
audio.setfmt(ossaudiodev.AFMT_U8)
data = encode.as_uint8(data)
audio.speed(rate)
while len(data):
audio.write(data[:1024])
data = data[1024:]
audio.flush()
audio.sync()
audio.close()
def oss_supported():
    ''' Return True if oss playback is supported
'''
try:
import ossaudiodev
    except ImportError:
return False
return True
def pyaudio_play(data, rate=44100):
''' Send audio array to pyaudio for playback
'''
import pyaudio
p = pyaudio.PyAudio()
stream = p.open(format=pyaudio.paFloat32, channels=1, rate=rate, output=1)
    stream.write(data.astype(numpy.float32).tobytes())
stream.close()
p.terminate()
def pyaudio_supported():
    ''' Return True if pyaudio playback is supported
'''
try:
import pyaudio
    except ImportError:
return False
return True
def play(data, rate=44100):
''' Send audio to first available playback method
'''
if pygame_supported():
return pygame_play(data, rate)
elif oss_supported():
return oss_play(data, rate)
elif pyaudio_supported():
return pyaudio_play(data, rate)
else:
raise Exception("No supported playback method found")
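if __name__ == '__main__':
    # Illustrative usage added for clarity (not part of the original module):
    # play one second of a 440 Hz sine tone through the first available backend.
    rate = 44100
    t = numpy.arange(rate) / float(rate)
    tone = 0.5 * numpy.sin(2 * numpy.pi * 440.0 * t)
    play(tone, rate=rate)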
|
py | 1a395bd2cbaded9c39b8319c53f3728be69e9f45 | """Functions to manipulate season data"""
import os
import logging
import pandas as pd
from pynba.config import config
from pynba.constants import WNBA, LOCAL, S3
from pynba import load_pbpstats
from pynba.parquet import load_pq_to_df, save_df_to_pq
from pynba.aws_s3 import list_objects
__all__ = [
"season_from_file",
"season_from_pbpstats",
"seasons_on_file",
"save_season",
]
logger = logging.getLogger(__name__)
def save_season(season):
    """Save season data as a parquet file"""
league = season["league"].iloc[0]
year = season["year"].iloc[0]
season_type = season["season_type"].iloc[0]
save_df_to_pq(season, _season_filepath(league, year, season_type))
def _season_filename(league, year, season_type):
return f"{league}_{year}_{season_type}_games.parquet"
def _seasons_dir():
return os.path.join(config.local_data_directory, config.seasons_directory)
def _season_filepath(league, year, season_type):
return os.path.join(_seasons_dir(), _season_filename(league, year, season_type))
def seasons_on_file():
"""Produces a Pandas DataFrame with info on the seasons on file"""
if config.seasons_source == LOCAL:
filenames = os.listdir(_seasons_dir())
elif config.seasons_source == S3:
prefix = f"{config.aws_s3_key_prefix}/{config.seasons_directory}/"
objects = list_objects(config.aws_s3_bucket, Prefix=prefix)
filenames = [obj["Key"][len(prefix) :] for obj in objects]
else:
raise ValueError(
f"Incompatible config for season source data: {config.seasons_source}"
)
leagues, years, season_types = zip(
*[fn.split(".")[0].split("_")[:3] for fn in filenames]
)
return pd.DataFrame(
{
"league": leagues,
"year": [int(year) for year in years],
"season_type": season_types,
}
).sort_values(by=["league", "year", "season_type"], ascending=False)
def season_from_file(league, year, season_type):
"""
Loads season data from file
Parameters
----------
league : str
e.g. "nba", "wnba"
year : int
e.g. 2018
season_type : str
e.g. "Regular Season", "Playoffs"
Returns
-------
pd.DataFrame
"""
if config.seasons_source == LOCAL:
source = _season_filepath(league, year, season_type)
elif config.seasons_source == S3:
filename = _season_filename(league, year, season_type)
source = (
f"s3://{config.aws_s3_bucket}/{config.aws_s3_key_prefix}/"
f"{config.seasons_directory}/{filename}"
)
else:
raise ValueError(
f"Incompatible config for season source data: {config.seasons_source}"
)
return load_pq_to_df(source)
def season_from_pbpstats(league, year, season_type):
"""
Loads season data from pbpstats data
Parameters
----------
league : str
e.g. "nba", "wnba"
year : int
e.g. 2018
season_type : str
e.g. "Regular Season", "Playoffs"
Returns
-------
pd.DataFrame
"""
pbpstats_year = _parse_year(year, league)
pbpstats_season = load_pbpstats.load_season_from_web(
league, pbpstats_year, season_type
)
season = pd.DataFrame(pbpstats_season.games.final_games)
season["league"] = league
season["year"] = year
season["season_type"] = season_type
if "visitor_team_id" in season:
season.rename(
columns={"visitor_team_id": "away_team_id"}, copy=False, inplace=True
)
return season
def _parse_year(year, league):
"""
Parses a year integer into a pbpstats
compatible year string. The year int represents
the end of the season.
"""
if league == WNBA:
return str(year)
    return f"{year - 1}-{year % 100:02}"
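# Illustrative examples (added for clarity; not part of the original module):
#   _parse_year(2018, "nba") -> "2017-18"  (pbpstats labels NBA seasons by both calendar years)
#   _parse_year(2019, WNBA)  -> "2019"     (WNBA seasons use a single calendar year)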
|
py | 1a395c30eab53d318af3e99075e4d0a7935ea841 | # Copyright (c) 2018, Hubert Kario
#
# See the LICENSE file for legal information regarding use of this file.
# compatibility with Python 2.6, for that we need unittest2 package,
# which is not available on 3.3 or 3.4
try:
import unittest2 as unittest
except ImportError:
import unittest
try:
import mock
from mock import call
except ImportError:
import unittest.mock as mock
from unittest.mock import call
from tlslite.utils.deprecations import deprecated_params, \
deprecated_attrs, deprecated_class_name, \
deprecated_method
# see https://github.com/pytest-dev/py/issues/110
# preload the list until the list of loaded modules is static
try:
import py.error
except ImportError:
pass # ignore
import sys
while True:
end = True
for v in list(sys.modules.values()):
old = set(sys.modules.values())
_ = getattr(v, '__warningregistry__', None)
new = set(sys.modules.values())
if new - old:
end = False
if end:
break
for v in list(sys.modules.values()):
old = set(sys.modules.values())
_ = getattr(v, '__warningregistry__', None)
new = set(sys.modules.values())
if new - old:
print("changed: {0}".format(new - old))
class TestDeprecatedClassName(unittest.TestCase):
def test_check_class(self):
@deprecated_class_name('bad_name')
class Test1(object):
def __init__(self, param):
self.param = param
def method(self):
return self.param
instance = Test1('value')
self.assertEqual('value', instance.method())
self.assertIsInstance(instance, bad_name)
self.assertIsInstance(instance, Test1)
with self.assertWarns(DeprecationWarning) as e:
instance = bad_name('value')
self.assertIn('Test1', str(e.warning))
self.assertIn('bad_name', str(e.warning))
with self.assertWarns(DeprecationWarning) as e:
val = bad_name('value')
self.assertIn('Test1', str(e.warning))
self.assertIn('bad_name', str(e.warning))
def test_check_callable(self):
@deprecated_class_name('bad_func')
def good_func(param):
return "got '{0}'".format(param)
self.assertEqual("got 'some'", good_func('some'))
with self.assertWarns(DeprecationWarning) as e:
val = bad_func('other')
self.assertIn('good_func', str(e.warning))
self.assertIn('bad_func', str(e.warning))
self.assertEqual("got 'other'", val)
def test_check_with_duplicated_name(self):
@deprecated_class_name('bad_func2')
def good_func():
return None
with self.assertRaises(NameError):
@deprecated_class_name('bad_func2')
def other_func():
return None
class TestDeprecatedParams(unittest.TestCase):
def test_no_changes(self):
@deprecated_params({})
def method(param_a, param_b):
"""Some doc string."""
return (param_a, param_b)
a = mock.Mock()
b = mock.Mock()
r = method(param_a=a, param_b=b)
self.assertIsInstance(r, tuple)
self.assertEqual(r, (a, b))
self.assertIs(r[0], a)
self.assertIs(r[1], b)
self.assertEqual("Some doc string.", method.__doc__)
def test_change_param(self):
@deprecated_params({'param_a': 'old_param'})
def method(param_a, param_b):
return (param_a, param_b)
old = mock.Mock()
b = mock.Mock()
with self.assertWarns(DeprecationWarning) as e:
r = method(old_param=old, param_b=b)
self.assertIsInstance(r, tuple)
self.assertEqual(r, (old, b))
self.assertIs(r[0], old)
self.assertIs(r[1], b)
self.assertIn('old_param', str(e.warning))
def test_both_params(self):
@deprecated_params({'param_a': 'older_param'})
def method(param_a, param_b):
return (param_a, param_b)
a = mock.Mock()
b = mock.Mock()
c = mock.Mock()
with self.assertRaises(TypeError) as e:
method(param_a=a, param_b=b, older_param=c)
self.assertIn('multiple values', str(e.exception))
def test_in_class(self):
class Clazz(object):
@staticmethod
@deprecated_params({"new_param": "old_param"})
def method(param, new_param=None):
return "{0} {1}".format(param, new_param)
instance = Clazz()
self.assertEqual(instance.method("aa", "BB"), "aa BB")
with self.assertWarns(DeprecationWarning) as e:
self.assertEqual(instance.method("aa", old_param="CC"), "aa CC")
self.assertIn("old_param", str(e.warning))
self.assertIn("new_param", str(e.warning))
with self.assertWarns(DeprecationWarning) as e:
self.assertEqual(Clazz.method("g", old_param="D"), "g D")
self.assertIn("old_param", str(e.warning))
self.assertIn("new_param", str(e.warning))
def test_deprecated_twice(self):
@deprecated_params({'param_a': 'paramA'})
@deprecated_params({'param_b': 'ParamB'},
"{old_name} custom {new_name}")
def method(param_a, param_b):
return "{0} {1}".format(param_a, param_b)
with self.assertWarns(DeprecationWarning) as e:
self.assertEqual(method(paramA="aa", param_b="ZZ"), "aa ZZ")
self.assertIn("paramA", str(e.warning))
self.assertIn("param_a", str(e.warning))
self.assertNotIn("custom", str(e.warning))
with self.assertWarns(DeprecationWarning) as e:
self.assertEqual(method("aa", ParamB="zz"), "aa zz")
self.assertIn("ParamB", str(e.warning))
self.assertIn("param_b", str(e.warning))
self.assertIn("custom", str(e.warning))
class TestDeprecatedFields(unittest.TestCase):
def test_no_change(self):
@deprecated_attrs({})
class Clazz(object):
"""Some nice class."""
class_field = "I'm class_field"
def __init__(self):
self.new_field = "I'm new_field"
def new_method(self):
"""Good method."""
return "in new_method"
@staticmethod
def new_static_method():
return "in new_static_method"
@classmethod
def new_cls_method(cls, param):
return "cls methd: {0}".format(param)
instance = Clazz()
self.assertEqual(instance.new_field, "I'm new_field")
self.assertEqual(instance.class_field, "I'm class_field")
self.assertEqual(instance.new_method(), "in new_method")
self.assertEqual(instance.new_static_method(), "in new_static_method")
self.assertEqual(instance.new_cls_method("a"), "cls methd: a")
self.assertEqual(Clazz.new_cls_method("a"), "cls methd: a")
self.assertEqual(Clazz.new_static_method(), "in new_static_method")
self.assertEqual(instance.__doc__, "Some nice class.")
self.assertEqual(instance.new_method.__doc__, "Good method.")
def test_deprecated_instance_variable(self):
@deprecated_attrs({"new_field": "old_field"})
class Clazz(object):
def __init__(self):
self.new_field = "I'm new_field"
instance = Clazz()
self.assertEqual(instance.new_field, "I'm new_field")
with self.assertWarns(DeprecationWarning) as e:
self.assertEqual(instance.old_field, "I'm new_field")
instance.old_field = "I've been set"
self.assertEqual(instance.new_field, "I've been set")
self.assertIn("old_field", str(e.warning))
with self.assertWarns(DeprecationWarning):
del instance.old_field
self.assertFalse(hasattr(instance, "new_field"))
def test_deprecated_instance_method(self):
@deprecated_attrs({"new_method": "old_method"})
class Clazz(object):
def new_method(self, param):
return "new_method: {0}".format(param)
instance = Clazz()
self.assertEqual(instance.new_method("aa"), "new_method: aa")
with self.assertWarns(DeprecationWarning) as e:
self.assertEqual(instance.old_method("aa"), "new_method: aa")
self.assertIn("old_method", str(e.warning))
def test_deprecated_class_method(self):
@deprecated_attrs({"foo": "bar"})
class Clazz(object):
@classmethod
def foo(cls, arg):
return "foo: {0}".format(arg)
instance = Clazz()
self.assertEqual(instance.foo("aa"), "foo: aa")
self.assertEqual(Clazz.foo("aa"), "foo: aa")
with self.assertWarns(DeprecationWarning) as e:
self.assertEqual(instance.bar("aa"), "foo: aa")
self.assertIn("bar", str(e.warning))
with self.assertWarns(DeprecationWarning) as e:
self.assertEqual(Clazz.bar("aa"), "foo: aa")
self.assertIn("bar", str(e.warning))
self.assertFalse(hasattr(Clazz, "non_existing"))
def test_deprecated_static_method(self):
@deprecated_attrs({"new_stic": "old_stic"})
class Clazz(object):
@staticmethod
def new_stic(param):
return "new_stic: {0}".format(param)
instance = Clazz()
self.assertEqual(instance.new_stic("aaa"), "new_stic: aaa")
self.assertEqual(Clazz.new_stic("aaa"), "new_stic: aaa")
with self.assertWarns(DeprecationWarning) as e:
self.assertEqual(instance.old_stic("aaa"), "new_stic: aaa")
self.assertIn("old_stic", str(e.warning))
with self.assertWarns(DeprecationWarning) as e:
self.assertEqual(Clazz.old_stic("aaa"), "new_stic: aaa")
self.assertIn("old_stic", str(e.warning))
def test_deprecated_class_variable(self):
@deprecated_attrs({"new_cvar": "old_cvar"})
class Clazz(object):
new_cvar = "some string"
def method(self):
return self.new_cvar
instance = Clazz()
self.assertEqual(instance.method(), "some string")
Clazz.new_cvar = bytearray(b"new string")
self.assertEqual(instance.new_cvar, b"new string")
with self.assertWarns(DeprecationWarning) as e:
self.assertEqual(instance.old_cvar, b"new string")
self.assertIn("old_cvar", str(e.warning))
self.assertIn("new_cvar", str(e.warning))
with self.assertWarns(DeprecationWarning) as e:
self.assertEqual(Clazz.old_cvar, b"new string")
self.assertIn("old_cvar", str(e.warning))
self.assertIn("new_cvar", str(e.warning))
# direct assignment to old value won't work, ex:
# Clazz.old_cvar = b'newest string'
with self.assertWarns(DeprecationWarning) as e:
Clazz.old_cvar[:] = b"newest string"
self.assertIn("old_cvar", str(e.warning))
self.assertIn("new_cvar", str(e.warning))
self.assertEqual(instance.method(), b"newest string")
def test_class_with_custom_getattr(self):
@deprecated_attrs({"new_cvar": "old_cvar"})
class Clazz(object):
new_cvar = "first title"
def __getattr__(self, name):
                if name == "interesting":
return "some value"
raise AttributeError("Clazz does not have {0}".format(name))
instance = Clazz()
        self.assertEqual(instance.interesting, "some value")
with self.assertWarns(DeprecationWarning) as e:
self.assertEqual(instance.old_cvar, "first title")
self.assertFalse(hasattr(instance, "non_existing"))
def test_deprecated_attrs_variable_deletion(self):
@deprecated_attrs({"new_cvar": "old_cvar"})
class Clazz(object):
new_cvar = "first title"
def __init__(self):
self.val = "something"
@classmethod
def method(cls):
return cls.new_cvar
instance = Clazz()
self.assertEqual(instance.method(), "first title")
self.assertEqual(instance.new_cvar, "first title")
with self.assertWarns(DeprecationWarning) as e:
self.assertEqual(Clazz.old_cvar, "first title")
self.assertIn("old_cvar", str(e.warning))
self.assertIn("new_cvar", str(e.warning))
with self.assertWarns(DeprecationWarning) as e:
self.assertEqual(instance.old_cvar, "first title")
self.assertIn("old_cvar", str(e.warning))
self.assertIn("new_cvar", str(e.warning))
Clazz.new_cvar = "second"
self.assertEqual(instance.method(), "second")
with self.assertWarns(DeprecationWarning) as e:
self.assertEqual(instance.old_cvar, "second")
self.assertIn("old_cvar", str(e.warning))
self.assertIn("new_cvar", str(e.warning))
with self.assertWarns(DeprecationWarning) as e:
Clazz.old_cvar = "third"
self.assertIn("old_cvar", str(e.warning))
self.assertIn("new_cvar", str(e.warning))
self.assertEqual(instance.method(), "third")
self.assertEqual(Clazz.new_cvar, "third")
self.assertEqual(instance.new_cvar, "third")
with self.assertWarns(DeprecationWarning) as e:
del Clazz.old_cvar
self.assertIn("old_cvar", str(e.warning))
self.assertIn("new_cvar", str(e.warning))
self.assertFalse(hasattr(Clazz, "new_cvar"))
with self.assertWarns(DeprecationWarning) as e:
self.assertFalse(hasattr(Clazz, "old_cvar"))
def test_class_variable_deletion(self):
@deprecated_attrs({"new_cvar": "old_cvar"})
class Clazz(object):
new_cvar = "first title"
@classmethod
def method(cls):
return cls.new_cvar
instance = Clazz()
self.assertEqual(instance.method(), "first title")
self.assertEqual(instance.new_cvar, "first title")
with self.assertWarns(DeprecationWarning) as e:
self.assertEqual(Clazz.old_cvar, "first title")
self.assertIn("old_cvar", str(e.warning))
self.assertIn("new_cvar", str(e.warning))
with self.assertWarns(DeprecationWarning) as e:
self.assertEqual(instance.old_cvar, "first title")
self.assertIn("old_cvar", str(e.warning))
self.assertIn("new_cvar", str(e.warning))
Clazz.new_cvar = "second"
self.assertEqual(instance.method(), "second")
with self.assertWarns(DeprecationWarning) as e:
self.assertEqual(instance.old_cvar, "second")
self.assertIn("old_cvar", str(e.warning))
self.assertIn("new_cvar", str(e.warning))
with self.assertWarns(DeprecationWarning) as e:
Clazz.old_cvar = "third"
self.assertIn("old_cvar", str(e.warning))
self.assertIn("new_cvar", str(e.warning))
self.assertEqual(instance.method(), "third")
self.assertEqual(Clazz.new_cvar, "third")
self.assertEqual(instance.new_cvar, "third")
with self.assertWarns(DeprecationWarning) as e:
del Clazz.old_cvar
self.assertIn("old_cvar", str(e.warning))
self.assertIn("new_cvar", str(e.warning))
self.assertFalse(hasattr(Clazz, "new_cvar"))
with self.assertWarns(DeprecationWarning) as e:
self.assertFalse(hasattr(Clazz, "old_cvar"))
class TestDeprecatedMethods(unittest.TestCase):
def test_deprecated_method(self):
@deprecated_method("Please use foo method instead.")
def test(param):
return param
with self.assertWarns(DeprecationWarning) as e:
r = test("test")
self.assertEqual(r, "test")
self.assertEqual("test is a deprecated method. Please" \
" use foo method instead.",
str(e.warning))
|
py | 1a395ca19f002b190110f4a561cf7efebe88b26e | from unittest import TestCase
from scheduler.jobs_board import JobsBoard
from scheduler.match_maker import MatchMaker
from scheduler.partial_slot_scheduler import PartialSlotScheduler
from scheduler.tests.helpers import *
class TestPartialSlotScheduler(TestCase):
def setUp(self):
self.volunteers = {
'Jack': create_chef('Jack'),
'John': create_chef('John'),
'Jill': create_delivery_driver('Jill'),
'Sue': create_food_critic('Sue')
}
self.constraints_by_role = {
'chef': [can_cook],
'taster': [can_critique],
'delivery': [can_deliver]
}
def test_merges_two_schedules(self):
matchmaker = MatchMaker(self.volunteers, self.constraints_by_role)
jobs = JobsBoard(
['chef'],
['delivery'],
['taster']
)
partial_schedule = {
'Jack': 'manager'
}
scheduler = PartialSlotScheduler(jobs, matchmaker)
assert scheduler.generate_schedule(partial_schedule) == {
'Jack': 'manager',
'John': 'chef',
'Sue': 'taster',
'Jill': 'delivery'
}
|
py | 1a395ea1714023e81fa1c2333bd662bfd122b1c7 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import re
import sys
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
def get_version(*file_paths):
"""Retrieves the version from exo_changelog/__init__.py"""
filename = os.path.join(os.path.dirname(__file__), *file_paths)
version_file = open(filename).read()
version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
version_file, re.M)
if version_match:
return version_match.group(1)
raise RuntimeError('Unable to find version string.')
version = get_version('exo_changelog', '__init__.py')
if sys.argv[-1] == 'publish':
try:
import wheel
print('Wheel version: ', wheel.__version__)
except ImportError:
print('Wheel library missing. Please run "pip install wheel"')
sys.exit()
os.system('python setup.py sdist upload')
os.system('python setup.py bdist_wheel upload')
sys.exit()
if sys.argv[-1] == 'tag':
print('Tagging the version on git:')
os.system("git tag -a %s -m 'version %s'" % (version, version))
os.system('git push --tags')
sys.exit()
readme = open('README.rst').read()
history = open('HISTORY.rst').read().replace('.. :changelog:', '')
setup(
name='exo-changelog',
version=version,
description="""Manage changelog as migrations""",
long_description=readme + '\n\n' + history,
author='Tomas Garzon',
author_email='[email protected]',
url='https://github.com/tomasgarzon/exo-changelog',
packages=[
'exo_changelog',
],
include_package_data=True,
install_requires=['django-model-utils>=2.0', ],
license='BSD',
zip_safe=False,
keywords='exo-changelog',
classifiers=[
'Development Status :: 3 - Alpha',
'Framework :: Django :: 1.11',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
],
)
|
py | 1a395efffe88f4ea88afecfc38bbe0752ca33bb9 | import os
import platform
import sys
def is_active():
return True
def get_name():
return "LinuxBSD"
def can_build():
if (os.name != "posix" or sys.platform == "darwin"):
return False
# Check the minimal dependencies
x11_error = os.system("pkg-config --version > /dev/null")
if (x11_error):
return False
x11_error = os.system("pkg-config x11 --modversion > /dev/null ")
if (x11_error):
return False
x11_error = os.system("pkg-config xcursor --modversion > /dev/null ")
if (x11_error):
print("xcursor not found.. x11 disabled.")
return False
x11_error = os.system("pkg-config xinerama --modversion > /dev/null ")
if (x11_error):
print("xinerama not found.. x11 disabled.")
return False
x11_error = os.system("pkg-config xrandr --modversion > /dev/null ")
if (x11_error):
print("xrandr not found.. x11 disabled.")
return False
x11_error = os.system("pkg-config xrender --modversion > /dev/null ")
if (x11_error):
print("xrender not found.. x11 disabled.")
return False
x11_error = os.system("pkg-config xi --modversion > /dev/null ")
if (x11_error):
print("xi not found.. Aborting.")
return False
return True
def get_opts():
from SCons.Variables import BoolVariable, EnumVariable
return [
BoolVariable('use_llvm', 'Use the LLVM compiler', False),
BoolVariable('use_lld', 'Use the LLD linker', False),
BoolVariable('use_thinlto', 'Use ThinLTO', False),
BoolVariable('use_static_cpp', 'Link libgcc and libstdc++ statically for better portability', False),
BoolVariable('use_coverage', 'Test Godot coverage', False),
BoolVariable('use_ubsan', 'Use LLVM/GCC compiler undefined behavior sanitizer (UBSAN)', False),
BoolVariable('use_asan', 'Use LLVM/GCC compiler address sanitizer (ASAN))', False),
BoolVariable('use_lsan', 'Use LLVM/GCC compiler leak sanitizer (LSAN))', False),
BoolVariable('use_tsan', 'Use LLVM/GCC compiler thread sanitizer (TSAN))', False),
BoolVariable('pulseaudio', 'Detect and use PulseAudio', True),
BoolVariable('udev', 'Use udev for gamepad connection callbacks', False),
EnumVariable('debug_symbols', 'Add debugging symbols to release builds', 'yes', ('yes', 'no', 'full')),
BoolVariable('separate_debug_symbols', 'Create a separate file containing debugging symbols', False),
BoolVariable('touch', 'Enable touch events', True),
BoolVariable('execinfo', 'Use libexecinfo on systems where glibc is not available', False),
]
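# Illustrative invocation (added for clarity; flag values are examples only):
#   scons platform=linuxbsd target=release_debug use_llvm=yes use_lld=yes debug_symbols=yes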
def get_flags():
return []
def configure(env):
## Build type
if (env["target"] == "release"):
if (env["optimize"] == "speed"): #optimize for speed (default)
env.Prepend(CCFLAGS=['-O3'])
else: #optimize for size
env.Prepend(CCFLAGS=['-Os'])
if (env["debug_symbols"] == "yes"):
env.Prepend(CCFLAGS=['-g1'])
if (env["debug_symbols"] == "full"):
env.Prepend(CCFLAGS=['-g2'])
elif (env["target"] == "release_debug"):
if (env["optimize"] == "speed"): #optimize for speed (default)
env.Prepend(CCFLAGS=['-O2'])
else: #optimize for size
env.Prepend(CCFLAGS=['-Os'])
env.Prepend(CPPDEFINES=['DEBUG_ENABLED'])
if (env["debug_symbols"] == "yes"):
env.Prepend(CCFLAGS=['-g1'])
if (env["debug_symbols"] == "full"):
env.Prepend(CCFLAGS=['-g2'])
elif (env["target"] == "debug"):
env.Prepend(CCFLAGS=['-g3'])
env.Prepend(CPPDEFINES=['DEBUG_ENABLED', 'DEBUG_MEMORY_ENABLED'])
env.Append(LINKFLAGS=['-rdynamic'])
## Architecture
is64 = sys.maxsize > 2**32
if (env["bits"] == "default"):
env["bits"] = "64" if is64 else "32"
## Compiler configuration
if 'CXX' in env and 'clang' in os.path.basename(env['CXX']):
# Convenience check to enforce the use_llvm overrides when CXX is clang(++)
env['use_llvm'] = True
if env['use_llvm']:
if ('clang++' not in os.path.basename(env['CXX'])):
env["CC"] = "clang"
env["CXX"] = "clang++"
env["LINK"] = "clang++"
env.Append(CPPDEFINES=['TYPED_METHOD_BIND'])
env.extra_suffix = ".llvm" + env.extra_suffix
if env['use_lld']:
if env['use_llvm']:
env.Append(LINKFLAGS=['-fuse-ld=lld'])
if env['use_thinlto']:
# A convenience so you don't need to write use_lto too when using SCons
env['use_lto'] = True
else:
print("Using LLD with GCC is not supported yet, try compiling with 'use_llvm=yes'.")
sys.exit(255)
if env['use_coverage']:
env.Append(CCFLAGS=['-ftest-coverage', '-fprofile-arcs'])
env.Append(LINKFLAGS=['-ftest-coverage', '-fprofile-arcs'])
if env['use_ubsan'] or env['use_asan'] or env['use_lsan'] or env['use_tsan']:
env.extra_suffix += "s"
if env['use_ubsan']:
env.Append(CCFLAGS=['-fsanitize=undefined'])
env.Append(LINKFLAGS=['-fsanitize=undefined'])
if env['use_asan']:
env.Append(CCFLAGS=['-fsanitize=address'])
env.Append(LINKFLAGS=['-fsanitize=address'])
if env['use_lsan']:
env.Append(CCFLAGS=['-fsanitize=leak'])
env.Append(LINKFLAGS=['-fsanitize=leak'])
if env['use_tsan']:
env.Append(CCFLAGS=['-fsanitize=thread'])
env.Append(LINKFLAGS=['-fsanitize=thread'])
if env['use_lto']:
if not env['use_llvm'] and env.GetOption("num_jobs") > 1:
env.Append(CCFLAGS=['-flto'])
env.Append(LINKFLAGS=['-flto=' + str(env.GetOption("num_jobs"))])
else:
if env['use_lld'] and env['use_thinlto']:
env.Append(CCFLAGS=['-flto=thin'])
env.Append(LINKFLAGS=['-flto=thin'])
else:
env.Append(CCFLAGS=['-flto'])
env.Append(LINKFLAGS=['-flto'])
if not env['use_llvm']:
env['RANLIB'] = 'gcc-ranlib'
env['AR'] = 'gcc-ar'
env.Append(CCFLAGS=['-pipe'])
env.Append(LINKFLAGS=['-pipe'])
    # -fpie and -no-pie are supported on GCC 6+ and Clang 4+, both below our
# minimal requirements.
env.Append(CCFLAGS=['-fpie'])
env.Append(LINKFLAGS=['-no-pie'])
## Dependencies
env.ParseConfig('pkg-config x11 --cflags --libs')
env.ParseConfig('pkg-config xcursor --cflags --libs')
env.ParseConfig('pkg-config xinerama --cflags --libs')
env.ParseConfig('pkg-config xrandr --cflags --libs')
env.ParseConfig('pkg-config xrender --cflags --libs')
env.ParseConfig('pkg-config xi --cflags --libs')
if (env['touch']):
env.Append(CPPDEFINES=['TOUCH_ENABLED'])
# FIXME: Check for existence of the libs before parsing their flags with pkg-config
# freetype depends on libpng and zlib, so bundling one of them while keeping others
# as shared libraries leads to weird issues
if env['builtin_freetype'] or env['builtin_libpng'] or env['builtin_zlib']:
env['builtin_freetype'] = True
env['builtin_libpng'] = True
env['builtin_zlib'] = True
if not env['builtin_freetype']:
env.ParseConfig('pkg-config freetype2 --cflags --libs')
if not env['builtin_libpng']:
env.ParseConfig('pkg-config libpng16 --cflags --libs')
if not env['builtin_bullet']:
# We need at least version 2.89
import subprocess
        bullet_version = subprocess.check_output(['pkg-config', 'bullet', '--modversion']).strip().decode("utf-8")
        if bullet_version < "2.89":
# Abort as system bullet was requested but too old
print("Bullet: System version {0} does not match minimal requirements ({1}). Aborting.".format(bullet_version, "2.89"))
sys.exit(255)
env.ParseConfig('pkg-config bullet --cflags --libs')
if False: # not env['builtin_assimp']:
# FIXME: Add min version check
env.ParseConfig('pkg-config assimp --cflags --libs')
if not env['builtin_enet']:
env.ParseConfig('pkg-config libenet --cflags --libs')
if not env['builtin_squish']:
env.ParseConfig('pkg-config libsquish --cflags --libs')
if not env['builtin_zstd']:
env.ParseConfig('pkg-config libzstd --cflags --libs')
# Sound and video libraries
# Keep the order as it triggers chained dependencies (ogg needed by others, etc.)
if not env['builtin_libtheora']:
env['builtin_libogg'] = False # Needed to link against system libtheora
env['builtin_libvorbis'] = False # Needed to link against system libtheora
env.ParseConfig('pkg-config theora theoradec --cflags --libs')
else:
list_of_x86 = ['x86_64', 'x86', 'i386', 'i586']
if any(platform.machine() in s for s in list_of_x86):
env["x86_libtheora_opt_gcc"] = True
if not env['builtin_libvpx']:
env.ParseConfig('pkg-config vpx --cflags --libs')
if not env['builtin_libvorbis']:
env['builtin_libogg'] = False # Needed to link against system libvorbis
env.ParseConfig('pkg-config vorbis vorbisfile --cflags --libs')
if not env['builtin_opus']:
env['builtin_libogg'] = False # Needed to link against system opus
env.ParseConfig('pkg-config opus opusfile --cflags --libs')
if not env['builtin_libogg']:
env.ParseConfig('pkg-config ogg --cflags --libs')
if not env['builtin_libwebp']:
env.ParseConfig('pkg-config libwebp --cflags --libs')
if not env['builtin_mbedtls']:
# mbedTLS does not provide a pkgconfig config yet. See https://github.com/ARMmbed/mbedtls/issues/228
env.Append(LIBS=['mbedtls', 'mbedcrypto', 'mbedx509'])
if not env['builtin_wslay']:
env.ParseConfig('pkg-config libwslay --cflags --libs')
if not env['builtin_miniupnpc']:
# No pkgconfig file so far, hardcode default paths.
env.Prepend(CPPPATH=["/usr/include/miniupnpc"])
env.Append(LIBS=["miniupnpc"])
# On Linux wchar_t should be 32-bits
# 16-bit library shouldn't be required due to compiler optimisations
if not env['builtin_pcre2']:
env.ParseConfig('pkg-config libpcre2-32 --cflags --libs')
## Flags
if (os.system("pkg-config --exists alsa") == 0): # 0 means found
print("Enabling ALSA")
env.Append(CPPDEFINES=["ALSA_ENABLED", "ALSAMIDI_ENABLED"])
# Don't parse --cflags, we don't need to add /usr/include/alsa to include path
env.ParseConfig('pkg-config alsa --libs')
else:
print("ALSA libraries not found, disabling driver")
if env['pulseaudio']:
if (os.system("pkg-config --exists libpulse") == 0): # 0 means found
print("Enabling PulseAudio")
env.Append(CPPDEFINES=["PULSEAUDIO_ENABLED"])
env.ParseConfig('pkg-config --cflags --libs libpulse')
else:
print("PulseAudio development libraries not found, disabling driver")
if (platform.system() == "Linux"):
env.Append(CPPDEFINES=["JOYDEV_ENABLED"])
if env['udev']:
if (os.system("pkg-config --exists libudev") == 0): # 0 means found
print("Enabling udev support")
env.Append(CPPDEFINES=["UDEV_ENABLED"])
env.ParseConfig('pkg-config libudev --cflags --libs')
else:
print("libudev development libraries not found, disabling udev support")
# Linkflags below this line should typically stay the last ones
if not env['builtin_zlib']:
env.ParseConfig('pkg-config zlib --cflags --libs')
env.Prepend(CPPPATH=['#platform/linuxbsd'])
env.Append(CPPDEFINES=['X11_ENABLED', 'UNIX_ENABLED'])
env.Append(CPPDEFINES=['VULKAN_ENABLED'])
if not env['builtin_vulkan']:
env.ParseConfig('pkg-config vulkan --cflags --libs')
if not env['builtin_glslang']:
# No pkgconfig file for glslang so far
env.Append(LIBS=['glslang', 'SPIRV'])
#env.Append(CPPDEFINES=['OPENGL_ENABLED'])
env.Append(LIBS=['GL'])
env.Append(LIBS=['pthread'])
if (platform.system() == "Linux"):
env.Append(LIBS=['dl'])
if (platform.system().find("BSD") >= 0):
env["execinfo"] = True
if env["execinfo"]:
env.Append(LIBS=['execinfo'])
if not env['tools']:
import subprocess
import re
linker_version_str = subprocess.check_output([env.subst(env["LINK"]), '-Wl,--version']).decode("utf-8")
        gnu_ld_version = re.search(r'^GNU ld [^$]*(\d+\.\d+)$', linker_version_str, re.MULTILINE)
if not gnu_ld_version:
print("Warning: Creating template binaries enabled for PCK embedding is currently only supported with GNU ld")
else:
if float(gnu_ld_version.group(1)) >= 2.30:
env.Append(LINKFLAGS=['-T', 'platform/linuxbsd/pck_embed.ld'])
else:
env.Append(LINKFLAGS=['-T', 'platform/linuxbsd/pck_embed.legacy.ld'])
## Cross-compilation
if (is64 and env["bits"] == "32"):
env.Append(CCFLAGS=['-m32'])
env.Append(LINKFLAGS=['-m32', '-L/usr/lib/i386-linux-gnu'])
elif (not is64 and env["bits"] == "64"):
env.Append(CCFLAGS=['-m64'])
env.Append(LINKFLAGS=['-m64', '-L/usr/lib/i686-linux-gnu'])
# Link those statically for portability
if env['use_static_cpp']:
env.Append(LINKFLAGS=['-static-libgcc', '-static-libstdc++'])
|
py | 1a395f1fba6edd124e9af82adc70515eb4a5d731 | class AttributeContainer(object):
"""Generic class to hold attributes."""
def __init__(self, **kw):
self.__dict__ = kw
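# Illustrative usage (not part of the original file): AttributeContainer simply
# maps keyword arguments onto instance attributes, so it can stand in for a
# lightweight config/record object.
if __name__ == "__main__":
    opts = AttributeContainer(host="localhost", port=8080, debug=True)
    print(opts.host, opts.port, opts.debug)  # -> localhost 8080 True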
|
py | 1a395f477e8d93ad41e26aafc379e1d006a10bed | # Run, then execute
'''
dump(JSON.stringify(usedFunctions))
'''
# then strip with profile_strip.py
from __future__ import print_function
import sys
print('var usedFunctions = {};')
for line in open(sys.argv[1]).readlines():
line = line.strip()
print(line)
if line.startswith('function _') and line.endswith(('){', ') {')):
name = line.split(' ')[1].split('(')[0]
print('usedFunctions["%s"] = 1;' % name)
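# Worked example (assumed input): for a compiled-JS line such as
#   function _malloc($bytes) {
# the loop re-emits the line and injects
#   usedFunctions["_malloc"] = 1;
# as the first statement of the function body, so running the instrumented
# output records which functions were actually called at runtime.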
|
py | 1a395f632dc6ee479203e2e20a87f500ee1bfc21 | # Copyright 2018 The Texar Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""SeqGAN for language modeling
"""
import os
import argparse
import importlib
import tensorflow as tf
import texar as tx
parser = argparse.ArgumentParser(description='prepare data')
parser.add_argument('--dataset', type=str, default='ptb',
help='dataset to prepare')
parser.add_argument('--data_path', type=str, default='./',
help="Directory containing coco. If not exists, "
"the directory will be created, and the data "
"will be downloaded.")
parser.add_argument('--config', type=str, default='config_ptb_small',
help='The config to use.')
args = parser.parse_args()
config = importlib.import_module(args.config)
def prepare_data(args, config, train_path):
"""Downloads the PTB or COCO dataset
"""
if not os.path.exists(config.log_dir):
os.mkdir(config.log_dir)
ptb_url = 'https://jxhe.github.io/download/ptb_data.tgz'
coco_url = 'https://VegB.github.io/downloads/coco_data.tgz'
data_path = args.data_path
if not tf.gfile.Exists(train_path):
url = ptb_url if args.dataset == 'ptb' else coco_url
tx.data.maybe_download(url, data_path, extract=True)
os.remove('%s_data.tgz' % args.dataset)
if __name__ == '__main__':
prepare_data(args, config, config.train_data_hparams['dataset']['files'])
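# Example invocation (illustrative; file and config names are assumptions):
#   python prepare_data.py --dataset coco --data_path ./data/ --config config_coco
# If the training file referenced by the config is missing, the matching
# <dataset>_data.tgz archive is downloaded, extracted, and then removed.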
|
py | 1a3961b48e4c0546877580f92e68271630bc5982 | # Copyright (C) 2015 Yahoo! Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.urls import reverse
from django.utils.translation import ugettext_lazy as _
from horizon import forms
from openstack_dashboard.dashboards.identity.identity_providers.protocols \
import forms as protocol_forms
class AddProtocolView(forms.ModalFormView):
template_name = 'identity/identity_providers/protocols/create.html'
form_id = "create_protocol_form"
form_class = protocol_forms.AddProtocolForm
submit_label = _("Create Protocol")
success_url = "horizon:identity:identity_providers:protocols_tab"
page_title = _("Create Protocol")
def get_success_url(self):
return reverse(self.success_url,
args=(self.kwargs['identity_provider_id'],))
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context["submit_url"] = reverse(
"horizon:identity:identity_providers:protocols:create",
args=(self.kwargs['identity_provider_id'],))
return context
def get_initial(self):
return {"idp_id": self.kwargs['identity_provider_id']}
|
py | 1a39630175138c4f6a51da79fdc4cd62eec82c7e | # Copyright 2018 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import configparser
import logging
import math
import os
from datetime import datetime
from airflow.exceptions import AirflowException
from airflow.models import BaseOperator
from airflow.plugins_manager import AirflowPlugin
from airflow.utils.decorators import apply_defaults
import sqlalchemy
try:
from deckhand_client_factory import DeckhandClientFactory
import service_endpoint
from get_k8s_logs import get_pod_logs
from get_k8s_logs import K8sLoggingException
from service_token import shipyard_service_token
from xcom_puller import XcomPuller
except ImportError:
from shipyard_airflow.plugins.deckhand_client_factory import \
DeckhandClientFactory
from shipyard_airflow.plugins import service_endpoint
from shipyard_airflow.plugins.get_k8s_logs import get_pod_logs
from shipyard_airflow.plugins.get_k8s_logs import K8sLoggingException
from shipyard_airflow.plugins.service_token import shipyard_service_token
from shipyard_airflow.plugins.xcom_puller import XcomPuller
from shipyard_airflow.common.document_validators.document_validation_utils \
import DocumentValidationUtils
from shipyard_airflow.common.notes.notes import NotesManager
from shipyard_airflow.common.notes.notes_helper import NotesHelper
from shipyard_airflow.common.notes.storage_impl_db import \
ShipyardSQLNotesStorage
# Configuration sections
BASE = 'base'
K8S_LOGS = 'k8s_logs'
REQUESTS_CONFIG = 'requests_config'
LOG = logging.getLogger(__name__)
class UcpBaseOperator(BaseOperator):
"""Airship Base Operator
All Airship related workflow operators will use the Airship base
operator as the parent and inherit attributes and methods
from this class
"""
@apply_defaults
def __init__(self,
main_dag_name=None,
pod_selector_pattern=None,
shipyard_conf=None,
start_time=None,
xcom_push=True,
*args, **kwargs):
"""Initialization of UcpBaseOperator object.
:param continue_processing: A boolean value on whether to continue
with the workflow. Defaults to True.
:param main_dag_name: Parent Dag
:param pod_selector_pattern: A list containing the information on
the patterns of the Pod name and name
of the associated container for log
queries. This will allow us to query
multiple components, e.g. MAAS and
Drydock at the same time. It also allows
us to query the logs of specific container
in Pods with multiple containers. For
instance the Airflow worker pod contains
both the airflow-worker container and the
log-rotate container.
:param shipyard_conf: Location of shipyard.conf
:param start_time: Time when Operator gets executed
:param xcom_push: xcom usage
"""
super(UcpBaseOperator, self).__init__(*args, **kwargs)
self.continue_processing = True
self.main_dag_name = main_dag_name
self.pod_selector_pattern = pod_selector_pattern or []
self.shipyard_conf = shipyard_conf
self.start_time = datetime.now()
self.xcom_push_flag = xcom_push
# lazy init field to hold a shipyard_db_engine
self._shipyard_db_engine = None
def execute(self, context):
# Setup values that depend on the shipyard configuration
self.doc_utils = _get_document_util(self.shipyard_conf)
self.endpoints = service_endpoint.ServiceEndpoints(self.shipyard_conf)
        # Read and parse shipyard.conf
self.config = configparser.ConfigParser()
self.config.read(self.shipyard_conf)
# Execute Airship base function
self.ucp_base(context)
# Execute base function for child operator
self.run_base(context)
if self.continue_processing:
# Execute child function
try:
self.do_execute()
except Exception:
LOG.exception(
'Exception happened during %s execution, '
'will try to log additional details',
self.__class__.__name__)
self.get_k8s_logs()
if hasattr(self, 'fetch_failure_details'):
self.fetch_failure_details()
raise
def ucp_base(self, context):
LOG.info("Running Airship Base Operator...")
# Configure the notes helper for this run of an operator
# establishes self.notes_helper
self._setup_notes_helper()
# Initialize variable that indicates the kubernetes namespace for the
# Airship components
self.ucp_namespace = self.config.get(K8S_LOGS, 'ucp_namespace')
# Define task_instance
self.task_instance = context['task_instance']
# Set up and retrieve values from xcom
self.xcom_puller = XcomPuller(self.main_dag_name, self.task_instance)
self.action_info = self.xcom_puller.get_action_info()
self.action_type = self.xcom_puller.get_action_type()
self.dc = self.xcom_puller.get_deployment_configuration()
# Set up other common-use values
self.action_id = self.action_info['id']
# extract the `task` or `step` name for easy access
self.task_id = self.task_instance.task_id
self.revision_id = self.action_info['committed_rev_id']
self.action_params = self.action_info.get('parameters', {})
self.design_ref = self._deckhand_design_ref()
self._setup_target_nodes()
def get_k8s_logs(self):
"""Retrieve Kubernetes pod/container logs specified by an operator
This method is "best effort" and should not prevent the progress of
the workflow processing
"""
if self.pod_selector_pattern:
for selector in self.pod_selector_pattern:
# Get difference in current time and time when the
# operator was first executed (in seconds)
t_diff = (datetime.now() - self.start_time).total_seconds()
# Note that we will end up with a floating number for
# 't_diff' and will need to round it up to the nearest
# integer
t_diff_int = int(math.ceil(t_diff))
try:
get_pod_logs(selector['pod_pattern'],
self.ucp_namespace,
selector['container'],
t_diff_int)
except K8sLoggingException as e:
LOG.error(e)
else:
LOG.debug("There are no pod logs specified to retrieve")
def _setup_target_nodes(self):
"""Sets up the target nodes field for this action
When managing a targeted action, this step needs to resolve the
target node. If there are no targets found (should be caught before
invocation of the DAG), then raise an exception so that it does not
try to take action on more nodes than targeted.
Later, when creating the deployment group, if this value
(self.target_nodes) is set, it will be used in lieu of the design
based deployment strategy.
target_nodes will be a comma separated string provided as part of the
parameters to an action on input to Shipyard.
"""
if self.action_type == 'targeted':
t_nodes = self.action_params.get('target_nodes', '')
self.target_nodes = [n.strip() for n in t_nodes.split(',')]
if not self.target_nodes:
raise AirflowException(
'{} ({}) requires targeted nodes, but was unable to '
'resolve any targets in {}'.format(
self.main_dag_name, self.action_id,
self.__class__.__name__
)
)
LOG.info("Target Nodes for action: [%s]",
', '.join(self.target_nodes))
else:
self.target_nodes = None
def _deckhand_design_ref(self):
"""Assemble a deckhand design_ref"""
# Retrieve DeckHand Endpoint Information
LOG.info("Assembling a design ref using revision: %s",
self.revision_id)
deckhand_svc_endpoint = self.endpoints.endpoint_by_name(
service_endpoint.DECKHAND
)
# This URL will be used to retrieve the Site Design YAMLs
deckhand_path = "deckhand+{}".format(deckhand_svc_endpoint)
design_ref = os.path.join(deckhand_path,
"revisions",
str(self.revision_id),
"rendered-documents")
LOG.info("Design Reference is %s", design_ref)
return design_ref
def get_unique_doc(self, schema, name, revision_id=None):
"""Retrieve a specific document from Deckhand
:param schema: the schema of the document
:param name: the metadata.name of the document
:param revision_id: the deckhand revision, or defaults to
self.revision_id
Wraps the document_validation_utils call to get the same.
        Returns the specified document or raises an Airflow exception.
"""
if revision_id is None:
revision_id = self.revision_id
        LOG.info(
            "Retrieving document '%s' (schema %s) from Deckhand revision %s",
            name, schema, revision_id
        )
try:
return self.doc_utils.get_unique_doc(revision_id=revision_id,
name=name,
schema=schema)
except Exception as ex:
LOG.error("A document was expected to be available: Name: %s, "
"Schema: %s, Deckhand revision: %s, but there was an "
"error attempting to retrieve it. Since this document's "
"contents may be critical to the proper operation of "
"the workflow, this is fatal.", schema, name,
revision_id)
LOG.exception(ex)
# if the document is not found for ANY reason, the workflow is
# broken. Raise an Airflow Exception.
raise AirflowException(ex)
def _get_shipyard_db_engine(self):
"""Lazy initialize an engine for the Shipyard database.
:returns: a SQLAlchemy engine for the Shipyard database.
Developer's Note: Initially the idea was to use the PostgresHook and
retrieve an engine from there as is done with the concurrency check,
but since we have easy access to a configuration file, this does
direct SQLAlchemy to get the engine. By using the config, the database
connection is not exposed as environment variables -- which is one way
that Airflow registers database connections for use by the dbApiHook
"""
if self._shipyard_db_engine is None:
connection_string = self.config.get(BASE, 'postgresql_db')
pool_size = self.config.getint(BASE, 'pool_size')
max_overflow = self.config.getint(BASE, 'pool_overflow')
pool_pre_ping = self.config.getboolean(BASE, 'pool_pre_ping')
pool_recycle = self.config.getint(BASE, 'connection_recycle')
pool_timeout = self.config.getint(BASE, 'pool_timeout')
self._shipyard_db_engine = sqlalchemy.create_engine(
connection_string, pool_size=pool_size,
max_overflow=max_overflow,
pool_pre_ping=pool_pre_ping,
pool_recycle=pool_recycle,
pool_timeout=pool_timeout
)
LOG.info("Initialized Shipyard database connection with pool "
"size: %d, max overflow: %d, pool pre ping: %s, pool "
"recycle: %d, and pool timeout: %d",
pool_size, max_overflow,
pool_pre_ping, pool_recycle,
pool_timeout)
return self._shipyard_db_engine
@shipyard_service_token
def _token_getter(self):
        # Get a shipyard service token; the shipyard_service_token decorator sets self.svc_token
return self.svc_token
def _setup_notes_helper(self):
"""Setup a notes helper for use by all descendent operators"""
connect_timeout = self.config.get(REQUESTS_CONFIG,
'notes_connect_timeout')
read_timeout = self.config.get(REQUESTS_CONFIG, 'notes_read_timeout')
self.notes_helper = NotesHelper(
NotesManager(
storage=ShipyardSQLNotesStorage(self._get_shipyard_db_engine),
get_token=self._token_getter,
connect_timeout=connect_timeout,
read_timeout=read_timeout))
def _get_document_util(shipyard_conf):
"""Retrieve an instance of the DocumentValidationUtils"""
dh_client = DeckhandClientFactory(shipyard_conf).get_client()
return DocumentValidationUtils(dh_client)
class UcpBaseOperatorPlugin(AirflowPlugin):
"""Creates UcpBaseOperator in Airflow."""
name = 'ucp_base_operator_plugin'
operators = [UcpBaseOperator]
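# Minimal sketch (not part of Shipyard) of how a concrete operator plugs into
# UcpBaseOperator: execute() above calls run_base(context) and then
# do_execute(), so subclasses are expected to implement those hooks and may
# also provide fetch_failure_details() for richer error reporting.
#
#   class MyComponentOperator(UcpBaseOperator):
#       def run_base(self, context):
#           pass  # operator-specific setup (endpoints, clients, ...)
#       def do_execute(self):
#           LOG.info("Action %s running against revision %s",
#                    self.action_id, self.revision_id)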
|
py | 1a39637ef5431db5d75dae76c81e27dde64d5b96 | # ======================================================================= #
# Copyright (C) 2021 Hoverset Group. #
# ======================================================================= #
import abc
from collections import defaultdict
from formation.formats import Node
from hoverset.data.keymap import KeyMap, CharKey
from hoverset.ui.icons import get_icon_image as icon
from hoverset.ui.widgets import EventMask
from hoverset.util.execution import Action
from hoverset.data.actions import Routine
from hoverset.ui.menu import MenuUtils, EnableIf
from studio.tools._base import BaseTool
from studio.feature.components import ComponentPane, SelectToDrawGroup
from studio.feature.stylepane import StyleGroup
from studio.ui.tree import NestedTreeView
from studio.lib import generate_id
from studio.lib.canvas import *
from studio.lib.legacy import Canvas
from studio.parsers.loader import BaseStudioAdapter, DesignBuilder
class Coordinate:
pool = defaultdict(list)
active = set()
min_radius = 3
max_radius = 5
def __init__(self, canvas, controller, x, y):
self.radius = self.min_radius
self.canvas = canvas
self.controller = controller
self.x = x
self.y = y
self._id = canvas.create_oval(
x - self.radius, y - self.radius, x + self.radius, y + self.radius,
fill=self.controller.tool.studio.style.colors["accent"],
tags=("coordinate", "controller")
)
canvas.tag_bind(self._id, "<ButtonRelease-1>", self._end_drag)
canvas.tag_bind(self._id, "<Motion>", self._drag)
canvas.tag_bind(self._id, "<Enter>", lambda _: self.grow_effect())
canvas.tag_bind(self._id, "<Leave>", lambda _: self.grow_effect(True))
MenuUtils.bind_canvas_context(self.canvas, self._id, self._context_menu)
self.active.add(self)
self._listeners = []
def grow_effect(self, shrink=False):
self.radius = self.min_radius if shrink else self.max_radius
self.place()
def add_listener(self, func, *args, **kwargs):
def callback():
func(*args, **kwargs)
self._listeners.append(callback)
return callback
def remove_listener(self, callback):
if callback in self._listeners:
self._listeners.remove(callback)
def retire(self):
# remove from view without deleting
self.canvas.itemconfigure(self._id, state='hidden')
self.pool["canvas"].append(self)
self._listeners = []
def place(self, x=None, y=None):
x = self.x if x is None else x
y = self.y if y is None else y
self.canvas.coords(
self._id,
x - self.radius, y - self.radius, x + self.radius, y + self.radius
)
self.x = x
self.y = y
for listener in self._listeners:
listener()
def shift(self, delta_x, delta_y):
self.place(self.x + delta_x, self.y + delta_y)
def revive(self, controller, x, y):
self.controller = controller
self.canvas.itemconfigure(self._id, state='normal')
self.place(x, y)
self.active.add(self)
def _context_menu(self, event):
self.controller.on_coord_context(self, event)
def _drag(self, event):
if not event.state & EventMask.MOUSE_BUTTON_1:
return
self.x = self.canvas.canvasx(event.x)
self.y = self.canvas.canvasy(event.y)
self.place()
self.controller.on_coord_change(self)
def _end_drag(self, _):
self.controller.on_release()
@classmethod
def acquire(cls, canvas, controller, x, y):
if len(cls.pool[canvas]):
coord = cls.pool[canvas][0]
cls.pool[canvas].remove(coord)
coord.revive(controller, x, y)
return coord
else:
return cls(canvas, controller, x, y)
class Link:
pool = defaultdict(list)
active = set()
def __init__(self, canvas, controller, coord1, coord2):
self.canvas = canvas
self.controller = controller
self._id = canvas.create_line(
coord1.x, coord1.y, coord2.x, coord2.y,
fill=self.controller.tool.studio.style.colors["accent"],
tag=("link", "controller"), dash=(5, 4), width=2
)
self.link_coord(coord1, coord2)
canvas.tag_bind(self._id, "<ButtonRelease-1>", self._end_drag)
MenuUtils.bind_canvas_context(self.canvas, self._id, self._context_menu)
canvas.tag_bind(self._id, "<Motion>", self._drag)
self.active.add(self)
self._coord_latch = None
def _to_canvas_coord(self, x, y):
return self.canvas.canvasx(x), self.canvas.canvasy(y)
def _context_menu(self, event):
self.controller.on_link_context(self, event)
def _drag(self, event):
if not event.state & EventMask.MOUSE_BUTTON_1:
return
if self._coord_latch:
x, y = self._to_canvas_coord(event.x, event.y)
xl, yl = self._coord_latch
self.controller.on_move(x - xl, y - yl)
self._coord_latch = x, y
else:
self._coord_latch = self._to_canvas_coord(event.x, event.y)
def _end_drag(self, _):
self.controller.on_release()
self._coord_latch = None
def place(self, coord1, coord2):
self.canvas.coords(self._id, coord1.x, coord1.y, coord2.x, coord2.y)
self.canvas.tag_lower(self._id, "coordinate")
def link_coord(self, coord1, coord2):
coord1.add_listener(self.coord_changed)
coord2.add_listener(self.coord_changed)
self.coord1 = coord1
self.coord2 = coord2
self.place(coord1, coord2)
def unlink_coord(self):
self.coord1 = self.coord2 = None
self._listeners = []
def revive(self, controller, coord1, coord2):
self.controller = controller
self.canvas.itemconfigure(self._id, state='normal')
self.link_coord(coord1, coord2)
self.active.add(self)
def retire(self):
# remove from view without deleting
self.canvas.itemconfigure(self._id, state='hidden')
self.pool["canvas"].append(self)
self.unlink_coord()
def coord_changed(self):
self.place(self.coord1, self.coord2)
@classmethod
def acquire(cls, canvas, controller, coord1, coord2):
if len(cls.pool[canvas]):
coord = cls.pool[canvas][0]
cls.pool[canvas].remove(coord)
coord.revive(controller, coord1, coord2)
return coord
else:
return cls(canvas, controller, coord1, coord2)
class Controller(abc.ABC):
def __init__(self, canvas, tool, item=None, **kw):
self.canvas = canvas
self.tool = tool
self.item = item
self._on_change = None
self.coords = []
self.links = []
def update(self):
pass
def on_change(self, func, *args, **kwargs):
self._on_change = lambda item: func(item, *args, **kwargs)
def _change(self):
if self._on_change:
self._on_change(self.item)
def highlight(self, item):
self.item = item
# raise controller elements to the top
self.canvas.tag_raise("controller")
@abc.abstractmethod
def get_coords(self):
pass
def on_coord_change(self, coord):
pass
def on_coord_context(self, coord, event):
pass
def on_link_context(self, link, event):
pass
def on_move(self, delta_x, delta_y, propagated=False):
for coord in self.coords:
coord.shift(delta_x, delta_y)
self.item.move(delta_x, delta_y)
if not propagated:
self.tool.propagate_move(delta_x, delta_y, self.item)
def on_release(self):
self.tool.on_layout_change()
def release(self):
for coord in self.coords:
coord.retire()
for link in self.links:
link.retire()
self.coords.clear()
self.links.clear()
class SquareController(Controller):
def __init__(self, canvas, tool, item=None, **kw):
super(SquareController, self).__init__(canvas, tool, item, **kw)
self.nw = Coordinate.acquire(canvas, self, 20, 20)
self.ne = Coordinate.acquire(canvas, self, 20, 20)
self.se = Coordinate.acquire(canvas, self, 20, 20)
self.sw = Coordinate.acquire(canvas, self, 20, 20)
self.n = Link.acquire(canvas, self, self.nw, self.ne)
self.s = Link.acquire(canvas, self, self.sw, self.se)
self.e = Link.acquire(canvas, self, self.ne, self.se)
self.w = Link.acquire(canvas, self, self.nw, self.sw)
self.coords = [self.ne, self.nw, self.se, self.sw]
self.links = [self.n, self.w, self.e, self.s]
if item:
self.highlight(item)
def highlight(self, item):
super(SquareController, self).highlight(item)
x1, y1, x2, y2 = item.coords()
self.nw.place(x1, y1)
self.ne.place(x2, y1)
self.se.place(x2, y2)
self.sw.place(x1, y2)
def update(self):
self.highlight(self.item)
def on_coord_change(self, coord):
x, y = coord.x, coord.y
if coord == self.nw:
self.ne.place(y=y)
self.sw.place(x=x)
elif coord == self.ne:
self.nw.place(y=y)
self.se.place(x=x)
elif coord == self.sw:
self.nw.place(x=x)
self.se.place(y=y)
elif coord == self.se:
self.ne.place(x=x)
self.sw.place(y=y)
else:
return
self.item.coords(self.get_coords())
self._change()
def get_coords(self):
return (
self.nw.x, self.nw.y,
self.se.x, self.se.y
)
class LinearController(Controller):
_closed = False
def __init__(self, canvas, tool, item=None, **kw):
super(LinearController, self).__init__(canvas, tool, item, **kw)
if item:
self.highlight(item)
self._link_context = MenuUtils.make_dynamic((
("command", "add point", icon("add", 14, 14), self._add_point, {}),
), tool.studio, tool.studio.style)
self._coord_context = MenuUtils.make_dynamic((
("command", "remove", icon("close", 14, 14), self._remove_point, {}),
), tool.studio, tool.studio.style)
self._active_link = None
self._active_coord = None
self._active_point = None
def on_link_context(self, link, event):
MenuUtils.popup(event, self._link_context)
self._active_link = link
self._active_point = self.canvas.canvasx(event.x), self.canvas.canvasy(event.y)
def on_coord_context(self, coord, event):
MenuUtils.popup(event, self._coord_context)
self._active_coord = coord
self._active_point = self.canvas.canvasx(event.x), self.canvas.canvasy(event.y)
def _add_point(self):
if not self._active_link:
return
index = self.coords.index(self._active_link.coord1) + 1
new_coord = Coordinate.acquire(self.canvas, self, *self._active_point)
self.coords.insert(index, new_coord)
self.item.coords(self.get_coords())
self.update()
self.tool.on_layout_change()
def _remove_point(self):
if not self._active_coord:
return
self.coords.remove(self._active_coord)
self._active_coord.retire()
self.item.coords(self.get_coords())
self.update()
self.tool.on_layout_change()
def on_coord_change(self, coord):
self.item.coords(self.get_coords())
self._change()
def get_coords(self):
return [coord for c in self.coords for coord in (c.x, c.y)]
def update(self):
# there is no smarter way to adjust links and coordinates
# clear them and reapply
self.release()
self.highlight(self.item)
def highlight(self, item):
coords = item.coords()
self.release()
prev = Coordinate.acquire(self.canvas, self, *coords[:2])
self.coords.append(prev)
for i in range(2, len(coords), 2):
# just in case the length of coordinates is odd
if i + 1 >= len(coords):
break
cd = Coordinate.acquire(self.canvas, self, coords[i], coords[i + 1])
self.coords.append(cd)
self.links.append(Link.acquire(self.canvas, self, prev, cd))
prev = cd
if self._closed:
self.links.append(Link.acquire(self.canvas, self, prev, self.coords[0]))
# ensure you have at least one item with "controller" tag before calling super
super(LinearController, self).highlight(item)
class ClosedLinearController(LinearController):
_closed = True
class PointController(Controller):
def __init__(self, canvas, tool, item=None, **kw):
super(PointController, self).__init__(canvas, tool, item, **kw)
self._border = None
if item:
self.highlight(item)
def get_coords(self):
return [self.coords[0].x, self.coords[0].y]
def on_coord_change(self, coord):
self.item.coords(self.get_coords())
self._change()
def on_move(self, delta_x, delta_y, propagated=False):
super(PointController, self).on_move(delta_x, delta_y, propagated)
self.highlight(self.item)
def _get_border_coords(self, item):
bbox = item.bbox() or (*item.coords(), *item.coords())
x1, y1, x2, y2 = bbox
x1, y1, x2, y2 = x1 - 2, y1 - 2, x2 + 2, y2 + 2
return x1, y1, x2, y1, x2, y2, x1, y2, x1, y1
def update(self):
self.canvas.coords(self._border, *self._get_border_coords(self.item))
def highlight(self, item):
coords = self._get_border_coords(item)
if self._border:
self.canvas.coords(self._border, *coords)
else:
self._border = self.canvas.create_line(
*coords, fill=self.tool.studio.style.colors["accent"],
tag="controller", dash=(5, 4), width=2
)
super(PointController, self).highlight(item)
def __del__(self):
if self._border:
self.canvas.delete(self._border)
class Draw(abc.ABC):
def __init__(self, tool):
self.tool = tool
self.active_item = None
@property
def canvas(self):
return self.tool.canvas
def canvas_coord(self, x, y):
return self.canvas.canvasx(x), self.canvas.canvasy(y)
@abc.abstractmethod
def on_button_press(self, event):
pass
@abc.abstractmethod
def on_button_release(self, event):
pass
@abc.abstractmethod
def on_double_press(self, event):
pass
@abc.abstractmethod
def on_motion(self, event):
pass
class SquareDraw(Draw):
def __init__(self, tool):
super(SquareDraw, self).__init__(tool)
self.coords = (0, 0, 0, 0)
self.item = None
self.draw_start = False
def on_button_press(self, event):
x, y = self.canvas_coord(event.x, event.y)
self.coords = (x, y, x, y)
self.draw_start = True
def on_button_release(self, event):
self.draw_start = False
if self.item:
self.tool.on_item_added(self.item)
self.item = None
def on_double_press(self, event):
pass
def on_motion(self, event):
if not self.draw_start:
return
x, y = self.canvas_coord(event.x, event.y)
if self.item is None:
self.item = self.tool.create_item(self.tool.current_draw, self.coords)
self.coords = (*self.coords[:2], x, y)
self.item.coords(*self.coords)
class LinearDraw(Draw):
def __init__(self, tool):
super(LinearDraw, self).__init__(tool)
self.coords = [0, 0, 0, 0]
self.item = None
self.draw_start = False
def on_button_press(self, event):
x, y = self.canvas_coord(event.x, event.y)
if not self.draw_start:
self.coords = [x, y, x, y]
else:
self.coords.extend([x, y])
self.item.coords(*self.coords)
self.draw_start = True
def on_button_release(self, event):
pass
def on_double_press(self, event):
self.draw_start = False
if self.item:
# remove last point which is usually a duplicate
self.item.coords(*self.coords[:-2])
self.tool.on_item_added(self.item)
self.item = None
def on_motion(self, event):
if not self.draw_start:
return
if self.item is None:
self.item = self.tool.create_item(self.tool.current_draw, self.coords)
x, y = self.canvas_coord(event.x, event.y)
# set the last two coordinates
self.coords[-2:] = [x, y]
self.item.coords(*self.coords)
class PointDraw(Draw):
def __init__(self, tool, **default_opts):
super(PointDraw, self).__init__(tool)
self.default_opts = default_opts
def on_button_press(self, event):
if event.state & EventMask.CONTROL:
return
x, y = self.canvas_coord(event.x, event.y)
self.item = self.tool.create_item(
self.tool.current_draw, (x, y), **self.default_opts
)
def on_button_release(self, event):
self.tool.on_item_added(self.item)
self.item = None
def on_double_press(self, event):
pass
def on_motion(self, event):
pass
class TextDraw(PointDraw):
def on_button_press(self, event):
super(TextDraw, self).on_button_press(event)
self.item.configure(text=self.item.name)
class CanvasStyleGroup(StyleGroup):
def __init__(self, master, pane, **cnf):
self.tool = cnf.pop('tool', None)
super().__init__(master, pane, **cnf)
self.label = "Canvas Item"
self.prop_keys = None
self._prev_prop_keys = set()
self._empty_message = "Select canvas item to see styles"
@property
def cv_items(self):
# selected canvas items
return self.tool.selected_items
def supports_widget(self, widget):
return isinstance(widget, Canvas)
def can_optimize(self):
# probably needs a rethink if we consider definition overrides
# in canvas items but there isn't much of that so this will do
return self.prop_keys == self._prev_prop_keys
def compute_prop_keys(self):
items = self.cv_items
if not items:
self.prop_keys = set()
else:
self.prop_keys = None
# determine common configs for multi-selected items
for item in self.cv_items:
if self.prop_keys is None:
self.prop_keys = set(item.configure())
else:
self.prop_keys &= set(item.configure())
if len(items) > 1:
# id cannot be set for multi-selected items
self.prop_keys.remove('id')
def on_widget_change(self, widget):
self._prev_prop_keys = self.prop_keys
self.compute_prop_keys()
super(CanvasStyleGroup, self).on_widget_change(widget)
def _get_prop(self, prop, widget):
# not very useful to us
return None
def _get_key(self, widget, prop):
# generate a key identifying the multi-selection state and prop modified
return f"{','.join(map(lambda x: str(x._id), self.cv_items))}:{prop}"
def _get_action_data(self, widget, prop):
return {item: {prop: item.cget(prop)} for item in self.cv_items}
def _apply_action(self, prop, value, widget, data):
for item in data:
item.configure(data[item])
if item._controller:
item._controller.update()
if self.tool.canvas == widget:
self.on_widget_change(widget)
self.tool.on_items_modified(data.keys())
def _set_prop(self, prop, value, widget):
for item in self.cv_items:
item.configure({prop: value})
if item._controller:
item._controller.update()
self.tool.on_items_modified(self.cv_items)
def get_definition(self):
if not self.cv_items:
return {}
else:
rough_definition = self.cv_items[0].properties
if len(self.cv_items) == 1:
# for single item no need to refine definitions any further
return rough_definition
resolved = {}
for prop in self.prop_keys:
if prop not in rough_definition:
continue
definition = resolved[prop] = rough_definition[prop]
# use default for value
definition.update(value=definition['default'])
return resolved
class CanvasTreeView(NestedTreeView):
class Node(NestedTreeView.Node):
def __init__(self, master=None, **config):
super().__init__(master, **config)
self.item: CanvasItem = config.get("item")
self.item.node = self
self._color = self.style.colors["secondary1"]
self.name_pad.configure(text=self.item.name)
self.icon_pad.configure(
image=icon(self.item.icon, 15, 15, color=self._color)
)
self.editable = True
self.strict_mode = True
def widget_modified(self, widget):
self.item = widget
self.name_pad.configure(text=self.item.name)
self.icon_pad.configure(
image=icon(self.item.icon, 15, 15, color=self._color)
)
def select(self, event=None, silently=False):
super(CanvasTreeView.Node, self).select(event, silently)
if event:
self.item.canvas.focus_set()
def __init__(self, canvas, **kw):
super(CanvasTreeView, self).__init__(canvas.node, **kw)
self._cv_node = canvas.node
self.canvas = canvas
self._is_mapped = False
self.allow_multi_select(True)
def add(self, node):
super(CanvasTreeView, self).add(node)
# if we have a node we make ourselves visible
if self not in self._cv_node.nodes:
self._cv_node.add(self)
def insert(self, index=None, *nodes):
super(CanvasTreeView, self).insert(index, *nodes)
# also make sure nodes is not empty
if self not in self._cv_node.nodes and nodes:
self._cv_node.add(self)
def remove(self, node):
super(CanvasTreeView, self).remove(node)
# if no nodes are left we hide ourselves
if not len(self.nodes):
self._cv_node.remove(self)
def reorder(self, reorder_data):
# rearrange nodes based on data containing {item: index, ...}
for item in reorder_data:
self.insert(reorder_data[item], item.node)
class CanvasStudioAdapter(BaseStudioAdapter):
_tool = None
@classmethod
def assert_tool(cls):
# make sure tool is initialized
if cls._tool is None:
raise RuntimeError("Canvas tool not initialized. Could not load canvas.")
@classmethod
def generate(cls, widget, parent=None):
cls.assert_tool()
# if canvas is selected there is a chance its cursor has been modified by tool
# below lies a hack to set the right cursor and restore it after loading is complete
cursor = None
if widget == cls._tool.canvas:
cursor = widget["cursor"]
widget.config(cursor=cls._tool._cursor)
node = BaseStudioAdapter.generate(widget, parent)
if cursor:
widget.config(cursor=cursor)
if getattr(widget, "_cv_initialized", False):
for item in widget._cv_items:
opts = {
"name": item.name,
"coords": ",".join(map(lambda c: str(round(c)), item.coords())),
"attr": item.altered_options()
}
if not item.name:
opts.pop("name", None)
Node(node, item.__class__.__name__, opts)
return node
@classmethod
def load(cls, node, designer, parent, bounds=None):
widget = BaseStudioAdapter.load(node, designer, parent, bounds=None)
cls.assert_tool()
if len(node):
cls._tool.initialize_canvas(widget)
for sub_node in node:
if sub_node.type not in CANVAS_ITEM_MAP:
raise NotImplementedError(f"Tag <{sub_node.type}> not implemented by canvas tool")
# use a copy just in case something gets popped down the line
config = dict(sub_node.attrib.get("attr", {}))
# add name to config as id so the intercepts can set it for us
config["id"] = sub_node.attrib.get("name", "")
coords = sub_node.attrib.get("coords", "").split(",")
if len(coords) < 2:
raise ValueError("Not enough coordinates provided.")
component = CANVAS_ITEM_MAP[sub_node.type]
item = component(widget, *coords)
item.configure(config)
cls._tool.create_item(
component, item=item, canvas=widget, silently=True
)
return widget
class CanvasTool(BaseTool):
name = "Canvas"
icon = "paint"
def __init__(self, studio, manager):
super(CanvasTool, self).__init__(studio, manager)
self._component_pane: ComponentPane = self.studio.get_feature(ComponentPane)
self.item_select = self._component_pane.register_group(
"Canvas", CANVAS_ITEMS, SelectToDrawGroup, self._evaluator
)
self.style_group = studio.style_pane.add_group(
CanvasStyleGroup, tool=self
)
CanvasStudioAdapter._tool = self
# connect the canvas adapter to load canvas objects to the studio
DesignBuilder.add_adapter(CanvasStudioAdapter, Canvas)
self.items = []
self.item_select.on_select(self.set_draw)
self.canvas = None
self._cursor = "arrow"
self.current_draw = None
self.selected_items = []
self._clipboard = None
self._latch_pos = 0, 0
self._image_placeholder = icon("image_dark", 60, 60)
self.square_draw = SquareDraw(self)
self.line_draw = LinearDraw(self)
self.text_draw = TextDraw(self)
self.bitmap_draw = PointDraw(self, bitmap="gray25")
self.image_draw = PointDraw(self, image=self._image_placeholder)
self.draw_map = {
Oval: self.square_draw,
Rectangle: self.square_draw,
Arc: self.square_draw,
Line: self.line_draw,
Polygon: self.line_draw,
Text: self.text_draw,
Bitmap: self.bitmap_draw,
Image: self.image_draw,
}
self.controller_map = {
Oval: SquareController,
Rectangle: SquareController,
Arc: SquareController,
Line: LinearController,
Polygon: ClosedLinearController,
Text: PointController,
Bitmap: PointController,
Image: PointController,
}
self.keymap = KeyMap(None)
CTRL = KeyMap.CTRL
self.routines = (
Routine(self.cut_items, 'CV_CUT', 'Cut selected items', 'canvas', CTRL + CharKey('x')),
Routine(self.copy_items, 'CV_COPY', 'Copy selected items', 'canvas', CTRL + CharKey('c')),
Routine(self.paste_items, 'CV_PASTE', 'Paste selected items', 'canvas', CTRL + CharKey('v')),
Routine(self.delete_items, 'CV_DELETE', 'Delete selected items', 'canvas', KeyMap.DELETE),
Routine(self.duplicate_items, 'CV_DUPLICATE', 'Duplicate selected items', 'canvas', CTRL + CharKey('d')),
Routine(self._send_back, 'CV_BACK', 'Send item to back', 'canvas', CharKey(']')),
Routine(self._bring_front, 'CV_FRONT', 'Bring item to front', 'canvas', CharKey('[')),
Routine(lambda: self._send_back(1), 'CV_BACK_1', 'send item back one step', 'canvas', CTRL + CharKey(']')),
Routine(lambda: self._bring_front(1), 'CV_FRONT_1', 'bring item forward one step', 'canvas',
CTRL + CharKey('[')),
)
self.keymap.add_routines(*self.routines)
self._item_context_menu = MenuUtils.make_dynamic((
EnableIf(
lambda: self.selected_items,
("separator",),
("command", "copy", icon("copy", 14, 14), self._get_routine('CV_COPY'), {}),
("command", "duplicate", icon("copy", 14, 14), self._get_routine('CV_DUPLICATE'), {}),
EnableIf(
lambda: self._clipboard is not None,
("command", "paste", icon("clipboard", 14, 14), self._get_routine('CV_PASTE'), {})
),
("command", "cut", icon("cut", 14, 14), self._get_routine('CV_CUT'), {}),
("separator",),
("command", "delete", icon("delete", 14, 14), self._get_routine('CV_DELETE'), {}),
("separator",),
("command", "send to back", icon("send_to_back", 14, 14), self._get_routine('CV_BACK'), {}),
("command", "bring to front", icon("bring_to_front", 14, 14), self._get_routine('CV_FRONT'), {}),
("command", "back one step", icon("send_to_back", 14, 14), self._get_routine('CV_BACK_1'), {}),
("command", "forward one step", icon("bring_to_front", 14, 14), self._get_routine('CV_FRONT_1'), {}),
),
), self.studio, self.studio.style)
self._canvas_menu = MenuUtils.make_dynamic((
EnableIf(
lambda: self._clipboard is not None,
("command", "paste", icon("clipboard", 14, 14),
self._get_routine('CV_PASTE'), {})
),
), self.studio, self.studio.style)
@property
def _ids(self):
return [item.name for item_set in self.items for item in item_set._cv_items]
def initialize_canvas(self, canvas=None):
canvas = canvas or self.canvas
if canvas and not getattr(canvas, "_cv_initialized", False):
canvas.bind(
"<ButtonPress-1>", self._draw_dispatch("on_button_press"), True)
canvas.bind(
"<ButtonRelease>", self._draw_dispatch("on_button_release"), True)
canvas.bind(
"<Double-Button-1>", self._draw_dispatch("on_double_press"), True)
canvas.bind(
"<Motion>", self._draw_dispatch("on_motion"), True)
canvas.bind("<Control-Button-1>", self._enter_pointer_mode)
canvas.bind("<Button-1>", self._latch_and_focus(canvas), True)
self.keymap._bind(canvas)
canvas.on_context_menu(self._show_canvas_menu(canvas))
canvas._cv_tree = CanvasTreeView(canvas)
canvas._cv_tree.on_structure_change(self._update_stacking, canvas)
canvas._cv_tree.on_select(self._update_selection, canvas)
canvas._cv_items = []
canvas._cv_initialized = True
@property
def sorted_selected_items(self):
return sorted(self.selected_items, key=lambda x: self.canvas._cv_items.index(x))
def _latch_and_focus(self, canvas):
def func(event):
canvas.focus_set()
self._latch_pos = canvas.canvasx(event.x), canvas.canvasy(event.y)
return func
def _enter_pointer_mode(self, *_):
if self.item_select._selected is None:
return
self.item_select._selected.deselect()
def _show_item_menu(self, item):
def show(event):
if item in self.selected_items:
MenuUtils.popup(event, self._item_context_menu)
return show
def _show_canvas_menu(self, canvas):
def show(event):
x, y = canvas.canvasx(event.x), canvas.canvasy(event.y)
self._latch_pos = x, y
if not canvas.find_overlapping(x, y, x, y):
MenuUtils.popup(event, self._canvas_menu)
return 'break'
return show
def _send_back(self, steps=None):
if not self.selected_items:
return
items = self.sorted_selected_items
if steps is None:
self._update_stacking(
self.canvas,
# arrange starting from zero
{item: index for index, item in enumerate(items)}
)
else:
self._update_stacking(
self.canvas,
# clamp to ensure non-negative index
{item: max(0, self.canvas._cv_items.index(item) - steps) for item in items}
)
def _bring_front(self, steps=None):
if not self.selected_items:
return
# work with items in stacking order
items = self.sorted_selected_items
cv_items = self.canvas._cv_items
if steps is None:
end = len(cv_items) - 1
self._update_stacking(
self.canvas,
# insert each item to the end of the list, will be done in stacking order
{item: end for item in items}
)
else:
self._update_stacking(
self.canvas,
# clamp the new index to within length of items
{item: min(len(cv_items) - 1, cv_items.index(item) + steps) for item in items}
)
def _update_stacking(self, canvas, data=None, silently=False):
if data:
canvas._cv_tree.reorder(data)
else:
data = {}
canvas._cv_items.sort(key=lambda x: canvas._cv_tree.nodes.index(x.node))
prev_data = {}
for index, item in enumerate(canvas._cv_items):
if item._prev_index != index:
# old data
prev_data[item] = item._prev_index
# new data
data[item] = index
item._prev_index = index
if index > 0:
item.lift(canvas._cv_items[index - 1]._id)
if not silently and prev_data != data:
self.studio.new_action(Action(
lambda _: self._update_stacking(canvas, prev_data, True),
lambda _: self._update_stacking(canvas, data, True)
))
def _get_routine(self, key):
for routine in self.routines:
if routine.key == key:
return routine
def create_item(self, component, coords=(), item=None, canvas=None, silently=False, **kwargs):
canvas = canvas or self.canvas
if item is None:
opts = dict(**component.defaults)
opts.update(kwargs)
item = component(canvas, *coords, **opts)
# generate a unique id
item.name = generate_id(component, self._ids)
canvas._cv_items.append(item)
item._prev_index = canvas._cv_items.index(item)
node = canvas._cv_tree.add_as_node(item=item)
item.bind("<ButtonRelease-1>", lambda e: self._handle_select(item, e), True)
item.bind("<ButtonRelease-1>", lambda e: self._handle_end(item, e), True)
item.bind("<Motion>", lambda e: self._handle_move(item, e), True)
MenuUtils.bind_context(item, self._show_item_menu(item))
MenuUtils.bind_all_context(node, self._show_item_menu(item))
if not silently:
self.studio.new_action(Action(
lambda _: self.remove_items([item], silently=True),
lambda _: self.restore_items([item])
))
return item
def remove_items(self, items, silently=False):
items = sorted(items, key=lambda x: x.canvas._cv_items.index(x))
self.deselect_items(items)
for item in items:
item.hide()
item.canvas._cv_items.remove(item)
item.node.remove()
if not silently:
self.studio.new_action(Action(
lambda _: self.restore_items(items),
lambda _: self.remove_items(items, silently=True)
))
def restore_items(self, items):
for item in items:
item.show()
canvas = item.canvas
if item._prev_index is not None:
canvas._cv_items.insert(item._prev_index, item)
canvas._cv_tree.insert(item._prev_index, item.node)
def _get_copy_data(self):
if not self.selected_items:
return []
items = self.sorted_selected_items
for item in items:
item.addtag('bound_check')
bbox = self.canvas.bbox('bound_check') or items[0].coords()
ref_x, ref_y = bbox[:2]
self.canvas.dtag('bound_check', 'bound_check')
return [item.serialize(ref_x, ref_y) for item in items]
def copy_items(self):
if self.selected_items:
self._clipboard = self._get_copy_data()
def cut_items(self):
if self.selected_items:
self.copy_items()
self.delete_items()
def duplicate_items(self):
if self.selected_items:
self.paste_items(self._get_copy_data())
def paste_items(self, _clipboard=None):
_clipboard = self._clipboard if _clipboard is None else _clipboard
if _clipboard:
items = []
for item_data in _clipboard:
item = CanvasItem.from_data(self.canvas, item_data, self._latch_pos)
self.create_item(item.__class__, item=item, silently=True)
items.append(item)
# slightly displace latch position for next paste
self._latch_pos = tuple(map(lambda x: x + 5, self._latch_pos))
self.studio.new_action(Action(
lambda _: self.remove_items(items, silently=True),
lambda _: self.restore_items(items)
))
def delete_items(self):
self.remove_items(list(self.selected_items))
def _handle_move(self, item, event):
if not event.state & EventMask.MOUSE_BUTTON_1:
# we need mouse button 1 to be down to qualify as a drag
return
if getattr(item, '_controller', None) and self.current_draw is None:
if getattr(item, '_coord_latch', None):
x0, y0 = item._coord_latch
                x, y = item.canvas.canvasx(event.x), item.canvas.canvasy(event.y)
item._controller.on_move(x - x0, y - y0)
item._coord_latch = x, y
else:
                item._coord_latch = item.canvas.canvasx(event.x), item.canvas.canvasy(event.y)
def _handle_end(self, item, event):
if getattr(item, '_coord_latch', None) and self.current_draw is None:
self.on_layout_change()
item._coord_latch = None
def _handle_select(self, item, event):
if self.current_draw is not None or getattr(item, '_coord_latch', None):
# if coord_latch has a value then it means we have been dragging
# an item and the button release means end of drag and not selection
return
if event.state & EventMask.CONTROL:
self.select_item(item, True)
else:
self.select_item(item)
def _draw_dispatch(self, event_type):
def handler(event):
drawer = self.draw_map.get(self.current_draw)
if drawer:
getattr(drawer, event_type)(event)
return handler
def set_draw(self, component):
self._set_cursor()
self.current_draw = component
def _reset_cursor(self):
self.canvas.configure(cursor=self._cursor)
def _set_cursor(self):
if self.item_select.selected:
self.canvas.configure(cursor="crosshair")
else:
self._reset_cursor()
def _evaluator(self, widget):
return isinstance(widget, Canvas)
def set_controller(self, item):
controller_class = self.controller_map.get(item.__class__)
if controller_class:
item._controller = controller_class(item.canvas, self, item)
return item._controller
def remove_controller(self, item):
controller = getattr(item, "_controller", None)
if controller:
controller.release()
item._controller = None
def selection_changed(self):
# called when canvas item selection changes
self.style_group.on_widget_change(self.canvas)
def _update_selection(self, canvas):
# update selections from the canvas tree
if canvas != self.canvas:
self.studio.select(canvas)
# call to studio should cause canvas to be selected
assert self.canvas == canvas
selected = set(self.selected_items)
to_select = {node.item for node in canvas._cv_tree.get()}
# deselect items currently selected that shouldn't be
for item in selected - to_select:
self.remove_controller(item)
self.selected_items.remove(item)
# select items to be selected that are not yet selected
for item in to_select - selected:
controller = self.set_controller(item)
if not controller:
return
self.selected_items.append(item)
self.selection_changed()
def _clear_selection(self):
if self.selected_items:
for item in self.selected_items:
self.remove_controller(item)
item.canvas._cv_tree.deselect(item.node)
self.selected_items.clear()
self.selection_changed()
def _deselect(self, item):
self.remove_controller(item)
self.selected_items.remove(item)
item.canvas._cv_tree.deselect(item.node)
def deselect_items(self, items):
# only consider selected items
items = set(items) & set(self.selected_items)
if items:
for item in items:
if item in self.selected_items:
self._deselect(item)
self.selection_changed()
def select_item(self, item, multi=False):
if multi:
if item in self.selected_items:
self._deselect(item)
else:
controller = self.set_controller(item)
if not controller:
return
self.selected_items.append(item)
item.node.select(silently=True)
else:
for i in self.selected_items:
if i == item:
continue
self.remove_controller(i)
i.canvas._cv_tree.deselect(i.node)
if item in self.selected_items:
self.selected_items = [item]
elif self.set_controller(item):
self.selected_items = [item]
item.node.select(silently=True)
self.selection_changed()
def on_select(self, widget):
if self.canvas == widget:
return
if self.canvas is not None:
self._reset_cursor()
self.release(self.canvas)
if isinstance(widget, Canvas):
self.canvas = widget
self._cursor = widget["cursor"]
self._set_cursor()
self.initialize_canvas()
else:
if self.canvas is None:
return
self.release(self.canvas)
self.canvas = None
def release(self, canvas):
if canvas is None or not getattr(canvas, "_cv_initialized", False):
return
self._clear_selection()
def on_layout_change(self):
prev_data = {item: item._coord_restore for item in self.selected_items}
data = {item: item.coords() for item in self.selected_items}
for item in self.selected_items:
item._coord_restore = item.coords()
self.studio.new_action(Action(
lambda _: self.restore_layouts(prev_data),
lambda _: self.restore_layouts(data)
))
def restore_layouts(self, data):
for item in data:
item.coords(*data[item])
if item._controller:
item._controller.update()
def on_item_added(self, item):
item._coord_restore = item.coords()
def on_items_modified(self, items):
for item in items:
item.node.widget_modified(item)
def on_widget_delete(self, widget):
if isinstance(widget, Canvas):
if widget in self.items:
self.items.remove(widget)
def propagate_move(self, delta_x, delta_y, source=None):
for item in self.selected_items:
if item != source:
item._controller.on_move(delta_x, delta_y, True)
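# Note (editorial, not part of the tool): Coordinate and Link use a simple
# per-canvas object pool -- acquire() reuses a retired handle when one is
# available instead of creating a new canvas item, while retire() merely hides
# the item and returns it to the pool so repeated selections stay cheap.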
|
py | 1a3963b7402e227d4429aa8c63b94584d2a8baf7 | import numpy
from ..tractography import Tractography
from . import tract_operations
from ..tensor import scalar_measures
try:
from collections import OrderedDict
except ImportError: # Python 2.6 fix
from ordereddict import OrderedDict
def compute_all_measures(tractography, desired_keys_list, scalars=None, resolution=None):
unordered_results = dict()
if ('number of tracts' in desired_keys_list):
unordered_results['number of tracts'] = tract_operations.tract_count(
tractography.tracts())
if ('length mean (mm)' in desired_keys_list) or ('length std (mm^2)' in desired_keys_list):
lengths = numpy.empty(len(tractography.tracts()))
for i, one_tract in enumerate(tractography.tracts()):
lengths[i] = tract_operations.tract_length(one_tract)
unordered_results['length mean (mm)'] = lengths.mean()
unordered_results['length std (mm^2)'] = lengths.std()
if ('tract volume' in desired_keys_list) and (resolution is not None):
resolution = float(resolution)
voxels = tract_operations.voxelized_tract(tractography, resolution)
neighbors = numpy.array([
[0, 1, 0],
[0, -1, 0],
[1, 0, 0],
[-1, 0, 0],
[0, 0, 1],
[0, 0, -1]
])
dilated_voxels = set()
dilated_voxels.update(voxels)
eroded_voxels = set()
for voxel in voxels:
            neighbors_list = list(zip(*(neighbors + voxel).T))  # materialize: this sequence is consumed twice below
dilated_voxels.update(neighbors_list)
if len(voxels.intersection(neighbors_list)) == len(neighbors):
eroded_voxels.add(voxel)
# print len(dilated_voxels), len(voxels), len(eroded_voxels)
approx_voxels = (len(dilated_voxels) - len(eroded_voxels)) / 2.
approx_volume = approx_voxels * (resolution ** 3)
unordered_results['tract volume'] = approx_volume
if ('per tract distance weighted mean %s' in desired_keys_list ) or \
('per tract distance weighted std %s' in desired_keys_list):
mean_keys_list = list()
std_keys_list = list()
for scalar in scalars:
mean_key = 'per tract distance weighted mean %s' % scalar
std_key = 'per tract distance weighted std %s' % scalar
mean_keys_list.append(mean_key)
std_keys_list.append(std_key)
            # avoid shadowing the `scalars` argument (the list of scalar names being iterated)
            per_point_values = tractography.tracts_data()[scalar]
            weighted_scalars = numpy.empty((len(tractography.tracts()), 2))
            for line_index, t_data in enumerate(tractography.tracts()):
                tdiff = numpy.sqrt((numpy.diff(t_data, axis=0) ** 2).sum(-1))
                length = tdiff.sum()
                values = per_point_values[line_index][1:].squeeze()
average = numpy.average(values, weights=tdiff)
weighted_scalars[line_index, 0] = average
weighted_scalars[line_index, 1] = length
mean = numpy.average(
weighted_scalars[:, 0], weights=weighted_scalars[:, 1])
std = numpy.average(
(weighted_scalars[:, 0] - mean) ** 2, weights=weighted_scalars[:, 1])
unordered_results[mean_key] = mean
unordered_results[std_key] = std
mii = desired_keys_list.index('per tract distance weighted mean %s')
desired_keys_list[mii:mii + 1] = mean_keys_list
sii = desired_keys_list.index('per tract distance weighted std %s')
desired_keys_list[sii:sii + 1] = std_keys_list
# Make Ordered Dictionary
ordered_dict = OrderedDict()
for key in desired_keys_list:
ordered_dict[key] = unordered_results[key]
return ordered_dict
def tract_expand_tensor_metrics(tractography):
from os import path
from scipy import ndimage
from numpy import linalg
quantity_name = "tensor1_FA"
start = 0
new_scalar_data = []
for tract in tractography.original_tracts():
new_scalar_data.append(
new_scalar_data_flat[start: start + len(tract)].copy()
)
start += len(tract)
tractography.original_tracts_data()[quantity_name] = new_scalar_data
return Tractography(
tractography.original_tracts(), tractography.original_tracts_data(),
**tractography.extra_args
)
def decorate_tract_with_measures(tractography, tensor_name):
ot = tractography.original_tracts_data()
all_tensors = ot[tensor_name]
fa_fiber_list = list()
md_fiber_list = list()
ax_fiber_list = list()
rd_fiber_list = list()
ga_fiber_list = list()
for one_fiber in all_tensors:
fa_by_point = numpy.ndarray((len(one_fiber), 1), dtype=numpy.float32)
md_by_point = numpy.ndarray((len(one_fiber), 1), dtype=numpy.float32)
ax_by_point = numpy.ndarray((len(one_fiber), 1), dtype=numpy.float32)
rd_by_point = numpy.ndarray((len(one_fiber), 1), dtype=numpy.float32)
ga_by_point = numpy.ndarray((len(one_fiber), 1), dtype=numpy.float32)
index = 0
for one_tensor_values in one_fiber:
one_tensor = numpy.reshape(one_tensor_values, (3, 3))
_, eigenvals, _ = numpy.linalg.svd(one_tensor)
fa_by_point[index] = scalar_measures.fractional_anisotropy_from_eigenvalues(eigenvals)
md_by_point[index] = scalar_measures.mean_diffusivity(eigenvals)
ax_by_point[index] = scalar_measures.axial_diffusivity(eigenvals)
rd_by_point[index] = scalar_measures.radial_diffusivity(eigenvals)
ga_by_point[index] = scalar_measures.geodesic_anisotropy(eigenvals)
index = index + 1
fa_fiber_list.append(fa_by_point)
md_fiber_list.append(md_by_point)
ax_fiber_list.append(ax_by_point)
rd_fiber_list.append(rd_by_point)
ga_fiber_list.append(ga_by_point)
tractography.original_tracts_data()['FA_' + tensor_name] = fa_fiber_list
tractography.original_tracts_data()['MD_' + tensor_name] = md_fiber_list
tractography.original_tracts_data()['AX_' + tensor_name] = ax_fiber_list
tractography.original_tracts_data()['RD_' + tensor_name] = rd_fiber_list
tractography.original_tracts_data()['GA_' + tensor_name] = ga_fiber_list
return Tractography(
tractography.original_tracts(), tractography.original_tracts_data(),
**tractography.extra_args)
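# Illustrative call (not part of the original module); the requested keys must
# match the strings checked inside compute_all_measures, and `resolution` is
# only needed when 'tract volume' is requested:
#   measures = compute_all_measures(
#       tractography,
#       ['number of tracts', 'length mean (mm)', 'length std (mm^2)'],
#       resolution=1.0)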
|
py | 1a396455c3d328ebfaf4a1f9b870a838239c33df | import interact
class EvtInteract(interact.Interact):
def __init__(self):
self.events = []
def checkEventInteraction(self, events):
self.events = events
self.checkInteraction()
|
py | 1a39670dfb5df557db31cdda843b7f6376aae065 | import datetime
import os
from pyhmy.logging import ControlledLogger
_config = {
"AMT_PER_TXN": [1e-9, 1e-9], # The random range for each transaction in the transaction-generation
"NUM_SRC_ACC": 32, # The number of possible source accounts for all transactions, higher = more tps
"NUM_SNK_ACC": 1, # The number of possible destination / sink accounts for all transaction
"MAX_TXN_GEN_COUNT": None, # The upper bound of the number generated transaction, regardless of if `stop` is called
"ONLY_CROSS_SHARD": False, # If true, forces source and destination shards to be different
"ENFORCE_NONCE": False, # If true, will only generate transactions with a valid nonce
"ESTIMATED_GAS_PER_TXN": 1e-3, # The estimated gas, hardcoded
"INIT_SRC_ACC_BAL_PER_SHARD": 1, # The initial balance for EVERY source account
"TXN_WAIT_TO_CONFIRM": 60, # The timeout when a transaction is sent (only used in setup related functions)
"MAX_THREAD_COUNT": os.cpu_count()//2, # Max thread is recommended to be less than your v-core count
"ENDPOINTS": [ # Endpoints for all transaction, index i = shard i
"https://api.s0.pga.hmny.io/",
"https://api.s1.pga.hmny.io/",
"https://api.s2.pga.hmny.io/"
],
"SRC_SHARD_WEIGHTS": [ # Adjust the likelihood that shard i (i = index) gets chosen to be the source shard
1, # Bigger number = higher likelihood of shard i begin chosen
1, # 0 = 0% chance of being chosen
1
],
"SNK_SHARD_WEIGHTS": [ # Adjust the likelihood that shard i (i = index) gets chosen to be the source shard
1,
1,
1
],
"CHAIN_ID": "devnet", # The chain id for all transaction, should be devnet if not localnet.
"REFUND_ACCOUNT": "one1j9hwh7vqz94dsk06q4h9hznr4wlr3x5zup6wz3", # All refunds will be sent to this address
}
import_account_name_prefix = "_tx_gen_"
class Loggers:
"""
A collection of loggers for the transaction generator.
"""
general = ControlledLogger(f"general_log_{datetime.datetime.utcnow()}", "./logs/general")
transaction = ControlledLogger(f"transaction_log_{datetime.datetime.utcnow()}", "./logs/transaction")
balance = ControlledLogger(f"balance_log_{datetime.datetime.utcnow()}", "./logs/balance")
report = ControlledLogger(f"report_log_{datetime.datetime.utcnow()}", "./logs/report")
def start_new_loggers():
"""
This reinitializes all loggers in `pdoc.Loggers`.
Note that new files will be generated in the process.
"""
start_new_general_logger()
start_new_transaction_logger()
start_new_balance_logger()
start_new_report_logger()
def start_new_general_logger():
"""
This reinitializes the general logger in `pdoc.Loggers`.
Note that new files will be generated in the process.
"""
Loggers.general = ControlledLogger(f"general_log_{datetime.datetime.utcnow()}", "./logs/general")
def start_new_transaction_logger():
"""
This reinitializes the transaction logger in `pdoc.Loggers`.
Note that new files will be generated in the process.
"""
Loggers.transaction = ControlledLogger(f"transaction_log_{datetime.datetime.utcnow()}", "./logs/transaction")
def start_new_balance_logger():
"""
This reinitializes the balance logger in `pdoc.Loggers`.
Note that new files will be generated in the process.
"""
Loggers.balance = ControlledLogger(f"balance_log_{datetime.datetime.utcnow()}", "./logs/balance")
def start_new_report_logger():
"""
This reinitializes the report logger in `pdoc.Loggers`.
Note that new files will be generated in the process.
"""
Loggers.report = ControlledLogger(f"report_log_{datetime.datetime.utcnow()}", "./logs/report")
def write_all_logs():
"""
Write all the logs in `pdoc.Loggers`
"""
Loggers.general.write()
Loggers.transaction.write()
Loggers.balance.write()
Loggers.report.write()
def _validate_config():
assert isinstance(_config, dict)
if not isinstance(_config["AMT_PER_TXN"], list) or len(_config["AMT_PER_TXN"]) != 2 \
or _config["AMT_PER_TXN"][0] < 0:
raise ValueError("Amount per transaction must be a range from 0")
if not isinstance(_config["NUM_SRC_ACC"], int) or _config["NUM_SRC_ACC"] < 0:
raise ValueError("Number of Source Accounts cannot be negative")
if not isinstance(_config["NUM_SNK_ACC"], int) or _config["NUM_SNK_ACC"] < 0:
raise ValueError("Number of Sink Accounts cannot be negative")
# TODO: check max generation count: input_config["MAX_TXN_GEN_COUNT"]
if not isinstance(_config["ONLY_CROSS_SHARD"], bool):
raise ValueError("Only Cross Shard must be a boolean")
if not isinstance(_config["ESTIMATED_GAS_PER_TXN"], (int, float)) or _config["ESTIMATED_GAS_PER_TXN"] < 0:
raise ValueError("Estimated gas per transaction cannot be negative")
if not isinstance(_config["INIT_SRC_ACC_BAL_PER_SHARD"], (int, float)) \
or _config["INIT_SRC_ACC_BAL_PER_SHARD"] < 0:
raise ValueError("Initial Source Account Balance per shard cannot be negative")
if not isinstance(_config["TXN_WAIT_TO_CONFIRM"], (int, float)) or _config["TXN_WAIT_TO_CONFIRM"] < 0:
raise ValueError("Transaction wait to confirm time cannot be negative")
if _config["MAX_THREAD_COUNT"] is not None and not (isinstance(_config["MAX_THREAD_COUNT"], int)
and _config["MAX_THREAD_COUNT"] > 0):
raise ValueError("Max Threads cannot be negative")
num_shards = len(_config["ENDPOINTS"])
# TODO: check endpoints are valid: input_config["ENDPOINTS"]
if not isinstance(_config["SRC_SHARD_WEIGHTS"], list) or len(_config["SRC_SHARD_WEIGHTS"]) != num_shards:
raise ValueError("Source Shard Weights must be list of len shards")
if not isinstance(_config["SNK_SHARD_WEIGHTS"], list) or len(_config["SNK_SHARD_WEIGHTS"]) != num_shards:
raise ValueError("Sink Shard Weights must be list of len shards")
# TODO: check chain_ID: input_config["CHAIN_ID"]
if not _config["REFUND_ACCOUNT"].startswith("one1"):
raise ValueError("Refund account must be valid account")
def set_config(input_config):
"""
Validate a config, `input_config`, and set the config for the transaction generator.
"""
input_keys = input_config.keys()
assert "ENDPOINTS" in input_keys, "Must specify endpoints"
assert isinstance(input_config["ENDPOINTS"], list)
if "SRC_SHARD_WEIGHTS" not in input_keys:
input_config["SRC_SHARD_WEIGHTS"] = [1] * len(input_config["ENDPOINTS"])
if "SNK_SHARD_WEIGHTS" not in input_keys:
input_config["SNK_SHARD_WEIGHTS"] = [1] * len(input_config["ENDPOINTS"])
_config.update(input_config)
_validate_config()
def get_config():
"""
:returns a COPY of the current config (to prevent accidental modification of config)
"""
return _config.copy()
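# Illustrative usage sketch (not part of the original module): the endpoint URLs are the
# devnet defaults from _config above, and the smaller NUM_SRC_ACC is an arbitrary example value.
if __name__ == "__main__":
    set_config({
        "ENDPOINTS": [
            "https://api.s0.pga.hmny.io/",
            "https://api.s1.pga.hmny.io/",
            "https://api.s2.pga.hmny.io/"
        ],
        "NUM_SRC_ACC": 8,
    })
    start_new_loggers()
    print(get_config())  # get_config returns a copy, so callers cannot mutate _config by accident
    write_all_logs()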
|
py | 1a3967147ef1cf024e75e01554e435adac9d8a2f | import sys, os
import pandas as pd
from tqdm import tqdm
from Bio import SeqIO
import argparse
from multiprocessing import Pool
import pickle
import numpy as np
class QuadruplexFinder(object):
def __init__(self, fasta_file, output_path = '',
GC='G', L=7, q=4, nquadruplets=4, mdef=1, tetdef=1, len_bulge=1, max_bulge = 1,
bulge_priority=False, repeats=False, verbose=False, nthreads=1):
# parse arg
self.fasta_file = fasta_file
self.output_path = output_path
self.GC = GC
self.L = L
self.q = q
self.nquadruplets = nquadruplets
self.mdef = mdef
self.tetdef = tetdef
self.repeats = repeats
self.verbose = verbose
self.len_bulge = len_bulge
self.max_bulge = max_bulge
self.bulge_priority = bulge_priority
self.nthreads = nthreads
def load_fasta(self):
sequences = []
for record in SeqIO.parse(self.fasta_file, "fasta"):
sequences.append((record.seq, record.id))
return sequences
def find_quadruplets_without_bulges(self, fasta):
quadruplets = []
stack = [self.QuadrupletDetector(nuc) for nuc in fasta[:self.q]]
current_state = sum(stack)
if current_state >= self.q - self.tetdef:
quadruplets.append((0, self.q - current_state, self.q))
for i in tqdm(range(self.q, len(fasta)), desc='Quadrupleting', disable = self.verbose):
stack.append(self.QuadrupletDetector(fasta[i]))
current_state = current_state + stack[-1] - stack.pop(0)
if current_state >= self.q - self.tetdef:
quadruplets.append((i-self.q+1, self.q - current_state, self.q))
return quadruplets
def QuadrupletDetector(self, quadr):
if self.repeats:
quadr = quadr.upper()
return 1 if quadr == self.GC.upper() else 0
def find_quadruplets_wrapper(self, data):
return self.find_quadruplets(**data)
def find_quadruplets(self, fasta, shift=0, tqdm_keep_silence=None):
'''
bulge_stack - a list of numbers: for each matching nucleotide, how many non-matching nucleotides preceded it, plus 1
'''
tqdm_keep_silence = self.verbose if tqdm_keep_silence is None else tqdm_keep_silence
quadruplets = []
quadruplets_sequences = []
open_bulge = 0
bulge_stack = []
sequence_stack = ''
bulge_current_state = 0
bulge_current_num_state = 0
bulge_num_state = 0
n_bulges = 0
def add_bulge(nuc):
nonlocal open_bulge, bulge_current_num_state, bulge_current_state, bulge_num_state, bulge_stack, n_bulges
if self.QuadrupletDetector(nuc):
bulge_stack.append(open_bulge+1)
if len(bulge_stack) == 1:
bulge_stack[0] = 1
open_bulge = 0
if bulge_current_num_state < self.q:
bulge_current_num_state += 1
bulge_current_state += bulge_stack[-1]
if bulge_stack[-1] != 1:
n_bulges += 1
else:
bulge_num_state += 1
else:
open_bulge += 1
def remove_bulge(nuc):
nonlocal bulge_num_state, bulge_current_state, bulge_current_num_state, bulge_stack, n_bulges
if self.QuadrupletDetector(nuc):
if bulge_num_state > 0:
bulge_current_state += bulge_stack[bulge_current_num_state]
bulge_num_state -= 1
if bulge_stack[bulge_current_num_state] != 1:
n_bulges += 1
else:
bulge_current_num_state -= 1
bulge_current_state -= bulge_stack.pop(0)
if len(bulge_stack) > 0:
pop = bulge_stack.pop(0)
if pop != 1:
n_bulges -= 1
bulge_current_state -= pop - 1
bulge_stack.insert(0, 1)
for i, nuc in enumerate(fasta[:(self.q+self.len_bulge)]):
add_bulge(nuc)
sequence_stack = sequence_stack+nuc
if ((bulge_current_num_state == self.q) & (n_bulges <= self.max_bulge) &
(self.QuadrupletDetector(fasta[0])) & (self.QuadrupletDetector(fasta[bulge_current_state-1]))):
quadruplets.append((0+shift, n_bulges, bulge_current_state))
quadruplets_sequences.append(sequence_stack[:bulge_current_state])
stack = [self.QuadrupletDetector(nuc) for nuc in fasta[:self.q]]
current_state = sum(stack)
if ((current_state >= self.q - self.tetdef) & (current_state < self.q) &
(self.QuadrupletDetector(fasta[0])) & (self.QuadrupletDetector(fasta[self.q-1]))):
quadruplets.append((0+shift, self.q - current_state, self.q))
quadruplets_sequences.append(sequence_stack[:self.q])
for i in tqdm(range(self.q, len(fasta)), desc='Quadrupleting', disable = tqdm_keep_silence):
remove_bulge(fasta[i-self.q])
i_bulge = i + self.len_bulge
if i_bulge < len(fasta):
add_bulge(fasta[i_bulge])
sequence_stack = sequence_stack+fasta[i_bulge]
stack.append(self.QuadrupletDetector(fasta[i]))
current_state = current_state + stack[-1] - stack.pop(0)
sequence_stack = sequence_stack[1:]
if self.QuadrupletDetector(fasta[i-self.q+1]):
if ((bulge_current_num_state == self.q) & (n_bulges <= self.max_bulge) &
(self.QuadrupletDetector(fasta[i-self.q+bulge_current_state]))):
quadruplets.append((i-self.q+1+shift, n_bulges, bulge_current_state))
quadruplets_sequences.append(sequence_stack[:bulge_current_state])
if ((current_state >= self.q - self.tetdef) & (current_state < self.q) &
(self.QuadrupletDetector(fasta[i]))):
quadruplets.append((i-self.q+1+shift, self.q - current_state, self.q))
quadruplets_sequences.append(sequence_stack[:self.q])
return quadruplets, quadruplets_sequences
def find_quadruplets_in_parallel(self, fasta):
pool = Pool(processes=self.nthreads)
minimal_chunk_length = self.q + self.len_bulge
base_chunk_length = len(fasta) // self.nthreads
if base_chunk_length < minimal_chunk_length:
base_chunk_length = minimal_chunk_length
fasta_chunks_starts = list(range(0, len(fasta), base_chunk_length))
if len(fasta) % base_chunk_length != 0:
fasta_chunks_starts = fasta_chunks_starts[:-1]
fasta_chunks_ends = fasta_chunks_starts[1:] + [len(fasta)-minimal_chunk_length]
quadruplets_list = pool.map(self.find_quadruplets_wrapper, ({'fasta':fasta[start:(end+minimal_chunk_length)],
'shift':start,
'tqdm_keep_silence':None if silence_ind==len(fasta_chunks_starts)-1 else True}
for silence_ind, (start, end) in enumerate(zip(fasta_chunks_starts, fasta_chunks_ends))))
pool.close()
pool.join()
quadruplets = []
quadruplets_sequences = []
quadruplets_list_ = []
quadruplets_seq_list_ = []
for quad, quad_seq in quadruplets_list:
if len(quad) != 0:
quadruplets_list_.append(quad)
quadruplets_seq_list_.append(quad_seq)
del quadruplets_list
for quadruplet_now, quadruplet_next, quadruplet_seq_now, quadruplet_seq_next in zip(
quadruplets_list_[:-1], quadruplets_list_[1:],
quadruplets_seq_list_[:-1], quadruplets_seq_list_[1:]):
first_next_quad = quadruplet_next[0]
num_quad_now = -1
while (first_next_quad == quadruplet_now[num_quad_now]) or (first_next_quad[0] <= quadruplet_now[num_quad_now][0]):
num_quad_now -= 1
num_quad_now += 1
if num_quad_now != 0:
quadruplet_now = quadruplet_now[:num_quad_now]
quadruplet_seq_now = quadruplet_seq_now[:num_quad_now]
quadruplets.extend(quadruplet_now)
quadruplets_sequences.extend(quadruplet_seq_now)
quadruplets_sequences.extend(quadruplet_seq_next)
quadruplets.extend(quadruplet_next)
del quadruplets_list_
del quadruplets_seq_list_
return quadruplets, quadruplets_sequences
def find_quadruplexes_wrapper(self, data):
return self.find_quadruplexes(**data)
def find_quadruplexes(self, quadruplets, tqdm_keep_silence=None):
'''
quadruplex: [[Q1-Start, Q1-Defects, Q1-Length]]*self.nquadruplets
'''
tqdm_keep_silence = self.verbose if tqdm_keep_silence is None else tqdm_keep_silence
total_wrongs = 0 #number of the quadruplets with defect
wrongNum = 0
def check_conditions():
nonlocal total_wrongs, wrongNum
if i == 0:
total_wrongs = 0
wrongNum = 0
elif (quadruplets[k][0] - quadruplets[quadruplex_set[i-1]][0] <= quadruplets[quadruplex_set[i-1]][2]):
return 'too close'
elif (quadruplets[k][0] - (quadruplets[quadruplex_set[i-1]][0] + quadruplets[quadruplex_set[i-1]][2]) > self.L):
return 'too far'
if quadruplets[k][1] != 0:
wrongNum = i+1
total_wrongs += 1
if total_wrongs > self.mdef:
total_wrongs -= 1
return False
else:
return True
def revert_wrongs():
nonlocal total_wrongs, wrongNum
if (i >= 0):
if (quadruplets[quadruplex_set[i]][1] != 0):
total_wrongs -= 1
if wrongNum == i+1:
for j in range(i):
if quadruplets[quadruplex_set[j]][1] != 0:
wrongNum = j+1
break
if wrongNum == i+1:
wrongNum = 0
quadruplexes = []
quadruplex_set = list(range(-1, self.nquadruplets))
i = 0
k = quadruplex_set[i]
with tqdm(desc='Quadruplexing', total=len(quadruplets), disable = tqdm_keep_silence) as pbar:
while i >= 0:
k = quadruplex_set[i]+1
if i == 0:
pbar.update(1)
if i == self.nquadruplets:
quadruplex = tuple([quadruplets[qu] for qu in quadruplex_set[:-1]] + [total_wrongs])
quadruplexes.append(list(quadruplex))
i -= 1
revert_wrongs()
elif k >= len(quadruplets) - self.nquadruplets + 1 + i:
i -= 1
revert_wrongs()
else:
status = check_conditions()
if status == True:
quadruplex_set[i] = k
i += 1
quadruplex_set[i] = quadruplex_set[i-1]
elif status == 'too far':
i -= 1
revert_wrongs()
else:
quadruplex_set[i] = k
pbar.update(len(quadruplets) - pbar.n)
return quadruplexes
def group_quadruplexes(self, quadruplexes):
groups = []
q1 = 0
q2 = 1
with tqdm(desc='Grouping', total=len(quadruplexes)-1, disable = self.verbose) as pbar:
while q1 < len(quadruplexes)-1:
while q2 < len(quadruplexes):
pbar.update(1)
tetrads_length_q1 = sum([quadruplexes[q1][i][2]+quadruplexes[q1][i][1] for i in range(self.nquadruplets)])
tetrads_length_q2 = sum([quadruplexes[q2][i][2]+quadruplexes[q2][i][1] for i in range(self.nquadruplets)])
general_length_q1 = quadruplexes[q1][self.nquadruplets - 1][0] + quadruplexes[q1][self.nquadruplets - 1][2] - 1 - quadruplexes[q1][0][0]
general_length_q2 = quadruplexes[q2][self.nquadruplets - 1][0] + quadruplexes[q2][self.nquadruplets - 1][2] - 1 - quadruplexes[q2][0][0]
if (quadruplexes[q2][0][0] > quadruplexes[q1][self.nquadruplets - 1][0] + quadruplexes[q1][self.nquadruplets - 1][2] - 1):
groups.append(quadruplexes[q1])
q1 = q2
if (q2 == len(quadruplexes)-1):
groups.append(quadruplexes[q2])
q1 = len(quadruplexes)
elif ((tetrads_length_q2 < tetrads_length_q1) & (not self.bulge_priority) or
(tetrads_length_q2 >= tetrads_length_q1) & (self.bulge_priority) or
(general_length_q2 < general_length_q1) & (not self.bulge_priority) or
(general_length_q2 < general_length_q1) & (self.bulge_priority)):
q1 = q2
if (q2 == len(quadruplexes)-1):
groups.append(quadruplexes[q2])
q1 = len(quadruplexes)
elif (q2 == len(quadruplexes)-1):
groups.append(quadruplexes[q1])
q1 = len(quadruplexes)
q2 += 1
return groups
def find_quadruplexes_in_parallel(self, quadruplets):
pool = Pool(processes=self.nthreads)
minimal_chunk_length = (self.q + self.len_bulge + self.L)*(self.nquadruplets)-self.L
if len(quadruplets) > self.nthreads:
base_chunk_length = len(quadruplets) // self.nthreads
else:
base_chunk_length = 1
quadruplets_chunks_starts = list(range(0, len(quadruplets), base_chunk_length))
if len(quadruplets) % base_chunk_length != 0:
quadruplets_chunks_starts = quadruplets_chunks_starts[:-1]
quadruplets_chunks_ends = []
for start_tmp in quadruplets_chunks_starts[1:]:
end_ind = start_tmp
end_val = quadruplets[start_tmp][0]
tmp_end_val = quadruplets[end_ind][0]
while (end_ind < len(quadruplets)) and (tmp_end_val - end_val <= minimal_chunk_length):
end_ind += 1
tmp_end_val = quadruplets[end_ind][0]
quadruplets_chunks_ends.append(end_ind-1)
quadruplets_chunks_ends.append(len(quadruplets))
quadruplexes_list = pool.map(self.find_quadruplexes_wrapper, ({'quadruplets':quadruplets[start:end],
'tqdm_keep_silence':None if silence_ind==len(quadruplets_chunks_starts)-1 else True}
for silence_ind, (start, end) in enumerate(zip(quadruplets_chunks_starts, quadruplets_chunks_ends))))
pool.close()
pool.join()
quadruplexes_list_ = []
for quad in quadruplexes_list:
if len(quad) != 0:
quadruplexes_list_.append(quad)
del quadruplexes_list
quadruplexes = []
for quadruplex_now, quadruplex_next in zip(quadruplexes_list_[:-1], quadruplexes_list_[1:]):
first_next_quad = quadruplex_next[0]
num_quad_now = -1
while first_next_quad[0][0] <= quadruplex_now[num_quad_now][0][0]:
if (first_next_quad == quadruplex_now[num_quad_now]) or (first_next_quad[0][0] <= quadruplex_now[num_quad_now][0][0]):
num_quad_now -= 1
num_quad_now += 1
if num_quad_now != 0:
quadruplex_now = quadruplex_now[:num_quad_now]
quadruplexes.extend(quadruplex_now)
try:
quadruplexes.extend(quadruplex_next)
except:
pass
del quadruplexes_list_
return quadruplexes
def group_to_ranges(self, groups, fasta_id):
ranges = []
for group in tqdm(groups, desc='Converting to ranges', disable = self.verbose):
start = group[0][0]
end = group[self.nquadruplets-1][0]+group[self.nquadruplets-1][2]-1
ranges.append((fasta_id, start, end))
return ranges
def prepare_quadruplets_toprint(self, quadruplets, quadruplets_sequences, tqdm_keep_silence=None):
tqdm_keep_silence = self.verbose if tqdm_keep_silence is None else tqdm_keep_silence
quadruplets_toprint = []
for quad, seq in tqdm(list(zip(quadruplets, quadruplets_sequences)),
desc='Postprocessing quadruplets', disable=tqdm_keep_silence):
quad = list(quad)
quad.append(seq)
quadruplets_toprint.append(quad)
return quadruplets_toprint
def prepare_quadruplexes_toprint(self, quadruplexes, fasta_di, tqdm_keep_silence=None):
tqdm_keep_silence = self.verbose if tqdm_keep_silence is None else tqdm_keep_silence
quadruplexes_toprint = []
[(shift, fasta)] = fasta_di.items()
for quadruplex in tqdm(quadruplexes, desc='Postprocessing quadruplexes', disable=tqdm_keep_silence):
seq = ''
quadruplex_toprint = []
for qu1, qu2 in zip(quadruplex[:-2], quadruplex[1:-1]):
seq = seq + fasta[(qu1[0]-shift):(qu1[0]+qu1[2]-shift)].upper()+\
fasta[(qu1[0]+qu1[2]-shift):(qu2[0]-shift)].lower()
quadruplex_toprint.extend(list(qu1))
quadruplex_toprint.extend(list(qu2))
quadruplex_toprint.append(quadruplex[-1])
seq = seq+fasta[(qu2[0]-shift):(qu2[0]+qu2[2]-shift)].upper()
quadruplex_toprint.append(seq)
quadruplexes_toprint.append(tuple(quadruplex_toprint))
return quadruplexes_toprint
def prepare_groups_toprint(self, groups, fasta, tqdm_keep_silence=None):
tqdm_keep_silence = self.verbose if tqdm_keep_silence is None else tqdm_keep_silence
groups_toprint = []
for group in tqdm(groups, desc='Postprocessing groups', disable=tqdm_keep_silence):
seq = ''
group_toprint = []
for qu1, qu2 in zip(group[:-2], group[1:-1]):
seq = seq + fasta[qu1[0]:(qu1[0]+qu1[2])].upper()+fasta[(qu1[0]+qu1[2]):qu2[0]].lower()
group_toprint.extend(qu1)
group_toprint.extend(qu2)
group_toprint.append(group[-1])
seq = seq+fasta[qu2[0]:(qu2[0]+qu2[2])].upper()
group_toprint.append(seq)
groups_toprint.append(tuple(group_toprint))
return groups_toprint
def split_args_for_prepare_quadruplexes_toprint(self, quadruplexes, fasta, n):
quad_len = len(quadruplexes) // n
minimal_chunk_length = (self.q + self.len_bulge + self.L)*(self.nquadruplets)
parts = list(range(0, len(quadruplexes), quad_len))[1:]
if len(quadruplexes) % n != 0:
parts = parts[:-1]
quadruplexes_parts = [quadruplexes[start:end] for start, end in zip(
[0]+parts, parts+[len(quadruplexes)])]
fasta_parts_coordinates = [(quadruplex_set[0][0][0], quadruplex_set[-1][-2][0]+minimal_chunk_length)
for quadruplex_set in quadruplexes_parts]
fasta_parts = [{start:fasta[start:end]} for start, end in fasta_parts_coordinates]
show_status = [True]*(len(quadruplexes_parts)-1)+[None]
return list(zip(quadruplexes_parts, fasta_parts, show_status))
def postprocess_wrapper(self, kwargs):
'''
args: {'args': args, 'func':function}
'''
return kwargs['func'](*kwargs['args'])
def save_tables(self, df, columns, fasta_id, which_table):
n = len(columns)
with open('{}/{}_{}'.format(self.output_path, fasta_id, which_table), 'w') as f:
if n == 4:
f.write('{}\t{}\t{}\t{}\n'.format(columns[0], columns[1], columns[2], columns[3]))
for row in df:
f.write('{}\t{}\t{}\t{}\n'.format(row[0], row[1], row[2], row[3]))
elif n == 14:
f.write('{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\n'.format(
columns[0], columns[1], columns[2], columns[3],
columns[4], columns[5], columns[6], columns[7],
columns[8], columns[9], columns[10], columns[11],
columns[12], columns[13]))
for row in df:
f.write('{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\n'.format(
row[0], row[1], row[2], row[3],
row[4], row[5], row[6], row[7],
row[8], row[9], row[10], row[11],
row[12], row[13]))
elif n==3:
for row in df:
f.write('{}\t{}\t{}\n'.format(row[0], row[1], row[2]))
def save_tables_wrapper(self, args):
return self.save_tables(*args)
def description_file(self):
all_members = self.__dict__.keys()
columns_description = '''\n\nColumns Description
Qudruplets File:
Start: an index of a quadruplet beginning
Number of Defects: a total number of the mismatches or a number of bulges
Length: a length of a quadruplet
Sequence: quadruplet sequences if not suppressed
Quadruplex & Group Files:
Qi-Start: an index of a quadruplet beginning
Qi-Defects: a total number of the mismatches or a number of bulges
Qi-Length: a length of a quadruplet
Defective: a number of quadruplets with defects (mismatches or bulges)
Sequence: a sequence of a quadruplex with loops if not suppressed, quadruplets are uppercase
Ranges File: bed-format
Fasta ID: fasta id
Start: an index of a quadruplex beginning
End: an index of the end of the quadruplex
'''
description_file = 'Parametres\n'+'\n'.join(['\t%s = %s'%(item, self.__dict__[item]) for item in all_members if (not item.startswith("_")) & ('nquadruplets' not in item)]) + columns_description
with open('%s/description.txt'%(self.output_path), 'w') as f:
f.write(description_file)
def run(self, print_sequences=True, output_type = [4]):
print('Loading %s'%self.fasta_file)
sequences = self.load_fasta()
print('This fasta file contains %d sequences.'%len(sequences))
for fasta, fasta_id in sequences:
quadruplets, quadruplets_sequences, quadruplexes, groups, ranges, quadruplets_toprint, quadruplexes_toprint, groups_toprint = [[] for _ in range(8)]  # independent lists; [[]]*8 would alias one shared list
print('Processing %s:'%fasta_id)
if (self.nthreads == 1) or (self.len_bulge == 0) or (self.max_bulge == 0):
quadruplets, quadruplets_sequences = self.find_quadruplets(fasta) if ((self.len_bulge > 0) or (self.max_bulge != 0)) else self.find_quadruplets_without_bulges(fasta)
else:
quadruplets, quadruplets_sequences = self.find_quadruplets_in_parallel(fasta)
if output_type[-1] > 0:
if (self.nthreads == 1):
quadruplexes = self.find_quadruplexes(quadruplets)
else:
quadruplexes = self.find_quadruplexes_in_parallel(quadruplets)
if output_type[-1] > 1:
groups = self.group_quadruplexes(quadruplexes)
if output_type[-1] > 2:
ranges = self.group_to_ranges(groups, fasta_id)
columns_set1 = ['Start', 'Number of Defects', 'Length']
columns_set2 = []
[columns_set2.extend(['Q%d-Start'%i, 'Q%d-Defects'%i, 'Q%d-Length'%i]) for i in range(1, self.nquadruplets+1)]
columns_set2.extend(['Defective'])
if output_type[0] < 3:
k = sum([0 if ind_num in output_type else 1 for ind_num in [0, 2]])
if print_sequences:
if self.nthreads > 1:
pool = Pool(processes=self.nthreads)
args_quadruplexes = []
n = 1
if 1 in output_type:
if self.nthreads - k > 2:
n = self.nthreads
args_quadruplexes = self.split_args_for_prepare_quadruplexes_toprint(quadruplexes, fasta, n)
elif self.nthreads - k > 1:
n = self.nthreads - k
args_quadruplexes = self.split_args_for_prepare_quadruplexes_toprint(quadruplexes, fasta, n)
else:
n = 1
args_quadruplexes = {0:fasta}
args_dict = {0: [(quadruplets, quadruplets_sequences)],
1: args_quadruplexes,
2: [(groups, fasta)]}
func_dict = {0: [self.prepare_quadruplets_toprint],
1: [self.prepare_quadruplexes_toprint]*n,
2: [self.prepare_groups_toprint]}
results_inds_dict = {0: [0],
1: [1]*n,
2: [2]
}
args_all = []
functions = []
results_inds = []
for output_ind in output_type:
if output_ind < 3:
functions.extend(func_dict[output_ind])
args_all.extend(args_dict[output_ind])
results_inds.extend(results_inds_dict[output_ind])
uni, inds, counts = np.unique(results_inds, return_index=True, return_counts=True)
slice_dict = {}
for un, ind, count in zip(uni, inds, counts):
slice_dict[un] = (ind,(ind+count))
results = pool.map(self.postprocess_wrapper, ({'func':func, 'args': args}
for func, args in zip(functions, args_all)))
if 0 in slice_dict.keys():
[quadruplets_toprint] = results[slice_dict[0][0]:slice_dict[0][1]]
if 1 in slice_dict.keys():
quadruplexes_toprint_all = results[slice_dict[1][0]:slice_dict[1][1]]
quadruplexes_toprint = []
[quadruplexes_toprint.extend(quad) for quad in quadruplexes_toprint_all];
if 2 in slice_dict.keys():
[groups_toprint] = results[slice_dict[2][0]:slice_dict[2][1]]
pool.close()
pool.join()
else:
if 0 in output_type:
quadruplets_toprint = self.prepare_quadruplets_toprint(quadruplets, quadruplets_sequences)
if 1 in output_type:
quadruplexes_toprint = self.prepare_quadruplexes_toprint(quadruplexes, {0:fasta})
if 2 in output_type:
groups_toprint = self.prepare_groups_toprint(groups, fasta)
columns_set1.extend(['Sequence'])
columns_set2.extend(['Sequence'])
else:
if 0 in output_type:
quadruplets_toprint = quadruplets
if 1 in output_type:
quadruplexes_toprint = []
for quadruplex in quadruplexes:
seq = ''
quadruplex_toprint = []
for qu1 in quadruplex[:-1]:
quadruplex_toprint.extend(qu1)
quadruplex_toprint.append(quadruplex[-1])
quadruplexes_toprint.append(tuple(quadruplex_toprint))
if 2 in output_type:
groups_toprint = []
for group in groups:
seq = ''
group_toprint = []
for qu1 in group[:-1]:
group_toprint.extend(qu1)
group_toprint.append(group[-1])
groups_toprint.append(tuple(group_toprint))
for i in tqdm(range(1), desc='Saving tables', disable=self.verbose):
pool = Pool(processes=self.nthreads)
data = np.array([(quadruplets_toprint, columns_set1, fasta_id, 'quadruplets.csv'),
(quadruplexes_toprint, columns_set2, fasta_id, 'quadruplexes.csv'),
(groups_toprint, columns_set2, fasta_id, 'groups.csv'),
(ranges, ['Fasta ID', 'Start', 'End'], fasta_id, 'ranges.bed')])[output_type]
pool.map(self.save_tables_wrapper, data)
pool.close()
pool.join()
self.description_file()
print('Finished')
# Disable prints
def blockPrint():
sys.stdout = open(os.devnull, 'w')
def main():
parser = argparse.ArgumentParser(prog='ImGQFinder', description='The tool for finding G-, C- quadruplexes. The output positions are represented in a zero based counting.')
parser.add_argument('-i', '--input', help='Assembly scaffolds/contigs or full genomes, required.', required=True)
parser.add_argument('-o', '--output', default='', help='Name/path of a folder for output files. Saves to the current folder if not provided.')
parser.add_argument('-GC', default='G', help='Quad type, G- or C-. By default, G.')
parser.add_argument('-L', default=7, help='Maximum loop length. By default, 7.')
parser.add_argument('-q', default=4, help="The length of a quadruplet.") # the length of a tetrad
parser.add_argument('-nq', '--nquadruplets', default=4, help=argparse.SUPPRESS) # 'Number of quadruplets. By default, 4.'
parser.add_argument('-mdef', default=1, help='Allowed number of defective tetrads. By default, 1.')
parser.add_argument('-bulgelen', default=1, help='Total length of bulges in one quadruplet. By default, 1.')
parser.add_argument('-maxbulge', default=1, help='Maximum number of bulges per quadruplet. By default, 1.')
parser.add_argument('-bp', '--bulge_priority', action='store_true', help='By default, quadrouplexes with shorter bulge or without them are preferable while grouping. This behaviour can be changed with this parameter.')
parser.add_argument('-tetdef', default=1, help='Allowed number of defective nucleotides in tetrads. By default, 1.')
parser.add_argument('-ns', '--no-sequences', action='store_true', help='Not to include sequences to the output.')
parser.add_argument('-r', '--repeats', action='store_true', help='To include soft-masked genome areas. By default, not included.')
parser.add_argument('-v', '--verbose', action='store_true', help='Show the status of processing or not. By default print stages info.')
parser.add_argument('--nthreads', default=1, help='Number of kernels to use.')
parser.add_argument('--output_type', default=['all'], nargs='+', help='List the numbers of file types you need the tool to generate or write all if you want all files. All - is the default. 0 - quadruplets, 1 - quadruplexes, 2 - groups, 3 - ranges. For example, --output_type 1 2 will generate only 2 files: quadruplexes and groups.')
args = parser.parse_args()
if not os.path.isdir(args.output):
os.mkdir(args.output)
#args.output = os.path.dirname(args.output)
if args.verbose:
blockPrint()
args.output_type = [atype.lower() for atype in args.output_type]
output_type_dict = {'all':4, '0':0, '1':1, '2':2, '3':3, '4':4}
output_type_dict_report = {'all':'all', '0':'quadruplets', '1':'quadruplexes', '2':'groups', '3':'ranges', '4':'all'}
output_type = sorted([output_type_dict[user_type] for user_type in args.output_type if user_type in list(output_type_dict.keys())])
output_type_report = [output_type_dict_report[user_type] for user_type in args.output_type if user_type in list(output_type_dict.keys())]
if output_type[-1] == 4:
output_type = [0, 1, 2, 3]
output_type_report = [output_type_dict_report[user_type] for user_type in ['0', '1', '2', '3']]
if 'all' in output_type:
output_type = ['quadruplets', 'quadruplexes', 'groups', 'ranges']
if len(output_type) == 1:
print('The ImGQfinder will generate %s file.'%(output_type_report[0]))
else:
print('The ImGQfinder will generate %s and %s files.'%(', '.join(output_type_report[:-1]), output_type_report[-1]))
if int(args.mdef) < int(args.tetdef):
print('Warning: The allowed number of defective nucleotides per tetrad (-tetdef) is greater than the allowed number of defective tetrads (-mdef).', end='\n\n')
finder = QuadruplexFinder(args.input, output_path = args.output, verbose = args.verbose, repeats=args.repeats,
GC=args.GC, L=int(args.L) , q=int(args.q), nquadruplets=int(args.nquadruplets), mdef=int(args.mdef), tetdef=int(args.tetdef),
len_bulge = int(args.bulgelen), max_bulge = int(args.maxbulge), bulge_priority = args.bulge_priority, nthreads = int(args.nthreads))
finder.run(print_sequences= not args.no_sequences, output_type = output_type)
if __name__ == '__main__':
main()
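# Example invocation (illustrative; the script and FASTA file names are hypothetical):
#   python imgqfinder.py -i genome.fa -o results -GC G -L 7 -mdef 1 -tetdef 1 --nthreads 4 --output_type 1 2
# With --output_type 1 2 the tool writes <fasta_id>_quadruplexes.csv and <fasta_id>_groups.csv
# plus description.txt into ./results.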
|
py | 1a39672487a44109089f4db8aa90179430c78e68 | import time
import wiotp.sdk.device
def myCommandCallback(cmd):
print("Command received: %s" % cmd.data)
# Configure
myConfig = {
"identity": {
"orgId": "j4fntv",
"typeId": "Cambien",
"deviceId": "cambien001"
},
"auth": {
"token": "12345678"
},
"options": {
"domain": "internetofthings.ibmcloud.com",
# "logLevel": "error|warning|info|debug",
"mqtt": {
"port": 8883,
"transport": "websockets",
"cleanStart": True,
"sessionExpiry": 3600,
"keepAlive": 60,
# "caFile": "/path/to/certificateAuthorityFile.pem"
}
}
}
client = wiotp.sdk.device.DeviceClient(config=myConfig, logHandlers=None)
client.commandCallback = myCommandCallback
# Connect
client.connect()
# # Send Data
# for x in range(2, 30, 3):
# myData={'name' : 'foo', 'cpu' : x, 'mem' : 50}
# client.publishEvent(eventId="status", msgFormat="json", data=myData, qos=2, onPublish=None)
# Connect and send datapoint(s) into the cloud
# deviceCli.connect()
for x in range(0, 1000):
data = {"simpledev": "ok", "x": x}
def myOnPublishCallback():
print("Confirmed event %s received by IoTF\n" % x)
success = client.publishEvent("test", "json", data, qos=0, onPublish=myOnPublishCallback)
if not success:
print("Not connected to IoTF")
time.sleep(500)
# Disconnect
client.disconnect(); |
py | 1a39675f9543f41cd74ffc76341aebb8cf77f94c | from django.db import models
from django.urls import reverse
from django.utils import timezone
from django_countries import fields
from core.dj_import import get_user_model
from core.models import IntegerRangeField
from core.utils import getReverseWithUpdatedQuery
from models.models import Model
from brands.models import Brand
from categories.models import Category
from searching.models import Search
from ebayinfo import EBAY_SHIPPING_CHOICES
from ebayinfo.models import CategoryHierarchy, Market, EbayCategory
User = get_user_model()
# Item IDs are unique across all eBay sites
# http://developer.ebay.com/devzone/shopping/docs/callref/getsingleitem.html
class ItemFound(models.Model):
EBAY_SHIPPING_CHOICES = EBAY_SHIPPING_CHOICES
iItemNumb = models.BigIntegerField( 'item number',
primary_key = True )
cTitle = models.CharField( 'item title', max_length = 80 )
cSubTitle = models.CharField( 'item sub title', max_length = 80,
null = True, blank = True )
cLocation = models.CharField( 'location',
max_length = 58 )
cCountry = fields.CountryField( "country" )
cMarket = models.CharField( 'market Global ID',
max_length = 14 )
iEbaySiteID = models.ForeignKey( Market, on_delete=models.CASCADE,
verbose_name = 'ebay site ID (PK)', db_index=True )
cGalleryURL = models.CharField( 'gallery pic URL',
max_length = 88, null = True, blank = True )
cEbayItemURL = models.CharField( 'ebay item URL',
max_length =188 )
tTimeBeg = models.DateTimeField( 'beginning date/time',null=True )
tTimeEnd = models.DateTimeField( 'ending date/time', null=True )
bBestOfferable = models.BooleanField(
'best offer enabled?', default = False )
bBuyItNowable = models.BooleanField(
'buy it now enabled?',default = False )
cListingType = models.CharField(
'listing type', max_length = 15 )
lLocalCurrency = models.CharField(
'local currency', max_length = 3, default = 'USD' )
lCurrentPrice = models.DecimalField( 'current price',
max_digits = 10, decimal_places = 2,
null = True, blank = True ) # use DecimalField not MoneyField
dCurrentPrice = models.DecimalField( # form was throwing nonsense error
'current price (converted to USD)', # for MoneyField
max_digits=10, decimal_places=2, # but not for
db_index = False ) # DecimalField
lBuyItNowPrice = models.DecimalField( 'buy it now price',
max_digits = 10, decimal_places = 2,
null = True, blank = True )
dBuyItNowPrice = models.DecimalField(
'buy it now price (converted to USD)',
max_digits=10, decimal_places=2,
null = True, blank = True )
iShippingType = models.PositiveSmallIntegerField(
'shipping type',
choices = EBAY_SHIPPING_CHOICES,
null = True ) # data prior to Feb 2019 d/n have
iHandlingTime = models.PositiveSmallIntegerField(
'handling time',
null = True, blank = True ) # optional
iCategoryID = models.ForeignKey( EbayCategory,
on_delete=models.DO_NOTHING,
verbose_name = 'primary category ID',
related_name = 'ebay_primary_category',
null = True, blank = True ) # ebay sends but
# EbayCategory table is extremely slow
# CategoryHierarchy has relevant info & is much faster
# but need to get this ebay category ID from API
# to look up CategoryHierarchy
cCategory = models.CharField( 'primary category',
max_length = 48 )
iCatHeirarchy = models.ForeignKey( CategoryHierarchy,
on_delete=models.DO_NOTHING,
verbose_name = 'category hierarchy (primary)',
related_name = 'primary_category',
null = True, blank = True )
i2ndCategoryID = models.ForeignKey( EbayCategory, # optional
on_delete=models.CASCADE,
verbose_name = 'secondary category ID (optional)',
related_name = 'ebay_secondary_category',
null = True, blank = True ) # ebay sends but
# EbayCategory table is extremely slow
# CategoryHierarchy has relevant info & is much faster
# but need to get this ebay category ID from API
# to look up CategoryHierarchy
c2ndCategory = models.CharField( 'secondary category (optional)',
max_length = 48, null = True, blank = True )
i2ndCatHeirarchy= models.ForeignKey( CategoryHierarchy,
on_delete=models.DO_NOTHING,
verbose_name = 'category hierarchy (secondary)',
related_name = 'secondary_category',
null = True, blank = True )
# condition is optional but may become required in the future
# https://developer.ebay.com/DevZone/guides/ebayfeatures/Development/Desc-ItemCondition.html
iConditionID = models.IntegerField( 'condition ID',
null = True, blank = True )
cCondition = models.CharField( 'condition display name',
max_length = 28, null = True, blank = True )
cSellingState = models.CharField( 'selling state',
max_length = 18 )
bCancelledItem = models.BooleanField(
'Invalid Or Non-Existent Item Number',default = False )
tCreate = models.DateTimeField( 'created on',
db_index = True, auto_now_add= True )
tRetrieved = models.DateTimeField( 'retrieved info',
null = True, blank = True )
tRetrieveFinal = models.DateTimeField( 'retrieved info after end',
null = True, blank = True )
def __str__(self):
return self.cTitle
def get_absolute_url(self):
#
return reverse(
'finders:detail', kwargs = { 'pk': self.pk } )
class Meta:
verbose_name_plural = 'itemsfound'
db_table = verbose_name_plural
class UserItemFound(models.Model):
iItemNumb = models.ForeignKey( ItemFound, on_delete=models.CASCADE )
iHitStars = IntegerRangeField(
'hit stars', null = True, db_index = True,
min_value = 0, max_value = 1000, default = 0 )
bGetResult = models.BooleanField( 'get results?',
default = False )
tLook4Hits = models.DateTimeField(
'assessed interest date/time', null = True )
iSearch = models.ForeignKey( Search,
on_delete=models.CASCADE,
verbose_name = 'Search that found this item' )
iModel = models.ForeignKey( Model, on_delete=models.CASCADE,
null = True, blank = True,
verbose_name = 'Model Name/Number',
help_text = 'You can display models for a particular '
'brand by changing to that brand (just below), '
'hit save, then edit again' )
iBrand = models.ForeignKey( Brand, on_delete=models.CASCADE,
null = True, blank = True, verbose_name = 'Brand' )
iCategory = models.ForeignKey( Category, on_delete=models.CASCADE,
null = True, blank = True,
verbose_name = 'Category' )
cWhereCategory = models.CharField( 'where category was found',
default = 'title',
max_length = 10 ) # title hierarchy1 hierarchy2
bListExclude = models.BooleanField( 'exclude from listing?',
default = False )
# tGotPics = models.DateTimeField( 'got pictures',
# null = True, blank = True )
bAuction = models.BooleanField(
'Auction or Auction with Buy It Now',default = False )
iUser = models.ForeignKey( User, on_delete=models.CASCADE,
verbose_name = 'Owner')
#
# yes the col below repeats the col in ItemFound, the normalized place
# but after writing the query to get the open auctions for a user, and
# after considering that query's load if this project is a success,
# it is clear that de-normalization is the way to go!!!
# besides, time end is fixed when a seller puts up an item for auction
# this is not a variable that will ever be maintained, once set, it is
# absolutely fixed - seller's only option is to cancel and resubmit
# 2019-08-27
#
tTimeEnd = models.DateTimeField( 'ending date/time',
null=True, db_index = True )
#
tCreate = models.DateTimeField( 'created on',
default=timezone.now, db_index = True )
tModify = models.DateTimeField( 'updated on', auto_now = True )
tRetrieved = models.DateTimeField( 'retrieved info',
null = True, blank = True )
tRetrieveFinal = models.DateTimeField( 'retrieved info after end',
null = True, blank = True )
tPutInKeepers = models.DateTimeField( 'User has Keeper row',
null = True, blank = True )
def __str__(self):
return self.iItemNumb.cTitle
class Meta:
verbose_name_plural = 'useritemsfound'
db_table = verbose_name_plural
unique_together = (
'iItemNumb', 'iUser', 'iModel', 'iBrand', 'iCategory' )
def get_absolute_url(self):
#
oUserFinder = UserFinder.objects.get(
iItemNumb_id = self.iItemNumb_id,
iUser = self.iUser )
#
return getReverseWithUpdatedQuery(
'finders:detail',
kwargs = { 'pk': oUserFinder.pk, 'tModify': timezone.now() } )
def get_edit_url(self):
#
return reverse(
'finders:edit', kwargs = { 'pk': self.pk } )
class UserFinder(models.Model):
#
# not normalized but this allows fast selection of finders for a user
# one row per item
# this table can now drive the finder listing for a user all by itself
#
iItemNumb = models.ForeignKey( ItemFound, on_delete=models.CASCADE,
verbose_name = 'eBay Item Number' )
iHitStars = IntegerRangeField(
'hit stars (max for item)', null = True,
min_value = 0, max_value = 1000, default = 0 )
cTitle = models.CharField( 'item title',
max_length = 80, null=True )
cMarket = models.CharField( 'market Global ID',
max_length = 14, null=True )
cListingType = models.CharField( 'listing type',
max_length = 15, null=True )
tTimeEnd = models.DateTimeField( 'ending date/time', null=True )
iUser = models.ForeignKey( User, on_delete=models.CASCADE,
verbose_name = 'Owner' )
bGetResult = models.BooleanField( 'get results?',
null = True, default = False )
bListExclude = models.BooleanField( 'exclude from listing?',
null = True, default = False )
iMaxModel = models.IntegerField( 'model hit with most stars',
null = True, default = False )
cLookFor = models.TextField( 'dummy for search compatibility',
null=True, blank = True )
#
def __str__(self):
# return '%s - %s' % ( self.iItemNumb, self.iUser )
return self.cTitle
class Meta:
verbose_name_plural = 'userfinders'
db_table = verbose_name_plural
unique_together = ('iItemNumb', 'iUser' )
def get_absolute_url(self):
#
return reverse(
'finders:detail', kwargs = { 'pk': self.pk } )
'''
truncate table userfinders ;
insert into userfinders ( "iItemNumb_id", "iUser_id" )
select distinct "iItemNumb_id", "iUser_id" from useritemsfound uif
where exists
( select 1 from itemsfound if
where
if."iItemNumb" = uif."iItemNumb_id" and
if."tRetrieved" is null ) ;
update userfinders uf
set "iHitStars" =
( select max( uif."iHitStars" )
from useritemsfound uif
where
uif."iItemNumb_id" = uf."iItemNumb_id" and
uif."iUser_id" = uf."iUser_id" ) ;
delete from userfinders where "iHitStars" = 0 ;
update userfinders uf
set "iMaxModel" =
( select distinct on (uif."iHitStars") uif."iModel_id"
from useritemsfound uif
where
uif."iItemNumb_id" = uf."iItemNumb_id" and
uif."iUser_id" = uf."iUser_id" and
uif."iHitStars" = uf."iHitStars" ) ;
update userfinders uf
set "bGetResult" = true where exists
( select 1 from useritemsfound uif
where
uif."iItemNumb_id" = uf."iItemNumb_id" and
uif."iUser_id" = uf."iUser_id" and
uif."bGetResult" = true ) ;
temporary 2019-12-26
update useritemsfound uif
set "bGetResult" = true where exists
( select 1 from userfinders uf
where
uf."iItemNumb_id" = uif."iItemNumb_id" and
uf."iUser_id" = uif."iUser_id" and
uf."bGetResult" = true ) ;
update userfinders uf
set "bListExclude" = false ;
update userfinders uf
set "bListExclude" = true where exists
( select 1 from useritemsfound uif
where
uif."iItemNumb_id" = uf."iItemNumb_id" and
uif."iUser_id" = uf."iUser_id" and
uif."bListExclude" = true ) ;
temporary 2019-12-26:
update useritemsfound uif
set "bListExclude" = true where exists
( select 1 from userfinders uf
where
uf."iItemNumb_id" = uif."iItemNumb_id" and
uf."iUser_id" = uif."iUser_id" and
uf."bListExclude" = true ) ;
update userfinders uf
set "cTitle" = if."cTitle",
"cMarket" = if."cMarket",
"cListingType" = if."cListingType",
"tTimeEnd" = if."tTimeEnd"
from itemsfound if
where if."iItemNumb" = uf."iItemNumb_id" ;
double chek for strays:
select count(*) from userfinders where "tTimeEnd" = null ;
class ItemFoundTemp(models.Model):
iItemNumb = models.ForeignKey( ItemFound, on_delete=models.CASCADE )
iHitStars = IntegerRangeField(
'hit stars', null = True,
min_value = 0, max_value = 1000, default = 0 )
iSearch = models.ForeignKey( Search, on_delete=models.CASCADE,
verbose_name = 'Search that first found this item' )
iModel = models.ForeignKey( Model, on_delete=models.CASCADE,
null = True )
iBrand = models.ForeignKey( Brand, on_delete=models.CASCADE,
null = True )
iCategory = models.ForeignKey( Category, on_delete=models.CASCADE,
null = True )
iStarsModel = IntegerRangeField( null = True,
min_value = 0, max_value = 10, default = 1 )
iStarsBrand = IntegerRangeField( null = True,
min_value = 0, max_value = 10, default = 1 )
iStarsCategory = IntegerRangeField( null = True,
min_value = 0, max_value = 10, default = 1 )
cFoundModel = models.CharField(
'model name/number found in auction title',
max_length = 48, null = True )
iFoundModelLen = models.PositiveSmallIntegerField( default = 0 )
bModelKeyWords = models.BooleanField(
'model has key words and they are in auction title?',
null = True, default = False )
cModelAlphaNum = models.CharField(
'model name/number alpha num only',
max_length = 48, null = True )
cTitleLeftOver = models.CharField( 'item title less model match',
max_length = 80, null = True )
cWhereCategory = models.CharField( 'where category was found',
default = 'title',
max_length = 10 ) # title heirarchy1 heirarchy2
bIncludeThis = models.BooleanField(
'include this hit when populating table?',
default = True )
def __str__(self):
#
lOut = [ 'ItemFound - %s' % self.iItemNumb ]
#
for s in vars( self ):
if s.startswith( '_' ): continue
lOut.append( ' %s: %s' % ( s, self.__dict__[s] ) )
#
return '\n'.join( lOut )
class Meta:
verbose_name_plural = 'itemsfoundtemp'
db_table = verbose_name_plural
'''
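# Illustrative ORM sketch (not part of the original app; "request.user" and the one-star
# threshold are assumptions) showing how UserFinder can drive a per-user finder listing:
#   qs = ( UserFinder.objects
#          .filter( iUser = request.user, bListExclude = False, iHitStars__gte = 1 )
#          .order_by( 'tTimeEnd' ) )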
|
py | 1a396b5f85f4a1663c19c15705940c3118c3fc06 | import argparse
import os
import shutil
import sys
from pathlib import Path
from typing import Any, Callable, Dict, Optional, Union
import pkg_resources
import yaml
from apple.util.path import mkdir
def initial_config_file(filename: Union[str, Path]) -> str:
return pkg_resources.resource_string(__name__, f"initial-{filename}").decode()
def create_default_apple_config(root_path: Path, filenames=["config.yaml"]) -> None:
for filename in filenames:
default_config_file_data = initial_config_file(filename)
path = config_path_for_filename(root_path, filename)
mkdir(path.parent)
with open(path, "w") as f:
f.write(default_config_file_data)
def config_path_for_filename(root_path: Path, filename: Union[str, Path]) -> Path:
path_filename = Path(filename)
if path_filename.is_absolute():
return path_filename
return root_path / "config" / filename
def save_config(root_path: Path, filename: Union[str, Path], config_data: Any):
path = config_path_for_filename(root_path, filename)
with open(path.with_suffix("." + str(os.getpid())), "w") as f:
yaml.safe_dump(config_data, f)
shutil.move(str(path.with_suffix("." + str(os.getpid()))), path)
def load_config(
root_path: Path,
filename: Union[str, Path],
sub_config: Optional[str] = None,
exit_on_error=True,
) -> Dict:
path = config_path_for_filename(root_path, filename)
if not path.is_file():
if not exit_on_error:
raise ValueError("Config not found")
print(f"can't find {path}")
print("** please run `apple init` to migrate or create new config files **")
# TODO: fix this hack
sys.exit(-1)
r = yaml.safe_load(open(path, "r"))
if sub_config is not None:
r = r.get(sub_config)
return r
def load_config_cli(root_path: Path, filename: str, sub_config: Optional[str] = None) -> Dict:
"""
Loads configuration from the specified filename, in the config directory,
and then overrides any properties using the passed in command line arguments.
Nested properties in the config file can be used in the command line with ".",
for example --farmer_peer.host. Does not support lists.
"""
config = load_config(root_path, filename, sub_config)
flattened_props = flatten_properties(config)
parser = argparse.ArgumentParser()
for prop_name, value in flattened_props.items():
if type(value) is list:
continue
prop_type: Callable = str2bool if type(value) is bool else type(value) # type: ignore
parser.add_argument(f"--{prop_name}", type=prop_type, dest=prop_name)
for key, value in vars(parser.parse_args()).items():
if value is not None:
flattened_props[key] = value
return unflatten_properties(flattened_props)
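# Sketch of the flatten/unflatten round trip used above (the example values are made up):
#   flatten_properties({"farmer_peer": {"host": "localhost", "port": 8447}})
#       -> {"farmer_peer.host": "localhost", "farmer_peer.port": 8447}
#   unflatten_properties({"farmer_peer.host": "localhost", "farmer_peer.port": 8447})
#       -> {"farmer_peer": {"host": "localhost", "port": 8447}}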
def flatten_properties(config: Dict) -> Dict:
properties = {}
for key, value in config.items():
if type(value) is dict:
for key_2, value_2 in flatten_properties(value).items():
properties[key + "." + key_2] = value_2
else:
properties[key] = value
return properties
def unflatten_properties(config: Dict) -> Dict:
properties: Dict = {}
for key, value in config.items():
if "." in key:
add_property(properties, key, value)
else:
properties[key] = value
return properties
def add_property(d: Dict, partial_key: str, value: Any):
key_1, key_2 = partial_key.split(".", maxsplit=1)
if key_1 not in d:
d[key_1] = {}
if "." in key_2:
add_property(d[key_1], key_2, value)
else:
d[key_1][key_2] = value
def str2bool(v: Union[str, bool]) -> bool:
# Source from https://stackoverflow.com/questions/15008758/parsing-boolean-values-with-argparse
if isinstance(v, bool):
return v
if v.lower() in ("yes", "true", "True", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "False", "f", "n", "0"):
return False
else:
raise argparse.ArgumentTypeError("Boolean value expected.")
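# For example, str2bool("yes") and str2bool("1") return True, str2bool("0") returns False,
# and anything else is reported by argparse as an invalid boolean via ArgumentTypeError.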
|
py | 1a396bd5bfb6994ff63200a43c15a616dc3be025 | """Flight Air Map feed entry."""
from datetime import datetime
import logging
import pytz
from aio_geojson_client.feed_entry import FeedEntry
_LOGGER = logging.getLogger(__name__)
class FlightAirMapFeedEntry(FeedEntry):
"""Flight Air Map Incidents feed entry."""
def __init__(self, home_coordinates, feature):
"""Initialise this service."""
super().__init__(home_coordinates, feature)
@property
def title(self) -> str:
"""Return the title of this entry."""
return self._search_in_properties("c")
@property
def external_id(self) -> str:
"""Return the title of this entry."""
return self._search_in_properties("fi")
@property
def flight_num(self) -> str:
"""Return the title of this entry."""
return self._search_in_properties("c")
@property
def aircraft_registration(self) -> str:
"""Return the y of this entry."""
return self._search_in_properties("reg")
@property
def aircraft_icao(self) -> str:
"""Return the y of this entry."""
return self._search_in_properties("aircraft_icao")
@property
def aircraft_type(self) -> str:
"""Return the location of this entry."""
return self._search_in_properties("ai")
@property
def departure_airport(self) -> str:
"""Return the y of this entry."""
return self._search_in_properties("dac")
@property
def arrival_airport(self) -> str:
"""Return the location of this entry."""
arrival_airport = self._search_in_properties("aac")
return arrival_airport
@property
def altitude(self) -> str:
"""Return the location of this entry."""
if self._search_in_properties("a") is not None:
altitude = int(self._search_in_properties("a"))*100
else:
altitude = 0
return altitude
@property
def squawk(self) -> str:
"""Return the location of this entry."""
squawk = self._search_in_properties("sq")
return squawk
@property
def heading(self) -> str:
"""Return the location of this entry."""
heading = self._search_in_properties("h")
if heading is not None:
return heading
return None
@property
def publication_date(self) -> datetime:
"""Return the publication date of this entry."""
last_update = self._search_in_properties("lu")
if last_update is not None:
publication_date = datetime.fromtimestamp(int(last_update), tz=pytz.utc)
return publication_date
return None
|
py | 1a396c3230fd2d78b75d02738a5177eb3c7427de | from redisco import models
class Image(models.Model):
url = models.Attribute(required=True, unique=True)
created_at = models.DateTimeField(auto_now_add=True)
tags = models.ListField(str)
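# Illustrative usage sketch (the URL and tags are made-up values): a redisco model
# instance is persisted to Redis with save().
#   image = Image(url="http://example.com/cat.jpg", tags=["cat", "pet"])
#   image.save()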
|
py | 1a396c816f4011375a399a673a7b03016d35944a | # -*- coding: utf-8 -*-
from __future__ import (absolute_import, division, print_function)
from ansible_collections.ibm.ibm_zos_ims.plugins.module_utils.ims_module_error_messages import ErrorMessages as em # pylint: disable=import-error
from pprint import pprint
import pytest
__metaclass__ = type
ROUTE = "IMS1"
def test_invalid_plex(ansible_zos_module):
hosts = ansible_zos_module
response = hosts.all.ims_command(command='QUERY PGM', plex="Thisplexdoesntexist", route=ROUTE)
for result in response.contacted.values():
assert result['ims_output'][0]['msg'] == em.NON_ZERO_RC_MSG
def test_invalid_characters_for_plex(ansible_zos_module):
hosts = ansible_zos_module
response = hosts.all.ims_command(command='QUERY PGM', plex="$Xe%^&L*(P", route=ROUTE)
for result in response.contacted.values():
assert result['ims_output'][0]['msg'] == em.INVALID_PLEX_MSG
def test_missing_plex(ansible_zos_module):
hosts = ansible_zos_module
response = hosts.all.ims_command(command='QUERY PGM', route=ROUTE)
for result in response.contacted.values():
assert result['ims_output'][0]['msg'] == em.MISSING_PLEX
def test_batch_missing_plex(ansible_zos_module):
hosts = ansible_zos_module
batch_list = [{"command": "QUERY PGM", "route": ROUTE}]
response = hosts.all.ims_command(batch=batch_list)
for result in response.contacted.values():
assert em.MISSING_PLEX.lower() in result['msg']
assert result['changed'] is False
def test_batch_malformed_plex_multiple(ansible_zos_module):
hosts = ansible_zos_module
batch_list = [
{"command": "QUERY PGM", "route": ROUTE},
{"command": "QUERY PGM", "plex": "Thisplexdoesntexist", "route": ROUTE},
{"command": "QUERY PGM", "plex": "$Xe%^&L*(P", "route": ROUTE}
]
response = hosts.all.ims_command(batch=batch_list)
for result in response.contacted.values():
pprint(result)
assert em.MISSING_PLEX.lower() in result['msg']
assert result['changed'] is False
def test_batch_correct_and_malformed_plex(ansible_zos_module):
hosts = ansible_zos_module
batch_list = [
{"command": "QUERY PGM", "plex": "PLEX1", "route": ROUTE},
{"command": "QUERY PGM", "plex": "Thisplexdoesntexist", "route": ROUTE}
]
response = hosts.all.ims_command(batch=batch_list)
for result in response.contacted.values():
pprint(result)
assert result['changed'] is True
assert int(result['ims_output'][0]['command_return']['ctl.rc']) == 0
assert result['ims_output'][1]['msg'] == em.NON_ZERO_RC_MSG
|
py | 1a396cd16d971fd43d5ddff07782aefc657e7925 | from builtins import object
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import signal
"""
A timer class so methods can only execute for a certain number of seconds,
then return a default value
@author: Madison Bahmer @ IST Research
Last Updated 9/1/15
"""
class MethodTimer(object):
'''
-------------------------------------
Initial code from http://pguides.net/python-tutorial/python-timeout-a-function/
Minor modifications made to work
with classes, parameters, and new timeout name.
Use above your function definition:
@MethodTimer.timeout(seconds_to_wait, default_return_value)
def myfunc(params):
'''
class DecoratorTimeout(Exception):
'''
Simple class in order to raise exception
'''
pass
@staticmethod
def timeout(timeout_time, default):
'''
Decorate a method so it is required to execute in a given time period,
or return a default value.
'''
def timeout_function(f):
def f2(*args):
def timeout_handler(signum, frame):
raise MethodTimer.DecoratorTimeout()
old_handler = signal.signal(signal.SIGALRM, timeout_handler)
# trigger alarm in timeout_time seconds
signal.alarm(timeout_time)
try:
retval = f(*args)
except MethodTimer.DecoratorTimeout:
return default
finally:
signal.signal(signal.SIGALRM, old_handler)
signal.alarm(0)
return retval
return f2
return timeout_function
'''
-------------------------------------
'''
def __init__(self):
pass
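# Illustrative, runnable sketch (not part of the original module); like the decorator
# itself it relies on SIGALRM, so it only works on Unix-like systems.
if __name__ == '__main__':
    import time
    @MethodTimer.timeout(2, 'timed out')
    def sleepy(seconds):
        time.sleep(seconds)
        return 'finished'
    print(sleepy(1))  # 'finished' - completes within the 2 second limit
    print(sleepy(3))  # 'timed out' - the default value is returned instead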
|
py | 1a396d192481a45521d7ab8997d74da86643a27d | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "mBusi.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
|
py | 1a396dba004680019360b2497e9fd92782b419ff | # -*- test-case-name: twisted.test.test_adbapi -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
An asynchronous mapping to U{DB-API
2.0<http://www.python.org/topics/database/DatabaseAPI-2.0.html>}.
"""
import sys
from twisted.internet import threads
from twisted.python import reflect, log, compat
class ConnectionLost(Exception):
"""
This exception means that a db connection has been lost. Client code may
try again.
"""
class Connection:
"""
A wrapper for a DB-API connection instance.
The wrapper passes almost everything to the wrapped connection and so has
the same API. However, the L{Connection} knows about its pool and also
    handles reconnecting should the real connection die.
"""
def __init__(self, pool):
self._pool = pool
self._connection = None
self.reconnect()
def close(self):
# The way adbapi works right now means that closing a connection is
# a really bad thing as it leaves a dead connection associated with
# a thread in the thread pool.
# Really, I think closing a pooled connection should return it to the
# pool but that's handled by the runWithConnection method already so,
# rather than upsetting anyone by raising an exception, let's ignore
# the request
pass
def rollback(self):
if not self._pool.reconnect:
self._connection.rollback()
return
try:
self._connection.rollback()
curs = self._connection.cursor()
curs.execute(self._pool.good_sql)
curs.close()
self._connection.commit()
return
except:
log.err(None, "Rollback failed")
self._pool.disconnect(self._connection)
if self._pool.noisy:
log.msg("Connection lost.")
raise ConnectionLost()
def reconnect(self):
if self._connection is not None:
self._pool.disconnect(self._connection)
self._connection = self._pool.connect()
def __getattr__(self, name):
return getattr(self._connection, name)
class Transaction:
"""
A lightweight wrapper for a DB-API 'cursor' object.
Relays attribute access to the DB cursor. That is, you can call
C{execute()}, C{fetchall()}, etc., and they will be called on the
underlying DB-API cursor object. Attributes will also be retrieved from
there.
"""
_cursor = None
def __init__(self, pool, connection):
self._pool = pool
self._connection = connection
self.reopen()
def close(self):
_cursor = self._cursor
self._cursor = None
_cursor.close()
def reopen(self):
if self._cursor is not None:
self.close()
try:
self._cursor = self._connection.cursor()
return
except:
if not self._pool.reconnect:
raise
else:
log.err(None, "Cursor creation failed")
if self._pool.noisy:
log.msg('Connection lost, reconnecting')
self.reconnect()
self._cursor = self._connection.cursor()
def reconnect(self):
self._connection.reconnect()
self._cursor = None
def __getattr__(self, name):
return getattr(self._cursor, name)
class ConnectionPool:
"""
Represent a pool of connections to a DB-API 2.0 compliant database.
@ivar connectionFactory: factory for connections, default to L{Connection}.
@type connectionFactory: any callable.
@ivar transactionFactory: factory for transactions, default to
L{Transaction}.
@type transactionFactory: any callable
@ivar shutdownID: L{None} or a handle on the shutdown event trigger which
will be used to stop the connection pool workers when the reactor
stops.
@ivar _reactor: The reactor which will be used to schedule startup and
shutdown events.
@type _reactor: L{IReactorCore} provider
"""
CP_ARGS = "min max name noisy openfun reconnect good_sql".split()
noisy = False # If true, generate informational log messages
min = 3 # Minimum number of connections in pool
max = 5 # Maximum number of connections in pool
name = None # Name to assign to thread pool for debugging
openfun = None # A function to call on new connections
reconnect = False # Reconnect when connections fail
good_sql = 'select 1' # A query which should always succeed
running = False # True when the pool is operating
connectionFactory = Connection
transactionFactory = Transaction
# Initialize this to None so it's available in close() even if start()
# never runs.
shutdownID = None
def __init__(self, dbapiName, *connargs, **connkw):
"""
Create a new L{ConnectionPool}.
Any positional or keyword arguments other than those documented here
are passed to the DB-API object when connecting. Use these arguments to
pass database names, usernames, passwords, etc.
@param dbapiName: an import string to use to obtain a DB-API compatible
module (e.g. C{'pyPgSQL.PgSQL'})
@param cp_min: the minimum number of connections in pool (default 3)
@param cp_max: the maximum number of connections in pool (default 5)
@param cp_noisy: generate informational log messages during operation
(default C{False})
@param cp_openfun: a callback invoked after every C{connect()} on the
underlying DB-API object. The callback is passed a new DB-API
connection object. This callback can setup per-connection state
such as charset, timezone, etc.
@param cp_reconnect: detect connections which have failed and reconnect
(default C{False}). Failed connections may result in
L{ConnectionLost} exceptions, which indicate the query may need to
be re-sent.
@param cp_good_sql: an sql query which should always succeed and change
no state (default C{'select 1'})
@param cp_reactor: use this reactor instead of the global reactor
(added in Twisted 10.2).
@type cp_reactor: L{IReactorCore} provider
"""
self.dbapiName = dbapiName
self.dbapi = reflect.namedModule(dbapiName)
if getattr(self.dbapi, 'apilevel', None) != '2.0':
log.msg('DB API module not DB API 2.0 compliant.')
if getattr(self.dbapi, 'threadsafety', 0) < 1:
log.msg('DB API module not sufficiently thread-safe.')
reactor = connkw.pop('cp_reactor', None)
if reactor is None:
from twisted.internet import reactor
self._reactor = reactor
self.connargs = connargs
self.connkw = connkw
for arg in self.CP_ARGS:
cpArg = 'cp_%s' % (arg,)
if cpArg in connkw:
setattr(self, arg, connkw[cpArg])
del connkw[cpArg]
self.min = min(self.min, self.max)
self.max = max(self.min, self.max)
# All connections, hashed on thread id
self.connections = {}
# These are optional so import them here
from twisted.python import threadpool
from twisted.python import threadable
self.threadID = threadable.getThreadID
self.threadpool = threadpool.ThreadPool(self.min, self.max)
self.startID = self._reactor.callWhenRunning(self._start)
def _start(self):
self.startID = None
return self.start()
def start(self):
"""
Start the connection pool.
If you are using the reactor normally, this function does *not*
need to be called.
"""
if not self.running:
self.threadpool.start()
self.shutdownID = self._reactor.addSystemEventTrigger(
'during', 'shutdown', self.finalClose)
self.running = True
def runWithConnection(self, func, *args, **kw):
"""
Execute a function with a database connection and return the result.
@param func: A callable object of one argument which will be executed
in a thread with a connection from the pool. It will be passed as
its first argument a L{Connection} instance (whose interface is
mostly identical to that of a connection object for your DB-API
module of choice), and its results will be returned as a
L{Deferred}. If the method raises an exception the transaction will
be rolled back. Otherwise, the transaction will be committed.
B{Note} that this function is B{not} run in the main thread: it
must be threadsafe.
@param *args: positional arguments to be passed to func
@param **kw: keyword arguments to be passed to func
@return: a L{Deferred} which will fire the return value of
C{func(Transaction(...), *args, **kw)}, or a
L{twisted.python.failure.Failure}.
"""
return threads.deferToThreadPool(self._reactor, self.threadpool,
self._runWithConnection,
func, *args, **kw)
def _runWithConnection(self, func, *args, **kw):
conn = self.connectionFactory(self)
try:
result = func(conn, *args, **kw)
conn.commit()
return result
except:
excType, excValue, excTraceback = sys.exc_info()
try:
conn.rollback()
except:
log.err(None, "Rollback failed")
compat.reraise(excValue, excTraceback)
def runInteraction(self, interaction, *args, **kw):
"""
Interact with the database and return the result.
The 'interaction' is a callable object which will be executed in a
thread using a pooled connection. It will be passed an L{Transaction}
object as an argument (whose interface is identical to that of the
database cursor for your DB-API module of choice), and its results will
be returned as a L{Deferred}. If running the method raises an
exception, the transaction will be rolled back. If the method returns a
value, the transaction will be committed.
NOTE that the function you pass is *not* run in the main thread: you
may have to worry about thread-safety in the function you pass to this
if it tries to use non-local objects.
@param interaction: a callable object whose first argument is an
L{adbapi.Transaction}.
@param *args: additional positional arguments to be passed to
interaction
@param **kw: keyword arguments to be passed to interaction
@return: a Deferred which will fire the return value of
C{interaction(Transaction(...), *args, **kw)}, or a
L{twisted.python.failure.Failure}.
"""
return threads.deferToThreadPool(self._reactor, self.threadpool,
self._runInteraction,
interaction, *args, **kw)
def runQuery(self, *args, **kw):
"""
Execute an SQL query and return the result.
A DB-API cursor which will be invoked with C{cursor.execute(*args,
**kw)}. The exact nature of the arguments will depend on the specific
        flavor of DB-API being used, but the first argument in C{*args} will be an
SQL statement. The result of a subsequent C{cursor.fetchall()} will be
fired to the L{Deferred} which is returned. If either the 'execute' or
'fetchall' methods raise an exception, the transaction will be rolled
back and a L{twisted.python.failure.Failure} returned.
The C{*args} and C{**kw} arguments will be passed to the DB-API
cursor's 'execute' method.
@return: a L{Deferred} which will fire the return value of a DB-API
cursor's 'fetchall' method, or a L{twisted.python.failure.Failure}.
"""
return self.runInteraction(self._runQuery, *args, **kw)
def runOperation(self, *args, **kw):
"""
Execute an SQL query and return L{None}.
A DB-API cursor which will be invoked with C{cursor.execute(*args,
**kw)}. The exact nature of the arguments will depend on the specific
flavor of DB-API being used, but the first argument in C{*args} will be
an SQL statement. This method will not attempt to fetch any results
from the query and is thus suitable for C{INSERT}, C{DELETE}, and other
SQL statements which do not return values. If the 'execute' method
raises an exception, the transaction will be rolled back and a
L{Failure} returned.
        The C{*args} and C{**kw} arguments will be passed to the DB-API cursor's
'execute' method.
@return: a L{Deferred} which will fire with L{None} or a
L{twisted.python.failure.Failure}.
"""
return self.runInteraction(self._runOperation, *args, **kw)
def close(self):
"""
Close all pool connections and shutdown the pool.
"""
if self.shutdownID:
self._reactor.removeSystemEventTrigger(self.shutdownID)
self.shutdownID = None
if self.startID:
self._reactor.removeSystemEventTrigger(self.startID)
self.startID = None
self.finalClose()
def finalClose(self):
"""
This should only be called by the shutdown trigger.
"""
self.shutdownID = None
self.threadpool.stop()
self.running = False
for conn in self.connections.values():
self._close(conn)
self.connections.clear()
def connect(self):
"""
Return a database connection when one becomes available.
This method blocks and should be run in a thread from the internal
threadpool. Don't call this method directly from non-threaded code.
Using this method outside the external threadpool may exceed the
maximum number of connections in the pool.
@return: a database connection from the pool.
"""
tid = self.threadID()
conn = self.connections.get(tid)
if conn is None:
if self.noisy:
log.msg('adbapi connecting: %s' % (self.dbapiName,))
conn = self.dbapi.connect(*self.connargs, **self.connkw)
if self.openfun is not None:
self.openfun(conn)
self.connections[tid] = conn
return conn
def disconnect(self, conn):
"""
Disconnect a database connection associated with this pool.
Note: This function should only be used by the same thread which called
L{ConnectionPool.connect}. As with C{connect}, this function is not
used in normal non-threaded Twisted code.
"""
tid = self.threadID()
if conn is not self.connections.get(tid):
raise Exception("wrong connection for thread")
if conn is not None:
self._close(conn)
del self.connections[tid]
def _close(self, conn):
if self.noisy:
log.msg('adbapi closing: %s' % (self.dbapiName,))
try:
conn.close()
except:
log.err(None, "Connection close failed")
def _runInteraction(self, interaction, *args, **kw):
conn = self.connectionFactory(self)
trans = self.transactionFactory(self, conn)
try:
result = interaction(trans, *args, **kw)
trans.close()
conn.commit()
return result
except:
excType, excValue, excTraceback = sys.exc_info()
try:
conn.rollback()
except:
log.err(None, "Rollback failed")
compat.reraise(excValue, excTraceback)
def _runQuery(self, trans, *args, **kw):
trans.execute(*args, **kw)
return trans.fetchall()
def _runOperation(self, trans, *args, **kw):
trans.execute(*args, **kw)
def __getstate__(self):
return {'dbapiName': self.dbapiName,
'min': self.min,
'max': self.max,
'noisy': self.noisy,
'reconnect': self.reconnect,
'good_sql': self.good_sql,
'connargs': self.connargs,
'connkw': self.connkw}
def __setstate__(self, state):
self.__dict__ = state
self.__init__(self.dbapiName, *self.connargs, **self.connkw)
__all__ = ['Transaction', 'ConnectionPool']
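# --- Illustrative usage (added sketch, not part of the original module) ---
# A minimal way to drive the pool defined above. The 'sqlite3' module name,
# the ':memory:' database and the query text are assumptions made only for
# demonstration; any DB-API 2.0 module resolvable by reflect.namedModule
# works the same way.
if __name__ == '__main__':
    from twisted.internet import reactor
    pool = ConnectionPool('sqlite3', ':memory:', check_same_thread=False)
    def show(rows):
        # rows is the result of cursor.fetchall() run in a worker thread.
        print(rows)
    d = pool.runQuery('select 1')
    d.addCallback(show)
    d.addBoth(lambda _: reactor.stop())
    reactor.run()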
|
py | 1a396dd9b74d8313a32119efb277ac4631da24e8 | # coding:utf-8
rarity_id = {1: 549,
2: 551,
3: 553,
4: 555,
5: 557,
6: 559,
7: 561,
8: 563,
'1': 549,
'2': 551,
'3': 553,
'4': 555,
'5': 557,
'6': 559,
'7': 561}
rarity_w = {1: '白',
2: '绿',
3: '蓝',
4: '紫',
7: '以太化',
'1': '白',
'2': '绿',
'3': '蓝',
'4': '紫',
'7': '以太化'}
color_id = {'bg': 6,
'bg2': 7,
'fg': 3,
'fg_d': 2,
'fg_dd': 1,
'fg_l': 4,
'fg_ll': 5,
'shadow': 8,
'canUse': 43}
pic_font = {'default': 'HarmonyOS_Sans_SC_Regular.ttf',
'FFXIV': 'FFXIV_Lodestone_SSF.ttf',
'Eorzea': 'Eorzea.ttf',
'mono': 'JetBrainsMono-Regular.ttf'}
pic_res = {'Item_Cover': 'Item_icon_cover_128.png',
'hq': 'Hq.png',
'meld': 'Melding.png',
'meld+': 'MeldingEX.png'}
food_param = {"BaseParam[0]": ("IsRelative[0]", "Value[0]", "Max[0]", "Value{HQ}[0]", "Max{HQ}[0]"),
"BaseParam[1]": ("IsRelative[1]", "Value[1]", "Max[1]", "Value{HQ}[1]", "Max{HQ}[1]"),
"BaseParam[2]": ("IsRelative[2]", "Value[2]", "Max[2]", "Value{HQ}[2]", "Max{HQ}[2]"),
}
trans_true_false = ['IsUnique', 'IsUntradable', 'IsIndisposable', 'IsDyeable', 'IsCrestWorthy', 'IsCollectable',
'IsAdvancedMeldingPermitted', 'IsPvP', 'IsGlamourous', 'CanBeHq', 'Lot', 'AlwaysCollectable',
]
trans_base_param = {"BaseParam[0]": "BaseParamValue[0]",
"BaseParam[1]": "BaseParamValue[1]",
"BaseParam[2]": "BaseParamValue[2]",
"BaseParam[3]": "BaseParamValue[3]",
"BaseParam[4]": "BaseParamValue[4]",
"BaseParam[5]": "BaseParamValue[5]",
"BaseParam{Special}[0]": "BaseParamValue{Special}[0]",
"BaseParam{Special}[1]": "BaseParamValue{Special}[1]",
"BaseParam{Special}[2]": "BaseParamValue{Special}[2]",
"BaseParam{Special}[3]": "BaseParamValue{Special}[3]",
"BaseParam{Special}[4]": "BaseParamValue{Special}[4]",
"BaseParam{Special}[5]": "BaseParamValue{Special}[5]"}
trans_base_special = {"ItemSpecialBonus": "ItemSpecialBonus{Param}", }
trans_base_param_key = ["BaseParam[0]",
"BaseParam[1]",
"BaseParam[2]",
"BaseParam[3]",
"BaseParam[4]",
"BaseParam[5]"]
trans_base_hq = ["BaseParam{Special}[0]",
"BaseParam{Special}[1]",
"BaseParam{Special}[2]",
"BaseParam{Special}[3]",
"BaseParam{Special}[4]",
"BaseParam{Special}[5]"]
Class_Job = {0: {"Name": "冒险者", "Abbreviation": "ADV", },
1: {"Name": "剑术师", "Abbreviation": "GLA", },
2: {"Name": "格斗家", "Abbreviation": "PGL", },
3: {"Name": "斧术师", "Abbreviation": "MRD", },
4: {"Name": "枪术师", "Abbreviation": "LNC", },
5: {"Name": "弓箭手", "Abbreviation": "ARC", },
6: {"Name": "幻术师", "Abbreviation": "CNJ", },
7: {"Name": "咒术师", "Abbreviation": "THM", },
8: {"Name": "刻木匠", "Abbreviation": "CRP", },
9: {"Name": "锻铁匠", "Abbreviation": "BSM", },
10: {"Name": "铸甲匠", "Abbreviation": "ARM", },
11: {"Name": "雕金匠", "Abbreviation": "GSM", },
12: {"Name": "制革匠", "Abbreviation": "LTW", },
13: {"Name": "裁衣匠", "Abbreviation": "WVR", },
14: {"Name": "炼金术士", "Abbreviation": "ALC", },
15: {"Name": "烹调师", "Abbreviation": "CUL", },
16: {"Name": "采矿工", "Abbreviation": "MIN", },
17: {"Name": "园艺工", "Abbreviation": "BTN", },
18: {"Name": "捕鱼人", "Abbreviation": "FSH", },
19: {"Name": "骑士", "Abbreviation": "PLD", },
20: {"Name": "武僧", "Abbreviation": "MNK", },
21: {"Name": "战士", "Abbreviation": "WAR", },
22: {"Name": "龙骑士", "Abbreviation": "DRG", },
23: {"Name": "吟游诗人", "Abbreviation": "BRD", },
24: {"Name": "白魔法师", "Abbreviation": "WHM", },
25: {"Name": "黑魔法师", "Abbreviation": "BLM", },
26: {"Name": "秘术师", "Abbreviation": "ACN", },
27: {"Name": "召唤师", "Abbreviation": "SMN", },
28: {"Name": "学者", "Abbreviation": "SCH", },
29: {"Name": "双剑师", "Abbreviation": "ROG", },
30: {"Name": "忍者", "Abbreviation": "NIN", },
31: {"Name": "机工士", "Abbreviation": "MCH", },
32: {"Name": "暗黑骑士", "Abbreviation": "DRK", },
33: {"Name": "占星术士", "Abbreviation": "AST", },
34: {"Name": "武士", "Abbreviation": "SAM", },
35: {"Name": "赤魔法师", "Abbreviation": "RDM", },
36: {"Name": "青魔法师", "Abbreviation": "BLU", },
37: {"Name": "绝枪战士", "Abbreviation": "GNB", },
38: {"Name": "舞者", "Abbreviation": "DNC", },
39: {"Name": "", "Abbreviation": "None", },
40: {"Name": "", "Abbreviation": "None", }}
Class_Job_Category = {0: {"Name": "", },
1: {"Name": "所有职业", },
2: {"Name": "剑术师", },
3: {"Name": "格斗家", },
4: {"Name": "斧术师", },
5: {"Name": "枪术师", },
6: {"Name": "弓箭手", },
7: {"Name": "幻术师", },
8: {"Name": "咒术师", },
9: {"Name": "刻木匠", },
10: {"Name": "锻铁匠", },
11: {"Name": "铸甲匠", },
12: {"Name": "雕金匠", },
13: {"Name": "制革匠", },
14: {"Name": "裁衣匠", },
15: {"Name": "炼金术士", },
16: {"Name": "烹调师", },
17: {"Name": "采矿工", },
18: {"Name": "园艺工", },
19: {"Name": "捕鱼人", },
20: {"Name": "骑士", },
21: {"Name": "武僧", },
22: {"Name": "战士", },
23: {"Name": "龙骑士", },
24: {"Name": "吟游诗人", },
25: {"Name": "白魔法师", },
26: {"Name": "黑魔法师", },
27: {"Name": "秘术师", },
28: {"Name": "召唤师", },
29: {"Name": "学者", },
30: {"Name": "战斗精英", },
31: {"Name": "魔法导师", },
32: {"Name": "大地使者", },
33: {"Name": "能工巧匠", },
34: {"Name": "战斗精英 魔法导师", },
35: {"Name": "能工巧匠 大地使者", },
36: {"Name": "剑术师之外的战斗精英", },
37: {"Name": "剑术师 格斗家 斧术师 枪术师 弓箭手 双剑师 幻术师 咒术师 秘术师 骑士 战士 暗黑骑士", },
38: {"Name": "剑术师 骑士", },
39: {"Name": "剑术师 格斗家 斧术师 枪术师 弓箭手 双剑师 骑士 战士 暗黑骑士", },
40: {"Name": "剑术师 格斗家 斧术师 枪术师 弓箭手 双剑师 幻术师 咒术师 秘术师 武僧 战士 龙骑士 吟游诗人 忍者", },
41: {"Name": "格斗家 武僧", },
42: {"Name": "剑术师 格斗家 斧术师 枪术师 弓箭手 双剑师 武僧 战士 龙骑士 吟游诗人 忍者", },
43: {"Name": "剑术师 格斗家 斧术师 枪术师 弓箭手 双剑师 幻术师 咒术师 秘术师 骑士 武僧 战士 龙骑士 暗黑骑士", },
44: {"Name": "斧术师 战士", },
45: {"Name": "剑术师 格斗家 斧术师 枪术师 弓箭手 双剑师 骑士 武僧 战士 龙骑士 暗黑骑士", },
46: {"Name": "剑术师 格斗家 斧术师 枪术师 弓箭手 双剑师 幻术师 咒术师 秘术师 武僧 龙骑士 吟游诗人 忍者 机工士", },
47: {"Name": "枪术师 龙骑士", },
48: {"Name": "剑术师 格斗家 斧术师 枪术师 弓箭手 双剑师 武僧 龙骑士 吟游诗人 忍者 机工士", },
49: {"Name": "剑术师 格斗家 斧术师 枪术师 弓箭手 双剑师 幻术师 咒术师 秘术师 吟游诗人 黑魔法师 召唤师 机工士", },
50: {"Name": "弓箭手 吟游诗人", },
51: {"Name": "剑术师 格斗家 斧术师 枪术师 弓箭手 双剑师 吟游诗人 机工士", },
52: {"Name": "剑术师 格斗家 斧术师 枪术师 弓箭手 双剑师 幻术师 咒术师 秘术师 骑士 白魔法师 学者 占星术士", },
53: {"Name": "幻术师 白魔法师", },
54: {"Name": "剑术师 格斗家 斧术师 枪术师 弓箭手 双剑师 幻术师 咒术师 秘术师 白魔法师 黑魔法师 召唤师 学者 占星术士", },
55: {"Name": "咒术师 黑魔法师", },
56: {"Name": "剑术师 幻术师 咒术师 骑士 白魔法师 黑魔法师", },
57: {"Name": "剑术师 咒术师 骑士 黑魔法师", },
58: {"Name": "剑术师 幻术师 骑士 白魔法师", },
59: {"Name": "剑术师 斧术师 骑士 战士 暗黑骑士 绝枪战士", },
60: {"Name": "剑术师 斧术师 枪术师 骑士 战士 龙骑士 暗黑骑士 绝枪战士", },
61: {"Name": "幻术师 咒术师 秘术师 白魔法师 学者 占星术士", },
62: {"Name": "幻术师 咒术师 秘术师 白魔法师 黑魔法师 召唤师 学者 占星术士", },
63: {"Name": "咒术师 秘术师 黑魔法师 召唤师 赤魔法师 青魔法师", },
64: {"Name": "幻术师 白魔法师 学者 占星术士", },
65: {"Name": "格斗家 武僧 武士", },
66: {"Name": "弓箭手 吟游诗人 机工士 舞者", },
67: {"Name": "剑术师 格斗家 斧术师 枪术师 双剑师 武僧 龙骑士 忍者", },
68: {"Name": "秘术师 召唤师 学者", },
69: {"Name": "秘术师 召唤师", },
70: {"Name": "烹调师之外的能工巧匠", },
71: {"Name": "剑术师 格斗家 斧术师 枪术师 弓箭手 双剑师 幻术师 咒术师 秘术师 白魔法师 黑魔法师 召唤师 学者", },
72: {"Name": "幻术师 咒术师 秘术师 白魔法师 黑魔法师 召唤师 学者", },
73: {"Name": "幻术师 白魔法师 学者 占星术士", },
74: {"Name": "", },
75: {"Name": "", },
76: {"Name": "", },
77: {"Name": "", },
78: {"Name": "", },
79: {"Name": "", },
80: {"Name": "", },
81: {"Name": "", },
82: {"Name": "", },
83: {"Name": "", },
84: {"Name": "格斗家 枪术师 武僧 龙骑士 武士", },
85: {"Name": "战斗精英 魔法导师 特职专用", },
86: {"Name": "骑士 战士 暗黑骑士 绝枪战士 武僧 龙骑士 忍者 武士", },
87: {"Name": "吟游诗人 机工士 舞者 黑魔法师 召唤师 赤魔法师 白魔法师 学者 占星术士", },
88: {"Name": "剑术师 斧术师 格斗家 枪术师 弓箭手 双剑师 骑士 武僧 战士 龙骑士 吟游诗人 忍者 暗黑骑士 机工士", },
89: {"Name": "黑魔法师 召唤师 赤魔法师", },
90: {"Name": "弓箭手 幻术师 咒术师 秘术师 白魔法师 吟游诗人 黑魔法师 召唤师 学者 机工士 占星术士", },
91: {"Name": "双剑师", },
92: {"Name": "忍者", },
93: {"Name": "双剑师 忍者", },
94: {"Name": "剑术师 格斗家 斧术师 枪术师 弓箭手 双剑师 忍者", },
95: {"Name": "剑术师 格斗家 斧术师 枪术师 双剑师 忍者", },
96: {"Name": "机工士", },
97: {"Name": "格斗家 枪术师 弓箭手 双剑师 武僧 龙骑士 吟游诗人 忍者 机工士", },
98: {"Name": "暗黑骑士", },
99: {"Name": "占星术士", },
100: {"Name": "弓箭手 双剑师 吟游诗人 忍者 机工士", },
101: {"Name": "格斗家 枪术师 双剑师 武僧 龙骑士 忍者", },
102: {"Name": "格斗家 双剑师 武僧 忍者 武士", },
103: {"Name": "双剑师 忍者", },
104: {"Name": "", },
105: {"Name": "弓箭手 双剑师 吟游诗人 忍者 机工士 舞者", },
106: {"Name": "剑术师 格斗家 斧术师 枪术师 弓箭手 双剑师 吟游诗人", },
107: {"Name": "骑士 武僧 战士 龙骑士 吟游诗人 白魔法师 黑魔法师 召唤师 学者 忍者 机工士 暗黑骑士 占星术士", },
108: {"Name": "战斗精英 魔法导师", },
109: {"Name": "", },
110: {"Name": "战斗精英 魔法导师 特职专用", },
111: {"Name": "武士", },
112: {"Name": "赤魔法师", },
113: {"Name": "剑术师 斧术师 骑士 战士 暗黑骑士 绝枪战士", },
114: {"Name": "格斗家 枪术师 武僧 龙骑士 双剑师 忍者 武士", },
115: {"Name": "弓箭手 吟游诗人 机工士 舞者", },
116: {"Name": "咒术师 黑魔法师 秘术师 召唤师 赤魔法师 青魔法师", },
117: {"Name": "幻术师 白魔法师 学者 占星术士", },
118: {"Name": "格斗家 枪术师 弓箭手 武僧 龙骑士 吟游诗人 双剑师 忍者 机工士 武士 舞者", },
119: {"Name": "格斗家 枪术师 咒术师 武僧 龙骑士 黑魔法师 秘术师 召唤师 双剑师 忍者 武士 赤魔法师 青魔法师", },
120: {"Name": "幻术师 咒术师 白魔法师 黑魔法师 秘术师 召唤师 学者 占星术士 赤魔法师 青魔法师", },
121: {"Name": "骑士 战士 暗黑骑士 绝枪战士", },
122: {"Name": "武僧 龙骑士 忍者 武士", },
123: {"Name": "吟游诗人 机工士 舞者", },
124: {"Name": "黑魔法师 召唤师 赤魔法师 青魔法师", },
125: {"Name": "白魔法师 学者 占星术士", },
126: {"Name": "武僧 龙骑士 吟游诗人 忍者 机工士 武士 舞者", },
127: {"Name": "武僧 龙骑士 黑魔法师 召唤师 忍者 武士 赤魔法师 青魔法师", },
128: {"Name": "白魔法师 黑魔法师 召唤师 学者 占星术士 赤魔法师 青魔法师", },
129: {"Name": "青魔法师", },
130: {"Name": "所有(除设限特职)", },
131: {"Name": "武僧 龙骑士 吟游诗人 黑魔法师 召唤师 忍者 机工士 武士 赤魔法师 舞者", },
132: {"Name": "武僧 龙骑士 吟游诗人 白魔法师 黑魔法师 召唤师 学者 忍者 机工士 占星术士 武士 赤魔法师 舞者", },
133: {"Name": "白魔法师 学者 占星术士", },
134: {"Name": "骑士 战士 暗黑骑士 绝枪战士", },
135: {"Name": "骑士 武僧 战士 龙骑士 吟游诗人 黑魔法师 召唤师 忍者 机工士 暗黑骑士 武士 赤魔法师 绝枪战士 舞者", },
136: {"Name": "骑士 战士 白魔法师 学者 暗黑骑士 占星术士 绝枪战士", },
137: {"Name": "骑士 武僧 战士 龙骑士 吟游诗人 白魔法师 黑魔法师 召唤师 学者 忍者 机工士 暗黑骑士 占星术士 武士 赤魔法师 绝枪战士 舞者", },
138: {"Name": "骑士 武僧 战士 龙骑士 忍者 暗黑骑士 武士 绝枪战士", },
139: {"Name": "吟游诗人 机工士 舞者", },
140: {"Name": "白魔法师 黑魔法师 召唤师 学者 占星术士 赤魔法师", },
141: {"Name": "骑士 武僧 战士 龙骑士 吟游诗人 白魔法师 黑魔法师 召唤师 学者 机工士 暗黑骑士 占星术士 武士 赤魔法师 绝枪战士 舞者", },
142: {"Name": "战斗精英和魔法导师(除设限特职)", },
143: {"Name": "战斗精英(除设限特职)", },
144: {"Name": "魔法导师(除设限特职)", },
145: {"Name": "骑士 武僧 战士 龙骑士 吟游诗人 忍者 机工士 暗黑骑士 武士 绝枪战士 舞者", },
146: {"Name": "战斗精英 魔法导师 特职专用(除设限特职)", },
147: {"Name": "黑魔法师 召唤师 赤魔法师", },
148: {"Name": "武僧 龙骑士 忍者 武士", },
149: {"Name": "绝枪战士", },
150: {"Name": "舞者", },
151: {"Name": "金属工艺(锻铁匠、铸甲匠、雕金匠)", },
152: {"Name": "手工工艺(刻木匠、制革匠、裁衣匠)", },
153: {"Name": "食药工艺(炼金术士、烹调师)", },
154: {"Name": "大地资源(采矿工、园艺工)", },
155: {"Name": "水生资源(捕鱼人)", },
156: {"Name": "防护职业(设限特职除外)", },
157: {"Name": "治疗职业(设限特职除外)", },
158: {"Name": "物理进攻职业(设限特职除外)", },
159: {"Name": "魔法进攻职业(设限特职除外)", },
160: {"Name": "秘术师 学者", },
161: {"Name": "剑术师 格斗家 斧术师 枪术师 弓箭手 骑士 武僧 战士 龙骑士 吟游诗人 双剑师 忍者 机工士 暗黑骑士 武士 绝枪战士 舞者", },
162: {"Name": "骑士 武僧 战士 龙骑士 吟游诗人 白魔法师 黑魔法师 召唤师 学者 忍者 机工士 暗黑骑士 占星术士 武士 赤魔法师 绝枪战士 舞者", },
163: {"Name": "武僧 龙骑士 吟游诗人 黑魔法师 召唤师 忍者 机工士 武士 赤魔法师 舞者", },
164: {"Name": "武僧 龙骑士 吟游诗人 白魔法师 黑魔法师 召唤师 学者 忍者 机工士 占星术士 武士 赤魔法师 舞者", },
165: {"Name": "白魔法师 学者 占星术士", },
166: {"Name": "骑士 战士 暗黑骑士 绝枪战士", },
167: {"Name": "骑士 武僧 战士 龙骑士 吟游诗人 黑魔法师 召唤师 忍者 机工士 暗黑骑士 武士 赤魔法师 绝枪战士 舞者", },
168: {"Name": "骑士 战士 白魔法师 学者 暗黑骑士 占星术士 绝枪战士", },
169: {"Name": "骑士 武僧 战士 龙骑士 吟游诗人 白魔法师 黑魔法师 召唤师 学者 忍者 机工士 暗黑骑士 占星术士 武士 赤魔法师 绝枪战士 舞者", },
170: {"Name": "ナイト モンク 戦士 竜騎士 忍者 暗黒騎士 侍 ガンブレイカー", },
171: {"Name": "吟游诗人 机工士 舞者", },
172: {"Name": "白魔法师 黑魔法师 召唤师 学者 占星术士 赤魔法师", },
173: {"Name": "骑士 武僧 战士 龙骑士 吟游诗人 白魔法师 黑魔法师 召唤师 学者 机工士 暗黑骑士 占星术士 武士 赤魔法师 绝枪战士 舞者", },
174: {"Name": "骑士 武僧 战士 龙骑士 吟游诗人 忍者 机工士 暗黑骑士 武士 绝枪战士 舞者", },
175: {"Name": "黑魔法师 召唤师 赤魔法师", },
176: {"Name": "武僧 龙骑士 忍者 武士", },
177: {"Name": "武僧 龙骑士 吟游诗人 忍者 机工士 武士 舞者", },
178: {"Name": "ナイト 戦士 黒魔道士 召喚士 暗黒騎士 赤魔道士 ガンブレイカー", },
179: {"Name": "白魔法师 召唤师 学者 占星术士 赤魔法师", },
180: {"Name": "", },
181: {"Name": "", }}
tf = {'TRUE': True,
'FALSE': False,
'0': False,
'1': True,
0: False,
1: True}
reject_words = ["ABORT",
"ACTION",
"ADD",
"AFTER",
"ALL",
"ALTER",
"ANALYZE",
"AND",
"AS",
"ASC",
"ATTACH",
"AUTOINCREMENT",
"BEFORE",
"BEGIN",
"BETWEEN",
"BY",
"CASCADE",
"CASE",
"CAST",
"CHECK",
"COLLATE",
"COLUMN",
"COMMIT",
"CONFLICT",
"CONSTRAINT",
"CREATE",
"CROSS",
"CURRENT_DATE",
"CURRENT_TIME",
"CURRENT_TIMESTAMP",
"DATABASE",
"DEFAULT",
"DEFERRABLE",
"DEFERRED",
"DELETE",
"DESC",
"DETACH",
"DISTINCT",
"DROP",
"EACH",
"ELSE",
"END",
"ESCAPE",
"EXCEPT",
"EXCLUSIVE",
"EXISTS",
"EXPLAIN",
"FAIL",
"FOR",
"FOREIGN",
"FROM",
"FULL",
"GLOB",
"GROUP",
"HAVING",
"IF",
"IGNORE",
"IMMEDIATE",
"IN",
"INDEX",
"INDEXED",
"INITIALLY",
"INNER",
"INSERT",
"INSTEAD",
"INTERSECT",
"INTO",
"IS",
"ISNULL",
"JOIN",
"KEY",
"LEFT",
"LIKE",
"LIMIT",
"MATCH",
"NATURAL",
"NO",
"NOT",
"NOTNULL",
"NULL",
"OF",
"OFFSET",
"ON",
"OR",
"ORDER",
"OUTER",
"PLAN",
"PRAGMA",
"PRIMARY",
"QUERY",
"RAISE",
"REFERENCES",
"REGEXP",
"REINDEX",
"RELEASE",
"RENAME",
"REPLACE",
"RESTRICT",
"RIGHT",
"ROLLBACK",
"ROW",
"SAVEPOINT",
"SELECT",
"SET",
"TABLE",
"TEMP",
"TEMPORARY",
"THEN",
"TO",
"TRANSACTION",
"TRIGGER",
"UNION",
"UNIQUE",
"UPDATE",
"USING",
"VACUUM",
"VALUES",
"VIEW",
"VIRTUAL",
"WHEN",
"WHERE",
"NULL",
"INTEGER",
"REAL",
"TEXT",
"BLOB",
"\"",
"%"]
Base_Param = {
0: (-1, ""),
1: (0, "力量"),
2: (1, "灵巧"),
3: (2, "耐力"),
4: (3, "智力"),
5: (4, "精神"),
6: (5, "信仰"),
7: (6, "体力"),
8: (7, "魔力"),
9: (8, "技力"),
10: (9, "采集力"),
11: (10, "制作力"),
12: (-1, "物理基本性能"),
13: (-1, "魔法基本性能"),
14: (11, "攻击间隔"),
15: (-1, "附加效果"),
16: (-1, "攻击次数"),
17: (-1, "格挡发动力"),
18: (-1, "格挡性能"),
19: (12, "坚韧"),
20: (13, "物理攻击力"),
21: (14, "物理防御力"),
22: (15, "直击"),
23: (16, "回避力"),
24: (17, "魔法防御力"),
25: (-1, "暴击攻击力"),
26: (-1, "暴击防御力"),
27: (18, "暴击"),
28: (-1, "暴击回避力"),
29: (-1, "斩击耐性"),
30: (-1, "突刺耐性"),
31: (-1, "打击耐性"),
32: (-1, "射击耐性"),
33: (19, "攻击魔法威力"),
34: (20, "治疗魔法威力"),
35: (-1, "强化魔法威力"),
36: (21, "元素加持"),
37: (-1, "火"),
38: (-1, "冰"),
39: (-1, "风"),
40: (-1, "土"),
41: (-1, "雷"),
42: (-1, "水"),
43: (-1, "全魔法耐性"),
44: (22, "信念"),
45: (23, "技能速度"),
46: (24, "咏唱速度"),
47: (25, "加速"),
48: (-1, "斗志"),
49: (-1, "仇恨"),
50: (-1, "降低仇恨"),
51: (-1, "分解技能提升率"),
52: (-1, "经验值获得量"),
53: (-1, "生命再生"),
54: (-1, "魔力再生"),
55: (-1, "主状态修正"),
56: (-1, "副状态修正"),
57: (-1, "减速耐性"),
58: (-1, "石化耐性"),
59: (-1, "麻痹耐性"),
60: (-1, "静寂耐性"),
61: (-1, "失明耐性"),
62: (-1, "中毒耐性"),
63: (-1, "眩晕耐性"),
64: (-1, "睡眠耐性"),
65: (-1, "止步耐性"),
66: (-1, "加重耐性"),
67: (-1, "死亡宣告耐性"),
68: (-1, "装备损耗耐性"),
69: (-1, "精炼度提升量"),
70: (26, "作业精度"),
71: (27, "加工精度"),
72: (28, "获得力"),
73: (29, "鉴别力")}
Item_Special_Bonus = {
0: "",
1: "",
2: "套装效果:",
3: "",
4: "神应效果:",
5: "",
6: "套装效果(等级限制):",
7: "优雷卡专用效果:",
8: "天佑女王专用效果:",
9: "",
10: ""}
Item_Series = {
0: "",
1: "黑涡团制式装备",
2: "双蛇党制式装备",
3: "恒辉队制式装备",
4: "东方公子装束",
5: "东方秀女装束",
6: "东方警卫装束",
7: "东方女官装束",
8: "特制上仙装备",
9: "特制女仆装备",
10: "特制管家装备",
11: "哪吒赤莲装备",
12: "哪吒白莲装备",
13: "东方贵人装束",
14: "风雅装束",
15: "东方雅人装束",
16: "东方丽人装束",
17: "绿宝石兽装备",
18: "黄宝石兽装备",
19: "天使装备",
20: "恶魔装备",
21: "王子装备",
22: "公主装备",
23: "东方书生装备",
24: "东方女生装备",
25: "艾普装备",
26: "大召唤士装备",
27: "红宝石兽装备",
28: "圣手饰品装备",
29: "圣地饰品装备"}
Grand_Company = {
0: "平民",
1: "黑涡团",
2: "双蛇党",
3: "恒辉队"}
Item_Sort_Category = {
0: 0,
1: 0,
2: 0,
3: 0,
4: 1,
5: 5,
6: 6,
7: 11,
8: 22,
9: 25,
10: 26,
11: 27,
12: 30,
13: 32,
14: 33,
15: 35,
16: 40,
17: 41,
18: 42,
19: 43,
20: 45,
21: 46,
22: 47,
23: 48,
24: 49,
25: 50,
26: 51,
27: 52,
28: 53,
29: 54,
30: 55,
31: 75,
32: 76,
33: 77,
34: 78,
35: 80,
36: 81,
37: 79,
38: 95,
39: 100,
40: 101,
41: 105,
42: 106,
43: 107,
44: 108,
45: 109,
46: 110,
47: 111,
48: 112,
49: 113,
50: 125,
51: 130,
52: 135,
53: 140,
54: 150,
55: 155,
56: 160,
57: 165,
58: 170,
59: 175,
60: 180,
61: 185,
62: 250,
63: 254,
64: 96,
65: 102,
66: 114,
67: 82,
68: 115,
69: 83,
70: 103}
Equip_Slot_Category = {
0: {"MainHand": 0, "OffHand": 0, "Head": 0, "Body": 0, "Gloves": 0, "Waist": 0, "Legs": 0, "Feet": 0, "Ears": 0,
"Neck": 0, "Wrists": 0, "FingerL": 0, "FingerR": 0, "SoulCrystal": 0, },
1: {"MainHand": 1, "OffHand": 0, "Head": 0, "Body": 0, "Gloves": 0, "Waist": 0, "Legs": 0, "Feet": 0, "Ears": 0,
"Neck": 0, "Wrists": 0, "FingerL": 0, "FingerR": 0, "SoulCrystal": 0, },
2: {"MainHand": 0, "OffHand": 1, "Head": 0, "Body": 0, "Gloves": 0, "Waist": 0, "Legs": 0, "Feet": 0, "Ears": 0,
"Neck": 0, "Wrists": 0, "FingerL": 0, "FingerR": 0, "SoulCrystal": 0, },
3: {"MainHand": 0, "OffHand": 0, "Head": 1, "Body": 0, "Gloves": 0, "Waist": 0, "Legs": 0, "Feet": 0, "Ears": 0,
"Neck": 0, "Wrists": 0, "FingerL": 0, "FingerR": 0, "SoulCrystal": 0, },
4: {"MainHand": 0, "OffHand": 0, "Head": 0, "Body": 1, "Gloves": 0, "Waist": 0, "Legs": 0, "Feet": 0, "Ears": 0,
"Neck": 0, "Wrists": 0, "FingerL": 0, "FingerR": 0, "SoulCrystal": 0, },
5: {"MainHand": 0, "OffHand": 0, "Head": 0, "Body": 0, "Gloves": 1, "Waist": 0, "Legs": 0, "Feet": 0, "Ears": 0,
"Neck": 0, "Wrists": 0, "FingerL": 0, "FingerR": 0, "SoulCrystal": 0, },
6: {"MainHand": 0, "OffHand": 0, "Head": 0, "Body": 0, "Gloves": 0, "Waist": 1, "Legs": 0, "Feet": 0, "Ears": 0,
"Neck": 0, "Wrists": 0, "FingerL": 0, "FingerR": 0, "SoulCrystal": 0, },
7: {"MainHand": 0, "OffHand": 0, "Head": 0, "Body": 0, "Gloves": 0, "Waist": 0, "Legs": 1, "Feet": 0, "Ears": 0,
"Neck": 0, "Wrists": 0, "FingerL": 0, "FingerR": 0, "SoulCrystal": 0, },
8: {"MainHand": 0, "OffHand": 0, "Head": 0, "Body": 0, "Gloves": 0, "Waist": 0, "Legs": 0, "Feet": 1, "Ears": 0,
"Neck": 0, "Wrists": 0, "FingerL": 0, "FingerR": 0, "SoulCrystal": 0, },
9: {"MainHand": 0, "OffHand": 0, "Head": 0, "Body": 0, "Gloves": 0, "Waist": 0, "Legs": 0, "Feet": 0, "Ears": 1,
"Neck": 0, "Wrists": 0, "FingerL": 0, "FingerR": 0, "SoulCrystal": 0, },
10: {"MainHand": 0, "OffHand": 0, "Head": 0, "Body": 0, "Gloves": 0, "Waist": 0, "Legs": 0, "Feet": 0, "Ears": 0,
"Neck": 1, "Wrists": 0, "FingerL": 0, "FingerR": 0, "SoulCrystal": 0, },
11: {"MainHand": 0, "OffHand": 0, "Head": 0, "Body": 0, "Gloves": 0, "Waist": 0, "Legs": 0, "Feet": 0, "Ears": 0,
"Neck": 0, "Wrists": 1, "FingerL": 0, "FingerR": 0, "SoulCrystal": 0, },
12: {"MainHand": 0, "OffHand": 0, "Head": 0, "Body": 0, "Gloves": 0, "Waist": 0, "Legs": 0, "Feet": 0, "Ears": 0,
"Neck": 0, "Wrists": 0, "FingerL": 1, "FingerR": 1, "SoulCrystal": 0, },
13: {"MainHand": 1, "OffHand": -1, "Head": 0, "Body": 0, "Gloves": 0, "Waist": 0, "Legs": 0, "Feet": 0, "Ears": 0,
"Neck": 0, "Wrists": 0, "FingerL": 0, "FingerR": 0, "SoulCrystal": 0, },
14: {"MainHand": 1, "OffHand": 1, "Head": 0, "Body": 0, "Gloves": 0, "Waist": 0, "Legs": 0, "Feet": 0, "Ears": 0,
"Neck": 0, "Wrists": 0, "FingerL": 0, "FingerR": 0, "SoulCrystal": 0, },
15: {"MainHand": 0, "OffHand": 0, "Head": -1, "Body": 1, "Gloves": 0, "Waist": 0, "Legs": 0, "Feet": 0, "Ears": 0,
"Neck": 0, "Wrists": 0, "FingerL": 0, "FingerR": 0, "SoulCrystal": 0, },
16: {"MainHand": 0, "OffHand": 0, "Head": 0, "Body": 1, "Gloves": -1, "Waist": 0, "Legs": -1, "Feet": -1, "Ears": 0,
"Neck": 0, "Wrists": 0, "FingerL": 0, "FingerR": 0, "SoulCrystal": 0, },
17: {"MainHand": 0, "OffHand": 0, "Head": 0, "Body": 0, "Gloves": 0, "Waist": 0, "Legs": 0, "Feet": 0, "Ears": 0,
"Neck": 0, "Wrists": 0, "FingerL": 0, "FingerR": 0, "SoulCrystal": 1, },
18: {"MainHand": 0, "OffHand": 0, "Head": 0, "Body": 0, "Gloves": 0, "Waist": 0, "Legs": 1, "Feet": -1, "Ears": 0,
"Neck": 0, "Wrists": 0, "FingerL": 0, "FingerR": 0, "SoulCrystal": 0, },
19: {"MainHand": 0, "OffHand": 0, "Head": -1, "Body": 1, "Gloves": -1, "Waist": 0, "Legs": -1, "Feet": -1,
"Ears": 0, "Neck": 0, "Wrists": 0, "FingerL": 0, "FingerR": 0, "SoulCrystal": 0, },
20: {"MainHand": 0, "OffHand": 0, "Head": 0, "Body": 1, "Gloves": -1, "Waist": 0, "Legs": -1, "Feet": 0, "Ears": 0,
"Neck": 0, "Wrists": 0, "FingerL": 0, "FingerR": 0, "SoulCrystal": 0, },
21: {"MainHand": 0, "OffHand": 0, "Head": 0, "Body": 1, "Gloves": 0, "Waist": 0, "Legs": -1, "Feet": -1, "Ears": 0,
"Neck": 0, "Wrists": 0, "FingerL": 0, "FingerR": 0, "SoulCrystal": 0, },
22: {"MainHand": 0, "OffHand": 0, "Head": 0, "Body": 0, "Gloves": 0, "Waist": 0, "Legs": 0, "Feet": 0, "Ears": 0,
"Neck": 0, "Wrists": 0, "FingerL": 0, "FingerR": 0, "SoulCrystal": 0, }
}
Item_Search_Category = {
0: {"Name": "", "Icon": 0, "Category": 0, "Order": 0, "ClassJob": 0, },
1: {"Name": "武器", "Icon": 60102, "Category": 0, "Order": 0, "ClassJob": 0, },
2: {"Name": "制作工具", "Icon": 60113, "Category": 0, "Order": 0, "ClassJob": 0, },
3: {"Name": "采集工具", "Icon": 60120, "Category": 0, "Order": 0, "ClassJob": 0, },
4: {"Name": "防具", "Icon": 60126, "Category": 0, "Order": 0, "ClassJob": 0, },
5: {"Name": "饰品", "Icon": 60135, "Category": 0, "Order": 0, "ClassJob": 0, },
6: {"Name": "药品食品", "Icon": 60136, "Category": 0, "Order": 0, "ClassJob": 0, },
7: {"Name": "素材", "Icon": 60137, "Category": 0, "Order": 0, "ClassJob": 0, },
8: {"Name": "其他", "Icon": 60159, "Category": 0, "Order": 0, "ClassJob": 0, },
9: {"Name": "格斗武器", "Icon": 60101, "Category": 1, "Order": 5, "ClassJob": 2, },
10: {"Name": "剑", "Icon": 60102, "Category": 1, "Order": 0, "ClassJob": 1, },
11: {"Name": "斧", "Icon": 60103, "Category": 1, "Order": 1, "ClassJob": 3, },
12: {"Name": "弓", "Icon": 60105, "Category": 1, "Order": 8, "ClassJob": 5, },
13: {"Name": "长枪", "Icon": 60104, "Category": 1, "Order": 4, "ClassJob": 4, },
14: {"Name": "咒杖", "Icon": 60108, "Category": 1, "Order": 11, "ClassJob": 7, },
15: {"Name": "幻杖", "Icon": 60107, "Category": 1, "Order": 14, "ClassJob": 6, },
16: {"Name": "魔导书", "Icon": 60109, "Category": 1, "Order": 12, "ClassJob": 26, },
17: {"Name": "盾", "Icon": 60110, "Category": 2, "Order": 0, "ClassJob": 0, },
18: {"Name": "投掷武器", "Icon": 60111, "Category": 0, "Order": 0, "ClassJob": 0, },
19: {"Name": "刻木工具", "Icon": 60112, "Category": 1, "Order": 17, "ClassJob": 8, },
20: {"Name": "锻铁工具", "Icon": 60113, "Category": 1, "Order": 18, "ClassJob": 9, },
21: {"Name": "铸甲工具", "Icon": 60114, "Category": 1, "Order": 19, "ClassJob": 10, },
22: {"Name": "雕金工具", "Icon": 60115, "Category": 1, "Order": 20, "ClassJob": 11, },
23: {"Name": "制革工具", "Icon": 60116, "Category": 1, "Order": 21, "ClassJob": 12, },
24: {"Name": "裁衣工具", "Icon": 60117, "Category": 1, "Order": 22, "ClassJob": 13, },
25: {"Name": "炼金工具", "Icon": 60118, "Category": 1, "Order": 23, "ClassJob": 14, },
26: {"Name": "烹调工具", "Icon": 60119, "Category": 1, "Order": 24, "ClassJob": 15, },
27: {"Name": "采矿工具", "Icon": 60120, "Category": 1, "Order": 25, "ClassJob": 16, },
28: {"Name": "园艺工具", "Icon": 60121, "Category": 1, "Order": 26, "ClassJob": 17, },
29: {"Name": "捕鱼用具", "Icon": 60122, "Category": 1, "Order": 27, "ClassJob": 18, },
30: {"Name": "钓饵", "Icon": 60123, "Category": 1, "Order": 28, "ClassJob": 18, },
31: {"Name": "头部防具", "Icon": 60124, "Category": 2, "Order": 1, "ClassJob": 0, },
32: {"Name": "内衣", "Icon": 60125, "Category": 0, "Order": 0, "ClassJob": 0, },
33: {"Name": "身体防具", "Icon": 60126, "Category": 2, "Order": 2, "ClassJob": 0, },
34: {"Name": "内裤", "Icon": 60127, "Category": 0, "Order": 0, "ClassJob": 0, },
35: {"Name": "腿部防具", "Icon": 60128, "Category": 2, "Order": 5, "ClassJob": 0, },
36: {"Name": "手部防具", "Icon": 60129, "Category": 2, "Order": 3, "ClassJob": 0, },
37: {"Name": "脚部防具", "Icon": 60130, "Category": 2, "Order": 6, "ClassJob": 0, },
38: {"Name": "腰部防具", "Icon": 60131, "Category": 2, "Order": 4, "ClassJob": 0, },
39: {"Name": "项链", "Icon": 60132, "Category": 2, "Order": 9, "ClassJob": 0, },
40: {"Name": "耳饰", "Icon": 60133, "Category": 2, "Order": 8, "ClassJob": 0, },
41: {"Name": "手镯", "Icon": 60134, "Category": 2, "Order": 10, "ClassJob": 0, },
42: {"Name": "戒指", "Icon": 60135, "Category": 2, "Order": 11, "ClassJob": 0, },
43: {"Name": "药品", "Icon": 60136, "Category": 3, "Order": 0, "ClassJob": 0, },
44: {"Name": "食材", "Icon": 60137, "Category": 3, "Order": 1, "ClassJob": 0, },
45: {"Name": "食品", "Icon": 60146, "Category": 3, "Order": 2, "ClassJob": 0, },
46: {"Name": "水产品", "Icon": 60138, "Category": 3, "Order": 3, "ClassJob": 0, },
47: {"Name": "石材", "Icon": 60139, "Category": 3, "Order": 4, "ClassJob": 0, },
48: {"Name": "金属", "Icon": 60140, "Category": 3, "Order": 5, "ClassJob": 0, },
49: {"Name": "木材", "Icon": 60141, "Category": 3, "Order": 6, "ClassJob": 0, },
50: {"Name": "布料", "Icon": 60142, "Category": 3, "Order": 7, "ClassJob": 0, },
51: {"Name": "皮革", "Icon": 60143, "Category": 3, "Order": 8, "ClassJob": 0, },
52: {"Name": "骨材", "Icon": 60144, "Category": 3, "Order": 9, "ClassJob": 0, },
53: {"Name": "炼金原料", "Icon": 60145, "Category": 3, "Order": 10, "ClassJob": 0, },
54: {"Name": "染料", "Icon": 60147, "Category": 3, "Order": 11, "ClassJob": 0, },
55: {"Name": "部件", "Icon": 60148, "Category": 3, "Order": 12, "ClassJob": 0, },
56: {"Name": "一般家具", "Icon": 60164, "Category": 4, "Order": 3, "ClassJob": 0, },
57: {"Name": "魔晶石", "Icon": 60150, "Category": 3, "Order": 13, "ClassJob": 0, },
58: {"Name": "水晶", "Icon": 60151, "Category": 3, "Order": 14, "ClassJob": 0, },
59: {"Name": "触媒", "Icon": 60152, "Category": 3, "Order": 15, "ClassJob": 0, },
60: {"Name": "杂货", "Icon": 60153, "Category": 3, "Order": 16, "ClassJob": 0, },
61: {"Name": "灵魂水晶", "Icon": 60157, "Category": 0, "Order": 0, "ClassJob": 0, },
62: {"Name": "箭", "Icon": 60153, "Category": 0, "Order": 0, "ClassJob": 0, },
63: {"Name": "任务道具", "Icon": 60158, "Category": 0, "Order": 0, "ClassJob": 0, },
64: {"Name": "其他", "Icon": 60159, "Category": 0, "Order": 0, "ClassJob": 0, },
65: {"Name": "室外建材", "Icon": 60160, "Category": 4, "Order": 0, "ClassJob": 0, },
66: {"Name": "室内建材", "Icon": 60161, "Category": 4, "Order": 1, "ClassJob": 0, },
67: {"Name": "庭具", "Icon": 60168, "Category": 4, "Order": 2, "ClassJob": 0, },
68: {"Name": "椅子睡床", "Icon": 60165, "Category": 4, "Order": 4, "ClassJob": 0, },
69: {"Name": "桌台", "Icon": 60162, "Category": 4, "Order": 5, "ClassJob": 0, },
70: {"Name": "桌上", "Icon": 60163, "Category": 4, "Order": 6, "ClassJob": 0, },
71: {"Name": "壁挂", "Icon": 60166, "Category": 4, "Order": 7, "ClassJob": 0, },
72: {"Name": "地毯", "Icon": 60167, "Category": 4, "Order": 8, "ClassJob": 0, },
73: {"Name": "双剑", "Icon": 60106, "Category": 1, "Order": 7, "ClassJob": 29, },
74: {"Name": "杂货(季节活动)", "Icon": 60154, "Category": 3, "Order": 17, "ClassJob": 0, },
75: {"Name": "宠物", "Icon": 60155, "Category": 3, "Order": 18, "ClassJob": 0, },
76: {"Name": "双手剑", "Icon": 60170, "Category": 1, "Order": 2, "ClassJob": 32, },
77: {"Name": "火枪", "Icon": 60172, "Category": 1, "Order": 9, "ClassJob": 31, },
78: {"Name": "天球仪", "Icon": 60171, "Category": 1, "Order": 16, "ClassJob": 33, },
79: {"Name": "飞空艇/潜水艇部件", "Icon": 60169, "Category": 3, "Order": 19, "ClassJob": 0, },
80: {"Name": "管弦乐琴关联物品", "Icon": 60173, "Category": 3, "Order": 20, "ClassJob": 0, },
81: {"Name": "栽培用品", "Icon": 60174, "Category": 4, "Order": 9, "ClassJob": 0, },
82: {"Name": "绘画作品", "Icon": 60175, "Category": 4, "Order": 10, "ClassJob": 0, },
83: {"Name": "武士刀", "Icon": 60177, "Category": 1, "Order": 6, "ClassJob": 34, },
84: {"Name": "刺剑", "Icon": 60176, "Category": 1, "Order": 13, "ClassJob": 35, },
85: {"Name": "魔导书(学者专用)", "Icon": 60178, "Category": 1, "Order": 15, "ClassJob": 28, },
86: {"Name": "枪刃", "Icon": 60181, "Category": 1, "Order": 3, "ClassJob": 37, },
87: {"Name": "投掷武器", "Icon": 60182, "Category": 1, "Order": 10, "ClassJob": 38, },
88: {"Name": "", "Icon": 0, "Category": 0, "Order": 0, "ClassJob": 0, },
89: {"Name": "", "Icon": 0, "Category": 0, "Order": 0, "ClassJob": 0, },
90: {"Name": "", "Icon": 0, "Category": 0, "Order": 0, "ClassJob": 0, },
91: {"Name": "", "Icon": 0, "Category": 0, "Order": 0, "ClassJob": 0, },
92: {"Name": "", "Icon": 0, "Category": 0, "Order": 0, "ClassJob": 0, },
93: {"Name": "", "Icon": 0, "Category": 0, "Order": 0, "ClassJob": 0, },
94: {"Name": "", "Icon": 0, "Category": 0, "Order": 0, "ClassJob": 0, },
95: {"Name": "", "Icon": 0, "Category": 0, "Order": 0, "ClassJob": 0, },
96: {"Name": "", "Icon": 0, "Category": 0, "Order": 0, "ClassJob": 0, },
97: {"Name": "", "Icon": 0, "Category": 0, "Order": 0, "ClassJob": 0, },
98: {"Name": "", "Icon": 0, "Category": 0, "Order": 0, "ClassJob": 0, },
99: {"Name": "", "Icon": 0, "Category": 0, "Order": 0, "ClassJob": 0, },
100: {"Name": "", "Icon": 0, "Category": 0, "Order": 0, "ClassJob": 0, }
}
Item_UI_Category = {
0: {"Name": "", "Icon": 0, "Order{Minor}": 0, "Order{Major}": 0, },
1: {"Name": "格斗武器", "Icon": 60101, "Order{Minor}": 5, "Order{Major}": 1, },
2: {"Name": "单手剑", "Icon": 60102, "Order{Minor}": 0, "Order{Major}": 1, },
3: {"Name": "大斧", "Icon": 60103, "Order{Minor}": 1, "Order{Major}": 1, },
4: {"Name": "弓", "Icon": 60105, "Order{Minor}": 8, "Order{Major}": 1, },
5: {"Name": "长枪", "Icon": 60104, "Order{Minor}": 4, "Order{Major}": 1, },
6: {"Name": "单手咒杖", "Icon": 60108, "Order{Minor}": 11, "Order{Major}": 1, },
7: {"Name": "双手咒杖", "Icon": 60108, "Order{Minor}": 12, "Order{Major}": 1, },
8: {"Name": "单手幻杖", "Icon": 60107, "Order{Minor}": 16, "Order{Major}": 1, },
9: {"Name": "双手幻杖", "Icon": 60107, "Order{Minor}": 17, "Order{Major}": 1, },
10: {"Name": "魔导书", "Icon": 60109, "Order{Minor}": 13, "Order{Major}": 1, },
11: {"Name": "盾", "Icon": 60110, "Order{Minor}": 0, "Order{Major}": 3, },
12: {"Name": "刻木工具(主工具)", "Icon": 60112, "Order{Minor}": 0, "Order{Major}": 2, },
13: {"Name": "刻木工具(副工具)", "Icon": 60112, "Order{Minor}": 1, "Order{Major}": 2, },
14: {"Name": "锻铁工具(主工具)", "Icon": 60113, "Order{Minor}": 2, "Order{Major}": 2, },
15: {"Name": "锻铁工具(副工具)", "Icon": 60113, "Order{Minor}": 3, "Order{Major}": 2, },
16: {"Name": "铸甲工具(主工具)", "Icon": 60114, "Order{Minor}": 4, "Order{Major}": 2, },
17: {"Name": "铸甲工具(副工具)", "Icon": 60114, "Order{Minor}": 5, "Order{Major}": 2, },
18: {"Name": "雕金工具(主工具)", "Icon": 60115, "Order{Minor}": 6, "Order{Major}": 2, },
19: {"Name": "雕金工具(副工具)", "Icon": 60115, "Order{Minor}": 7, "Order{Major}": 2, },
20: {"Name": "制革工具(主工具)", "Icon": 60116, "Order{Minor}": 8, "Order{Major}": 2, },
21: {"Name": "制革工具(副工具)", "Icon": 60116, "Order{Minor}": 9, "Order{Major}": 2, },
22: {"Name": "裁衣工具(主工具)", "Icon": 60117, "Order{Minor}": 10, "Order{Major}": 2, },
23: {"Name": "裁衣工具(副工具)", "Icon": 60117, "Order{Minor}": 11, "Order{Major}": 2, },
24: {"Name": "炼金工具(主工具)", "Icon": 60118, "Order{Minor}": 12, "Order{Major}": 2, },
25: {"Name": "炼金工具(副工具)", "Icon": 60118, "Order{Minor}": 13, "Order{Major}": 2, },
26: {"Name": "烹调工具(主工具)", "Icon": 60119, "Order{Minor}": 14, "Order{Major}": 2, },
27: {"Name": "烹调工具(副工具)", "Icon": 60119, "Order{Minor}": 15, "Order{Major}": 2, },
28: {"Name": "采矿工具(主工具)", "Icon": 60120, "Order{Minor}": 16, "Order{Major}": 2, },
29: {"Name": "采矿工具(副工具)", "Icon": 60120, "Order{Minor}": 17, "Order{Major}": 2, },
30: {"Name": "园艺工具(主工具)", "Icon": 60121, "Order{Minor}": 18, "Order{Major}": 2, },
31: {"Name": "园艺工具(副工具)", "Icon": 60121, "Order{Minor}": 19, "Order{Major}": 2, },
32: {"Name": "捕鱼用具(主工具)", "Icon": 60122, "Order{Minor}": 20, "Order{Major}": 2, },
33: {"Name": "钓饵", "Icon": 60123, "Order{Minor}": 29, "Order{Major}": 7, },
34: {"Name": "头部防具", "Icon": 60124, "Order{Minor}": 1, "Order{Major}": 3, },
35: {"Name": "身体防具", "Icon": 60126, "Order{Minor}": 2, "Order{Major}": 3, },
36: {"Name": "腿部防具", "Icon": 60128, "Order{Minor}": 5, "Order{Major}": 3, },
37: {"Name": "手部防具", "Icon": 60129, "Order{Minor}": 3, "Order{Major}": 3, },
38: {"Name": "脚部防具", "Icon": 60130, "Order{Minor}": 6, "Order{Major}": 3, },
39: {"Name": "腰部防具", "Icon": 60131, "Order{Minor}": 4, "Order{Major}": 3, },
40: {"Name": "项链", "Icon": 60132, "Order{Minor}": 1, "Order{Major}": 4, },
41: {"Name": "耳饰", "Icon": 60133, "Order{Minor}": 0, "Order{Major}": 4, },
42: {"Name": "手镯", "Icon": 60134, "Order{Minor}": 2, "Order{Major}": 4, },
43: {"Name": "戒指", "Icon": 60135, "Order{Minor}": 3, "Order{Major}": 4, },
44: {"Name": "药品", "Icon": 60136, "Order{Minor}": 0, "Order{Major}": 5, },
45: {"Name": "食材", "Icon": 60137, "Order{Minor}": 0, "Order{Major}": 6, },
46: {"Name": "食品", "Icon": 60146, "Order{Minor}": 1, "Order{Major}": 5, },
47: {"Name": "水产品", "Icon": 60138, "Order{Minor}": 1, "Order{Major}": 6, },
48: {"Name": "石材", "Icon": 60139, "Order{Minor}": 2, "Order{Major}": 6, },
49: {"Name": "金属", "Icon": 60140, "Order{Minor}": 3, "Order{Major}": 6, },
50: {"Name": "木材", "Icon": 60141, "Order{Minor}": 4, "Order{Major}": 6, },
51: {"Name": "布料", "Icon": 60142, "Order{Minor}": 5, "Order{Major}": 6, },
52: {"Name": "皮革", "Icon": 60143, "Order{Minor}": 6, "Order{Major}": 6, },
53: {"Name": "骨材", "Icon": 60144, "Order{Minor}": 7, "Order{Major}": 6, },
54: {"Name": "炼金原料", "Icon": 60145, "Order{Minor}": 8, "Order{Major}": 6, },
55: {"Name": "染料", "Icon": 60147, "Order{Minor}": 10, "Order{Major}": 6, },
56: {"Name": "部件", "Icon": 60148, "Order{Minor}": 9, "Order{Major}": 6, },
57: {"Name": "家具", "Icon": 60164, "Order{Minor}": 20, "Order{Major}": 7, },
58: {"Name": "魔晶石", "Icon": 60150, "Order{Minor}": 0, "Order{Major}": 7, },
59: {"Name": "水晶", "Icon": 60151, "Order{Minor}": 1, "Order{Major}": 7, },
60: {"Name": "触媒", "Icon": 60152, "Order{Minor}": 2, "Order{Major}": 7, },
61: {"Name": "杂货", "Icon": 60153, "Order{Minor}": 3, "Order{Major}": 7, },
62: {"Name": "灵魂水晶", "Icon": 60157, "Order{Minor}": 4, "Order{Major}": 4, },
63: {"Name": "其他", "Icon": 60159, "Order{Minor}": 6, "Order{Major}": 7, },
64: {"Name": "房产证书", "Icon": 60160, "Order{Minor}": 7, "Order{Major}": 7, },
65: {"Name": "房顶", "Icon": 60160, "Order{Minor}": 8, "Order{Major}": 7, },
66: {"Name": "外墙", "Icon": 60160, "Order{Minor}": 9, "Order{Major}": 7, },
67: {"Name": "窗户", "Icon": 60160, "Order{Minor}": 10, "Order{Major}": 7, },
68: {"Name": "房门", "Icon": 60160, "Order{Minor}": 11, "Order{Major}": 7, },
69: {"Name": "房顶装饰", "Icon": 60160, "Order{Minor}": 12, "Order{Major}": 7, },
70: {"Name": "外墙装饰", "Icon": 60160, "Order{Minor}": 13, "Order{Major}": 7, },
71: {"Name": "门牌", "Icon": 60160, "Order{Minor}": 14, "Order{Major}": 7, },
72: {"Name": "院墙", "Icon": 60160, "Order{Minor}": 15, "Order{Major}": 7, },
73: {"Name": "内墙", "Icon": 60161, "Order{Minor}": 16, "Order{Major}": 7, },
74: {"Name": "地板", "Icon": 60161, "Order{Minor}": 17, "Order{Major}": 7, },
75: {"Name": "屋顶照明", "Icon": 60161, "Order{Minor}": 18, "Order{Major}": 7, },
76: {"Name": "庭具", "Icon": 60168, "Order{Minor}": 19, "Order{Major}": 7, },
77: {"Name": "桌台", "Icon": 60162, "Order{Minor}": 21, "Order{Major}": 7, },
78: {"Name": "桌上", "Icon": 60163, "Order{Minor}": 22, "Order{Major}": 7, },
79: {"Name": "壁挂", "Icon": 60166, "Order{Minor}": 23, "Order{Major}": 7, },
80: {"Name": "地毯", "Icon": 60167, "Order{Minor}": 24, "Order{Major}": 7, },
81: {"Name": "宠物", "Icon": 60155, "Order{Minor}": 5, "Order{Major}": 7, },
82: {"Name": "栽培用品", "Icon": 60153, "Order{Minor}": 25, "Order{Major}": 7, },
83: {"Name": "半魔晶石", "Icon": 60150, "Order{Minor}": 26, "Order{Major}": 7, },
84: {"Name": "双剑", "Icon": 60106, "Order{Minor}": 7, "Order{Major}": 1, },
85: {"Name": "杂货(季节活动)", "Icon": 60154, "Order{Minor}": 4, "Order{Major}": 7, },
86: {"Name": "九宫幻卡", "Icon": 60156, "Order{Minor}": 27, "Order{Major}": 7, },
87: {"Name": "双手剑", "Icon": 60170, "Order{Minor}": 2, "Order{Major}": 1, },
88: {"Name": "火枪", "Icon": 60172, "Order{Minor}": 9, "Order{Major}": 1, },
89: {"Name": "天球仪", "Icon": 60171, "Order{Minor}": 19, "Order{Major}": 1, },
90: {"Name": "飞空艇部件(船体)", "Icon": 60169, "Order{Minor}": 11, "Order{Major}": 6, },
91: {"Name": "飞空艇部件(舾装)", "Icon": 60169, "Order{Minor}": 12, "Order{Major}": 6, },
92: {"Name": "飞空艇部件(船尾)", "Icon": 60169, "Order{Minor}": 14, "Order{Major}": 6, },
93: {"Name": "飞空艇部件(船首)", "Icon": 60169, "Order{Minor}": 13, "Order{Major}": 6, },
94: {"Name": "管弦乐琴乐谱", "Icon": 60173, "Order{Minor}": 28, "Order{Major}": 7, },
95: {"Name": "绘画作品", "Icon": 60175, "Order{Minor}": 29, "Order{Major}": 7, },
96: {"Name": "武士刀", "Icon": 60177, "Order{Minor}": 6, "Order{Major}": 1, },
97: {"Name": "刺剑", "Icon": 60176, "Order{Minor}": 14, "Order{Major}": 1, },
98: {"Name": "魔导书(学者专用)", "Icon": 60178, "Order{Minor}": 18, "Order{Major}": 1, },
99: {"Name": "捕鱼用具(副工具)", "Icon": 60122, "Order{Minor}": 21, "Order{Major}": 2, },
100: {"Name": "货币", "Icon": 60179, "Order{Minor}": 255, "Order{Major}": 7, },
101: {"Name": "潜水艇部件(船体)", "Icon": 60169, "Order{Minor}": 15, "Order{Major}": 6, },
102: {"Name": "潜水艇部件(船尾)", "Icon": 60169, "Order{Minor}": 16, "Order{Major}": 6, },
103: {"Name": "潜水艇部件(船首)", "Icon": 60169, "Order{Minor}": 17, "Order{Major}": 6, },
104: {"Name": "潜水艇部件(舰桥)", "Icon": 60169, "Order{Minor}": 18, "Order{Major}": 6, },
105: {"Name": "青魔杖", "Icon": 60180, "Order{Minor}": 15, "Order{Major}": 1, },
106: {"Name": "枪刃", "Icon": 60181, "Order{Minor}": 3, "Order{Major}": 1, },
107: {"Name": "投掷武器", "Icon": 60182, "Order{Minor}": 10, "Order{Major}": 1, }
}
Item_Repair = {
0: '',
5594: '1级暗物质',
5595: '2级暗物质',
5596: '3级暗物质',
5597: '4级暗物质',
5598: '5级暗物质',
10386: '6级暗物质',
17837: '7级暗物质',
}
Item_Glamour = {
0: ''
}
|
py | 1a396f0600666d783892c65011e5e239131489d7 | # Copyright 2013-2018 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Channelflow(CMakePackage):
"""Channelflow is a software system for numerical analysis of the
incompressible fluid flow in channel geometries, written in C++.
"""
homepage = 'https://github.com/epfl-ecps/channelflow'
url = 'https://github.com/epfl-ecps/channelflow.git'
version(
'develop',
git='https://github.com/epfl-ecps/channelflow.git',
branch='master'
)
variant('shared', default=True, description='Build shared libs')
variant('mpi', default=True, description='Enable MPI parallelism')
variant('hdf5', default=True, description='Enable support for HDF5 I/O')
variant(
'netcdf', default='serial', values=('none', 'serial', 'parallel'),
multi=False, description='Level of support for NetCDF I/O'
)
variant('python', default=False, description='Build python bindings')
depends_on('eigen')
depends_on('fftw')
# MPI related constraints
depends_on('mpi', when='+mpi')
depends_on('fftw+mpi', when='+mpi')
# Support for different I/O formats
depends_on('hdf5+cxx', when='+hdf5')
depends_on('netcdf', when='netcdf=serial')
depends_on('netcdf+mpi', when='netcdf=parallel')
# Python bindings
depends_on('boost+python', when='+python')
conflicts('~mpi', when='netcdf=parallel', msg='Parallel NetCDF requires MPI')
conflicts(
'+mpi', when='+python',
msg='Building python bindings is possible only for the serial code'
)
conflicts('~mpi', when='^mpi',
msg='There should be no MPI in the DAG when ~mpi is active')
def cmake_args(self):
spec = self.spec
on_or_off = lambda predicate: 'ON' if predicate else 'OFF'
args = [
'-DBUILD_SHARED_LIBS:BOOL={0}'.format(
on_or_off('+shared' in spec)
),
'-DUSE_MPI:BOOL={0}'.format(on_or_off('+mpi' in spec)),
'-DWITH_HDF5CXX:BOOL={0}'.format(on_or_off('+hdf5' in spec)),
'-DWITH_PYTHON:BOOL={0}'.format(on_or_off('+python' in spec))
]
netcdf_str = {
'none': 'OFF',
'serial': 'Serial',
'parallel': 'Parallel'
}
args.append('-DWITH_NETCDF:STRING={0}'.format(
netcdf_str[spec.variants['netcdf'].value]
))
# Set an MPI compiler for parallel builds
if '+mpi' in spec:
args.append(
'-DCMAKE_CXX_COMPILER:PATH={0}'.format(spec['mpi'].mpicxx)
)
return args
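# --- Worked example (added note, not part of the original package) ---
# For an assumed spec such as `channelflow@develop +shared +mpi +hdf5
# netcdf=parallel ~python`, cmake_args() above would yield roughly:
#   -DBUILD_SHARED_LIBS:BOOL=ON
#   -DUSE_MPI:BOOL=ON
#   -DWITH_HDF5CXX:BOOL=ON
#   -DWITH_PYTHON:BOOL=OFF
#   -DWITH_NETCDF:STRING=Parallel
#   -DCMAKE_CXX_COMPILER:PATH=<the MPI C++ wrapper chosen by the concretizer>
# The exact compiler path depends on the MPI provider selected for the build.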
|
py | 1a39702ed3b55ad31573c0683dab30fb758f3352 | # Import flask and template operators
from flask import Flask, render_template
# Import SQLAlchemy
from flask_sqlalchemy import SQLAlchemy
# Define the WSGI application object
app = Flask(__name__)
# Configurations
app.config.from_object('config')
# Define the database object which is imported
# by modules and controllers
db = SQLAlchemy(app)
# Sample HTTP error handling
@app.errorhandler(404)
def not_found(error):
return render_template('404.html'), 404
# Import a module / component using its blueprint handler variable (mod_auth)
from app.mod_auth.controllers import mod_auth as auth_module
# Register blueprint(s)
app.register_blueprint(auth_module)
# app.register_blueprint(xyz_module)
# ..
# Build the database:
# This will create the database file using SQLAlchemy
db.create_all()
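# --- Illustrative sketch (added, not part of the original package) ---
# The blueprint import above assumes a module such as app/mod_auth/controllers.py
# exposing a Blueprint object named `mod_auth`. A minimal version could look
# like the following; the 'auth' name, url_prefix and route are assumptions.
#
#     from flask import Blueprint
#     mod_auth = Blueprint('auth', __name__, url_prefix='/auth')
#
#     @mod_auth.route('/signin/', methods=['GET'])
#     def signin():
#         return 'sign-in form goes here'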
|
py | 1a39713fa250688c19f474c26c67b01dc36f80e7 | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import cv2
import time
import paddle
import numpy as np
from .visualization import plot_tracking_dict
__all__ = [
'MOTTimer',
'Detection',
'write_mot_results',
'save_vis_results',
'load_det_results',
'preprocess_reid',
'get_crops',
'clip_box',
'scale_coords',
]
class MOTTimer(object):
"""
    This class is used to compute and print the current FPS while evaluating.
"""
def __init__(self):
self.total_time = 0.
self.calls = 0
self.start_time = 0.
self.diff = 0.
self.average_time = 0.
self.duration = 0.
def tic(self):
# using time.time instead of time.clock because time time.clock
# does not normalize for multithreading
self.start_time = time.time()
def toc(self, average=True):
self.diff = time.time() - self.start_time
self.total_time += self.diff
self.calls += 1
self.average_time = self.total_time / self.calls
if average:
self.duration = self.average_time
else:
self.duration = self.diff
return self.duration
def clear(self):
self.total_time = 0.
self.calls = 0
self.start_time = 0.
self.diff = 0.
self.average_time = 0.
self.duration = 0.
class Detection(object):
"""
This class represents a bounding box detection in a single image.
Args:
tlwh (Tensor): Bounding box in format `(top left x, top left y,
width, height)`.
score (Tensor): Bounding box confidence score.
feature (Tensor): A feature vector that describes the object
contained in this image.
cls_id (Tensor): Bounding box category id.
"""
def __init__(self, tlwh, score, feature, cls_id):
self.tlwh = np.asarray(tlwh, dtype=np.float32)
self.score = float(score)
self.feature = np.asarray(feature, dtype=np.float32)
self.cls_id = int(cls_id)
def to_tlbr(self):
"""
Convert bounding box to format `(min x, min y, max x, max y)`, i.e.,
`(top left, bottom right)`.
"""
ret = self.tlwh.copy()
ret[2:] += ret[:2]
return ret
def to_xyah(self):
"""
Convert bounding box to format `(center x, center y, aspect ratio,
height)`, where the aspect ratio is `width / height`.
"""
ret = self.tlwh.copy()
ret[:2] += ret[2:] / 2
ret[2] /= ret[3]
return ret
def write_mot_results(filename, results, data_type='mot', num_classes=1):
# support single and multi classes
if data_type in ['mot', 'mcmot']:
save_format = '{frame},{id},{x1},{y1},{w},{h},{score},{cls_id},-1,-1\n'
elif data_type == 'kitti':
save_format = '{frame} {id} car 0 0 -10 {x1} {y1} {x2} {y2} -10 -10 -10 -1000 -1000 -1000 -10\n'
else:
raise ValueError(data_type)
f = open(filename, 'w')
for cls_id in range(num_classes):
for frame_id, tlwhs, tscores, track_ids in results[cls_id]:
if data_type == 'kitti':
frame_id -= 1
for tlwh, score, track_id in zip(tlwhs, tscores, track_ids):
if track_id < 0: continue
if data_type == 'mot':
cls_id = -1
x1, y1, w, h = tlwh
x2, y2 = x1 + w, y1 + h
line = save_format.format(
frame=frame_id,
id=track_id,
x1=x1,
y1=y1,
x2=x2,
y2=y2,
w=w,
h=h,
score=score,
cls_id=cls_id)
f.write(line)
print('MOT results save in {}'.format(filename))
def save_vis_results(data,
frame_id,
online_ids,
online_tlwhs,
online_scores,
average_time,
show_image,
save_dir,
num_classes=1):
if show_image or save_dir is not None:
assert 'ori_image' in data
img0 = data['ori_image'].numpy()[0]
online_im = plot_tracking_dict(
img0,
num_classes,
online_tlwhs,
online_ids,
online_scores,
frame_id=frame_id,
fps=1. / average_time)
if show_image:
cv2.imshow('online_im', online_im)
if save_dir is not None:
cv2.imwrite(
os.path.join(save_dir, '{:05d}.jpg'.format(frame_id)), online_im)
def load_det_results(det_file, num_frames):
assert os.path.exists(det_file) and os.path.isfile(det_file), \
        '{} does not exist or is not a file.'.format(det_file)
labels = np.loadtxt(det_file, dtype='float32', delimiter=',')
assert labels.shape[1] == 7, \
"Each line of {} should have 7 items: '[frame_id],[x0],[y0],[w],[h],[score],[class_id]'.".format(det_file)
results_list = []
for frame_i in range(num_frames):
results = {'bbox': [], 'score': [], 'cls_id': []}
        labels_with_frame = labels[labels[:, 0] == frame_i + 1]
        # each line of labels_with_frame:
        # [frame_id],[x0],[y0],[w],[h],[score],[class_id]
        for l in labels_with_frame:
results['bbox'].append(l[1:5])
results['score'].append(l[5])
results['cls_id'].append(l[6])
results_list.append(results)
return results_list
def scale_coords(coords, input_shape, im_shape, scale_factor):
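    # Added note (hedged): maps boxes from the padded/resized network input back
    # to the original image scale by removing the letterbox padding and dividing
    # by the resize ratio, then clips negative values and rounds.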
im_shape = im_shape.numpy()[0]
ratio = scale_factor[0][0]
pad_w = (input_shape[1] - int(im_shape[1])) / 2
pad_h = (input_shape[0] - int(im_shape[0])) / 2
coords = paddle.cast(coords, 'float32')
coords[:, 0::2] -= pad_w
coords[:, 1::2] -= pad_h
coords[:, 0:4] /= ratio
coords[:, :4] = paddle.clip(coords[:, :4], min=0, max=coords[:, :4].max())
return coords.round()
def clip_box(xyxy, input_shape, im_shape, scale_factor):
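    # Added note (hedged): clips boxes to the original image extent, drops boxes
    # whose width or height becomes non-positive, and returns both the kept boxes
    # and the indices of the detections that survive.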
im_shape = im_shape.numpy()[0]
ratio = scale_factor.numpy()[0][0]
img0_shape = [int(im_shape[0] / ratio), int(im_shape[1] / ratio)]
xyxy[:, 0::2] = paddle.clip(xyxy[:, 0::2], min=0, max=img0_shape[1])
xyxy[:, 1::2] = paddle.clip(xyxy[:, 1::2], min=0, max=img0_shape[0])
w = xyxy[:, 2:3] - xyxy[:, 0:1]
h = xyxy[:, 3:4] - xyxy[:, 1:2]
mask = paddle.logical_and(h > 0, w > 0)
keep_idx = paddle.nonzero(mask)
xyxy = paddle.gather_nd(xyxy, keep_idx[:, :1])
return xyxy, keep_idx
def get_crops(xyxy, ori_img, w, h):
crops = []
xyxy = xyxy.numpy().astype(np.int64)
ori_img = ori_img.numpy()
ori_img = np.squeeze(ori_img, axis=0).transpose(1, 0, 2)
for i, bbox in enumerate(xyxy):
crop = ori_img[bbox[0]:bbox[2], bbox[1]:bbox[3], :]
crops.append(crop)
crops = preprocess_reid(crops, w, h)
return crops
def preprocess_reid(imgs,
w=64,
h=192,
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]):
im_batch = []
for img in imgs:
img = cv2.resize(img, (w, h))
img = img[:, :, ::-1].astype('float32').transpose((2, 0, 1)) / 255
img_mean = np.array(mean).reshape((3, 1, 1))
img_std = np.array(std).reshape((3, 1, 1))
img -= img_mean
img /= img_std
img = np.expand_dims(img, axis=0)
im_batch.append(img)
im_batch = np.concatenate(im_batch, 0)
return im_batch
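# --- Illustrative usage sketch (hedged; not part of the upstream utilities above) ---
# A minimal example of the bounding-box conversions and the timer, using only
# NumPy and the classes defined in this module. The sample values are made up.
def _example_usage():
    timer = MOTTimer()
    timer.tic()
    # A hypothetical detection: tlwh = (x, y, w, h), score, feature vector, class id.
    det = Detection(tlwh=[10., 20., 30., 40.], score=0.9,
                    feature=np.zeros(4), cls_id=0)
    print('tlbr:', det.to_tlbr())  # [10. 20. 40. 60.]
    print('xyah:', det.to_xyah())  # [25. 40. 0.75 40.]
    print('elapsed: {:.6f}s'.format(timer.toc(average=False)))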
|
py | 1a397157a90356cc69d5c783a05f820a28281187 | from math import floor
def format_(number: int) -> float:
return round(float(number), 2)
def play_for(a_performance: dict, plays: dict) -> dict:
return plays[a_performance["playID"]]
def amount_for(a_performance: dict, plays: dict) -> int:
result: int = 0
if play_for(a_performance, plays)["type"] == "tragedy":
result = 40000
if int(a_performance["audience"]) > 30:
result += 1000 * (a_performance["audience"] - 30)
elif play_for(a_performance, plays)["type"] == "comedy":
result = 30000
if a_performance["audience"] > 20:
            result += 10000 + 500 * (a_performance["audience"] - 20)
result += 300 * a_performance["audience"]
else:
print("Unkonw type: %s" % play_for(a_performance, plays)["type"])
return result
def statement(invoice: dict, plays: dict) -> str:
total_amount: int = 0
volume_credits: int = 0
result: str = "Statement for %s\n" % invoice["customer"]
for perf in invoice["performances"]:
# add volume credits
volume_credits += max(perf["audience"] - 30, 0)
# add extra credit for every ten comedy attendees
if play_for(perf, plays)["type"] == "comedy":
volume_credits += floor(perf["audience"] / 5)
# print line for this order
result += "%s: $%d (%d seats)\n" % (play_for(perf, plays)["name"], format_(amount_for(perf, plays)/100), perf["audience"])
total_amount += amount_for(perf, plays)
result += "Amount owed is %d\n" % format_(total_amount/100)
result += "You earned %d credits\n" % format_(volume_credits)
return result
if __name__ == "__main__":
import json
with open("./invoices.json") as f:
invoices = json.load(f)
print(invoices[0]["customer"])
with open("./plays.json") as f:
plays = json.load(f)
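    # --- Illustrative inline example (hedged; the sample data below is made up) ---
    # The same statement() call on small in-memory data, so the behaviour can be
    # checked even without the JSON files used above and below.
    sample_plays = {"hamlet": {"name": "Hamlet", "type": "tragedy"},
                    "as-like": {"name": "As You Like It", "type": "comedy"}}
    sample_invoice = {"customer": "BigCo",
                      "performances": [{"playID": "hamlet", "audience": 55},
                                       {"playID": "as-like", "audience": 35}]}
    print(statement(sample_invoice, sample_plays))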
print(statement(invoices[0], plays)) |
py | 1a3971bc25d74236e3ed9f2138fd70ed051a4761 | # -*- coding: utf8 -*-
# Copyright (c) 2017-2021 THL A29 Limited, a Tencent company. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from tencentcloud.common.abstract_model import AbstractModel
class AlgorithmResult(AbstractModel):
"""每个算法的返回结果
"""
def __init__(self):
r"""
        :param AlgoId: Algorithm ID
        :type AlgoId: str
        :param AlgoName: Algorithm name
        Note: this field may return null, indicating that no valid value could be obtained.
        :type AlgoName: str
        :param Result: Result returned by the algorithm.
        - When the algorithm type is "OCR (1)", the result is a text string
        - When the algorithm type is "text classification (2)", the result string is an array of JSON objects:
        Class: classification result
        Confidence: confidence score
        - When the algorithm type is "sentiment analysis (3)", the result string is a JSON object:
        Positive: probability of positive sentiment
        Negative: probability of negative sentiment
        Neutral: probability of neutral sentiment
        - When the algorithm type is "contract element extraction (4)", the result string is an array of JSON objects:
        NodeName: name of the first-level element
        ItemName: name of the second-level element
        Content: text content of the element
        - When the algorithm type is "entity recognition (5)", the result string is an array of JSON objects:
        - Entity: entity type
        - Content: text content of the entity
        Note: this field may return null, indicating that no valid value could be obtained.
        :type Result: str
        :param Error: Error message of the algorithm call
        Note: this field may return null, indicating that no valid value could be obtained.
        :type Error: str
        :param AlgoType: Algorithm type:
        1: OCR algorithm
        2: text classification algorithm
        3: sentiment analysis algorithm
        4: contract element extraction algorithm
        5: entity recognition algorithm
        Note: this field may return null, indicating that no valid value could be obtained.
        :type AlgoType: int
"""
self.AlgoId = None
self.AlgoName = None
self.Result = None
self.Error = None
self.AlgoType = None
def _deserialize(self, params):
self.AlgoId = params.get("AlgoId")
self.AlgoName = params.get("AlgoName")
self.Result = params.get("Result")
self.Error = params.get("Error")
self.AlgoType = params.get("AlgoType")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class DescribeInvocationResultRequest(AbstractModel):
"""DescribeInvocationResult请求参数结构体
"""
def __init__(self):
r"""
        :param InvokeId: Invocation id, i.e. the RequestId returned by the InvokeService API
:type InvokeId: str
"""
self.InvokeId = None
def _deserialize(self, params):
self.InvokeId = params.get("InvokeId")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class DescribeInvocationResultResponse(AbstractModel):
"""DescribeInvocationResult返回参数结构体
"""
def __init__(self):
r"""
        :param Results: Invocation results of the service
        :type Results: list of AlgorithmResult
        :param Status: 0: failed to fetch the result
        1: the result has not been generated yet, keep polling
        2: the result was fetched successfully
        :type Status: int
        :param RequestId: The unique request ID, which is returned for each request. The RequestId is required to locate a problem.
        :type RequestId: str
"""
self.Results = None
self.Status = None
self.RequestId = None
def _deserialize(self, params):
if params.get("Results") is not None:
self.Results = []
for item in params.get("Results"):
obj = AlgorithmResult()
obj._deserialize(item)
self.Results.append(obj)
self.Status = params.get("Status")
self.RequestId = params.get("RequestId")
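# --- Illustrative usage sketch (hedged; not part of the generated SDK models above) ---
# A minimal example of filling DescribeInvocationResultResponse from a plain
# dict, mimicking what the SDK does with a decoded API response body.
# All field values below are made up for demonstration.
def _example_deserialize():
    sample = {
        "Results": [{"AlgoId": "algo-001", "AlgoName": "ocr", "Result": "hello",
                     "Error": "", "AlgoType": 1}],
        "Status": 2,
        "RequestId": "req-123",
    }
    resp = DescribeInvocationResultResponse()
    resp._deserialize(sample)
    return resp.Status, resp.Results[0].AlgoId  # (2, 'algo-001')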
class InvokeServiceRequest(AbstractModel):
"""InvokeService请求参数结构体
"""
def __init__(self):
r"""
        :param ServiceId: ID of the service to invoke.
        :type ServiceId: str
        :param ServiceStatus: Status of the service to invoke: 0 means the debug version, 1 means the released version
        :type ServiceStatus: int
        :param FileUrl: URL of the document used for testing.
        :type FileUrl: str
        :param Input: Text used for testing; when this value is not empty, the invocation uses the value of this parameter.
        :type Input: str
"""
self.ServiceId = None
self.ServiceStatus = None
self.FileUrl = None
self.Input = None
def _deserialize(self, params):
self.ServiceId = params.get("ServiceId")
self.ServiceStatus = params.get("ServiceStatus")
self.FileUrl = params.get("FileUrl")
self.Input = params.get("Input")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class InvokeServiceResponse(AbstractModel):
"""InvokeService返回参数结构体
"""
def __init__(self):
r"""
        :param RequestId: The unique request ID, which is returned for each request. The RequestId is required to locate a problem.
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId") |
py | 1a39736b1820262627e7e518db2648a27af36600 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from typing import Optional, Tuple
import numpy as np
def fft_centered(
input: np.ndarray,
shape: Optional[Tuple[int, ...]] = None,
dim: Optional[Tuple[int, ...]] = None,
norm: Optional[str] = None
) -> np.ndarray:
"""Computes the centered N dimensional discrete Fourier transform (FFT) of
input.
Args:
input: Input NumPy array, can be complex.
shape (Optional): Shape of output, truncated or zero-padded.
dim (Optional): Dimensions along which to apply FFT.
norm (Optional): Normalization mode:
- "forward": normalized by 1 / n;
- "backward" (Default): no normalization;
- "ortho": normalized by 1 / sqrt(n), making the FFT orthonormal.
Returns:
The FFT of input. Output has the same shape as input.
"""
input = np.fft.ifftshift(input, axes=dim)
input = np.fft.fftn(input, s=shape, axes=dim, norm=norm)
input = np.fft.fftshift(input, axes=dim)
return input
def ifft_centered(
input: np.ndarray,
shape: Optional[Tuple[int, ...]] = None,
dim: Optional[Tuple[int, ...]] = None,
norm: Optional[str] = None
) -> np.ndarray:
"""Computes the centered N dimensional inverse discrete Fourier transform
(IFFT) of input.
Args:
input: Input NumPy array, can be complex.
shape (Optional): Shape of output, truncated or zero-padded.
dim (Optional): Dimensions along which to apply IFFT.
norm (Optional): Normalization mode:
- "forward": normalized by 1 / n;
- "backward" (Default): no normalization;
- "ortho": normalized by 1 / sqrt(n), making the IFFT orthonormal.
Returns:
The IFFT of input. Output has the same shape as input.
"""
input = np.fft.ifftshift(input, axes=dim)
input = np.fft.ifftn(input, s=shape, axes=dim, norm=norm)
input = np.fft.fftshift(input, axes=dim)
return input
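# --- Illustrative usage sketch (hedged; not part of the functions above) ---
# A minimal round-trip check: applying ifft_centered after fft_centered with
# matching axes and norm should recover the input up to floating-point error.
# The random 2-D array below is made up for demonstration.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    x = rng.standard_normal((8, 8)) + 1j * rng.standard_normal((8, 8))
    k = fft_centered(x, dim=(0, 1), norm="ortho")
    x_rec = ifft_centered(k, dim=(0, 1), norm="ortho")
    print(np.allclose(x, x_rec))  # expected: True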
|
py | 1a3973b26559f0a0f85168cc36def715b3775000 | from rocketpyBeta import *
from numpy.random import normal, uniform, choice
from datetime import datetime
IN = {
"impulse": (1240, 100),
"burnOut": (2.84, 0.15),
"nozzleRadius": (30/1000, 0.5/1000),
"throatRadius": (8/1000, 0.5/1000),
"grainSeparation": (1/1000, 0.5/1000),
"grainDensity": (1707, 24),
"grainOuterRadius": (21.05/1000, 0.137/1000),
"grainInitialInnerRadius": (9.63/1000, 0.076/1000),
"grainInitialHeight": (118.38/1000, 0.415/1000),
"m_prop": (1.664, 0.05),
"m_aero": (0.696, 0.02),
"inertiaI": (0.3437,0.01*0.3437),
"inertiaZ": (0.00288,0.01*0.00288),
"radius": (0.0378,0.0001),
"distanceRocketNozzle": (0.467,0.003),
"distanceRocketPropellant": (0.091,0.003),
"powerOffDrag": (1,0.03),
"powerOnDrag": (1,0.03),
"noseLength": (0.151, 0.001),
"noseDistanceToCM": (0.539, 0.003),
"tail1TopRadius": (0.0378, 0.0001),
"tail1BottomRadius": (0.0602/2, 0.0001),
"tail1Length": (0.00765, 0.0001),
"tail1DistanceToCM": (0.168, 0.003),
"tail2Length": (0.00580, 0.0001),
"tail2TopRadius": (0.0602/2,0.0001),
"tail2BottomRadius": (0.0723/2,0.0001),
"tail2DistanceToCM": (-0.3374,0.003),
"tail3Length": (0.005, 0.0005),
"tail3TopRadius": (0.0723/2, 0.0001),
"tail3BottomRadius": (0.0411/2, 0.0001),
"tail3DistanceToCM": (-0.4624, 0.0001),
"finSpan": (0.070, 0.001),
"finRootChord": (0.08, 0.001),
"finTipChord": (0.04, 0.001),
"finDistanceToCM": (-0.344, 0.003),
"inclination": (85, 1),
"heading": (90, 1),
"m_rec": (0.160, 0.024),
"CdS": (0.43, 0.086),
"lag_rec": (1 , 0.5),
"m_se": (0.300, 0.02),
"lag_se": (0.73, 0.16)}
while True:
# Number of simulations
s = 500
print('Initializing new dispersion analysis sequence.')
print('Euporia I - Plan A - Balistic')
print('Number of simulations: '+str(s))
print('Estimated time: ' + str(1.5*s/60) + ' mins')
print(datetime.now())
init = datetime.now()
# Initialize output
inputs = []
output = []
    # Environment Variables
envRailLength = normal(2, 0.01, s)
envYear = choice(np.arange(2013, 2017), s)
envDay = choice(np.arange(1, 10), s)
# envHour = choice([18, 12], s, p=[0.5, 0.5])
# Motor Variables
motorBurnOut = normal(*IN['burnOut'], s)
motorTotalImpulse = normal(*IN['impulse'], s)
motornozzleRadius = normal(*IN['nozzleRadius'], s)
motorthroatRadius = normal(*IN['throatRadius'], s)
motorgrainSeparation = normal(*IN['grainSeparation'], s)
motorgrainDensity = normal(*IN['grainDensity'], s)
motorgrainOuterRadius = normal(*IN['grainOuterRadius'], s)
motorgrainInitialInnerRadius = normal(*IN['grainInitialInnerRadius'], s)
motorgrainInitialHeight = normal(*IN['grainInitialHeight'], s)
# Rocket Variables
rMassSE = normal(*IN['m_se'], s)
rMassRec = normal(*IN['m_rec'], s)
rMassProp = normal(*IN['m_prop'], s)
rMassAero = normal(*IN['m_aero'], s)
rInertiaI = normal(*IN['inertiaI'], s)
rInertiaZ = normal(*IN['inertiaZ'], s)
rRadius = normal(*IN['radius'], s)
rDistanceRocketNozzle = normal(*IN['distanceRocketNozzle'], s)
rDistanceRocketPropellant = normal(*IN['distanceRocketPropellant'], s)
rpowerOnDrag = normal(*IN['powerOnDrag'], s)
rpowerOffDrag = normal(*IN['powerOffDrag'], s)
# Nose
rNoseLength = normal(*IN['noseLength'], s)
rNoseDistanceToCM = normal(*IN['noseDistanceToCM'], s)
# Fins
rFinsSpan = normal(*IN['finSpan'], s)
rFinsRootChord = normal(*IN['finRootChord'], s)
rFinsTipChord = normal(*IN['finTipChord'], s)
rFinsDistanceToCM = normal(*IN['finDistanceToCM'], s)
# Tail 1
rTail1TopRadius = normal(*IN['tail1TopRadius'], s)
rTail1BottomRadius = normal(*IN['tail1BottomRadius'], s)
rTail1Length = normal(*IN['tail1Length'], s)
rTail1DistanceToCM = normal(*IN['tail1DistanceToCM'], s)
# Tail 2
rTail2TopRadius = normal(*IN['tail2TopRadius'], s)
rTail2BottomRadius = normal(*IN['tail2BottomRadius'], s)
rTail2Length = normal(*IN['tail2Length'], s)
rTail2DistanceToCM = normal(*IN['tail2DistanceToCM'], s)
# Tail 3
rTail3TopRadius = normal(*IN['tail3TopRadius'], s)
rTail3BottomRadius = normal(*IN['tail3BottomRadius'], s)
rTail3Length = normal(*IN['tail3Length'], s)
rTail3DistanceToCM = normal(*IN['tail3DistanceToCM'], s)
# Parachute
pDrogueCdS = normal(*IN['CdS'], s)
pDrogueLag = normal(*IN['lag_rec'], s)
dSeLag = normal(*IN['lag_se'], s)
# Flight variables
fInclination = normal(*IN['inclination'], s)
fHeading = normal(*IN['heading'], s)
    # Initialize environment and motor
E = Environment(railLength=2,
gravity=9.8,
windData='../data/weather/RioSaoPaulo.nc',
location=(-21.961526, -47.480908),
date=(2016, 2, 4, 12))
for i in range(s):
print('Iteration: ', i, end='\r')
        # Environment Variables
railLength = envRailLength[i]
year = envYear[i]
day = envDay[i]
hour = 12
# Motor Variables
burnOut = motorBurnOut[i]
totalImpulse = motorTotalImpulse[i]
nozzleRadius = motornozzleRadius[i]
throatRadius = motorthroatRadius[i]
grainSeparation = motorgrainSeparation[i]
grainDensity = motorgrainDensity[i]
grainOuterRadius = motorgrainOuterRadius[i]
grainInitialInnerRadius = motorgrainInitialInnerRadius[i]
grainInitialHeight = motorgrainInitialHeight[i]
# Rocket Variables
m_aeroI = rMassAero[i]
m_recI = rMassRec[i]
m_seI = rMassSE[i]
m_propI = rMassProp[i]
mass = m_aeroI + m_recI + m_seI + m_propI
inertiaI = rInertiaI[i]
inertiaZ = rInertiaZ[i]
radius = rRadius[i]
distanceRocketNozzle = rDistanceRocketNozzle[i]
distanceRocketPropellant = rDistanceRocketPropellant[i]
powerOnDrag = rpowerOnDrag[i]
powerOffDrag = rpowerOffDrag[i]
# Nose
noseLength = rNoseLength[i]
noseDistanceToCM = rNoseDistanceToCM[i]
# Fins
finSpan = rFinsSpan[i]
finRootChord = rFinsRootChord[i]
finTipChord = rFinsTipChord[i]
finDistanceToCM = rFinsDistanceToCM[i]
# Tail 1
tail1TopRadius = rTail1TopRadius[i]
tail1BottomRadius = rTail1BottomRadius[i]
tail1Length = rTail1Length[i]
tail1DistanceToCM = rTail1DistanceToCM[i]
# Tail 2
tail2TopRadius = rTail2TopRadius[i]
tail2BottomRadius = rTail2BottomRadius[i]
tail2Length = rTail2Length[i]
tail2DistanceToCM = rTail2DistanceToCM[i]
# Tail 3
tail3TopRadius = rTail3TopRadius[i]
tail3BottomRadius = rTail3BottomRadius[i]
tail3Length = rTail3Length[i]
tail3DistanceToCM = rTail3DistanceToCM[i]
# Parachute
drogueCdS = pDrogueCdS[i]
drogueLag = pDrogueLag[i] + dSeLag[i]
# Flight variables
inclination = fInclination[i]
heading = fHeading[i]
inputs.append([year, day, hour, railLength, burnOut, totalImpulse, mass, inertiaI, inertiaZ, radius, inclination, heading])
E.setDate((year, 2, day, hour))
E.railLength = railLength
Jiboia58 = Motor(thrustSource='../data/jiboia/thrustCurve.csv',
burnOut=2.84,
reshapeThrustCurve=(burnOut, totalImpulse),
interpolationMethod='spline',
nozzleRadius=nozzleRadius,
throatRadius=throatRadius,
grainNumber=5,
grainSeparation=grainSeparation,
grainDensity=grainDensity,
grainOuterRadius=grainOuterRadius,
grainInitialInnerRadius=grainInitialInnerRadius,
grainInitialHeight=grainInitialHeight)
EuporiaI = Rocket(motor=Jiboia58,
mass=m_aeroI+m_propI+m_recI+m_seI,
inertiaI=inertiaI,
inertiaZ=inertiaZ,
radius=radius,
distanceRocketNozzle=distanceRocketNozzle,
distanceRocketPropellant=distanceRocketPropellant,
offCenter=0,
powerOffDrag="../data/euporia/euporiaIDragOff.csv",
powerOnDrag="../data/euporia/euporiaIDragOn.csv",
drogueArea=False,
drogueCd=False,
drogueLag=drogueLag,
mainArea=False,
mainCd=False,
mainAlt=50)
EuporiaI.powerOffDrag = powerOffDrag*EuporiaI.powerOffDrag
EuporiaI.powerOnDrag = powerOnDrag*EuporiaI.powerOnDrag
EuporiaI.addNose(length=noseLength, kind='parabolic', distanceToCM=noseDistanceToCM)
EuporiaI.addTail(topRadius=tail1TopRadius, bottomRadius=tail1BottomRadius, length=tail1Length, distanceToCM=tail1DistanceToCM)
EuporiaI.addTail(topRadius=tail2TopRadius, bottomRadius=tail2BottomRadius, length=tail2Length, distanceToCM=tail2DistanceToCM)
EuporiaI.addFins(n=4, rootChord=finRootChord, tipChord=finTipChord, span=finSpan, distanceToCM=finDistanceToCM)
EuporiaI.addTail(topRadius=tail3TopRadius, bottomRadius=tail3BottomRadius, length=tail3Length, distanceToCM=tail3DistanceToCM)
F = Flight(EuporiaI, E, inclination=inclination, heading=heading, flightPhases=-1, timeStep=[0.01, 0.1])
# Calculate Max Vel
sol = np.array(F.solution)
F.vx = Function(sol[:, [0, 4]], 'Time (s)', 'Vx (m/s)', 'spline', extrapolation="natural")
F.vy = Function(sol[:, [0, 5]], 'Time (s)', 'Vy (m/s)', 'spline', extrapolation="natural")
F.vz = Function(sol[:, [0, 6]], 'Time (s)', 'Vz (m/s)', 'spline', extrapolation="natural")
F.v = (F.vx**2 + F.vy**2 + F.vz**2)**0.5
F.v.setDiscrete(0, burnOut, 100)
F.maxVel = np.amax(F.v.source[:, 1])
# Output
output.append([F.outOfRailTime, F.outOfRailVelocity, F.maxVel, F.apogeeTime, F.apogee, F.apogeeX, F.apogeeY,
F.drogueOpeningTime, F.drogueOpeningVelocity, F.drogueX, F.drogueY, F.drogueZ,
F.tFinal, F.xImpact, F.yImpact, F.impactVelocity, F.rocket.staticMargin])
# Write to file
print('Sequence completed!')
id = str(choice(200000))
np.savetxt('InpDispersion' + id + '.euporia_I_AB', inputs, delimiter=',')
np.savetxt('OutDispersion' + id + '.euporia_I_AB', output, delimiter=',')
print('Results written to file!')
print('End Time:', datetime.now())
print('Total Elapsed Time (min):', (datetime.now() - init).seconds/60)
    print('Average Time (s):', (datetime.now() - init).seconds/s)
print() |
py | 1a3973be4c68d01201589559d84714f1f297c47f | from File import generateNames
from File import regenerateFile
from File import getVersion
from MyParser import MyParser
import fnmatch
import os
print("Initializing html parser.")
parser = MyParser()
print("Reading configuration file.")
if not parser.getConfig():
print("Starting failed!!!")
exit()
version = parser.version
print("Found configuration.")
print("Domain: " + parser.domainName)
print("Web directory: " + parser.webDir)
print("Version: " + parser.version)
print("Looking for html files.")
matches = []
for root, dirnames, filenames in os.walk(parser.webDir):
for filename in fnmatch.filter(filenames, '*.html'):
matches.append(os.path.join(root, filename))
if len(matches) == 0:
print("No available files. Cancelling...")
exit()
print("Number of files found: " + len(matches))
print("Listing files...")
for match in matches:
print(match)
for file in matches:
print("Parsing: " + file)
data = parser.parseFile(file)
links = parser.findTags(data, "link")
fileList = parser.extractFiles(links)
newFiles = generateNames(list(fileList), version)
for index in range(0, len(newFiles)):
if not os.path.isfile(newFiles[index]):
os.rename(fileList[index], newFiles[index])
parser.generateTag(data, newFiles[index])
data = parser.strip(data)
regenerateFile(file, data) |
py | 1a39740f64f83d5e7d7a3f0fc39eb1a01fdc714a | def main(request, response):
"""Send a response with the Origin-Isolation header given in the "header"
query parameter, or no header if that is not provided. In either case, the
response will listen for message and messageerror events and echo them back
to the parent. See ./helpers.mjs for how these handlers are used.
"""
if "header" in request.GET:
header = request.GET.first("header")
response.headers.set("Origin-Isolation", header)
response.headers.set("Content-Type", "text/html")
return """
<!DOCTYPE html>
<meta charset="utf-8">
<title>Helper page for origin isolation tests</title>
<script type="module">
import { sendWasmModule } from "./helpers.mjs";
window.onmessage = async (e) => {
// These could come from the parent or siblings.
if (e.data.constructor === WebAssembly.Module) {
e.source.postMessage("WebAssembly.Module message received", "*");
}
// These only come from the parent.
if (e.data.command === "set document.domain") {
document.domain = e.data.newDocumentDomain;
parent.postMessage("document.domain is set", "*");
} else if (e.data.command === "send WASM module") {
const destinationFrameWindow = parent.frames[e.data.indexIntoParentFrameOfDestination];
const whatHappened = await sendWasmModule(destinationFrameWindow);
parent.postMessage(whatHappened, "*");
} else if (e.data.command === "access document") {
const destinationFrameWindow = parent.frames[e.data.indexIntoParentFrameOfDestination];
try {
destinationFrameWindow.document;
parent.postMessage("accessed document successfully", "*");
} catch (e) {
parent.postMessage(e.name, "*");
}
}
// We could also receive e.data === "WebAssembly.Module message received",
// but that's handled by await sendWasmModule() above.
};
window.onmessageerror = e => {
e.source.postMessage("messageerror", "*");
};
</script>
"""
|
py | 1a3974c32b395963c2a990ed53ceccbc1dc6b683 | """Methods for testing the subroutines in the grouptheory module."""
import unittest as ut
from phenum.grouptheory import ArrowPerm, RotPermList, opList
import pytest
gpath = "tests/grouptheory/"
def _read_fixOp_1D(fname):
import os
i = 1
growing = True
out = []
while growing:
if os.path.isfile(fname+"/_-"+str(i)+"-rot") or os.path.isfile(fname+"/_-"+str(i)+"-shift"):
i += 1
else:
growing = False
for j in range(1,i):
if os.path.isfile(fname+"/_-"+str(j)+"-rot"):
rot = _read_float_3D(fname+"/_-"+str(j)+"-rot")
else:
rot = None
if os.path.isfile(fname+"/_-"+str(j)+"-shift"):
shift = list(map(list,zip(*_read_float_2D(fname+"/_-"+str(j)+"-shift"))))
else:
shift = None
temp = opList(rot=rot,shift=shift)
out.append(temp)
return out
def _read_RotPermList_1D(fname,arrowp = None):
import os
i = 1
growing = True
out = []
    while growing:
if os.path.isfile(fname+"/_-"+str(i)+"-nL") or os.path.isfile(fname+"/_-"+str(i)+"-v") or os.path.isfile(fname+"/_-"+str(i)+"-RotIndx") or os.path.isfile(fname+"/_-"+str(i)+"-perm"):
i += 1
else:
growing = False
for j in range(1,i):
if os.path.isfile(fname+"/_-"+str(j)+"-nL"):
nL = _read_int(fname+"/_-"+str(j)+"-nL")
else:
nL = None
if os.path.isfile(fname+"/_-"+str(j)+"-v"):
v = _read_float_3D(fname+"/_-"+str(j)+"-v")
else:
v = None
if os.path.isfile(fname+"/_-"+str(j)+"-perm"):
perm = _read_int_2D(fname+"/_-"+str(j)+"-perm")
perm = [[i-1 for i in t] for t in perm]
else:
perm = None
if arrowp == None:
a_perm = None
if os.path.isfile(fname+"/_-"+str(j)+"-RotIndx"):
RotIndx = _read_int_1D(fname+"/_-"+str(j)+"-RotIndx")
RotIndx = [i-1 for i in RotIndx]
else:
RotIndx = None
temp = RotPermList(nL = nL, v = v, perm = perm, arrows=a_perm, RotIndx= RotIndx)
out.append(temp)
return out
def _read_fixOp(fname):
import os
if os.path.isfile(fname+"/_-rot"):
rot = _read_float_3D(fname+"/_-rot")
else:
rot = None
if os.path.isfile(fname+"/_-shift"):
shift = list(map(list,zip(*_read_float_2D(fname+"/_-shift"))))
else:
shift = None
out = opList(rot=rot,shift=shift)
return out
def _read_RotPermList(fname,arrowp = None):
import os
if os.path.isfile(fname+"/_-nL"):
nL = _read_int(fname+"/_-nL")
else:
nL = None
if os.path.isfile(fname+"/_-v"):
v = _read_float_3D(fname+"/_-v")
else:
v = None
if os.path.isfile(fname+"/_-perm"):
perm = _read_int_2D(fname+"/_-perm")
perm = [[i-1 for i in j] for j in perm]
else:
perm = None
if arrowp == None:
a_perm = None
if os.path.isfile(fname+"/_-RotIndx"):
RotIndx = _read_int_1D(fname+"/_-RotIndx")
RotIndx = [i-1 for i in RotIndx]
else:
RotIndx = None
out = RotPermList(nL = nL, v = v, perm = perm, arrows=a_perm, RotIndx= RotIndx)
return out
def _read_float_3D(fname):
with open(fname,"r") as inf:
temp = inf.readline()
sizes = inf.readline()
sizes = [int(x) for x in sizes.strip().split() if x !="##"]
temp = inf.readline()
in_data = []
in_temp = []
for line in inf:
if "#" not in line:
in_temp.append([float(i) for i in line.strip().split()])
else:
in_data.append(in_temp)
in_temp = []
in_data.append(in_temp)
out = []
for i in range(sizes[2]):
out_t = []
for j in range(sizes[1]):
out_t.append([k[j][i] for k in in_data])
out.append(out_t)
return(out)
def _read_int_3D(fname):
with open(fname,"r") as inf:
temp = inf.readline()
sizes = inf.readline()
sizes = [int(x) for x in sizes.strip().split() if x !="##"]
temp = inf.readline()
in_data = []
in_temp = []
for line in inf:
if "#" not in line:
in_temp.append([int(i) for i in line.strip().split()])
else:
in_data.append(in_temp)
in_temp = []
in_data.append(in_temp)
out = []
for i in range(sizes[2]):
out_t = []
for j in range(sizes[1]):
out_t.append([k[j][i] for k in in_data])
out.append(out_t)
return(out)
def _read_output(test):
values = []
with open("tests/grouptheory/"+test) as f:
for line in f:
values.append(eval(line))
return values
def _read_float_2D(fname):
array = []
with open(fname,"r") as f1:
for line in f1:
if "#" not in line:
array.append([float(i) for i in line.strip().split()])
return array
def _read_float_1D(fname):
array = []
from os import getcwd
with open(fname,"r") as f1:
for line in f1:
if "#" not in line:
array = [float(i) for i in line.strip().split()]
return array
def _read_int_2D(fname):
array = []
with open(fname,"r") as f1:
for line in f1:
if "#" not in line:
array.append([int(i) for i in line.strip().split()])
return array
def _read_int_1D(fname):
array = []
with open(fname,"r") as f1:
for line in f1:
if "#" not in line:
array = [int(i) for i in line.strip().split()]
return array
def _read_int(fname):
with open(fname,"r") as f1:
line = f1.readline()
if "#" in line:
line = f1.readline()
val = int(line.strip())
return val
def _read_float(fname):
with open(fname,"r") as f1:
line = f1.readline()
if "#" in line:
line = f1.readline()
val = float(line.strip())
return val
def _read_logical(fname):
with open(fname,"r") as f1:
line = f1.readline()
if "#" in line:
line = f1.readline()
if "t" in line.lower():
val = True
else:
val = False
return val
class TestGetFullHNF(ut.TestCase):
""" Tests of the get_full_HNF subroutine."""
def test1(self):
from phenum.grouptheory import get_full_HNF
from numpy import array
HNF = array([1,0,1,0,0,1])
out = [[1,0,0],[0,1,0],[0,0,1]]
self.assertEqual(get_full_HNF(HNF),out)
def test2(self):
from phenum.grouptheory import get_full_HNF
from numpy import array
HNF = array([2,1,2,1,0,4])
out = [[2,0,0],[1,2,0],[1,0,4]]
self.assertEqual(get_full_HNF(HNF),out)
def test3(self):
from phenum.grouptheory import get_full_HNF
from numpy import array
HNF = array([1,0,3,1,2,3])
out = [[1,0,0],[0,3,0],[1,2,3]]
self.assertEqual(get_full_HNF(HNF),out)
def test4(self):
from phenum.grouptheory import get_full_HNF
from numpy import array
HNF = [0,0,0,0,0,0]
out = [[0,0,0],[0,0,0],[0,0,0]]
self.assertEqual(get_full_HNF(HNF),out)
def test5(self):
from phenum.grouptheory import get_full_HNF
from numpy import array
HNF = array([3,0,3,0,0,3])
out = [[3,0,0],[0,3,0],[0,0,3]]
self.assertEqual(get_full_HNF(HNF),out)
    def test6(self):
from phenum.grouptheory import get_full_HNF
from numpy import array
HNF = array([1,1,2,0,2,2])
out = [[1,0,0],[1,2,0],[0,2,2]]
self.assertEqual(get_full_HNF(HNF),out)
    def test7(self):
from phenum.grouptheory import get_full_HNF
from numpy import array
HNF = array([2,0,2,0,2,4])
out = [[2,0,0],[0,2,0],[0,2,4]]
self.assertEqual(get_full_HNF(HNF),out)
class TestSmithNormalForm(ut.TestCase):
""" Tests of the SmithNormalForm subroutine."""
def test1(self):
from phenum.grouptheory import SmithNormalForm
HNF = [[1, 0, 0], [0, 1, 0], [0, 0, 1]]
out = ([[1, 0, 0], [0, 1, 0], [0, 0, 1]], [[1, 0, 0], [0, 1, 0], [0, 0, 1]], [[1, 0, 0], [0, 1, 0], [0, 0, 1]])
self.assertEqual(SmithNormalForm(HNF),out)
def test2(self):
from phenum.grouptheory import SmithNormalForm
HNF = [[1, 0, 0], [0, 1, 0], [0, 1, 2]]
out = ([[1, 0, 0], [0, 1, 0], [0, 0, 2]], [[1, 0, 0], [0, 1, 0], [0, -1, 1]], [[1, 0, 0], [0, 1, 0], [0, 0, 1]])
self.assertEqual(SmithNormalForm(HNF),out)
def test3(self):
from phenum.grouptheory import SmithNormalForm
HNF = [[1, 0, 0], [0, 1, 0], [0, 0, 3]]
out = ([[1, 0, 0], [0, 1, 0], [0, 0, 3]], [[1, 0, 0], [0, 1, 0], [0, 0, 1]], [[1, 0, 0], [0, 1, 0], [0, 0, 1]])
self.assertEqual(SmithNormalForm(HNF),out)
def test4(self):
from phenum.grouptheory import SmithNormalForm
HNF = [[1, 0, 0], [0, 2, 0], [0, 0, 2]]
out = ([[1, 0, 0], [0, 2, 0], [0, 0, 2]], [[1, 0, 0], [0, 1, 0], [0, 0, 1]], [[1, 0, 0], [0, 1, 0], [0, 0, 1]])
self.assertEqual(SmithNormalForm(HNF),out)
def test5(self):
from phenum.grouptheory import SmithNormalForm
HNF = [[1, 0, 0], [0, 1, 0], [1, 2, 5]]
out = ([[1, 0, 0], [0, 1, 0], [0, 0, 5]], [[1, 0, 0], [0, 1, 0], [-1, -2, 1]], [[1, 0, 0], [0, 1, 0], [0, 0, 1]])
self.assertEqual(SmithNormalForm(HNF),out)
def test6(self):
from phenum.grouptheory import SmithNormalForm
HNF = [[1, 0, 0], [0, 1, 0], [2, 3, 6]]
out = ([[1, 0, 0], [0, 1, 0], [0, 0, 6]], [[1, 0, 0], [0, 1, 0], [-2, -3, 1]], [[1, 0, 0], [0, 1, 0], [0, 0, 1]])
self.assertEqual(SmithNormalForm(HNF),out)
def test7(self):
from phenum.grouptheory import SmithNormalForm
HNF = [[1, 0, 0], [0, 1, 0], [0, 6, 7]]
out = ([[1, 0, 0], [0, 1, 0], [0, 0, 7]], [[1, 0, 0], [0, 1, 0], [0, -6, 1]], [[1, 0, 0], [0, 1, 0], [0, 0, 1]])
self.assertEqual(SmithNormalForm(HNF),out)
def test8(self):
from phenum.grouptheory import SmithNormalForm
HNF = [[1, 0, 0], [1, 2, 0], [1, 0, 4]]
out = ([[1, 0, 0], [0, 2, 0], [0, 0, 4]], [[1, 0, 0], [-1, 1, 0], [-1, 0, 1]], [[1, 0, 0], [0, 1, 0], [0, 0, 1]])
self.assertEqual(SmithNormalForm(HNF),out)
def test9(self):
from phenum.grouptheory import SmithNormalForm
HNF = [[2, 0, 0], [0, 2, 0], [0, 0, 2]]
out = ([[2, 0, 0], [0, 2, 0], [0, 0, 2]], [[1, 0, 0], [0, 1, 0], [0, 0, 1]], [[1, 0, 0], [0, 1, 0], [0, 0, 1]])
self.assertEqual(SmithNormalForm(HNF),out)
def test10(self):
from phenum.grouptheory import SmithNormalForm
HNF = [[1, 0, 0], [0, 1, 0], [1, 5, 10]]
out = ([[1, 0, 0], [0, 1, 0], [0, 0, 10]], [[1, 0, 0], [0, 1, 0], [-1, -5, 1]], [[1, 0, 0], [0, 1, 0], [0, 0, 1]])
self.assertEqual(SmithNormalForm(HNF),out)
def test11(self):
from phenum.grouptheory import SmithNormalForm
HNF = [[-1, 0, 0], [0, 1, 0], [0, 0, 1]]
out = ()
with pytest.raises(ValueError):
self.assertEqual(SmithNormalForm(HNF),out)
def test12(self):
from phenum.grouptheory import SmithNormalForm
HNF = [[0, 1, 0], [0, 0, 1], [1, 0, 0]]
out = ([[1, 0, 0], [0, 1, 0], [0, 0, 1]], [[0, 0, 1], [1, 0, 0], [0, 1, 0]], [[1, 0, 0], [0, 1, 0], [0, 0, 1]])
self.assertEqual(SmithNormalForm(HNF),out)
def test13(self):
from phenum.grouptheory import SmithNormalForm
HNF = [[1, -1, -2], [1, 2, -3], [1, 2, 4]]
out = ([[1, 0, 0], [0, -1, 0], [0, 0, 21]], [[1, 0, 0], [-1, 1, 0], [-7, 6, 1]], [[1, 2, 7], [0, 0, 1], [0, 1, 3]])
self.assertEqual(SmithNormalForm(HNF),out)
def test14(self):
from phenum.grouptheory import SmithNormalForm
HNF = [[-1, -2, -3], [-1, -1, -2], [-1, -2, -4]]
out = ([[1, 0, 0], [0, 1, 0], [0, 0, 1]], [[1, 0, 0], [-1, 1, 0], [-1, 0, 1]], [[-1, -2, 1], [0, 1, 1], [0, 0, -1]])
self.assertEqual(SmithNormalForm(HNF),out)
def test15(self):
from phenum.grouptheory import SmithNormalForm
HNF = [[1, 2.5, 0], [0, 1.5, 1.66], [1.5, 1.25, 1.3]]
out = ([[0.5, 0.0, 0.0], [0.0, 0.5, 0.0], [0.0, 0.0, 16.5]], [[-1.0, 0.0, 1.0], [3.0, -3.0, -2.0], [-9.0, 10.0, 6.0]], [[1, 2.5, 23.25], [0, 1.0, 10.5], [0, 0.0, 1.0]])
with pytest.raises(RuntimeError):
SmithNormalForm(HNF)
class TestAGroup(ut.TestCase):
""" Tests of the a_group subroutine."""
def test1(self):
from phenum.grouptheory import a_group
trans = [[0,1],[1,0]]
rots = [[[0,1],[0,1,2,3,4,5]],[[1,0],[2,3,0,1,5,4]],[[1,0],[2,1,0,3,5,4]],[[0,1],[0,3,2,1,5,4]]]
out = _read_output("agroup.out.1")
self.assertEqual(a_group(trans,rots),out)
def test2(self):
from phenum.grouptheory import a_group
trans = [[j-1 for j in i] for i in [[1, 2, 3, 4], [2, 1, 4, 3], [3, 4, 1, 2], [4, 3, 2, 1]]]
rots = [[[j-1 for j in i] for i in t] for t in [[[1, 2, 3, 4], [1, 2, 3, 4, 5, 6]], [[1, 4, 3, 2], [1, 3, 2, 4, 6, 5]], [[1, 2, 3, 4], [4, 2, 3, 1, 5, 6]], [[1, 4, 3, 2], [4, 3, 2, 1, 6, 5]], [[1, 2, 3, 4], [1, 5, 3, 4, 2, 6]], [[1, 4, 3, 2], [1, 3, 5, 4, 6, 2]], [[1, 2, 3, 4], [4, 5, 3, 1, 2, 6]], [[1, 4, 3, 2], [4, 3, 5, 1, 6, 2]], [[1, 2, 3, 4], [1, 2, 6, 4, 5, 3]], [[1, 4, 3, 2], [1, 6, 2, 4, 3, 5]], [[1, 2, 3, 4], [4, 2, 6, 1, 5, 3]], [[1, 4, 3, 2], [4, 6, 2, 1, 3, 5]], [[1, 2, 3, 4], [1, 5, 6, 4, 2, 3]], [[1, 4, 3, 2], [1, 6, 5, 4, 3, 2]], [[1, 2, 3, 4], [4, 5, 6, 1, 2, 3]], [[1, 4, 3, 2], [4, 6, 5, 1, 3, 2]]]]
out = _read_output("agroup.out.2")
self.assertEqual(a_group(trans,rots),out)
def test3(self):
from phenum.grouptheory import a_group
trans = [[j-1 for j in i] for i in [[1, 2, 3, 4, 5, 6, 7, 8], [2, 1, 4, 3, 6, 5, 8, 7], [3, 4, 5, 6, 7, 8, 1, 2], [4, 3, 6, 5, 8, 7, 2, 1], [5, 6, 7, 8, 1, 2, 3, 4], [6, 5, 8, 7, 2, 1, 4, 3], [7, 8, 1, 2, 3, 4, 5, 6], [8, 7, 2, 1, 4, 3, 6, 5]]]
rots = [[[j-1 for j in i] for i in t] for t in [[[1, 2, 3, 4, 5, 6, 7, 8], [1, 2, 3, 4, 5, 6]], [[1, 2, 3, 4, 5, 6, 7, 8], [4, 2, 3, 1, 5, 6]], [[1, 2, 3, 4, 5, 6, 7, 8], [1, 5, 3, 4, 2, 6]], [[1, 2, 3, 4, 5, 6, 7, 8], [4, 5, 3, 1, 2, 6]], [[1, 2, 7, 8, 5, 6, 3, 4], [1, 2, 6, 4, 5, 3]], [[1, 2, 7, 8, 5, 6, 3, 4], [4, 2, 6, 1, 5, 3]], [[1, 2, 7, 8, 5, 6, 3, 4], [1, 5, 6, 4, 2, 3]], [[1, 2, 7, 8, 5, 6, 3, 4], [4, 5, 6, 1, 2, 3]]]]
out = _read_output("agroup.out.3")
self.assertEqual(a_group(trans,rots),out)
def test4(self):
from phenum.grouptheory import a_group
trans =[[j - 1 for j in i] for i in[[1,2,3,4,5,6,7,8], [2,1,4,3,6,5,8,7], [3,4,5,6,7,8,1,2], [4,3,6,5,8,7,2,1], [5,6,7,8,1,2,3,4], [6,5,8,7,2,1,4,3], [7,8,1,2,3,4,5,6], [8,7,2,1,4,3,6,5]]]
rots = [[[0,1,2,3,4,5,6,7],[0,1,2,3]],[[0,1,2,3,4,5,6,7],[2,1,0,3]],[[0,1,6,7,4,5,2,3],[0,3,2,1]],[[0,1,6,7,4,5,2,3],[2,3,0,1]]]
out = _read_output("agroup.out.4")
self.assertEqual(a_group(trans,rots),out)
def test5(self):
from phenum.grouptheory import a_group
trans =[[j - 1 for j in i] for i in[[1, 2, 3, 4, 5, 6, 7, 8], [2, 1, 4, 3, 6, 5, 8, 7], [3, 4, 5, 6, 7, 8, 1, 2], [4, 3, 6, 5, 8, 7, 2, 1], [5, 6, 7, 8, 1, 2, 3, 4], [6, 5, 8, 7, 2, 1, 4, 3], [7, 8, 1, 2, 3, 4, 5, 6], [8, 7, 2, 1, 4, 3, 6, 5]]]
rots = [[[0,1,2,3,4,5,6,7],[0,1,2,3]],[[0,1,2,3,4,5,6,7],[2,1,0,3]],[[0,1,6,7,4,5,2,3],[0,3,2,1]],[[0,1,6,7,4,5,2,3],[2,3,0,1]]]
out = _read_output("agroup.out.5")
self.assertEqual(a_group(trans,rots),out)
class TestAGroupGen(ut.TestCase):
""" Tests of the a_group subroutine."""
def test1(self):
from phenum.grouptheory import a_group_gen
trans = [[0,1],[1,0]]
rots = [[[0,1],[0,1,2,3,4,5]],[[1,0],[2,3,0,1,5,4]],[[1,0],[2,1,0,3,5,4]],[[0,1],[0,3,2,1,5,4]]]
out = _read_output("agroupgen.out.1")
self.assertEqual(a_group_gen(trans,rots),out)
def test2(self):
from phenum.grouptheory import a_group_gen
trans = [[j-1 for j in i] for i in [[1, 2, 3, 4], [2, 1, 4, 3], [3, 4, 1, 2], [4, 3, 2, 1]]]
rots = [[[j-1 for j in i] for i in t] for t in [[[1, 2, 3, 4], [1, 2, 3, 4, 5, 6]], [[1, 4, 3, 2], [1, 3, 2, 4, 6, 5]], [[1, 2, 3, 4], [4, 2, 3, 1, 5, 6]], [[1, 4, 3, 2], [4, 3, 2, 1, 6, 5]], [[1, 2, 3, 4], [1, 5, 3, 4, 2, 6]], [[1, 4, 3, 2], [1, 3, 5, 4, 6, 2]], [[1, 2, 3, 4], [4, 5, 3, 1, 2, 6]], [[1, 4, 3, 2], [4, 3, 5, 1, 6, 2]], [[1, 2, 3, 4], [1, 2, 6, 4, 5, 3]], [[1, 4, 3, 2], [1, 6, 2, 4, 3, 5]], [[1, 2, 3, 4], [4, 2, 6, 1, 5, 3]], [[1, 4, 3, 2], [4, 6, 2, 1, 3, 5]], [[1, 2, 3, 4], [1, 5, 6, 4, 2, 3]], [[1, 4, 3, 2], [1, 6, 5, 4, 3, 2]], [[1, 2, 3, 4], [4, 5, 6, 1, 2, 3]], [[1, 4, 3, 2], [4, 6, 5, 1, 3, 2]]]]
out = _read_output("agroupgen.out.2")
self.assertEqual(a_group_gen(trans,rots),out)
def test3(self):
from phenum.grouptheory import a_group_gen
trans = [[j-1 for j in i] for i in [[1, 2, 3, 4, 5, 6, 7, 8], [2, 1, 4, 3, 6, 5, 8, 7], [3, 4, 5, 6, 7, 8, 1, 2], [4, 3, 6, 5, 8, 7, 2, 1], [5, 6, 7, 8, 1, 2, 3, 4], [6, 5, 8, 7, 2, 1, 4, 3], [7, 8, 1, 2, 3, 4, 5, 6], [8, 7, 2, 1, 4, 3, 6, 5]]]
rots = [[[j-1 for j in i] for i in t] for t in [[[1, 2, 3, 4, 5, 6, 7, 8], [1, 2, 3, 4, 5, 6]], [[1, 2, 3, 4, 5, 6, 7, 8], [4, 2, 3, 1, 5, 6]], [[1, 2, 3, 4, 5, 6, 7, 8], [1, 5, 3, 4, 2, 6]], [[1, 2, 3, 4, 5, 6, 7, 8], [4, 5, 3, 1, 2, 6]], [[1, 2, 7, 8, 5, 6, 3, 4], [1, 2, 6, 4, 5, 3]], [[1, 2, 7, 8, 5, 6, 3, 4], [4, 2, 6, 1, 5, 3]], [[1, 2, 7, 8, 5, 6, 3, 4], [1, 5, 6, 4, 2, 3]], [[1, 2, 7, 8, 5, 6, 3, 4], [4, 5, 6, 1, 2, 3]]]]
out = _read_output("agroupgen.out.3")
self.assertEqual(a_group_gen(trans,rots),out)
def test4(self):
from phenum.grouptheory import a_group_gen
trans =[[j - 1 for j in i] for i in[[1,2,3,4,5,6,7,8], [2,1,4,3,6,5,8,7], [3,4,5,6,7,8,1,2], [4,3,6,5,8,7,2,1], [5,6,7,8,1,2,3,4], [6,5,8,7,2,1,4,3], [7,8,1,2,3,4,5,6], [8,7,2,1,4,3,6,5]]]
rots = [[[0,1,2,3,4,5,6,7],[0,1,2,3]],[[0,1,2,3,4,5,6,7],[2,1,0,3]],[[0,1,6,7,4,5,2,3],[0,3,2,1]],[[0,1,6,7,4,5,2,3],[2,3,0,1]]]
out = _read_output("agroupgen.out.4")
self.assertEqual(a_group_gen(trans,rots),out)
def test5(self):
from phenum.grouptheory import a_group_gen
trans =[[j - 1 for j in i] for i in[[1, 2, 3, 4, 5, 6, 7, 8], [2, 1, 4, 3, 6, 5, 8, 7], [3, 4, 5, 6, 7, 8, 1, 2], [4, 3, 6, 5, 8, 7, 2, 1], [5, 6, 7, 8, 1, 2, 3, 4], [6, 5, 8, 7, 2, 1, 4, 3], [7, 8, 1, 2, 3, 4, 5, 6], [8, 7, 2, 1, 4, 3, 6, 5]]]
rots = [[[0,1,2,3,4,5,6,7],[0,1,2,3]],[[0,1,2,3,4,5,6,7],[2,1,0,3]],[[0,1,6,7,4,5,2,3],[0,3,2,1]],[[0,1,6,7,4,5,2,3],[2,3,0,1]]]
out = _read_output("agroupgen.out.5")
self.assertEqual(a_group_gen(trans,rots),out)
class TestMakeMemberList(ut.TestCase):
"""Tests of the _make_member_list subroutine."""
def test1(self):
from phenum.grouptheory import _make_member_list
case = 1
n = _read_float_1D(gpath+"make_member_list_n.in."+str(case))
out = list(map(list,zip(*_read_float_2D(gpath+"make_member_list_p.out."+str(case)))))
self.assertEqual(_make_member_list(n),out)
def test2(self):
from phenum.grouptheory import _make_member_list
case = 2
n = _read_float_1D(gpath+"make_member_list_n.in."+str(case))
out = list(map(list,zip(*_read_float_2D(gpath+"make_member_list_p.out."+str(case)))))
self.assertEqual(_make_member_list(n),out)
def test3(self):
from phenum.grouptheory import _make_member_list
case = 3
n = _read_float_1D(gpath+"make_member_list_n.in."+str(case))
out = list(map(list,zip(*_read_float_2D(gpath+"make_member_list_p.out."+str(case)))))
self.assertEqual(_make_member_list(n),out)
def test4(self):
from phenum.grouptheory import _make_member_list
case = 4
n = _read_float_1D(gpath+"make_member_list_n.in."+str(case))
out = list(map(list,zip(*_read_float_2D(gpath+"make_member_list_p.out."+str(case)))))
self.assertEqual(_make_member_list(n),out)
def test5(self):
from phenum.grouptheory import _make_member_list
case = 5
n = _read_float_1D(gpath+"make_member_list_n.in."+str(case))
out = list(map(list,zip(*_read_float_2D(gpath+"make_member_list_p.out."+str(case)))))
self.assertEqual(_make_member_list(n),out)
def test6(self):
from phenum.grouptheory import _make_member_list
case = 6
n = _read_float_1D(gpath+"make_member_list_n.in."+str(case))
out = list(map(list,zip(*_read_float_2D(gpath+"make_member_list_p.out."+str(case)))))
self.assertEqual(_make_member_list(n),out)
def test7(self):
from phenum.grouptheory import _make_member_list
case = 7
n = _read_float_1D(gpath+"make_member_list_n.in."+str(case))
out = list(map(list,zip(*_read_float_2D(gpath+"make_member_list_p.out."+str(case)))))
self.assertEqual(_make_member_list(n),out)
def test8(self):
from phenum.grouptheory import _make_member_list
case = 8
n = _read_float_1D(gpath+"make_member_list_n.in."+str(case))
out = list(map(list,zip(*_read_float_2D(gpath+"make_member_list_p.out."+str(case)))))
self.assertEqual(_make_member_list(n),out)
def test9(self):
from phenum.grouptheory import _make_member_list
case = 9
n = _read_float_1D(gpath+"make_member_list_n.in."+str(case))
out = list(map(list,zip(*_read_float_2D(gpath+"make_member_list_p.out."+str(case)))))
self.assertEqual(_make_member_list(n),out)
def test10(self):
from phenum.grouptheory import _make_member_list
case = 10
n = _read_float_1D(gpath+"make_member_list_n.in."+str(case))
out = list(map(list,zip(*_read_float_2D(gpath+"make_member_list_p.out."+str(case)))))
self.assertEqual(_make_member_list(n),out)
def test11(self):
from phenum.grouptheory import _make_member_list
case = 11
n = _read_float_1D(gpath+"make_member_list_n.in."+str(case))
out = list(map(list,zip(*_read_float_2D(gpath+"make_member_list_p.out."+str(case)))))
self.assertEqual(_make_member_list(n),out)
def test12(self):
from phenum.grouptheory import _make_member_list
case = 12
n = _read_float_1D(gpath+"make_member_list_n.in."+str(case))
out = list(map(list,zip(*_read_float_2D(gpath+"make_member_list_p.out."+str(case)))))
self.assertEqual(_make_member_list(n),out)
def test13(self):
from phenum.grouptheory import _make_member_list
case = 13
n = _read_float_1D(gpath+"make_member_list_n.in."+str(case))
out = list(map(list,zip(*_read_float_2D(gpath+"make_member_list_p.out."+str(case)))))
self.assertEqual(_make_member_list(n),out)
def test14(self):
from phenum.grouptheory import _make_member_list
case = 14
n = _read_float_1D(gpath+"make_member_list_n.in."+str(case))
out = list(map(list,zip(*_read_float_2D(gpath+"make_member_list_p.out."+str(case)))))
self.assertEqual(_make_member_list(n),out)
def test15(self):
from phenum.grouptheory import _make_member_list
case = 15
n = _read_float_1D(gpath+"make_member_list_n.in."+str(case))
out = list(map(list,zip(*_read_float_2D(gpath+"make_member_list_p.out."+str(case)))))
self.assertEqual(_make_member_list(n),out)
def test16(self):
from phenum.grouptheory import _make_member_list
case = 16
n = _read_float_1D(gpath+"make_member_list_n.in."+str(case))
out = list(map(list,zip(*_read_float_2D(gpath+"make_member_list_p.out."+str(case)))))
self.assertEqual(_make_member_list(n),out)
def test17(self):
from phenum.grouptheory import _make_member_list
case = 17
n = _read_float_1D(gpath+"make_member_list_n.in."+str(case))
out = list(map(list,zip(*_read_float_2D(gpath+"make_member_list_p.out."+str(case)))))
self.assertEqual(_make_member_list(n),out)
def test18(self):
from phenum.grouptheory import _make_member_list
case = 18
n = _read_float_1D(gpath+"make_member_list_n.in."+str(case))
out = list(map(list,zip(*_read_float_2D(gpath+"make_member_list_p.out."+str(case)))))
self.assertEqual(_make_member_list(n),out)
def test19(self):
from phenum.grouptheory import _make_member_list
case = 19
n = _read_float_1D(gpath+"make_member_list_n.in."+str(case))
out = list(map(list,zip(*_read_float_2D(gpath+"make_member_list_p.out."+str(case)))))
self.assertEqual(_make_member_list(n),out)
def test20(self):
from phenum.grouptheory import _make_member_list
case = 20
n = _read_float_1D(gpath+"make_member_list_n.in."+str(case))
out = list(map(list,zip(*_read_float_2D(gpath+"make_member_list_p.out."+str(case)))))
self.assertEqual(_make_member_list(n),out)
class TestFindPermutationOfGroup(ut.TestCase):
"""Tests of the _find_permutation_of_group subroutine."""
def test1(self):
from phenum.grouptheory import _find_permutation_of_group
case = 1
g = list(map(list,zip(*_read_float_2D(gpath+"find_permutation_of_group_g.in."+str(case)))))
gp = list(map(list,zip(*_read_float_2D(gpath+"find_permutation_of_group_gp.in."+str(case)))))
out = [i-1 for i in _read_int_1D(gpath+"find_permutation_of_group_perm.out."+str(case))]
self.assertEqual(_find_permutation_of_group(g,gp),out)
def test2(self):
from phenum.grouptheory import _find_permutation_of_group
case = 2
g = list(map(list,zip(*_read_float_2D(gpath+"find_permutation_of_group_g.in."+str(case)))))
gp = list(map(list,zip(*_read_float_2D(gpath+"find_permutation_of_group_gp.in."+str(case)))))
out = [i-1 for i in _read_int_1D(gpath+"find_permutation_of_group_perm.out."+str(case))]
self.assertEqual(_find_permutation_of_group(g,gp),out)
def test3(self):
from phenum.grouptheory import _find_permutation_of_group
case = 3
g = list(map(list,zip(*_read_float_2D(gpath+"find_permutation_of_group_g.in."+str(case)))))
gp = list(map(list,zip(*_read_float_2D(gpath+"find_permutation_of_group_gp.in."+str(case)))))
out = [i-1 for i in _read_int_1D(gpath+"find_permutation_of_group_perm.out."+str(case))]
self.assertEqual(_find_permutation_of_group(g,gp),out)
def test4(self):
from phenum.grouptheory import _find_permutation_of_group
case = 4
g = list(map(list,zip(*_read_float_2D(gpath+"find_permutation_of_group_g.in."+str(case)))))
gp = list(map(list,zip(*_read_float_2D(gpath+"find_permutation_of_group_gp.in."+str(case)))))
out = [i-1 for i in _read_int_1D(gpath+"find_permutation_of_group_perm.out."+str(case))]
self.assertEqual(_find_permutation_of_group(g,gp),out)
def test5(self):
from phenum.grouptheory import _find_permutation_of_group
case = 5
g = list(map(list,zip(*_read_float_2D(gpath+"find_permutation_of_group_g.in."+str(case)))))
gp = list(map(list,zip(*_read_float_2D(gpath+"find_permutation_of_group_gp.in."+str(case)))))
out = [i-1 for i in _read_int_1D(gpath+"find_permutation_of_group_perm.out."+str(case))]
self.assertEqual(_find_permutation_of_group(g,gp),out)
def test6(self):
from phenum.grouptheory import _find_permutation_of_group
case = 6
g = list(map(list,zip(*_read_float_2D(gpath+"find_permutation_of_group_g.in."+str(case)))))
gp = list(map(list,zip(*_read_float_2D(gpath+"find_permutation_of_group_gp.in."+str(case)))))
out = [i-1 for i in _read_int_1D(gpath+"find_permutation_of_group_perm.out."+str(case))]
self.assertEqual(_find_permutation_of_group(g,gp),out)
def test7(self):
from phenum.grouptheory import _find_permutation_of_group
case = 7
g = list(map(list,zip(*_read_float_2D(gpath+"find_permutation_of_group_g.in."+str(case)))))
gp = list(map(list,zip(*_read_float_2D(gpath+"find_permutation_of_group_gp.in."+str(case)))))
out = [i-1 for i in _read_int_1D(gpath+"find_permutation_of_group_perm.out."+str(case))]
self.assertEqual(_find_permutation_of_group(g,gp),out)
def test8(self):
from phenum.grouptheory import _find_permutation_of_group
case = 8
g = list(map(list,zip(*_read_float_2D(gpath+"find_permutation_of_group_g.in."+str(case)))))
gp = list(map(list,zip(*_read_float_2D(gpath+"find_permutation_of_group_gp.in."+str(case)))))
out = [i-1 for i in _read_int_1D(gpath+"find_permutation_of_group_perm.out."+str(case))]
self.assertEqual(_find_permutation_of_group(g,gp),out)
def test9(self):
from phenum.grouptheory import _find_permutation_of_group
case = 9
g = list(map(list,zip(*_read_float_2D(gpath+"find_permutation_of_group_g.in."+str(case)))))
gp = list(map(list,zip(*_read_float_2D(gpath+"find_permutation_of_group_gp.in."+str(case)))))
out = [i-1 for i in _read_int_1D(gpath+"find_permutation_of_group_perm.out."+str(case))]
self.assertEqual(_find_permutation_of_group(g,gp),out)
def test10(self):
from phenum.grouptheory import _find_permutation_of_group
case = 10
g = list(map(list,zip(*_read_float_2D(gpath+"find_permutation_of_group_g.in."+str(case)))))
gp = list(map(list,zip(*_read_float_2D(gpath+"find_permutation_of_group_gp.in."+str(case)))))
out = [i-1 for i in _read_int_1D(gpath+"find_permutation_of_group_perm.out."+str(case))]
self.assertEqual(_find_permutation_of_group(g,gp),out)
def test11(self):
from phenum.grouptheory import _find_permutation_of_group
case = 11
g = list(map(list,zip(*_read_float_2D(gpath+"find_permutation_of_group_g.in."+str(case)))))
gp = list(map(list,zip(*_read_float_2D(gpath+"find_permutation_of_group_gp.in."+str(case)))))
out = [i-1 for i in _read_int_1D(gpath+"find_permutation_of_group_perm.out."+str(case))]
self.assertEqual(_find_permutation_of_group(g,gp),out)
def test12(self):
from phenum.grouptheory import _find_permutation_of_group
case = 12
g = list(map(list,zip(*_read_float_2D(gpath+"find_permutation_of_group_g.in."+str(case)))))
gp = list(map(list,zip(*_read_float_2D(gpath+"find_permutation_of_group_gp.in."+str(case)))))
out = [i-1 for i in _read_int_1D(gpath+"find_permutation_of_group_perm.out."+str(case))]
self.assertEqual(_find_permutation_of_group(g,gp),out)
def test13(self):
from phenum.grouptheory import _find_permutation_of_group
case = 13
g = list(map(list,zip(*_read_float_2D(gpath+"find_permutation_of_group_g.in."+str(case)))))
gp = list(map(list,zip(*_read_float_2D(gpath+"find_permutation_of_group_gp.in."+str(case)))))
out = [i-1 for i in _read_int_1D(gpath+"find_permutation_of_group_perm.out."+str(case))]
self.assertEqual(_find_permutation_of_group(g,gp),out)
def test14(self):
from phenum.grouptheory import _find_permutation_of_group
case = 14
g = list(map(list,zip(*_read_float_2D(gpath+"find_permutation_of_group_g.in."+str(case)))))
gp = list(map(list,zip(*_read_float_2D(gpath+"find_permutation_of_group_gp.in."+str(case)))))
out = [i-1 for i in _read_int_1D(gpath+"find_permutation_of_group_perm.out."+str(case))]
self.assertEqual(_find_permutation_of_group(g,gp),out)
def test15(self):
from phenum.grouptheory import _find_permutation_of_group
case = 15
g = list(map(list,zip(*_read_float_2D(gpath+"find_permutation_of_group_g.in."+str(case)))))
gp = list(map(list,zip(*_read_float_2D(gpath+"find_permutation_of_group_gp.in."+str(case)))))
out = [i-1 for i in _read_int_1D(gpath+"find_permutation_of_group_perm.out."+str(case))]
self.assertEqual(_find_permutation_of_group(g,gp),out)
def test16(self):
from phenum.grouptheory import _find_permutation_of_group
case = 16
g = list(map(list,zip(*_read_float_2D(gpath+"find_permutation_of_group_g.in."+str(case)))))
gp = list(map(list,zip(*_read_float_2D(gpath+"find_permutation_of_group_gp.in."+str(case)))))
out = [i-1 for i in _read_int_1D(gpath+"find_permutation_of_group_perm.out."+str(case))]
self.assertEqual(_find_permutation_of_group(g,gp),out)
def test17(self):
from phenum.grouptheory import _find_permutation_of_group
case = 17
g = list(map(list,zip(*_read_float_2D(gpath+"find_permutation_of_group_g.in."+str(case)))))
gp = list(map(list,zip(*_read_float_2D(gpath+"find_permutation_of_group_gp.in."+str(case)))))
out = [i-1 for i in _read_int_1D(gpath+"find_permutation_of_group_perm.out."+str(case))]
self.assertEqual(_find_permutation_of_group(g,gp),out)
def test18(self):
from phenum.grouptheory import _find_permutation_of_group
case = 18
g = list(map(list,zip(*_read_float_2D(gpath+"find_permutation_of_group_g.in."+str(case)))))
gp = list(map(list,zip(*_read_float_2D(gpath+"find_permutation_of_group_gp.in."+str(case)))))
out = [i-1 for i in _read_int_1D(gpath+"find_permutation_of_group_perm.out."+str(case))]
self.assertEqual(_find_permutation_of_group(g,gp),out)
def test19(self):
from phenum.grouptheory import _find_permutation_of_group
case = 19
g = list(map(list,zip(*_read_float_2D(gpath+"find_permutation_of_group_g.in."+str(case)))))
gp = list(map(list,zip(*_read_float_2D(gpath+"find_permutation_of_group_gp.in."+str(case)))))
out = [i-1 for i in _read_int_1D(gpath+"find_permutation_of_group_perm.out."+str(case))]
self.assertEqual(_find_permutation_of_group(g,gp),out)
def test20(self):
from phenum.grouptheory import _find_permutation_of_group
case = 20
g = list(map(list,zip(*_read_float_2D(gpath+"find_permutation_of_group_g.in."+str(case)))))
gp = list(map(list,zip(*_read_float_2D(gpath+"find_permutation_of_group_gp.in."+str(case)))))
out = [i-1 for i in _read_int_1D(gpath+"find_permutation_of_group_perm.out."+str(case))]
self.assertEqual(_find_permutation_of_group(g,gp),out)
class TestIsEquivLattice(ut.TestCase):
"""Tests of the _is_equiv_lattice subroutine."""
def test1(self):
from phenum.grouptheory import _is_equiv_lattice
case = 1
lat1 = _read_float_2D(gpath+"is_equiv_lattice_lat1.in."+str(case))
lat2 = _read_float_2D(gpath+"is_equiv_lattice_lat2.in."+str(case))
eps = _read_float(gpath+"is_equiv_lattice_eps.in."+str(case))
out = _read_logical(gpath+"is_equiv_lattice.out."+str(case))
self.assertEqual(_is_equiv_lattice(lat1,lat2,eps),out)
def test2(self):
from phenum.grouptheory import _is_equiv_lattice
case = 2
lat1 = _read_float_2D(gpath+"is_equiv_lattice_lat1.in."+str(case))
lat2 = _read_float_2D(gpath+"is_equiv_lattice_lat2.in."+str(case))
eps = _read_float(gpath+"is_equiv_lattice_eps.in."+str(case))
out = _read_logical(gpath+"is_equiv_lattice.out."+str(case))
self.assertEqual(_is_equiv_lattice(lat1,lat2,eps),out)
def test3(self):
from phenum.grouptheory import _is_equiv_lattice
case = 3
lat1 = _read_float_2D(gpath+"is_equiv_lattice_lat1.in."+str(case))
lat2 = _read_float_2D(gpath+"is_equiv_lattice_lat2.in."+str(case))
eps = _read_float(gpath+"is_equiv_lattice_eps.in."+str(case))
out = _read_logical(gpath+"is_equiv_lattice.out."+str(case))
self.assertEqual(_is_equiv_lattice(lat1,lat2,eps),out)
def test4(self):
from phenum.grouptheory import _is_equiv_lattice
case = 4
lat1 = _read_float_2D(gpath+"is_equiv_lattice_lat1.in."+str(case))
lat2 = _read_float_2D(gpath+"is_equiv_lattice_lat2.in."+str(case))
eps = _read_float(gpath+"is_equiv_lattice_eps.in."+str(case))
out = _read_logical(gpath+"is_equiv_lattice.out."+str(case))
self.assertEqual(_is_equiv_lattice(lat1,lat2,eps),out)
def test5(self):
from phenum.grouptheory import _is_equiv_lattice
case = 5
lat1 = _read_float_2D(gpath+"is_equiv_lattice_lat1.in."+str(case))
lat2 = _read_float_2D(gpath+"is_equiv_lattice_lat2.in."+str(case))
eps = _read_float(gpath+"is_equiv_lattice_eps.in."+str(case))
out = _read_logical(gpath+"is_equiv_lattice.out."+str(case))
self.assertEqual(_is_equiv_lattice(lat1,lat2,eps),out)
def test6(self):
from phenum.grouptheory import _is_equiv_lattice
case = 6
lat1 = _read_float_2D(gpath+"is_equiv_lattice_lat1.in."+str(case))
lat2 = _read_float_2D(gpath+"is_equiv_lattice_lat2.in."+str(case))
eps = _read_float(gpath+"is_equiv_lattice_eps.in."+str(case))
out = _read_logical(gpath+"is_equiv_lattice.out."+str(case))
self.assertEqual(_is_equiv_lattice(lat1,lat2,eps),out)
def test7(self):
from phenum.grouptheory import _is_equiv_lattice
case = 7
lat1 = _read_float_2D(gpath+"is_equiv_lattice_lat1.in."+str(case))
lat2 = _read_float_2D(gpath+"is_equiv_lattice_lat2.in."+str(case))
eps = _read_float(gpath+"is_equiv_lattice_eps.in."+str(case))
out = _read_logical(gpath+"is_equiv_lattice.out."+str(case))
self.assertEqual(_is_equiv_lattice(lat1,lat2,eps),out)
def test8(self):
from phenum.grouptheory import _is_equiv_lattice
case = 8
lat1 = _read_float_2D(gpath+"is_equiv_lattice_lat1.in."+str(case))
lat2 = _read_float_2D(gpath+"is_equiv_lattice_lat2.in."+str(case))
eps = _read_float(gpath+"is_equiv_lattice_eps.in."+str(case))
out = _read_logical(gpath+"is_equiv_lattice.out."+str(case))
self.assertEqual(_is_equiv_lattice(lat1,lat2,eps),out)
def test9(self):
from phenum.grouptheory import _is_equiv_lattice
case = 9
lat1 = _read_float_2D(gpath+"is_equiv_lattice_lat1.in."+str(case))
lat2 = _read_float_2D(gpath+"is_equiv_lattice_lat2.in."+str(case))
eps = _read_float(gpath+"is_equiv_lattice_eps.in."+str(case))
out = _read_logical(gpath+"is_equiv_lattice.out."+str(case))
self.assertEqual(_is_equiv_lattice(lat1,lat2,eps),out)
def test10(self):
from phenum.grouptheory import _is_equiv_lattice
case = 10
lat1 = _read_float_2D(gpath+"is_equiv_lattice_lat1.in."+str(case))
lat2 = _read_float_2D(gpath+"is_equiv_lattice_lat2.in."+str(case))
eps = _read_float(gpath+"is_equiv_lattice_eps.in."+str(case))
out = _read_logical(gpath+"is_equiv_lattice.out."+str(case))
self.assertEqual(_is_equiv_lattice(lat1,lat2,eps),out)
def test11(self):
from phenum.grouptheory import _is_equiv_lattice
case = 11
lat1 = _read_float_2D(gpath+"is_equiv_lattice_lat1.in."+str(case))
lat2 = _read_float_2D(gpath+"is_equiv_lattice_lat2.in."+str(case))
eps = _read_float(gpath+"is_equiv_lattice_eps.in."+str(case))
out = _read_logical(gpath+"is_equiv_lattice.out."+str(case))
self.assertEqual(_is_equiv_lattice(lat1,lat2,eps),out)
def test12(self):
from phenum.grouptheory import _is_equiv_lattice
case = 12
lat1 = _read_float_2D(gpath+"is_equiv_lattice_lat1.in."+str(case))
lat2 = _read_float_2D(gpath+"is_equiv_lattice_lat2.in."+str(case))
eps = _read_float(gpath+"is_equiv_lattice_eps.in."+str(case))
out = _read_logical(gpath+"is_equiv_lattice.out."+str(case))
self.assertEqual(_is_equiv_lattice(lat1,lat2,eps),out)
def test13(self):
from phenum.grouptheory import _is_equiv_lattice
case = 13
lat1 = _read_float_2D(gpath+"is_equiv_lattice_lat1.in."+str(case))
lat2 = _read_float_2D(gpath+"is_equiv_lattice_lat2.in."+str(case))
eps = _read_float(gpath+"is_equiv_lattice_eps.in."+str(case))
out = _read_logical(gpath+"is_equiv_lattice.out."+str(case))
self.assertEqual(_is_equiv_lattice(lat1,lat2,eps),out)
def test14(self):
from phenum.grouptheory import _is_equiv_lattice
case = 14
lat1 = _read_float_2D(gpath+"is_equiv_lattice_lat1.in."+str(case))
lat2 = _read_float_2D(gpath+"is_equiv_lattice_lat2.in."+str(case))
eps = _read_float(gpath+"is_equiv_lattice_eps.in."+str(case))
out = _read_logical(gpath+"is_equiv_lattice.out."+str(case))
self.assertEqual(_is_equiv_lattice(lat1,lat2,eps),out)
def test15(self):
from phenum.grouptheory import _is_equiv_lattice
case = 15
lat1 = _read_float_2D(gpath+"is_equiv_lattice_lat1.in."+str(case))
lat2 = _read_float_2D(gpath+"is_equiv_lattice_lat2.in."+str(case))
eps = _read_float(gpath+"is_equiv_lattice_eps.in."+str(case))
out = _read_logical(gpath+"is_equiv_lattice.out."+str(case))
self.assertEqual(_is_equiv_lattice(lat1,lat2,eps),out)
def test16(self):
from phenum.grouptheory import _is_equiv_lattice
case = 16
lat1 = _read_float_2D(gpath+"is_equiv_lattice_lat1.in."+str(case))
lat2 = _read_float_2D(gpath+"is_equiv_lattice_lat2.in."+str(case))
eps = _read_float(gpath+"is_equiv_lattice_eps.in."+str(case))
out = _read_logical(gpath+"is_equiv_lattice.out."+str(case))
self.assertEqual(_is_equiv_lattice(lat1,lat2,eps),out)
def test17(self):
from phenum.grouptheory import _is_equiv_lattice
case = 17
lat1 = _read_float_2D(gpath+"is_equiv_lattice_lat1.in."+str(case))
lat2 = _read_float_2D(gpath+"is_equiv_lattice_lat2.in."+str(case))
eps = _read_float(gpath+"is_equiv_lattice_eps.in."+str(case))
out = _read_logical(gpath+"is_equiv_lattice.out."+str(case))
self.assertEqual(_is_equiv_lattice(lat1,lat2,eps),out)
def test18(self):
from phenum.grouptheory import _is_equiv_lattice
case = 18
lat1 = _read_float_2D(gpath+"is_equiv_lattice_lat1.in."+str(case))
lat2 = _read_float_2D(gpath+"is_equiv_lattice_lat2.in."+str(case))
eps = _read_float(gpath+"is_equiv_lattice_eps.in."+str(case))
out = _read_logical(gpath+"is_equiv_lattice.out."+str(case))
self.assertEqual(_is_equiv_lattice(lat1,lat2,eps),out)
def test19(self):
from phenum.grouptheory import _is_equiv_lattice
case = 19
lat1 = _read_float_2D(gpath+"is_equiv_lattice_lat1.in."+str(case))
lat2 = _read_float_2D(gpath+"is_equiv_lattice_lat2.in."+str(case))
eps = _read_float(gpath+"is_equiv_lattice_eps.in."+str(case))
out = _read_logical(gpath+"is_equiv_lattice.out."+str(case))
self.assertEqual(_is_equiv_lattice(lat1,lat2,eps),out)
def test20(self):
from phenum.grouptheory import _is_equiv_lattice
case = 20
lat1 = _read_float_2D(gpath+"is_equiv_lattice_lat1.in."+str(case))
lat2 = _read_float_2D(gpath+"is_equiv_lattice_lat2.in."+str(case))
eps = _read_float(gpath+"is_equiv_lattice_eps.in."+str(case))
out = _read_logical(gpath+"is_equiv_lattice.out."+str(case))
self.assertEqual(_is_equiv_lattice(lat1,lat2,eps),out)
class TestGetSLVFixingOperations(ut.TestCase):
"""Tests of the _get_sLV_fixing_operations subroutine."""
def _compare_outputs(self,out1,out2):
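        # Helper for the tests below: compares (fixOp, RotPermList, degeneracy)
        # triples field by field, using a 12-decimal-place tolerance for the
        # floating-point rotation matrices, shift vectors, and v entries.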
fix1 = out1[0]
fix2 = out2[0]
rot1 = out1[1]
rot2 = out2[1]
deg1 = out1[2]
deg2 = out2[2]
self.assertEqual(deg1,deg2)
if len(fix1.rot) == len(fix2.rot):
for i in range(len(fix1.rot)):
for j in range(3):
for k in range(3):
self.assertAlmostEqual(fix1.rot[i][j][k],fix2.rot[i][j][k],places=12)
else:
self.assertEqual(len(fix1.rot),len(fix2.rot))
if len(fix1.shift) == len(fix2.shift):
for i in range(len(fix1.shift)):
for j in range(3):
self.assertAlmostEqual(fix1.shift[i][j],fix2.shift[i][j],places=12)
else:
self.assertEqual(len(fix1.shift),len(fix2.shift))
self.assertEqual(rot1.nL,rot2.nL)
self.assertEqual(rot1.RotIndx, rot2.RotIndx)
        if (rot1.v is not None) and (rot2.v is not None):
if len(rot1.v) == len(rot2.v):
for i in range(len(rot1.v)):
for j in range(len(rot1.v[i])):
for k in range(len(rot1.v[i][j])):
self.assertAlmostEqual(rot1.v[i][j][k],rot2.v[i][j][k],places=12)
else:
self.assertEqual(len(rot1.v),len(rot2.v))
else:
self.assertEqual(rot1.v,rot2.v)
# if (rot1.perm.site_perm != None) and (rot2.perm.site_perm != None):
# if len(rot1.perm.site_perm) == len(rot2.perm.site_perm):
# for i in range(len(rot1.perm.site_perm)):
# for j in range(len(rot1.perm.site_perm[i])):
# self.assertEqual(rot1.perm.site_perm[i][j],rot2.perm.site_perm[i][j])
# else:
# self.assertEqual(len(rot1.perm.site_perm),len(rot2.perm.site_perm))
# else:
# self.assertEqual(rot1.perm.site_perm,rot2.perm.site_perm)
        if (rot1.perm.arrow_perm is not None) and (rot2.perm.arrow_perm is not None):
if len(rot1.perm.arrow_perm) == len(rot2.perm.arrow_perm):
for i in range(len(rot1.perm.arrow_perm)):
for j in range(len(rot1.perm.arrow_perm[i])):
self.assertEqual(rot1.perm.arrow_perm[i][j],rot2.perm.arrow_perm[i][j])
else:
self.assertEqual(len(rot1.perm.arrow_perm),len(rot2.perm.arrow_perm))
else:
self.assertEqual(rot1.perm.arrow_perm,rot2.perm.arrow_perm)
def test1(self):
from phenum.grouptheory import _get_sLV_fixing_operations
case = 1
HNF = _read_int_2D(gpath+"get_sLV_fixing_operations_HNF.in."+str(case))
pLV = _read_float_2D(gpath+"get_sLV_fixing_operations_pLV.in."+str(case))
nD = _read_int(gpath+"get_sLV_fixing_operations_nD.in."+str(case))
rot = _read_float_3D(gpath+"get_sLV_fixing_operations_rot.in."+str(case))
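        # the zip(*...) idiom transposes the 2-D array read from the fixture file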
shift = list(map(list,zip(*_read_float_2D(gpath+"get_sLV_fixing_operations_shift.in."+str(case)))))
eps = _read_float(gpath+"get_sLV_fixing_operations_eps.in."+str(case))
dPerm = _read_RotPermList(gpath+"get_sLV_fixing_operations_dPerm.in."+str(case))
fixOp, rotPerm, degeneracy = _get_sLV_fixing_operations(HNF,pLV,nD,rot,shift,dPerm,eps)
rotPerm_out = _read_RotPermList(gpath+"get_sLV_fixing_operations_rotPerm.out."+str(case))
degen_out = _read_int(gpath+"get_sLV_fixing_operations_degeneracy.out."+str(case))
fixOp_out = _read_fixOp(gpath+"get_sLV_fixing_operations_fixOp.out."+str(case))
self._compare_outputs([fixOp,rotPerm,degeneracy],[fixOp_out,rotPerm_out,degen_out])
def test2(self):
from phenum.grouptheory import _get_sLV_fixing_operations
case = 10
HNF = _read_int_2D(gpath+"get_sLV_fixing_operations_HNF.in."+str(case))
pLV = _read_float_2D(gpath+"get_sLV_fixing_operations_pLV.in."+str(case))
nD = _read_int(gpath+"get_sLV_fixing_operations_nD.in."+str(case))
rot = _read_float_3D(gpath+"get_sLV_fixing_operations_rot.in."+str(case))
shift = list(map(list,zip(*_read_float_2D(gpath+"get_sLV_fixing_operations_shift.in."+str(case)))))
eps = _read_float(gpath+"get_sLV_fixing_operations_eps.in."+str(case))
dPerm = _read_RotPermList(gpath+"get_sLV_fixing_operations_dPerm.in."+str(case))
fixOp, rotPerm, degeneracy = _get_sLV_fixing_operations(HNF,pLV,nD,rot,shift,dPerm,eps)
rotPerm_out = _read_RotPermList(gpath+"get_sLV_fixing_operations_rotPerm.out."+str(case))
degen_out = _read_int(gpath+"get_sLV_fixing_operations_degeneracy.out."+str(case))
fixOp_out = _read_fixOp(gpath+"get_sLV_fixing_operations_fixOp.out."+str(case))
self._compare_outputs([fixOp,rotPerm,degeneracy],[fixOp_out,rotPerm_out,degen_out])
def test3(self):
from phenum.grouptheory import _get_sLV_fixing_operations
case = 20
HNF = _read_int_2D(gpath+"get_sLV_fixing_operations_HNF.in."+str(case))
pLV = _read_float_2D(gpath+"get_sLV_fixing_operations_pLV.in."+str(case))
nD = _read_int(gpath+"get_sLV_fixing_operations_nD.in."+str(case))
rot = _read_float_3D(gpath+"get_sLV_fixing_operations_rot.in."+str(case))
shift = list(map(list,zip(*_read_float_2D(gpath+"get_sLV_fixing_operations_shift.in."+str(case)))))
eps = _read_float(gpath+"get_sLV_fixing_operations_eps.in."+str(case))
dPerm = _read_RotPermList(gpath+"get_sLV_fixing_operations_dPerm.in."+str(case))
fixOp, rotPerm, degeneracy = _get_sLV_fixing_operations(HNF,pLV,nD,rot,shift,dPerm,eps)
rotPerm_out = _read_RotPermList(gpath+"get_sLV_fixing_operations_rotPerm.out."+str(case))
degen_out = _read_int(gpath+"get_sLV_fixing_operations_degeneracy.out."+str(case))
fixOp_out = _read_fixOp(gpath+"get_sLV_fixing_operations_fixOp.out."+str(case))
self._compare_outputs([fixOp,rotPerm,degeneracy],[fixOp_out,rotPerm_out,degen_out])
def test4(self):
from phenum.grouptheory import _get_sLV_fixing_operations
case = 30
HNF = _read_int_2D(gpath+"get_sLV_fixing_operations_HNF.in."+str(case))
pLV = _read_float_2D(gpath+"get_sLV_fixing_operations_pLV.in."+str(case))
nD = _read_int(gpath+"get_sLV_fixing_operations_nD.in."+str(case))
rot = _read_float_3D(gpath+"get_sLV_fixing_operations_rot.in."+str(case))
shift = list(map(list,zip(*_read_float_2D(gpath+"get_sLV_fixing_operations_shift.in."+str(case)))))
eps = _read_float(gpath+"get_sLV_fixing_operations_eps.in."+str(case))
dPerm = _read_RotPermList(gpath+"get_sLV_fixing_operations_dPerm.in."+str(case))
fixOp, rotPerm, degeneracy = _get_sLV_fixing_operations(HNF,pLV,nD,rot,shift,dPerm,eps)
rotPerm_out = _read_RotPermList(gpath+"get_sLV_fixing_operations_rotPerm.out."+str(case))
degen_out = _read_int(gpath+"get_sLV_fixing_operations_degeneracy.out."+str(case))
fixOp_out = _read_fixOp(gpath+"get_sLV_fixing_operations_fixOp.out."+str(case))
self._compare_outputs([fixOp,rotPerm,degeneracy],[fixOp_out,rotPerm_out,degen_out])
def test5(self):
from phenum.grouptheory import _get_sLV_fixing_operations
case = 40
HNF = _read_int_2D(gpath+"get_sLV_fixing_operations_HNF.in."+str(case))
pLV = _read_float_2D(gpath+"get_sLV_fixing_operations_pLV.in."+str(case))
nD = _read_int(gpath+"get_sLV_fixing_operations_nD.in."+str(case))
rot = _read_float_3D(gpath+"get_sLV_fixing_operations_rot.in."+str(case))
shift = list(map(list,zip(*_read_float_2D(gpath+"get_sLV_fixing_operations_shift.in."+str(case)))))
eps = _read_float(gpath+"get_sLV_fixing_operations_eps.in."+str(case))
dPerm = _read_RotPermList(gpath+"get_sLV_fixing_operations_dPerm.in."+str(case))
fixOp, rotPerm, degeneracy = _get_sLV_fixing_operations(HNF,pLV,nD,rot,shift,dPerm,eps)
rotPerm_out = _read_RotPermList(gpath+"get_sLV_fixing_operations_rotPerm.out."+str(case))
degen_out = _read_int(gpath+"get_sLV_fixing_operations_degeneracy.out."+str(case))
fixOp_out = _read_fixOp(gpath+"get_sLV_fixing_operations_fixOp.out."+str(case))
self._compare_outputs([fixOp,rotPerm,degeneracy],[fixOp_out,rotPerm_out,degen_out])
def test6(self):
from phenum.grouptheory import _get_sLV_fixing_operations
case = 50
HNF = _read_int_2D(gpath+"get_sLV_fixing_operations_HNF.in."+str(case))
pLV = _read_float_2D(gpath+"get_sLV_fixing_operations_pLV.in."+str(case))
nD = _read_int(gpath+"get_sLV_fixing_operations_nD.in."+str(case))
rot = _read_float_3D(gpath+"get_sLV_fixing_operations_rot.in."+str(case))
shift = list(map(list,zip(*_read_float_2D(gpath+"get_sLV_fixing_operations_shift.in."+str(case)))))
eps = _read_float(gpath+"get_sLV_fixing_operations_eps.in."+str(case))
dPerm = _read_RotPermList(gpath+"get_sLV_fixing_operations_dPerm.in."+str(case))
fixOp, rotPerm, degeneracy = _get_sLV_fixing_operations(HNF,pLV,nD,rot,shift,dPerm,eps)
rotPerm_out = _read_RotPermList(gpath+"get_sLV_fixing_operations_rotPerm.out."+str(case))
degen_out = _read_int(gpath+"get_sLV_fixing_operations_degeneracy.out."+str(case))
fixOp_out = _read_fixOp(gpath+"get_sLV_fixing_operations_fixOp.out."+str(case))
self._compare_outputs([fixOp,rotPerm,degeneracy],[fixOp_out,rotPerm_out,degen_out])
def test7(self):
from phenum.grouptheory import _get_sLV_fixing_operations
case = 60
HNF = _read_int_2D(gpath+"get_sLV_fixing_operations_HNF.in."+str(case))
pLV = _read_float_2D(gpath+"get_sLV_fixing_operations_pLV.in."+str(case))
nD = _read_int(gpath+"get_sLV_fixing_operations_nD.in."+str(case))
rot = _read_float_3D(gpath+"get_sLV_fixing_operations_rot.in."+str(case))
shift = list(map(list,zip(*_read_float_2D(gpath+"get_sLV_fixing_operations_shift.in."+str(case)))))
eps = _read_float(gpath+"get_sLV_fixing_operations_eps.in."+str(case))
dPerm = _read_RotPermList(gpath+"get_sLV_fixing_operations_dPerm.in."+str(case))
fixOp, rotPerm, degeneracy = _get_sLV_fixing_operations(HNF,pLV,nD,rot,shift,dPerm,eps)
rotPerm_out = _read_RotPermList(gpath+"get_sLV_fixing_operations_rotPerm.out."+str(case))
degen_out = _read_int(gpath+"get_sLV_fixing_operations_degeneracy.out."+str(case))
fixOp_out = _read_fixOp(gpath+"get_sLV_fixing_operations_fixOp.out."+str(case))
self._compare_outputs([fixOp,rotPerm,degeneracy],[fixOp_out,rotPerm_out,degen_out])
def test8(self):
from phenum.grouptheory import _get_sLV_fixing_operations
case = 70
HNF = _read_int_2D(gpath+"get_sLV_fixing_operations_HNF.in."+str(case))
pLV = _read_float_2D(gpath+"get_sLV_fixing_operations_pLV.in."+str(case))
nD = _read_int(gpath+"get_sLV_fixing_operations_nD.in."+str(case))
rot = _read_float_3D(gpath+"get_sLV_fixing_operations_rot.in."+str(case))
shift = list(map(list,zip(*_read_float_2D(gpath+"get_sLV_fixing_operations_shift.in."+str(case)))))
eps = _read_float(gpath+"get_sLV_fixing_operations_eps.in."+str(case))
dPerm = _read_RotPermList(gpath+"get_sLV_fixing_operations_dPerm.in."+str(case))
fixOp, rotPerm, degeneracy = _get_sLV_fixing_operations(HNF,pLV,nD,rot,shift,dPerm,eps)
rotPerm_out = _read_RotPermList(gpath+"get_sLV_fixing_operations_rotPerm.out."+str(case))
degen_out = _read_int(gpath+"get_sLV_fixing_operations_degeneracy.out."+str(case))
fixOp_out = _read_fixOp(gpath+"get_sLV_fixing_operations_fixOp.out."+str(case))
self._compare_outputs([fixOp,rotPerm,degeneracy],[fixOp_out,rotPerm_out,degen_out])
def test9(self):
from phenum.grouptheory import _get_sLV_fixing_operations
case = 80
HNF = _read_int_2D(gpath+"get_sLV_fixing_operations_HNF.in."+str(case))
pLV = _read_float_2D(gpath+"get_sLV_fixing_operations_pLV.in."+str(case))
nD = _read_int(gpath+"get_sLV_fixing_operations_nD.in."+str(case))
rot = _read_float_3D(gpath+"get_sLV_fixing_operations_rot.in."+str(case))
shift = list(map(list,zip(*_read_float_2D(gpath+"get_sLV_fixing_operations_shift.in."+str(case)))))
eps = _read_float(gpath+"get_sLV_fixing_operations_eps.in."+str(case))
dPerm = _read_RotPermList(gpath+"get_sLV_fixing_operations_dPerm.in."+str(case))
fixOp, rotPerm, degeneracy = _get_sLV_fixing_operations(HNF,pLV,nD,rot,shift,dPerm,eps)
rotPerm_out = _read_RotPermList(gpath+"get_sLV_fixing_operations_rotPerm.out."+str(case))
degen_out = _read_int(gpath+"get_sLV_fixing_operations_degeneracy.out."+str(case))
fixOp_out = _read_fixOp(gpath+"get_sLV_fixing_operations_fixOp.out."+str(case))
self._compare_outputs([fixOp,rotPerm,degeneracy],[fixOp_out,rotPerm_out,degen_out])
def test10(self):
from phenum.grouptheory import _get_sLV_fixing_operations
case = 90
HNF = _read_int_2D(gpath+"get_sLV_fixing_operations_HNF.in."+str(case))
pLV = _read_float_2D(gpath+"get_sLV_fixing_operations_pLV.in."+str(case))
nD = _read_int(gpath+"get_sLV_fixing_operations_nD.in."+str(case))
rot = _read_float_3D(gpath+"get_sLV_fixing_operations_rot.in."+str(case))
shift = list(map(list,zip(*_read_float_2D(gpath+"get_sLV_fixing_operations_shift.in."+str(case)))))
eps = _read_float(gpath+"get_sLV_fixing_operations_eps.in."+str(case))
dPerm = _read_RotPermList(gpath+"get_sLV_fixing_operations_dPerm.in."+str(case))
fixOp, rotPerm, degeneracy = _get_sLV_fixing_operations(HNF,pLV,nD,rot,shift,dPerm,eps)
rotPerm_out = _read_RotPermList(gpath+"get_sLV_fixing_operations_rotPerm.out."+str(case))
degen_out = _read_int(gpath+"get_sLV_fixing_operations_degeneracy.out."+str(case))
fixOp_out = _read_fixOp(gpath+"get_sLV_fixing_operations_fixOp.out."+str(case))
self._compare_outputs([fixOp,rotPerm,degeneracy],[fixOp_out,rotPerm_out,degen_out])
def test11(self):
from phenum.grouptheory import _get_sLV_fixing_operations
case = 100
HNF = _read_int_2D(gpath+"get_sLV_fixing_operations_HNF.in."+str(case))
pLV = _read_float_2D(gpath+"get_sLV_fixing_operations_pLV.in."+str(case))
nD = _read_int(gpath+"get_sLV_fixing_operations_nD.in."+str(case))
rot = _read_float_3D(gpath+"get_sLV_fixing_operations_rot.in."+str(case))
shift = list(map(list,zip(*_read_float_2D(gpath+"get_sLV_fixing_operations_shift.in."+str(case)))))
eps = _read_float(gpath+"get_sLV_fixing_operations_eps.in."+str(case))
dPerm = _read_RotPermList(gpath+"get_sLV_fixing_operations_dPerm.in."+str(case))
fixOp, rotPerm, degeneracy = _get_sLV_fixing_operations(HNF,pLV,nD,rot,shift,dPerm,eps)
rotPerm_out = _read_RotPermList(gpath+"get_sLV_fixing_operations_rotPerm.out."+str(case))
degen_out = _read_int(gpath+"get_sLV_fixing_operations_degeneracy.out."+str(case))
fixOp_out = _read_fixOp(gpath+"get_sLV_fixing_operations_fixOp.out."+str(case))
self._compare_outputs([fixOp,rotPerm,degeneracy],[fixOp_out,rotPerm_out,degen_out])
def test12(self):
from phenum.grouptheory import _get_sLV_fixing_operations
case = 110
HNF = _read_int_2D(gpath+"get_sLV_fixing_operations_HNF.in."+str(case))
pLV = _read_float_2D(gpath+"get_sLV_fixing_operations_pLV.in."+str(case))
nD = _read_int(gpath+"get_sLV_fixing_operations_nD.in."+str(case))
rot = _read_float_3D(gpath+"get_sLV_fixing_operations_rot.in."+str(case))
shift = list(map(list,zip(*_read_float_2D(gpath+"get_sLV_fixing_operations_shift.in."+str(case)))))
eps = _read_float(gpath+"get_sLV_fixing_operations_eps.in."+str(case))
dPerm = _read_RotPermList(gpath+"get_sLV_fixing_operations_dPerm.in."+str(case))
fixOp, rotPerm, degeneracy = _get_sLV_fixing_operations(HNF,pLV,nD,rot,shift,dPerm,eps)
rotPerm_out = _read_RotPermList(gpath+"get_sLV_fixing_operations_rotPerm.out."+str(case))
degen_out = _read_int(gpath+"get_sLV_fixing_operations_degeneracy.out."+str(case))
fixOp_out = _read_fixOp(gpath+"get_sLV_fixing_operations_fixOp.out."+str(case))
self._compare_outputs([fixOp,rotPerm,degeneracy],[fixOp_out,rotPerm_out,degen_out])
def test13(self):
from phenum.grouptheory import _get_sLV_fixing_operations
case = 120
HNF = _read_int_2D(gpath+"get_sLV_fixing_operations_HNF.in."+str(case))
pLV = _read_float_2D(gpath+"get_sLV_fixing_operations_pLV.in."+str(case))
nD = _read_int(gpath+"get_sLV_fixing_operations_nD.in."+str(case))
rot = _read_float_3D(gpath+"get_sLV_fixing_operations_rot.in."+str(case))
shift = list(map(list,zip(*_read_float_2D(gpath+"get_sLV_fixing_operations_shift.in."+str(case)))))
eps = _read_float(gpath+"get_sLV_fixing_operations_eps.in."+str(case))
dPerm = _read_RotPermList(gpath+"get_sLV_fixing_operations_dPerm.in."+str(case))
fixOp, rotPerm, degeneracy = _get_sLV_fixing_operations(HNF,pLV,nD,rot,shift,dPerm,eps)
rotPerm_out = _read_RotPermList(gpath+"get_sLV_fixing_operations_rotPerm.out."+str(case))
degen_out = _read_int(gpath+"get_sLV_fixing_operations_degeneracy.out."+str(case))
fixOp_out = _read_fixOp(gpath+"get_sLV_fixing_operations_fixOp.out."+str(case))
self._compare_outputs([fixOp,rotPerm,degeneracy],[fixOp_out,rotPerm_out,degen_out])
def test14(self):
from phenum.grouptheory import _get_sLV_fixing_operations
case = 130
HNF = _read_int_2D(gpath+"get_sLV_fixing_operations_HNF.in."+str(case))
pLV = _read_float_2D(gpath+"get_sLV_fixing_operations_pLV.in."+str(case))
nD = _read_int(gpath+"get_sLV_fixing_operations_nD.in."+str(case))
rot = _read_float_3D(gpath+"get_sLV_fixing_operations_rot.in."+str(case))
shift = list(map(list,zip(*_read_float_2D(gpath+"get_sLV_fixing_operations_shift.in."+str(case)))))
eps = _read_float(gpath+"get_sLV_fixing_operations_eps.in."+str(case))
dPerm = _read_RotPermList(gpath+"get_sLV_fixing_operations_dPerm.in."+str(case))
fixOp, rotPerm, degeneracy = _get_sLV_fixing_operations(HNF,pLV,nD,rot,shift,dPerm,eps)
rotPerm_out = _read_RotPermList(gpath+"get_sLV_fixing_operations_rotPerm.out."+str(case))
degen_out = _read_int(gpath+"get_sLV_fixing_operations_degeneracy.out."+str(case))
fixOp_out = _read_fixOp(gpath+"get_sLV_fixing_operations_fixOp.out."+str(case))
self._compare_outputs([fixOp,rotPerm,degeneracy],[fixOp_out,rotPerm_out,degen_out])
def test15(self):
from phenum.grouptheory import _get_sLV_fixing_operations
case = 140
HNF = _read_int_2D(gpath+"get_sLV_fixing_operations_HNF.in."+str(case))
pLV = _read_float_2D(gpath+"get_sLV_fixing_operations_pLV.in."+str(case))
nD = _read_int(gpath+"get_sLV_fixing_operations_nD.in."+str(case))
rot = _read_float_3D(gpath+"get_sLV_fixing_operations_rot.in."+str(case))
shift = list(map(list,zip(*_read_float_2D(gpath+"get_sLV_fixing_operations_shift.in."+str(case)))))
eps = _read_float(gpath+"get_sLV_fixing_operations_eps.in."+str(case))
dPerm = _read_RotPermList(gpath+"get_sLV_fixing_operations_dPerm.in."+str(case))
fixOp, rotPerm, degeneracy = _get_sLV_fixing_operations(HNF,pLV,nD,rot,shift,dPerm,eps)
rotPerm_out = _read_RotPermList(gpath+"get_sLV_fixing_operations_rotPerm.out."+str(case))
degen_out = _read_int(gpath+"get_sLV_fixing_operations_degeneracy.out."+str(case))
fixOp_out = _read_fixOp(gpath+"get_sLV_fixing_operations_fixOp.out."+str(case))
self._compare_outputs([fixOp,rotPerm,degeneracy],[fixOp_out,rotPerm_out,degen_out])
def test16(self):
from phenum.grouptheory import _get_sLV_fixing_operations
case = 150
HNF = _read_int_2D(gpath+"get_sLV_fixing_operations_HNF.in."+str(case))
pLV = _read_float_2D(gpath+"get_sLV_fixing_operations_pLV.in."+str(case))
nD = _read_int(gpath+"get_sLV_fixing_operations_nD.in."+str(case))
rot = _read_float_3D(gpath+"get_sLV_fixing_operations_rot.in."+str(case))
shift = list(map(list,zip(*_read_float_2D(gpath+"get_sLV_fixing_operations_shift.in."+str(case)))))
eps = _read_float(gpath+"get_sLV_fixing_operations_eps.in."+str(case))
dPerm = _read_RotPermList(gpath+"get_sLV_fixing_operations_dPerm.in."+str(case))
fixOp, rotPerm, degeneracy = _get_sLV_fixing_operations(HNF,pLV,nD,rot,shift,dPerm,eps)
rotPerm_out = _read_RotPermList(gpath+"get_sLV_fixing_operations_rotPerm.out."+str(case))
degen_out = _read_int(gpath+"get_sLV_fixing_operations_degeneracy.out."+str(case))
fixOp_out = _read_fixOp(gpath+"get_sLV_fixing_operations_fixOp.out."+str(case))
self._compare_outputs([fixOp,rotPerm,degeneracy],[fixOp_out,rotPerm_out,degen_out])
class TestMapDvectorPermutation(ut.TestCase):
"""Tests of the _map_dvector_permutation subroutine."""
def _compare_outputs(self,out1,out2):
if len(out1) == len(out2):
for i in range(len(out1)):
self.assertEqual(out1[i],out2[i])
else:
self.assertEqual(len(out1),len(out2))
def test1(self):
from phenum.grouptheory import _map_dvector_permutation
case = 1
rd = list(map(list,zip(*_read_float_2D(gpath+"map_dvector_permutation_rd.in."+str(case)))))
d = list(map(list,zip(*_read_float_2D(gpath+"map_dvector_permutation_d.in."+str(case)))))
eps = _read_float(gpath+"map_dvector_permutation_eps.in."+str(case))
n = len(rd)
out = _read_int_1D(gpath+"map_dvector_permutation_RP.out."+str(case))
out = [i-1 for i in out]
self._compare_outputs(_map_dvector_permutation(rd,d,eps,n),out)
def test2(self):
from phenum.grouptheory import _map_dvector_permutation
case = 10
rd = list(map(list,zip(*_read_float_2D(gpath+"map_dvector_permutation_rd.in."+str(case)))))
d = list(map(list,zip(*_read_float_2D(gpath+"map_dvector_permutation_d.in."+str(case)))))
eps = _read_float(gpath+"map_dvector_permutation_eps.in."+str(case))
n = len(rd)
out = _read_int_1D(gpath+"map_dvector_permutation_RP.out."+str(case))
out = [i-1 for i in out]
self._compare_outputs(_map_dvector_permutation(rd,d,eps,n),out)
def test3(self):
from phenum.grouptheory import _map_dvector_permutation
case = 20
rd = list(map(list,zip(*_read_float_2D(gpath+"map_dvector_permutation_rd.in."+str(case)))))
d = list(map(list,zip(*_read_float_2D(gpath+"map_dvector_permutation_d.in."+str(case)))))
eps = _read_float(gpath+"map_dvector_permutation_eps.in."+str(case))
n = len(rd)
out = _read_int_1D(gpath+"map_dvector_permutation_RP.out."+str(case))
out = [i-1 for i in out]
self._compare_outputs(_map_dvector_permutation(rd,d,eps,n),out)
def test4(self):
from phenum.grouptheory import _map_dvector_permutation
case = 30
rd = list(map(list,zip(*_read_float_2D(gpath+"map_dvector_permutation_rd.in."+str(case)))))
d = list(map(list,zip(*_read_float_2D(gpath+"map_dvector_permutation_d.in."+str(case)))))
eps = _read_float(gpath+"map_dvector_permutation_eps.in."+str(case))
n = len(rd)
out = _read_int_1D(gpath+"map_dvector_permutation_RP.out."+str(case))
out = [i-1 for i in out]
self._compare_outputs(_map_dvector_permutation(rd,d,eps,n),out)
def test5(self):
from phenum.grouptheory import _map_dvector_permutation
case = 40
rd = list(map(list,zip(*_read_float_2D(gpath+"map_dvector_permutation_rd.in."+str(case)))))
d = list(map(list,zip(*_read_float_2D(gpath+"map_dvector_permutation_d.in."+str(case)))))
eps = _read_float(gpath+"map_dvector_permutation_eps.in."+str(case))
n = len(rd)
out = _read_int_1D(gpath+"map_dvector_permutation_RP.out."+str(case))
out = [i-1 for i in out]
self._compare_outputs(_map_dvector_permutation(rd,d,eps,n),out)
def test6(self):
from phenum.grouptheory import _map_dvector_permutation
case = 50
rd = list(map(list,zip(*_read_float_2D(gpath+"map_dvector_permutation_rd.in."+str(case)))))
d = list(map(list,zip(*_read_float_2D(gpath+"map_dvector_permutation_d.in."+str(case)))))
eps = _read_float(gpath+"map_dvector_permutation_eps.in."+str(case))
n = len(rd)
out = _read_int_1D(gpath+"map_dvector_permutation_RP.out."+str(case))
out = [i-1 for i in out]
self._compare_outputs(_map_dvector_permutation(rd,d,eps,n),out)
def test7(self):
from phenum.grouptheory import _map_dvector_permutation
case = 60
rd = list(map(list,zip(*_read_float_2D(gpath+"map_dvector_permutation_rd.in."+str(case)))))
d = list(map(list,zip(*_read_float_2D(gpath+"map_dvector_permutation_d.in."+str(case)))))
eps = _read_float(gpath+"map_dvector_permutation_eps.in."+str(case))
n = len(rd)
out = _read_int_1D(gpath+"map_dvector_permutation_RP.out."+str(case))
out = [i-1 for i in out]
self._compare_outputs(_map_dvector_permutation(rd,d,eps,n),out)
def test8(self):
from phenum.grouptheory import _map_dvector_permutation
case = 70
rd = list(map(list,zip(*_read_float_2D(gpath+"map_dvector_permutation_rd.in."+str(case)))))
d = list(map(list,zip(*_read_float_2D(gpath+"map_dvector_permutation_d.in."+str(case)))))
eps = _read_float(gpath+"map_dvector_permutation_eps.in."+str(case))
n = len(rd)
out = _read_int_1D(gpath+"map_dvector_permutation_RP.out."+str(case))
out = [i-1 for i in out]
self._compare_outputs(_map_dvector_permutation(rd,d,eps,n),out)
def test9(self):
from phenum.grouptheory import _map_dvector_permutation
case = 80
rd = list(map(list,zip(*_read_float_2D(gpath+"map_dvector_permutation_rd.in."+str(case)))))
d = list(map(list,zip(*_read_float_2D(gpath+"map_dvector_permutation_d.in."+str(case)))))
eps = _read_float(gpath+"map_dvector_permutation_eps.in."+str(case))
n = len(rd)
out = _read_int_1D(gpath+"map_dvector_permutation_RP.out."+str(case))
out = [i-1 for i in out]
self._compare_outputs(_map_dvector_permutation(rd,d,eps,n),out)
def test10(self):
from phenum.grouptheory import _map_dvector_permutation
case = 90
rd = list(map(list,zip(*_read_float_2D(gpath+"map_dvector_permutation_rd.in."+str(case)))))
d = list(map(list,zip(*_read_float_2D(gpath+"map_dvector_permutation_d.in."+str(case)))))
eps = _read_float(gpath+"map_dvector_permutation_eps.in."+str(case))
n = len(rd)
out = _read_int_1D(gpath+"map_dvector_permutation_RP.out."+str(case))
out = [i-1 for i in out]
self._compare_outputs(_map_dvector_permutation(rd,d,eps,n),out)
def test11(self):
from phenum.grouptheory import _map_dvector_permutation
case = 100
rd = list(map(list,zip(*_read_float_2D(gpath+"map_dvector_permutation_rd.in."+str(case)))))
d = list(map(list,zip(*_read_float_2D(gpath+"map_dvector_permutation_d.in."+str(case)))))
eps = _read_float(gpath+"map_dvector_permutation_eps.in."+str(case))
n = len(rd)
out = _read_int_1D(gpath+"map_dvector_permutation_RP.out."+str(case))
out = [i-1 for i in out]
self._compare_outputs(_map_dvector_permutation(rd,d,eps,n),out)
def test12(self):
from phenum.grouptheory import _map_dvector_permutation
case = 110
rd = list(map(list,zip(*_read_float_2D(gpath+"map_dvector_permutation_rd.in."+str(case)))))
d = list(map(list,zip(*_read_float_2D(gpath+"map_dvector_permutation_d.in."+str(case)))))
eps = _read_float(gpath+"map_dvector_permutation_eps.in."+str(case))
n = len(rd)
out = _read_int_1D(gpath+"map_dvector_permutation_RP.out."+str(case))
out = [i-1 for i in out]
self._compare_outputs(_map_dvector_permutation(rd,d,eps,n),out)
def test13(self):
from phenum.grouptheory import _map_dvector_permutation
case = 120
rd = list(map(list,zip(*_read_float_2D(gpath+"map_dvector_permutation_rd.in."+str(case)))))
d = list(map(list,zip(*_read_float_2D(gpath+"map_dvector_permutation_d.in."+str(case)))))
eps = _read_float(gpath+"map_dvector_permutation_eps.in."+str(case))
n = len(rd)
out = _read_int_1D(gpath+"map_dvector_permutation_RP.out."+str(case))
out = [i-1 for i in out]
self._compare_outputs(_map_dvector_permutation(rd,d,eps,n),out)
def test14(self):
from phenum.grouptheory import _map_dvector_permutation
case = 130
rd = list(map(list,zip(*_read_float_2D(gpath+"map_dvector_permutation_rd.in."+str(case)))))
d = list(map(list,zip(*_read_float_2D(gpath+"map_dvector_permutation_d.in."+str(case)))))
eps = _read_float(gpath+"map_dvector_permutation_eps.in."+str(case))
n = len(rd)
out = _read_int_1D(gpath+"map_dvector_permutation_RP.out."+str(case))
out = [i-1 for i in out]
self._compare_outputs(_map_dvector_permutation(rd,d,eps,n),out)
def test15(self):
from phenum.grouptheory import _map_dvector_permutation
case = 140
rd = list(map(list,zip(*_read_float_2D(gpath+"map_dvector_permutation_rd.in."+str(case)))))
d = list(map(list,zip(*_read_float_2D(gpath+"map_dvector_permutation_d.in."+str(case)))))
eps = _read_float(gpath+"map_dvector_permutation_eps.in."+str(case))
n = len(rd)
out = _read_int_1D(gpath+"map_dvector_permutation_RP.out."+str(case))
out = [i-1 for i in out]
self._compare_outputs(_map_dvector_permutation(rd,d,eps,n),out)
def test16(self):
from phenum.grouptheory import _map_dvector_permutation
case = 150
rd = list(map(list,zip(*_read_float_2D(gpath+"map_dvector_permutation_rd.in."+str(case)))))
d = list(map(list,zip(*_read_float_2D(gpath+"map_dvector_permutation_d.in."+str(case)))))
eps = _read_float(gpath+"map_dvector_permutation_eps.in."+str(case))
n = len(rd)
out = _read_int_1D(gpath+"map_dvector_permutation_RP.out."+str(case))
out = [i-1 for i in out]
self._compare_outputs(_map_dvector_permutation(rd,d,eps,n),out)
def test17(self):
from phenum.grouptheory import _map_dvector_permutation
case = 160
rd = list(map(list,zip(*_read_float_2D(gpath+"map_dvector_permutation_rd.in."+str(case)))))
d = list(map(list,zip(*_read_float_2D(gpath+"map_dvector_permutation_d.in."+str(case)))))
eps = _read_float(gpath+"map_dvector_permutation_eps.in."+str(case))
n = len(rd)
out = _read_int_1D(gpath+"map_dvector_permutation_RP.out."+str(case))
out = [i-1 for i in out]
self._compare_outputs(_map_dvector_permutation(rd,d,eps,n),out)
def test18(self):
from phenum.grouptheory import _map_dvector_permutation
case = 170
rd = list(map(list,zip(*_read_float_2D(gpath+"map_dvector_permutation_rd.in."+str(case)))))
d = list(map(list,zip(*_read_float_2D(gpath+"map_dvector_permutation_d.in."+str(case)))))
eps = _read_float(gpath+"map_dvector_permutation_eps.in."+str(case))
n = len(rd)
out = _read_int_1D(gpath+"map_dvector_permutation_RP.out."+str(case))
out = [i-1 for i in out]
self._compare_outputs(_map_dvector_permutation(rd,d,eps,n),out)
def test19(self):
from phenum.grouptheory import _map_dvector_permutation
case = 180
rd = list(map(list,zip(*_read_float_2D(gpath+"map_dvector_permutation_rd.in."+str(case)))))
d = list(map(list,zip(*_read_float_2D(gpath+"map_dvector_permutation_d.in."+str(case)))))
eps = _read_float(gpath+"map_dvector_permutation_eps.in."+str(case))
n = len(rd)
out = _read_int_1D(gpath+"map_dvector_permutation_RP.out."+str(case))
out = [i-1 for i in out]
self._compare_outputs(_map_dvector_permutation(rd,d,eps,n),out)
def test20(self):
from phenum.grouptheory import _map_dvector_permutation
case = 190
rd = list(map(list,zip(*_read_float_2D(gpath+"map_dvector_permutation_rd.in."+str(case)))))
d = list(map(list,zip(*_read_float_2D(gpath+"map_dvector_permutation_d.in."+str(case)))))
eps = _read_float(gpath+"map_dvector_permutation_eps.in."+str(case))
n = len(rd)
out = _read_int_1D(gpath+"map_dvector_permutation_RP.out."+str(case))
out = [i-1 for i in out]
self._compare_outputs(_map_dvector_permutation(rd,d,eps,n),out)
def test21(self):
from phenum.grouptheory import _map_dvector_permutation
case = 200
rd = list(map(list,zip(*_read_float_2D(gpath+"map_dvector_permutation_rd.in."+str(case)))))
d = list(map(list,zip(*_read_float_2D(gpath+"map_dvector_permutation_d.in."+str(case)))))
eps = _read_float(gpath+"map_dvector_permutation_eps.in."+str(case))
n = len(rd)
out = _read_int_1D(gpath+"map_dvector_permutation_RP.out."+str(case))
out = [i-1 for i in out]
self._compare_outputs(_map_dvector_permutation(rd,d,eps,n),out)
class TestFindMinmaxIndices(ut.TestCase):
"""Tests of the _find_minmax_indices subroutine."""
def test1(self):
from phenum.grouptheory import _find_minmax_indices
case = 1
invec = _read_int_1D(gpath+"get_minmax_indices_invec.in."+str(case))
min,max = _find_minmax_indices(invec)
min_out = _read_int(gpath+"get_minmax_indices_min.out."+str(case))-1
max_out = _read_int(gpath+"get_minmax_indices_max.out."+str(case))-1
self.assertEqual(min,min_out)
self.assertEqual(max,max_out)
def test2(self):
from phenum.grouptheory import _find_minmax_indices
case = 5
invec = _read_int_1D(gpath+"get_minmax_indices_invec.in."+str(case))
min,max = _find_minmax_indices(invec)
min_out = _read_int(gpath+"get_minmax_indices_min.out."+str(case))-1
max_out = _read_int(gpath+"get_minmax_indices_max.out."+str(case))-1
self.assertEqual(min,min_out)
self.assertEqual(max,max_out)
def test3(self):
from phenum.grouptheory import _find_minmax_indices
case = 10
invec = _read_int_1D(gpath+"get_minmax_indices_invec.in."+str(case))
min,max = _find_minmax_indices(invec)
min_out = _read_int(gpath+"get_minmax_indices_min.out."+str(case))-1
max_out = _read_int(gpath+"get_minmax_indices_max.out."+str(case))-1
self.assertEqual(min,min_out)
self.assertEqual(max,max_out)
def test4(self):
from phenum.grouptheory import _find_minmax_indices
case = 15
invec = _read_int_1D(gpath+"get_minmax_indices_invec.in."+str(case))
min,max = _find_minmax_indices(invec)
min_out = _read_int(gpath+"get_minmax_indices_min.out."+str(case))-1
max_out = _read_int(gpath+"get_minmax_indices_max.out."+str(case))-1
self.assertEqual(min,min_out)
self.assertEqual(max,max_out)
def test5(self):
from phenum.grouptheory import _find_minmax_indices
case = 20
invec = _read_int_1D(gpath+"get_minmax_indices_invec.in."+str(case))
min,max = _find_minmax_indices(invec)
min_out = _read_int(gpath+"get_minmax_indices_min.out."+str(case))-1
max_out = _read_int(gpath+"get_minmax_indices_max.out."+str(case))-1
self.assertEqual(min,min_out)
self.assertEqual(max,max_out)
def test6(self):
from phenum.grouptheory import _find_minmax_indices
case = 25
invec = _read_int_1D(gpath+"get_minmax_indices_invec.in."+str(case))
min,max = _find_minmax_indices(invec)
min_out = _read_int(gpath+"get_minmax_indices_min.out."+str(case))-1
max_out = _read_int(gpath+"get_minmax_indices_max.out."+str(case))-1
self.assertEqual(min,min_out)
self.assertEqual(max,max_out)
def test7(self):
from phenum.grouptheory import _find_minmax_indices
case = 30
invec = _read_int_1D(gpath+"get_minmax_indices_invec.in."+str(case))
min,max = _find_minmax_indices(invec)
min_out = _read_int(gpath+"get_minmax_indices_min.out."+str(case))-1
max_out = _read_int(gpath+"get_minmax_indices_max.out."+str(case))-1
self.assertEqual(min,min_out)
self.assertEqual(max,max_out)
def test8(self):
from phenum.grouptheory import _find_minmax_indices
case = 35
invec = _read_int_1D(gpath+"get_minmax_indices_invec.in."+str(case))
min,max = _find_minmax_indices(invec)
min_out = _read_int(gpath+"get_minmax_indices_min.out."+str(case))-1
max_out = _read_int(gpath+"get_minmax_indices_max.out."+str(case))-1
self.assertEqual(min,min_out)
self.assertEqual(max,max_out)
def test9(self):
from phenum.grouptheory import _find_minmax_indices
case = 40
invec = _read_int_1D(gpath+"get_minmax_indices_invec.in."+str(case))
min,max = _find_minmax_indices(invec)
min_out = _read_int(gpath+"get_minmax_indices_min.out."+str(case))-1
max_out = _read_int(gpath+"get_minmax_indices_max.out."+str(case))-1
self.assertEqual(min,min_out)
self.assertEqual(max,max_out)
def test10(self):
from phenum.grouptheory import _find_minmax_indices
case = 45
invec = _read_int_1D(gpath+"get_minmax_indices_invec.in."+str(case))
min,max = _find_minmax_indices(invec)
min_out = _read_int(gpath+"get_minmax_indices_min.out."+str(case))-1
max_out = _read_int(gpath+"get_minmax_indices_max.out."+str(case))-1
self.assertEqual(min,min_out)
self.assertEqual(max,max_out)
def test11(self):
from phenum.grouptheory import _find_minmax_indices
case = 50
invec = _read_int_1D(gpath+"get_minmax_indices_invec.in."+str(case))
min,max = _find_minmax_indices(invec)
min_out = _read_int(gpath+"get_minmax_indices_min.out."+str(case))-1
max_out = _read_int(gpath+"get_minmax_indices_max.out."+str(case))-1
self.assertEqual(min,min_out)
self.assertEqual(max,max_out)
class TestGetDvectorPermutations(ut.TestCase):
"""Tests of the _get_dvector_permutations subroutine."""
def _compare_outputs(self,rot1,rot2):
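        # Helper: compares two RotPermList objects field by field (RotIndx,
        # v to 12 decimal places, site_perm, and arrow_perm).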
# self.assertEqual(rot1.nL,rot2.nL)
self.assertEqual(rot1.RotIndx, rot2.RotIndx)
        if (rot1.v is not None) and (rot2.v is not None):
if len(rot1.v) == len(rot2.v):
for i in range(len(rot1.v)):
for j in range(len(rot1.v[i])):
for k in range(len(rot1.v[i][j])):
self.assertAlmostEqual(rot1.v[i][j][k],rot2.v[i][j][k],places=12)
else:
self.assertEqual(len(rot1.v),len(rot2.v))
else:
self.assertEqual(rot1.v,rot2.v)
        if (rot1.perm.site_perm is not None) and (rot2.perm.site_perm is not None):
if len(rot1.perm.site_perm) == len(rot2.perm.site_perm):
for i in range(len(rot1.perm.site_perm)):
for j in range(len(rot1.perm.site_perm[i])):
self.assertEqual(rot1.perm.site_perm[i][j],rot2.perm.site_perm[i][j])
else:
self.assertEqual(len(rot1.perm.site_perm),len(rot2.perm.site_perm))
else:
self.assertEqual(rot1.perm.site_perm,rot2.perm.site_perm)
        if (rot1.perm.arrow_perm is not None) and (rot2.perm.arrow_perm is not None):
if len(rot1.perm.arrow_perm) == len(rot2.perm.arrow_perm):
for i in range(len(rot1.perm.arrow_perm)):
for j in range(len(rot1.perm.arrow_perm[i])):
self.assertEqual(rot1.perm.arrow_perm[i][j],rot2.perm.arrow_perm[i][j])
else:
self.assertEqual(len(rot1.perm.arrow_perm),len(rot2.perm.arrow_perm))
else:
self.assertEqual(rot1.perm.arrow_perm,rot2.perm.arrow_perm)
def test1(self):
from phenum.grouptheory import _get_dvector_permutations
case = 1
par_lat = list(map(list,zip(*_read_float_2D(gpath+"get_dvector_permutations_pLV.in."+str(case)))))
bas_vecs = list(map(list,zip(*_read_float_2D(gpath+"get_dvector_permutations_pd.in."+str(case)))))
LatDim = _read_int(gpath+"get_dvector_permutations_LatDim.in."+str(case))
eps = _read_float(gpath+"get_dvector_permutations_eps.in."+str(case))
dRPList_out = _read_RotPermList(gpath+"get_dvector_permutations_dRPList.out."+str(case))
dRPList = _get_dvector_permutations(par_lat,bas_vecs,LatDim,eps)
self._compare_outputs(dRPList,dRPList_out)
def test2(self):
from phenum.grouptheory import _get_dvector_permutations
case = 2
par_lat = list(map(list,zip(*_read_float_2D(gpath+"get_dvector_permutations_pLV.in."+str(case)))))
bas_vecs = list(map(list,zip(*_read_float_2D(gpath+"get_dvector_permutations_pd.in."+str(case)))))
LatDim = _read_int(gpath+"get_dvector_permutations_LatDim.in."+str(case))
eps = _read_float(gpath+"get_dvector_permutations_eps.in."+str(case))
dRPList_out = _read_RotPermList(gpath+"get_dvector_permutations_dRPList.out."+str(case))
dRPList = _get_dvector_permutations(par_lat,bas_vecs,LatDim,eps)
self._compare_outputs(dRPList,dRPList_out)
def test3(self):
from phenum.grouptheory import _get_dvector_permutations
case = 3
par_lat = list(map(list,zip(*_read_float_2D(gpath+"get_dvector_permutations_pLV.in."+str(case)))))
bas_vecs = list(map(list,zip(*_read_float_2D(gpath+"get_dvector_permutations_pd.in."+str(case)))))
LatDim = _read_int(gpath+"get_dvector_permutations_LatDim.in."+str(case))
eps = _read_float(gpath+"get_dvector_permutations_eps.in."+str(case))
dRPList_out = _read_RotPermList(gpath+"get_dvector_permutations_dRPList.out."+str(case))
dRPList = _get_dvector_permutations(par_lat,bas_vecs,LatDim,eps)
self._compare_outputs(dRPList,dRPList_out)
def test4(self):
from phenum.grouptheory import _get_dvector_permutations
case = 4
par_lat = list(map(list,zip(*_read_float_2D(gpath+"get_dvector_permutations_pLV.in."+str(case)))))
bas_vecs = list(map(list,zip(*_read_float_2D(gpath+"get_dvector_permutations_pd.in."+str(case)))))
LatDim = _read_int(gpath+"get_dvector_permutations_LatDim.in."+str(case))
eps = _read_float(gpath+"get_dvector_permutations_eps.in."+str(case))
dRPList_out = _read_RotPermList(gpath+"get_dvector_permutations_dRPList.out."+str(case))
dRPList = _get_dvector_permutations(par_lat,bas_vecs,LatDim,eps)
self._compare_outputs(dRPList,dRPList_out)
def test5(self):
from phenum.grouptheory import _get_dvector_permutations
case = 5
par_lat = list(map(list,zip(*_read_float_2D(gpath+"get_dvector_permutations_pLV.in."+str(case)))))
bas_vecs = list(map(list,zip(*_read_float_2D(gpath+"get_dvector_permutations_pd.in."+str(case)))))
LatDim = _read_int(gpath+"get_dvector_permutations_LatDim.in."+str(case))
eps = _read_float(gpath+"get_dvector_permutations_eps.in."+str(case))
dRPList_out = _read_RotPermList(gpath+"get_dvector_permutations_dRPList.out."+str(case))
dRPList = _get_dvector_permutations(par_lat,bas_vecs,LatDim,eps)
self._compare_outputs(dRPList,dRPList_out)
def test6(self):
from phenum.grouptheory import _get_dvector_permutations
case = 6
par_lat = list(map(list,zip(*_read_float_2D(gpath+"get_dvector_permutations_pLV.in."+str(case)))))
bas_vecs = list(map(list,zip(*_read_float_2D(gpath+"get_dvector_permutations_pd.in."+str(case)))))
LatDim = _read_int(gpath+"get_dvector_permutations_LatDim.in."+str(case))
eps = _read_float(gpath+"get_dvector_permutations_eps.in."+str(case))
dRPList_out = _read_RotPermList(gpath+"get_dvector_permutations_dRPList.out."+str(case))
dRPList = _get_dvector_permutations(par_lat,bas_vecs,LatDim,eps)
self._compare_outputs(dRPList,dRPList_out)
def test7(self):
from phenum.grouptheory import _get_dvector_permutations
case = 7
par_lat = list(map(list,zip(*_read_float_2D(gpath+"get_dvector_permutations_pLV.in."+str(case)))))
bas_vecs = list(map(list,zip(*_read_float_2D(gpath+"get_dvector_permutations_pd.in."+str(case)))))
LatDim = _read_int(gpath+"get_dvector_permutations_LatDim.in."+str(case))
eps = _read_float(gpath+"get_dvector_permutations_eps.in."+str(case))
dRPList_out = _read_RotPermList(gpath+"get_dvector_permutations_dRPList.out."+str(case))
dRPList = _get_dvector_permutations(par_lat,bas_vecs,LatDim,eps)
self._compare_outputs(dRPList,dRPList_out)
def test8(self):
from phenum.grouptheory import _get_dvector_permutations
case = 8
par_lat = list(map(list,zip(*_read_float_2D(gpath+"get_dvector_permutations_pLV.in."+str(case)))))
bas_vecs = list(map(list,zip(*_read_float_2D(gpath+"get_dvector_permutations_pd.in."+str(case)))))
LatDim = _read_int(gpath+"get_dvector_permutations_LatDim.in."+str(case))
eps = _read_float(gpath+"get_dvector_permutations_eps.in."+str(case))
dRPList_out = _read_RotPermList(gpath+"get_dvector_permutations_dRPList.out."+str(case))
dRPList = _get_dvector_permutations(par_lat,bas_vecs,LatDim,eps)
self._compare_outputs(dRPList,dRPList_out)
def test9(self):
from phenum.grouptheory import _get_dvector_permutations
case = 9
par_lat = list(map(list,zip(*_read_float_2D(gpath+"get_dvector_permutations_pLV.in."+str(case)))))
bas_vecs = list(map(list,zip(*_read_float_2D(gpath+"get_dvector_permutations_pd.in."+str(case)))))
LatDim = _read_int(gpath+"get_dvector_permutations_LatDim.in."+str(case))
eps = _read_float(gpath+"get_dvector_permutations_eps.in."+str(case))
dRPList = _get_dvector_permutations(par_lat,bas_vecs,LatDim,eps)
dRPList_out = _read_RotPermList(gpath+"get_dvector_permutations_dRPList.out."+str(case))
self._compare_outputs(dRPList,dRPList_out)
def test10(self):
from phenum.grouptheory import _get_dvector_permutations
case = 10
par_lat = list(map(list,zip(*_read_float_2D(gpath+"get_dvector_permutations_pLV.in."+str(case)))))
bas_vecs = list(map(list,zip(*_read_float_2D(gpath+"get_dvector_permutations_pd.in."+str(case)))))
LatDim = _read_int(gpath+"get_dvector_permutations_LatDim.in."+str(case))
eps = _read_float(gpath+"get_dvector_permutations_eps.in."+str(case))
dRPList_out = _read_RotPermList(gpath+"get_dvector_permutations_dRPList.out."+str(case))
dRPList = _get_dvector_permutations(par_lat,bas_vecs,LatDim,eps)
self._compare_outputs(dRPList,dRPList_out)
def test11(self):
from phenum.grouptheory import _get_dvector_permutations
case = 11
par_lat = list(map(list,zip(*_read_float_2D(gpath+"get_dvector_permutations_pLV.in."+str(case)))))
bas_vecs = list(map(list,zip(*_read_float_2D(gpath+"get_dvector_permutations_pd.in."+str(case)))))
LatDim = _read_int(gpath+"get_dvector_permutations_LatDim.in."+str(case))
eps = _read_float(gpath+"get_dvector_permutations_eps.in."+str(case))
dRPList_out = _read_RotPermList(gpath+"get_dvector_permutations_dRPList.out."+str(case))
dRPList = _get_dvector_permutations(par_lat,bas_vecs,LatDim,eps)
self._compare_outputs(dRPList,dRPList_out)
def test12(self):
from phenum.grouptheory import _get_dvector_permutations
case = 12
par_lat = list(map(list,zip(*_read_float_2D(gpath+"get_dvector_permutations_pLV.in."+str(case)))))
bas_vecs = list(map(list,zip(*_read_float_2D(gpath+"get_dvector_permutations_pd.in."+str(case)))))
LatDim = _read_int(gpath+"get_dvector_permutations_LatDim.in."+str(case))
eps = _read_float(gpath+"get_dvector_permutations_eps.in."+str(case))
dRPList_out = _read_RotPermList(gpath+"get_dvector_permutations_dRPList.out."+str(case))
dRPList = _get_dvector_permutations(par_lat,bas_vecs,LatDim,eps)
self._compare_outputs(dRPList,dRPList_out)
def test13(self):
from phenum.grouptheory import _get_dvector_permutations
case = 13
par_lat = list(map(list,zip(*_read_float_2D(gpath+"get_dvector_permutations_pLV.in."+str(case)))))
bas_vecs = list(map(list,zip(*_read_float_2D(gpath+"get_dvector_permutations_pd.in."+str(case)))))
LatDim = _read_int(gpath+"get_dvector_permutations_LatDim.in."+str(case))
eps = _read_float(gpath+"get_dvector_permutations_eps.in."+str(case))
dRPList_out = _read_RotPermList(gpath+"get_dvector_permutations_dRPList.out."+str(case))
dRPList = _get_dvector_permutations(par_lat,bas_vecs,LatDim,eps)
self._compare_outputs(dRPList,dRPList_out)
def test14(self):
from phenum.grouptheory import _get_dvector_permutations
case = 14
par_lat = list(map(list,zip(*_read_float_2D(gpath+"get_dvector_permutations_pLV.in."+str(case)))))
bas_vecs = list(map(list,zip(*_read_float_2D(gpath+"get_dvector_permutations_pd.in."+str(case)))))
LatDim = _read_int(gpath+"get_dvector_permutations_LatDim.in."+str(case))
eps = _read_float(gpath+"get_dvector_permutations_eps.in."+str(case))
dRPList_out = _read_RotPermList(gpath+"get_dvector_permutations_dRPList.out."+str(case))
dRPList = _get_dvector_permutations(par_lat,bas_vecs,LatDim,eps)
self._compare_outputs(dRPList,dRPList_out)
def test15(self):
from phenum.grouptheory import _get_dvector_permutations
case = 15
par_lat = list(map(list,zip(*_read_float_2D(gpath+"get_dvector_permutations_pLV.in."+str(case)))))
bas_vecs = list(map(list,zip(*_read_float_2D(gpath+"get_dvector_permutations_pd.in."+str(case)))))
LatDim = _read_int(gpath+"get_dvector_permutations_LatDim.in."+str(case))
eps = _read_float(gpath+"get_dvector_permutations_eps.in."+str(case))
dRPList_out = _read_RotPermList(gpath+"get_dvector_permutations_dRPList.out."+str(case))
dRPList = _get_dvector_permutations(par_lat,bas_vecs,LatDim,eps)
self._compare_outputs(dRPList,dRPList_out)
def test16(self):
from phenum.grouptheory import _get_dvector_permutations
case = 16
par_lat = list(map(list,zip(*_read_float_2D(gpath+"get_dvector_permutations_pLV.in."+str(case)))))
bas_vecs = list(map(list,zip(*_read_float_2D(gpath+"get_dvector_permutations_pd.in."+str(case)))))
LatDim = _read_int(gpath+"get_dvector_permutations_LatDim.in."+str(case))
eps = _read_float(gpath+"get_dvector_permutations_eps.in."+str(case))
dRPList_out = _read_RotPermList(gpath+"get_dvector_permutations_dRPList.out."+str(case))
dRPList = _get_dvector_permutations(par_lat,bas_vecs,LatDim,eps)
self._compare_outputs(dRPList,dRPList_out)
def test17(self):
from phenum.grouptheory import _get_dvector_permutations
case = 17
par_lat = list(map(list,zip(*_read_float_2D(gpath+"get_dvector_permutations_pLV.in."+str(case)))))
bas_vecs = list(map(list,zip(*_read_float_2D(gpath+"get_dvector_permutations_pd.in."+str(case)))))
LatDim = _read_int(gpath+"get_dvector_permutations_LatDim.in."+str(case))
eps = _read_float(gpath+"get_dvector_permutations_eps.in."+str(case))
dRPList_out = _read_RotPermList(gpath+"get_dvector_permutations_dRPList.out."+str(case))
dRPList = _get_dvector_permutations(par_lat,bas_vecs,LatDim,eps)
self._compare_outputs(dRPList,dRPList_out)
def test18(self):
from phenum.grouptheory import _get_dvector_permutations
case = 18
par_lat = list(map(list,zip(*_read_float_2D(gpath+"get_dvector_permutations_pLV.in."+str(case)))))
bas_vecs = list(map(list,zip(*_read_float_2D(gpath+"get_dvector_permutations_pd.in."+str(case)))))
LatDim = _read_int(gpath+"get_dvector_permutations_LatDim.in."+str(case))
eps = _read_float(gpath+"get_dvector_permutations_eps.in."+str(case))
dRPList_out = _read_RotPermList(gpath+"get_dvector_permutations_dRPList.out."+str(case))
dRPList = _get_dvector_permutations(par_lat,bas_vecs,LatDim,eps)
self._compare_outputs(dRPList,dRPList_out)
def test19(self):
from phenum.grouptheory import _get_dvector_permutations
case = 19
par_lat = list(map(list,zip(*_read_float_2D(gpath+"get_dvector_permutations_pLV.in."+str(case)))))
bas_vecs = list(map(list,zip(*_read_float_2D(gpath+"get_dvector_permutations_pd.in."+str(case)))))
LatDim = _read_int(gpath+"get_dvector_permutations_LatDim.in."+str(case))
eps = _read_float(gpath+"get_dvector_permutations_eps.in."+str(case))
dRPList_out = _read_RotPermList(gpath+"get_dvector_permutations_dRPList.out."+str(case))
dRPList = _get_dvector_permutations(par_lat,bas_vecs,LatDim,eps)
self._compare_outputs(dRPList,dRPList_out)
def test20(self):
from phenum.grouptheory import _get_dvector_permutations
case = 20
par_lat = list(map(list,zip(*_read_float_2D(gpath+"get_dvector_permutations_pLV.in."+str(case)))))
bas_vecs = list(map(list,zip(*_read_float_2D(gpath+"get_dvector_permutations_pd.in."+str(case)))))
LatDim = _read_int(gpath+"get_dvector_permutations_LatDim.in."+str(case))
eps = _read_float(gpath+"get_dvector_permutations_eps.in."+str(case))
dRPList_out = _read_RotPermList(gpath+"get_dvector_permutations_dRPList.out."+str(case))
dRPList = _get_dvector_permutations(par_lat,bas_vecs,LatDim,eps)
self._compare_outputs(dRPList,dRPList_out)
class TestGetRotationPermsLists(ut.TestCase):
"""Tests of the _get_rotation_perms_lists subroutine."""
def _compare_outputs(self,out1,out2):
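        # Helper: compares two lists of RotPermList objects entry by entry; nL
        # values of 0 and None are treated as equivalent, and site_perm lists
        # are sorted first since their ordering may differ between runs.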
if len(out1) == len(out2):
for t in range(len(out1)):
rot1 = out1[t]
rot2 = out2[t]
                if rot1.nL == 0 or rot1.nL is None:
                    if rot2.nL == 0 or rot2.nL is None:
                        # nL values of 0 and None are treated as equivalent (empty) here
                        pass
else:
self.assertEqual(rot1.nL,rot2.nL)
else:
self.assertEqual(rot1.nL,rot2.nL)
self.assertEqual(rot1.RotIndx, rot2.RotIndx)
                if (rot1.v is not None) and (rot2.v is not None):
if len(rot1.v) == len(rot2.v):
for i in range(len(rot1.v)):
for j in range(len(rot1.v[i])):
for k in range(len(rot1.v[i][j])):
self.assertAlmostEqual(rot1.v[i][j][k],rot2.v[i][j][k],places=12)
else:
self.assertEqual(len(rot1.v),len(rot2.v))
else:
self.assertEqual(rot1.v,rot2.v)
                if (rot1.perm.site_perm is not None) and (rot2.perm.site_perm is not None):
if len(rot1.perm.site_perm) == len(rot2.perm.site_perm):
rot1.perm.site_perm = sorted(rot1.perm.site_perm)
rot2.perm.site_perm = sorted(rot2.perm.site_perm)
for i in range(len(rot1.perm.site_perm)):
for j in range(len(rot1.perm.site_perm[i])):
self.assertEqual(rot1.perm.site_perm[i][j],rot2.perm.site_perm[i][j])
else:
self.assertEqual(len(rot1.perm.site_perm),len(rot2.perm.site_perm))
else:
self.assertEqual(rot1.perm.site_perm,rot2.perm.site_perm)
                if (rot1.perm.arrow_perm is not None) and (rot2.perm.arrow_perm is not None):
if len(rot1.perm.arrow_perm) == len(rot2.perm.arrow_perm):
for i in range(len(rot1.perm.arrow_perm)):
for j in range(len(rot1.perm.arrow_perm[i])):
self.assertEqual(rot1.perm.arrow_perm[i][j],rot2.perm.arrow_perm[i][j])
else:
self.assertEqual(len(rot1.perm.arrow_perm),len(rot2.perm.arrow_perm))
else:
self.assertEqual(rot1.perm.arrow_perm,rot2.perm.arrow_perm)
else:
self.assertEqual(len(out1),len(out2))
def test1(self):
from phenum.grouptheory import _get_rotation_perms_lists
case = 1
A = list(map(list,zip(*_read_float_2D(gpath+"get_rotation_perms_lists_A.in."+str(case)))))
HNF = _read_int_3D(gpath+"get_rotation_perms_lists_HNF.in."+str(case))
L = _read_int_3D(gpath+"get_rotation_perms_lists_L.in."+str(case))
SNF = _read_int_3D(gpath+"get_rotation_perms_lists_SNF.in."+str(case))
Op = _read_fixOp_1D(gpath+"get_rotation_perms_lists_Op.in."+str(case))
dperms = _read_RotPermList(gpath+"get_rotation_perms_lists_dperms.in."+str(case))
RPlist = _read_RotPermList_1D(gpath+"get_rotation_perms_lists_RPlist.in."+str(case))
eps = _read_float(gpath+"get_rotation_perms_lists_eps.in."+str(case))
out1 = _get_rotation_perms_lists(A,HNF,L,SNF,Op,RPlist,dperms,eps)
out2 = _read_RotPermList_1D(gpath+"get_rotation_perms_lists_RPlist.out."+str(case))
self._compare_outputs(out1,out2)
def test2(self):
from phenum.grouptheory import _get_rotation_perms_lists
case = 2
A = list(map(list,zip(*_read_float_2D(gpath+"get_rotation_perms_lists_A.in."+str(case)))))
HNF = _read_int_3D(gpath+"get_rotation_perms_lists_HNF.in."+str(case))
L = _read_int_3D(gpath+"get_rotation_perms_lists_L.in."+str(case))
SNF = _read_int_3D(gpath+"get_rotation_perms_lists_SNF.in."+str(case))
Op = _read_fixOp_1D(gpath+"get_rotation_perms_lists_Op.in."+str(case))
dperms = _read_RotPermList(gpath+"get_rotation_perms_lists_dperms.in."+str(case))
RPlist = _read_RotPermList_1D(gpath+"get_rotation_perms_lists_RPlist.in."+str(case))
eps = _read_float(gpath+"get_rotation_perms_lists_eps.in."+str(case))
out1 = _get_rotation_perms_lists(A,HNF,L,SNF,Op,RPlist,dperms,eps)
out2 = _read_RotPermList_1D(gpath+"get_rotation_perms_lists_RPlist.out."+str(case))
self._compare_outputs(out1,out2)
def test3(self):
from phenum.grouptheory import _get_rotation_perms_lists
case = 3
A = list(map(list,zip(*_read_float_2D(gpath+"get_rotation_perms_lists_A.in."+str(case)))))
HNF = _read_int_3D(gpath+"get_rotation_perms_lists_HNF.in."+str(case))
L = _read_int_3D(gpath+"get_rotation_perms_lists_L.in."+str(case))
SNF = _read_int_3D(gpath+"get_rotation_perms_lists_SNF.in."+str(case))
Op = _read_fixOp_1D(gpath+"get_rotation_perms_lists_Op.in."+str(case))
dperms = _read_RotPermList(gpath+"get_rotation_perms_lists_dperms.in."+str(case))
RPlist = _read_RotPermList_1D(gpath+"get_rotation_perms_lists_RPlist.in."+str(case))
eps = _read_float(gpath+"get_rotation_perms_lists_eps.in."+str(case))
out1 = _get_rotation_perms_lists(A,HNF,L,SNF,Op,RPlist,dperms,eps)
out2 = _read_RotPermList_1D(gpath+"get_rotation_perms_lists_RPlist.out."+str(case))
self._compare_outputs(out1,out2)
def test4(self):
from phenum.grouptheory import _get_rotation_perms_lists
case = 4
A = list(map(list,zip(*_read_float_2D(gpath+"get_rotation_perms_lists_A.in."+str(case)))))
HNF = _read_int_3D(gpath+"get_rotation_perms_lists_HNF.in."+str(case))
L = _read_int_3D(gpath+"get_rotation_perms_lists_L.in."+str(case))
SNF = _read_int_3D(gpath+"get_rotation_perms_lists_SNF.in."+str(case))
Op = _read_fixOp_1D(gpath+"get_rotation_perms_lists_Op.in."+str(case))
dperms = _read_RotPermList(gpath+"get_rotation_perms_lists_dperms.in."+str(case))
RPlist = _read_RotPermList_1D(gpath+"get_rotation_perms_lists_RPlist.in."+str(case))
eps = _read_float(gpath+"get_rotation_perms_lists_eps.in."+str(case))
out1 = _get_rotation_perms_lists(A,HNF,L,SNF,Op,RPlist,dperms,eps)
out2 = _read_RotPermList_1D(gpath+"get_rotation_perms_lists_RPlist.out."+str(case))
self._compare_outputs(out1,out2)
def test5(self):
from phenum.grouptheory import _get_rotation_perms_lists
case = 5
A = list(map(list,zip(*_read_float_2D(gpath+"get_rotation_perms_lists_A.in."+str(case)))))
HNF = _read_int_3D(gpath+"get_rotation_perms_lists_HNF.in."+str(case))
L = _read_int_3D(gpath+"get_rotation_perms_lists_L.in."+str(case))
SNF = _read_int_3D(gpath+"get_rotation_perms_lists_SNF.in."+str(case))
Op = _read_fixOp_1D(gpath+"get_rotation_perms_lists_Op.in."+str(case))
dperms = _read_RotPermList(gpath+"get_rotation_perms_lists_dperms.in."+str(case))
RPlist = _read_RotPermList_1D(gpath+"get_rotation_perms_lists_RPlist.in."+str(case))
eps = _read_float(gpath+"get_rotation_perms_lists_eps.in."+str(case))
out1 = _get_rotation_perms_lists(A,HNF,L,SNF,Op,RPlist,dperms,eps)
out2 = _read_RotPermList_1D(gpath+"get_rotation_perms_lists_RPlist.out."+str(case))
self._compare_outputs(out1,out2)
def test6(self):
from phenum.grouptheory import _get_rotation_perms_lists
case = 6
A = list(map(list,zip(*_read_float_2D(gpath+"get_rotation_perms_lists_A.in."+str(case)))))
HNF = _read_int_3D(gpath+"get_rotation_perms_lists_HNF.in."+str(case))
L = _read_int_3D(gpath+"get_rotation_perms_lists_L.in."+str(case))
SNF = _read_int_3D(gpath+"get_rotation_perms_lists_SNF.in."+str(case))
Op = _read_fixOp_1D(gpath+"get_rotation_perms_lists_Op.in."+str(case))
dperms = _read_RotPermList(gpath+"get_rotation_perms_lists_dperms.in."+str(case))
RPlist = _read_RotPermList_1D(gpath+"get_rotation_perms_lists_RPlist.in."+str(case))
eps = _read_float(gpath+"get_rotation_perms_lists_eps.in."+str(case))
out1 = _get_rotation_perms_lists(A,HNF,L,SNF,Op,RPlist,dperms,eps)
out2 = _read_RotPermList_1D(gpath+"get_rotation_perms_lists_RPlist.out."+str(case))
self._compare_outputs(out1,out2)
def test7(self):
from phenum.grouptheory import _get_rotation_perms_lists
case = 7
A = list(map(list,zip(*_read_float_2D(gpath+"get_rotation_perms_lists_A.in."+str(case)))))
HNF = _read_int_3D(gpath+"get_rotation_perms_lists_HNF.in."+str(case))
L = _read_int_3D(gpath+"get_rotation_perms_lists_L.in."+str(case))
SNF = _read_int_3D(gpath+"get_rotation_perms_lists_SNF.in."+str(case))
Op = _read_fixOp_1D(gpath+"get_rotation_perms_lists_Op.in."+str(case))
dperms = _read_RotPermList(gpath+"get_rotation_perms_lists_dperms.in."+str(case))
RPlist = _read_RotPermList_1D(gpath+"get_rotation_perms_lists_RPlist.in."+str(case))
eps = _read_float(gpath+"get_rotation_perms_lists_eps.in."+str(case))
out1 = _get_rotation_perms_lists(A,HNF,L,SNF,Op,RPlist,dperms,eps)
out2 = _read_RotPermList_1D(gpath+"get_rotation_perms_lists_RPlist.out."+str(case))
self._compare_outputs(out1,out2)
def test8(self):
from phenum.grouptheory import _get_rotation_perms_lists
case = 8
A = list(map(list,zip(*_read_float_2D(gpath+"get_rotation_perms_lists_A.in."+str(case)))))
HNF = _read_int_3D(gpath+"get_rotation_perms_lists_HNF.in."+str(case))
L = _read_int_3D(gpath+"get_rotation_perms_lists_L.in."+str(case))
SNF = _read_int_3D(gpath+"get_rotation_perms_lists_SNF.in."+str(case))
Op = _read_fixOp_1D(gpath+"get_rotation_perms_lists_Op.in."+str(case))
dperms = _read_RotPermList(gpath+"get_rotation_perms_lists_dperms.in."+str(case))
RPlist = _read_RotPermList_1D(gpath+"get_rotation_perms_lists_RPlist.in."+str(case))
eps = _read_float(gpath+"get_rotation_perms_lists_eps.in."+str(case))
out1 = _get_rotation_perms_lists(A,HNF,L,SNF,Op,RPlist,dperms,eps)
out2 = _read_RotPermList_1D(gpath+"get_rotation_perms_lists_RPlist.out."+str(case))
self._compare_outputs(out1,out2)
def test9(self):
from phenum.grouptheory import _get_rotation_perms_lists
case = 9
A = list(map(list,zip(*_read_float_2D(gpath+"get_rotation_perms_lists_A.in."+str(case)))))
HNF = _read_int_3D(gpath+"get_rotation_perms_lists_HNF.in."+str(case))
L = _read_int_3D(gpath+"get_rotation_perms_lists_L.in."+str(case))
SNF = _read_int_3D(gpath+"get_rotation_perms_lists_SNF.in."+str(case))
Op = _read_fixOp_1D(gpath+"get_rotation_perms_lists_Op.in."+str(case))
dperms = _read_RotPermList(gpath+"get_rotation_perms_lists_dperms.in."+str(case))
RPlist = _read_RotPermList_1D(gpath+"get_rotation_perms_lists_RPlist.in."+str(case))
eps = _read_float(gpath+"get_rotation_perms_lists_eps.in."+str(case))
out1 = _get_rotation_perms_lists(A,HNF,L,SNF,Op,RPlist,dperms,eps)
out2 = _read_RotPermList_1D(gpath+"get_rotation_perms_lists_RPlist.out."+str(case))
self._compare_outputs(out1,out2)
def test10(self):
from phenum.grouptheory import _get_rotation_perms_lists
case = 10
A = list(map(list,zip(*_read_float_2D(gpath+"get_rotation_perms_lists_A.in."+str(case)))))
HNF = _read_int_3D(gpath+"get_rotation_perms_lists_HNF.in."+str(case))
L = _read_int_3D(gpath+"get_rotation_perms_lists_L.in."+str(case))
SNF = _read_int_3D(gpath+"get_rotation_perms_lists_SNF.in."+str(case))
Op = _read_fixOp_1D(gpath+"get_rotation_perms_lists_Op.in."+str(case))
dperms = _read_RotPermList(gpath+"get_rotation_perms_lists_dperms.in."+str(case))
RPlist = _read_RotPermList_1D(gpath+"get_rotation_perms_lists_RPlist.in."+str(case))
eps = _read_float(gpath+"get_rotation_perms_lists_eps.in."+str(case))
out1 = _get_rotation_perms_lists(A,HNF,L,SNF,Op,RPlist,dperms,eps)
out2 = _read_RotPermList_1D(gpath+"get_rotation_perms_lists_RPlist.out."+str(case))
self._compare_outputs(out1,out2)
def test11(self):
from phenum.grouptheory import _get_rotation_perms_lists
case = 11
A = list(map(list,zip(*_read_float_2D(gpath+"get_rotation_perms_lists_A.in."+str(case)))))
HNF = _read_int_3D(gpath+"get_rotation_perms_lists_HNF.in."+str(case))
L = _read_int_3D(gpath+"get_rotation_perms_lists_L.in."+str(case))
SNF = _read_int_3D(gpath+"get_rotation_perms_lists_SNF.in."+str(case))
Op = _read_fixOp_1D(gpath+"get_rotation_perms_lists_Op.in."+str(case))
dperms = _read_RotPermList(gpath+"get_rotation_perms_lists_dperms.in."+str(case))
RPlist = _read_RotPermList_1D(gpath+"get_rotation_perms_lists_RPlist.in."+str(case))
eps = _read_float(gpath+"get_rotation_perms_lists_eps.in."+str(case))
out1 = _get_rotation_perms_lists(A,HNF,L,SNF,Op,RPlist,dperms,eps)
out2 = _read_RotPermList_1D(gpath+"get_rotation_perms_lists_RPlist.out."+str(case))
self._compare_outputs(out1,out2)
def test12(self):
from phenum.grouptheory import _get_rotation_perms_lists
case = 12
A = list(map(list,zip(*_read_float_2D(gpath+"get_rotation_perms_lists_A.in."+str(case)))))
HNF = _read_int_3D(gpath+"get_rotation_perms_lists_HNF.in."+str(case))
L = _read_int_3D(gpath+"get_rotation_perms_lists_L.in."+str(case))
SNF = _read_int_3D(gpath+"get_rotation_perms_lists_SNF.in."+str(case))
Op = _read_fixOp_1D(gpath+"get_rotation_perms_lists_Op.in."+str(case))
dperms = _read_RotPermList(gpath+"get_rotation_perms_lists_dperms.in."+str(case))
RPlist = _read_RotPermList_1D(gpath+"get_rotation_perms_lists_RPlist.in."+str(case))
eps = _read_float(gpath+"get_rotation_perms_lists_eps.in."+str(case))
out1 = _get_rotation_perms_lists(A,HNF,L,SNF,Op,RPlist,dperms,eps)
out2 = _read_RotPermList_1D(gpath+"get_rotation_perms_lists_RPlist.out."+str(case))
self._compare_outputs(out1,out2)
def test13(self):
from phenum.grouptheory import _get_rotation_perms_lists
case = 13
A = list(map(list,zip(*_read_float_2D(gpath+"get_rotation_perms_lists_A.in."+str(case)))))
HNF = _read_int_3D(gpath+"get_rotation_perms_lists_HNF.in."+str(case))
L = _read_int_3D(gpath+"get_rotation_perms_lists_L.in."+str(case))
SNF = _read_int_3D(gpath+"get_rotation_perms_lists_SNF.in."+str(case))
Op = _read_fixOp_1D(gpath+"get_rotation_perms_lists_Op.in."+str(case))
dperms = _read_RotPermList(gpath+"get_rotation_perms_lists_dperms.in."+str(case))
RPlist = _read_RotPermList_1D(gpath+"get_rotation_perms_lists_RPlist.in."+str(case))
eps = _read_float(gpath+"get_rotation_perms_lists_eps.in."+str(case))
out1 = _get_rotation_perms_lists(A,HNF,L,SNF,Op,RPlist,dperms,eps)
out2 = _read_RotPermList_1D(gpath+"get_rotation_perms_lists_RPlist.out."+str(case))
self._compare_outputs(out1,out2)
def test14(self):
from phenum.grouptheory import _get_rotation_perms_lists
case = 14
A = list(map(list,zip(*_read_float_2D(gpath+"get_rotation_perms_lists_A.in."+str(case)))))
HNF = _read_int_3D(gpath+"get_rotation_perms_lists_HNF.in."+str(case))
L = _read_int_3D(gpath+"get_rotation_perms_lists_L.in."+str(case))
SNF = _read_int_3D(gpath+"get_rotation_perms_lists_SNF.in."+str(case))
Op = _read_fixOp_1D(gpath+"get_rotation_perms_lists_Op.in."+str(case))
dperms = _read_RotPermList(gpath+"get_rotation_perms_lists_dperms.in."+str(case))
RPlist = _read_RotPermList_1D(gpath+"get_rotation_perms_lists_RPlist.in."+str(case))
eps = _read_float(gpath+"get_rotation_perms_lists_eps.in."+str(case))
out1 = _get_rotation_perms_lists(A,HNF,L,SNF,Op,RPlist,dperms,eps)
out2 = _read_RotPermList_1D(gpath+"get_rotation_perms_lists_RPlist.out."+str(case))
self._compare_outputs(out1,out2)
def test15(self):
from phenum.grouptheory import _get_rotation_perms_lists
case = 15
A = list(map(list,zip(*_read_float_2D(gpath+"get_rotation_perms_lists_A.in."+str(case)))))
HNF = _read_int_3D(gpath+"get_rotation_perms_lists_HNF.in."+str(case))
L = _read_int_3D(gpath+"get_rotation_perms_lists_L.in."+str(case))
SNF = _read_int_3D(gpath+"get_rotation_perms_lists_SNF.in."+str(case))
Op = _read_fixOp_1D(gpath+"get_rotation_perms_lists_Op.in."+str(case))
dperms = _read_RotPermList(gpath+"get_rotation_perms_lists_dperms.in."+str(case))
RPlist = _read_RotPermList_1D(gpath+"get_rotation_perms_lists_RPlist.in."+str(case))
eps = _read_float(gpath+"get_rotation_perms_lists_eps.in."+str(case))
out1 = _get_rotation_perms_lists(A,HNF,L,SNF,Op,RPlist,dperms,eps)
out2 = _read_RotPermList_1D(gpath+"get_rotation_perms_lists_RPlist.out."+str(case))
self._compare_outputs(out1,out2)
def test16(self):
from phenum.grouptheory import _get_rotation_perms_lists
case = 16
A = list(map(list,zip(*_read_float_2D(gpath+"get_rotation_perms_lists_A.in."+str(case)))))
HNF = _read_int_3D(gpath+"get_rotation_perms_lists_HNF.in."+str(case))
L = _read_int_3D(gpath+"get_rotation_perms_lists_L.in."+str(case))
SNF = _read_int_3D(gpath+"get_rotation_perms_lists_SNF.in."+str(case))
Op = _read_fixOp_1D(gpath+"get_rotation_perms_lists_Op.in."+str(case))
dperms = _read_RotPermList(gpath+"get_rotation_perms_lists_dperms.in."+str(case))
RPlist = _read_RotPermList_1D(gpath+"get_rotation_perms_lists_RPlist.in."+str(case))
eps = _read_float(gpath+"get_rotation_perms_lists_eps.in."+str(case))
out1 = _get_rotation_perms_lists(A,HNF,L,SNF,Op,RPlist,dperms,eps)
out2 = _read_RotPermList_1D(gpath+"get_rotation_perms_lists_RPlist.out."+str(case))
self._compare_outputs(out1,out2)
def test17(self):
from phenum.grouptheory import _get_rotation_perms_lists
case = 17
A = list(map(list,zip(*_read_float_2D(gpath+"get_rotation_perms_lists_A.in."+str(case)))))
HNF = _read_int_3D(gpath+"get_rotation_perms_lists_HNF.in."+str(case))
L = _read_int_3D(gpath+"get_rotation_perms_lists_L.in."+str(case))
SNF = _read_int_3D(gpath+"get_rotation_perms_lists_SNF.in."+str(case))
Op = _read_fixOp_1D(gpath+"get_rotation_perms_lists_Op.in."+str(case))
dperms = _read_RotPermList(gpath+"get_rotation_perms_lists_dperms.in."+str(case))
RPlist = _read_RotPermList_1D(gpath+"get_rotation_perms_lists_RPlist.in."+str(case))
eps = _read_float(gpath+"get_rotation_perms_lists_eps.in."+str(case))
out1 = _get_rotation_perms_lists(A,HNF,L,SNF,Op,RPlist,dperms,eps)
out2 = _read_RotPermList_1D(gpath+"get_rotation_perms_lists_RPlist.out."+str(case))
self._compare_outputs(out1,out2)
def test18(self):
from phenum.grouptheory import _get_rotation_perms_lists
case = 18
A = list(map(list,zip(*_read_float_2D(gpath+"get_rotation_perms_lists_A.in."+str(case)))))
HNF = _read_int_3D(gpath+"get_rotation_perms_lists_HNF.in."+str(case))
L = _read_int_3D(gpath+"get_rotation_perms_lists_L.in."+str(case))
SNF = _read_int_3D(gpath+"get_rotation_perms_lists_SNF.in."+str(case))
Op = _read_fixOp_1D(gpath+"get_rotation_perms_lists_Op.in."+str(case))
dperms = _read_RotPermList(gpath+"get_rotation_perms_lists_dperms.in."+str(case))
RPlist = _read_RotPermList_1D(gpath+"get_rotation_perms_lists_RPlist.in."+str(case))
eps = _read_float(gpath+"get_rotation_perms_lists_eps.in."+str(case))
out1 = _get_rotation_perms_lists(A,HNF,L,SNF,Op,RPlist,dperms,eps)
out2 = _read_RotPermList_1D(gpath+"get_rotation_perms_lists_RPlist.out."+str(case))
self._compare_outputs(out1,out2)
def test19(self):
from phenum.grouptheory import _get_rotation_perms_lists
case = 19
A = list(map(list,zip(*_read_float_2D(gpath+"get_rotation_perms_lists_A.in."+str(case)))))
HNF = _read_int_3D(gpath+"get_rotation_perms_lists_HNF.in."+str(case))
L = _read_int_3D(gpath+"get_rotation_perms_lists_L.in."+str(case))
SNF = _read_int_3D(gpath+"get_rotation_perms_lists_SNF.in."+str(case))
Op = _read_fixOp_1D(gpath+"get_rotation_perms_lists_Op.in."+str(case))
dperms = _read_RotPermList(gpath+"get_rotation_perms_lists_dperms.in."+str(case))
RPlist = _read_RotPermList_1D(gpath+"get_rotation_perms_lists_RPlist.in."+str(case))
eps = _read_float(gpath+"get_rotation_perms_lists_eps.in."+str(case))
out1 = _get_rotation_perms_lists(A,HNF,L,SNF,Op,RPlist,dperms,eps)
out2 = _read_RotPermList_1D(gpath+"get_rotation_perms_lists_RPlist.out."+str(case))
self._compare_outputs(out1,out2)
def test20(self):
from phenum.grouptheory import _get_rotation_perms_lists
case = 20
A = list(map(list,zip(*_read_float_2D(gpath+"get_rotation_perms_lists_A.in."+str(case)))))
HNF = _read_int_3D(gpath+"get_rotation_perms_lists_HNF.in."+str(case))
L = _read_int_3D(gpath+"get_rotation_perms_lists_L.in."+str(case))
SNF = _read_int_3D(gpath+"get_rotation_perms_lists_SNF.in."+str(case))
Op = _read_fixOp_1D(gpath+"get_rotation_perms_lists_Op.in."+str(case))
dperms = _read_RotPermList(gpath+"get_rotation_perms_lists_dperms.in."+str(case))
RPlist = _read_RotPermList_1D(gpath+"get_rotation_perms_lists_RPlist.in."+str(case))
eps = _read_float(gpath+"get_rotation_perms_lists_eps.in."+str(case))
out1 = _get_rotation_perms_lists(A,HNF,L,SNF,Op,RPlist,dperms,eps)
out2 = _read_RotPermList_1D(gpath+"get_rotation_perms_lists_RPlist.out."+str(case))
self._compare_outputs(out1,out2)
def test21(self):
from phenum.grouptheory import _get_rotation_perms_lists
case = 21
A = list(map(list,zip(*_read_float_2D(gpath+"get_rotation_perms_lists_A.in."+str(case)))))
HNF = _read_int_3D(gpath+"get_rotation_perms_lists_HNF.in."+str(case))
L = _read_int_3D(gpath+"get_rotation_perms_lists_L.in."+str(case))
SNF = _read_int_3D(gpath+"get_rotation_perms_lists_SNF.in."+str(case))
Op = _read_fixOp_1D(gpath+"get_rotation_perms_lists_Op.in."+str(case))
dperms = _read_RotPermList(gpath+"get_rotation_perms_lists_dperms.in."+str(case))
RPlist = _read_RotPermList_1D(gpath+"get_rotation_perms_lists_RPlist.in."+str(case))
eps = _read_float(gpath+"get_rotation_perms_lists_eps.in."+str(case))
out1 = _get_rotation_perms_lists(A,HNF,L,SNF,Op,RPlist,dperms,eps)
out2 = _read_RotPermList_1D(gpath+"get_rotation_perms_lists_RPlist.out."+str(case))
self._compare_outputs(out1,out2)
def test22(self):
from phenum.grouptheory import _get_rotation_perms_lists
case = 22
A = list(map(list,zip(*_read_float_2D(gpath+"get_rotation_perms_lists_A.in."+str(case)))))
HNF = _read_int_3D(gpath+"get_rotation_perms_lists_HNF.in."+str(case))
L = _read_int_3D(gpath+"get_rotation_perms_lists_L.in."+str(case))
SNF = _read_int_3D(gpath+"get_rotation_perms_lists_SNF.in."+str(case))
Op = _read_fixOp_1D(gpath+"get_rotation_perms_lists_Op.in."+str(case))
dperms = _read_RotPermList(gpath+"get_rotation_perms_lists_dperms.in."+str(case))
RPlist = _read_RotPermList_1D(gpath+"get_rotation_perms_lists_RPlist.in."+str(case))
eps = _read_float(gpath+"get_rotation_perms_lists_eps.in."+str(case))
out1 = _get_rotation_perms_lists(A,HNF,L,SNF,Op,RPlist,dperms,eps)
out2 = _read_RotPermList_1D(gpath+"get_rotation_perms_lists_RPlist.out."+str(case))
self._compare_outputs(out1,out2)
def test23(self):
from phenum.grouptheory import _get_rotation_perms_lists
case = 23
A = list(map(list,zip(*_read_float_2D(gpath+"get_rotation_perms_lists_A.in."+str(case)))))
HNF = _read_int_3D(gpath+"get_rotation_perms_lists_HNF.in."+str(case))
L = _read_int_3D(gpath+"get_rotation_perms_lists_L.in."+str(case))
SNF = _read_int_3D(gpath+"get_rotation_perms_lists_SNF.in."+str(case))
Op = _read_fixOp_1D(gpath+"get_rotation_perms_lists_Op.in."+str(case))
dperms = _read_RotPermList(gpath+"get_rotation_perms_lists_dperms.in."+str(case))
RPlist = _read_RotPermList_1D(gpath+"get_rotation_perms_lists_RPlist.in."+str(case))
eps = _read_float(gpath+"get_rotation_perms_lists_eps.in."+str(case))
out1 = _get_rotation_perms_lists(A,HNF,L,SNF,Op,RPlist,dperms,eps)
out2 = _read_RotPermList_1D(gpath+"get_rotation_perms_lists_RPlist.out."+str(case))
self._compare_outputs(out1,out2)
def test24(self):
from phenum.grouptheory import _get_rotation_perms_lists
case = 24
A = list(map(list,zip(*_read_float_2D(gpath+"get_rotation_perms_lists_A.in."+str(case)))))
HNF = _read_int_3D(gpath+"get_rotation_perms_lists_HNF.in."+str(case))
L = _read_int_3D(gpath+"get_rotation_perms_lists_L.in."+str(case))
SNF = _read_int_3D(gpath+"get_rotation_perms_lists_SNF.in."+str(case))
Op = _read_fixOp_1D(gpath+"get_rotation_perms_lists_Op.in."+str(case))
dperms = _read_RotPermList(gpath+"get_rotation_perms_lists_dperms.in."+str(case))
RPlist = _read_RotPermList_1D(gpath+"get_rotation_perms_lists_RPlist.in."+str(case))
eps = _read_float(gpath+"get_rotation_perms_lists_eps.in."+str(case))
out1 = _get_rotation_perms_lists(A,HNF,L,SNF,Op,RPlist,dperms,eps)
out2 = _read_RotPermList_1D(gpath+"get_rotation_perms_lists_RPlist.out."+str(case))
self._compare_outputs(out1,out2)
def test25(self):
from phenum.grouptheory import _get_rotation_perms_lists
case = 25
A = list(map(list,zip(*_read_float_2D(gpath+"get_rotation_perms_lists_A.in."+str(case)))))
HNF = _read_int_3D(gpath+"get_rotation_perms_lists_HNF.in."+str(case))
L = _read_int_3D(gpath+"get_rotation_perms_lists_L.in."+str(case))
SNF = _read_int_3D(gpath+"get_rotation_perms_lists_SNF.in."+str(case))
Op = _read_fixOp_1D(gpath+"get_rotation_perms_lists_Op.in."+str(case))
dperms = _read_RotPermList(gpath+"get_rotation_perms_lists_dperms.in."+str(case))
RPlist = _read_RotPermList_1D(gpath+"get_rotation_perms_lists_RPlist.in."+str(case))
eps = _read_float(gpath+"get_rotation_perms_lists_eps.in."+str(case))
out1 = _get_rotation_perms_lists(A,HNF,L,SNF,Op,RPlist,dperms,eps)
out2 = _read_RotPermList_1D(gpath+"get_rotation_perms_lists_RPlist.out."+str(case))
self._compare_outputs(out1,out2)
def test26(self):
from phenum.grouptheory import _get_rotation_perms_lists
case = 26
A = list(map(list,zip(*_read_float_2D(gpath+"get_rotation_perms_lists_A.in."+str(case)))))
HNF = _read_int_3D(gpath+"get_rotation_perms_lists_HNF.in."+str(case))
L = _read_int_3D(gpath+"get_rotation_perms_lists_L.in."+str(case))
SNF = _read_int_3D(gpath+"get_rotation_perms_lists_SNF.in."+str(case))
Op = _read_fixOp_1D(gpath+"get_rotation_perms_lists_Op.in."+str(case))
dperms = _read_RotPermList(gpath+"get_rotation_perms_lists_dperms.in."+str(case))
RPlist = _read_RotPermList_1D(gpath+"get_rotation_perms_lists_RPlist.in."+str(case))
eps = _read_float(gpath+"get_rotation_perms_lists_eps.in."+str(case))
out1 = _get_rotation_perms_lists(A,HNF,L,SNF,Op,RPlist,dperms,eps)
out2 = _read_RotPermList_1D(gpath+"get_rotation_perms_lists_RPlist.out."+str(case))
self._compare_outputs(out1,out2)
def test27(self):
from phenum.grouptheory import _get_rotation_perms_lists
case = 27
A = list(map(list,zip(*_read_float_2D(gpath+"get_rotation_perms_lists_A.in."+str(case)))))
HNF = _read_int_3D(gpath+"get_rotation_perms_lists_HNF.in."+str(case))
L = _read_int_3D(gpath+"get_rotation_perms_lists_L.in."+str(case))
SNF = _read_int_3D(gpath+"get_rotation_perms_lists_SNF.in."+str(case))
Op = _read_fixOp_1D(gpath+"get_rotation_perms_lists_Op.in."+str(case))
dperms = _read_RotPermList(gpath+"get_rotation_perms_lists_dperms.in."+str(case))
RPlist = _read_RotPermList_1D(gpath+"get_rotation_perms_lists_RPlist.in."+str(case))
eps = _read_float(gpath+"get_rotation_perms_lists_eps.in."+str(case))
out1 = _get_rotation_perms_lists(A,HNF,L,SNF,Op,RPlist,dperms,eps)
out2 = _read_RotPermList_1D(gpath+"get_rotation_perms_lists_RPlist.out."+str(case))
self._compare_outputs(out1,out2)
def test28(self):
from phenum.grouptheory import _get_rotation_perms_lists
case = 28
A = list(map(list,zip(*_read_float_2D(gpath+"get_rotation_perms_lists_A.in."+str(case)))))
HNF = _read_int_3D(gpath+"get_rotation_perms_lists_HNF.in."+str(case))
L = _read_int_3D(gpath+"get_rotation_perms_lists_L.in."+str(case))
SNF = _read_int_3D(gpath+"get_rotation_perms_lists_SNF.in."+str(case))
Op = _read_fixOp_1D(gpath+"get_rotation_perms_lists_Op.in."+str(case))
dperms = _read_RotPermList(gpath+"get_rotation_perms_lists_dperms.in."+str(case))
RPlist = _read_RotPermList_1D(gpath+"get_rotation_perms_lists_RPlist.in."+str(case))
eps = _read_float(gpath+"get_rotation_perms_lists_eps.in."+str(case))
out1 = _get_rotation_perms_lists(A,HNF,L,SNF,Op,RPlist,dperms,eps)
out2 = _read_RotPermList_1D(gpath+"get_rotation_perms_lists_RPlist.out."+str(case))
self._compare_outputs(out1,out2)
def test29(self):
from phenum.grouptheory import _get_rotation_perms_lists
case = 29
A = list(map(list,zip(*_read_float_2D(gpath+"get_rotation_perms_lists_A.in."+str(case)))))
HNF = _read_int_3D(gpath+"get_rotation_perms_lists_HNF.in."+str(case))
L = _read_int_3D(gpath+"get_rotation_perms_lists_L.in."+str(case))
SNF = _read_int_3D(gpath+"get_rotation_perms_lists_SNF.in."+str(case))
Op = _read_fixOp_1D(gpath+"get_rotation_perms_lists_Op.in."+str(case))
dperms = _read_RotPermList(gpath+"get_rotation_perms_lists_dperms.in."+str(case))
RPlist = _read_RotPermList_1D(gpath+"get_rotation_perms_lists_RPlist.in."+str(case))
eps = _read_float(gpath+"get_rotation_perms_lists_eps.in."+str(case))
out1 = _get_rotation_perms_lists(A,HNF,L,SNF,Op,RPlist,dperms,eps)
out2 = _read_RotPermList_1D(gpath+"get_rotation_perms_lists_RPlist.out."+str(case))
self._compare_outputs(out1,out2)
def test30(self):
from phenum.grouptheory import _get_rotation_perms_lists
case = 30
A = list(map(list,zip(*_read_float_2D(gpath+"get_rotation_perms_lists_A.in."+str(case)))))
HNF = _read_int_3D(gpath+"get_rotation_perms_lists_HNF.in."+str(case))
L = _read_int_3D(gpath+"get_rotation_perms_lists_L.in."+str(case))
SNF = _read_int_3D(gpath+"get_rotation_perms_lists_SNF.in."+str(case))
Op = _read_fixOp_1D(gpath+"get_rotation_perms_lists_Op.in."+str(case))
dperms = _read_RotPermList(gpath+"get_rotation_perms_lists_dperms.in."+str(case))
RPlist = _read_RotPermList_1D(gpath+"get_rotation_perms_lists_RPlist.in."+str(case))
eps = _read_float(gpath+"get_rotation_perms_lists_eps.in."+str(case))
out1 = _get_rotation_perms_lists(A,HNF,L,SNF,Op,RPlist,dperms,eps)
out2 = _read_RotPermList_1D(gpath+"get_rotation_perms_lists_RPlist.out."+str(case))
self._compare_outputs(out1,out2)
def test31(self):
from phenum.grouptheory import _get_rotation_perms_lists
case = 31
A = list(map(list,zip(*_read_float_2D(gpath+"get_rotation_perms_lists_A.in."+str(case)))))
HNF = _read_int_3D(gpath+"get_rotation_perms_lists_HNF.in."+str(case))
L = _read_int_3D(gpath+"get_rotation_perms_lists_L.in."+str(case))
SNF = _read_int_3D(gpath+"get_rotation_perms_lists_SNF.in."+str(case))
Op = _read_fixOp_1D(gpath+"get_rotation_perms_lists_Op.in."+str(case))
dperms = _read_RotPermList(gpath+"get_rotation_perms_lists_dperms.in."+str(case))
RPlist = _read_RotPermList_1D(gpath+"get_rotation_perms_lists_RPlist.in."+str(case))
eps = _read_float(gpath+"get_rotation_perms_lists_eps.in."+str(case))
out1 = _get_rotation_perms_lists(A,HNF,L,SNF,Op,RPlist,dperms,eps)
out2 = _read_RotPermList_1D(gpath+"get_rotation_perms_lists_RPlist.out."+str(case))
self._compare_outputs(out1,out2)
def test32(self):
from phenum.grouptheory import _get_rotation_perms_lists
case = 32
A = list(map(list,zip(*_read_float_2D(gpath+"get_rotation_perms_lists_A.in."+str(case)))))
HNF = _read_int_3D(gpath+"get_rotation_perms_lists_HNF.in."+str(case))
L = _read_int_3D(gpath+"get_rotation_perms_lists_L.in."+str(case))
SNF = _read_int_3D(gpath+"get_rotation_perms_lists_SNF.in."+str(case))
Op = _read_fixOp_1D(gpath+"get_rotation_perms_lists_Op.in."+str(case))
dperms = _read_RotPermList(gpath+"get_rotation_perms_lists_dperms.in."+str(case))
RPlist = _read_RotPermList_1D(gpath+"get_rotation_perms_lists_RPlist.in."+str(case))
eps = _read_float(gpath+"get_rotation_perms_lists_eps.in."+str(case))
out1 = _get_rotation_perms_lists(A,HNF,L,SNF,Op,RPlist,dperms,eps)
out2 = _read_RotPermList_1D(gpath+"get_rotation_perms_lists_RPlist.out."+str(case))
self._compare_outputs(out1,out2)
def test33(self):
from phenum.grouptheory import _get_rotation_perms_lists
case = 33
A = list(map(list,zip(*_read_float_2D(gpath+"get_rotation_perms_lists_A.in."+str(case)))))
HNF = _read_int_3D(gpath+"get_rotation_perms_lists_HNF.in."+str(case))
L = _read_int_3D(gpath+"get_rotation_perms_lists_L.in."+str(case))
SNF = _read_int_3D(gpath+"get_rotation_perms_lists_SNF.in."+str(case))
Op = _read_fixOp_1D(gpath+"get_rotation_perms_lists_Op.in."+str(case))
dperms = _read_RotPermList(gpath+"get_rotation_perms_lists_dperms.in."+str(case))
RPlist = _read_RotPermList_1D(gpath+"get_rotation_perms_lists_RPlist.in."+str(case))
eps = _read_float(gpath+"get_rotation_perms_lists_eps.in."+str(case))
out1 = _get_rotation_perms_lists(A,HNF,L,SNF,Op,RPlist,dperms,eps)
out2 = _read_RotPermList_1D(gpath+"get_rotation_perms_lists_RPlist.out."+str(case))
self._compare_outputs(out1,out2)
def test34(self):
from phenum.grouptheory import _get_rotation_perms_lists
case = 34
A = list(map(list,zip(*_read_float_2D(gpath+"get_rotation_perms_lists_A.in."+str(case)))))
HNF = _read_int_3D(gpath+"get_rotation_perms_lists_HNF.in."+str(case))
L = _read_int_3D(gpath+"get_rotation_perms_lists_L.in."+str(case))
SNF = _read_int_3D(gpath+"get_rotation_perms_lists_SNF.in."+str(case))
Op = _read_fixOp_1D(gpath+"get_rotation_perms_lists_Op.in."+str(case))
dperms = _read_RotPermList(gpath+"get_rotation_perms_lists_dperms.in."+str(case))
RPlist = _read_RotPermList_1D(gpath+"get_rotation_perms_lists_RPlist.in."+str(case))
eps = _read_float(gpath+"get_rotation_perms_lists_eps.in."+str(case))
out1 = _get_rotation_perms_lists(A,HNF,L,SNF,Op,RPlist,dperms,eps)
out2 = _read_RotPermList_1D(gpath+"get_rotation_perms_lists_RPlist.out."+str(case))
self._compare_outputs(out1,out2)
def test35(self):
from phenum.grouptheory import _get_rotation_perms_lists
case = 35
A = list(map(list,zip(*_read_float_2D(gpath+"get_rotation_perms_lists_A.in."+str(case)))))
HNF = _read_int_3D(gpath+"get_rotation_perms_lists_HNF.in."+str(case))
L = _read_int_3D(gpath+"get_rotation_perms_lists_L.in."+str(case))
SNF = _read_int_3D(gpath+"get_rotation_perms_lists_SNF.in."+str(case))
Op = _read_fixOp_1D(gpath+"get_rotation_perms_lists_Op.in."+str(case))
dperms = _read_RotPermList(gpath+"get_rotation_perms_lists_dperms.in."+str(case))
RPlist = _read_RotPermList_1D(gpath+"get_rotation_perms_lists_RPlist.in."+str(case))
eps = _read_float(gpath+"get_rotation_perms_lists_eps.in."+str(case))
out1 = _get_rotation_perms_lists(A,HNF,L,SNF,Op,RPlist,dperms,eps)
out2 = _read_RotPermList_1D(gpath+"get_rotation_perms_lists_RPlist.out."+str(case))
self._compare_outputs(out1,out2)
def test36(self):
from phenum.grouptheory import _get_rotation_perms_lists
case = 36
A = list(map(list,zip(*_read_float_2D(gpath+"get_rotation_perms_lists_A.in."+str(case)))))
HNF = _read_int_3D(gpath+"get_rotation_perms_lists_HNF.in."+str(case))
L = _read_int_3D(gpath+"get_rotation_perms_lists_L.in."+str(case))
SNF = _read_int_3D(gpath+"get_rotation_perms_lists_SNF.in."+str(case))
Op = _read_fixOp_1D(gpath+"get_rotation_perms_lists_Op.in."+str(case))
dperms = _read_RotPermList(gpath+"get_rotation_perms_lists_dperms.in."+str(case))
RPlist = _read_RotPermList_1D(gpath+"get_rotation_perms_lists_RPlist.in."+str(case))
eps = _read_float(gpath+"get_rotation_perms_lists_eps.in."+str(case))
out1 = _get_rotation_perms_lists(A,HNF,L,SNF,Op,RPlist,dperms,eps)
out2 = _read_RotPermList_1D(gpath+"get_rotation_perms_lists_RPlist.out."+str(case))
self._compare_outputs(out1,out2)
def test37(self):
from phenum.grouptheory import _get_rotation_perms_lists
case = 37
A = list(map(list,zip(*_read_float_2D(gpath+"get_rotation_perms_lists_A.in."+str(case)))))
HNF = _read_int_3D(gpath+"get_rotation_perms_lists_HNF.in."+str(case))
L = _read_int_3D(gpath+"get_rotation_perms_lists_L.in."+str(case))
SNF = _read_int_3D(gpath+"get_rotation_perms_lists_SNF.in."+str(case))
Op = _read_fixOp_1D(gpath+"get_rotation_perms_lists_Op.in."+str(case))
dperms = _read_RotPermList(gpath+"get_rotation_perms_lists_dperms.in."+str(case))
RPlist = _read_RotPermList_1D(gpath+"get_rotation_perms_lists_RPlist.in."+str(case))
eps = _read_float(gpath+"get_rotation_perms_lists_eps.in."+str(case))
out1 = _get_rotation_perms_lists(A,HNF,L,SNF,Op,RPlist,dperms,eps)
out2 = _read_RotPermList_1D(gpath+"get_rotation_perms_lists_RPlist.out."+str(case))
self._compare_outputs(out1,out2)
def test38(self):
from phenum.grouptheory import _get_rotation_perms_lists
case = 38
A = list(map(list,zip(*_read_float_2D(gpath+"get_rotation_perms_lists_A.in."+str(case)))))
HNF = _read_int_3D(gpath+"get_rotation_perms_lists_HNF.in."+str(case))
L = _read_int_3D(gpath+"get_rotation_perms_lists_L.in."+str(case))
SNF = _read_int_3D(gpath+"get_rotation_perms_lists_SNF.in."+str(case))
Op = _read_fixOp_1D(gpath+"get_rotation_perms_lists_Op.in."+str(case))
dperms = _read_RotPermList(gpath+"get_rotation_perms_lists_dperms.in."+str(case))
RPlist = _read_RotPermList_1D(gpath+"get_rotation_perms_lists_RPlist.in."+str(case))
eps = _read_float(gpath+"get_rotation_perms_lists_eps.in."+str(case))
out1 = _get_rotation_perms_lists(A,HNF,L,SNF,Op,RPlist,dperms,eps)
out2 = _read_RotPermList_1D(gpath+"get_rotation_perms_lists_RPlist.out."+str(case))
self._compare_outputs(out1,out2)
def test39(self):
from phenum.grouptheory import _get_rotation_perms_lists
case = 39
A = list(map(list,zip(*_read_float_2D(gpath+"get_rotation_perms_lists_A.in."+str(case)))))
HNF = _read_int_3D(gpath+"get_rotation_perms_lists_HNF.in."+str(case))
L = _read_int_3D(gpath+"get_rotation_perms_lists_L.in."+str(case))
SNF = _read_int_3D(gpath+"get_rotation_perms_lists_SNF.in."+str(case))
Op = _read_fixOp_1D(gpath+"get_rotation_perms_lists_Op.in."+str(case))
dperms = _read_RotPermList(gpath+"get_rotation_perms_lists_dperms.in."+str(case))
RPlist = _read_RotPermList_1D(gpath+"get_rotation_perms_lists_RPlist.in."+str(case))
eps = _read_float(gpath+"get_rotation_perms_lists_eps.in."+str(case))
out1 = _get_rotation_perms_lists(A,HNF,L,SNF,Op,RPlist,dperms,eps)
out2 = _read_RotPermList_1D(gpath+"get_rotation_perms_lists_RPlist.out."+str(case))
self._compare_outputs(out1,out2)
def test50(self):
from phenum.grouptheory import _get_rotation_perms_lists
case = 50
A = list(map(list,zip(*_read_float_2D(gpath+"get_rotation_perms_lists_A.in."+str(case)))))
HNF = _read_int_3D(gpath+"get_rotation_perms_lists_HNF.in."+str(case))
L = _read_int_3D(gpath+"get_rotation_perms_lists_L.in."+str(case))
SNF = _read_int_3D(gpath+"get_rotation_perms_lists_SNF.in."+str(case))
Op = _read_fixOp_1D(gpath+"get_rotation_perms_lists_Op.in."+str(case))
dperms = _read_RotPermList(gpath+"get_rotation_perms_lists_dperms.in."+str(case))
RPlist = _read_RotPermList_1D(gpath+"get_rotation_perms_lists_RPlist.in."+str(case))
eps = _read_float(gpath+"get_rotation_perms_lists_eps.in."+str(case))
out1 = _get_rotation_perms_lists(A,HNF,L,SNF,Op,RPlist,dperms,eps)
out2 = _read_RotPermList_1D(gpath+"get_rotation_perms_lists_RPlist.out."+str(case))
self._compare_outputs(out1,out2)
def test51(self):
from phenum.grouptheory import _get_rotation_perms_lists
case = 51
A = list(map(list,zip(*_read_float_2D(gpath+"get_rotation_perms_lists_A.in."+str(case)))))
HNF = _read_int_3D(gpath+"get_rotation_perms_lists_HNF.in."+str(case))
L = _read_int_3D(gpath+"get_rotation_perms_lists_L.in."+str(case))
SNF = _read_int_3D(gpath+"get_rotation_perms_lists_SNF.in."+str(case))
Op = _read_fixOp_1D(gpath+"get_rotation_perms_lists_Op.in."+str(case))
dperms = _read_RotPermList(gpath+"get_rotation_perms_lists_dperms.in."+str(case))
RPlist = _read_RotPermList_1D(gpath+"get_rotation_perms_lists_RPlist.in."+str(case))
eps = _read_float(gpath+"get_rotation_perms_lists_eps.in."+str(case))
out1 = _get_rotation_perms_lists(A,HNF,L,SNF,Op,RPlist,dperms,eps)
out2 = _read_RotPermList_1D(gpath+"get_rotation_perms_lists_RPlist.out."+str(case))
self._compare_outputs(out1,out2)
def test52(self):
from phenum.grouptheory import _get_rotation_perms_lists
case = 52
A = list(map(list,zip(*_read_float_2D(gpath+"get_rotation_perms_lists_A.in."+str(case)))))
HNF = _read_int_3D(gpath+"get_rotation_perms_lists_HNF.in."+str(case))
L = _read_int_3D(gpath+"get_rotation_perms_lists_L.in."+str(case))
SNF = _read_int_3D(gpath+"get_rotation_perms_lists_SNF.in."+str(case))
Op = _read_fixOp_1D(gpath+"get_rotation_perms_lists_Op.in."+str(case))
dperms = _read_RotPermList(gpath+"get_rotation_perms_lists_dperms.in."+str(case))
RPlist = _read_RotPermList_1D(gpath+"get_rotation_perms_lists_RPlist.in."+str(case))
eps = _read_float(gpath+"get_rotation_perms_lists_eps.in."+str(case))
out1 = _get_rotation_perms_lists(A,HNF,L,SNF,Op,RPlist,dperms,eps)
out2 = _read_RotPermList_1D(gpath+"get_rotation_perms_lists_RPlist.out."+str(case))
self._compare_outputs(out1,out2)
def test53(self):
from phenum.grouptheory import _get_rotation_perms_lists
case = 53
A = list(map(list,zip(*_read_float_2D(gpath+"get_rotation_perms_lists_A.in."+str(case)))))
HNF = _read_int_3D(gpath+"get_rotation_perms_lists_HNF.in."+str(case))
L = _read_int_3D(gpath+"get_rotation_perms_lists_L.in."+str(case))
SNF = _read_int_3D(gpath+"get_rotation_perms_lists_SNF.in."+str(case))
Op = _read_fixOp_1D(gpath+"get_rotation_perms_lists_Op.in."+str(case))
dperms = _read_RotPermList(gpath+"get_rotation_perms_lists_dperms.in."+str(case))
RPlist = _read_RotPermList_1D(gpath+"get_rotation_perms_lists_RPlist.in."+str(case))
eps = _read_float(gpath+"get_rotation_perms_lists_eps.in."+str(case))
out1 = _get_rotation_perms_lists(A,HNF,L,SNF,Op,RPlist,dperms,eps)
out2 = _read_RotPermList_1D(gpath+"get_rotation_perms_lists_RPlist.out."+str(case))
self._compare_outputs(out1,out2)
def test54(self):
from phenum.grouptheory import _get_rotation_perms_lists
case = 54
A = list(map(list,zip(*_read_float_2D(gpath+"get_rotation_perms_lists_A.in."+str(case)))))
HNF = _read_int_3D(gpath+"get_rotation_perms_lists_HNF.in."+str(case))
L = _read_int_3D(gpath+"get_rotation_perms_lists_L.in."+str(case))
SNF = _read_int_3D(gpath+"get_rotation_perms_lists_SNF.in."+str(case))
Op = _read_fixOp_1D(gpath+"get_rotation_perms_lists_Op.in."+str(case))
dperms = _read_RotPermList(gpath+"get_rotation_perms_lists_dperms.in."+str(case))
RPlist = _read_RotPermList_1D(gpath+"get_rotation_perms_lists_RPlist.in."+str(case))
eps = _read_float(gpath+"get_rotation_perms_lists_eps.in."+str(case))
out1 = _get_rotation_perms_lists(A,HNF,L,SNF,Op,RPlist,dperms,eps)
out2 = _read_RotPermList_1D(gpath+"get_rotation_perms_lists_RPlist.out."+str(case))
self._compare_outputs(out1,out2)
def test55(self):
from phenum.grouptheory import _get_rotation_perms_lists
case = 55
A = list(map(list,zip(*_read_float_2D(gpath+"get_rotation_perms_lists_A.in."+str(case)))))
HNF = _read_int_3D(gpath+"get_rotation_perms_lists_HNF.in."+str(case))
L = _read_int_3D(gpath+"get_rotation_perms_lists_L.in."+str(case))
SNF = _read_int_3D(gpath+"get_rotation_perms_lists_SNF.in."+str(case))
Op = _read_fixOp_1D(gpath+"get_rotation_perms_lists_Op.in."+str(case))
dperms = _read_RotPermList(gpath+"get_rotation_perms_lists_dperms.in."+str(case))
RPlist = _read_RotPermList_1D(gpath+"get_rotation_perms_lists_RPlist.in."+str(case))
eps = _read_float(gpath+"get_rotation_perms_lists_eps.in."+str(case))
out1 = _get_rotation_perms_lists(A,HNF,L,SNF,Op,RPlist,dperms,eps)
out2 = _read_RotPermList_1D(gpath+"get_rotation_perms_lists_RPlist.out."+str(case))
self._compare_outputs(out1,out2)
def test56(self):
from phenum.grouptheory import _get_rotation_perms_lists
case = 56
A = list(map(list,zip(*_read_float_2D(gpath+"get_rotation_perms_lists_A.in."+str(case)))))
HNF = _read_int_3D(gpath+"get_rotation_perms_lists_HNF.in."+str(case))
L = _read_int_3D(gpath+"get_rotation_perms_lists_L.in."+str(case))
SNF = _read_int_3D(gpath+"get_rotation_perms_lists_SNF.in."+str(case))
Op = _read_fixOp_1D(gpath+"get_rotation_perms_lists_Op.in."+str(case))
dperms = _read_RotPermList(gpath+"get_rotation_perms_lists_dperms.in."+str(case))
RPlist = _read_RotPermList_1D(gpath+"get_rotation_perms_lists_RPlist.in."+str(case))
eps = _read_float(gpath+"get_rotation_perms_lists_eps.in."+str(case))
out1 = _get_rotation_perms_lists(A,HNF,L,SNF,Op,RPlist,dperms,eps)
out2 = _read_RotPermList_1D(gpath+"get_rotation_perms_lists_RPlist.out."+str(case))
self._compare_outputs(out1,out2)
def test57(self):
from phenum.grouptheory import _get_rotation_perms_lists
case = 57
A = list(map(list,zip(*_read_float_2D(gpath+"get_rotation_perms_lists_A.in."+str(case)))))
HNF = _read_int_3D(gpath+"get_rotation_perms_lists_HNF.in."+str(case))
L = _read_int_3D(gpath+"get_rotation_perms_lists_L.in."+str(case))
SNF = _read_int_3D(gpath+"get_rotation_perms_lists_SNF.in."+str(case))
Op = _read_fixOp_1D(gpath+"get_rotation_perms_lists_Op.in."+str(case))
dperms = _read_RotPermList(gpath+"get_rotation_perms_lists_dperms.in."+str(case))
RPlist = _read_RotPermList_1D(gpath+"get_rotation_perms_lists_RPlist.in."+str(case))
eps = _read_float(gpath+"get_rotation_perms_lists_eps.in."+str(case))
out1 = _get_rotation_perms_lists(A,HNF,L,SNF,Op,RPlist,dperms,eps)
out2 = _read_RotPermList_1D(gpath+"get_rotation_perms_lists_RPlist.out."+str(case))
self._compare_outputs(out1,out2)
def test58(self):
from phenum.grouptheory import _get_rotation_perms_lists
case = 58
A = list(map(list,zip(*_read_float_2D(gpath+"get_rotation_perms_lists_A.in."+str(case)))))
HNF = _read_int_3D(gpath+"get_rotation_perms_lists_HNF.in."+str(case))
L = _read_int_3D(gpath+"get_rotation_perms_lists_L.in."+str(case))
SNF = _read_int_3D(gpath+"get_rotation_perms_lists_SNF.in."+str(case))
Op = _read_fixOp_1D(gpath+"get_rotation_perms_lists_Op.in."+str(case))
dperms = _read_RotPermList(gpath+"get_rotation_perms_lists_dperms.in."+str(case))
RPlist = _read_RotPermList_1D(gpath+"get_rotation_perms_lists_RPlist.in."+str(case))
eps = _read_float(gpath+"get_rotation_perms_lists_eps.in."+str(case))
out1 = _get_rotation_perms_lists(A,HNF,L,SNF,Op,RPlist,dperms,eps)
out2 = _read_RotPermList_1D(gpath+"get_rotation_perms_lists_RPlist.out."+str(case))
self._compare_outputs(out1,out2)
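    # --- Illustrative consolidation sketch (not part of the original suite) ---
    # The testN methods above differ only in their `case` number, so the shared
    # body could live in a single helper like this one and be invoked per case;
    # it reuses the module-level readers and gpath exactly as the tests do.
    def _run_rotation_perms_case(self, case):
        from phenum.grouptheory import _get_rotation_perms_lists
        A = list(map(list, zip(*_read_float_2D(gpath + "get_rotation_perms_lists_A.in." + str(case)))))
        HNF = _read_int_3D(gpath + "get_rotation_perms_lists_HNF.in." + str(case))
        L = _read_int_3D(gpath + "get_rotation_perms_lists_L.in." + str(case))
        SNF = _read_int_3D(gpath + "get_rotation_perms_lists_SNF.in." + str(case))
        Op = _read_fixOp_1D(gpath + "get_rotation_perms_lists_Op.in." + str(case))
        dperms = _read_RotPermList(gpath + "get_rotation_perms_lists_dperms.in." + str(case))
        RPlist = _read_RotPermList_1D(gpath + "get_rotation_perms_lists_RPlist.in." + str(case))
        eps = _read_float(gpath + "get_rotation_perms_lists_eps.in." + str(case))
        out1 = _get_rotation_perms_lists(A, HNF, L, SNF, Op, RPlist, dperms, eps)
        out2 = _read_RotPermList_1D(gpath + "get_rotation_perms_lists_RPlist.out." + str(case))
        self._compare_outputs(out1, out2)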
class TestRM3DOperations(ut.TestCase):
"""Tests of the _rm_3D_operations subroutine."""
def test1(self):
from phenum.grouptheory import _rm_3D_operations
with pytest.raises(ValueError):
_rm_3D_operations([[1,1,0],[1,1,1],[0,1,1]],[0],[0],1E-7)
class TestGetSymGroup(ut.TestCase):
""" Tests of the get_sym_group subroutine."""
def test1(self):
from phenum.grouptheory import get_sym_group
par_lat = [[0.5, 0.5, 0.0], [0.5, 0.0, 0.5], [0.0, 0.5, 0.5]]
bas_vacs = [[0.0, 0.0, 0.0]]
HNF = [[1, 0, 0], [0, 1, 0], [2, 3, 6]]
LatDim = 3
out = _read_output("arrow_group.out.1")
symm = get_sym_group(par_lat,bas_vacs,HNF,LatDim)
agroup = []
for i in range(len(symm.perm.site_perm)):
agroup.append([symm.perm.site_perm[i],symm.perm.arrow_perm[i]])
self.assertEqual(agroup,out)
def test2(self):
from phenum.grouptheory import get_sym_group
par_lat = [[0.5, 0.5, 0.0], [0.5, 0.0, 0.5], [0.0, 0.5, 0.5]]
bas_vacs = [[0.0, 0.0, 0.0]]
HNF = [[1, 0, 0], [0, 1, 0], [0, 5, 6]]
LatDim = 3
out = _read_output("arrow_group.out.2")
symm = get_sym_group(par_lat,bas_vacs,HNF,LatDim)
agroup = []
for i in range(len(symm.perm.site_perm)):
agroup.append([symm.perm.site_perm[i],symm.perm.arrow_perm[i]])
self.assertEqual(agroup,out)
def test3(self):
from phenum.grouptheory import get_sym_group
par_lat = [[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]
bas_vacs = [[0.0, 0.0, 0.0], [0.5, 0.5, 0.5]]
HNF = [[1, 0, 0], [0, 1, 0], [0, 1, 2]]
LatDim = 3
out = _read_output("arrow_group.out.3")
symm = get_sym_group(par_lat,bas_vacs,HNF,LatDim)
agroup = []
for i in range(len(symm.perm.site_perm)):
agroup.append([symm.perm.site_perm[i],symm.perm.arrow_perm[i]])
self.assertEqual(agroup,out)
def test4(self):
from phenum.grouptheory import get_sym_group
par_lat = [[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]
bas_vacs = [[0.0, 0.0, 0.0]]
HNF = [[1, 0, 0], [0, 1, 0], [0, 0, 7]]
LatDim = 3
out = _read_output("arrow_group.out.4")
symm = get_sym_group(par_lat,bas_vacs,HNF,LatDim)
agroup = []
for i in range(len(symm.perm.site_perm)):
agroup.append([symm.perm.site_perm[i],symm.perm.arrow_perm[i]])
self.assertEqual(agroup,out)
def test5(self):
from phenum.grouptheory import get_sym_group
par_lat = [[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]
bas_vacs = [[0.0, 0.0, 0.0]]
HNF = [[1, 0, 0], [1, 2, 0], [1, 0, 2]]
LatDim = 3
out = _read_output("arrow_group.out.5")
symm = get_sym_group(par_lat,bas_vacs,HNF,LatDim)
agroup = []
for i in range(len(symm.perm.site_perm)):
agroup.append([symm.perm.site_perm[i],symm.perm.arrow_perm[i]])
self.assertEqual(agroup,out)
def test6(self):
from phenum.grouptheory import get_sym_group
par_lat = [[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]
bas_vacs = [[0.0, 0.0, 0.0]]
HNF = [[1, 0, 0], [0, 2, 0], [0, 0, 2]]
LatDim = 3
out = _read_output("arrow_group.out.6")
symm = get_sym_group(par_lat,bas_vacs,HNF,LatDim)
agroup = []
for i in range(len(symm.perm.site_perm)):
agroup.append([symm.perm.site_perm[i],symm.perm.arrow_perm[i]])
self.assertEqual(agroup,out)
def test7(self):
from phenum.grouptheory import get_sym_group
par_lat = [[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]
bas_vacs = [[0.0, 0.0, 0.0], [0.5, 0.5, 0.5], [0.25, 0.25, 0.75]]
HNF = [[1, 0, 0], [0, 1, 0], [0, 1, 2]]
LatDim = 3
out = _read_output("arrow_group.out.7")
symm = get_sym_group(par_lat,bas_vacs,HNF,LatDim)
agroup = []
for i in range(len(symm.perm.site_perm)):
agroup.append([symm.perm.site_perm[i],symm.perm.arrow_perm[i]])
self.assertEqual(agroup,out)
def test8(self):
from phenum.grouptheory import get_sym_group
par_lat = [[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]
bas_vacs = [[0.0, 0.0, 0.0], [0.5, 0.5, 0.5], [0.25, 0.25, 0.75]]
HNF = [[1, 0, 0], [0, 1, 0], [0, 0, 3]]
LatDim = 3
out = _read_output("arrow_group.out.8")
symm = get_sym_group(par_lat,bas_vacs,HNF,LatDim)
agroup = []
for i in range(len(symm.perm.site_perm)):
agroup.append([symm.perm.site_perm[i],symm.perm.arrow_perm[i]])
self.assertEqual(agroup,out)
def test9(self):
from phenum.grouptheory import get_sym_group
par_lat = [[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]
bas_vacs = [[0.0, 0.0, 0.0], [0.5, 0.5, 0.5], [0.25, 0.25, 0.75]]
HNF = [[1, 0, 0], [0, 1, 0], [0, 2, 3]]
LatDim = 3
out = _read_output("arrow_group.out.9")
symm = get_sym_group(par_lat,bas_vacs,HNF,LatDim)
agroup = []
for i in range(len(symm.perm.site_perm)):
agroup.append([symm.perm.site_perm[i],symm.perm.arrow_perm[i]])
self.assertEqual(agroup,out)
def test10(self):
from phenum.grouptheory import get_sym_group
par_lat = [[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]
bas_vacs = [[0.0, 0.0, 0.0]]
HNF = [[2, 0, 0], [0, 2, 0], [0, 0, 2]]
LatDim = 3
out = _read_output("arrow_group.out.10")
symm = get_sym_group(par_lat,bas_vacs,HNF,LatDim)
agroup = []
for i in range(len(symm.perm.site_perm)):
agroup.append([symm.perm.site_perm[i],symm.perm.arrow_perm[i]])
self.assertEqual(agroup,out)
def test11(self):
from phenum.grouptheory import get_sym_group
par_lat = [[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]
bas_vacs = [[2.0, 2.0, 2.0]]
HNF = [[2, 0, 0], [0, 2, 0], [0, 0, 2]]
LatDim = 3
out = _read_output("arrow_group.out.10")
symm = get_sym_group(par_lat,bas_vacs,HNF,LatDim)
agroup = []
for i in range(len(symm.perm.site_perm)):
agroup.append([symm.perm.site_perm[i],symm.perm.arrow_perm[i]])
self.assertEqual(agroup,out)
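# Optional convenience entry point (not in the original file): `ut` is the
# unittest alias used by the TestCase classes above, so the module can also be
# run directly as a script.
if __name__ == '__main__':
    ut.main()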
|
py | 1a3974eb6a1a37f4c568d576dc0abb6c1ced6e4d | from datetime import timedelta
import aiohttp
import aiohttp_client_cache
from sqlalchemy.ext.asyncio import AsyncSession
from Backend.core.errors import CustomException
from Backend.crud import discord_users
from Backend.database.models import DiscordUsers
from Backend.networking.bungieAuth import BungieAuth
from Backend.networking.schemas import WebResponse
from Backend.networking.base import NetworkBase
from settings import BUNGIE_TOKEN
class BungieApi(NetworkBase):
"""Handles all networking to any API. To call an api that is not bungies, change the headers"""
# base bungie headers
normal_headers = {"X-API-Key": BUNGIE_TOKEN, "Accept": "application/json"}
auth_headers = normal_headers.copy()
    # the cache object. Low expire time since players don't want to wait an eternity for their stuff to _update
cache = aiohttp_client_cache.SQLiteBackend(
cache_name="networking/bungie_networking_cache",
expire_after=timedelta(minutes=5),
urls_expire_after={
"platform/app/oauth/token": 0, # do not save token stuff
"Destiny2/Stats/PostGameCarnageReport": 0, # do not save pgcr. We save them anyway and don't look them up more than once
"Destiny2/*/Profile/**components=": timedelta(minutes=15), # profile call
"Destiny2/*/Account/*/Stats": timedelta(minutes=60), # stats
"Destiny2/*/Account/*/Character/*/Stats/Activities": timedelta(minutes=5), # activity history
},
)
    def __init__(
        self,
        db: AsyncSession,
        user: DiscordUsers = None,
        headers: dict = None,
        i_understand_what_im_doing_and_that_setting_this_to_true_might_break_stuff: bool = False,
    ):
        assert (
            user or headers or i_understand_what_im_doing_and_that_setting_this_to_true_might_break_stuff
        ), "One argument needs to be defined"
self.user = user
self.discord_id = user.discord_id if user else None
self.db = db
        # allows URLs other than Bungie's to be called (e.g. Steam players)
if headers:
self.normal_headers = headers
self.auth_headers = headers
self.bungie_request = False
async def get(self, route: str, params: dict = None, use_cache: bool = True) -> WebResponse:
"""Grabs JSON from the specified URL (no oauth)"""
        # check if the user has a private profile; if so, we use OAuth
if self.user:
if self.user.private_profile:
# then we use get_with_token()
return await self.get_with_token(route=route, params=params, use_cache=use_cache)
try:
async with aiohttp_client_cache.CachedSession(cache=self.cache) as session:
# use cache for the responses
if use_cache:
return await self._request(
session=session,
method="GET",
route=route,
headers=self.normal_headers,
params=params,
)
# do not use cache
else:
async with session.disabled():
return await self._request(
session=session,
method="GET",
route=route,
headers=self.normal_headers,
params=params,
)
except CustomException as exc:
if exc.error == "BungieDestinyPrivacyRestriction":
# catch the BungieDestinyPrivacyRestriction error to change privacy settings in our db
await discord_users.update(db=self.db, to_update=self.user, has_private_profile=True)
# then call the same endpoint again, this time with a token
return await self.get_with_token(route=route, params=params, use_cache=use_cache)
else:
# otherwise raise error again
raise exc
async def get_with_token(
self, route: str, params: dict = None, use_cache: bool = True
) -> WebResponse:
"""Grabs JSON from the specified URL (oauth)"""
# set the auth headers to a working token
await self.__set_auth_headers()
# ignore cookies
no_jar = aiohttp.DummyCookieJar()
async with aiohttp_client_cache.CachedSession(cache=self.cache, cookie_jar=no_jar) as session:
# use cache for the responses
if use_cache:
return await self._request(
session=session,
method="GET",
route=route,
headers=self.auth_headers,
params=params,
)
# do not use cache
else:
async with session.disabled():
return await self._request(
session=session,
method="GET",
route=route,
headers=self.auth_headers,
params=params,
)
async def post(self, route: str, json: dict, params: dict = None) -> WebResponse:
"""Post data to bungie. self.discord_id must have the authentication for the action"""
# set the auth headers to a working token
await self.__set_auth_headers()
async with aiohttp_client_cache.CachedSession(cache=self.cache) as session:
# do not use cache here
async with session.disabled():
return await self._request(
session=session,
method="POST",
route=route,
json=json,
headers=self.auth_headers,
params=params,
)
async def __set_auth_headers(self):
"""Update the auth headers to include a working token. Raise an error if that doesnt exist"""
# get a working token or abort
auth = BungieAuth(db=self.db, user=self.user)
token = await auth.get_working_token()
        # use special token headers if it's a Bungie request
if self.bungie_request:
self.auth_headers.update(
{
"Authorization": f"Bearer {token}",
}
)
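# --- Illustrative usage sketch (not part of the original module) ---
# The route, the "components" parameter and the DiscordUsers attribute names
# (`system`, `destiny_id`) are assumptions made for this example; only the
# BungieApi calls themselves mirror the class above.
async def _example_fetch_profile(db: AsyncSession, user: DiscordUsers) -> WebResponse:
    api = BungieApi(db=db, user=user)
    # get() transparently falls back to get_with_token() when Bungie reports
    # the profile as private (see BungieApi.get above)
    return await api.get(
        route=f"https://www.bungie.net/Platform/Destiny2/{user.system}/Profile/{user.destiny_id}/",
        params={"components": "100"},
    )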
|
py | 1a3975bdaa196a3b831dc791f375ff814da4340a | #! /usr/bin/env python2
# -*- coding:utf-8 -*-
class Student(object):
    name = 'student'  # class attribute, shared by all instances
s = Student()
print(s.name)        # 'student' -- no instance attribute yet, so the class attribute is found
print(Student.name)  # 'student'
s.name = "s.name"
print(s.name)        # 's.name' -- the instance attribute now shadows the class attribute
print(Student.name)  # 'student' -- the class attribute itself is unchanged
del s.name
print(s.name)        # 'student' -- lookup falls back to the class attribute again
print(Student.name)  # 'student'
del Student.name
# with both attributes gone, the lookups below raise AttributeError; wrapped
# here so the demo script runs to completion
try:
    print(s.name)
except AttributeError as e:
    print(e)
try:
    print(Student.name)
except AttributeError as e:
    print(e)
|
py | 1a3976f4045742987323596bf9375b67baa5b58d | """Default variable filters."""
import re
import random as random_module
import unicodedata
from decimal import Decimal, InvalidOperation, Context, ROUND_HALF_UP
from functools import wraps
from pprint import pformat
from django.template.base import Variable, Library, VariableDoesNotExist
from django.conf import settings
from django.utils import formats
from django.utils.dateformat import format, time_format
from django.utils.encoding import force_unicode, iri_to_uri
from django.utils.html import (conditional_escape, escapejs, fix_ampersands,
escape, urlize as urlize_impl, linebreaks, strip_tags)
from django.utils.http import urlquote
from django.utils.text import Truncator, wrap, phone2numeric
from django.utils.safestring import mark_safe, SafeData, mark_for_escaping
from django.utils.timesince import timesince, timeuntil
from django.utils.translation import ugettext, ungettext
from django.utils.text import normalize_newlines
register = Library()
#######################
# STRING DECORATOR #
#######################
def stringfilter(func):
"""
Decorator for filters which should only receive unicode objects. The object
passed as the first positional argument will be converted to a unicode
object.
"""
def _dec(*args, **kwargs):
if args:
args = list(args)
args[0] = force_unicode(args[0])
if (isinstance(args[0], SafeData) and
getattr(_dec._decorated_function, 'is_safe', False)):
return mark_safe(func(*args, **kwargs))
return func(*args, **kwargs)
# Include a reference to the real function (used to check original
# arguments by the template parser, and to bear the 'is_safe' attribute
# when multiple decorators are applied).
_dec._decorated_function = getattr(func, '_decorated_function', func)
for attr in ('is_safe', 'needs_autoescape'):
if hasattr(func, attr):
import warnings
warnings.warn("Setting the %s attribute of a template filter "
"function is deprecated; use @register.filter(%s=%s) "
"instead" % (attr, attr, getattr(func, attr)),
DeprecationWarning)
setattr(_dec, attr, getattr(func, attr))
return wraps(func)(_dec)
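# Illustrative only (not one of Django's shipped filters): a custom filter
# registered the same way as the built-ins below; the name "reverse_text" is
# made up for this sketch.
@register.filter(is_safe=True)
@stringfilter
def reverse_text(value):
    """Returns the value with its characters reversed."""
    return value[::-1]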
###################
# STRINGS #
###################
@register.filter(is_safe=True)
@stringfilter
def addslashes(value):
"""
Adds slashes before quotes. Useful for escaping strings in CSV, for
example. Less useful for escaping JavaScript; use the ``escapejs``
filter instead.
"""
return value.replace('\\', '\\\\').replace('"', '\\"').replace("'", "\\'")
@register.filter(is_safe=True)
@stringfilter
def capfirst(value):
"""Capitalizes the first character of the value."""
return value and value[0].upper() + value[1:]
@register.filter("escapejs")
@stringfilter
def escapejs_filter(value):
"""Hex encodes characters for use in JavaScript strings."""
return escapejs(value)
@register.filter("fix_ampersands", is_safe=True)
@stringfilter
def fix_ampersands_filter(value):
"""Replaces ampersands with ``&`` entities."""
return fix_ampersands(value)
# Values for testing floatformat input against infinity and NaN representations,
# which differ across platforms and Python versions. Some (i.e. old Windows
# ones) are not recognized by Decimal but we want to return them unchanged vs.
# returning an empty string as we do for completley invalid input. Note these
# need to be built up from values that are not inf/nan, since inf/nan values do
# not reload properly from .pyc files on Windows prior to some level of Python 2.5
# (see Python Issue757815 and Issue1080440).
pos_inf = 1e200 * 1e200
neg_inf = -1e200 * 1e200
nan = (1e200 * 1e200) // (1e200 * 1e200)
special_floats = [str(pos_inf), str(neg_inf), str(nan)]
@register.filter(is_safe=True)
def floatformat(text, arg=-1):
"""
Displays a float to a specified number of decimal places.
If called without an argument, it displays the floating point number with
one decimal place -- but only if there's a decimal place to be displayed:
* num1 = 34.23234
* num2 = 34.00000
* num3 = 34.26000
* {{ num1|floatformat }} displays "34.2"
* {{ num2|floatformat }} displays "34"
* {{ num3|floatformat }} displays "34.3"
If arg is positive, it will always display exactly arg number of decimal
places:
* {{ num1|floatformat:3 }} displays "34.232"
* {{ num2|floatformat:3 }} displays "34.000"
* {{ num3|floatformat:3 }} displays "34.260"
If arg is negative, it will display arg number of decimal places -- but
only if there are places to be displayed:
* {{ num1|floatformat:"-3" }} displays "34.232"
* {{ num2|floatformat:"-3" }} displays "34"
* {{ num3|floatformat:"-3" }} displays "34.260"
If the input float is infinity or NaN, the (platform-dependent) string
representation of that value will be displayed.
"""
try:
input_val = force_unicode(text)
d = Decimal(input_val)
except UnicodeEncodeError:
return u''
except InvalidOperation:
if input_val in special_floats:
return input_val
try:
d = Decimal(force_unicode(float(text)))
except (ValueError, InvalidOperation, TypeError, UnicodeEncodeError):
return u''
try:
p = int(arg)
except ValueError:
return input_val
try:
m = int(d) - d
except (ValueError, OverflowError, InvalidOperation):
return input_val
if not m and p < 0:
return mark_safe(formats.number_format(u'%d' % (int(d)), 0))
if p == 0:
exp = Decimal(1)
else:
exp = Decimal(u'1.0') / (Decimal(10) ** abs(p))
try:
# Set the precision high enough to avoid an exception, see #15789.
tupl = d.as_tuple()
units = len(tupl[1]) - tupl[2]
prec = abs(p) + units + 1
# Avoid conversion to scientific notation by accessing `sign`, `digits`
# and `exponent` from `Decimal.as_tuple()` directly.
sign, digits, exponent = d.quantize(exp, ROUND_HALF_UP,
Context(prec=prec)).as_tuple()
digits = [unicode(digit) for digit in reversed(digits)]
while len(digits) <= abs(exponent):
digits.append(u'0')
digits.insert(-exponent, u'.')
if sign:
digits.append(u'-')
number = u''.join(reversed(digits))
return mark_safe(formats.number_format(number, abs(p)))
except InvalidOperation:
return input_val
@register.filter(is_safe=True)
@stringfilter
def iriencode(value):
"""Escapes an IRI value for use in a URL."""
return force_unicode(iri_to_uri(value))
@register.filter(is_safe=True, needs_autoescape=True)
@stringfilter
def linenumbers(value, autoescape=None):
"""Displays text with line numbers."""
lines = value.split(u'\n')
# Find the maximum width of the line count, for use with zero padding
# string format command
width = unicode(len(unicode(len(lines))))
if not autoescape or isinstance(value, SafeData):
for i, line in enumerate(lines):
lines[i] = (u"%0" + width + u"d. %s") % (i + 1, line)
else:
for i, line in enumerate(lines):
lines[i] = (u"%0" + width + u"d. %s") % (i + 1, escape(line))
return mark_safe(u'\n'.join(lines))
@register.filter(is_safe=True)
@stringfilter
def lower(value):
"""Converts a string into all lowercase."""
return value.lower()
@register.filter(is_safe=False)
@stringfilter
def make_list(value):
"""
Returns the value turned into a list.
For an integer, it's a list of digits.
For a string, it's a list of characters.
"""
return list(value)
@register.filter(is_safe=True)
@stringfilter
def slugify(value):
"""
Normalizes string, converts to lowercase, removes non-alpha characters,
and converts spaces to hyphens.
"""
value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore')
value = unicode(re.sub('[^\w\s-]', '', value).strip().lower())
return mark_safe(re.sub('[-\s]+', '-', value))
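# Illustrative example (not in the original source); the input string is made
# up, and the output follows directly from the substitutions above:
#   slugify(u"Jack & Jill like numbers 1,2,3") == u"jack-jill-like-numbers-123"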
@register.filter(is_safe=True)
def stringformat(value, arg):
"""
Formats the variable according to the arg, a string formatting specifier.
    This specifier uses Python string formatting syntax, with the exception that
the leading "%" is dropped.
See http://docs.python.org/lib/typesseq-strings.html for documentation
of Python string formatting
"""
try:
return (u"%" + unicode(arg)) % value
except (ValueError, TypeError):
return u""
@register.filter(is_safe=True)
@stringfilter
def title(value):
"""Converts a string into titlecase."""
t = re.sub("([a-z])'([A-Z])", lambda m: m.group(0).lower(), value.title())
return re.sub("\d([A-Z])", lambda m: m.group(0).lower(), t)
@register.filter(is_safe=True)
@stringfilter
def truncatechars(value, arg):
"""
Truncates a string after a certain number of characters.
Argument: Number of characters to truncate after.
"""
try:
length = int(arg)
except ValueError: # Invalid literal for int().
return value # Fail silently.
return Truncator(value).chars(length)
@register.filter(is_safe=True)
@stringfilter
def truncatewords(value, arg):
"""
Truncates a string after a certain number of words.
Argument: Number of words to truncate after.
Newlines within the string are removed.
"""
try:
length = int(arg)
except ValueError: # Invalid literal for int().
return value # Fail silently.
return Truncator(value).words(length, truncate=' ...')
@register.filter(is_safe=True)
@stringfilter
def truncatewords_html(value, arg):
"""
Truncates HTML after a certain number of words.
Argument: Number of words to truncate after.
Newlines in the HTML are preserved.
"""
try:
length = int(arg)
except ValueError: # invalid literal for int()
return value # Fail silently.
return Truncator(value).words(length, html=True, truncate=' ...')
@register.filter(is_safe=False)
@stringfilter
def upper(value):
"""Converts a string into all uppercase."""
return value.upper()
@register.filter(is_safe=False)
@stringfilter
def urlencode(value, safe=None):
"""
Escapes a value for use in a URL.
Takes an optional ``safe`` parameter used to determine the characters which
should not be escaped by Django's ``urlquote`` method. If not provided, the
default safe characters will be used (but an empty string can be provided
when *all* characters should be escaped).
"""
kwargs = {}
if safe is not None:
kwargs['safe'] = safe
return urlquote(value, **kwargs)
@register.filter(is_safe=True, needs_autoescape=True)
@stringfilter
def urlize(value, autoescape=None):
"""Converts URLs in plain text into clickable links."""
return mark_safe(urlize_impl(value, nofollow=True, autoescape=autoescape))
@register.filter(is_safe=True, needs_autoescape=True)
@stringfilter
def urlizetrunc(value, limit, autoescape=None):
"""
Converts URLs into clickable links, truncating URLs to the given character
limit, and adding 'rel=nofollow' attribute to discourage spamming.
Argument: Length to truncate URLs to.
"""
return mark_safe(urlize_impl(value, trim_url_limit=int(limit), nofollow=True,
autoescape=autoescape))
@register.filter(is_safe=False)
@stringfilter
def wordcount(value):
"""Returns the number of words."""
return len(value.split())
@register.filter(is_safe=True)
@stringfilter
def wordwrap(value, arg):
"""
Wraps words at specified line length.
Argument: number of characters to wrap the text at.
"""
return wrap(value, int(arg))
@register.filter(is_safe=True)
@stringfilter
def ljust(value, arg):
"""
Left-aligns the value in a field of a given width.
Argument: field size.
"""
return value.ljust(int(arg))
@register.filter(is_safe=True)
@stringfilter
def rjust(value, arg):
"""
Right-aligns the value in a field of a given width.
Argument: field size.
"""
return value.rjust(int(arg))
@register.filter(is_safe=True)
@stringfilter
def center(value, arg):
"""Centers the value in a field of a given width."""
return value.center(int(arg))
@register.filter
@stringfilter
def cut(value, arg):
"""
Removes all values of arg from the given string.
"""
safe = isinstance(value, SafeData)
value = value.replace(arg, u'')
if safe and arg != ';':
return mark_safe(value)
return value
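# Note added for clarity (not part of the original Django module): the
# ``arg != ';'`` guard above presumably exists because removing ';' from an
# already-"safe" string could corrupt HTML entities such as '&amp;', so the
# result is deliberately not re-marked as safe in that case.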
###################
# HTML STRINGS #
###################
@register.filter("escape", is_safe=True)
@stringfilter
def escape_filter(value):
"""
    Marks the value as a string that should be auto-escaped.
"""
return mark_for_escaping(value)
@register.filter(is_safe=True)
@stringfilter
def force_escape(value):
"""
Escapes a string's HTML. This returns a new string containing the escaped
characters (as opposed to "escape", which marks the content for later
possible escaping).
"""
return escape(value)
@register.filter("linebreaks", is_safe=True, needs_autoescape=True)
@stringfilter
def linebreaks_filter(value, autoescape=None):
"""
Replaces line breaks in plain text with appropriate HTML; a single
newline becomes an HTML line break (``<br />``) and a new line
followed by a blank line becomes a paragraph break (``</p>``).
"""
autoescape = autoescape and not isinstance(value, SafeData)
return mark_safe(linebreaks(value, autoescape))
@register.filter(is_safe=True, needs_autoescape=True)
@stringfilter
def linebreaksbr(value, autoescape=None):
"""
Converts all newlines in a piece of plain text to HTML line breaks
(``<br />``).
"""
autoescape = autoescape and not isinstance(value, SafeData)
value = normalize_newlines(value)
if autoescape:
value = escape(value)
return mark_safe(value.replace('\n', '<br />'))
@register.filter(is_safe=True)
@stringfilter
def safe(value):
"""
Marks the value as a string that should not be auto-escaped.
"""
return mark_safe(value)
@register.filter(is_safe=True)
def safeseq(value):
"""
A "safe" filter for sequences. Marks each element in the sequence,
individually, as safe, after converting them to unicode. Returns a list
with the results.
"""
return [mark_safe(force_unicode(obj)) for obj in value]
@register.filter(is_safe=True)
@stringfilter
def removetags(value, tags):
"""Removes a space separated list of [X]HTML tags from the output."""
tags = [re.escape(tag) for tag in tags.split()]
tags_re = u'(%s)' % u'|'.join(tags)
starttag_re = re.compile(ur'<%s(/?>|(\s+[^>]*>))' % tags_re, re.U)
endtag_re = re.compile(u'</%s>' % tags_re)
value = starttag_re.sub(u'', value)
value = endtag_re.sub(u'', value)
return value
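# Illustrative example (added for clarity; not part of the original Django
# module). Only the listed tags are removed; their contents are kept:
#   removetags(u"<b>Joel</b> <button>is</button> a <span>slug</span>", u"b span")
#   would return u"Joel <button>is</button> a slug"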
@register.filter(is_safe=True)
@stringfilter
def striptags(value):
"""Strips all [X]HTML tags."""
return strip_tags(value)
###################
# LISTS #
###################
@register.filter(is_safe=False)
def dictsort(value, arg):
"""
Takes a list of dicts, returns that list sorted by the property given in
the argument.
"""
try:
return sorted(value, key=Variable(arg).resolve)
except (TypeError, VariableDoesNotExist):
return u''
@register.filter(is_safe=False)
def dictsortreversed(value, arg):
"""
Takes a list of dicts, returns that list sorted in reverse order by the
property given in the argument.
"""
try:
return sorted(value, key=Variable(arg).resolve, reverse=True)
except (TypeError, VariableDoesNotExist):
return u''
@register.filter(is_safe=False)
def first(value):
"""Returns the first item in a list."""
try:
return value[0]
except IndexError:
return u''
@register.filter(is_safe=True, needs_autoescape=True)
def join(value, arg, autoescape=None):
"""
Joins a list with a string, like Python's ``str.join(list)``.
"""
value = map(force_unicode, value)
if autoescape:
value = [conditional_escape(v) for v in value]
try:
data = conditional_escape(arg).join(value)
except AttributeError: # fail silently but nicely
return value
return mark_safe(data)
@register.filter(is_safe=True)
def last(value):
"Returns the last item in a list"
try:
return value[-1]
except IndexError:
return u''
@register.filter(is_safe=True)
def length(value):
"""Returns the length of the value - useful for lists."""
try:
return len(value)
except (ValueError, TypeError):
return ''
@register.filter(is_safe=False)
def length_is(value, arg):
"""Returns a boolean of whether the value's length is the argument."""
try:
return len(value) == int(arg)
except (ValueError, TypeError):
return ''
@register.filter(is_safe=True)
def random(value):
"""Returns a random item from the list."""
return random_module.choice(value)
@register.filter("slice", is_safe=True)
def slice_filter(value, arg):
"""
Returns a slice of the list.
Uses the same syntax as Python's list slicing; see
http://diveintopython.org/native_data_types/lists.html#odbchelper.list.slice
for an introduction.
"""
try:
bits = []
for x in arg.split(u':'):
if len(x) == 0:
bits.append(None)
else:
bits.append(int(x))
return value[slice(*bits)]
except (ValueError, TypeError):
return value # Fail silently.
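# Illustrative examples (added for clarity; not part of the original Django
# module). The argument is split on ':' exactly like Python slice syntax, so
# in a template one could write (``some_list`` is a hypothetical variable):
#   {{ some_list|slice:":2" }}   ->  the first two items
#   {{ some_list|slice:"1:4" }}  ->  the items at indexes 1, 2 and 3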
@register.filter(is_safe=True, needs_autoescape=True)
def unordered_list(value, autoescape=None):
"""
Recursively takes a self-nested list and returns an HTML unordered list --
WITHOUT opening and closing <ul> tags.
The list is assumed to be in the proper format. For example, if ``var``
contains: ``['States', ['Kansas', ['Lawrence', 'Topeka'], 'Illinois']]``,
then ``{{ var|unordered_list }}`` would return::
<li>States
<ul>
<li>Kansas
<ul>
<li>Lawrence</li>
<li>Topeka</li>
</ul>
</li>
<li>Illinois</li>
</ul>
</li>
"""
if autoescape:
escaper = conditional_escape
else:
escaper = lambda x: x
def convert_old_style_list(list_):
"""
Converts old style lists to the new easier to understand format.
The old list format looked like:
            ['Item 1', [['Item 1.1', []], ['Item 1.2', []]]]
        And it is converted to:
            ['Item 1', ['Item 1.1', 'Item 1.2']]
"""
if not isinstance(list_, (tuple, list)) or len(list_) != 2:
return list_, False
first_item, second_item = list_
if second_item == []:
return [first_item], True
try:
# see if second item is iterable
iter(second_item)
except TypeError:
return list_, False
old_style_list = True
new_second_item = []
for sublist in second_item:
item, old_style_list = convert_old_style_list(sublist)
if not old_style_list:
break
new_second_item.extend(item)
if old_style_list:
second_item = new_second_item
return [first_item, second_item], old_style_list
def _helper(list_, tabs=1):
indent = u'\t' * tabs
output = []
list_length = len(list_)
i = 0
while i < list_length:
title = list_[i]
sublist = ''
sublist_item = None
if isinstance(title, (list, tuple)):
sublist_item = title
title = ''
elif i < list_length - 1:
next_item = list_[i+1]
if next_item and isinstance(next_item, (list, tuple)):
# The next item is a sub-list.
sublist_item = next_item
# We've processed the next item now too.
i += 1
if sublist_item:
sublist = _helper(sublist_item, tabs+1)
sublist = '\n%s<ul>\n%s\n%s</ul>\n%s' % (indent, sublist,
indent, indent)
output.append('%s<li>%s%s</li>' % (indent,
escaper(force_unicode(title)), sublist))
i += 1
return '\n'.join(output)
value, converted = convert_old_style_list(value)
return mark_safe(_helper(value))
###################
# INTEGERS #
###################
@register.filter(is_safe=False)
def add(value, arg):
"""Adds the arg to the value."""
try:
return int(value) + int(arg)
except (ValueError, TypeError):
try:
return value + arg
except Exception:
return ''
@register.filter(is_safe=False)
def get_digit(value, arg):
"""
Given a whole number, returns the requested digit of it, where 1 is the
right-most digit, 2 is the second-right-most digit, etc. Returns the
original value for invalid input (if input or argument is not an integer,
or if argument is less than 1). Otherwise, output is always an integer.
"""
try:
arg = int(arg)
value = int(value)
except ValueError:
return value # Fail silently for an invalid argument
if arg < 1:
return value
try:
return int(str(value)[-arg])
except IndexError:
return 0
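# Illustrative examples (added for clarity; not part of the original Django
# module). Digits are indexed from the right, per the docstring above:
#   get_digit(123456789, 2)  ->  8           (second-right-most digit)
#   get_digit(123456789, 0)  ->  123456789   (argument < 1 returns the input)
#   get_digit(12, 5)         ->  0           (index past the left end)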
###################
# DATES #
###################
@register.filter(expects_localtime=True, is_safe=False)
def date(value, arg=None):
"""Formats a date according to the given format."""
if not value:
return u''
if arg is None:
arg = settings.DATE_FORMAT
try:
return formats.date_format(value, arg)
except AttributeError:
try:
return format(value, arg)
except AttributeError:
return ''
@register.filter(expects_localtime=True, is_safe=False)
def time(value, arg=None):
"""Formats a time according to the given format."""
if value in (None, u''):
return u''
if arg is None:
arg = settings.TIME_FORMAT
try:
return formats.time_format(value, arg)
except AttributeError:
try:
return time_format(value, arg)
except AttributeError:
return ''
@register.filter("timesince", is_safe=False)
def timesince_filter(value, arg=None):
"""Formats a date as the time since that date (i.e. "4 days, 6 hours")."""
if not value:
return u''
try:
if arg:
return timesince(value, arg)
return timesince(value)
except (ValueError, TypeError):
return u''
@register.filter("timeuntil", is_safe=False)
def timeuntil_filter(value, arg=None):
"""Formats a date as the time until that date (i.e. "4 days, 6 hours")."""
if not value:
return u''
try:
return timeuntil(value, arg)
except (ValueError, TypeError):
return u''
###################
# LOGIC #
###################
@register.filter(is_safe=False)
def default(value, arg):
"""If value is unavailable, use given default."""
return value or arg
@register.filter(is_safe=False)
def default_if_none(value, arg):
"""If value is None, use given default."""
if value is None:
return arg
return value
@register.filter(is_safe=False)
def divisibleby(value, arg):
"""Returns True if the value is devisible by the argument."""
return int(value) % int(arg) == 0
@register.filter(is_safe=False)
def yesno(value, arg=None):
"""
Given a string mapping values for true, false and (optionally) None,
returns one of those strings according to the value:
========== ====================== ==================================
Value Argument Outputs
========== ====================== ==================================
``True`` ``"yeah,no,maybe"`` ``yeah``
``False`` ``"yeah,no,maybe"`` ``no``
``None`` ``"yeah,no,maybe"`` ``maybe``
    ``None``   ``"yeah,no"``          ``"no"`` (converts None to False
                                      if no mapping for None is given).
========== ====================== ==================================
"""
if arg is None:
arg = ugettext('yes,no,maybe')
bits = arg.split(u',')
if len(bits) < 2:
return value # Invalid arg.
try:
yes, no, maybe = bits
except ValueError:
# Unpack list of wrong size (no "maybe" value provided).
yes, no, maybe = bits[0], bits[1], bits[1]
if value is None:
return maybe
if value:
return yes
return no
###################
# MISC #
###################
@register.filter(is_safe=True)
def filesizeformat(bytes):
"""
Formats the value like a 'human-readable' file size (i.e. 13 KB, 4.1 MB,
102 bytes, etc).
"""
try:
bytes = float(bytes)
except (TypeError,ValueError,UnicodeDecodeError):
return ungettext("%(size)d byte", "%(size)d bytes", 0) % {'size': 0}
filesize_number_format = lambda value: formats.number_format(round(value, 1), 1)
if bytes < 1024:
return ungettext("%(size)d byte", "%(size)d bytes", bytes) % {'size': bytes}
if bytes < 1024 * 1024:
return ugettext("%s KB") % filesize_number_format(bytes / 1024)
if bytes < 1024 * 1024 * 1024:
return ugettext("%s MB") % filesize_number_format(bytes / (1024 * 1024))
if bytes < 1024 * 1024 * 1024 * 1024:
return ugettext("%s GB") % filesize_number_format(bytes / (1024 * 1024 * 1024))
if bytes < 1024 * 1024 * 1024 * 1024 * 1024:
return ugettext("%s TB") % filesize_number_format(bytes / (1024 * 1024 * 1024 * 1024))
return ugettext("%s PB") % filesize_number_format(bytes / (1024 * 1024 * 1024 * 1024 * 1024))
@register.filter(is_safe=False)
def pluralize(value, arg=u's'):
"""
Returns a plural suffix if the value is not 1. By default, 's' is used as
the suffix:
* If value is 0, vote{{ value|pluralize }} displays "0 votes".
* If value is 1, vote{{ value|pluralize }} displays "1 vote".
* If value is 2, vote{{ value|pluralize }} displays "2 votes".
If an argument is provided, that string is used instead:
* If value is 0, class{{ value|pluralize:"es" }} displays "0 classes".
* If value is 1, class{{ value|pluralize:"es" }} displays "1 class".
* If value is 2, class{{ value|pluralize:"es" }} displays "2 classes".
If the provided argument contains a comma, the text before the comma is
used for the singular case and the text after the comma is used for the
plural case:
* If value is 0, cand{{ value|pluralize:"y,ies" }} displays "0 candies".
* If value is 1, cand{{ value|pluralize:"y,ies" }} displays "1 candy".
* If value is 2, cand{{ value|pluralize:"y,ies" }} displays "2 candies".
"""
    if u',' not in arg:
arg = u',' + arg
bits = arg.split(u',')
if len(bits) > 2:
return u''
singular_suffix, plural_suffix = bits[:2]
try:
if int(value) != 1:
return plural_suffix
except ValueError: # Invalid string that's not a number.
pass
except TypeError: # Value isn't a string or a number; maybe it's a list?
try:
if len(value) != 1:
return plural_suffix
except TypeError: # len() of unsized object.
pass
return singular_suffix
@register.filter("phone2numeric", is_safe=True)
def phone2numeric_filter(value):
"""Takes a phone number and converts it in to its numerical equivalent."""
return phone2numeric(value)
@register.filter(is_safe=True)
def pprint(value):
"""A wrapper around pprint.pprint -- for debugging, really."""
try:
return pformat(value)
except Exception as e:
return u"Error in formatting: %s" % force_unicode(e, errors="replace")
|
py | 1a3976f7cbb190f6c17ff7611cdab4fd52ddffee | """empty message
Revision ID: dd7377000e2e
Revises: 3a28d0608e7f
Create Date: 2020-08-18 14:14:59.119395
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'dd7377000e2e'
down_revision = '3a28d0608e7f'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('comment',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('user_id', sa.Integer(), nullable=False),
sa.Column('content', sa.Text(), nullable=False),
sa.Column('create_date', sa.DateTime(), nullable=False),
sa.Column('modify_date', sa.DateTime(), nullable=True),
sa.Column('question_id', sa.Integer(), nullable=True),
sa.Column('answer_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['answer_id'], ['answer.id'], name=op.f('fk_comment_answer_id_answer'), ondelete='CASCADE'),
sa.ForeignKeyConstraint(['question_id'], ['question.id'], name=op.f('fk_comment_question_id_question'), ondelete='CASCADE'),
sa.ForeignKeyConstraint(['user_id'], ['user.id'], name=op.f('fk_comment_user_id_user'), ondelete='CASCADE'),
sa.PrimaryKeyConstraint('id', name=op.f('pk_comment'))
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('comment')
# ### end Alembic commands ###
|
py | 1a39774a9e00876e8cef7f4515e2b36cd0f3f119 | """
Django settings for djangobackend project.
Generated by 'django-admin startproject' using Django 3.1.3.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
import os
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'ao5z(o(z@cvzodm99d32jkxa5e8a1!q_4sqss5-a%n6tg$#h$+'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ["dealer55555.eu-gb.mybluemix.net","127.0.0.1"]
# Application definition
INSTALLED_APPS = [
'djangoapp.apps.DjangoappConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'djangobackend.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.media',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'djangobackend.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
MEDIA_ROOT = os.path.join(STATIC_ROOT, 'media')
MEDIA_URL = '/media/'
|
py | 1a39780788a5a6130c61ba52a7f57e3860e8737b | import blspy
from bitchia.types.blockchain_format.sized_bytes import bytes32
def std_hash(b) -> bytes32:
"""
The standard hash used in many places.
"""
return bytes32(blspy.Util.hash256(bytes(b)))
|
py | 1a3978f0df9c0c44fe8f2c5adb51a9a2f39c2674 | print('\033[1;31m comparing values \033[m')
ja_chega = float(input('enter a value'))
desisto = float(input('enter a value '))
if ja_chega > desisto:
    print('the first value is greater !!')
elif desisto > ja_chega:
    print('the second value is greater !!')
else:
    print('the two values are equal')
escolha4 = ''
while escolha4 != 'sim' and escolha4 != 'nao':
    escolha4 = str(input('do you want to run again [sim/nao]?')).lower()
    if escolha4 == 'sim':
        import jogo_do_tio_Dodo
    if escolha4 == 'nao':
        print('thank you for using our services')
        break |
py | 1a397bedc00aafa0e4259b4ce9c743c1f0789462 | import torch
import torch.nn as nn
#from .utils import load_state_dict_from_url
__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101',
'resnet152', 'resnext50_32x4d', 'resnext101_32x8d',
'wide_resnet50_2', 'wide_resnet101_2']
model_urls = {
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
'resnext50_32x4d': 'https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth',
'resnext101_32x8d': 'https://download.pytorch.org/models/resnext101_32x8d-8ba56ff5.pth',
'wide_resnet50_2': 'https://download.pytorch.org/models/wide_resnet50_2-95faca4d.pth',
'wide_resnet101_2': 'https://download.pytorch.org/models/wide_resnet101_2-32ee1156.pth',
}
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=dilation, groups=groups, bias=False, dilation=dilation)
def conv1x1(in_planes, out_planes, stride=1):
"""1x1 convolution"""
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
class BasicBlock(nn.Module):
expansion = 1
__constants__ = ['downsample']
def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
base_width=64, dilation=1, norm_layer=None):
super(BasicBlock, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
if groups != 1 or base_width != 64:
raise ValueError('BasicBlock only supports groups=1 and base_width=64')
if dilation > 1:
raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
# Both self.conv1 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = norm_layer(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = norm_layer(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
__constants__ = ['downsample']
def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
base_width=64, dilation=1, norm_layer=None):
super(Bottleneck, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
width = int(planes * (base_width / 64.)) * groups
# Both self.conv2 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv1x1(inplanes, width)
self.bn1 = norm_layer(width)
self.conv2 = conv3x3(width, width, stride, groups, dilation)
self.bn2 = norm_layer(width)
self.conv3 = conv1x1(width, planes * self.expansion)
self.bn3 = norm_layer(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, block, layers, num_classes=1000, zero_init_residual=False,
groups=1, width_per_group=64, replace_stride_with_dilation=None,
norm_layer=None):
super(ResNet, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
self._norm_layer = norm_layer
self.inplanes = 64
self.dilation = 1
if replace_stride_with_dilation is None:
# each element in the tuple indicates if we should replace
# the 2x2 stride with a dilated convolution instead
replace_stride_with_dilation = [False, False, False]
if len(replace_stride_with_dilation) != 3:
raise ValueError("replace_stride_with_dilation should be None "
"or a 3-element tuple, got {}".format(replace_stride_with_dilation))
self.groups = groups
self.base_width = width_per_group
self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = norm_layer(self.inplanes)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2,
dilate=replace_stride_with_dilation[0])
self.layer3 = self._make_layer(block, 256, layers[2], stride=2,
dilate=replace_stride_with_dilation[1])
self.layer4 = self._make_layer(block, 512, layers[3], stride=2,
dilate=replace_stride_with_dilation[2])
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
print("current num class : {}".format(num_classes))
self.fc = nn.Linear(512 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
# Zero-initialize the last BN in each residual branch,
# so that the residual branch starts with zeros, and each residual block behaves like an identity.
# This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
if zero_init_residual:
for m in self.modules():
if isinstance(m, Bottleneck):
nn.init.constant_(m.bn3.weight, 0)
elif isinstance(m, BasicBlock):
nn.init.constant_(m.bn2.weight, 0)
def _make_layer(self, block, planes, blocks, stride=1, dilate=False):
norm_layer = self._norm_layer
downsample = None
previous_dilation = self.dilation
if dilate:
self.dilation *= stride
stride = 1
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
conv1x1(self.inplanes, planes * block.expansion, stride),
norm_layer(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample, self.groups,
self.base_width, previous_dilation, norm_layer))
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(block(self.inplanes, planes, groups=self.groups,
base_width=self.base_width, dilation=self.dilation,
norm_layer=norm_layer))
return nn.Sequential(*layers)
def _forward_impl(self, x):
# See note [TorchScript super()]
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = torch.flatten(x, 1)
x = self.fc(x)
return x
def forward(self, x):
return self._forward_impl(x)
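# Shape note (added for clarity; not part of the original file): with the
# default configuration the network expects NCHW input such as
# (batch, 3, 224, 224) and returns logits of shape (batch, num_classes);
# see the test() helper at the bottom of this file for a runnable check.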
def _resnet(arch, block, layers, num_classes, pretrained, progress, **kwargs):
model = ResNet(block, layers, num_classes, **kwargs)
# if pretrained:
# state_dict = load_state_dict_from_url(model_urls[arch],
# progress=progress)
# model.load_state_dict(state_dict)
return model
def resnet18(num_classes=1000,pretrained=False, progress=True, **kwargs):
r"""ResNet-18 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet18', BasicBlock, [2, 2, 2, 2], num_classes,pretrained, progress,
**kwargs)
'''
def resnet34(pretrained=False, progress=True, **kwargs):
r"""ResNet-34 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet34', BasicBlock, [3, 4, 6, 3], pretrained, progress,
**kwargs)
def resnet50(pretrained=False, progress=True, **kwargs):
r"""ResNet-50 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet50', Bottleneck, [3, 4, 6, 3], pretrained, progress,
**kwargs)
def resnet101(pretrained=False, progress=True, **kwargs):
r"""ResNet-101 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet101', Bottleneck, [3, 4, 23, 3], pretrained, progress,
**kwargs)
def resnet152(pretrained=False, progress=True, **kwargs):
r"""ResNet-152 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet152', Bottleneck, [3, 8, 36, 3], pretrained, progress,
**kwargs)
def resnext50_32x4d(pretrained=False, progress=True, **kwargs):
r"""ResNeXt-50 32x4d model from
`"Aggregated Residual Transformation for Deep Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
kwargs['groups'] = 32
kwargs['width_per_group'] = 4
return _resnet('resnext50_32x4d', Bottleneck, [3, 4, 6, 3],
pretrained, progress, **kwargs)
def resnext101_32x8d(pretrained=False, progress=True, **kwargs):
r"""ResNeXt-101 32x8d model from
`"Aggregated Residual Transformation for Deep Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
kwargs['groups'] = 32
kwargs['width_per_group'] = 8
return _resnet('resnext101_32x8d', Bottleneck, [3, 4, 23, 3],
pretrained, progress, **kwargs)
def wide_resnet50_2(pretrained=False, progress=True, **kwargs):
r"""Wide ResNet-50-2 model from
`"Wide Residual Networks" <https://arxiv.org/pdf/1605.07146.pdf>`_
The model is the same as ResNet except for the bottleneck number of channels
which is twice larger in every block. The number of channels in outer 1x1
convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048
channels, and in Wide ResNet-50-2 has 2048-1024-2048.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
kwargs['width_per_group'] = 64 * 2
return _resnet('wide_resnet50_2', Bottleneck, [3, 4, 6, 3],
pretrained, progress, **kwargs)
def wide_resnet101_2(pretrained=False, progress=True, **kwargs):
r"""Wide ResNet-101-2 model from
`"Wide Residual Networks" <https://arxiv.org/pdf/1605.07146.pdf>`_
The model is the same as ResNet except for the bottleneck number of channels
which is twice larger in every block. The number of channels in outer 1x1
convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048
channels, and in Wide ResNet-50-2 has 2048-1024-2048.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
kwargs['width_per_group'] = 64 * 2
return _resnet('wide_resnet101_2', Bottleneck, [3, 4, 23, 3],
pretrained, progress, **kwargs)
'''
def test():
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
net = resnet18(num_classes=6).to(device)
y = net(torch.randn(1,3,224,224).to(device))
print(y.size())
#test() |
py | 1a397c7aecd8b08d6935ceda241f17185f05ba01 | # Copyright 2012-2019 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from . import mlog
import pickle, os, uuid, shlex
import sys
from itertools import chain
from pathlib import PurePath
from collections import OrderedDict
from .mesonlib import (
MesonException, MachineChoice, PerMachine,
default_libdir, default_libexecdir, default_prefix
)
from .wrap import WrapMode
import ast
import argparse
import configparser
from typing import (
Any, Dict, Generic, Iterable, List, Optional, Type, TypeVar, Union
)
import typing
import enum
if typing.TYPE_CHECKING:
from . import dependencies
version = '0.51.999'
backendlist = ['ninja', 'vs', 'vs2010', 'vs2015', 'vs2017', 'vs2019', 'xcode']
default_yielding = False
# Can't bind this near the class method it seems, sadly.
_T = TypeVar('_T')
class UserOption(Generic[_T]):
def __init__(self, description, choices, yielding):
super().__init__()
self.choices = choices
self.description = description
if yielding is None:
yielding = default_yielding
if not isinstance(yielding, bool):
raise MesonException('Value of "yielding" must be a boolean.')
self.yielding = yielding
def printable_value(self):
return self.value
# Check that the input is a valid value and return the
# "cleaned" or "native" version. For example the Boolean
# option could take the string "true" and return True.
def validate_value(self, value: Any) -> _T:
raise RuntimeError('Derived option class did not override validate_value.')
def set_value(self, newvalue):
self.value = self.validate_value(newvalue)
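# Sketch of the pattern shared by the option classes below (added for clarity;
# not part of the original Meson source): each subclass overrides
# validate_value() to coerce and check raw input, and set_value() always routes
# through it, e.g. a UserBooleanOption accepts the strings 'true'/'false' as
# well as real booleans.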
class UserStringOption(UserOption[str]):
def __init__(self, description, value, choices=None, yielding=None):
super().__init__(description, choices, yielding)
self.set_value(value)
def validate_value(self, value):
if not isinstance(value, str):
raise MesonException('Value "%s" for string option is not a string.' % str(value))
return value
class UserBooleanOption(UserOption[bool]):
def __init__(self, description, value, yielding=None):
super().__init__(description, [True, False], yielding)
self.set_value(value)
def __bool__(self) -> bool:
return self.value
def validate_value(self, value) -> bool:
if isinstance(value, bool):
return value
if value.lower() == 'true':
return True
if value.lower() == 'false':
return False
raise MesonException('Value %s is not boolean (true or false).' % value)
class UserIntegerOption(UserOption[int]):
def __init__(self, description, min_value, max_value, value, yielding=None):
super().__init__(description, [True, False], yielding)
self.min_value = min_value
self.max_value = max_value
self.set_value(value)
c = []
if min_value is not None:
c.append('>=' + str(min_value))
if max_value is not None:
c.append('<=' + str(max_value))
self.choices = ', '.join(c)
def validate_value(self, value) -> int:
if isinstance(value, str):
value = self.toint(value)
if not isinstance(value, int):
raise MesonException('New value for integer option is not an integer.')
if self.min_value is not None and value < self.min_value:
raise MesonException('New value %d is less than minimum value %d.' % (value, self.min_value))
if self.max_value is not None and value > self.max_value:
raise MesonException('New value %d is more than maximum value %d.' % (value, self.max_value))
return value
def toint(self, valuestring) -> int:
try:
return int(valuestring)
except ValueError:
raise MesonException('Value string "%s" is not convertable to an integer.' % valuestring)
class UserUmaskOption(UserIntegerOption, UserOption[Union[str, int]]):
def __init__(self, description, value, yielding=None):
super().__init__(description, 0, 0o777, value, yielding)
self.choices = ['preserve', '0000-0777']
def printable_value(self):
if self.value == 'preserve':
return self.value
return format(self.value, '04o')
def validate_value(self, value):
if value is None or value == 'preserve':
return 'preserve'
return super().validate_value(value)
def toint(self, valuestring):
try:
return int(valuestring, 8)
except ValueError as e:
raise MesonException('Invalid mode: {}'.format(e))
class UserComboOption(UserOption[str]):
def __init__(self, description, choices: List[str], value, yielding=None):
super().__init__(description, choices, yielding)
if not isinstance(self.choices, list):
raise MesonException('Combo choices must be an array.')
for i in self.choices:
if not isinstance(i, str):
raise MesonException('Combo choice elements must be strings.')
self.set_value(value)
def validate_value(self, value):
if value not in self.choices:
optionsstring = ', '.join(['"%s"' % (item,) for item in self.choices])
raise MesonException('Value "%s" for combo option is not one of the choices. Possible choices are: %s.' % (value, optionsstring))
return value
class UserArrayOption(UserOption[List[str]]):
def __init__(self, description, value, shlex_split=False, user_input=False, allow_dups=False, **kwargs):
super().__init__(description, kwargs.get('choices', []), yielding=kwargs.get('yielding', None))
self.shlex_split = shlex_split
self.allow_dups = allow_dups
self.value = self.validate_value(value, user_input=user_input)
def validate_value(self, value, user_input=True) -> List[str]:
# User input is for options defined on the command line (via -D
# options). Users can put their input in as a comma separated
# string, but for defining options in meson_options.txt the format
# should match that of a combo
if not user_input and isinstance(value, str) and not value.startswith('['):
raise MesonException('Value does not define an array: ' + value)
if isinstance(value, str):
if value.startswith('['):
newvalue = ast.literal_eval(value)
elif value == '':
newvalue = []
else:
if self.shlex_split:
newvalue = shlex.split(value)
else:
newvalue = [v.strip() for v in value.split(',')]
elif isinstance(value, list):
newvalue = value
else:
raise MesonException('"{0}" should be a string array, but it is not'.format(str(newvalue)))
if not self.allow_dups and len(set(newvalue)) != len(newvalue):
msg = 'Duplicated values in array option is deprecated. ' \
'This will become a hard error in the future.'
mlog.deprecation(msg)
for i in newvalue:
if not isinstance(i, str):
raise MesonException('String array element "{0}" is not a string.'.format(str(newvalue)))
if self.choices:
bad = [x for x in newvalue if x not in self.choices]
if bad:
raise MesonException('Options "{}" are not in allowed choices: "{}"'.format(
', '.join(bad), ', '.join(self.choices)))
return newvalue
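# Sketch of the two accepted spellings (added for clarity; not part of the
# original Meson source): on the command line an array option may be given as
#   -Dfoo=one,two            (comma separated, user_input=True)
# while meson_options.txt must use the literal list form
#   ['one', 'two']
# both of which validate_value() above normalises to a Python list of strings.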
class UserFeatureOption(UserComboOption):
static_choices = ['enabled', 'disabled', 'auto']
def __init__(self, description, value, yielding=None):
super().__init__(description, self.static_choices, value, yielding)
def is_enabled(self):
return self.value == 'enabled'
def is_disabled(self):
return self.value == 'disabled'
def is_auto(self):
return self.value == 'auto'
def load_configs(filenames: List[str]) -> configparser.ConfigParser:
"""Load configuration files from a named subdirectory."""
config = configparser.ConfigParser()
config.read(filenames)
return config
if typing.TYPE_CHECKING:
CacheKeyType = typing.Tuple[typing.Tuple[typing.Any, ...], ...]
SubCacheKeyType = typing.Tuple[typing.Any, ...]
class DependencyCacheType(enum.Enum):
OTHER = 0
PKG_CONFIG = 1
CMAKE = 2
@classmethod
def from_type(cls, dep: 'dependencies.Dependency') -> 'DependencyCacheType':
from . import dependencies
# As more types gain search overrides they'll need to be added here
if isinstance(dep, dependencies.PkgConfigDependency):
return cls.PKG_CONFIG
if isinstance(dep, dependencies.CMakeDependency):
return cls.CMAKE
return cls.OTHER
class DependencySubCache:
def __init__(self, type_: DependencyCacheType):
self.types = [type_]
self.__cache = {} # type: typing.Dict[SubCacheKeyType, dependencies.Dependency]
def __getitem__(self, key: 'SubCacheKeyType') -> 'dependencies.Dependency':
return self.__cache[key]
def __setitem__(self, key: 'SubCacheKeyType', value: 'dependencies.Dependency') -> None:
self.__cache[key] = value
def __contains__(self, key: 'SubCacheKeyType') -> bool:
return key in self.__cache
def values(self) -> typing.Iterable['dependencies.Dependency']:
return self.__cache.values()
class DependencyCache:
"""Class that stores a cache of dependencies.
This class is meant to encapsulate the fact that we need multiple keys to
successfully lookup by providing a simple get/put interface.
"""
def __init__(self, builtins_per_machine: PerMachine[typing.Dict[str, UserOption[typing.Any]]], for_machine: MachineChoice):
self.__cache = OrderedDict() # type: typing.MutableMapping[CacheKeyType, DependencySubCache]
self.__builtins_per_machine = builtins_per_machine
self.__for_machine = for_machine
def __calculate_subkey(self, type_: DependencyCacheType) -> typing.Tuple[typing.Any, ...]:
if type_ is DependencyCacheType.PKG_CONFIG:
return tuple(self.__builtins_per_machine[self.__for_machine]['pkg_config_path'].value)
elif type_ is DependencyCacheType.CMAKE:
return tuple(self.__builtins_per_machine[self.__for_machine]['cmake_prefix_path'].value)
assert type_ is DependencyCacheType.OTHER, 'Someone forgot to update subkey calculations for a new type'
return tuple()
def __iter__(self) -> typing.Iterator['CacheKeyType']:
return self.keys()
def put(self, key: 'CacheKeyType', dep: 'dependencies.Dependency') -> None:
t = DependencyCacheType.from_type(dep)
if key not in self.__cache:
self.__cache[key] = DependencySubCache(t)
subkey = self.__calculate_subkey(t)
self.__cache[key][subkey] = dep
def get(self, key: 'CacheKeyType') -> typing.Optional['dependencies.Dependency']:
"""Get a value from the cache.
If there is no cache entry then None will be returned.
"""
try:
val = self.__cache[key]
except KeyError:
return None
for t in val.types:
subkey = self.__calculate_subkey(t)
try:
return val[subkey]
except KeyError:
pass
return None
def values(self) -> typing.Iterator['dependencies.Dependency']:
for c in self.__cache.values():
yield from c.values()
def keys(self) -> typing.Iterator['CacheKeyType']:
return iter(self.__cache.keys())
def items(self) -> typing.Iterator[typing.Tuple['CacheKeyType', typing.List['dependencies.Dependency']]]:
for k, v in self.__cache.items():
vs = []
for t in v.types:
subkey = self.__calculate_subkey(t)
if subkey in v:
vs.append(v[subkey])
yield k, vs
def clear(self) -> None:
self.__cache.clear()
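# Sketch of the intended DependencyCache usage (added for clarity; not part of
# the original Meson source -- ``key`` and ``dep`` below are hypothetical):
#   cache = DependencyCache(coredata.builtins_per_machine, MachineChoice.HOST)
#   cache.put(key, dep)    # stored under a sub-key derived from the dependency type
#   hit = cache.get(key)   # returns None on a miss
# Because the sub-key is built from options such as 'pkg_config_path', changing
# that option silently invalidates only the matching (e.g. pkg-config) lookups.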
# Can't bind this near the class method it seems, sadly.
_V = TypeVar('_V')
# This class contains all data that must persist over multiple
# invocations of Meson. It is roughly the same thing as
# cmakecache.
class CoreData:
def __init__(self, options: argparse.Namespace, scratch_dir: str):
self.lang_guids = {
'default': '8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942',
'c': '8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942',
'cpp': '8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942',
'test': '3AC096D0-A1C2-E12C-1390-A8335801FDAB',
'directory': '2150E333-8FDC-42A3-9474-1A3956D46DE8',
}
self.test_guid = str(uuid.uuid4()).upper()
self.regen_guid = str(uuid.uuid4()).upper()
self.install_guid = str(uuid.uuid4()).upper()
self.target_guids = {}
self.version = version
self.init_builtins()
self.backend_options = {} # : Dict[str, UserOption]
self.user_options = {} # : Dict[str, UserOption]
self.compiler_options = PerMachine({}, {})
self.base_options = {} # : Dict[str, UserOption]
self.cross_files = self.__load_config_files(options, scratch_dir, 'cross')
self.compilers = PerMachine(OrderedDict(), OrderedDict())
build_cache = DependencyCache(self.builtins_per_machine, MachineChoice.BUILD)
        host_cache = DependencyCache(self.builtins_per_machine, MachineChoice.HOST)
self.deps = PerMachine(build_cache, host_cache) # type: PerMachine[DependencyCache]
self.compiler_check_cache = OrderedDict()
# Only to print a warning if it changes between Meson invocations.
self.config_files = self.__load_config_files(options, scratch_dir, 'native')
self.libdir_cross_fixup()
@staticmethod
def __load_config_files(options: argparse.Namespace, scratch_dir: str, ftype: str) -> List[str]:
# Need to try and make the passed filenames absolute because when the
# files are parsed later we'll have chdir()d.
if ftype == 'cross':
filenames = options.cross_file
else:
filenames = options.native_file
if not filenames:
return []
real = []
for i, f in enumerate(filenames):
f = os.path.expanduser(os.path.expandvars(f))
if os.path.exists(f):
if os.path.isfile(f):
real.append(os.path.abspath(f))
elif os.path.isdir(f):
raise MesonException('Cross and native files must not be directories')
else:
# in this case we've been passed some kind of pipe, copy
# the contents of that file into the meson private (scratch)
# directory so that it can be re-read when wiping/reconfiguring
copy = os.path.join(scratch_dir, '{}.{}.ini'.format(uuid.uuid4(), ftype))
with open(f, 'r') as rf:
with open(copy, 'w') as wf:
wf.write(rf.read())
real.append(copy)
# Also replace the command line argument, as the pipe
                    # probably won't exist on reconfigure
filenames[i] = copy
continue
elif sys.platform != 'win32':
paths = [
os.environ.get('XDG_DATA_HOME', os.path.expanduser('~/.local/share')),
] + os.environ.get('XDG_DATA_DIRS', '/usr/local/share:/usr/share').split(':')
for path in paths:
path_to_try = os.path.join(path, 'meson', ftype, f)
if os.path.isfile(path_to_try):
real.append(path_to_try)
break
else:
raise MesonException('Cannot find specified {} file: {}'.format(ftype, f))
continue
raise MesonException('Cannot find specified {} file: {}'.format(ftype, f))
return real
def libdir_cross_fixup(self):
# By default set libdir to "lib" when cross compiling since
# getting the "system default" is always wrong on multiarch
# platforms as it gets a value like lib/x86_64-linux-gnu.
if self.cross_files:
self.builtins['libdir'].value = 'lib'
def sanitize_prefix(self, prefix):
prefix = os.path.expanduser(prefix)
if not os.path.isabs(prefix):
raise MesonException('prefix value {!r} must be an absolute path'
''.format(prefix))
if prefix.endswith('/') or prefix.endswith('\\'):
# On Windows we need to preserve the trailing slash if the
# string is of type 'C:\' because 'C:' is not an absolute path.
if len(prefix) == 3 and prefix[1] == ':':
pass
# If prefix is a single character, preserve it since it is
# the root directory.
elif len(prefix) == 1:
pass
else:
prefix = prefix[:-1]
return prefix
def sanitize_dir_option_value(self, prefix, option, value):
'''
If the option is an installation directory option and the value is an
absolute path, check that it resides within prefix and return the value
as a path relative to the prefix.
This way everyone can do f.ex, get_option('libdir') and be sure to get
the library directory relative to prefix.
'''
if option.endswith('dir') and os.path.isabs(value) and \
option not in builtin_dir_noprefix_options:
# Value must be a subdir of the prefix
# commonpath will always return a path in the native format, so we
# must use pathlib.PurePath to do the same conversion before
# comparing.
if os.path.commonpath([value, prefix]) != str(PurePath(prefix)):
m = 'The value of the {!r} option is {!r} which must be a ' \
'subdir of the prefix {!r}.\nNote that if you pass a ' \
'relative path, it is assumed to be a subdir of prefix.'
raise MesonException(m.format(option, value, prefix))
# Convert path to be relative to prefix
skip = len(prefix) + 1
value = value[skip:]
return value
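    # Example of the behaviour described above (added for clarity; not part of
    # the original source): with prefix='/usr' and option='libdir',
    #   value '/usr/lib/x86_64-linux-gnu' is returned as 'lib/x86_64-linux-gnu',
    #   while value '/opt/lib' raises a MesonException because it is outside prefix.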
def init_builtins(self):
# Create builtin options with default values
self.builtins = {}
for key, opt in builtin_options.items():
self.builtins[key] = opt.init_option()
self.builtins_per_machine = PerMachine({}, {})
for for_machine in iter(MachineChoice):
for key, opt in builtin_options_per_machine.items():
self.builtins_per_machine[for_machine][key] = opt.init_option()
def init_backend_options(self, backend_name):
if backend_name == 'ninja':
self.backend_options['backend_max_links'] = \
UserIntegerOption(
'Maximum number of linker processes to run or 0 for no '
'limit',
0, None, 0)
elif backend_name.startswith('vs'):
self.backend_options['backend_startup_project'] = \
UserStringOption(
'Default project to execute in Visual Studio',
'')
def get_builtin_option(self, optname):
for opts in self._get_all_builtin_options():
v = opts.get(optname)
if v is None:
continue
if optname == 'wrap_mode':
return WrapMode.from_string(v.value)
return v.value
raise RuntimeError('Tried to get unknown builtin option %s.' % optname)
def _try_set_builtin_option(self, optname, value):
for opts in self._get_all_builtin_options():
opt = opts.get(optname)
if opt is None:
continue
if optname == 'prefix':
value = self.sanitize_prefix(value)
else:
prefix = self.builtins['prefix'].value
value = self.sanitize_dir_option_value(prefix, optname, value)
break
else:
return False
opt.set_value(value)
# Make sure that buildtype matches other settings.
if optname == 'buildtype':
self.set_others_from_buildtype(value)
else:
self.set_buildtype_from_others()
return True
def set_builtin_option(self, optname, value):
res = self._try_set_builtin_option(optname, value)
if not res:
raise RuntimeError('Tried to set unknown builtin option %s.' % optname)
def set_others_from_buildtype(self, value):
if value == 'plain':
opt = '0'
debug = False
elif value == 'debug':
opt = '0'
debug = True
elif value == 'debugoptimized':
opt = '2'
debug = True
elif value == 'release':
opt = '3'
debug = False
elif value == 'minsize':
opt = 's'
debug = True
else:
assert(value == 'custom')
return
self.builtins['optimization'].set_value(opt)
self.builtins['debug'].set_value(debug)
def set_buildtype_from_others(self):
opt = self.builtins['optimization'].value
debug = self.builtins['debug'].value
if opt == '0' and not debug:
mode = 'plain'
elif opt == '0' and debug:
mode = 'debug'
elif opt == '2' and debug:
mode = 'debugoptimized'
elif opt == '3' and not debug:
mode = 'release'
elif opt == 's' and debug:
mode = 'minsize'
else:
mode = 'custom'
self.builtins['buildtype'].set_value(mode)
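    # Summary of the mapping used by the two methods above (added for clarity;
    # not part of the original source):
    #   buildtype        optimization  debug
    #   plain            0             False
    #   debug            0             True
    #   debugoptimized   2             True
    #   release          3             False
    #   minsize          s             True
    # Any other combination maps back to 'custom'.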
@staticmethod
def get_prefixed_options_per_machine(
options_per_machine # : PerMachine[Dict[str, _V]]]
) -> Iterable[Dict[str, _V]]:
for for_machine in iter(MachineChoice):
prefix = for_machine.get_prefix()
yield {
prefix + k: v
for k, v in options_per_machine[for_machine].items()
}
def _get_all_nonbuiltin_options(self) -> Iterable[Dict[str, UserOption]]:
yield self.backend_options
yield self.user_options
yield from self.get_prefixed_options_per_machine(self.compiler_options)
yield self.base_options
def _get_all_builtin_options(self) -> Dict[str, UserOption]:
yield from self.get_prefixed_options_per_machine(self.builtins_per_machine)
yield self.builtins
def get_all_options(self) -> Dict[str, UserOption]:
yield from self._get_all_nonbuiltin_options()
yield from self._get_all_builtin_options()
def validate_option_value(self, option_name, override_value):
for opts in self.get_all_options():
opt = opts.get(option_name)
if opt is not None:
try:
return opt.validate_value(override_value)
except MesonException as e:
raise type(e)(('Validation failed for option %s: ' % option_name) + str(e)) \
                        .with_traceback(sys.exc_info()[2])
else:
raise MesonException('Tried to validate unknown option %s.' % option_name)
def get_external_args(self, for_machine: MachineChoice, lang):
return self.compiler_options[for_machine][lang + '_args'].value
def get_external_link_args(self, for_machine: MachineChoice, lang):
return self.compiler_options[for_machine][lang + '_link_args'].value
def merge_user_options(self, options):
for (name, value) in options.items():
if name not in self.user_options:
self.user_options[name] = value
else:
oldval = self.user_options[name]
if type(oldval) != type(value):
self.user_options[name] = value
def set_options(self, options, subproject='', warn_unknown=True):
# Set prefix first because it's needed to sanitize other options
prefix = self.builtins['prefix'].value
if 'prefix' in options:
prefix = self.sanitize_prefix(options['prefix'])
self.builtins['prefix'].set_value(prefix)
for key in builtin_dir_noprefix_options:
if key not in options:
self.builtins[key].set_value(builtin_options[key].prefixed_default(key, prefix))
unknown_options = []
for k, v in options.items():
if k == 'prefix':
continue
if self._try_set_builtin_option(k, v):
continue
for opts in self._get_all_nonbuiltin_options():
tgt = opts.get(k)
if tgt is None:
continue
tgt.set_value(v)
break
else:
unknown_options.append(k)
if unknown_options and warn_unknown:
unknown_options = ', '.join(sorted(unknown_options))
sub = 'In subproject {}: '.format(subproject) if subproject else ''
mlog.warning('{}Unknown options: "{}"'.format(sub, unknown_options))
def set_default_options(self, default_options, subproject, env):
# Set defaults first from conf files (cross or native), then
        # override them as necessary.
for k, v in env.paths.host:
if v is not None:
env.cmd_line_options.setdefault(k, v)
# Set default options as if they were passed to the command line.
# Subprojects can only define default for user options.
from . import optinterpreter
for k, v in default_options.items():
if subproject:
if optinterpreter.is_invalid_name(k, log=False):
continue
k = subproject + ':' + k
env.cmd_line_options.setdefault(k, v)
# Create a subset of cmd_line_options, keeping only options for this
# subproject. Also take builtin options if it's the main project.
# Language and backend specific options will be set later when adding
# languages and setting the backend (builtin options must be set first
# to know which backend we'll use).
options = {}
# Some options default to environment variables if they are
# unset, set those now. These will either be overwritten
# below, or they won't. These should only be set on the first run.
if env.first_invocation:
p_env = os.environ.get('PKG_CONFIG_PATH')
if p_env:
options['pkg_config_path'] = p_env.split(':')
for k, v in env.cmd_line_options.items():
if subproject:
if not k.startswith(subproject + ':'):
continue
elif k not in builtin_options.keys() \
and 'build.' + k not in builtin_options_per_machine.keys() \
and k not in builtin_options_per_machine.keys():
if ':' in k:
continue
if optinterpreter.is_invalid_name(k, log=False):
continue
options[k] = v
self.set_options(options, subproject)
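    # Example (added for clarity; not part of the original source): if a
    # subproject named 'zlib' declares default_options {'warning_level': '2'},
    # the key is rewritten to 'zlib:warning_level' before being merged with
    # setdefault(), so a user-supplied -Dzlib:warning_level=0 still takes
    # precedence over the subproject default.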
def process_new_compiler(self, lang: str, comp, env):
from . import compilers
self.compilers[comp.for_machine][lang] = comp
optprefix = lang + '_'
for k, o in comp.get_and_default_options(env.properties[comp.for_machine]).items():
if not k.startswith(optprefix):
raise MesonException('Internal error, %s has incorrect prefix.' % k)
# prefixed compiler options affect just this machine
opt_prefix = comp.for_machine.get_prefix()
if opt_prefix + k in env.cmd_line_options:
o.set_value(env.cmd_line_options[opt_prefix + k])
self.compiler_options[comp.for_machine].setdefault(k, o)
enabled_opts = []
for optname in comp.base_options:
if optname in self.base_options:
continue
oobj = compilers.base_options[optname]
if optname in env.cmd_line_options:
oobj.set_value(env.cmd_line_options[optname])
enabled_opts.append(optname)
self.base_options[optname] = oobj
self.emit_base_options_warnings(enabled_opts)
def emit_base_options_warnings(self, enabled_opts: list):
if 'b_bitcode' in enabled_opts:
            mlog.warning('Base option \'b_bitcode\' is enabled, which is incompatible with many linker options. Incompatible options such as \'b_asneeded\' have been disabled.')
mlog.warning('Please see https://mesonbuild.com/Builtin-options.html#Notes_about_Apple_Bitcode_support for more details.')
class CmdLineFileParser(configparser.ConfigParser):
def __init__(self):
# We don't want ':' as key delimiter, otherwise it would break when
# storing subproject options like "subproject:option=value"
super().__init__(delimiters=['='])
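# Illustrative sketch (added for clarity, not part of upstream Meson): with '=' as the
# only delimiter, a per-subproject key such as "mysub:default_library" is kept intact
# instead of being split at the colon. The option name and value below are hypothetical.
def _example_cmd_line_file_parsing():
    parser = CmdLineFileParser()
    parser.read_string('[options]\nmysub:default_library = static\n')
    return parser['options']['mysub:default_library']  # -> 'static'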
def get_cmd_line_file(build_dir):
return os.path.join(build_dir, 'meson-private', 'cmd_line.txt')
def read_cmd_line_file(build_dir, options):
filename = get_cmd_line_file(build_dir)
config = CmdLineFileParser()
config.read(filename)
# Do a copy because config is not really a dict. options.cmd_line_options
# overrides values from the file.
d = dict(config['options'])
d.update(options.cmd_line_options)
options.cmd_line_options = d
properties = config['properties']
if not options.cross_file:
options.cross_file = ast.literal_eval(properties.get('cross_file', '[]'))
if not options.native_file:
# This will be a string in the form: "['first', 'second', ...]", use
# literal_eval to get it into the list of strings.
options.native_file = ast.literal_eval(properties.get('native_file', '[]'))
def write_cmd_line_file(build_dir, options):
filename = get_cmd_line_file(build_dir)
config = CmdLineFileParser()
properties = {}
if options.cross_file:
properties['cross_file'] = options.cross_file
if options.native_file:
properties['native_file'] = options.native_file
config['options'] = options.cmd_line_options
config['properties'] = properties
with open(filename, 'w') as f:
config.write(f)
def update_cmd_line_file(build_dir, options):
filename = get_cmd_line_file(build_dir)
config = CmdLineFileParser()
config.read(filename)
config['options'].update(options.cmd_line_options)
with open(filename, 'w') as f:
config.write(f)
def major_versions_differ(v1, v2):
return v1.split('.')[0:2] != v2.split('.')[0:2]
def load(build_dir):
filename = os.path.join(build_dir, 'meson-private', 'coredata.dat')
load_fail_msg = 'Coredata file {!r} is corrupted. Try with a fresh build tree.'.format(filename)
try:
with open(filename, 'rb') as f:
obj = pickle.load(f)
except (pickle.UnpicklingError, EOFError):
raise MesonException(load_fail_msg)
except AttributeError:
raise MesonException(
"Coredata file {!r} references functions or classes that don't "
"exist. This probably means that it was generated with an old "
"version of meson.".format(filename))
if not isinstance(obj, CoreData):
raise MesonException(load_fail_msg)
if major_versions_differ(obj.version, version):
raise MesonException('Build directory has been generated with Meson version %s, '
'which is incompatible with current version %s.\n' %
(obj.version, version))
return obj
def save(obj, build_dir):
filename = os.path.join(build_dir, 'meson-private', 'coredata.dat')
prev_filename = filename + '.prev'
tempfilename = filename + '~'
if major_versions_differ(obj.version, version):
raise MesonException('Fatal version mismatch corruption.')
if os.path.exists(filename):
import shutil
shutil.copyfile(filename, prev_filename)
with open(tempfilename, 'wb') as f:
pickle.dump(obj, f)
f.flush()
os.fsync(f.fileno())
os.replace(tempfilename, filename)
return filename
def register_builtin_arguments(parser):
for n, b in builtin_options.items():
b.add_to_argparse(n, parser, '', '')
for n, b in builtin_options_per_machine.items():
b.add_to_argparse(n, parser, '', ' (just for host machine)')
b.add_to_argparse(n, parser, 'build.', ' (just for build machine)')
parser.add_argument('-D', action='append', dest='projectoptions', default=[], metavar="option",
help='Set the value of an option, can be used several times to set multiple options.')
def create_options_dict(options):
result = {}
for o in options:
try:
(key, value) = o.split('=', 1)
except ValueError:
raise MesonException('Option {!r} must have a value separated by equals sign.'.format(o))
result[key] = value
return result
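# Illustrative sketch (added, not part of upstream Meson): -D values are split on the
# first '=' only, so an option value may itself contain '=' characters. The option
# names and values below are made up.
def _example_create_options_dict():
    parsed = create_options_dict(['buildtype=release', 'cpp_args=-DFOO=1'])
    return parsed  # -> {'buildtype': 'release', 'cpp_args': '-DFOO=1'}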
def parse_cmd_line_options(args):
args.cmd_line_options = create_options_dict(args.projectoptions)
# Merge builtin options set with --option into the dict.
for name in chain(
builtin_options.keys(),
('build.' + k for k in builtin_options_per_machine.keys()),
builtin_options_per_machine.keys(),
):
value = getattr(args, name, None)
if value is not None:
if name in args.cmd_line_options:
cmdline_name = BuiltinOption.argparse_name_to_arg(name)
raise MesonException(
'Got argument {0} as both -D{0} and {1}. Pick one.'.format(name, cmdline_name))
args.cmd_line_options[name] = value
delattr(args, name)
_U = TypeVar('_U', bound=UserOption[_T])
class BuiltinOption(Generic[_T, _U]):
"""Class for a builtin option type.
Currently doesn't support UserIntegerOption, or a few other cases.
"""
def __init__(self, opt_type: Type[_U], description: str, default: Any, yielding: Optional[bool] = None, *,
choices: Any = None):
self.opt_type = opt_type
self.description = description
self.default = default
self.choices = choices
self.yielding = yielding
def init_option(self) -> _U:
"""Create an instance of opt_type and return it."""
keywords = {'yielding': self.yielding, 'value': self.default}
if self.choices:
keywords['choices'] = self.choices
return self.opt_type(self.description, **keywords)
def _argparse_action(self) -> Optional[str]:
if self.default is True:
return 'store_false'
elif self.default is False:
return 'store_true'
return None
def _argparse_choices(self) -> Any:
if self.opt_type is UserBooleanOption:
return [True, False]
elif self.opt_type is UserFeatureOption:
return UserFeatureOption.static_choices
return self.choices
@staticmethod
def argparse_name_to_arg(name: str) -> str:
if name == 'warning_level':
return '--warnlevel'
else:
return '--' + name.replace('_', '-')
def prefixed_default(self, name: str, prefix: str = '') -> Any:
if self.opt_type in [UserComboOption, UserIntegerOption]:
return self.default
try:
return builtin_dir_noprefix_options[name][prefix]
except KeyError:
pass
return self.default
def add_to_argparse(self, name: str, parser: argparse.ArgumentParser, prefix: str, help_suffix: str) -> None:
kwargs = {}
c = self._argparse_choices()
b = self._argparse_action()
h = self.description
if not b:
h = '{} (default: {}).'.format(h.rstrip('.'), self.prefixed_default(name))
else:
kwargs['action'] = b
if c and not b:
kwargs['choices'] = c
kwargs['default'] = argparse.SUPPRESS
kwargs['dest'] = prefix + name
cmdline_name = self.argparse_name_to_arg(prefix + name)
parser.add_argument(cmdline_name, help=h + help_suffix, **kwargs)
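# Illustrative sketch (added, not part of upstream Meson): how builtin option names map
# to command-line flag spellings, including the special-cased warning level and the
# per-machine 'build.' prefix.
def _example_argparse_names():
    assert BuiltinOption.argparse_name_to_arg('warning_level') == '--warnlevel'
    assert BuiltinOption.argparse_name_to_arg('default_library') == '--default-library'
    assert BuiltinOption.argparse_name_to_arg('build.pkg_config_path') == '--build.pkg-config-path'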
# Update `docs/markdown/Builtin-options.md` after changing the options below
builtin_options = OrderedDict([
# Directories
('prefix', BuiltinOption(UserStringOption, 'Installation prefix', default_prefix())),
('bindir', BuiltinOption(UserStringOption, 'Executable directory', 'bin')),
('datadir', BuiltinOption(UserStringOption, 'Data file directory', 'share')),
('includedir', BuiltinOption(UserStringOption, 'Header file directory', 'include')),
('infodir', BuiltinOption(UserStringOption, 'Info page directory', 'share/info')),
('libdir', BuiltinOption(UserStringOption, 'Library directory', default_libdir())),
('libexecdir', BuiltinOption(UserStringOption, 'Library executable directory', default_libexecdir())),
('localedir', BuiltinOption(UserStringOption, 'Locale data directory', 'share/locale')),
('localstatedir', BuiltinOption(UserStringOption, 'Localstate data directory', 'var')),
('mandir', BuiltinOption(UserStringOption, 'Manual page directory', 'share/man')),
('sbindir', BuiltinOption(UserStringOption, 'System executable directory', 'sbin')),
('sharedstatedir', BuiltinOption(UserStringOption, 'Architecture-independent data directory', 'com')),
('sysconfdir', BuiltinOption(UserStringOption, 'Sysconf data directory', 'etc')),
# Core options
('auto_features', BuiltinOption(UserFeatureOption, "Override value of all 'auto' features", 'auto')),
('backend', BuiltinOption(UserComboOption, 'Backend to use', 'ninja', choices=backendlist)),
('buildtype', BuiltinOption(UserComboOption, 'Build type to use', 'debug',
choices=['plain', 'debug', 'debugoptimized', 'release', 'minsize', 'custom'])),
('debug', BuiltinOption(UserBooleanOption, 'Debug', True)),
('default_library', BuiltinOption(UserComboOption, 'Default library type', 'shared', choices=['shared', 'static', 'both'])),
('errorlogs', BuiltinOption(UserBooleanOption, "Whether to print the logs from failing tests", True)),
('install_umask', BuiltinOption(UserUmaskOption, 'Default umask to apply on permissions of installed files', '022')),
('layout', BuiltinOption(UserComboOption, 'Build directory layout', 'mirror', choices=['mirror', 'flat'])),
('optimization', BuiltinOption(UserComboOption, 'Optimization level', '0', choices=['0', 'g', '1', '2', '3', 's'])),
('stdsplit', BuiltinOption(UserBooleanOption, 'Split stdout and stderr in test logs', True)),
('strip', BuiltinOption(UserBooleanOption, 'Strip targets on install', False)),
('unity', BuiltinOption(UserComboOption, 'Unity build', 'off', choices=['on', 'off', 'subprojects'])),
('warning_level', BuiltinOption(UserComboOption, 'Compiler warning level to use', '1', choices=['0', '1', '2', '3'])),
('werror', BuiltinOption(UserBooleanOption, 'Treat warnings as errors', False)),
('wrap_mode', BuiltinOption(UserComboOption, 'Wrap mode', 'default', choices=['default', 'nofallback', 'nodownload', 'forcefallback'])),
])
builtin_options_per_machine = OrderedDict([
('pkg_config_path', BuiltinOption(UserArrayOption, 'List of additional paths for pkg-config to search', [])),
('cmake_prefix_path', BuiltinOption(UserArrayOption, 'List of additional prefixes for cmake to search', [])),
])
# Special prefix-dependent defaults for installation directories that reside in
# a path outside of the prefix in FHS and common usage.
builtin_dir_noprefix_options = {
'sysconfdir': {'/usr': '/etc'},
'localstatedir': {'/usr': '/var', '/usr/local': '/var/local'},
'sharedstatedir': {'/usr': '/var/lib', '/usr/local': '/var/local/lib'},
}
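# Illustrative sketch (added, not part of upstream Meson): the table above overrides a
# directory default only for the prefixes it lists; any other prefix falls back to the
# option's ordinary default.
def _example_noprefix_default():
    sysconf = builtin_options['sysconfdir']
    assert sysconf.prefixed_default('sysconfdir', '/usr') == '/etc'
    assert sysconf.prefixed_default('sysconfdir', '/opt/meson') == 'etc'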
forbidden_target_names = {'clean': None,
'clean-ctlist': None,
'clean-gcno': None,
'clean-gcda': None,
'coverage': None,
'coverage-text': None,
'coverage-xml': None,
'coverage-html': None,
'phony': None,
'PHONY': None,
'all': None,
'test': None,
'benchmark': None,
'install': None,
'uninstall': None,
'build.ninja': None,
'scan-build': None,
'reconfigure': None,
'dist': None,
'distcheck': None,
}
|
py | 1a397d2da8e97d06e3654a0b41035f8f5a38bbd3 | # Copyright 2019 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module containing class for AWS's Athena EDW service."""
import datetime
import json
import logging
import os
from typing import Dict
from perfkitbenchmarker import data
from perfkitbenchmarker import edw_service
from perfkitbenchmarker import flags
from perfkitbenchmarker import providers
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.providers.aws import s3
from perfkitbenchmarker.providers.aws import util
AWS_ATHENA_CMD_PREFIX = ['aws', 'athena']
AWS_ATHENA_CMD_POSTFIX = ['--output', 'json']
# TODO(user): Derive the full table set from the TPC suite.
TPC_H_TABLES = [
'customer', 'lineitem', 'nation', 'orders', 'part', 'partsupp', 'region',
'supplier'
]
TPC_DS_TABLES = [
'call_center', 'catalog_page', 'catalog_returns', 'catalog_sales',
'customer', 'customer_address', 'customer_demographics', 'date_dim',
'dbgen_version', 'household_demographics', 'income_band', 'inventory',
'item', 'promotion', 'reason', 'ship_mode', 'store', 'store_returns',
'store_sales', 'time_dim', 'warehouse', 'web_page', 'web_returns',
'web_sales', 'web_site'
]
FLAGS = flags.FLAGS
class AthenaQueryError(RuntimeError):
pass
def GetAthenaClientInterface(
database: str, output_bucket: str) -> edw_service.EdwClientInterface:
"""Builds and Returns the requested Athena client Interface.
Args:
database: Name of the Athena database to execute queries against.
output_bucket: String name of the S3 bucket to store query output.
Returns:
A concrete Client Interface object (subclass of EdwClientInterface)
Raises:
RuntimeError: if an unsupported athena_client_interface is requested
"""
if FLAGS.athena_client_interface == 'CLI':
return CliClientInterface(database, output_bucket)
raise RuntimeError('Unknown Athena Client Interface requested.')
class CliClientInterface(edw_service.EdwClientInterface):
"""Command Line Client Interface class for Athena.
Uses the native Athena client available with the awscli
https://docs.aws.amazon.com/cli/latest/reference/athena/index.html.
Attributes:
database: String name of the Athena database to execute queries against.
output_bucket: String name of the S3 bucket to store query output.
"""
def __init__(self, database: str, output_bucket: str):
super(CliClientInterface, self).__init__()
self.database = database
self.output_bucket = 's3://%s' % output_bucket
def Prepare(self, benchmark_name: str) -> None:
"""Prepares the client vm to execute query.
    Installs the awscli dependencies and pushes the query execution framework to the VM.
Args:
benchmark_name: String name of the benchmark, to allow extraction and
usage of benchmark specific artifacts (certificates, etc.) during client
vm preparation.
"""
self.client_vm.Install('pip')
self.client_vm.RemoteCommand('sudo pip install absl-py')
for pkg in ('aws_credentials', 'awscli'):
self.client_vm.Install(pkg)
# Push the framework to execute a sql query and gather performance details.
service_specific_dir = os.path.join('edw', Athena.SERVICE_TYPE)
self.client_vm.PushFile(
data.ResourcePath(
os.path.join(service_specific_dir, 'script_runner.sh')))
runner_permission_update_cmd = 'chmod 755 {}'.format('script_runner.sh')
self.client_vm.RemoteCommand(runner_permission_update_cmd)
self.client_vm.PushFile(
data.ResourcePath(os.path.join('edw', 'script_driver.py')))
self.client_vm.PushFile(
data.ResourcePath(
os.path.join(service_specific_dir,
'provider_specific_script_driver.py')))
def ExecuteQuery(self, query_name) -> (float, Dict[str, str]):
"""Executes a query and returns performance details.
Args:
query_name: String name of the query to execute
Returns:
A tuple of (execution_time, run_metadata)
execution_time: A Float variable set to the query's completion time in
secs. -1.0 is used as a sentinel value implying the query failed. For a
successful query the value is expected to be positive.
run_metadata: A dictionary of query execution attributes eg. script name
"""
stdout, _ = self.client_vm.RemoteCommand(
'python script_driver.py --script={} --database={} --query_timeout={} '
'--athena_query_output_bucket={}'.format(query_name, self.database,
FLAGS.athena_query_timeout,
self.output_bucket))
script_performance = json.loads(str(stdout))
execution_time = script_performance[query_name]['execution_time']
run_metadata = {'script': query_name}
if 'error_details' in script_performance[query_name]:
run_metadata['error_details'] = script_performance[query_name][
'error_details']
run_metadata.update(self.GetMetadata())
return execution_time, run_metadata
def GetMetadata(self) -> Dict[str, str]:
"""Gets the Metadata attributes for the Client Interface."""
return {'client': FLAGS.athena_client_interface}
def ReadScript(script_uri):
"""Method to read a sql script based on its local path.
Arguments:
script_uri: Local URI of file containing SQL query.
Returns:
Query String contents of the URI location.
Raises:
IOError: If the script cannot be read.
"""
with open(script_uri) as fp:
return fp.read()
def PrepareQueryString(query_string_template, substitutions):
"""Method to read a template Athena script and substitute placeholders.
Args:
query_string_template: Template version of the Athena query.
substitutions: A dictionary of string placeholder keys and corresponding
string values.
Returns:
Materialized Athena query as a string.
"""
  query_string = query_string_template
  for key, value in substitutions.items():
    query_string = query_string.replace(key, value)
  return query_string
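# Hypothetical usage sketch (added for clarity, not from the original benchmark code):
# placeholder keys in a DDL template are swapped for concrete values; the table and
# bucket names below are made up.
def _ExamplePrepareQueryString():
  template = 'CREATE EXTERNAL TABLE {table} (...) LOCATION "s3://{bucket}/{table}/"'
  return PrepareQueryString(template, {'{table}': 'lineitem', '{bucket}': 'pkb-data'})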
def RunScriptCommand(script_command):
"""Method to execute an AWS Athena cli command.
Args:
script_command: Fully compiled AWS Athena cli command.
Returns:
String stdout result of executing the query.
Script Command execution duration in seconds (rounded).
Raises:
AthenaQueryError: If the return code does not indicate success.
"""
start_time = datetime.datetime.now()
stdout, _, retcode = vm_util.IssueCommand(
script_command, raise_on_failure=False)
if retcode:
raise AthenaQueryError
end_time = datetime.datetime.now()
return stdout, int((end_time - start_time).total_seconds())
class Athena(edw_service.EdwService):
"""Object representing a Athena data warehouse."""
CLOUD = providers.AWS
SERVICE_TYPE = 'athena'
def __init__(self, edw_service_spec):
super(Athena, self).__init__(edw_service_spec)
self.region = util.GetRegionFromZone(FLAGS.zones[0])
self.output_bucket = '-'.join(
[FLAGS.athena_output_location_prefix, self.region, FLAGS.run_uri])
self.client_interface = GetAthenaClientInterface(self.cluster_identifier,
self.output_bucket)
self.s3_service = s3.S3Service()
self.s3_service.PrepareService(self.region)
self.s3_service.MakeBucket(self.output_bucket)
if FLAGS.provision_athena:
self.data_bucket = 'pkb' + self.cluster_identifier.replace('_', '')
self.tables = (
TPC_H_TABLES if FLAGS.edw_tpc_dsb_type == 'tpc_h' else TPC_DS_TABLES)
self.athena_db_create_time = 0
self.athena_table_create_time = 0
def BuildAthenaCommand(self, query_string, database=None):
"""Method to compile a AWS Athena cli command.
Arguments:
query_string: A string with the query that needs to be executed on Athena.
database: The Athena database against which the query should be executed.
Returns:
Fully compiled AWS Athena cli command.
"""
cmd = []
cmd.extend(AWS_ATHENA_CMD_PREFIX)
cmd.extend([
'--region', self.region,
'start-query-execution',
'--query-string', query_string
])
if database:
cmd.extend(['--query-execution-context', ('Database=%s' % database)])
cmd.extend([
'--result-configuration',
('OutputLocation=s3://%s' % self.output_bucket)
])
cmd.extend(AWS_ATHENA_CMD_POSTFIX)
return cmd
def _Create(self):
"""Create a Athena data warehouse."""
def _EmptyDatabase():
"""Remove tables, if they exist, so they can be refreshed.
If the database and/or tables don't already exist, the drop commands
will simply fail, which won't raise errors.
"""
drop_script_path = data.ResourcePath('edw/athena/%s/ddl/drop.sql' %
FLAGS.edw_tpc_dsb_type)
drop_script_contents = ReadScript(drop_script_path)
# Drop all tables so the database can be dropped.
for table in self.tables:
# Remove the folder backing each parquet table so they can be refreshed.
vm_util.IssueCommand([
'aws', 's3', 'rm',
's3://%s/%s_parquet' % (self.data_bucket, table), '--recursive'
], raise_on_failure=False)
# The parquet tables don't have the type suffix so that the queries can
# run as written without having to change the table names.
for suffix in ['_csv', '']:
script_contents = PrepareQueryString(drop_script_contents,
{'{table}': table + suffix})
script_command = self.BuildAthenaCommand(
script_contents, database=self.cluster_identifier)
RunScriptCommand(script_command)
drop_database_query_string = PrepareQueryString(
'drop database database_name',
{'database_name': self.cluster_identifier})
script_command = self.BuildAthenaCommand(drop_database_query_string)
RunScriptCommand(script_command)
def _CreateDatabase():
create_database_query_string = PrepareQueryString(
'create database database_name',
{'database_name': self.cluster_identifier})
script_command = self.BuildAthenaCommand(create_database_query_string)
return RunScriptCommand(script_command)
def _CreateTable(table_create_sql_template):
template_script_path = data.ResourcePath(table_create_sql_template)
template_script_contents = ReadScript(template_script_path)
script_contents = PrepareQueryString(template_script_contents,
{'{bucket}': self.data_bucket})
script_command = self.BuildAthenaCommand(
script_contents, database=self.cluster_identifier)
return RunScriptCommand(script_command)
def _CreateAllTables():
"""Create all TPC benchmarking tables."""
cumulative_table_create_time = 0
for table in self.tables:
for suffix in ['_csv', '_parquet']:
script = 'edw/athena/%s/ddl/%s.sql' % (FLAGS.edw_tpc_dsb_type,
table + suffix)
_, table_create_time = _CreateTable(script)
cumulative_table_create_time += table_create_time
return cumulative_table_create_time
_EmptyDatabase()
_, self.athena_db_create_time = _CreateDatabase()
self.athena_table_create_time = _CreateAllTables()
def _Exists(self):
"""Method to validate the existence of a Athena data warehouse.
Returns:
Boolean value indicating the existence of a Athena data warehouse.
"""
raise NotImplementedError
def _Delete(self):
"""Delete a Athena data warehouse."""
if not FLAGS.teardown_athena:
logging.info('The current resource is requested to be long living.')
return
raise NotImplementedError
def Cleanup(self):
# Direct cleanup is used instead of _DeleteDependencies because the Athena
# warehouse resource isn't created/deleted each time.
self.s3_service.DeleteBucket(self.output_bucket)
def GetMetadata(self):
"""Return a dictionary of the metadata for the Athena data warehouse."""
basic_data = super(Athena, self).GetMetadata()
basic_data.update({'database': self.cluster_identifier})
basic_data.update(self.client_interface.GetMetadata())
return basic_data
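# Illustrative note (added, not part of the original module): for a database named
# 'pkb_tpch' and an output bucket 'pkb-athena-out' in us-east-1 (all hypothetical),
# an Athena instance's BuildAthenaCommand('select 1', database='pkb_tpch') assembles
# an awscli invocation roughly equivalent to:
#   aws athena --region us-east-1 start-query-execution --query-string 'select 1'
#     --query-execution-context Database=pkb_tpch
#     --result-configuration OutputLocation=s3://pkb-athena-out --output json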
|
py | 1a397d6a2edfb9d4dcdef785fbd32657a1e18871 | from discord import User
from typing import Union
class MatchingRoom:
def __init__(
self,
id: int,
title: str,
host: User,
max_players: int,
timestamp: int,
):
self.id = id
self.title = title
self.host = host
self.players = [host]
self.max_players = max_players
self.timestamp = timestamp
self.description = None
self.game = None
def set_desc(self, description: str):
self.description = description
def set_game(self, game: str):
self.game = game
def set_max_players(self, max_players: int):
self.max_players = max_players
def add_user(self, user: User):
self.players.append(user)
def del_user(self, user: User):
self.players.remove(user)
class RoomHandler:
def __init__(self):
self.rooms: dict[int, MatchingRoom] = {} # room_id: room
self.users: dict[User, int] = {} # user: room_id
def is_room_exist(self, room_id: int) -> bool:
return room_id in self.rooms
def is_room_full(self, room_id: int) -> bool:
return len(self.rooms[room_id].players) >= self.rooms[room_id].max_players
def is_user_in_room(self, user: User) -> bool:
return user in self.users
def register_room(self, room: MatchingRoom):
self.rooms[room.id] = room
self.users[room.host] = room.id
def close_room(self, room_id: int):
for user in self.rooms[room_id].players:
del self.users[user]
del self.rooms[room_id]
def join_room(self, room_id: int, user: User):
self.rooms[room_id].add_user(user)
self.users[user] = room_id
def leave_room(self, user: User):
room_id = self.users[user]
self.rooms[room_id].del_user(user)
del self.users[user]
def get_room(self, room_id: int) -> Union[MatchingRoom, None]:
if room_id in self.rooms:
return self.rooms[room_id]
return None
def get_room_by_user(self, user: User) -> Union[MatchingRoom, None]:
if self.is_user_in_room(user):
return self.get_room(self.users[user])
return None
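# Hypothetical usage sketch (added for illustration, not part of the original bot): a
# room's lifecycle through the handler. Real User objects would come from discord.py;
# plain objects stand in for them here.
def _example_room_flow():
    handler = RoomHandler()
    host, guest = object(), object()
    room = MatchingRoom(id=1, title="ranked 2v2", host=host, max_players=2, timestamp=0)
    handler.register_room(room)
    handler.join_room(1, guest)
    assert handler.is_room_full(1)
    handler.leave_room(guest)
    handler.close_room(1)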
|
py | 1a397dc03e5482ea5161bcc7b8476ba1a8b9d50d | import json
from django.db.models import Count
from django.http import HttpResponse
from django.template import loader
from .models import Scent, TestResult
def index(request):
template = loader.get_template('smelltest/index.html')
context = {
}
return HttpResponse(template.render(context, request))
def data(request):
scents = Scent.objects.order_by('id')
test_results = TestResult.objects.values('scent', 'guess').annotate(Count('scent'))
ret = {
'nodes': [{
'name': s.name,
'group': 1,
'testCount': s.tests.count()
} for s in scents],
'links': [{
'source': r['scent'] - 1, # 0-index array vs 1-index table PK
'target': r['guess'] - 1,
'value': r['scent__count']
} for r in test_results]
}
return HttpResponse(json.dumps(ret), content_type="application/json")
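# Illustrative note (added, not part of the original view): the payload is shaped for a
# d3-style force-directed graph, e.g. (values hypothetical):
#   {"nodes": [{"name": "Lemon", "group": 1, "testCount": 4}, ...],
#    "links": [{"source": 0, "target": 2, "value": 3}, ...]}
# "source"/"target" are 0-based indexes into "nodes", hence the "- 1" adjustment from
# the 1-based primary keys above.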
|
py | 1a397dd1cad52ff758b173a055f4e4f79a8ae06d | """
eZmax API Definition (Full)
This API expose all the functionnalities for the eZmax and eZsign applications. # noqa: E501
The version of the OpenAPI document: 1.1.7
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
import sys
import unittest
import eZmaxApi
from eZmaxApi.model.ezsigntemplatesigner_get_object_v1_response_m_payload import EzsigntemplatesignerGetObjectV1ResponseMPayload
globals()['EzsigntemplatesignerGetObjectV1ResponseMPayload'] = EzsigntemplatesignerGetObjectV1ResponseMPayload
from eZmaxApi.model.ezsigntemplatesigner_get_object_v1_response_all_of import EzsigntemplatesignerGetObjectV1ResponseAllOf
class TestEzsigntemplatesignerGetObjectV1ResponseAllOf(unittest.TestCase):
"""EzsigntemplatesignerGetObjectV1ResponseAllOf unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testEzsigntemplatesignerGetObjectV1ResponseAllOf(self):
"""Test EzsigntemplatesignerGetObjectV1ResponseAllOf"""
# FIXME: construct object with mandatory attributes with example values
# model = EzsigntemplatesignerGetObjectV1ResponseAllOf() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
|
py | 1a397ea7293dd412faf1ca216b848e46f21db1ca | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkvpc.endpoint import endpoint_data
class AllocateEipAddressRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Vpc', '2016-04-28', 'AllocateEipAddress','Vpc')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ResourceOwnerId(self):
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self,ResourceOwnerId):
self.add_query_param('ResourceOwnerId',ResourceOwnerId)
def get_ClientToken(self):
return self.get_query_params().get('ClientToken')
def set_ClientToken(self,ClientToken):
self.add_query_param('ClientToken',ClientToken)
def get_ISP(self):
return self.get_query_params().get('ISP')
def set_ISP(self,ISP):
self.add_query_param('ISP',ISP)
def get_ResourceGroupId(self):
return self.get_query_params().get('ResourceGroupId')
def set_ResourceGroupId(self,ResourceGroupId):
self.add_query_param('ResourceGroupId',ResourceGroupId)
def get_Netmode(self):
return self.get_query_params().get('Netmode')
def set_Netmode(self,Netmode):
self.add_query_param('Netmode',Netmode)
def get_InstanceChargeType(self):
return self.get_query_params().get('InstanceChargeType')
def set_InstanceChargeType(self,InstanceChargeType):
self.add_query_param('InstanceChargeType',InstanceChargeType)
def get_Period(self):
return self.get_query_params().get('Period')
def set_Period(self,Period):
self.add_query_param('Period',Period)
def get_AutoPay(self):
return self.get_query_params().get('AutoPay')
def set_AutoPay(self,AutoPay):
self.add_query_param('AutoPay',AutoPay)
def get_ResourceOwnerAccount(self):
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self,ResourceOwnerAccount):
self.add_query_param('ResourceOwnerAccount',ResourceOwnerAccount)
def get_Bandwidth(self):
return self.get_query_params().get('Bandwidth')
def set_Bandwidth(self,Bandwidth):
self.add_query_param('Bandwidth',Bandwidth)
def get_OwnerAccount(self):
return self.get_query_params().get('OwnerAccount')
def set_OwnerAccount(self,OwnerAccount):
self.add_query_param('OwnerAccount',OwnerAccount)
def get_OwnerId(self):
return self.get_query_params().get('OwnerId')
def set_OwnerId(self,OwnerId):
self.add_query_param('OwnerId',OwnerId)
def get_ActivityId(self):
return self.get_query_params().get('ActivityId')
def set_ActivityId(self,ActivityId):
self.add_query_param('ActivityId',ActivityId)
def get_InternetChargeType(self):
return self.get_query_params().get('InternetChargeType')
def set_InternetChargeType(self,InternetChargeType):
self.add_query_param('InternetChargeType',InternetChargeType)
def get_PricingCycle(self):
return self.get_query_params().get('PricingCycle')
def set_PricingCycle(self,PricingCycle):
self.add_query_param('PricingCycle',PricingCycle) |
py | 1a397ea760dff07945201b4fbec218be7412f33e | """
Configuration for docs
"""
# source_link = "https://github.com/[org_name]/customer_rewards"
# docs_base_url = "https://[org_name].github.io/customer_rewards"
# headline = "App that does everything"
# sub_heading = "Yes, you got that right the first time, everything"
def get_context(context):
context.brand_html = "Customer Rewards"
|
py | 1a397f0f5b1042d2263b30b3568efa3f021b4749 | #!/usr/bin/env python
# Copyright 2019, Kay Hayen, mailto:[email protected]
#
# Part of "Nuitka", an optimizing Python compiler that is compatible and
# integrates with CPython, but also works on its own.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Tool to automatically format source code in Nuitka style.
"""
import os
import re
import subprocess
import sys
from logging import warning
from nuitka.tools.quality.Git import (
getFileHashContent,
putFileHashContent,
updateFileIndex,
updateWorkingFile,
)
from nuitka.Tracing import my_print
from nuitka.utils.Execution import getExecutablePath, withEnvironmentPathAdded
from nuitka.utils.FileOperations import (
getFileContents,
renameFile,
withPreserveFileMode,
)
from nuitka.utils.Shebang import getShebangFromFile
from nuitka.utils.Utils import getOS
def _cleanupWindowsNewlines(filename):
""" Remove Windows new-lines from a file.
Simple enough to not depend on external binary.
"""
with open(filename, "rb") as f:
source_code = f.read()
updated_code = source_code.replace(b"\r\n", b"\n")
updated_code = updated_code.replace(b"\n\r", b"\n")
if updated_code != source_code:
with open(filename, "wb") as out_file:
out_file.write(updated_code)
def _cleanupTrailingWhitespace(filename):
""" Remove trailing white spaces from a file.
"""
with open(filename, "r") as f:
source_lines = [line for line in f]
clean_lines = [line.rstrip() for line in source_lines]
if clean_lines != source_lines:
with open(filename, "w") as out_file:
out_file.write("\n".join(clean_lines) + "\n")
def _updateCommentNode(comment_node):
if "pylint:" in str(comment_node.value):
def replacer(part):
def renamer(pylint_token):
# pylint: disable=too-many-branches,too-many-return-statements
if pylint_token == "E0602":
return "undefined-variable"
elif pylint_token in ("E0401", "F0401"):
return "import-error"
elif pylint_token == "E1102":
return "not-callable"
elif pylint_token == "E1133":
return " not-an-iterable"
elif pylint_token == "E1128":
return "assignment-from-none"
# Save line length for this until isort is better at long lines.
elif pylint_token == "useless-suppression":
return "I0021"
# elif pylint_token == "I0021":
# return "useless-suppression"
elif pylint_token == "R0911":
return "too-many-return-statements"
elif pylint_token == "R0201":
return "no-self-use"
elif pylint_token == "R0902":
return "too-many-instance-attributes"
elif pylint_token == "R0912":
return "too-many-branches"
elif pylint_token == "R0914":
return "too-many-locals"
elif pylint_token == "R0915":
return "too-many-statements"
elif pylint_token == "W0123":
return "eval-used"
elif pylint_token == "W0603":
return "global-statement"
elif pylint_token == "W0613":
return "unused-argument"
elif pylint_token == "W0622":
return "redefined-builtin"
elif pylint_token == "W0703":
return "broad-except"
else:
return pylint_token
return part.group(1) + ",".join(
sorted(renamer(token) for token in part.group(2).split(","))
)
new_value = re.sub(
r"(pylint\: disable=)(.*)", replacer, str(comment_node.value), flags=re.M
)
comment_node.value = new_value
def _cleanupPyLintComments(filename, abort):
from baron.parser import ( # pylint: disable=I0021,import-error,no-name-in-module
ParsingError, # @UnresolvedImport
)
from redbaron import ( # pylint: disable=I0021,import-error,no-name-in-module
RedBaron, # @UnresolvedImport
)
old_code = getFileContents(filename)
try:
red = RedBaron(old_code)
# red = RedBaron(old_code.rstrip()+'\n')
except ParsingError:
if abort:
raise
my_print("PARSING ERROR.")
return 2
for node in red.find_all("CommentNode"):
try:
_updateCommentNode(node)
except Exception:
my_print("Problem with", node)
node.help(deep=True, with_formatting=True)
raise
new_code = red.dumps()
if new_code != old_code:
with open(filename, "w") as source_code:
source_code.write(red.dumps())
def _cleanupImportRelative(filename):
package_name = os.path.dirname(filename).replace(os.path.sep, ".")
# Make imports local if possible.
if package_name.startswith("nuitka."):
source_code = getFileContents(filename)
updated_code = re.sub(
r"from %s import" % package_name, "from . import", source_code
)
updated_code = re.sub(r"from %s\." % package_name, "from .", source_code)
if source_code != updated_code:
with open(filename, "w") as out_file:
out_file.write(updated_code)
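# Illustrative sketch (added, not from upstream Nuitka): for a file under the
# "nuitka/utils" package the rewrites above turn absolute intra-package imports into
# relative ones, e.g. (hypothetical lines):
#   from nuitka.utils import Execution                  ->  from . import Execution
#   from nuitka.utils.FileOperations import getFileContents
#                                                        ->  from .FileOperations import getFileContents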
_binary_calls = {}
def _getPythonBinaryCall(binary_name):
if binary_name not in _binary_calls:
# Try running Python installation.
try:
__import__(binary_name)
_binary_calls[binary_name] = [sys.executable, "-m", binary_name]
return _binary_calls[binary_name]
except ImportError:
pass
binary_path = getExecutablePath(binary_name)
if binary_path:
_binary_calls[binary_name] = [binary_path]
return _binary_calls[binary_name]
sys.exit("Error, cannot find %s, not installed for this Python?" % binary_name)
return _binary_calls[binary_name]
def _cleanupImportSortOrder(filename):
_cleanupImportRelative(filename)
isort_call = _getPythonBinaryCall("isort")
contents = getFileContents(filename)
start_index = None
if "\n# isort:start" in contents:
parts = contents.splitlines()
start_index = parts.index("# isort:start")
contents = "\n".join(parts[start_index + 1 :])
with open(filename, "w") as out_file:
out_file.write(contents)
with open(os.devnull, "w") as devnull:
subprocess.check_call(
isort_call
+ [
"-q", # quiet, but stdout is still garbage
"-ot", # Order imports by type in addition to alphabetically
"-m3", # "vert-hanging"
"-up", # Prefer braces () over \ for line continuation.
"-tc", # Trailing commas
"-ns", # Do not ignore those:
"__init__.py",
filename,
],
stdout=devnull,
)
if start_index is not None:
contents = getFileContents(filename)
contents = "\n".join(parts[: start_index + 1]) + "\n" + contents
with open(filename, "w") as out_file:
out_file.write(contents)
warned_clang_format = False
def cleanupClangFormat(filename):
""" Call clang-format on a given filename to format C code.
Args:
filename: What file to re-format.
"""
# Using global here, as this is really a singleton, in
# the form of a module, pylint: disable=global-statement
global warned_clang_format
clang_format_path = getExecutablePath("clang-format-6.0")
# Extra ball on Windows, check default installation PATH too.
if not clang_format_path and getOS() == "Windows":
with withEnvironmentPathAdded("PATH", r"C:\Program Files\LLVM\bin"):
clang_format_path = getExecutablePath("clang-format")
if clang_format_path:
subprocess.call(
[
clang_format_path,
"-i",
"-style={BasedOnStyle: llvm, IndentWidth: 4, ColumnLimit: 120}",
filename,
]
)
else:
if not warned_clang_format:
warning("Need to install LLVM for C files format.")
warned_clang_format = True
def _shouldNotFormatCode(filename):
parts = os.path.abspath(filename).split(os.path.sep)
if "inline_copy" in parts:
return True
elif "tests" in parts:
return "run_all.py" not in parts and "compile_itself.py" not in parts
else:
return False
def _isPythonFile(filename):
if filename.endswith((".py", ".pyw", ".scons")):
return True
else:
shebang = getShebangFromFile(filename)
if shebang is not None:
shebang = shebang[2:].lstrip()
if shebang.startswith("/usr/bin/env"):
shebang = shebang[12:].lstrip()
if shebang.startswith("python"):
return True
return False
def autoformat(filename, git_stage, abort):
# This does a lot of distinctions, pylint:disable=too-many-branches
if os.path.isdir(filename):
return
filename = os.path.normpath(filename)
my_print("Consider", filename, end=": ")
is_python = _isPythonFile(filename)
is_c = filename.endswith((".c", ".h"))
is_txt = filename.endswith(
(".txt", ".rst", ".sh", ".in", ".md", ".stylesheet", ".j2")
)
# Some parts of Nuitka must not be re-formatted with black or clang-format
# as they have different intentions.
if not (is_python or is_c or is_txt):
my_print("Ignored file type")
return
# Work on a temporary copy
tmp_filename = filename + ".tmp"
if git_stage:
old_code = getFileHashContent(git_stage["dst_hash"])
else:
old_code = getFileContents(filename, "rb")
with open(tmp_filename, "wb") as output_file:
output_file.write(old_code)
try:
if is_python:
_cleanupWindowsNewlines(tmp_filename)
if not _shouldNotFormatCode(filename):
_cleanupPyLintComments(tmp_filename, abort)
_cleanupImportSortOrder(tmp_filename)
black_call = _getPythonBinaryCall("black")
subprocess.call(black_call + ["-q", tmp_filename])
_cleanupWindowsNewlines(tmp_filename)
elif is_c:
_cleanupWindowsNewlines(tmp_filename)
cleanupClangFormat(filename)
_cleanupWindowsNewlines(tmp_filename)
elif is_txt:
_cleanupWindowsNewlines(tmp_filename)
_cleanupTrailingWhitespace(tmp_filename)
_cleanupWindowsNewlines(tmp_filename)
changed = False
if old_code != getFileContents(tmp_filename, "rb"):
my_print("Updated.")
with withPreserveFileMode(filename):
if git_stage:
new_hash_value = putFileHashContent(tmp_filename)
updateFileIndex(git_stage, new_hash_value)
updateWorkingFile(filename, git_stage["dst_hash"], new_hash_value)
else:
renameFile(tmp_filename, filename)
changed = True
else:
my_print("OK.")
return changed
finally:
if os.path.exists(tmp_filename):
os.unlink(tmp_filename)
|
py | 1a397fd19e5c693e5f98ab858aee39edc3b62152 | import json
import requests
def webhook(event, context):
print(event)
body = json.loads(event['Records'][0]['body'])
print(body)
headers = {
'Authorization': body['token'],
'Content-Type': 'application/x-www-form-urlencoded'
}
r = requests.post('https://notify-api.line.me/api/notify',
headers=headers,
data={'message': body['message']})
print(r)
# AWS record sample
#{'Records': [{'messageId': 'fddc42ba-a122-4581-965e-d0144ac8a5ad', 'receiptHandle': 'AQEBjO32gY5pXOfOrmDR0hD4k1av9KyjbHFpc+rIBPV2Brif7Lo+jqnGevSjfFwlICyGf+BhWwKaxFw8XdB3QTzRbw0vnLURjnQeDSBrJHa/S57SRs9TOLRBq38maycAVg69iZbetg9VhLMBCcLtOtPHTzKkmo+/Sosm51WA5CzXK7A0rteikx6nxS1CUIpq6MAujodupP0Hgr5RjK5nH/nmxA4Db0leWEmLokalZbtlx4W14tp7PZxPOrQOLDaGrH//p4h32tY8IN3MkCqi+gyNT7kCU4KwCGOIrybb07ZWyKBTKw+KOMNr/Ykj4z2N1qxIvTM55UY9d8V29YsH32OjrZTei5P7Nke/51E2tWkmkqoFAlqzxDjQPvpP+Pvvr8aazeeZ6opkr59UefAiiyM71Q==', 'body': 'hi', 'attributes': {'ApproximateReceiveCount': '9', 'SentTimestamp': '1566621263072', 'SenderId': '901588721449', 'ApproximateFirstReceiveTimestamp': '1566621263072'}, 'messageAttributes': {}, 'md5OfBody': '49f68a5c8493ec2c0bf489821c21fc3b', 'eventSource': 'aws:sqs', 'eventSourceARN': 'arn:aws:sqs:us-east-1:901588721449:LINE_notify_consumer', 'awsRegion': 'us-east-1'}]}
|
py | 1a39807ee3a7f17efd69c566abf1bdb962d38a94 | """Unit tests of scripts folder."""
|
py | 1a3982262060e1a2d1f3bbbdb83500607e69444a | # To add a new cell, type '# %%'
# To add a new markdown cell, type '# %% [markdown]'
# %% [markdown]
# # Networks with Parallel Concatenations (GoogLeNet)
#
# The Inception block
# %%
import torch
from torch import nn
from torch.nn import functional as F
from d2l import torch as d2l
class Inception(nn.Module):
def __init__(self, in_channels, c1, c2, c3, c4, **kwargs):
super(Inception, self).__init__(**kwargs)
        # Path 1: a single 1x1 convolutional layer
self.p1_1 = nn.Conv2d(in_channels, c1, kernel_size=1)
        # Path 2: 1x1 convolution followed by a 3x3 convolution
self.p2_1 = nn.Conv2d(in_channels, c2[0], kernel_size=1)
self.p2_2 = nn.Conv2d(c2[0], c2[1], kernel_size=3, padding=1)
        # Path 3: 1x1 convolution followed by a 5x5 convolution
self.p3_1 = nn.Conv2d(in_channels, c3[0], kernel_size=1)
self.p3_2 = nn.Conv2d(c3[0], c3[1], kernel_size=5, padding=2)
        # Path 4: 3x3 max pooling followed by a 1x1 convolution
self.p4_1 = nn.MaxPool2d(kernel_size=3, stride=1, padding=1)
self.p4_2 = nn.Conv2d(in_channels, c4, kernel_size=1)
def forward(self, x):
p1 = F.relu(self.p1_1(x))
p2 = F.relu(self.p2_2(F.relu(self.p2_1(x))))
p3 = F.relu(self.p3_2(F.relu(self.p3_1(x))))
p4 = F.relu(self.p4_2(self.p4_1(x)))
        # Concatenate the outputs of the four paths along the channel dimension
return torch.cat((p1, p2, p3, p4), dim=1)
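# %%
# Quick sanity check (added illustration, not from the original notebook): the first
# Inception block used below maps 192 input channels to 64 + 128 + 32 + 32 = 256 output
# channels while preserving the spatial size.
blk = Inception(192, 64, (96, 128), (16, 32), 32)
print(blk(torch.rand(1, 192, 28, 28)).shape)  # torch.Size([1, 256, 28, 28])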
# %% [markdown]
# GoogLeNet模型
# %%
# GoogLeNet 由5个block串行构成
# b1 = nn.Sequential(nn.Conv2d(1, 64, kernel_size=7, stride=2, padding=3),
# nn.ReLU(), nn.MaxPool2d(kernel_size=3, stride=2,
# padding=1))
# b2 = nn.Sequential(nn.Conv2d(64, 64, kernel_size=1), nn.ReLU(),
# nn.Conv2d(64, 192, kernel_size=3, padding=1),
# nn.MaxPool2d(kernel_size=3, stride=2, padding=1))
# b3 = nn.Sequential(Inception(192, 64, (96, 128), (16, 32), 32),
# Inception(256, 128, (128, 192), (32, 96), 64),
# nn.MaxPool2d(kernel_size=3, stride=2, padding=1))
# b4 = nn.Sequential(Inception(480, 192, (96, 208), (16, 48), 64),
# Inception(512, 160, (112, 224), (24, 64), 64),
# Inception(512, 128, (128, 256), (24, 64), 64),
# Inception(512, 112, (144, 288), (32, 64), 64),
# Inception(528, 256, (160, 320), (32, 128), 128),
# nn.MaxPool2d(kernel_size=3, stride=2, padding=1))
# b5 = nn.Sequential(Inception(832, 256, (160, 320), (32, 128), 128),
# Inception(832, 384, (192, 384), (48, 128), 128),
# nn.AdaptiveAvgPool2d((1, 1)), nn.Flatten())
# net = nn.Sequential(b1, b2, b3, b4, b5, nn.Linear(1024, 10))
class GoogLeNet(nn.Module):
def __init__(self):
super().__init__()
self.b1 = nn.Sequential(nn.Conv2d(1, 64, kernel_size=7, stride=2, padding=3), nn.ReLU(),
nn.MaxPool2d(kernel_size=3, stride=2, padding=1))
self.b2 = nn.Sequential(nn.Conv2d(64, 64, kernel_size=1), nn.ReLU(),
nn.Conv2d(64, 192, kernel_size=3, padding=1),
nn.MaxPool2d(kernel_size=3, stride=2, padding=1))
self.b3 = nn.Sequential(Inception(192, 64, (96, 128), (16, 32), 32),
Inception(256, 128, (128, 192), (32, 96), 64),
nn.MaxPool2d(kernel_size=3, stride=2, padding=1))
self.b4 = nn.Sequential(Inception(480, 192, (96, 208), (16, 48), 64),
Inception(512, 160, (112, 224), (24, 64), 64),
Inception(512, 128, (128, 256), (24, 64), 64),
Inception(512, 112, (144, 288), (32, 64), 64),
Inception(528, 256, (160, 320), (32, 128), 128),
nn.MaxPool2d(kernel_size=3, stride=2, padding=1))
self.b5 = nn.Sequential(Inception(832, 256, (160, 320), (32, 128), 128),
Inception(832, 384, (192, 384), (48, 128), 128),
nn.AdaptiveAvgPool2d((1, 1)), nn.Flatten())
self.fc = nn.Linear(1024, 10)
def forward(self, x):
x = self.b1(x)
x = self.b2(x)
x = self.b3(x)
x = self.b4(x)
x = self.b5(x)
x = self.fc(x)
return x
# %% [markdown]
# To keep training on Fashion-MNIST short, we reduce the input height and width from 224 to 96
# %%
net = GoogLeNet()
X = torch.rand(size=(1, 1, 96, 96))
for layer in net.children():
    X = layer(X)
    print(layer.__class__.__name__, 'output shape:\t', X.shape)
# %% [markdown]
# Train the model
# %%
# lr, num_epochs, batch_size = 0.1, 10, 128
# train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size, resize=96)
# d2l.train_ch6(net, train_iter, test_iter, num_epochs, lr, d2l.try_gpu())
# %%
from torch import optim
from torch.utils.data import TensorDataset, DataLoader
# Hyperparameters (matching the commented d2l settings above)
lr, epochs, bs = 0.1, 10, 128
model = GoogLeNet()
# Loss function
loss_func = F.cross_entropy
# Optimizer setup
opt = optim.SGD(model.parameters(), lr=lr, momentum=0.9)
# Data: x_train/y_train and x_valid/y_valid are assumed to be Fashion-MNIST tensors
# prepared elsewhere (e.g. already resized to 96x96 as above)
train_ds = TensorDataset(x_train, y_train)
train_dl = DataLoader(train_ds, batch_size=bs, shuffle=True)
valid_ds = TensorDataset(x_valid, y_valid)
valid_dl = DataLoader(valid_ds, batch_size=bs, shuffle=False)
# Training loop
for epoch in range(epochs):
    # Switch to training mode
    model.train()
    # Iterate one batch at a time
    for xb, yb in train_dl:
        # Forward pass
        pred = model(xb)
        # Compute the loss
        loss = loss_func(pred, yb)
        # Backward pass: compute gradients of the loss w.r.t. each weight parameter
        loss.backward()
        # The optimizer updates (learns) the weight parameters by gradient descent
        opt.step()
        # Reset all parameter gradients to zero
        opt.zero_grad()
    # Switch to evaluation (inference) mode, which reconfigures BN, dropout, etc.
    model.eval()
    # Do not track gradients during validation
    with torch.no_grad():
        valid_loss = sum(loss_func(model(xb), yb) for xb, yb in valid_dl)
print(epoch, valid_loss / len(valid_dl)) |
py | 1a398409af260d928ac7711da60701bf255e779d |
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
class interface(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module brocade-interface - based on the path /interface/fortygigabitethernet/ipv6/vrrpv3-group/track/interface. Each member element of
the container is represented as a class variable - with a specific
YANG type.
"""
__slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__interface_type','__interface_name','__track_priority',)
_yang_name = 'interface'
_rest_name = 'interface'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
path_helper_ = kwargs.pop("path_helper", None)
if path_helper_ is False:
self._path_helper = False
elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper):
self._path_helper = path_helper_
elif hasattr(self, "_parent"):
path_helper_ = getattr(self._parent, "_path_helper", False)
self._path_helper = path_helper_
else:
self._path_helper = False
extmethods = kwargs.pop("extmethods", None)
if extmethods is False:
self._extmethods = False
elif extmethods is not None and isinstance(extmethods, dict):
self._extmethods = extmethods
elif hasattr(self, "_parent"):
extmethods = getattr(self._parent, "_extmethods", None)
self._extmethods = extmethods
else:
self._extmethods = False
self.__interface_type = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'fortygigabitethernet': {'value': 2}, u'gigabitethernet': {'value': 0}, u'tengigabitethernet': {'value': 1}, u'hundredgigabitethernet': {'value': 10}, u'port-channel': {'value': 3}},), is_leaf=True, yang_name="interface-type", rest_name="interface-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None}}, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-vrrpv3', defining_module='brocade-vrrpv3', yang_type='track-iftype', is_config=True)
self.__interface_name = YANGDynClass(base=[RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'((([1-9]|[1-9][0-9]|1[0-9][0-9]|2[0-3][0-9])/)?(([0-9]|[1][0-6]))/([1-9]|[1-9][0-9]|[1-9][0-9][0-9])(:[1-4])?)', 'length': [u'3..16']}),RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..6144']}),], is_leaf=True, yang_name="interface-name", rest_name="interface-name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None}}, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-vrrpv3', defining_module='brocade-vrrpv3', yang_type='union', is_config=True)
self.__track_priority = YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), restriction_dict={'range': [u'1..254']}), is_leaf=True, yang_name="track-priority", rest_name="priority", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Track Priority', u'alt-name': u'priority'}}, namespace='urn:brocade.com:mgmt:brocade-vrrpv3', defining_module='brocade-vrrpv3', yang_type='uint8', is_config=True)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return [u'interface', u'fortygigabitethernet', u'ipv6', u'vrrpv3-group', u'track', u'interface']
def _rest_path(self):
if hasattr(self, "_parent"):
if self._rest_name:
return self._parent._rest_path()+[self._rest_name]
else:
return self._parent._rest_path()
else:
return [u'interface', u'FortyGigabitEthernet', u'ipv6', u'vrrp-group', u'track', u'interface']
def _get_interface_type(self):
"""
Getter method for interface_type, mapped from YANG variable /interface/fortygigabitethernet/ipv6/vrrpv3_group/track/interface/interface_type (track-iftype)
"""
return self.__interface_type
def _set_interface_type(self, v, load=False):
"""
Setter method for interface_type, mapped from YANG variable /interface/fortygigabitethernet/ipv6/vrrpv3_group/track/interface/interface_type (track-iftype)
If this variable is read-only (config: false) in the
source YANG file, then _set_interface_type is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_interface_type() directly.
"""
parent = getattr(self, "_parent", None)
if parent is not None and load is False:
raise AttributeError("Cannot set keys directly when" +
" within an instantiated list")
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'fortygigabitethernet': {'value': 2}, u'gigabitethernet': {'value': 0}, u'tengigabitethernet': {'value': 1}, u'hundredgigabitethernet': {'value': 10}, u'port-channel': {'value': 3}},), is_leaf=True, yang_name="interface-type", rest_name="interface-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None}}, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-vrrpv3', defining_module='brocade-vrrpv3', yang_type='track-iftype', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """interface_type must be of a type compatible with track-iftype""",
'defined-type': "brocade-vrrpv3:track-iftype",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'fortygigabitethernet': {'value': 2}, u'gigabitethernet': {'value': 0}, u'tengigabitethernet': {'value': 1}, u'hundredgigabitethernet': {'value': 10}, u'port-channel': {'value': 3}},), is_leaf=True, yang_name="interface-type", rest_name="interface-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None}}, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-vrrpv3', defining_module='brocade-vrrpv3', yang_type='track-iftype', is_config=True)""",
})
self.__interface_type = t
if hasattr(self, '_set'):
self._set()
def _unset_interface_type(self):
self.__interface_type = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'fortygigabitethernet': {'value': 2}, u'gigabitethernet': {'value': 0}, u'tengigabitethernet': {'value': 1}, u'hundredgigabitethernet': {'value': 10}, u'port-channel': {'value': 3}},), is_leaf=True, yang_name="interface-type", rest_name="interface-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None}}, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-vrrpv3', defining_module='brocade-vrrpv3', yang_type='track-iftype', is_config=True)
def _get_interface_name(self):
"""
Getter method for interface_name, mapped from YANG variable /interface/fortygigabitethernet/ipv6/vrrpv3_group/track/interface/interface_name (union)
"""
return self.__interface_name
def _set_interface_name(self, v, load=False):
"""
Setter method for interface_name, mapped from YANG variable /interface/fortygigabitethernet/ipv6/vrrpv3_group/track/interface/interface_name (union)
If this variable is read-only (config: false) in the
source YANG file, then _set_interface_name is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_interface_name() directly.
"""
parent = getattr(self, "_parent", None)
if parent is not None and load is False:
raise AttributeError("Cannot set keys directly when" +
" within an instantiated list")
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=[RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'((([1-9]|[1-9][0-9]|1[0-9][0-9]|2[0-3][0-9])/)?(([0-9]|[1][0-6]))/([1-9]|[1-9][0-9]|[1-9][0-9][0-9])(:[1-4])?)', 'length': [u'3..16']}),RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..6144']}),], is_leaf=True, yang_name="interface-name", rest_name="interface-name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None}}, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-vrrpv3', defining_module='brocade-vrrpv3', yang_type='union', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """interface_name must be of a type compatible with union""",
'defined-type': "brocade-vrrpv3:union",
'generated-type': """YANGDynClass(base=[RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'((([1-9]|[1-9][0-9]|1[0-9][0-9]|2[0-3][0-9])/)?(([0-9]|[1][0-6]))/([1-9]|[1-9][0-9]|[1-9][0-9][0-9])(:[1-4])?)', 'length': [u'3..16']}),RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..6144']}),], is_leaf=True, yang_name="interface-name", rest_name="interface-name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None}}, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-vrrpv3', defining_module='brocade-vrrpv3', yang_type='union', is_config=True)""",
})
self.__interface_name = t
if hasattr(self, '_set'):
self._set()
def _unset_interface_name(self):
self.__interface_name = YANGDynClass(base=[RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'((([1-9]|[1-9][0-9]|1[0-9][0-9]|2[0-3][0-9])/)?(([0-9]|[1][0-6]))/([1-9]|[1-9][0-9]|[1-9][0-9][0-9])(:[1-4])?)', 'length': [u'3..16']}),RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..6144']}),], is_leaf=True, yang_name="interface-name", rest_name="interface-name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None}}, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-vrrpv3', defining_module='brocade-vrrpv3', yang_type='union', is_config=True)
def _get_track_priority(self):
"""
Getter method for track_priority, mapped from YANG variable /interface/fortygigabitethernet/ipv6/vrrpv3_group/track/interface/track_priority (uint8)
YANG Description: Track Priority
"""
return self.__track_priority
def _set_track_priority(self, v, load=False):
"""
Setter method for track_priority, mapped from YANG variable /interface/fortygigabitethernet/ipv6/vrrpv3_group/track/interface/track_priority (uint8)
If this variable is read-only (config: false) in the
source YANG file, then _set_track_priority is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_track_priority() directly.
YANG Description: Track Priority
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), restriction_dict={'range': [u'1..254']}), is_leaf=True, yang_name="track-priority", rest_name="priority", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Track Priority', u'alt-name': u'priority'}}, namespace='urn:brocade.com:mgmt:brocade-vrrpv3', defining_module='brocade-vrrpv3', yang_type='uint8', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """track_priority must be of a type compatible with uint8""",
'defined-type': "uint8",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), restriction_dict={'range': [u'1..254']}), is_leaf=True, yang_name="track-priority", rest_name="priority", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Track Priority', u'alt-name': u'priority'}}, namespace='urn:brocade.com:mgmt:brocade-vrrpv3', defining_module='brocade-vrrpv3', yang_type='uint8', is_config=True)""",
})
self.__track_priority = t
if hasattr(self, '_set'):
self._set()
def _unset_track_priority(self):
self.__track_priority = YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), restriction_dict={'range': [u'1..254']}), is_leaf=True, yang_name="track-priority", rest_name="priority", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Track Priority', u'alt-name': u'priority'}}, namespace='urn:brocade.com:mgmt:brocade-vrrpv3', defining_module='brocade-vrrpv3', yang_type='uint8', is_config=True)
interface_type = __builtin__.property(_get_interface_type, _set_interface_type)
interface_name = __builtin__.property(_get_interface_name, _set_interface_name)
track_priority = __builtin__.property(_get_track_priority, _set_track_priority)
_pyangbind_elements = {'interface_type': interface_type, 'interface_name': interface_name, 'track_priority': track_priority, }
|
py | 1a39855d00d9bb09bd507c20ceabc42339572963 | #!/usr/bin/env python3
# Copyright 2018 Brocade Communications Systems LLC. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may also obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
:mod:`snmp_v3_account_show`-PyFOS util to show the snmp v3 account information.
*******************************************************************************
The :mod:`snmp_v3_account_show` module provides the option to display the
snmp v3 account information.
This module can be used to display the snmp v3 account information including
the index, user name, user group, authentication & privacy protocol,
authentication & privacy password and manager engine id.
* inputs:
| Infrastructure options:
| -i,--ipaddr=IPADDR IP address of FOS switch.
| -L,--login=LOGIN login name.
| -P,--password=PASSWORD password.
| -f,--vfid=VFID VFID to which the request is directed to [OPTIONAL].
| -s,--secured=MODE HTTPS mode "self" or "CA" [OPTIONAL].
| -v,--verbose verbose mode[OPTIONAL].
| Util scripts options:
| --index=INDEX Index of SNMPv3 account
* outputs:
* SNMP v3 account configuration details.
.. function:: snmp_v3_account_info(session, v3account)
* Display the snmp v3 account information.
Example usage of the method:
result = snmp_v3_account_info(inputs['session'], v3account)
print (result)
Details::
snmp_v3_acc_obj = v3_account()
result = snmp_v3_acc_obj.get(session)
* inputs:
:param session: session returned by login.
* outputs:
:rtype: dictionary of return snmp v3 account rest response
*use cases*
1. Retrieve the snmp v3 account details.
"""
import sys
from pyfos import pyfos_auth
from pyfos import pyfos_util
from pyfos.utils import brcd_util
from pyfos.pyfos_brocade_snmp import v3_account
def snmp_v3_account_info(session, v3account):
snmp_v3_acc_obj = v3_account()
if v3account is None:
result = snmp_v3_acc_obj.get(session, None)
else:
result = snmp_v3_acc_obj.get(session, v3account)
return result
def main(argv):
# Print arguments
# print(sys.argv[1:])
filters = ['index']
inputs = brcd_util.parse(argv, v3_account, filters)
v3account_obj = inputs['utilobject']
session = brcd_util.getsession(inputs)
result = snmp_v3_account_info(
inputs['session'], v3account_obj.peek_index())
pyfos_util.response_print(result)
pyfos_auth.logout(session)
if __name__ == "__main__":
main(sys.argv[1:])
|
py | 1a398745b8dab073dc23501d4596b6c8d6c160af | # Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from opentelemetry import trace as trace_api
from opentelemetry.exporter.datadog import constants, propagator
from opentelemetry.sdk import trace
from opentelemetry.trace import get_current_span, set_span_in_context
FORMAT = propagator.DatadogFormat()
def get_as_list(dict_object, key):
value = dict_object.get(key)
return [value] if value is not None else []
class TestDatadogFormat(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.serialized_trace_id = propagator.format_trace_id(
trace.generate_trace_id()
)
cls.serialized_parent_id = propagator.format_span_id(
trace.generate_span_id()
)
cls.serialized_origin = "origin-service"
def test_malformed_headers(self):
"""Test with no Datadog headers"""
malformed_trace_id_key = FORMAT.TRACE_ID_KEY + "-x"
malformed_parent_id_key = FORMAT.PARENT_ID_KEY + "-x"
context = get_current_span(
FORMAT.extract(
get_as_list,
{
malformed_trace_id_key: self.serialized_trace_id,
malformed_parent_id_key: self.serialized_parent_id,
},
)
).get_context()
self.assertNotEqual(context.trace_id, int(self.serialized_trace_id))
self.assertNotEqual(context.span_id, int(self.serialized_parent_id))
self.assertFalse(context.is_remote)
def test_missing_trace_id(self):
"""If a trace id is missing, populate an invalid trace id."""
carrier = {
FORMAT.PARENT_ID_KEY: self.serialized_parent_id,
}
ctx = FORMAT.extract(get_as_list, carrier)
span_context = get_current_span(ctx).get_context()
self.assertEqual(span_context.trace_id, trace_api.INVALID_TRACE_ID)
def test_missing_parent_id(self):
"""If a parent id is missing, populate an invalid trace id."""
carrier = {
FORMAT.TRACE_ID_KEY: self.serialized_trace_id,
}
ctx = FORMAT.extract(get_as_list, carrier)
span_context = get_current_span(ctx).get_context()
self.assertEqual(span_context.span_id, trace_api.INVALID_SPAN_ID)
def test_context_propagation(self):
"""Test the propagation of Datadog headers."""
parent_context = get_current_span(
FORMAT.extract(
get_as_list,
{
FORMAT.TRACE_ID_KEY: self.serialized_trace_id,
FORMAT.PARENT_ID_KEY: self.serialized_parent_id,
FORMAT.SAMPLING_PRIORITY_KEY: str(constants.AUTO_KEEP),
FORMAT.ORIGIN_KEY: self.serialized_origin,
},
)
).get_context()
self.assertEqual(
parent_context.trace_id, int(self.serialized_trace_id)
)
self.assertEqual(
parent_context.span_id, int(self.serialized_parent_id)
)
self.assertEqual(parent_context.trace_flags, constants.AUTO_KEEP)
self.assertEqual(
parent_context.trace_state.get(constants.DD_ORIGIN),
self.serialized_origin,
)
self.assertTrue(parent_context.is_remote)
child = trace.Span(
"child",
trace_api.SpanContext(
parent_context.trace_id,
trace.generate_span_id(),
is_remote=False,
trace_flags=parent_context.trace_flags,
trace_state=parent_context.trace_state,
),
parent=parent_context,
)
child_carrier = {}
child_context = set_span_in_context(child)
FORMAT.inject(dict.__setitem__, child_carrier, context=child_context)
self.assertEqual(
child_carrier[FORMAT.TRACE_ID_KEY], self.serialized_trace_id
)
self.assertEqual(
child_carrier[FORMAT.PARENT_ID_KEY], str(child.context.span_id)
)
self.assertEqual(
child_carrier[FORMAT.SAMPLING_PRIORITY_KEY],
str(constants.AUTO_KEEP),
)
self.assertEqual(
child_carrier.get(FORMAT.ORIGIN_KEY), self.serialized_origin
)
def test_sampling_priority_auto_reject(self):
"""Test sampling priority rejected."""
parent_context = get_current_span(
FORMAT.extract(
get_as_list,
{
FORMAT.TRACE_ID_KEY: self.serialized_trace_id,
FORMAT.PARENT_ID_KEY: self.serialized_parent_id,
FORMAT.SAMPLING_PRIORITY_KEY: str(constants.AUTO_REJECT),
},
)
).get_context()
self.assertEqual(parent_context.trace_flags, constants.AUTO_REJECT)
child = trace.Span(
"child",
trace_api.SpanContext(
parent_context.trace_id,
trace.generate_span_id(),
is_remote=False,
trace_flags=parent_context.trace_flags,
trace_state=parent_context.trace_state,
),
parent=parent_context,
)
child_carrier = {}
child_context = set_span_in_context(child)
FORMAT.inject(dict.__setitem__, child_carrier, context=child_context)
self.assertEqual(
child_carrier[FORMAT.SAMPLING_PRIORITY_KEY],
str(constants.AUTO_REJECT),
)
|
py | 1a3987785458dd4dc3c31deed91eb6a81153a6ec | #!/usr/bin/env python
# Triforce Netfirm Toolbox, put into the public domain.
# Please attribute properly, but only if you want.
import struct, sys
import socket
from Crypto.Cipher import DES
triforce_ip = sys.argv[1]
# connect to the Triforce. Port is tcp/10703.
# note that this port is only open on
# - all Type-3 triforces,
# - pre-type3 triforces jumpered to satellite mode.
# - it *should* work on naomi and chihiro, but due to lack of hardware, i didn't try.
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
print "connecting..."
s.connect((triforce_ip, 10703))
print "ok!"
# a function to receive a number of bytes with hard blocking
def readsocket(n):
res = ""
while len(res) < n:
res += s.recv(n - len(res))
return res
# Peeks 16 bytes from Host (gamecube) memory
def HOST_Read16(addr):
s.send(struct.pack("<II", 0xf0000004, addr))
data = readsocket(0x20)
res = ""
for d in xrange(0x10):
res += data[4 + (d ^ 3)]
return res
# same, but 4 bytes.
def HOST_Read4(addr, type = 0):
s.send(struct.pack("<III", 0x10000008, addr, type))
return s.recv(0xc)[8:]
def HOST_Poke4(addr, data):
s.send(struct.pack("<IIII", 0x1100000C, addr, 0, data))
def HOST_Restart():
s.send(struct.pack("<I", 0x0A000000))
# Read a number of bytes (up to 32k) from DIMM memory (i.e. where the game is). Probably doesn't work for NAND-based games.
def DIMM_Read(addr, size):
s.send(struct.pack("<III", 0x05000008, addr, size))
return readsocket(size + 0xE)[0xE:]
def DIMM_GetInformation():
s.send(struct.pack("<I", 0x18000000))
return readsocket(0x10)
def DIMM_SetInformation(crc, length):
print "length: %08x" % length
s.send(struct.pack("<IIII", 0x1900000C, crc & 0xFFFFFFFF, length, 0))
def DIMM_Upload(addr, data, mark):
s.send(struct.pack("<IIIH", 0x04800000 | (len(data) + 0xA) | (mark << 16), 0, addr, 0) + data)
def NETFIRM_GetInformation():
s.send(struct.pack("<I", 0x1e000000))
return s.recv(0x404)
def CONTROL_Read(addr):
s.send(struct.pack("<II", 0xf2000004, addr))
return s.recv(0xC)
def SECURITY_SetKeycode(data):
assert len(data) == 8
s.send(struct.pack("<I", 0x7F000008) + data)
def HOST_SetMode(v_and, v_or):
s.send(struct.pack("<II", 0x07000004, (v_and << 8) | v_or))
return readsocket(0x8)
def DIMM_SetMode(v_and, v_or):
s.send(struct.pack("<II", 0x08000004, (v_and << 8) | v_or))
return readsocket(0x8)
def DIMM22(data):
assert len(data) >= 8
s.send(struct.pack("<I", 0x22000000 | len(data)) + data)
def MEDIA_SetInformation(data):
assert len(data) >= 8
s.send(struct.pack("<I", 0x25000000 | len(data)) + data)
def MEDIA_Format(data):
s.send(struct.pack("<II", 0x21000004, data))
def TIME_SetLimit(data):
s.send(struct.pack("<II", 0x17000004, data))
def DIMM_DumpToFile(file):
for x in xrange(0, 0x20000, 1):
file.write(DIMM_Read(x * 0x8000, 0x8000))
sys.stderr.write("%08x\r" % x)
def HOST_DumpToFile(file, addr, len):
for x in range(addr, addr + len, 0x10):
# if not (x & 0xFFF):
sys.stderr.write("%08x\r" % x)
file.write(HOST_Read16(x))
# upload a file into DIMM memory, and optionally encrypt for the given key.
# note that the re-encryption is obsoleted by just setting a zero-key, which
# is a magic to disable the decryption.
def DIMM_UploadFile(name, key = None):
import zlib
crc = 0
a = open(name, "rb")
addr = 0
if key:
d = DES.new(key[::-1], DES.MODE_ECB)
while True:
sys.stderr.write("%08x\r" % addr)
data = a.read(0x8000)
if not len(data):
break
if key:
data = d.encrypt(data[::-1])[::-1]
DIMM_Upload(addr, data, 0)
crc = zlib.crc32(data, crc)
addr += len(data)
crc = ~crc
DIMM_Upload(addr, "12345678", 1)
DIMM_SetInformation(crc, addr)
# obsolete
def PATCH_MakeProgressCode(x):
#addr = 0x80066ed8 # 2.03
#addr = 0x8005a9c0 # 1.07
#addr = 0x80068304 # 2.15
addr = 0x80068e0c # 3.01
HOST_Poke4(addr + 0, 0x4e800020)
HOST_Poke4(addr + 4, 0x38a00000 | x)
HOST_Poke4(addr + 8, 0x90a30000)
HOST_Poke4(addr + 12, 0x38a00000)
HOST_Poke4(addr + 16, 0x60000000)
HOST_Poke4(addr + 20, 0x4e800020)
HOST_Poke4(addr + 0, 0x60000000)
#obsolete
def PATCH_MakeContentError(x):
#addr = 0x80066b30 # 2.03
#addr = 0x8005a72c # 1.07
#addr = 0x80067f5c # 2.15
addr = 0x8005a72c # 3.01
HOST_Poke4(addr + 0, 0x4e800020)
HOST_Poke4(addr + 4, 0x38a00000 | x)
HOST_Poke4(addr + 8, 0x90a30000)
HOST_Poke4(addr + 12, 0x38a00000)
HOST_Poke4(addr + 16, 0x60000000)
HOST_Poke4(addr + 20, 0x4e800020)
HOST_Poke4(addr + 0, 0x60000000)
# this essentially removes a region check, and is triforce-specific; It's also segaboot-version specific.
# - look for string: "CLogo::CheckBootId: skipped."
# - binary-search for lower 16bit of address
def PATCH_CheckBootID():
# 3.01
addr = 0x8000dc5c
HOST_Poke4(addr + 0, 0x4800001C)
return
addr = 0x8000CC6C # 2.03, 2.15
#addr = 0x8000d8a0 # 1.07
HOST_Poke4(addr + 0, 0x4e800020)
HOST_Poke4(addr + 4, 0x38600000)
HOST_Poke4(addr + 8, 0x4e800020)
HOST_Poke4(addr + 0, 0x60000000)
# ok, now you're on your own, the tools are there.
# We see the DIMM space as it's seen by the dimm-board (i.e. as on the disc).
# It will be transparently decrypted when accessed from Host, unless a
# zero-key has been set. We do this before uploading something, so we don't
# have to bother with the inserted key chip. Still, some key chip must be
# present.
# You need to configure the triforce to boot in "satellite mode",
# which can be done using the dipswitches on the board (type-3) or jumpers
# (VxWorks-style).
# The dipswitch for type-3 must be in the following position:
# - SW1: ON ON *
# - It shouldn't wait for a GDROM anymore, but display error 31.
# For the VxWorks-Style:
# - Locate JP1..JP3 on the upper board in the DIMM board. They are near
# the GDROM-connector.
# The jumpers must be in this position for satellite mode:
# 1 3
# [. .]. JP1
# [. .]. JP2
# .[. .] JP3
# - when you switch on the triforce, it should say "waiting for network..."
#
# Good Luck. Warez are evil.
def HOST_DumpToFile(file):
for x in xrange(0, 0x10000, 1):
file.write(HOST_Read16(0x80000000 + x * 0x10))
sys.stderr.write("%08x\r" % x)
if 1:
# display "now loading..."
HOST_SetMode(0, 1)
# disable encryption by setting magic zero-key
SECURITY_SetKeycode("\x00" * 8)
# uploads file. Also sets "dimm information" (file length and crc32)
DIMM_UploadFile(sys.argv[2])
# restart host, this wil boot into game
HOST_Restart()
# set time limit to 10h. According to some reports, this does not work.
TIME_SetLimit(10*60*1000)
if 0:
# this is triforce-specific, and will remove the "region check"
PATCH_CheckBootID()
# this is not required anymore:
# PATCH_MakeContentError(2)
# PATCH_MakeProgressCode(5)
|
py | 1a3987871f27776e06a754714ea18049afb8114f | """This file contains functions used as part of a user creation pipeline, such as django-social-auth."""
# pylint: disable=W0613
from urllib.parse import urlunparse, urlparse
from .models import TermsAndConditions
from django.http import HttpResponseRedirect, QueryDict
from django.conf import settings
import logging
ACCEPT_TERMS_PATH = getattr(settings, "ACCEPT_TERMS_PATH", "/terms/accept/")
TERMS_RETURNTO_PARAM = getattr(settings, "TERMS_RETURNTO_PARAM", "returnTo")
LOGGER = logging.getLogger(name="termsandconditions")
def user_accept_terms(backend, user, uid, social_user=None, *args, **kwargs):
"""Check if the user has accepted the terms and conditions after creation."""
LOGGER.debug("user_accept_terms")
if TermsAndConditions.get_active_terms_not_agreed_to(user):
return redirect_to_terms_accept("/")
else:
return {"social_user": social_user, "user": user}
def redirect_to_terms_accept(current_path="/", slug="default"):
"""Redirect the user to the terms and conditions accept page."""
redirect_url_parts = list(urlparse(ACCEPT_TERMS_PATH))
if slug != "default":
redirect_url_parts[2] += slug
querystring = QueryDict(redirect_url_parts[4], mutable=True)
querystring[TERMS_RETURNTO_PARAM] = current_path
redirect_url_parts[4] = querystring.urlencode(safe="/")
return HttpResponseRedirect(urlunparse(redirect_url_parts))
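# A quick sketch of the URL this builds, assuming the default ACCEPT_TERMS_PATH
# ("/terms/accept/") and TERMS_RETURNTO_PARAM ("returnTo") shown above:
#
#   redirect_to_terms_accept("/dashboard/", "privacy")
#   # -> HttpResponseRedirect to "/terms/accept/privacy?returnTo=/dashboard/"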
|
py | 1a3989dc501970a8274e0e691271ad00765b2753 |
def strip_ptms(sequence):
"""
Removes all post-translation modifications (i.e. phosphorylation,
glycosylation, etc) from a sequence.
Parameters
----------
sequence : str
Returns
-------
str
"""
return sequence.upper()
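# Illustrative call, assuming lowercase letters are used in this codebase to
# mark modified residues:
#
#   >>> strip_ptms("AASsDDEEt")
#   'AASSDDEET'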
|
py | 1a398a1b9035d9b2eed90a609e13eb301109e027 | """
ASGI config for mydjangoproject project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'mydjangoproject.settings')
application = get_asgi_application()
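# With an ASGI server such as uvicorn, this module is typically served as
# (illustrative command):
#
#   uvicorn mydjangoproject.asgi:application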
|
py | 1a398adf3c2e95d9f2528a91e26b3c13909555e9 | # coding=utf8
# Copyright 2018 JDCLOUD.COM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# NOTE: This class is auto generated by the jdcloud code generator program.
class OmPropVo(object):
def __init__(self, uuid=None, omId=None, omName=None, attrName=None, attrText=None, attrType=None, userPin=None, instanceId=None):
"""
:param uuid: (Optional)
:param omId: (Optional)
:param omName: (Optional)
:param attrName: (Optional)
:param attrText: (Optional)
:param attrType: (Optional)
:param userPin: (Optional)
:param instanceId: (Optional)
"""
self.uuid = uuid
self.omId = omId
self.omName = omName
self.attrName = attrName
self.attrText = attrText
self.attrType = attrType
self.userPin = userPin
self.instanceId = instanceId
|
py | 1a398b91fa27e7aca31228f1b018db1604f10d35 | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import soundfile as sf
from ifdvsonogramonly import ifdvsonogramonly
import matplotlib
matplotlib.rcParams['pdf.fonttype'] = 42
matplotlib.rcParams['ps.fonttype'] = 42
from matplotlib.backends.backend_pdf import PdfPages
# import seaborn as sns; sns.set()
from matplotlib.ticker import FuncFormatter
"""
Load song data
"""
# load in song data
data_path = "C:/Users/abiga/Box " \
"Sync/Abigail_Nicole/ChippiesTimeOfDay" \
"/FinalChippiesDataReExportedAs44100Hz_LogTransformed_forTOD.csv"
log_song_data = pd.DataFrame.from_csv(data_path, header=0, index_col=None)
col_to_skip = ['Latitude', 'Longitude', 'RecordingDay',
'RecordingMonth', 'RecordingYear', 'RecordingTime',
'RecordingTimeSeconds']
data_subset = log_song_data.drop(col_to_skip, axis=1)
# load in time data --> before or after sunrise, twilights, and noon (only going to use sunrise and noon)
data_path = "C:/Users/abiga/Box " \
"Sync/Abigail_Nicole/ChippiesTimeOfDay" \
"/FinalChippiesDataReExportedAs44100Hz_LogTransformed" \
"_forTOD_SunriseTwilightNoon.csv"
time_data = pd.DataFrame.from_csv(data_path, header=0, index_col=None)
# must remove duplicates -- have more than one bird from same recording -- duplicate catalog number and time data
time_data = time_data.drop_duplicates()
# combine tables using catalog no
combined_df = pd.merge(data_subset, time_data, on='CatalogNo')
# only keep ones with time data
combined_df = combined_df.drop(combined_df[combined_df.Sunrise ==
'--'].index).copy().reset_index(
drop=True)
song_variables = combined_df.columns[1:5]
"""
Finding an example song for each time of day (the one closest to the average for the specified song features of interest)
"""
var_of_interest = ['Duration of Song Bout (log(ms))', 'Total Number of '
'Syllables (log(number))']
var_diffs = ['DiffBoutDur', 'DiffSyllDur', 'DiffSilenceDur', 'DiffNumSylls']
example_files = {}
bout_dur = {}
for time in ['before sunrise', 'after sunrise', 'after noon']:
mean_df = pd.DataFrame(columns=['CatalogNo', 'DiffBoutDur', 'DiffSyllDur', 'DiffSilenceDur', 'DiffNumSylls'])
for i in range(0, 2):
tod_data = combined_df.loc[combined_df['Sunrise'] == time]
mean_df['CatalogNo'] = tod_data['CatalogNo']
mean_df['Duration of Song Bout (log(ms))'] = tod_data['Duration of ' \
'Song Bout (' \
'log(ms))']
mean_df[var_diffs[i]] = abs(tod_data[var_of_interest[i]] - tod_data[
var_of_interest[i]].mean())
mean_df['DiffSum'] = mean_df[var_diffs].sum(axis=1)
example_files.update({time: mean_df.loc[mean_df['DiffSum'].idxmin()][
'CatalogNo']})
bout_dur.update({time: mean_df.loc[mean_df['DiffSum'].idxmin()][
'Duration of Song Bout (log(ms))']})
del mean_df
print(example_files)
"""
Load in example songs and make figures
"""
# outputs from before we added data that had been misclassified as Borror Lab but really was ML
# song_names = ['176261_44k_b5of11_beforesunriseExt.wav',
# 'XC76506_b1of2_morningExt.wav',
# '76777_b4of17_afternoonExt.wav']
song_names = ['176261_44k_b5of11_beforesunriseExt.wav',
'73829 s1 bout_morningExt_ampmore.wav', # amplified in Audacity for better visualization in plot
'15435 s1 bout_afternoonExt.wav']
for name in song_names:
song_file = "C:/Users/abiga\Box " \
"Sync\Abigail_Nicole\ChippiesTimeOfDay" \
"\TODExampleSongs_boutDurNumSylls/ExtendedTimeOfRecording/" +\
name
song, rate = sf.read(song_file)
sonogram, timeAxis_conversion, freqAxis_conversion = ifdvsonogramonly(song,
rate,
1024,
1010.0,
2.0)
fig = plt.figure(figsize=(11, 7))
ax = fig.add_subplot(1, 1, 1)
# sns.set(style='white')
[rows, cols] = np.shape(sonogram)
im = plt.imshow(np.log(sonogram+3),
cmap='gray_r',
extent=[0, cols, 0, rows],
aspect='auto')
ax.get_xaxis().set_major_formatter(plt.FuncFormatter(
lambda x, p: "%.2f" % (x*timeAxis_conversion/1000)))
ax.get_yaxis().set_major_formatter(plt.FuncFormatter(
lambda x, p: "%.0f" % (x*freqAxis_conversion/1000)))
plt.tick_params(labelsize=14)
plt.savefig("C:/Users/abiga\Box "
"Sync/Abigail_Nicole/ChippiesTimeOfDay"
"/TODExampleSongs_boutDurNumSylls/ExtendedTimeOfRecording/" +
name + '_sonogram' + '.pdf', type='pdf',
dpi=fig.dpi, bbox_inches='tight',
transparent=True)
# plt.show()
|
py | 1a398c8ebca30f2f91347ae8c837541bdd17cea7 | class ElementConstructor(type):
def __new__(cls, name, classes, fields):
fields["cache"] = dict()
return super().__new__(cls, name, classes, fields)
# Common abstract classes
class Element(metaclass=ElementConstructor):
def __new__(cls, key, *args, **kwargs):
if not str(key) in cls.cache:
cls.cache[str(key)] = super().__new__(cls, *args, **kwargs)
return cls.cache[str(key)]
def __repr__(self):
return str(self.__getattribute__(self.primary_key))
def delete(self):
key = self.__getattribute__(self.primary_key)
if key in self.cache:
del self.cache[key]
@classmethod
def clear_cache(cls):
cls.cache.clear()
@property
def primary_key(self):
raise NotImplementedError
class UpdatableElement(Element):
@property
def entry_data_path(self):
raise NotImplementedError
@property
def base_url(self):
raise NotImplementedError
def set_data(self, data):
raise NotImplementedError
class HasMediaElement(UpdatableElement):
@property
def media_path(self):
raise NotImplementedError
@property
def media_query_hash(self):
raise NotImplementedError
class Account(HasMediaElement):
primary_key = "username"
entry_data_path = ("ProfilePage", 0, "graphql", "user")
base_url = ""
media_path = ("user", "edge_owner_to_timeline_media")
media_query_hash = "c6809c9c025875ac6f02619eae97a80e"
def __init__(self, username):
# self.id = None
self.username = username
# self.full_name = None
# self.profile_pic_url = None
# self.profile_pic_url_hd = None
# self.fb_page = None
# self.biography = None
# self.follows_count = None
# self.followers_count = None
# self.media_count = None
# self.is_private = None
# self.is_verified = None
# self.country_block = None
self.media = set()
self.follows = set()
self.followers = set()
def set_data(self, data):
self.id = data["id"]
self.full_name = data["full_name"]
self.profile_pic_url = data["profile_pic_url"]
self.profile_pic_url_hd = data["profile_pic_url_hd"]
self.fb_page = data["connected_fb_page"]
self.biography = data["biography"]
self.follows_count = data["edge_follow"]["count"]
self.followers_count = data["edge_followed_by"]["count"]
self.media_count = data["edge_owner_to_timeline_media"]["count"]
self.is_private = data["is_private"]
self.is_verified = data["is_verified"]
self.country_block = data["country_block"]
class Media(UpdatableElement):
primary_key = "code"
entry_data_path = ("PostPage", 0, "graphql", "shortcode_media")
base_url = "p/"
def __init__(self, code):
self.id = None
self.code = code
self.caption = None
self.owner = None
self.date = None
self.location = None
self.likes_count = None
self.comments_count = None
self.comments_disabled = None
self.is_video = None
self.video_url = None
self.is_ad = None
self.display_url = None
self.resources = None
self.is_album = None
self.album = set()
self.likes = set()
self.comments = set()
def set_data(self, data):
self.id = data["id"]
self.code = data["shortcode"]
if data["edge_media_to_caption"]["edges"]:
self.caption = data["edge_media_to_caption"]["edges"][0]["node"]["text"]
else:
self.caption = None
if "username" in data["owner"]:
self.owner = Account(data["owner"]["username"])
self.date = data["taken_at_timestamp"]
if "location" in data and data["location"] and "id" in data["location"]:
self.location = Location(data["location"]["id"])
self.likes_count = data["edge_media_preview_like"]["count"]
if "edge_media_to_comment" in data:
self.comments_count = data["edge_media_to_comment"]["count"]
else:
self.comments_count = data["edge_media_to_parent_comment"]["count"]
self.comments_disabled = data["comments_disabled"]
self.is_video = data["is_video"]
if self.is_video and "video_url" in data:
self.video_url = data["video_url"]
if "is_ad" in data:
self.is_ad = data["is_ad"]
self.display_url = data["display_url"]
if "display_resources" in data:
self.resources = [resource["src"] for resource in data["display_resources"]]
else:
self.resources = [resource["src"] for resource in data["thumbnail_resources"]]
self.album = set()
self.is_album = data.get("__typename") == "GraphSidecar"
if "edge_sidecar_to_children" in data:
for edge in data["edge_sidecar_to_children"]["edges"]:
if edge["node"].get("shortcode", self.code) != self.code:
child = Media(edge["node"]["shortcode"])
child.id = edge["node"]["id"]
child.is_video = edge["node"]["is_video"]
if child.is_video and "video_url" in edge["node"]:
child.video_url = edge["node"]["video_url"]
child.display_url = edge["node"]["display_url"]
if "display_resources" in edge["node"]:
child.resources = [resource["src"] for resource in edge["node"]["display_resources"]]
elif "thumbnail_resources" in edge["node"]:
child.resources = [resource["src"] for resource in edge["node"]["thumbnail_resources"]]
child.is_album = False
self.album.add(child)
class Story(Element):
primary_key = "id"
def __init__(self, id):
self.id = id
class Location(HasMediaElement):
primary_key = "id"
entry_data_path = ("LocationsPage", 0, "graphql", "location")
base_url = "explore/locations/"
media_path = ("location", "edge_location_to_media")
media_query_hash = "ac38b90f0f3981c42092016a37c59bf7"
def __init__(self, id):
self.id = id
self.slug = None
self.name = None
self.has_public_page = None
self.directory = None
self.coordinates = None
self.media_count = None
self.media = set()
self.top_posts = set()
def set_data(self, data):
self.id = data["id"]
self.slug = data["slug"]
self.name = data["name"]
self.has_public_page = data["has_public_page"]
if "directory" in data:
self.directory = data["directory"]
self.coordinates = (data["lat"], data["lng"])
self.media_count = data["edge_location_to_media"]["count"]
for node in data["edge_location_to_top_posts"]["edges"]:
self.top_posts.add(Media(node["node"]["shortcode"]))
class Tag(HasMediaElement):
primary_key = "name"
entry_data_path = ("TagPage", 0, "graphql", "hashtag")
base_url = "explore/tags/"
media_path = ("hashtag", "edge_hashtag_to_media")
media_query_hash = "ded47faa9a1aaded10161a2ff32abb6b"
def __init__(self, name):
self.name = name
self.media_count = None
self.media = set()
self.top_posts = set()
def set_data(self, data):
self.name = data["name"]
self.media_count = data["edge_hashtag_to_media"]["count"]
for node in data["edge_hashtag_to_top_posts"]["edges"]:
self.top_posts.add(Media(node["node"]["shortcode"]))
class Comment(Element):
primary_key = "id"
def __init__(self, id, media, owner, text, created_at):
self.id = id
self.media = media
self.owner = owner
self.text = text
self.created_at = created_at
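# Illustrative behaviour of the per-class cache installed by ElementConstructor
# (the username is a placeholder):
#
#   a1 = Account("some_user")
#   a2 = Account("some_user")
#   assert a1 is a2           # same key -> same cached instance
#   Account.clear_cache()     # discards every cached Account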
|
py | 1a398d5ad9aac386bc90a69fa83b1d543d07dcbb | from .routing import Resolution, Router
from ..exceptions import ResponseTypeError
class API(object):
""" Base `API`. """
def __init__(self, request):
self._request = request
self._resolution = None
def _get(self, *args, **kwargs):
return self.__perform('GET', *args, **kwargs)
def _post(self, *args, **kwargs):
return self.__perform('POST', *args, **kwargs)
def _put(self, *args, **kwargs):
return self.__perform('PUT', *args, **kwargs)
def _patch(self, *args, **kwargs):
return self.__perform('PATCH', *args, **kwargs)
def _delete(self, *args, **kwargs):
return self.__perform('DELETE', *args, **kwargs)
def __perform(self, method, *args, **kwargs):
response_types = {
'raw': lambda response: response,
'json': lambda response: response.json(),
}
response_type = kwargs.pop('response_type', 'json')
if response_type not in response_types:
raise ResponseTypeError(
'Unsupported response type {}, must be one of: {}.'.format(
repr(response_type),
', '.join(map(repr, response_types.keys())),
)
)
response = self._request.perform(method, *args, **kwargs)
response.raise_for_status()
processed = response_types[response_type]
return processed(response)
def __getattr__(self, item):
if self._resolution is None:
self._resolution = self._build_resolution()
return self._resolution.__getattr__(item)
def _build_resolution(self):
""" Traverses the MRO and merges values of
`__router` attributes to build a single `Resolution`. """
router = None
for cls in type(self).mro():
attribute = '_{}__{}'.format(cls.__name__, 'router')
if hasattr(cls, attribute):
router = Router.merged(router, getattr(cls, attribute))
if router is None:
raise Exception(
'Could not build a resolution for {}.'.format(type(self))
)
return Resolution(self, router)
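# Sketch of how the verb helpers are meant to be used from a subclass method;
# the endpoint name is made up and the exact positional arguments accepted
# depend on the underlying request object:
#
#   data = self._get("status")                      # default: parsed JSON body
#   raw = self._get("status", response_type="raw")  # the raw response object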
|
py | 1a398d75c24ad30586e1773ee27ae3afe2f18f04 | # coding: utf-8
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=
"""Word embedding training datasets."""
__all__ = ['Text8']
import os
import zipfile
from mxnet.gluon.utils import check_sha1, download
from .dataset import CorpusDataset
from .utils import _get_home_dir
###############################################################################
# Datasets
###############################################################################
class Text8(CorpusDataset):
"""Text8 corpus
http://mattmahoney.net/dc/textdata.html
Part of the test data for the Large Text Compression Benchmark
http://mattmahoney.net/dc/text.html. The first 10**8 bytes of the English
Wikipedia dump on Mar. 3, 2006.
License: https://en.wikipedia.org/wiki/Wikipedia:Copyrights
Parameters
----------
root : str, default '$MXNET_HOME/datasets/text8'
Path to temp folder for storing data.
MXNET_HOME defaults to '~/.mxnet'.
"""
archive_file = ('text8.zip', '6c70299b93b7e1f927b42cd8f6ac1a31547c7a2e')
data_file = {
'train': ('text8', '0dc3edebc970dcc96137e7deda4d9995af9d93de')
}
url = 'http://mattmahoney.net/dc/'
def __init__(self, root=os.path.join(_get_home_dir(), 'datasets', 'text8'),
segment='train', max_sentence_length=10000):
root = os.path.expanduser(root)
if not os.path.isdir(root):
os.makedirs(root)
self._root = root
self._segment = segment
self._max_sentence_length = max_sentence_length
super(Text8, self).__init__(self._get_data())
# pylint: disable=access-member-before-definition
if max_sentence_length:
data = []
for sentence in self._data:
for i in range(0, len(sentence), max_sentence_length):
data.append(sentence[i:i + max_sentence_length])
self._data = data
def _get_data(self):
archive_file_name, archive_hash = self.archive_file
data_file_name, data_hash = self.data_file[self._segment]
root = self._root
path = os.path.join(root, data_file_name)
if not os.path.exists(path) or not check_sha1(path, data_hash):
downloaded_file_path = download(self.url + archive_file_name,
path=root, sha1_hash=archive_hash)
with zipfile.ZipFile(downloaded_file_path, 'r') as zf:
zf.extractall(root)
return path
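# Minimal usage sketch (the first call downloads the ~30 MB text8.zip under
# MXNET_HOME and extracts it; the printed numbers are illustrative):
#
#   text8 = Text8()                   # 'train' segment, slices capped at 10000 tokens
#   print(len(text8), len(text8[0]))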
|
py | 1a398db61ef6a5ca1d7f589805318611a161e778 | # test builtin callable
# primitives should not be callable
print(callable(None))
print(callable(1))
print(callable([]))
print(callable("dfsd"))
# modules should not be callable
import sys
print(callable(sys))
# builtins should be callable
print(callable(callable))
# lambdas should be callable
print(callable(lambda:None))
# user defined functions should be callable
def f():
pass
print(callable(f))
# types should be callable, but not instances
class A:
pass
print(callable(A))
print(callable(A()))
# instances with __call__ method should be callable
class B:
def __call__(self):
pass
print(callable(B()))
# this checks internal use of callable when extracting members from an instance
class C:
def f(self):
return "A.f"
class D:
g = C() # g is a value and is not callable
print(callable(D().g))
print(D().g.f())
print("PASS") |
py | 1a398de63db0862f51440f585deb26238dd4cfbd | def findSmollest(arr):
smallest = arr[0]
smallest_index = 0
for i in range(1, len(arr)):
if arr[i] < smallest:
smallest = arr[i]
smallest_index = i
return smallest_index
def selectionSort(arr):
newArr = []
for i in range(len(arr)):
smallest = findSmollest(arr)
newArr.append(arr.pop(smallest))
return newArr
print(selectionSort([5,3,8,2,6,9,7,5]))
print(selectionSort([53,3345,854,34,622,952,74,15]))
|
py | 1a398e51430301528bdb48dd0e7aadf831b73824 | """ Static order of nodes in dask graph
Dask makes decisions on what tasks to prioritize both
* Dynamically at runtime
* Statically before runtime
Dynamically we prefer to run tasks that were just made available. However when
several tasks become available at the same time we have an opportunity to break
ties in an intelligent way
d
|
b c
\ /
a
For example after we finish ``a`` we can choose to run either ``b`` or ``c``
next. Making small decisions like this can greatly affect our performance,
especially because the order in which we run tasks affects the order in which
we can release memory, which operationally we find to have a large effect on
many computations. We want to run tasks in such a way that we keep only a small
amount of data in memory at any given time.
Static Ordering
---------------
And so we create a total ordering over all nodes to serve as a tie breaker. We
represent this ordering with a dictionary mapping keys to integer values.
Lower scores have higher priority. These scores correspond to the order in
which a sequential scheduler would visit each node.
{'a': 0,
'c': 1,
'd': 2,
'b': 3}
There are several ways in which we might order our keys. This is a nuanced
process that has to take into account many different kinds of workflows, and
operate efficiently in linear time. We strongly recommend that readers look at
the docstrings of tests in dask/tests/test_order.py. These tests usually have
graph types laid out very carefully to show the kinds of situations that often
arise, and the order we would like to be determined.
Policy
------
Work towards *small goals* with *big steps*.
1. **Small goals**: prefer tasks whose final dependents have few dependencies.
We prefer to prioritize those tasks that help branches of computation that
can terminate quickly.
With more detail, we compute the total number of dependencies that each
task depends on (both its own dependencies, and the dependencies of its
dependencies, and so on), and then we choose those tasks that drive towards
results with a low number of total dependencies. We choose to prioritize
tasks that work towards finishing shorter computations first.
2. **Big steps**: prefer tasks with many dependents
However, many tasks work towards the same final dependents. Among those,
we choose those tasks with the most work left to do. We want to finish
the larger portions of a sub-computation before we start on the smaller
ones.
3. **Name comparison**: break ties with key name
Often graphs are made with regular keynames. When no other structural
difference exists between two keys, use the key name to break ties.
This relies on the regularity of graph constructors like dask.array to be a
good proxy for ordering. This is usually a good idea and a sane default.
"""
from __future__ import absolute_import, division, print_function
from .core import get_dependencies, reverse_dict, get_deps # noqa: F401
from .utils_test import add, inc # noqa: F401
def order(dsk, dependencies=None):
""" Order nodes in dask graph
This produces an ordering over our tasks that we use to break ties when
executing. We do this ahead of time to reduce a bit of stress on the
scheduler and also to assist in static analysis.
This currently traverses the graph as a single-threaded scheduler would
traverse it. It breaks ties in the following ways:
    1. Start from root nodes that have the largest subgraphs
2. When a node has dependencies that are not yet computed prefer
dependencies with large subtrees (start hard things first)
    3. When we reach a node that can be computed we then traverse up and
prefer dependents that have small super-trees (few total dependents)
(finish existing work quickly)
Examples
--------
>>> dsk = {'a': 1, 'b': 2, 'c': (inc, 'a'), 'd': (add, 'b', 'c')}
>>> order(dsk)
{'a': 0, 'c': 1, 'b': 2, 'd': 3}
"""
if dependencies is None:
dependencies = {k: get_dependencies(dsk, k) for k in dsk}
for k, deps in dependencies.items():
deps.discard(k)
dependents = reverse_dict(dependencies)
total_dependencies = ndependencies(dependencies, dependents)
total_dependents, min_dependencies = ndependents(dependencies, dependents, total_dependencies)
waiting = {k: set(v) for k, v in dependencies.items()}
def dependencies_key(x):
return total_dependencies.get(x, 0), ReverseStrComparable(x)
def dependents_key(x):
return (min_dependencies[x],
-total_dependents.get(x, 0),
StrComparable(x))
result = dict()
seen = set() # tasks that should not be added again to the stack
i = 0
stack = [k for k, v in dependents.items() if not v]
if len(stack) < 10000:
stack = sorted(stack, key=dependencies_key)
else:
stack = stack[::-1]
while stack:
item = stack.pop()
if item in result:
continue
deps = waiting[item]
if deps:
stack.append(item)
seen.add(item)
if len(deps) < 1000:
deps = sorted(deps, key=dependencies_key)
stack.extend(deps)
continue
result[item] = i
i += 1
for dep in dependents[item]:
waiting[dep].discard(item)
deps = [d for d in dependents[item]
if d not in result and not (d in seen and len(waiting[d]) > 1)]
if len(deps) < 1000:
deps = sorted(deps, key=dependents_key, reverse=True)
stack.extend(deps)
return result
def ndependents(dependencies, dependents, total_dependencies):
""" Number of total data elements that depend on key
For each key we return the number of keys that can only be run after this
key is run. The root nodes have value 1 while deep child nodes will have
larger values.
We also return the minimum value of the maximum number of dependencies of
all final dependencies (see module-level comment for more)
Examples
--------
>>> dsk = {'a': 1, 'b': (inc, 'a'), 'c': (inc, 'b')}
>>> dependencies, dependents = get_deps(dsk)
>>> total_dependencies = ndependencies(dependencies, dependents)
>>> total_dependents, min_dependencies = ndependents(dependencies,
... dependents,
... total_dependencies)
>>> sorted(total_dependents.items())
[('a', 3), ('b', 2), ('c', 1)]
Returns
-------
    total_dependents: Dict[key, int]
min_dependencies: Dict[key, int]
"""
result = dict()
min_result = dict()
num_needed = {k: len(v) for k, v in dependents.items()}
current = {k for k, v in num_needed.items() if v == 0}
while current:
key = current.pop()
result[key] = 1 + sum(result[parent] for parent in dependents[key])
try:
min_result[key] = min(min_result[parent] for parent in dependents[key])
except ValueError:
min_result[key] = total_dependencies[key]
for child in dependencies[key]:
num_needed[child] -= 1
if num_needed[child] == 0:
current.add(child)
return result, min_result
def ndependencies(dependencies, dependents):
""" Number of total data elements on which this key depends
For each key we return the number of tasks that must be run for us to run
this task.
Examples
--------
>>> dsk = {'a': 1, 'b': (inc, 'a'), 'c': (inc, 'b')}
>>> dependencies, dependents = get_deps(dsk)
>>> sorted(ndependencies(dependencies, dependents).items())
[('a', 1), ('b', 2), ('c', 3)]
"""
result = dict()
num_needed = {k: len(v) for k, v in dependencies.items()}
current = {k for k, v in num_needed.items() if v == 0}
while current:
key = current.pop()
result[key] = 1 + sum(result[child] for child in dependencies[key])
for parent in dependents[key]:
num_needed[parent] -= 1
if num_needed[parent] == 0:
current.add(parent)
return result
class StrComparable(object):
""" Wrap object so that it defaults to string comparison
When comparing two objects of different types Python fails
>>> 'a' < 1 # doctest: +SKIP
Traceback (most recent call last):
...
TypeError: '<' not supported between instances of 'str' and 'int'
This class wraps the object so that, when this would occur it instead
compares the string representation
>>> StrComparable('a') < StrComparable(1)
False
"""
__slots__ = ('obj',)
def __init__(self, obj):
self.obj = obj
def __lt__(self, other):
try:
return self.obj < other.obj
except Exception:
return str(self.obj) < str(other.obj)
class ReverseStrComparable(object):
""" Wrap object so that it defaults to string comparison
Used when sorting in reverse direction. See StrComparable for normal
documentation.
"""
__slots__ = ('obj',)
def __init__(self, obj):
self.obj = obj
def __lt__(self, other):
try:
return self.obj > other.obj
except Exception:
return str(self.obj) > str(other.obj)
|
py | 1a398f14218f824ddfdadccea1e5c312d07b267c | """
Example unit tests for MyProject package
"""
from __future__ import absolute_import
import unittest
import desc.myproject
class MyProjectTestCase(unittest.TestCase):
def setUp(self):
self.message = 'Hello, world'
def tearDown(self):
pass
def test_run(self):
foo = desc.myproject.MyProject(self.message)
#foo = MyProject(self.message)
self.assertEquals(foo.run(), self.message)
def test_failure(self):
self.assertRaises(TypeError, desc.myproject.MyProject)
#self.assertRaises(TypeError, MyProject)
foo = desc.myproject.MyProject(self.message)
#foo = MyProject(self.message)
self.assertRaises(RuntimeError, foo.run, True)
if __name__ == '__main__':
unittest.main()
|
py | 1a398f4aff25e58a2cb8d2b0d5cf980b17e825f1 | import sys
import os
sys.path.append(os.path.join(os.path.dirname(__file__), ".."))
from src.SimulationController import SimulationController
from src.TrafficMap import TrafficMap
from src.Road import Road
from src.Intersection import Intersection
from src.drivers.DriverTemplate import DriverTemplate
from src.vehicles.VehicleTemplate import VehicleTemplate
from src.TemplatePairFactory import TemplatePairFactory
def test_accelerated():
trafficmap = TrafficMap()
prebuilt_list = [((0, 1), DriverTemplate(),VehicleTemplate())]
onlyroad = Road([200,200], 800, 2, 2, 0, 50)
initial_intersection = Intersection(center = (200, 220), radius = 30, speed_limit = 200,
template_factory=TemplatePairFactory(1000, prebuilt_list))
terminal_intersection = Intersection(center = (1000, 220), radius = 30, speed_limit = 200,
template_factory=TemplatePairFactory(1000, prebuilt_list))
initial_intersection.bind_road_to_intersection(onlyroad,'initial')
terminal_intersection.bind_road_to_intersection(onlyroad,'terminal')
trafficmap.roadlist.append(onlyroad)
trafficmap.intersectionlist.append(initial_intersection)
trafficmap.intersectionlist.append(terminal_intersection)
controller = SimulationController(trafficmap, 200, 60, 10)
controller.run()
|
py | 1a398f7df7b5d63d9403b601a9956f3c61181414 | import os
import uuid
from datetime import datetime
from django.conf import settings
from django.utils.translation import gettext as _
from rest_framework.pagination import LimitOffsetPagination
from magpie.apps.files.models import File
from magpie.apps.files.versions.v1.serializers.file import (
FileSerializer,
FilesSerializer,
)
from magpie.core import api_exceptions
from magpie.core.cache import Cache
class FileService:
@classmethod
def upload_file(cls, request):
file_keys = dict(request.FILES).keys()
uploaded_files = []
for file_key in file_keys:
file_data = {}
file = request.FILES[file_key]
file_data['file'] = request.data[file_key]
file_data['consumer'] = request.consumer.id
file_data['file_name'] = request.FILES[file_key].name
# Check if file is greater than max upload size
if float(file.size) > float(
settings.MAGPIE['MAX_UPLOAD_SIZE']):
raise api_exceptions.ValidationError400({
'file_size': _('File is larger than expected'),
'max_size': f"{settings.MAGPIE['MAX_UPLOAD_SIZE']} "
f"bytes",
})
file_serializer = FileSerializer(
data=file_data,
)
if file_serializer.is_valid(raise_exception=True):
file_serializer.save()
uploaded_files.append(
file_serializer.data
)
upload_message = _("Count of uploaded files")
done_files_count = len(uploaded_files)
return {
"message": f"{upload_message}: {done_files_count}",
"count": done_files_count,
"files": uploaded_files,
}
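    # Shape of a successful upload response (illustrative):
    #   {"message": "Count of uploaded files: 2", "count": 2, "files": [...]}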
@classmethod
def download_file(cls, request, file_id):
try:
if not isinstance(file_id, uuid.UUID):
file_id = uuid.UUID(file_id)
except ValueError:
raise api_exceptions.ValidationError400(
{
'id': _('Not a valid UUID')
}
)
file_object = Cache.get(
str(f"file_id:{file_id}-user_id:{request.user.id}"),
)
if not file_object:
try:
file_object = File.objects.get(
file_id=file_id,
consumer=request.consumer,
)
Cache.set(
key=str(f"file_id:{file_id}-user_id:{request.user.id}"),
store_value=file_object,
expiry_time=settings.MAGPIE['CACHE_EXPIRY'],
)
except File.DoesNotExist:
raise api_exceptions.NotFound404(
                    _('File does not exist or does not belong to this user'),
)
path = file_object.file.path
return (
os.path.basename(path),
os.path.dirname(path),
)
@classmethod
def get_files(cls, request):
files_query = File.objects.filter(
consumer=request.consumer,
)
if request.query_params is not None:
if 'created_at_from' in request.query_params:
try:
created_at_from = datetime.fromtimestamp(
float(request.query_params['created_at_from'])
)
files_query = files_query.filter(
created_at__gte=created_at_from
)
except ValueError:
raise api_exceptions.ValidationError400(
detail={
'created_at_from': _("Datetime parsing error")
}
)
if 'created_at_to' in request.query_params:
try:
created_at_to = datetime.fromtimestamp(
float(request.query_params['created_at_to'])
)
files_query = files_query.filter(
created_at__lte=created_at_to
)
except ValueError:
raise api_exceptions.ValidationError400(
detail={
'created_at_to': _("Datetime parsing error")
}
)
# Order by
if 'order_by' in request.query_params:
order_field_error = []
order_by = [
x.strip() for x in request.query_params
['order_by'].split(',')
]
for order in order_by:
if not File.model_field_exists(
order.replace('-', ''),
):
order_field_error.append(order)
if order_field_error:
raise api_exceptions.ValidationError400(
{
'non_fields': _("Invalid choices in order by "
"query"),
'errors': order_field_error,
}
)
files_query = files_query.order_by(
*order_by
)
paginator = LimitOffsetPagination()
files_query = paginator.paginate_queryset(files_query, request)
files_serializer = FilesSerializer(
files_query,
many=True
)
return files_serializer.data, paginator
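    # Illustrative request against this handler (the URL prefix is made up;
    # limit/offset come from LimitOffsetPagination, timestamps are epoch seconds):
    #   GET /files/?created_at_from=1609459200&order_by=-created_at&limit=20&offset=0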
@classmethod
def delete_file(cls, request, file_id):
try:
if not isinstance(file_id, uuid.UUID):
file_id = uuid.UUID(file_id)
except ValueError:
raise api_exceptions.ValidationError400(
{
'id': _('Not a valid UUID')
}
)
try:
file_object = File.objects.get(
file_id=file_id,
consumer=request.consumer,
)
except File.DoesNotExist:
raise api_exceptions.NotFound404(
                _('File does not exist or does not belong to this consumer'),
)
Cache.delete(
key=str(f"file_id:{file_id}-user_id:{request.user.id}"),
)
file_object.file.delete()
file_object.delete()
return True
|
py | 1a3990647ac3e06106f74908df1236e3c12e473f | #!/usr/bin/env python3
# Copyright 2004-present Facebook. All Rights Reserved.
import math
import numpy as np
import unittest
from sumo.base.vector import Vector2, Vector2f, Vector3, Vector3f, on_left, unitize
class TestVector(unittest.TestCase):
def test_Vector2(self):
vector = Vector2(1, 2)
self.assertEqual(vector.shape, (2,))
self.assertEqual(vector.dtype, np.float64)
def test_Vector2f(self):
vector = Vector2f(1, 2)
self.assertEqual(vector.shape, (2,))
self.assertEqual(vector.dtype, np.float32)
def test_Vector3(self):
vector = Vector3(1, 2, 3)
self.assertEqual(vector.shape, (3,))
self.assertEqual(vector.dtype, np.float64)
def test_Vector3f(self):
vector = Vector3f(1, 2, 3)
self.assertEqual(vector.shape, (3,))
self.assertEqual(vector.dtype, np.float32)
def test_on_left(self):
N = Vector3(0, 0, 1)
p = Vector3(0, 2, 0)
a = Vector3(0, 0, 0)
b = Vector3(1, 0, 0)
self.assertTrue(on_left(N, p, a, b))
def test_unitize(self):
v = unitize(Vector3(2, 2, 2))
expected_v = Vector3(1, 1, 1) * (math.sqrt(3) / 3)
np.testing.assert_array_almost_equal(v, expected_v)
v = unitize(Vector3(100, 201, 50))
self.assertEqual(np.linalg.norm(v), 1)
v = unitize(Vector3(0, 0, 0))
np.testing.assert_array_almost_equal(v, Vector3(0, 0, 0))
if __name__ == "__main__":
unittest.main()
|
py | 1a39913346958772b88fdd32589a0094ee4ec413 | import re
import numpy as np
from specutils import SpectrumList
from specutils.io.registers import data_loader
from .common import get_times
from ..loaders import FITS_FILE_EXTS, no_auto_identify
def calc_aaomega_resolutions(hdr):
# TODO: Add MODE = MOS / IFU
grat = hdr.get("GRATID")
gang = hdr.get("GRATANGL")
cang = hdr.get("CAMANGL")
order = hdr.get("ORDER")
if not (grat and gang and cang and order):
return None, None, None, None
# perhaps I should get this from somewhere in header?
npixels = 2048
# use value for MOS
resolutionpix = 3.4
# check if hdr['INSTRUME'] contains KOALA or SPIRAL ???
# resolutionpix = 2.1
rad = 180.0 / np.pi
flcam = 247
pix = 0.015
hwid = (npixels * pix / flcam) / 2.0
ddisp = np.cos(72.5 / (3.15 * 190))
slant = {
"580V": 0.7,
"385R": 0.6,
"1700B": 0.2,
"1500V": 0.0,
"1000R": 1.2,
"1000I": 1.8,
"3200R": 0.0,
"2500V": 0.0,
"2000R": 0.0,
"1700I": 0.7,
"1700D": 0.2,
}
slantr = slant[grat] / rad
linespmm = int(grat[:-1])
gangr = gang / rad
cangr = cang / rad - gangr
lcen = 1e7 * (np.sin(gangr) + np.sin(cangr)) / (linespmm * order)
lblaze = 1e7 * 2 * np.sin(gangr + slantr) / (linespmm * order)
# Get central and blaze wavelengths
lcen = int(lcen + 0.5)
lblaze = int(lblaze + 0.5)
dispc = 1e7 * pix / flcam * np.cos(cangr) / (order * linespmm)
resolutionpix = resolutionpix * np.cos(gangr) / np.cos(cangr)
resa = resolutionpix * dispc
res = lcen / resa
lcb = 1e7 * (np.sin(gangr) + np.sin(cangr - hwid)) / (order * linespmm)
lcr = 1e7 * (np.sin(gangr) + np.sin(cangr + hwid)) / (order * linespmm)
leb = ddisp * lcb
ler = ddisp * lcr
dcb = 1e7 * pix / flcam * np.cos(cangr - hwid) / (order * linespmm)
dcr = 1e7 * pix / flcam * np.cos(cangr + hwid) / (order * linespmm)
deb = ddisp * dcb
der = ddisp * dcr
racb = resolutionpix * dcb
racr = resolutionpix * dcr
raeb = racb * ddisp
raer = racr * ddisp
rcb = lcb / (resolutionpix * dcb)
rcr = lcr / (resolutionpix * dcr)
reb = rcb / ddisp
rer = rcr / ddisp
dispc = int((1000 * dispc) + 0.5) / 1000
resa = int((1000 * resa) + 0.5) / 1000
res = int(res + 0.5)
resa = int((1000 * resa) + 0.5) / 1000
res = int(res + 0.5)
lcb = int(lcb + 0.5)
lcr = int(lcr + 0.5)
leb = int(leb + 0.5)
ler = int(ler + 0.5)
dcb = int((1000 * dcb) + 0.5) / 1000
dcr = int((1000 * dcr) + 0.5) / 1000
deb = int((1000 * deb) + 0.5) / 1000
der = int((1000 * der) + 0.5) / 1000
racb = int((1000 * racb) + 0.5) / 1000
racr = int((1000 * racr) + 0.5) / 1000
raeb = int((1000 * raeb) + 0.5) / 1000
raer = int((1000 * raer) + 0.5) / 1000
rcb = int(rcb + 0.5)
rcr = int(rcr + 0.5)
reb = int(reb + 0.5)
rer = int(rer + 0.5)
covc = lcr - lcb
cove = ler - leb
cov = ler - lcb
cen_res = resa
cen_rp = res
cen_rp_min = rcb
cen_rp_max = rcr
# cen_res = FWHM in Angstrom;
# cen_rp = resolving power at central wavelength
# cen_rp_min = min resolving power
# cen_rp_max = max resolving power
return cen_res, cen_rp, cen_rp_min, cen_rp_max
@data_loader(
label="Data Central AAOmega obscore", extensions=FITS_FILE_EXTS,
dtype=SpectrumList, identifier=no_auto_identify,
)
def aaomega_obscore_loader(fname):
spectra = SpectrumList.read(fname, format="Data Central AAOmega")
for spec in spectra:
cen_res, cen_rp, cen_rp_min, cen_rp_max = calc_aaomega_resolutions(
spec.meta["header"]
)
if cen_res is not None:
break
for spec in spectra:
# Don't produce obscore values for sky
if spec.meta["purpose"] == "reduced":
spec.meta["obscore"] = {}
obscore = spec.meta["obscore"]
hdr = spec.meta["header"]
t1, t2 = get_times(hdr, duration_kw="EXPOSED")
if t1 is not None:
obscore["t_min"] = t1.to_value('mjd',subfmt='float')
if t2 is not None:
obscore["t_max"] = t2.to_value('mjd',subfmt='float')
obscore["s_ra"] = hdr["RA"]
obscore["s_dec"] = hdr["DEC"]
obscore["s_fov"] = 2.1 / 3600
obscore["s_seeing"] = hdr.get("SEEING")
obscore["obs_collection"] = "aat_archive"
obscore["facility_name"] = "AAT"
# obscore["dataproduct_type"] = "spectrum"
obscore["dataproduct_subtype"] = "science"
obscore["calib_level"] = 2
obscore["t_exptime"] = hdr.get("EXPOSED")
nspecpix = len(spec.spectral_axis)
obscore["em_xel"] = nspecpix
obscore["em_ucd"] = "em.wl"
obscore["em_unit"] = "angstrom"
obscore["s_xel1"] = nspecpix
obscore["s_xel2"] = 1
obscore["t_xel"] = 1
obscore["em_min"] = spec.spectral_axis[0].meter
obscore["em_max"] = spec.spectral_axis[-1].meter
obscore["em_res_power"] = cen_rp
obscore["em_res_power_min"] = cen_rp_min
obscore["em_res_power_max"] = cen_rp_max
obscore["em_resolution"] = cen_res * 1e-10
obscore["o_ucd"] = "phot.count"
# spectra are calibrated in flux: ROW1 hdr comment: Flux-calibrated
# spectrum in 10^-17 erg/s/cm^2/A
# obscore['o_ucd'] = 'phot.flux'
# obscore['o_unit'] = '1.0E-17 erg/s/cm^2/A'
# obscore['o_calib_status'] = 'absolute'
# or perhaps 'relative' since these are fibre spectra?
obscore["instrument_name"] = "2dF-AAOmega"
obscore["em_calib_status"] = "calibrated"
if "OBJECT" in hdr:
obscore["target_name"] = hdr["OBJECT"]
if "OBJCOM" in hdr:
obscore["alt_target_name"] = hdr["OBJCOM"]
# alternative name: OBJECT
if "OBJPIV" in hdr:
obscore["obs_id"] = "%s-%s" % (
re.sub(".sds", "", hdr["CFG_FILE"]),
hdr["OBJPIV"],
)
return spectra
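# --- Illustrative usage sketch (an assumption, not from the original source) ---
# The @data_loader decorator above registers this reader under the label
# "Data Central AAOmega obscore", so a file could in principle be read with:
#
#     spectra = SpectrumList.read("example.fits", format="Data Central AAOmega obscore")
#     print(spectra[0].meta.get("obscore", {}))
#
# "example.fits" is a placeholder filename used only for illustration.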
|
py | 1a399176014fe597d95ca28a88c9c6787c5fe55b | #*****************************************************
# *
# Copyright 2019 Amazon.com, Inc. or its affiliates. *
# All Rights Reserved. *
# *
#*****************************************************
# trigger_app.py
# This application acts as an IoT device in the AWS DeepLens Greengrass group.
# It triggers the camera to take a capture which is then published in a topic.
# The device consumes the message from the topic and shows it in the screen.
# The user then decides whether to keep it (pressing the letter 'y'), stop the app
# by pressing 'q', or drop it by pressing any other key
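# Message flow, in brief: the app publishes {"action": "capture"} on the
# 'trigger/snap' topic, receives {"id": ..., "thumbnail": <base64-encoded image>}
# on 'trigger/thumbnail', and publishes {"action": "save", "id": ...} back on
# 'trigger/snap' when the user presses 'y'.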
import logging
import queue
import base64
import json
from AWSIoTPythonSDK.MQTTLib import AWSIoTMQTTClient
import numpy as np
import cv2
import time
GREENGRASS_IP = "<your DeepLens's IP address>"
logging.basicConfig()
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
# Queue to receive the pictures from the DeepLens
thumbnail_queue = queue.Queue()
# For certificate based connection
mqttClient = AWSIoTMQTTClient("trigger")
# Configurations
# For TLS mutual authentication
mqttClient.configureEndpoint(GREENGRASS_IP, 8883)
# Make sure your certificates and key names are the same as below
mqttClient.configureCredentials("./certs/rootca.pem", "./certs/private.pem.key", "./certs/certificate.pem.crt")
mqttClient.configureOfflinePublishQueueing(-1) # Infinite offline Publish queueing
mqttClient.configureDrainingFrequency(2) # Draining: 2 Hz
mqttClient.configureConnectDisconnectTimeout(5) # 5 sec
mqttClient.configureMQTTOperationTimeout(5) # 5 sec
def main():
try:
connected = False
logger.debug("Connecting")
mqttClient.connect()
connected = True
mqttClient.subscribe('trigger/thumbnail', 0, process_thumbnail)
logger.debug("Connected!")
except BaseException as e:
logger.error("Error in connect!")
logger.exception(e)
if connected:
cv2.namedWindow("Input")
while True:
# Notify the camera to take a picture
mqttClient.publish('trigger/snap', json.dumps({ 'action': 'capture' }), 0)
# Wait until there is a thumbnail to show
try:
payload = thumbnail_queue.get()
except Exception:
                payload = None
if payload:
thumbnail = payload.get('thumbnail')
pic_id = payload.get('id')
if thumbnail:
# Show the picture and wait for user input
pressed_key = str(chr(show_thumbnail(thumbnail) & 255)).lower()
if pressed_key == 'y':
logger.debug('Telling to store into S3')
# Notify the camera to save the picture
mqttClient.publish('trigger/snap', json.dumps({ 'action': 'save', 'id': pic_id }), 0)
elif pressed_key == 'q':
break
else:
time.sleep(5)
cv2.destroyAllWindows()
def show_thumbnail(thumbnail):
logger.debug(len(thumbnail))
nparr = np.frombuffer(base64.b64decode(thumbnail), np.uint8)
img = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
cv2.imshow('image', img)
return(cv2.waitKey(0))
def process_thumbnail(client, userdata, message):
payload = json.loads(message.payload.decode())
logger.debug('New message received: ')
logger.debug(payload.get('id'))
logger.debug("from topic: ")
logger.debug(message.topic)
logger.debug("--------------\n\n")
thumbnail_queue.put(payload)
if __name__ == "__main__":
main()
|
py | 1a399242f3f512a96d4bcd4bb1dbb126de647d20 | # include all necessary import statements here
# function shifting problem
def shift_function(f, a, b, n=401):
""" 'f' is a callable funciton, 'a' and 'b' are
the limits of the interval you want to consider."""
pass
# plotting for the example in the function shifting problem
def funcplot(f, a, b, n=401):
""" Constructs and plots the example given in the
problem on shifting the domain of a function to [-1, 1].
'n' is the number of points to use to generate the plot."""
pass
# example in the function shifting problem
def shift_example(n=401):
""" Plot the example given in the function shifting problem."""
pass
# integral estimation problem
def estimate_integral(f, a, b, points, weights):
""" Estimate the value of an integral given
the function 'f', the interval bounds 'a' and 'b',
the nodes to use for sampling, and their
corresponding weights."""
pass
# Jacobi construction problem
def construct_jacobi(a, b, c):
""" Construct the Jacobi matrix given the
sequences 'a', 'b', and 'c' from the
three term recurrence relation."""
pass
# points and weights problem
def points_and_weights(n):
""" Find the set of 'n' nodes and their
corresponding weights for the interval [-1, 1]."""
pass
# normal distribution cdf problem
def normal_cdf(x):
"""Compute the CDF of the standard normal
distribution at the point 'x'."""
pass
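# --- Illustrative sketches only; these are assumptions, not the intended solutions. ---
# The helpers below sketch two of the tasks described in the docstrings above.
# The _example_* names are hypothetical, and 'f' is assumed to accept NumPy arrays.
def _example_estimate_integral(f, a, b, points, weights):
    """Sketch of Gaussian quadrature on [a, b] using nodes/weights for [-1, 1]."""
    import numpy as np
    points = np.asarray(points, dtype=float)
    weights = np.asarray(weights, dtype=float)
    # Shift the nodes from [-1, 1] onto [a, b], then scale the weighted sum.
    shifted = (b - a) / 2.0 * points + (a + b) / 2.0
    return (b - a) / 2.0 * np.sum(weights * f(shifted))
def _example_normal_cdf(x):
    """Sketch of the standard normal CDF via the error function."""
    from math import erf, sqrt
    return 0.5 * (1.0 + erf(x / sqrt(2.0)))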
|
py | 1a39927a45a04944a1b602bb24e427807f2beb58 | # coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Test for movie rationales dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow_datasets import testing
from tensorflow_datasets.text import movie_rationales
class MovieRationalesTest(testing.DatasetBuilderTestCase):
DATASET_CLASS = movie_rationales.MovieRationales
SPLITS = {
"train": 3, # Number of fake train example
"test": 1, # Number of fake test example
"validation": 1,
}
if __name__ == "__main__":
testing.test_main()
|
py | 1a399296cc10d2218481db634114eb9e0c04b98d | """jc - JSON CLI output utility `systemctl` command output parser
Usage (cli):
$ systemctl | jc --systemctl
or
$ jc systemctl
Usage (module):
import jc.parsers.systemctl
result = jc.parsers.systemctl.parse(systemctl_command_output)
Schema:
[
{
"unit": string,
"load": string,
"active": string,
"sub": string,
"description": string
}
]
Examples:
$ systemctl -a | jc --systemctl -p
[
{
"unit": "proc-sys-fs-binfmt_misc.automount",
"load": "loaded",
"active": "active",
"sub": "waiting",
"description": "Arbitrary Executable File Formats File System Automount Point"
},
{
"unit": "dev-block-8:2.device",
"load": "loaded",
"active": "active",
"sub": "plugged",
"description": "LVM PV 3klkIj-w1qk-DkJi-0XBJ-y3o7-i2Ac-vHqWBM on /dev/sda2 2"
},
{
"unit": "dev-cdrom.device",
"load": "loaded",
"active": "active",
"sub": "plugged",
"description": "VMware_Virtual_IDE_CDROM_Drive"
},
...
]
"""
import jc.utils
class info():
"""Provides parser metadata (version, author, etc.)"""
version = '1.4'
description = '`systemctl` command parser'
author = 'Kelly Brazil'
author_email = '[email protected]'
# compatible options: linux, darwin, cygwin, win32, aix, freebsd
compatible = ['linux']
magic_commands = ['systemctl']
__version__ = info.version
def _process(proc_data):
"""
Final processing to conform to the schema.
Parameters:
proc_data: (List of Dictionaries) raw structured data to process
Returns:
List of Dictionaries. Structured data to conform to the schema.
"""
# nothing more to process
return proc_data
def parse(data, raw=False, quiet=False):
"""
Main text parsing function
Parameters:
data: (string) text data to parse
raw: (boolean) output preprocessed JSON if True
quiet: (boolean) suppress warning messages if True
Returns:
List of Dictionaries. Raw or processed structured data.
"""
if not quiet:
jc.utils.compatibility(__name__, info.compatible)
# Clear any blank lines
linedata = list(filter(None, data.splitlines()))
raw_output = []
if jc.utils.has_data(data):
# clean up non-ascii characters, if any
cleandata = []
for entry in linedata:
cleandata.append(entry.encode('ascii', errors='ignore').decode())
header_text = cleandata[0]
header_list = header_text.lower().split()
raw_output = []
for entry in cleandata[1:]:
if 'LOAD = ' in entry:
break
else:
entry_list = entry.rstrip().split(maxsplit=4)
output_line = dict(zip(header_list, entry_list))
raw_output.append(output_line)
if raw:
return raw_output
else:
return _process(raw_output)
|
py | 1a3992afe7b77d1dcfbd909ed9ae8a6cc3ba690c |
# https://practice.geeksforgeeks.org/problems/leaders-in-an-array-1587115620/1/?track=md-arrays&batchId=144#
# https://www.geeksforgeeks.org/leaders-in-an-array/
def leaders(A,N):
#Code here
ls = []
max_from_right = A[-1]
for i in range(N-2,-1,-1):
# max_ele=max(A[i+1:])
# print(A[i])
if max_from_right <= A[i]:
# max_ele=max(A[i+1:])
ls.append(A[i])
max_from_right = A[i]
ls=ls[::-1]
ls.append(A[-1])
return ls
a= [16,17,4,3,5,2]
n = 6
print(leaders(a,n))
|
py | 1a3992f0a3dc71036ead7771af08c2d141f762a8 | # Given a list of iterators, implement a FlattenedIterator class which incrementally iterates over the integers from all the iterators in an interleaved fashion.
# Example:
# Iterators[0] = [1,2,3]
# Iterators[1] = [4,5]
# Iterators[2] = [6,7,8]
# FlattenedIterator = [1, 4, 6, 2, 5, 7, 3, 8]
# An iterator implements the next() and hasNext() interface. You're free to use them, and you will implement them on the FlattenedIterator class.
# You're free to initialize FlattenedIterator with any data structure of your choice for the iterators.
class FlattenedIterator:
def __init__(self, subiterators):
self.subiterators = []
self.res_index = 0
self.getValue(subiterators)
def getValue(self,Subiterators):
for item in Subiterators:
if item.hasNext():
self.subiterators.append(item)
def ridValue(self):
self.subiterators.pop(self.res_index)
def moveNext(self):
res_index = self.res_index
if not self.subiterators[res_index].hasNext():
self.ridValue()
else:
res_index = self.res_index + 1
if res_index <= len(self.subiterators) - 1:
self.res_index = res_index
else:
self.res_index = 0
def hasNext(self):
if (self.subiterators):
return True
return False
def next(self):
if self.hasNext():
next_value = self.subiterators[self.res_index].next()
self.moveNext()
return next_value
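# Illustrative usage sketch (an assumption, not part of the original solution):
# ListIterator below is a hypothetical list-backed iterator exposing the
# next()/hasNext() interface described above, used only to demonstrate the
# interleaved output.
class ListIterator:
    def __init__(self, values):
        self.values = values
        self.pos = 0
    def hasNext(self):
        return self.pos < len(self.values)
    def next(self):
        value = self.values[self.pos]
        self.pos += 1
        return value
if __name__ == "__main__":
    its = [ListIterator([1, 2, 3]), ListIterator([4, 5]), ListIterator([6, 7, 8])]
    flattened = FlattenedIterator(its)
    out = []
    while flattened.hasNext():
        out.append(flattened.next())
    print(out)  # expected: [1, 4, 6, 2, 5, 7, 3, 8]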
|
py | 1a399343b1898a0dee369c5518d0047cc3d3d169 | from django.core.exceptions import ValidationError
from django.test import TestCase
from wagtail.core.models import Page
from wagtail.tests.utils import WagtailPageTests
from bc.standardpages.tests.fixtures import IndexPageFactory, InformationPageFactory
from ...standardpages.models import IndexPage, InformationPage
from ..models import HomePage
from .fixtures import HomePageFactory
class HomepageWagtailPageTests(WagtailPageTests):
"""
Test page creation and infrastructure
"""
def test_can_create_homepage(self):
self.assertCanCreateAt(Page, HomePage)
def test_can_only_create_homepage_under_root(self):
self.assertAllowedParentPageTypes(
HomePage,
{Page},
msg="HomePage should only be added as child of Page (root)",
)
class HomePageModelTests(TestCase):
def setUp(self):
self.root_page = Page.objects.get(id=1)
"""
Create a homepage which satisfies all required fields for positive test.
"""
self.homepage = HomePageFactory.build_with_fk_objs_committed()
self.root_page.add_child(instance=self.homepage)
"""
Set up children pages for TOC
5 IndexPage with 4 children InformationPage each
"""
self.index_pages = []
for i in range(5):
index_page = IndexPageFactory.build()
self.homepage.add_child(instance=index_page)
self.index_pages.append(index_page)
for j in range(4):
information_page = InformationPageFactory.build()
index_page.add_child(instance=information_page)
"""
Set up information page as children of homepage
"""
self.information_page = InformationPageFactory.build()
self.homepage.add_child(instance=self.information_page)
def test_hero_validation_when_no_image(self):
with self.assertRaises(ValidationError):
self.homepage.hero_image.delete()
self.homepage.save()
def test_hero_validation_when_no_strapline(self):
with self.assertRaises(ValidationError):
self.homepage.strapline = None
self.homepage.save()
def test_child_sections_types(self):
# IndexPage can only be created as direct children of homepage, so we don't have to test for nested IndexPage
self.assertEqual(
len(self.homepage.child_sections),
len(self.index_pages),
msg="HomePage.child_sections should get IndexPage pages under the homepage, nothing more.",
)
self.assertTrue(
len(self.homepage.child_sections) < len(self.homepage.get_children()),
msg="Homepage.child_sections should not include pages that are not IndexPage.",
)
def test_child_sections_only_get_published_sections(self):
self.index_pages[0].unpublish()
self.assertEqual(
len(self.homepage.child_sections),
len(self.index_pages) - 1,
msg="HomePage.child_sections should not include unpublished pages.",
)
def test_child_sections_only_get_public_sections(self):
self.index_pages[0].view_restrictions.create(password="test")
self.assertEqual(
len(self.homepage.child_sections),
len(self.index_pages) - 1,
msg="HomePage.child_sections should not include private pages.",
)
def test_child_sections_sortorder(self):
"""
Test that the queryset for IndexPage uses Wagtail explorer sort order
"""
section_page = self.index_pages[0]
original_order = list(
self.homepage.child_sections.values_list("title", flat=True)
)
        # Move self.index_pages[0]'s sort order to last
section_page.path = IndexPage._get_children_path_interval(self.homepage.path)[1]
section_page.save()
self.assertNotEqual(
original_order,
list(self.homepage.child_sections.values_list("title", flat=True)),
msg="HomePage.child_sections should sort by page path (Wagtail explorer custom sort).",
)
"""
Testing IndexPage.featured_pages
This is also covered in IndexPageModelTests(). However we are also testing here
in case someone decides to change how it behaves on IndexPage and doesn't realise
it also affects HomePage.
"""
def test_child_sections_returns_max_3_grandchildren(self):
# We have initially created 4 children under self.index_pages[0]
self.assertNotEqual(
len(self.index_pages[0].featured_pages),
len(self.index_pages[0].get_children().live().public()),
msg="IndexPage.featured_pages should be limited.",
)
self.assertLessEqual(
len(self.index_pages[0].featured_pages),
3,
msg="IndexPage.featured_pages should be limited to max 3.",
)
def test_child_sections_returns_live_grandchildren(self):
# Unpublish 2 of the 4 children
children = self.index_pages[0].featured_pages
children[0].unpublish()
children[1].unpublish()
self.assertNotEqual(
len(self.index_pages[0].featured_pages),
len(self.index_pages[0].get_children().public()[:3]),
msg="IndexPage.featured_pages should not include unpublished pages.",
)
def test_child_sections_returns_public_grandchildren(self):
section_page = self.index_pages[0]
section_page.get_children().first().delete() # delete 1 so we only have 3 to start with
section_page.get_children().last().view_restrictions.create(password="test")
self.assertEqual(
len(section_page.featured_pages),
len(section_page.get_children().live()) - 1,
msg="IndexPage.featured_pages should not include private pages.",
)
def test_child_sections_grandchildren_sortorder(self):
"""
Test that the queryset grandchildren uses Wagtail explorer sort order
"""
section_page = self.index_pages[0]
child_page = section_page.featured_pages.first()
original_order = list(
section_page.featured_pages.values_list("title", flat=True)
)
        # Move child_page's sort order to last
child_page.path = InformationPage._get_children_path_interval(
section_page.path
)[1]
child_page.save()
self.assertNotEqual(
original_order,
list(section_page.featured_pages.values_list("title", flat=True)),
msg="IndexPage.featured_pages should sort by page path (Wagtail explorer custom sort).",
)
|
py | 1a399346402cd91bcd9ac19d8cd63b04f5db9ff9 | gab = ["A", "C", "E", "B", "D", "B", "B", "C", "A", "E"]
n = []
while True:
p = []
print('-'*50)
    nome = input('CANDIDATE NAME (type N to quit): ').upper()
if nome == "N":
break
else:
ac = 0
p.append(nome)
for i in range(1, 11):
print('-'*50)
            resposta = input('ENTER THE ANSWER, FROM A TO E: ').upper()
if resposta == gab[i-1]:
ac += 1
p.append(resposta)
p.append(ac)
n.append(p)
for i in range(0, len(n)):
    print('FINAL SCORE->',n[i][0], ": ", n[i][11])
|
py | 1a3994c5adcd42a85e1f7707db6f753e8a3bbf97 | from django.http import HttpResponseRedirect
from django.shortcuts import render, redirect, get_object_or_404
from django.http import HttpResponse
from django.views.generic import TemplateView
from .forms import PostForm, CommentForm
from .models import Post, Comment
from django.contrib.auth.models import User
from django.contrib.auth.decorators import login_required
@login_required
def get_posts(request):
if request.method == 'POST':
if 'post' in request.POST:
postForm = PostForm(request.POST)
if postForm.is_valid():
post = postForm.save(commit=False)
post.author = request.user
post.save()
return redirect('cabPosts:posts')
else:
commentForm=CommentForm(request.POST)
if commentForm.is_valid():
post_id = request.POST['post_id']
post_instance = get_object_or_404(Post, id=post_id)
comment = commentForm.save(commit=False)
comment.name = request.user
comment.post = post_instance
comment.email = request.user.email
comment.save()
return redirect('cabPosts:posts')
else:
return render(request,'500.html',{})
else:
postForm = PostForm()
posts = Post.objects.all()
commentForm = CommentForm()
comments=Comment.objects.all()
args = {'postForm':postForm, 'posts':posts ,'commentForm':commentForm,'comments':comments}
return render(request, 'cabPosts/posts.html', args)
|
py | 1a399569811b0641b0818f7300087d9e2bd78b79 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
from msrest.exceptions import HttpOperationError
class ErrorResponse(Model):
"""Describes the format of Error response.
:param code: Error code
:type code: str
:param message: Error message indicating why the operation failed.
:type message: str
"""
_attribute_map = {
'code': {'key': 'code', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
}
def __init__(self, code=None, message=None):
super(ErrorResponse, self).__init__()
self.code = code
self.message = message
class ErrorResponseException(HttpOperationError):
"""Server responsed with exception of type: 'ErrorResponse'.
:param deserialize: A deserializer
:param response: Server response to be deserialized.
"""
def __init__(self, deserialize, response, *args):
super(ErrorResponseException, self).__init__(deserialize, response, 'ErrorResponse', *args)
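# Illustrative only (an assumption, not from the generated source): given the
# _attribute_map on ErrorResponse above, a serialized error is expected to look
# like {"code": "<error code>", "message": "<reason the operation failed>"}.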
|
py | 1a3995722b45288b45fd166ce9290646a6a99ca8 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sys
sys.path.append('../') # or just install the module
sys.path.append('../../fuzzy-tools') # or just install the module
sys.path.append('../../astro-lightcurves-handler') # or just install the module
sys.path.append('../../astro-lightcurves-fats') # or just install the module
###################################################################################################################################################
import argparse
from fuzzytools.prints import print_big_bar
parser = argparse.ArgumentParser(prefix_chars='--')
parser.add_argument('--method', type=str)
parser.add_argument('--kf', type=str)
parser.add_argument('--mid', type=str, default='0')
parser.add_argument('--mode', type=str, default='all')
parser.add_argument('--classifier_mids', type=int, default=2)
main_args = parser.parse_args()
print_big_bar()
###################################################################################################################################################
import numpy as np
from fuzzytools.files import load_pickle, save_pickle, get_dict_from_filedir
from lcfats.files import load_features
from fuzzytools.progress_bars import ProgressBar
from lcfats.classifiers import train_classifier, evaluate_classifier
import pandas as pd
filedir = f'../../surveys-save/survey=alerceZTFv7.1~bands=gr~mode=onlySNe~method={main_args.method}.splcds'
filedict = get_dict_from_filedir(filedir)
rootdir = filedict['_rootdir']
cfilename = filedict['_cfilename']
lcdataset = load_pickle(filedir)
lcset_info = lcdataset['raw'].get_info()
lcdataset.only_keep_kf(main_args.kf) # saves ram
# print(lcdataset)
# for train_config in ['r']:
for train_config in ['r', 's', 'r+s']:
for classifier_mid in range(0, main_args.classifier_mids):
print(f'training brf for train_config={train_config}; kf={main_args.kf}; mode={main_args.mode}; method={main_args.method}; mid={main_args.mid}c{classifier_mid}')
train_df_x_r, train_df_y_r = load_features(f'../save/fats/{cfilename}/{main_args.kf}@train.df', main_args.mode)
if train_config=='r':
k = 1 # 1 s_repeats*2
train_df_x = pd.concat([train_df_x_r]*k, axis='rows')
train_df_y = pd.concat([train_df_y_r]*k, axis='rows')
if train_config=='s':
k = 1 # 1 2
train_df_x = pd.concat([train_df_x_s]*k, axis='rows')
train_df_y = pd.concat([train_df_y_s]*k, axis='rows')
if train_config=='r+s':
train_df_x = pd.concat([train_df_x_r]*s_repeats+[train_df_x_s], axis='rows')
train_df_y = pd.concat([train_df_y_r]*s_repeats+[train_df_y_s], axis='rows')
features = list(train_df_x.columns)
val_df_x, val_df_y = load_features(f'../save/fats/{cfilename}/{main_args.kf}@val.df', main_args.mode)
brf_d = train_classifier(train_df_x, train_df_y, val_df_x, val_df_y, lcset_info,
max_samples=len(train_df_x_r),
)
d = evaluate_classifier(brf_d, f'../save/fats/{cfilename}/{main_args.kf}@test.df', main_args.mode, lcset_info)
save_rootdir = f'../save'
save_filedir = f'{save_rootdir}/exp=rf_eval~train_config={train_config}~mode={main_args.mode}/{cfilename}/{main_args.kf}@test/id={main_args.mid}c{classifier_mid}.d'
save_pickle(save_filedir, d) |
py | 1a3995c2f53cd510ef32da03438cd283cceb50f4 | # Copyright (c) 2020 Jarret Dyrbye
# Distributed under the MIT software license, see the accompanying
# file LICENSE or http://www.opensource.org/licenses/mit-license.php
import string
import json
from moneysocket.core.message.notification.notification import (
MoneysocketNotification)
class NotifyRendezvousEnd(MoneysocketNotification):
MUST_BE_CLEARTEXT = True
def __init__(self, rendezvous_id):
super().__init__("NOTIFY_RENDEZVOUS_END")
self['rendezvous_id'] = rendezvous_id
@staticmethod
def cast_class(msg_dict):
c = NotifyRendezvousEnd(msg_dict['rendezvous_id'])
c.update(msg_dict)
return c
@staticmethod
def check_valid_msg_dict(msg_dict):
if 'rendezvous_id' not in msg_dict.keys():
return "no rendezvous_id included"
if type(msg_dict['rendezvous_id']) != str:
return "unknown rendezvous_id type"
if not all(c in string.hexdigits for c in msg_dict['rendezvous_id']):
return "rendezvous_id not hex string"
if len(msg_dict['rendezvous_id']) != 64:
return "rendezvous_id not 256-bit value hex string"
return None
MoneysocketNotification.NOTIFICATION_SUBCLASSES['NOTIFY_RENDEZVOUS_END'] = (
NotifyRendezvousEnd)
|
py | 1a3995cd81591f7132b9bc9e070cab151ce7bad0 | """This module contains simple helper functions """
from __future__ import print_function
import torch
import numpy as np
from PIL import Image
import os
from typing import Any, List, Tuple, Union
import sys
import random
def set_seed(seed=None):
if seed is not None:
torch.manual_seed(seed)
np.random.seed(seed)
torch.backends.cudnn.deterministic = True
random.seed(seed)
torch.cuda.manual_seed_all(seed)
torch.backends.cudnn.benchmark = True
class Logger(object):
"""Redirect stderr to stdout, optionally print stdout to a file, and optionally force flushing on both stdout and the file."""
def __init__(self, file_name: str = None, file_mode: str = "a", should_flush: bool = True, append=False):
self.file = None
if append:
file_mode = 'a'
else:
file_mode = 'w'
self.file = open(file_name, file_mode)
self.should_flush = should_flush
self.stdout = sys.stdout
self.stderr = sys.stderr
sys.stdout = self
sys.stderr = self
def __enter__(self) -> "Logger":
return self
def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None:
self.close()
def write(self, text: str) -> None:
"""Write text to stdout (and a file) and optionally flush."""
if len(text) == 0: # workaround for a bug in VSCode debugger: sys.stdout.write(''); sys.stdout.flush() => crash
return
if self.file is not None:
self.file.write(text)
self.stdout.write(text)
if self.should_flush:
self.flush()
def flush(self) -> None:
"""Flush written text to both stdout and a file, if open."""
if self.file is not None:
self.file.flush()
self.stdout.flush()
def close(self) -> None:
"""Flush, close possible files, and remove stdout/stderr mirroring."""
self.flush()
# if using multiple loggers, prevent closing in wrong order
if sys.stdout is self:
sys.stdout = self.stdout
if sys.stderr is self:
sys.stderr = self.stderr
if self.file is not None:
self.file.close()
def write_loss(iterations, trainer, train_writer, prefix):
members = [attr for attr in dir(trainer) \
if not callable(getattr(trainer, attr)) and not attr.startswith("__") and (
'loss' in attr or 'grad' in attr or 'nwd' in attr ) and 'name' not in attr and 'pool' not in attr]
for m in members:
train_writer.add_scalar(prefix+'/'+m, getattr(trainer, m), iterations + 1)
def format_time(seconds: Union[int, float]) -> str:
"""Convert the seconds to human readable string with days, hours, minutes and seconds."""
s = int(np.rint(seconds))
if s < 60:
return "{0}s".format(s)
elif s < 60 * 60:
return "{0}m {1:02}s".format(s // 60, s % 60)
elif s < 24 * 60 * 60:
return "{0}h {1:02}m {2:02}s".format(s // (60 * 60), (s // 60) % 60, s % 60)
else:
return "{0}d {1:02}h {2:02}m".format(s // (24 * 60 * 60), (s // (60 * 60)) % 24, (s // 60) % 60)
def tensor2im(input_image, imtype=np.uint8):
""""Converts a Tensor array into a numpy image array.
Parameters:
input_image (tensor) -- the input image tensor array
imtype (type) -- the desired type of the converted numpy array
"""
if not isinstance(input_image, np.ndarray):
if isinstance(input_image, torch.Tensor): # get the data from a variable
image_tensor = input_image.data
else:
return input_image
image_numpy = image_tensor[0].cpu().float().numpy() # convert it into a numpy array
if image_numpy.shape[0] == 1: # grayscale to RGB
image_numpy = np.tile(image_numpy, (3, 1, 1))
        image_numpy = (np.transpose(image_numpy, (1, 2, 0)) + 1) / 2.0 * 255.0 # post-processing: transpose and scaling
else: # if it is a numpy array, do nothing
image_numpy = input_image
return image_numpy.astype(imtype)
def str2bool(x):
return x.lower() in ('true')
def diagnose_network(net, name='network'):
"""Calculate and print the mean of average absolute(gradients)
Parameters:
net (torch network) -- Torch network
name (str) -- the name of the network
"""
mean = 0.0
count = 0
for param in net.parameters():
if param.grad is not None:
mean += torch.mean(torch.abs(param.grad.data))
count += 1
if count > 0:
mean = mean / count
print(name)
print(mean)
def save_image(image_numpy, image_path, aspect_ratio=1.0):
"""Save a numpy image to the disk
Parameters:
image_numpy (numpy array) -- input numpy array
image_path (str) -- the path of the image
"""
image_pil = Image.fromarray(image_numpy)
h, w, _ = image_numpy.shape
if aspect_ratio > 1.0:
image_pil = image_pil.resize((h, int(w * aspect_ratio)), Image.BICUBIC)
if aspect_ratio < 1.0:
image_pil = image_pil.resize((int(h / aspect_ratio), w), Image.BICUBIC)
image_pil.save(image_path)
def print_numpy(x, val=True, shp=False):
"""Print the mean, min, max, median, std, and size of a numpy array
Parameters:
val (bool) -- if print the values of the numpy array
shp (bool) -- if print the shape of the numpy array
"""
x = x.astype(np.float64)
if shp:
print('shape,', x.shape)
if val:
x = x.flatten()
print('mean = %3.3f, min = %3.3f, max = %3.3f, median = %3.3f, std=%3.3f' % (
np.mean(x), np.min(x), np.max(x), np.median(x), np.std(x)))
def get_model_list(dirname, key, exclude='latest'):
if os.path.exists(dirname) is False:
return None
gen_models = [os.path.join(dirname, f) for f in os.listdir(dirname) if
os.path.isfile(os.path.join(dirname, f)) and key in f and ".pt" in f and exclude not in f]
if gen_models is None:
return None
gen_models.sort()
last_model_name = gen_models[-1]
return last_model_name
def mkdirs(paths):
"""create empty directories if they don't exist
Parameters:
paths (str list) -- a list of directory paths
"""
if isinstance(paths, list) and not isinstance(paths, str):
for path in paths:
mkdir(path)
else:
mkdir(paths)
def mkdir(path):
"""create a single empty directory if it didn't exist
Parameters:
path (str) -- a single directory path
"""
if not os.path.exists(path):
os.makedirs(path)
def adjust_dynamic_range(data, drange_in, drange_out):
if drange_in != drange_out:
scale = (np.float32(drange_out[1]) - np.float32(drange_out[0])) / (np.float32(drange_in[1]) - np.float32(drange_in[0]))
bias = (np.float32(drange_out[0]) - np.float32(drange_in[0]) * scale)
data = data * scale + bias
return data
def create_image_grid(images, grid_size=None):
assert images.ndim == 3 or images.ndim == 4
num, img_w, img_h = images.shape[0], images.shape[-1], images.shape[-2]
if grid_size is not None:
grid_w, grid_h = tuple(grid_size)
else:
grid_w = max(int(np.ceil(np.sqrt(num))), 1)
grid_h = max((num - 1) // grid_w + 1, 1)
grid = np.zeros(list(images.shape[1:-2]) + [grid_h * img_h, grid_w * img_w], dtype=images.dtype)
for idx in range(num):
x = (idx % grid_w) * img_w
y = (idx // grid_w) * img_h
grid[..., y : y + img_h, x : x + img_w] = images[idx]
return grid
def convert_to_pil_image(image, drange=[-1,1]):
assert image.ndim == 2 or image.ndim == 3
if image.ndim == 3:
if image.shape[0] == 1:
image = image[0] # grayscale CHW => HW
elif image.shape[1]>image.shape[0]:
image = image.transpose(1, 2, 0) # CHW -> HWC
image = adjust_dynamic_range(image, drange, [0,255])
image = np.rint(image).clip(0, 255).astype(np.uint8)
fmt = 'RGB' if image.ndim == 3 else 'L'
return Image.fromarray(image, fmt)
def save_images(image, filename, drange=[-1,1], quality=95):
img = convert_to_pil_image(image, drange)
if '.jpg' in filename:
img.save(filename, "JPEG", quality=quality, optimize=True)
else:
img.save(filename)
def to_var( x):
"""Converts numpy to variable."""
if torch.cuda.is_available():
x = x.cuda()
return torch.autograd.Variable(x)
def to_data(x):
"""Converts variable to numpy."""
if torch.cuda.is_available():
x = x.cpu()
return x.data.numpy()
def save_image_grid(images, filename, drange=[-1,1], grid_size=None):
convert_to_pil_image(create_image_grid(images, grid_size), drange).save(filename) |
py | 1a3996a4a9c53bae7bc1f8392b82cdbf720aecc5 | # Copyright 2021 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow_decision_forests.tensorflow import check_version # pylint: disable=unused-import
import tensorflow as tf
from tensorflow.python.framework import load_library
from tensorflow.python.platform import resource_loader
try:
tf.load_op_library(resource_loader.get_path_to_datafile("distribute.so"))
except Exception as e:
check_version.info_fail_to_load_custom_op(e)
raise e
|
py | 1a399740692eab8ccea0c984a1a4f2ac984eb045 | # Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
from paddle.fluid.tests.unittests.test_concat_op import TestConcatOp, TestConcatOp2, TestConcatOp3
class TestMKLDNNConcatOp(TestConcatOp):
def setUp(self):
super(TestMKLDNNConcatOp, self).setUp()
self.attrs["use_mkldnn"] = True
self._cpu_only = True
def test_check_grad(self):
pass
def init_kernel_type(self):
self.use_mkldnn = True
class TestMKLDNNConcatOp2(TestConcatOp2):
def setUp(self):
super(TestMKLDNNConcatOp2, self).setUp()
self.attrs["use_mkldnn"] = True
self._cpu_only = True
def test_check_grad(self):
pass
def init_kernel_type(self):
self.use_mkldnn = True
class TestMKLDNNConcatOp3(TestConcatOp3):
def setUp(self):
super(TestMKLDNNConcatOp3, self).setUp()
self.attrs["use_mkldnn"] = True
self._cpu_only = True
def test_check_grad(self):
pass
def init_kernel_type(self):
self.use_mkldnn = True
if __name__ == '__main__':
unittest.main()
|
py | 1a3997624cece4230a4eea3de2dd9cc28f444374 | from .inference import InferenceWorker
from .train import TrainWorker |