filename | text |
---|---|
the-stack_0_18043 | # -*- coding: utf-8 -*-
"""Embedding weight initialization routines."""
import functools
import logging
import math
from typing import Optional, Sequence
import numpy as np
import torch
import torch.nn
import torch.nn.init
from torch.nn import functional
from .utils import TransformerEncoder
from ..triples import TriplesFactory
from ..utils import compose
__all__ = [
"xavier_uniform_",
"xavier_uniform_norm_",
"xavier_normal_",
"xavier_normal_norm_",
"uniform_norm_",
"uniform_norm_p1_",
"normal_norm_",
"init_phases",
"PretrainedInitializer",
"LabelBasedInitializer",
]
logger = logging.getLogger(__name__)
def xavier_uniform_(tensor, gain: float = 1.0):
r"""Initialize weights of the tensor similarly to Glorot/Xavier initialization.
Proceed as if it was a linear layer with fan_in of zero and Xavier uniform
initialization is used, i.e. fill the weight of input `embedding` with values
sampled from :math:`\mathcal{U}(-a, a)` where
.. math::
a = \text{gain} \times \sqrt{\frac{6}{\text{embedding_dim}}}
:param tensor: A tensor
:param gain: An optional scaling factor, defaults to 1.0.
:return: Embedding with weights by the Xavier uniform initializer.
"""
bound = gain * 6 / math.sqrt(tensor.shape[-1])
torch.nn.init.uniform_(tensor, -bound, bound)
return tensor
def xavier_normal_(tensor: torch.Tensor, gain: float = 1.0) -> torch.Tensor:
r"""Initialize weights of the tensor similarly to Glorot/Xavier initialization.
Proceed as if it was a linear layer with fan_in of zero and Xavier normal
initialization is used. Fill the weight of input `embedding` with values
sampled from :math:`\mathcal{N}(0, a^2)` where
.. math::
a = \text{gain} \times \sqrt{\frac{2}{\text{embedding_dim}}}
:param tensor: A tensor
:param gain: An optional scaling factor, defaults to 1.0.
:return: Embedding with weights by the Xavier normal initializer.
"""
std = gain * 2 / math.sqrt(tensor.shape[-1])
torch.nn.init.normal_(tensor, mean=0.0, std=std)
return tensor
def init_phases(x: torch.Tensor) -> torch.Tensor:
r"""Generate random phases between 0 and :math:`2\pi`."""
phases = 2 * np.pi * torch.rand_like(x[..., : x.shape[-1] // 2])
return torch.cat([torch.cos(phases), torch.sin(phases)], dim=-1).detach()
xavier_uniform_norm_ = compose(
torch.nn.init.xavier_uniform_,
functional.normalize,
)
xavier_normal_norm_ = compose(
torch.nn.init.xavier_normal_,
functional.normalize,
)
uniform_norm_ = compose(
torch.nn.init.uniform_,
functional.normalize,
)
normal_norm_ = compose(
torch.nn.init.normal_,
functional.normalize,
)
uniform_norm_p1_ = compose(
torch.nn.init.uniform_,
functools.partial(functional.normalize, p=1),
)
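# Illustrative sketch (not part of the original module): `compose` is assumed to
# chain the given callables left to right, so each composed initializer fills the
# tensor with the base initializer and then row-normalizes the result, e.g.
#
#     x = uniform_norm_(torch.empty(3, 4))     # each row of x has unit L2 norm
#     y = uniform_norm_p1_(torch.empty(3, 4))  # each row of y has unit L1 norm
#
# Note that `functional.normalize` is not in-place, so the normalized tensor is the
# return value rather than the original buffer.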
def init_quaternions(
x: torch.FloatTensor,
) -> torch.FloatTensor:
"""Initialize quaternion."""
num_elements, dim = x.shape
if dim % 4 != 0:
raise ValueError(f"Quaternions have four components, but dimension {dim} is not divisible by four.")
dim //= 4
# scaling factor
s = 1.0 / math.sqrt(2 * num_elements)
# modulus ~ Uniform[-s, s]
modulus = 2 * s * torch.rand(num_elements, dim) - s
# phase ~ Uniform[0, 2*pi]
phase = 2 * math.pi * torch.rand(num_elements, dim)
# real part
real = (modulus * phase.cos()).unsqueeze(dim=-1)
# purely imaginary quaternions unitary
imag = torch.rand(num_elements, dim, 3)
imag = functional.normalize(imag, p=2, dim=-1)
imag = imag * (modulus * phase.sin()).unsqueeze(dim=-1)
x = torch.cat([real, imag], dim=-1)
return x.view(num_elements, 4 * dim)
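# Illustrative usage sketch (assuming a 2-D input whose last dimension is a
# multiple of four, as checked above):
#
#     x = init_quaternions(torch.empty(10, 8))  # 10 embeddings, 2 quaternions each
#
# The returned rows are laid out as [re_1, i_1, j_1, k_1, re_2, i_2, j_2, k_2],
# i.e. the real part followed by the three imaginary components of each quaternion.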
class PretrainedInitializer:
"""
Initialize tensor with pretrained weights.
Example usage:
.. code-block:: python
import torch
from pykeen.pipeline import pipeline
from pykeen.nn.init import PretrainedInitializer
# this is usually loaded from somewhere else
# the shape must match, as well as the entity-to-id mapping
pretrained_embedding_tensor = torch.rand(14, 128)
result = pipeline(
dataset="nations",
model="transe",
model_kwargs=dict(
embedding_dim=pretrained_embedding_tensor.shape[-1],
entity_initializer=PretrainedInitializer(tensor=pretrained_embedding_tensor),
),
)
"""
def __init__(self, tensor: torch.FloatTensor) -> None:
"""
Initialize the initializer.
:param tensor:
the tensor of pretrained embeddings.
"""
self.tensor = tensor
def __call__(self, x: torch.Tensor) -> torch.Tensor:
"""Initialize the tensor with the given tensor."""
if x.shape != self.tensor.shape:
raise ValueError(f"shape does not match: expected {self.tensor.shape} but got {x.shape}")
return self.tensor.to(device=x.device, dtype=x.dtype)
class LabelBasedInitializer(PretrainedInitializer):
"""
An initializer using pretrained models from the `transformers` library to encode labels.
Example Usage:
Initialize entity representations as Transformer encodings of their labels. Afterwards,
the parameters are detached from the labels, and trained on the KGE task without any
further connection to the Transformer model.
.. code-block:: python
from pykeen.datasets import get_dataset
from pykeen.nn.init import LabelBasedInitializer
from pykeen.models import ERMLPE
dataset = get_dataset(dataset="nations")
model = ERMLPE(
embedding_dim=768, # for BERT base
entity_initializer=LabelBasedInitializer.from_triples_factory(
triples_factory=dataset.training,
),
)
"""
def __init__(
self,
labels: Sequence[str],
pretrained_model_name_or_path: str = "bert-base-cased",
batch_size: int = 32,
max_length: Optional[int] = None,
):
"""
Initialize the initializer.
:param labels:
the labels
:param pretrained_model_name_or_path:
the name of the pretrained model, or a path, cf. :func:`transformers.AutoModel.from_pretrained`
:param batch_size: >0
the batch size to use while encoding.
:param max_length: >0
the maximum number of tokens to pad/trim the labels to
:raise ImportError:
if the transformers library could not be imported
"""
super().__init__(
tensor=TransformerEncoder(
pretrained_model_name_or_path=pretrained_model_name_or_path,
max_length=max_length,
).encode_all(
labels=labels,
batch_size=batch_size,
),
)
@classmethod
def from_triples_factory(
cls,
triples_factory: TriplesFactory,
for_entities: bool = True,
**kwargs,
) -> "LabelBasedInitializer":
"""
Prepare a label-based initializer with labels from a triples factory.
:param triples_factory:
the triples factory
:param for_entities:
whether to create the initializer for entities (or relations)
:param kwargs:
additional keyword-based arguments passed to :func:`LabelBasedInitializer.__init__`
:returns:
A label-based initializer
:raise ImportError:
if the transformers library could not be imported
"""
id_to_label = triples_factory.entity_id_to_label if for_entities else triples_factory.relation_id_to_label
labels = [id_to_label[i] for i in sorted(id_to_label.keys())]
return cls(
labels=labels,
**kwargs,
)
|
the-stack_0_18045 | # -*- coding: utf-8 -*-
"""Tests uta postgresql client"""
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import re
import unittest
from hgvs.exceptions import HGVSDataNotAvailableError
import hgvs.dataproviders.uta
import hgvs.edit
import hgvs.location
import hgvs.posedit
import hgvs.variantmapper
import hgvs.sequencevariant
from support import CACHE
class UTA_Base(object):
def test_get_acs_for_protein_seq(self):
exp = ["NP_001005405.1", "MD5_8fc09b1d9a38a8c55176a0fa922df227"]
s = """
mgccgcsggc gsgcggcgsg sggcgsgcgg cgssccvpic cckpvcccvp acscsscgsc
ggskggcgsc gsskggcgsc gcsqsncckp ccsssgcgsf ccqsscskpc ccqssccqss
cckpcccqss ccqsscfkpc ccqssccvpv ccqcki
"""
s = re.sub(r"\s+", "", s.upper())
self.assertEqual(sorted(self.hdp.get_acs_for_protein_seq(s)), sorted(exp))
exp = ["NP_071928.2", "MD5_ffb0d4adbd5e0b5d71678228b3696984"]
s = """
masetektha llqtcstesl isslglgafc lvadrllqfs tiqqndwlra lsdnavhcvi
gmwswavvtg ikkktdfgei ilagflasvi dvdhfflags mslkaaltlp rrpflhcstv
ipvvvltlkf tmhlfklkds wcflpwmlfi swtshhirdg irhglwicpf gktsplpfwl
yviitsslph icsfvmyltg trqmmsskhg vridv
"""
s = re.sub(r"\s+", "", s.upper())
self.assertEqual(sorted(self.hdp.get_acs_for_protein_seq(s)), sorted(exp))
def test_get_gene_info(self):
gene_info = self.hdp.get_gene_info("VHL")
self.assertEqual("VHL", gene_info["hgnc"])
self.assertEqual("3p25.3", gene_info["maploc"])
self.assertEqual(6, len(gene_info))
def test_get_tx_exons(self):
tx_exons = self.hdp.get_tx_exons("NM_000551.3", "NC_000003.11", "splign")
self.assertEqual(3, len(tx_exons))
def test_get_tx_exons_invalid_tx_ac(self):
with self.assertRaises(HGVSDataNotAvailableError):
self.hdp.get_tx_exons("NM_999999.9", "NC_000003.11", "splign")
def test_get_tx_exons_invalid_alt_ac(self):
with self.assertRaises(HGVSDataNotAvailableError):
self.hdp.get_tx_exons("NM_000551.3", "NC_000999.9", "splign")
def test_get_tx_exons_invalid_alt_aln_method(self):
with self.assertRaises(HGVSDataNotAvailableError):
self.hdp.get_tx_exons("NM_000551.3", "NC_000999.9", "best")
def test_get_tx_for_gene(self):
tig = self.hdp.get_tx_for_gene("VHL")
self.assertEqual(16, len(tig))
def test_get_tx_for_gene_invalid_gene(self):
tig = self.hdp.get_tx_for_gene("GENE")
self.assertEqual(0, len(tig))
def test_get_tx_info(self):
tx_info = self.hdp.get_tx_info("NM_000051.3", "AC_000143.1", "splign")
self.assertEqual(385, tx_info["cds_start_i"])
self.assertEqual(9556, tx_info["cds_end_i"])
self.assertEqual("AC_000143.1", tx_info["alt_ac"])
def test_get_tx_info_invalid_tx_ac(self):
with self.assertRaises(HGVSDataNotAvailableError):
self.hdp.get_tx_info("NM_999999.9", "AC_000143.1", "splign")
def test_get_tx_mapping_options(self):
tx_mapping_options = self.hdp.get_tx_mapping_options("NM_000551.3")
self.assertIn(["NM_000551.3", "NC_000003.11", "splign"], tx_mapping_options)
self.assertIn(["NM_000551.3", "NC_000003.11", "blat"], tx_mapping_options)
def test_get_tx_mapping_options_invalid(self):
tx_info_options = self.hdp.get_tx_mapping_options("NM_999999.9")
self.assertEqual(tx_info_options, [])
class Test_hgvs_dataproviders_uta_UTA_default(unittest.TestCase, UTA_Base):
@classmethod
def setUpClass(cls):
cls.hdp = hgvs.dataproviders.uta.connect(
mode=os.environ.get("HGVS_CACHE_MODE", "run"), cache=CACHE)
class Test_hgvs_dataproviders_uta_UTA_default_with_pooling(unittest.TestCase, UTA_Base):
@classmethod
def setUpClass(cls):
cls.hdp = hgvs.dataproviders.uta.connect(
pooling=True, mode=os.environ.get("HGVS_CACHE_MODE", "run"), cache=CACHE)
class TestUTACache(Test_hgvs_dataproviders_uta_UTA_default):
def _create_cdna_variant(self):
start = hgvs.location.SimplePosition(118898437)
end = hgvs.location.SimplePosition(118898437)
iv = hgvs.location.Interval(start=start, end=end)
edit = hgvs.edit.NARefAlt(ref="C", alt="T")
posedit = hgvs.posedit.PosEdit(pos=iv, edit=edit)
genomic_variant = hgvs.sequencevariant.SequenceVariant(
ac="NC_000011.9",
type="g",
posedit=posedit,
)
variantmapper = hgvs.variantmapper.VariantMapper(self.hdp)
return variantmapper.g_to_c(genomic_variant, "NM_001164277.1")
def test_deterministic_cache_results(self):
"""
Check that identical request to the UTA yields the same results.
"""
var1 = self._create_cdna_variant()
var2 = self._create_cdna_variant()
self.assertEqual(str(var1), str(var2))
if __name__ == "__main__":
unittest.main()
# <LICENSE>
# Copyright 2018 HGVS Contributors (https://github.com/biocommons/hgvs)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# </LICENSE>
|
the-stack_0_18046 | from .api import get_api_response, URL
def get_new_symbol(gene_symbol: str, verbose: bool = True):
""" get the new symbol of a gene
Args:
- gene_symbol: str
- verbose: bool
Returns:
- str
- None
"""
gene_symbol = gene_symbol.strip().upper()
ext = "search/prev_symbol/{}".format(gene_symbol)
data = get_api_response("{}/{}".format(URL, ext))
res = data["response"]["docs"]
if res == []:
if verbose:
print("No new symbol found for {}".format(gene_symbol))
return
elif len(res) > 1:
if verbose:
print("2 or more different genes share this symbol {}:".format(
gene_symbol
))
for gene in res:
print(gene)
return
else:
if verbose:
print("New symbol found for {}: {}".format(
gene_symbol,
res[0]["symbol"]
))
return res[0]["symbol"]
def get_gene_starting_with(gene_symbol: str, verbose: bool = True):
""" get the genes that start with the symbol given
Args:
- gene_symbol: str
- verbose: bool
Returns:
- list of str
- None
"""
gene_symbol = gene_symbol.strip().upper()
ext = "search/symbol/{}*".format(gene_symbol)
data = get_api_response("{}/{}".format(URL, ext))
res = data["response"]["docs"]
if res == []:
if verbose:
print("No gene found starting with {}".format(gene_symbol))
return
else:
gene_symbols = [res[i]["symbol"] for i in range(len(res))]
if verbose:
print("Found these genes starting with {}:".format(gene_symbol))
for symbol in gene_symbols:
print(symbol)
return gene_symbols
def get_alias(gene_symbol: str, verbose: bool = True):
""" get aliases of given symbol
Args:
- gene_symbol: str
- verbose: bool
Returns:
- list of str
- None
"""
gene_symbol = gene_symbol.strip().upper()
ext = "fetch/symbol/{}".format(gene_symbol)
data = get_api_response("{}/{}".format(URL, ext))
res = data["response"]["docs"]
if len(res) == 1:
if "alias_symbol" in res[0]:
aliases = res[0]["alias_symbol"]
if verbose:
if isinstance(aliases, list):
display_aliases = ", ".join(aliases)
else:
display_aliases = aliases
print("Alias symbols for {}: {}".format(
gene_symbol,
display_aliases
))
return aliases
else:
if verbose:
print("No aliases for {}".format(gene_symbol))
return
else:
if verbose:
print("Couldn't get alias for {}".format(gene_symbol))
return
def get_main_symbol(gene_symbol: str, verbose: bool = True):
""" get the main symbol of given symbol
Returns None if symbol is already the "main" symbol
Args:
- gene_symbol: str
- verbose: bool
Returns:
- str
- None
"""
gene_symbol = gene_symbol.strip().upper()
ext = "search/alias_symbol/{}".format(gene_symbol)
data = get_api_response("{}/{}".format(URL, ext))
res = data["response"]["docs"]
if len(res) == 1:
if "symbol" in res[0]:
main_symbol = res[0]["symbol"]
if verbose:
print("Main symbol for {}: {}".format(
gene_symbol,
main_symbol
))
return main_symbol
else:
if verbose:
print("No main_symbol for {}".format(gene_symbol))
return
else:
return
def get_prev_symbol(gene_symbol: str, verbose: bool = True):
""" get the previous symbol of a gene
Args:
- gene_symbol: str
- verbose: bool
Returns:
- list of str
- None
"""
gene_symbol = gene_symbol.strip().upper()
ext = "fetch/symbol/{}".format(gene_symbol)
data = get_api_response("{}/{}".format(URL, ext))
res = data["response"]["docs"]
if len(res) == 1:
if "prev_symbol" in res[0]:
prev_symbol = res[0]["prev_symbol"]
if verbose:
print("Previous symbols for {}: {}".format(
gene_symbol,
", ".join(prev_symbol)
))
return prev_symbol
else:
if verbose:
print("No previous symbol for {}".format(gene_symbol))
return
else:
if verbose:
print("Couldn't get prev symbols for {}".format(gene_symbol))
return
def get_id(gene_symbol: str, verbose: bool = True):
""" get the id of gene symbol
Args:
- gene_symbol: str
- verbose: bool
Returns:
- str
- None
"""
gene_symbol = gene_symbol.strip().upper()
ext = "fetch/symbol/{}".format(gene_symbol)
data = get_api_response("{}/{}".format(URL, ext))
res = data["response"]["docs"]
if len(res) == 1:
gene_id = res[0]["hgnc_id"]
if verbose:
print("{}\t{}".format(gene_symbol, gene_id))
return gene_id
else:
for data in res:
if data["symbol"] == gene_symbol:
return data["hgnc_id"]
if verbose:
print("Couldn't get the id for {}".format(gene_symbol))
return
def get_symbol_from_id(gene_id: str, verbose: bool = True):
""" get the gene symbol from a gene id
Args:
- gene_id: str
- verbose: bool
Returns:
- str
- None
"""
if not gene_id[0].isdigit():
if verbose:
print("{} doesn't start with a digit".format(gene_id))
return
gene_id = gene_id.strip()
ext = "search/hgnc_id/{}".format(gene_id)
data = get_api_response("{}/{}".format(URL, ext))
res = data["response"]["docs"]
if len(res) == 1:
gene_symbol = res[0]["symbol"]
if verbose:
print("{}\t{}".format(gene_id, gene_symbol))
return gene_symbol
elif len(res) == 0:
if verbose:
print("Got no symbol for {}".format(gene_id))
return
def get_hgnc_symbol(gene_symbol: str):
""" get the official hgnc symbol from a gene symbol
Args:
- gene_symbol: str
Returns:
- str
- None
"""
gene_symbol = gene_symbol.strip()
new_symbol = get_new_symbol(gene_symbol, False)
if new_symbol:
return new_symbol
else:
main_symbol = get_main_symbol(gene_symbol, False)
if main_symbol:
return main_symbol
else:
return
def get_refseq(gene_symbol: str, verbose: bool = True):
""" Get refseq given a gene symbol
Args:
gene_symbol (str): Gene symbol
verbose (bool, optional): Prints the output. Defaults to True.
Returns:
None
refseq (str)
"""
gene_symbol = gene_symbol.strip().upper()
ext = "fetch/symbol/{}".format(gene_symbol)
data = get_api_response("{}/{}".format(URL, ext))
res = data["response"]["docs"]
if not res:
if verbose:
print("Gene \"{}\" not found".format(gene_symbol))
return
else:
refseq = res[0]["refseq_accession"]
if verbose:
print("Refseq for \"{}\": {}".format(gene_symbol, refseq))
return refseq
def get_ensembl(gene_symbol: str, verbose: bool = True):
""" Get the ensembl id for given gene symbol
Args:
gene_symbol (str): Gene symbol
verbose (bool, optional): Prints the output. Defaults to True.
Returns:
None
ensembl_id (str)
"""
gene_symbol = gene_symbol.strip().upper()
ext = "fetch/symbol/{}".format(gene_symbol)
data = get_api_response("{}/{}".format(URL, ext))
res = data["response"]["docs"]
if not res:
if verbose:
print("Gene \"{}\" not found".format(gene_symbol))
return
else:
ensembl_id = res[0]["ensembl_gene_id"]
if verbose:
print("Ensembl_id for \"{}\": {}".format(gene_symbol, ensembl_id))
return ensembl_id
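# Illustrative usage sketch (assuming `URL` points at the HGNC REST service and
# `get_api_response` returns its decoded JSON payload). A typical resolution flow
# chains these helpers to go from a possibly outdated symbol to current identifiers:
#
#     query = "BRCA1"                            # any query symbol
#     symbol = get_hgnc_symbol(query) or query   # falls back to the query itself
#     hgnc_id = get_id(symbol, verbose=False)
#     refseq = get_refseq(symbol, verbose=False)
#     ensembl = get_ensembl(symbol, verbose=False)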
|
the-stack_0_18048 | #!/usr/bin/env python
import json
import os
import torch
import sys
import argparse
import numpy as np
# from constants.tacred import *
from constants import tacred
from constants import kbp37
from constants import semeval
from collections import Counter, OrderedDict
from keras.preprocessing.sequence import pad_sequences
from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler
import logging
logger = logging.getLogger(__name__)
def mask_entities(tokens, entity_offsets, subj_entity_start, subj_entity_end,
obj_entity_start, obj_entity_end):
subj_entity, obj_entity = entity_offsets
# print(tokens, entity_offsets, subj_entity_start, subj_entity_end,
# obj_entity_start, obj_entity_end)
if subj_entity[0] < obj_entity[0]:
tokens = tokens[:subj_entity[0]] + [subj_entity_start] + tokens[subj_entity[0]:subj_entity[1]] + \
[subj_entity_end] + tokens[subj_entity[1]:obj_entity[0]] + [obj_entity_start] + \
tokens[obj_entity[0]:obj_entity[1]] + [obj_entity_end] + tokens[obj_entity[1]:]
subj_entity = (subj_entity[0] + 1, subj_entity[1] + 1)
obj_entity = (obj_entity[0] + 3, obj_entity[1] + 3)
else:
tokens = tokens[:obj_entity[0]] + [obj_entity_start] + tokens[obj_entity[0]:obj_entity[1]] + \
[obj_entity_end] + tokens[obj_entity[1]:subj_entity[0]] + [subj_entity_start] + \
tokens[subj_entity[0]:subj_entity[1]] + [subj_entity_end] + tokens[subj_entity[1]:]
obj_entity = (obj_entity[0] + 1, obj_entity[1] + 1)
subj_entity = (subj_entity[0] + 3, subj_entity[1] + 3)
# print(tokens, (subj_entity, obj_entity))
return tokens, (subj_entity, obj_entity)
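# Illustrative example (comments only, values are hypothetical): with
#
#     tokens = ["Bill", "Gates", "founded", "Microsoft", "."]
#     entity_offsets = ((0, 2), (3, 4))  # (subject span, object span), end-exclusive
#
# the call
#
#     mask_entities(tokens, entity_offsets, "[subj-start]", "[subj-end]",
#                   "[obj-start]", "[obj-end]")
#
# returns
#
#     (["[subj-start]", "Bill", "Gates", "[subj-end]", "founded",
#       "[obj-start]", "Microsoft", "[obj-end]", "."], ((1, 3), (6, 7)))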
def convert_token(token):
""" Convert PTB tokens to normal tokens """
if (token.lower() == '-lrb-'):
return '('
elif (token.lower() == '-rrb-'):
return ')'
elif (token.lower() == '-lsb-'):
return '['
elif (token.lower() == '-rsb-'):
return ']'
elif (token.lower() == '-lcb-'):
return '{'
elif (token.lower() == '-rcb-'):
return '}'
return token
def parse_arguments():
parser = argparse.ArgumentParser(description='Score a prediction file using the gold labels.')
parser.add_argument('gold_file', help='The gold relation file; one relation per line')
parser.add_argument('pred_file',
help='A prediction file; one relation per line, in the same order as the gold file.')
args = parser.parse_args()
return args
def score(key, prediction, no_relation="no_relation", verbose=False):
# NO_RELATION = "no_relation"
correct_by_relation = Counter()
guessed_by_relation = Counter()
gold_by_relation = Counter()
# Loop over the data to compute a score
for row in range(len(key)):
gold = key[row]
guess = prediction[row]
if gold == no_relation and guess == no_relation:
pass
elif gold == no_relation and guess != no_relation:
guessed_by_relation[guess] += 1
elif gold != no_relation and guess == no_relation:
gold_by_relation[gold] += 1
elif gold != no_relation and guess != no_relation:
guessed_by_relation[guess] += 1
gold_by_relation[gold] += 1
if gold == guess:
correct_by_relation[guess] += 1
# Print verbose information
if verbose:
print("Per-relation statistics:")
relations = gold_by_relation.keys()
longest_relation = 0
for relation in sorted(relations):
longest_relation = max(len(relation), longest_relation)
for relation in sorted(relations):
# (compute the score)
correct = correct_by_relation[relation]
guessed = guessed_by_relation[relation]
gold = gold_by_relation[relation]
prec = 1.0
if guessed > 0:
prec = float(correct) / float(guessed)
recall = 0.0
if gold > 0:
recall = float(correct) / float(gold)
f1 = 0.0
if prec + recall > 0:
f1 = 2.0 * prec * recall / (prec + recall)
# (print the score)
sys.stdout.write(("{:<" + str(longest_relation) + "}").format(relation))
sys.stdout.write(" P: ")
if prec < 0.1: sys.stdout.write(' ')
if prec < 1.0: sys.stdout.write(' ')
sys.stdout.write("{:.2%}".format(prec))
sys.stdout.write(" R: ")
if recall < 0.1: sys.stdout.write(' ')
if recall < 1.0: sys.stdout.write(' ')
sys.stdout.write("{:.2%}".format(recall))
sys.stdout.write(" F1: ")
if f1 < 0.1: sys.stdout.write(' ')
if f1 < 1.0: sys.stdout.write(' ')
sys.stdout.write("{:.2%}".format(f1))
sys.stdout.write(" #: %d" % gold)
sys.stdout.write("\n")
print("")
# Print the aggregate score
if verbose:
print("Final Score:")
prec_micro = 0.0
if sum(guessed_by_relation.values()) > 0:
prec_micro = float(sum(correct_by_relation.values())) / float(sum(guessed_by_relation.values()))
recall_micro = 0.0
if sum(gold_by_relation.values()) > 0:
recall_micro = float(sum(correct_by_relation.values())) / float(sum(gold_by_relation.values()))
f1_micro = 0.0
if prec_micro + recall_micro > 0.0:
f1_micro = 2.0 * prec_micro * recall_micro / (prec_micro + recall_micro)
# if verbose:
print("Precision (micro): {:.3%}".format(prec_micro))
print(" Recall (micro): {:.3%}".format(recall_micro))
print(" F1 (micro): {:.3%}".format(f1_micro))
return prec_micro, recall_micro, f1_micro
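# Illustrative example (hypothetical labels): with
#
#     key        = ["per:title", "no_relation", "org:founded"]
#     prediction = ["per:title", "org:founded", "no_relation"]
#
# there is one correct relation, one spurious guess and one missed gold relation
# (pairs where both sides are "no_relation" are ignored), so
#
#     score(key, prediction)  # -> (0.5, 0.5, 0.5), i.e. micro P/R/F1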
def eval(trained_model, eval_dataloader, device, id2label, negative_label="no_relation"):
gold_list = []
pred_list = []
# Evaluate data for one epoch
for batch in eval_dataloader:
# Add batch to GPU
batch = tuple(t.to(device) for t in batch)
# Unpack the inputs from our dataloader
b_input_ids, b_input_mask, b_labels, b_subj_idx, b_obj_idx = batch
# Telling the model not to compute or store gradients, saving memory and speeding up dev
with torch.no_grad():
# Forward pass, calculate logit predictions
logits = trained_model(b_input_ids, token_type_ids=None, attention_mask=b_input_mask,
subj_ent_start=b_subj_idx, obj_ent_start=b_obj_idx
)
# Move logits and labels to CPU
logits = logits[0].detach().cpu().numpy()
label_ids = b_labels.to('cpu').numpy()
pred_list += np.argmax(logits, axis=1).tolist()
gold_list += label_ids.tolist()
prec, rec, f1 = score([id2label[gold_id] for gold_id in gold_list],
[id2label[pred_id] for pred_id in pred_list],
no_relation=negative_label
)
return prec, rec, f1
class InputFeatures(object):
"""A single set of features of data."""
def __init__(self, sentence, label, relation, subj_ent_start, obj_ent_start, sen_len):
self.sentence = sentence
self.label = label
self.relation = relation
self.subj_ent_start = subj_ent_start
self.obj_ent_start = obj_ent_start
self.sen_len = sen_len
class DataProcessor(object):
"""Processor for the TACRED data set."""
def __init__(self, data_dir):
self.data_dir = data_dir
def load_tacred_json(self, file_name, encode_ent_type=False):
"""See base class."""
# return load_from_json(os.path.join(self.data_dir, file_name))
data_path = os.path.join(self.data_dir, file_name)
feature_list = []
with open(data_path) as f:
line = f.readlines()
cases = json.loads(line[0])
for case in cases[:]:
sen_id = case[u'id']
token_list = [convert_token(item) for item in case[u'token']]
# token_list = case[u'token']
relation = case[u'relation']
subj_start = case[u'subj_start']
subj_end = case[u'subj_end']
subj_type = case[u'subj_type']
obj_start = case[u'obj_start']
obj_end = case[u'obj_end']
obj_type = case[u'obj_type']
if encode_ent_type:
ent_offset = [[subj_start, subj_end + 1], [obj_start, obj_end + 1]]
subj_ent_start = "[subj-" + subj_type.lower() + "-start]"
subj_ent_end = "[subj-" + subj_type.lower() + "-end]"
obj_ent_start = "[obj-" + obj_type.lower() + "-start]"
obj_ent_end = "[obj-" + obj_type.lower() + "-end]"
else:
# If we don't encode type info into the special token, we just treat the first
# entity as the subject, and use the special token "[e1-start]" and "[e1-start]"
# if subj_start < obj_start:
# ent_offset = [[subj_start, subj_end + 1], [obj_start, obj_end + 1]]
# else:
# ent_offset = [[obj_start, obj_end + 1], [subj_start, subj_end + 1]]
ent_offset = [[subj_start, subj_end + 1], [obj_start, obj_end + 1]]
subj_ent_start = "[subj-start]"
subj_ent_end = "[subj-end]"
obj_ent_start = "[obj-start]"
obj_ent_end = "[obj-end]"
processed_token_list, _ = mask_entities(token_list, ent_offset,
subj_ent_start, subj_ent_end,
obj_ent_start, obj_ent_end,
)
feature_list.append(InputFeatures(sentence=' '.join(processed_token_list), label=tacred.LABEL_TO_ID[relation],
relation=relation, subj_ent_start=subj_ent_start, obj_ent_start=obj_ent_start,
sen_len=len(processed_token_list)))
# break
print("The number of mentions:", len(feature_list))
return feature_list
def load_kbp37_txt(self, file_name):
"""See base class."""
data_path = os.path.join(self.data_dir, file_name)
def load_from_txt(data_path, verbose=False, strip=True):
examples = []
with open(data_path, encoding='utf-8') as infile:
while True:
line = infile.readline()
if len(line) == 0:
break
if strip:
line = line.strip()
examples.append(line)
if verbose:
print("{} examples read in {} .".format(len(examples), data_path))
return examples
org_data = load_from_txt(data_path)
assert len(org_data) % 4 == 0
feature_list = []
for idx in range(0, len(org_data), 4):
sid_sen_str = org_data[idx]
relation = org_data[idx+1]
if len(sid_sen_str.split("\t")) != 2:
print(sid_sen_str)
raise
sid, sen_str = sid_sen_str.split("\t")
assert sen_str[:2] =='" ' and sen_str[-2:] == ' "'
sen_str = sen_str[2:-2] # Remove the prefix and suffix
subj_ent_start = "<e1>"
obj_ent_start = "<e2>"
feature_list.append(InputFeatures(sentence=sen_str, label=kbp37.LABEL_TO_ID[relation],
relation=relation, subj_ent_start=subj_ent_start,
obj_ent_start=obj_ent_start,
sen_len=len(sen_str.split())))
print("The number of mentions:", len(feature_list))
return feature_list
def load_semeval_txt(self, file_name, set_name):
"""See base class."""
data_path = os.path.join(self.data_dir, file_name)
def load_from_txt(data_path, verbose=False, strip=True):
examples = []
with open(data_path, encoding='utf-8') as infile:
while True:
line = infile.readline()
if len(line) == 0:
break
if strip:
line = line.strip()
examples.append(line)
if verbose:
print("{} examples read in {} .".format(len(examples), data_path))
return examples
org_data = load_from_txt(data_path)
assert len(org_data) % 4 == 0
feature_list = []
for idx in range(0, len(org_data), 4):
sid_sen_str = org_data[idx]
relation = org_data[idx+1]
if len(sid_sen_str.split("\t")) != 2:
print(sid_sen_str)
raise
sid, sen_str = sid_sen_str.split("\t")
assert sen_str.startswith('"') and sen_str.endswith('"')
sen_str = sen_str[1:-1] # Remove the prefix and suffix
subj_ent_start = "<e1>"
obj_ent_start = "<e2>"
feature_list.append(InputFeatures(sentence=sen_str, label=semeval.LABEL_TO_ID[relation],
relation=relation, subj_ent_start=subj_ent_start,
obj_ent_start=obj_ent_start,
sen_len=len(sen_str.split())))
if set_name == "train":
feature_list = feature_list[:6500]
elif set_name == "dev":
feature_list = feature_list[6500:]
print("The number of mentions:", len(feature_list))
return feature_list
def convert_features_to_dataloader(args, feature_list, tokenizer, logger, file_train=False):
"""Loads a data file into a list of `InputBatch`s."""
# if file_train:
# print(f"{tokenizer.cls_token} {sens[0]} {tokenizer.eos_token}")
logger.info(f"{tokenizer.cls_token} {feature_list[0].sentence} {tokenizer.sep_token}")
# Preprocess the sequence
tokenized_texts = [tokenizer.tokenize(f"{tokenizer.cls_token} {each_case.sentence} {tokenizer.sep_token}")
for each_case in feature_list]
# if file_train:
# print(tokenized_texts[0])
logger.info("tokenized_texts[0]: " + " ".join(tokenized_texts[0]))
# Pad our input tokens
input_ids = pad_sequences([tokenizer.convert_tokens_to_ids(txt) for txt in tokenized_texts],
maxlen=args.max_seq_length, dtype="long", value=tokenizer.pad_token_id,
truncating="post", padding="post")
# if file_train:
# print(input_ids[0])
# print(input_ids.shape)
logger.info("input_ids[0]: " + " ".join([str(item) for item in input_ids[0]]))
subj_idx_list = [tokenized_texts[sen_idx].index(feature_list[sen_idx].subj_ent_start)
if tokenized_texts[sen_idx].index(feature_list[sen_idx].subj_ent_start) < args.max_seq_length else args.max_seq_length - 1
for sen_idx in range(len(tokenized_texts))
]
logger.info(f"subj_idx_list[0]: {subj_idx_list[0]}")
obj_idx_list = [tokenized_texts[sen_idx].index(feature_list[sen_idx].obj_ent_start)
if tokenized_texts[sen_idx].index(feature_list[sen_idx].obj_ent_start) < args.max_seq_length else args.max_seq_length - 1
for sen_idx in range(len(tokenized_texts))
]
logger.info(f"obj_idx_list[0]: {obj_idx_list[0]}")
# Create attention masks
attention_masks = []
# Create a mask of 1s for each token followed by 0s for padding
for seq in input_ids:
seq_mask = [float(i != tokenizer.pad_token_id) for i in seq]
attention_masks.append(seq_mask)
# if file_train:
# print(attention_masks[0])
logger.info("attention_masks[0]: " + " ".join([str(item) for item in attention_masks[0]]))
# Convert all of our data into torch tensors, the required datatype for our model
inputs = torch.tensor(input_ids)
labels = torch.tensor([item.label for item in feature_list])
masks = torch.tensor(attention_masks)
subj_idxs = torch.tensor(subj_idx_list)
obj_idxs = torch.tensor(obj_idx_list)
if file_train:
data = TensorDataset(inputs, masks, labels, subj_idxs, obj_idxs)
sampler = RandomSampler(data)
dataloader = DataLoader(data, sampler=sampler, batch_size=args.batch_size)
else:
data = TensorDataset(inputs, masks, labels, subj_idxs, obj_idxs)
sampler = SequentialSampler(data)
dataloader = DataLoader(data, sampler=sampler, batch_size=args.batch_size)
return dataloader
def convert_examples_to_features(args, sens, labels, subj_ent_start_list, obj_ent_start_list, tokenizer,
file_train=False):
"""Loads a data file into a list of `InputBatch`s."""
# if file_train:
# print(f"{tokenizer.cls_token} {sens[0]} {tokenizer.eos_token}")
logger.info(f"{tokenizer.cls_token} {sens[0]} {tokenizer.sep_token}")
# Preprocess the sequence
tokenized_texts = [tokenizer.tokenize(f"{tokenizer.cls_token} {sent} {tokenizer.sep_token}") for sent in sens]
# if file_train:
# print(tokenized_texts[0])
logger.info("tokenized_texts[0]: " + " ".join(tokenized_texts[0]))
# Pad our input tokens
input_ids = pad_sequences([tokenizer.convert_tokens_to_ids(txt) for txt in tokenized_texts],
maxlen=args.max_seq_length, dtype="long", value=tokenizer.pad_token_id,
truncating="post", padding="post")
# if file_train:
# print(input_ids[0])
# print(input_ids.shape)
logger.info("input_ids[0]: " + " ".join([str(item) for item in input_ids[0]]))
subj_idx_list = [tokenized_texts[sen_idx].index(subj_ent_start_list[sen_idx])
if tokenized_texts[sen_idx].index(
subj_ent_start_list[sen_idx]) < args.max_seq_length else args.max_seq_length - 1
for sen_idx in range(len(tokenized_texts))
]
logger.info(f"subj_idx_list[0]: {subj_idx_list[0]}")
obj_idx_list = [tokenized_texts[sen_idx].index(obj_ent_start_list[sen_idx])
if tokenized_texts[sen_idx].index(
obj_ent_start_list[sen_idx]) < args.max_seq_length else args.max_seq_length - 1
for sen_idx in range(len(tokenized_texts))
]
logger.info(f"obj_idx_list[0]: {obj_idx_list[0]}")
# Create attention masks
attention_masks = []
# Create a mask of 1s for each token followed by 0s for padding
for seq in input_ids:
seq_mask = [float(i != tokenizer.pad_token_id) for i in seq]
attention_masks.append(seq_mask)
# if file_train:
# print(attention_masks[0])
logger.info("attention_masks[0]: " + " ".join([str(item) for item in attention_masks[0]]))
# Convert all of our data into torch tensors, the required datatype for our model
inputs = torch.tensor(input_ids)
labels = torch.tensor(labels)
masks = torch.tensor(attention_masks)
subj_idxs = torch.tensor(subj_idx_list)
obj_idxs = torch.tensor(obj_idx_list)
if file_train:
data = TensorDataset(inputs, masks, labels, subj_idxs, obj_idxs)
sampler = RandomSampler(data)
dataloader = DataLoader(data, sampler=sampler, batch_size=args.batch_size)
else:
data = TensorDataset(inputs, masks, labels, subj_idxs, obj_idxs)
sampler = SequentialSampler(data)
dataloader = DataLoader(data, sampler=sampler, batch_size=args.batch_size)
return dataloader |
the-stack_0_18051 | '''CLI entrance.'''
import argparse
import os
parser = argparse.ArgumentParser(prog='sudoku')
parser.add_argument('path', action='store', help='Set the path of image.', type=str)
parser.add_argument('-s', '--source', required=False, action='store_true', help='Show the source matrix.')
parser.add_argument('-t', '--tips', required=False, action='store_true', help='Show the tips.')
parser.add_argument('-a', '--answer', required=False, action='store_true', help='Show the answer.')
args = parser.parse_args()
if args.path:
if not os.path.exists(args.path):
print('Wrong image path.')
exit()
import process
result = process.run(args.path)
print()
if not (args.tips or args.answer) or args.source:
print('Here is the source matrix:')
process.show(result)
if args.tips:
process.show(result, tips=True)
if args.answer:
process.show(result, answer=True)
|
the-stack_0_18052 | import aiohttp
import asyncio
import io
import wrapt
import botocore.retryhandler
import aiohttp.http_exceptions
from aiohttp.client import URL
from aiohttp.client_reqrep import ClientResponse
from botocore.endpoint import EndpointCreator, Endpoint, DEFAULT_TIMEOUT, \
MAX_POOL_CONNECTIONS, logger
from botocore.exceptions import EndpointConnectionError, \
ConnectionClosedError
from botocore.hooks import first_non_none_response
from botocore.utils import is_valid_endpoint_url
from botocore.vendored.requests.structures import CaseInsensitiveDict
from botocore.history import get_global_history_recorder
from multidict import MultiDict
from urllib.parse import urlparse
from aiobotocore.response import StreamingBody
MAX_REDIRECTS = 10
history_recorder = get_global_history_recorder()
# Monkey patching: We need to insert the aiohttp exception equivalents
# The only other way to do this would be to have another config file :(
_aiohttp_retryable_exceptions = [
aiohttp.ClientConnectionError,
aiohttp.ClientPayloadError,
aiohttp.ServerDisconnectedError,
aiohttp.http_exceptions.HttpProcessingError,
asyncio.TimeoutError,
]
botocore.retryhandler.EXCEPTION_MAP['GENERAL_CONNECTION_ERROR'].extend(
_aiohttp_retryable_exceptions
)
def text_(s, encoding='utf-8', errors='strict'):
if isinstance(s, bytes):
return s.decode(encoding, errors)
return s # pragma: no cover
# Unfortunately aiohttp changed the behavior of streams:
# github.com/aio-libs/aiohttp/issues/1907
# We need this wrapper until we have a final resolution
class _IOBaseWrapper(wrapt.ObjectProxy):
def close(self):
# this stream should not be closed by aiohttp, like 1.x
pass
async def convert_to_response_dict(http_response, operation_model):
response_dict = {
# botocore converts keys to str, so make sure that they are in
# the expected case. See detailed discussion here:
# https://github.com/aio-libs/aiobotocore/pull/116
# aiohttp's CIMultiDict camel cases the headers :(
'headers': CaseInsensitiveDict(
{k.decode('utf-8').lower(): v.decode('utf-8')
for k, v in http_response.raw_headers}),
'status_code': http_response.status_code,
'context': {
'operation_name': operation_model.name,
}
}
if response_dict['status_code'] >= 300:
response_dict['body'] = await http_response.read()
elif operation_model.has_event_stream_output:
response_dict['body'] = http_response.raw
elif operation_model.has_streaming_output:
length = response_dict['headers'].get('content-length')
response_dict['body'] = StreamingBody(http_response.raw, length)
else:
response_dict['body'] = await http_response.read()
return response_dict
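# Illustrative shape of the returned dict (keys are the ones set above; the
# values shown are hypothetical):
#
#     {
#         "headers": CaseInsensitiveDict({"content-type": "application/xml", ...}),
#         "status_code": 200,
#         "context": {"operation_name": "ListBuckets"},
#         "body": b"<xml ...>",  # raw stream / StreamingBody for streaming outputs
#     }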
# This is similar to botocore.response.StreamingBody
class ClientResponseContentProxy(wrapt.ObjectProxy):
"""Proxy object for content stream of http response. This is here in case
you want to pass around the "Body" of the response without closing the
response itself."""
def __init__(self, response):
super().__init__(response.__wrapped__.content)
self._self_response = response
# Note: we don't have a __del__ method as the ClientResponse has a __del__
# which will warn the user if they didn't close/release the response
# explicitly. A release here would mean reading all the unread data
# (which could be very large), and a close would mean being unable to re-
# use the connection, so the user MUST chose. Default is to warn + close
async def __aenter__(self):
await self._self_response.__aenter__()
return self
async def __aexit__(self, exc_type, exc_val, exc_tb):
return await self._self_response.__aexit__(exc_type, exc_val, exc_tb)
def close(self):
self._self_response.close()
class ClientResponseProxy(wrapt.ObjectProxy):
"""Proxy object for http response useful for porting from
botocore underlying http library."""
def __init__(self, *args, **kwargs):
super().__init__(ClientResponse(*args, **kwargs))
# this matches ClientResponse._body
self._self_body = None
@property
def status_code(self):
return self.status
@status_code.setter
def status_code(self, value):
# botocore tries to set this, see:
# https://github.com/aio-libs/aiobotocore/issues/190
# Luckily status is an attribute we can set
self.status = value
@property
def content(self):
return self._self_body
@property
def raw(self):
return ClientResponseContentProxy(self)
async def read(self):
self._self_body = await self.__wrapped__.read()
return self._self_body
class AioEndpoint(Endpoint):
def __init__(self, host,
endpoint_prefix, event_emitter, proxies=None, verify=True,
timeout=DEFAULT_TIMEOUT, response_parser_factory=None,
max_pool_connections=MAX_POOL_CONNECTIONS,
loop=None, connector_args=None):
super().__init__(host, endpoint_prefix,
event_emitter, proxies=proxies, verify=verify,
timeout=timeout,
response_parser_factory=response_parser_factory,
max_pool_connections=max_pool_connections)
if isinstance(timeout, (list, tuple)):
self._conn_timeout, self._read_timeout = timeout
else:
self._conn_timeout = self._read_timeout = timeout
self._loop = loop or asyncio.get_event_loop()
if connector_args is None:
# AWS has a 20 second idle timeout:
# https://forums.aws.amazon.com/message.jspa?messageID=215367
# aiohttp default timeout is 30s so set something reasonable here
connector_args = dict(keepalive_timeout=12)
timeout = aiohttp.ClientTimeout(
sock_connect=self._conn_timeout,
sock_read=self._read_timeout
)
connector = aiohttp.TCPConnector(
loop=self._loop,
limit=max_pool_connections,
verify_ssl=self.verify,
**connector_args)
self._aio_session = aiohttp.ClientSession(
connector=connector,
timeout=timeout,
skip_auto_headers={'CONTENT-TYPE'},
response_class=ClientResponseProxy,
loop=self._loop,
auto_decompress=False)
async def _request(self, method, url, headers, data, verify, stream):
# Note: When using aiobotocore with dynamodb, requests fail on crc32
# checksum computation as soon as the response data reaches ~5KB.
# When AWS response is gzip compressed:
# 1. aiohttp is automatically decompressing the data
# (http://aiohttp.readthedocs.io/en/stable/client.html#binary-response-content)
# 2. botocore computes crc32 on the uncompressed data bytes and fails
# cause crc32 has been computed on the compressed data
# The following line forces aws not to use gzip compression,
# if there is a way to configure aiohttp not to perform decompression,
# we can remove the following line and take advantage of
# aws gzip compression.
# https://github.com/boto/botocore/issues/1255
headers['Accept-Encoding'] = 'identity'
headers_ = MultiDict(
(z[0], text_(z[1], encoding='utf-8')) for z in headers.items())
# botocore does this during the request so we do this here as well
proxy = self.proxies.get(urlparse(url.lower()).scheme)
if isinstance(data, io.IOBase):
data = _IOBaseWrapper(data)
url = URL(url, encoded=True)
resp = await self._aio_session.request(
method, url=url, headers=headers_, data=data, proxy=proxy,
verify_ssl=verify)
# If we're not streaming, read the content so we can retry any timeout
# errors, see:
# https://github.com/boto/botocore/blob/develop/botocore/vendored/requests/sessions.py#L604
if not stream:
await resp.read()
return resp
async def _send_request(self, request_dict, operation_model):
attempts = 1
request = self.create_request(request_dict, operation_model)
success_response, exception = await self._get_response(
request, operation_model, attempts)
while (await self._needs_retry(attempts, operation_model,
request_dict, success_response,
exception)):
attempts += 1
# If there is a stream associated with the request, we need
# to reset it before attempting to send the request again.
# This will ensure that we resend the entire contents of the
# body.
request.reset_stream()
# Create a new request when retried (including a new signature).
request = self.create_request(
request_dict, operation_model)
success_response, exception = await self._get_response(
request, operation_model, attempts)
if success_response is not None and \
'ResponseMetadata' in success_response[1]:
# We want to share num retries, not num attempts.
total_retries = attempts - 1
success_response[1]['ResponseMetadata']['RetryAttempts'] = \
total_retries
if exception is not None:
raise exception
else:
return success_response
# NOTE: The only line changed here is swapping time.sleep for asyncio.sleep
async def _needs_retry(self, attempts, operation_model, request_dict,
response=None, caught_exception=None):
event_name = 'needs-retry.%s.%s' % (self._endpoint_prefix,
operation_model.name)
responses = self._event_emitter.emit(
event_name, response=response, endpoint=self,
operation=operation_model, attempts=attempts,
caught_exception=caught_exception, request_dict=request_dict)
handler_response = first_non_none_response(responses)
if handler_response is None:
return False
else:
# Request needs to be retried, and we need to sleep
# for the specified number of times.
logger.debug("Response received to retry, sleeping for "
"%s seconds", handler_response)
await asyncio.sleep(handler_response, loop=self._loop)
return True
async def _get_response(self, request, operation_model, attempts):
# This will return a tuple of (success_response, exception)
# and success_response is itself a tuple of
# (http_response, parsed_dict).
# If an exception occurs then the success_response is None.
# If no exception occurs then exception is None.
try:
# http request substituted too async one
logger.debug("Sending http request: %s", request)
history_recorder.record('HTTP_REQUEST', {
'method': request.method,
'headers': request.headers,
'streaming': operation_model.has_streaming_input,
'url': request.url,
'body': request.body
})
streaming = any([
operation_model.has_streaming_output,
operation_model.has_event_stream_output
])
http_response = await self._request(
request.method, request.url, request.headers, request.body,
verify=self.verify,
stream=streaming)
except aiohttp.ClientConnectionError as e:
e.request = request # botocore expects the request property
# For a connection error, if it looks like it's a DNS
# lookup issue, 99% of the time this is due to a misconfigured
# region/endpoint so we'll raise a more specific error message
# to help users.
logger.debug("ConnectionError received when sending HTTP request.",
exc_info=True)
if self._looks_like_dns_error(e):
better_exception = EndpointConnectionError(
endpoint_url=request.url, error=e)
return None, better_exception
else:
return None, e
except aiohttp.http_exceptions.BadStatusLine:
better_exception = ConnectionClosedError(
endpoint_url=request.url, request=request)
return None, better_exception
except Exception as e:
logger.debug("Exception received when sending HTTP request.",
exc_info=True)
return None, e
# This returns the http_response and the parsed_data.
response_dict = await convert_to_response_dict(http_response,
operation_model)
http_response_record_dict = response_dict.copy()
http_response_record_dict['streaming'] = \
operation_model.has_streaming_output
history_recorder.record('HTTP_RESPONSE', http_response_record_dict)
protocol = operation_model.metadata['protocol']
parser = self._response_parser_factory.create_parser(protocol)
parsed_response = parser.parse(
response_dict, operation_model.output_shape)
history_recorder.record('PARSED_RESPONSE', parsed_response)
return (http_response, parsed_response), None
class AioEndpointCreator(EndpointCreator):
def __init__(self, event_emitter, loop):
super().__init__(event_emitter)
self._loop = loop
def create_endpoint(self, service_model, region_name=None,
endpoint_url=None, verify=None,
response_parser_factory=None, timeout=DEFAULT_TIMEOUT,
max_pool_connections=MAX_POOL_CONNECTIONS,
proxies=None, connector_args=None):
if not is_valid_endpoint_url(endpoint_url):
raise ValueError("Invalid endpoint: %s" % endpoint_url)
if proxies is None:
proxies = self._get_proxies(endpoint_url)
return AioEndpoint(
endpoint_url,
endpoint_prefix=service_model.endpoint_prefix,
event_emitter=self._event_emitter,
proxies=proxies,
verify=self._get_verify_value(verify),
timeout=timeout,
max_pool_connections=max_pool_connections,
response_parser_factory=response_parser_factory,
loop=self._loop, connector_args=connector_args)
|
the-stack_0_18053 | # PyGEP: Gene Expression Programming for Python
# Copyright (C) 2007 Ryan J. O'Neil
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
'''
Provides basic logical operators: and, or, not, if. These are safe to use
with the mathematical operators as they return either 1, 0, or the values
passed in to them.
Common logic non-terminal functions:
- (&) and_op: i if i and j else 0
- (|) or_op: i or j or 0
- (!) not_op: 0 if i else 1
- (I) if_op: j if i else k
'''
from pygep.chromosome import symbol
__all__ = 'LOGIC_ALL', 'LOGIC_ARITY_1', 'LOGIC_ARITY_2', 'LOGIC_ARITY_3'
and_op = symbol('&')(lambda i, j: i if i and j else 0)
or_op = symbol('|')(lambda i, j: i or j or 0)
not_op = symbol('!')(lambda i: 0 if i else 1)
if_op = symbol('I')(lambda i, j, k: j if i else k)
LOGIC_ARITY_1 = not_op,
LOGIC_ARITY_2 = and_op, or_op
LOGIC_ARITY_3 = if_op,
LOGIC_ALL = LOGIC_ARITY_1 + LOGIC_ARITY_2 + LOGIC_ARITY_3
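# Illustrative behavior (assuming `symbol` merely attaches a display symbol and
# returns the callable unchanged):
#
#     and_op(3, 2)        # -> 3    (first operand when both are truthy)
#     and_op(3, 0)        # -> 0
#     or_op(0, 5)         # -> 5    (first truthy operand, else 0)
#     not_op(0)           # -> 1
#     if_op(1, 'a', 'b')  # -> 'a'  (ternary selection on the first operand)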
|
the-stack_0_18054 | import torch
import os
import h5py
import numpy as np
from haven import haven_utils as hu
from torchvision import transforms
import pydicom, tqdm
from PIL import Image
import PIL
class Covid19V2(torch.utils.data.Dataset):
def __init__(
self,
split,
datadir,
exp_dict,
seperate=True,
):
self.exp_dict = exp_dict
self.datadir = datadir
self.split = split
self.n_classes = exp_dict['dataset']['n_classes']
self.size = 352
self.img_path = os.path.join(datadir, 'OpenSourceDCMs')
self.lung_path = os.path.join(datadir, 'LungMasks')
self.tgt_path = os.path.join(datadir, 'InfectionMasks')
self.img_list = []
scan_list = set()
for tgt_name in os.listdir(self.tgt_path):
lung_name = tgt_name
scan_id, slice_id = tgt_name.split('_')
scan_list.add(int(scan_id))
slice_id = str(int(slice_id.replace('z', '').replace('.png', ''))).zfill(4)
img_name = [f for f in os.listdir(os.path.join(self.img_path,
'DCM'+scan_id)) if 's%s' % slice_id in f][0]
img_name = os.path.join('DCM'+scan_id, img_name)
self.img_list += [{'img': img_name,
'tgt': tgt_name,
'lung': lung_name,
'scan_id':int(scan_id),
'slice_id':int(slice_id)}]
scan_list = list(scan_list)
scan_list.sort()
if seperate:
if split == 'train':
scan_list = scan_list[:5]
elif split == 'val':
scan_list = scan_list[5:6]
elif split == 'test':
scan_list = scan_list[6:]
img_list_new = []
for img_dict in self.img_list:
if img_dict['scan_id'] in scan_list:
img_list_new += [img_dict]
else:
img_list_new = []
for scan in scan_list:
img_list = [img_dict for img_dict in self.img_list if img_dict['scan_id']==scan]
if split == 'train':
s = 0
e = int(0.45*len(img_list))
elif split == 'val':
s = int(0.45*len(img_list))
e = int(0.5*len(img_list))
elif split == 'test':
s = int(0.5*len(img_list))
e = len(img_list)
img_list_new += img_list[s:e]
self.img_list = img_list_new
self.img_transform = transforms.Compose([
transforms.CenterCrop((384, 385)),
# transforms.Resize((self.size, self.size)),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])
if split == 'train':
self.gt_transform = transforms.Compose([
transforms.CenterCrop((384, 385)),
# transforms.Resize((self.size, self.size), interpolation=PIL.Image.NEAREST),
# transforms.ToTensor()]
])
else:
self.gt_transform = transforms.Compose([
transforms.CenterCrop((384, 385)),
# transforms.ToTensor()
])
def __getitem__(self, i):
out = self.img_list[i]
img_name, tgt_name, lung_name = out['img'], out['tgt'], out['lung']
# read image
img_dcm = pydicom.dcmread(os.path.join(self.img_path, img_name))
image = img_dcm.pixel_array.astype('float')
# read infection mask
tgt_mask = np.array(Image.open(os.path.join(self.tgt_path, tgt_name)).transpose(Image.FLIP_LEFT_RIGHT).rotate(90))
# read lung mask
lung_mask = np.array(Image.open(os.path.join(self.lung_path,
lung_name)).transpose(Image.FLIP_LEFT_RIGHT))
mask = np.zeros(lung_mask.shape)
# mask[lung_mask== 255] = 1
# mask[tgt_mask== 127] = 2
# mask[tgt_mask== 255] = 3
if self.n_classes == 2:
mask[tgt_mask!= 0] = 1
elif self.n_classes == 3:
mask[tgt_mask== 127] = 1
mask[tgt_mask== 255] = 2
# assert that these are the only classes
assert(len(np.setdiff1d(np.unique(tgt_mask),[0,127,255] ))==0)
assert(len(np.setdiff1d(np.unique(lung_mask),[0,255] ))==0)
# image, mask = transformers.apply_transform(self.split, image=image, label=mask,
# transform_name=self.exp_dict['dataset']['transform'],
# exp_dict=self.exp_dict)
img_uint8 = ((image/4095)*255).astype('uint8')
image = self.img_transform(Image.fromarray(img_uint8).convert('RGB'))
mask = self.gt_transform(Image.fromarray((mask).astype('uint8')))
mask = torch.LongTensor(np.array(mask))
if self.n_classes == 2:
assert (len(np.setdiff1d(np.unique(mask), [0, 1])) == 0)
if self.n_classes == 3:
assert (len(np.setdiff1d(np.unique(mask), [0, 1, 2])) == 0)
from src.modules.lcfcn import lcfcn_loss
points = lcfcn_loss.get_points_from_mask(mask.numpy().squeeze(), bg_points=-1)
# if (points == 255).mean() == 1:
# points[:] = 0
return {'images': image,
'masks': mask.long()[None],
'points':torch.LongTensor(points),
'meta': {'shape':mask.squeeze().shape,
'index':i,
'hash':hu.hash_dict({'id':os.path.join(self.img_path, img_name)}),
'name':img_name,
'slice_thickness':img_dcm.SliceThickness,
'pixel_spacing':str(img_dcm.PixelSpacing),
'img_name': img_name,
'tgt_name':tgt_name,
'image_id': i,
'split': self.split}}
def __len__(self):
return len(self.img_list)
|
the-stack_0_18056 | #!/usr/bin/env python3
# Copyright (c) 2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Verify that starting brownie with -h works as expected."""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal
class HelpTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
def setup_network(self):
self.add_nodes(self.num_nodes)
# Don't start the node
def get_node_output(self, *, ret_code_expected):
ret_code = self.nodes[0].process.wait(timeout=5)
assert_equal(ret_code, ret_code_expected)
self.nodes[0].stdout.seek(0)
self.nodes[0].stderr.seek(0)
out = self.nodes[0].stdout.read()
err = self.nodes[0].stderr.read()
self.nodes[0].stdout.close()
self.nodes[0].stderr.close()
# Clean up TestNode state
self.nodes[0].running = False
self.nodes[0].process = None
self.nodes[0].rpc_connected = False
self.nodes[0].rpc = None
return out, err
def run_test(self):
self.log.info("Start brownie with -h for help text")
self.nodes[0].start(extra_args=['-h'])
# Node should exit immediately and output help to stdout.
output, _ = self.get_node_output(ret_code_expected=0)
assert b'Options' in output
self.log.info("Help text received: {} (...)".format(output[0:60]))
self.log.info("Start brownie with -version for version information")
self.nodes[0].start(extra_args=['-version'])
# Node should exit immediately and output version to stdout.
output, _ = self.get_node_output(ret_code_expected=0)
assert b'version' in output
self.log.info("Version text received: {} (...)".format(output[0:60]))
# Test that arguments not in the help results in an error
self.log.info("Start brownied with -fakearg to make sure it does not start")
self.nodes[0].start(extra_args=['-fakearg'])
# Node should exit immediately and output an error to stderr
_, output = self.get_node_output(ret_code_expected=1)
assert b'Error parsing command line arguments' in output
self.log.info("Error message received: {} (...)".format(output[0:60]))
if __name__ == '__main__':
HelpTest().main()
|
the-stack_0_18057 | from django import forms
from django.core import validators
class FormName(forms.Form):
name = forms.CharField()
email = forms.EmailField()
text = forms.CharField(widget=forms.Textarea)
verify_email = forms.EmailField(label='enter email again')
botcatcher = forms.CharField(
required=False, widget=forms.HiddenInput, validators=[validators.MaxLengthValidator(0)])
def clean(self):
all_clean_data = super().clean()
email = all_clean_data['email']
vmail = all_clean_data['verify_email']
if email != vmail:
raise forms.ValidationError('Email and verify email must match')
""" def clean_botcatcher(self):
botcatcher = self.cleaned_data['botcatcher']
if len(botcatcher) > 0:
raise forms.ValidationError("opps")
return botcatcher
"""
|
the-stack_0_18061 | # Copyright 2021 DAI Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at: http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pymongo.database import Database
from ...models.semantics_model import (
AddressSemantics,
ContractSemantics,
ERC20Semantics,
)
from ...models.semantics_model import (
EventSemantics,
FunctionSemantics,
TransformationSemantics,
ParameterSemantics,
)
class SemanticsDatabase:
""" Semantics Database. """
def __init__(self, db: Database):
self._db = db
self._addresses = self._db["addresses"]
self._contracts = self._db["contracts"]
self._signatures = self._db["signatures"]
def get_raw_semantics(self, chain_id, address):
def decode_parameter(parameter):
components_semantics = []
for component in parameter["components"]:
components_semantics.append(decode_parameter(component))
decoded_parameter = ParameterSemantics(
parameter["parameter_name"],
parameter["parameter_type"],
components_semantics,
parameter["indexed"],
parameter["dynamic"],
)
return decoded_parameter
ZERO_HASH = "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
_id = f"{chain_id}-{address}"
raw_address_semantics = self._addresses.find_one({"_id": _id}, {"_id": 0})
if raw_address_semantics:
if raw_address_semantics.get("erc20"):
erc20_semantics = ERC20Semantics(
raw_address_semantics["erc20"]["name"],
raw_address_semantics["erc20"]["symbol"],
raw_address_semantics["erc20"]["decimals"],
)
else:
erc20_semantics = None
if raw_address_semantics["contract"] == ZERO_HASH:
contract_semantics = ContractSemantics(
raw_address_semantics["contract"], "EOA", dict(), dict(), dict()
)
else:
raw_contract_semantics = self._contracts.find_one(
{"_id": raw_address_semantics["contract"]}, {"_id": 0}
)
events = dict()
for signature, event in raw_contract_semantics["events"].items():
parameters_semantics = []
for parameter in event["parameters"]:
parameters_semantics.append(decode_parameter(parameter))
events[signature] = EventSemantics(
signature,
event["anonymous"],
event["name"],
parameters_semantics,
)
functions = dict()
for signature, function in raw_contract_semantics["functions"].items():
inputs_semantics = []
for parameter in function["inputs"]:
inputs_semantics.append(decode_parameter(parameter))
outputs_semantics = []
for parameter in function["outputs"]:
outputs_semantics.append(decode_parameter(parameter))
functions[signature] = FunctionSemantics(
signature, function["name"], inputs_semantics, outputs_semantics
)
transformations = dict()
for signature, parameters_transformations in raw_contract_semantics[
"transformations"
].items():
transformations[signature] = dict()
for parameter, transformation in parameters_transformations.items():
transformations[signature][parameter] = TransformationSemantics(
transformation["transformed_name"],
transformation["transformed_type"],
transformation["transformation"],
)
contract_semantics = ContractSemantics(
raw_contract_semantics["code_hash"],
raw_contract_semantics["name"],
events,
functions,
transformations,
)
address_semantics = AddressSemantics(
chain_id,
address,
raw_address_semantics["name"],
raw_address_semantics["is_contract"],
contract_semantics,
raw_address_semantics["standard"],
erc20_semantics,
)
return address_semantics
else:
return None
def insert_contract(self, contract, update_if_exist=False):
contract_with_id = {"_id": contract["code_hash"], **contract}
if update_if_exist:
self._contracts.replace_one(
{"_id": contract_with_id["_id"]}, contract_with_id, upsert=True
)
else:
self._contracts.insert_one(contract_with_id)
def insert_address(self, address, update_if_exist=False):
address_with_id = {
"_id": f"{address['chain_id']}-{address['address']}",
**address,
}
if update_if_exist:
self._addresses.replace_one(
{"_id": address_with_id["_id"]}, address_with_id, upsert=True
)
else:
self._addresses.insert_one(address_with_id)
def insert_signature(self, signature, update_if_exist=False):
signature_with_id = {"_id": signature["hash"], **signature}
if update_if_exist:
self._signatures.replace_one(
{"_id": signature_with_id["_id"]}, signature_with_id, upsert=True
)
else:
self._signatures.insert_one(signature_with_id)
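# A minimal usage sketch, assuming a local MongoDB instance; the connection URI,
# database name, chain_id and address below are placeholders for illustration only.
if __name__ == "__main__":
    from pymongo import MongoClient
    client = MongoClient("mongodb://localhost:27017")
    semantics_db = SemanticsDatabase(client["semantics_db_example"])
    # Returns an AddressSemantics object if the address is known, otherwise None
    semantics = semantics_db.get_raw_semantics("mainnet", "0x0000000000000000000000000000000000000000")
    print(semantics)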
|
the-stack_0_18064 | # Copyright (C) 2020-2021 Intel Corporation
#
# SPDX-License-Identifier: MIT
import logging as log
import os
import os.path as osp
from datumaro.components.annotation import (
AnnotationType, Label, LabelCategories,
)
from datumaro.components.converter import Converter
from datumaro.components.extractor import DatasetItem, Importer, SourceExtractor
from datumaro.util.image import find_images
class ImagenetPath:
IMAGE_DIR_NO_LABEL = 'no_label'
class ImagenetExtractor(SourceExtractor):
def __init__(self, path, subset=None):
assert osp.isdir(path), path
super().__init__(subset=subset)
self._categories = self._load_categories(path)
self._items = list(self._load_items(path).values())
def _load_categories(self, path):
label_cat = LabelCategories()
for dirname in sorted(os.listdir(path)):
if dirname != ImagenetPath.IMAGE_DIR_NO_LABEL:
label_cat.add(dirname)
return { AnnotationType.label: label_cat }
def _load_items(self, path):
items = {}
for image_path in find_images(path, recursive=True, max_depth=1):
label = osp.basename(osp.dirname(image_path))
image_name = osp.splitext(osp.basename(image_path))[0]
item_id = osp.join(label, image_name)
item = items.get(item_id)
if item is None:
item = DatasetItem(id=item_id, subset=self._subset,
image=image_path)
items[item_id] = item
annotations = item.annotations
if label != ImagenetPath.IMAGE_DIR_NO_LABEL:
label = self._categories[AnnotationType.label].find(label)[0]
annotations.append(Label(label=label))
return items
class ImagenetImporter(Importer):
@classmethod
def find_sources(cls, path):
if not osp.isdir(path):
return []
return [{ 'url': path, 'format': ImagenetExtractor.NAME }]
class ImagenetConverter(Converter):
DEFAULT_IMAGE_EXT = '.jpg'
def apply(self):
def _get_dir_name(id_parts, label_name):
if 1 < len(id_parts) and id_parts[0] == label_name:
return ''
else:
return label_name
if 1 < len(self._extractor.subsets()):
log.warning("ImageNet format only supports exporting a single "
"subset, subset information will not be used.")
subset_dir = self._save_dir
extractor = self._extractor
labels = {}
for item in self._extractor:
id_parts = item.id.split('/')
labels = set(p.label for p in item.annotations
if p.type == AnnotationType.label)
for label in labels:
label_name = extractor.categories()[AnnotationType.label][label].name
self._save_image(item, subdir=osp.join(subset_dir,
_get_dir_name(id_parts, label_name)))
if not labels:
self._save_image(item, subdir=osp.join(subset_dir,
_get_dir_name(id_parts, ImagenetPath.IMAGE_DIR_NO_LABEL)))
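# A short usage sketch built only on the classes defined above; the dataset path is a
# placeholder and the on-disk layout is assumed to be one sub-directory per label, as
# the extractor expects.
if __name__ == "__main__":
    dataset_root = "path/to/imagenet_like_dataset"
    for source in ImagenetImporter.find_sources(dataset_root):
        extractor = ImagenetExtractor(source["url"])
        for item in extractor:
            # item.id is "<label>/<image name>"; items under 'no_label' carry no Label annotation
            print(item.id, [a.label for a in item.annotations])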
|
the-stack_0_18066 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Stegano - Stegano is a pure Python steganography module.
# Copyright (C) 2010-2021 Cédric Bonhomme - https://www.cedricbonhomme.org
#
# For more information : https://git.sr.ht/~cedric/stegano
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
__author__ = "Cedric Bonhomme"
__version__ = "$Revision: 0.3 $"
__date__ = "$Date: 2011/12/28 $"
__revision__ = "$Date: 2019/06/04 $"
__license__ = "GPLv3"
import itertools
import math
from typing import Dict
from typing import Iterator
from typing import List
def identity() -> Iterator[int]:
"""f(x) = x
"""
n = 0
while True:
yield n
n += 1
def triangular_numbers() -> Iterator[int]:
"""Triangular numbers: a(n) = C(n+1,2) = n(n+1)/2 = 0+1+2+...+n.
http://oeis.org/A000217
"""
n = 0
while True:
yield (n * (n + 1)) // 2
n += 1
def fermat() -> Iterator[int]:
"""Generate the n-th Fermat Number.
https://oeis.org/A000215
"""
y = 3
while True:
yield y
y = pow(y - 1, 2) + 1
def mersenne() -> Iterator[int]:
"""Generate 2^p - 1, where p is prime.
https://oeis.org/A001348
"""
prime_numbers = eratosthenes()
while True:
yield 2 ** next(prime_numbers) - 1
def eratosthenes() -> Iterator[int]:
"""Generate the prime numbers with the sieve of Eratosthenes.
https://oeis.org/A000040
"""
d = {} # type: Dict[int, List[int]]
for i in itertools.count(2):
if i in d:
for j in d[i]:
d[i + j] = d.get(i + j, []) + [j]
del d[i]
else:
d[i * i] = [i]
yield i
def composite() -> Iterator[int]:
"""Generate the composite numbers using the sieve of Eratosthenes.
https://oeis.org/A002808
"""
p1 = 3
for p2 in eratosthenes():
for n in range(p1 + 1, p2):
yield n
p1 = p2
def carmichael() -> Iterator[int]:
"""Composite numbers n such that a^(n-1) == 1 (mod n) for every a coprime
to n.
https://oeis.org/A002997
"""
for m in composite():
for a in range(2, m):
if pow(a, m, m) != a:
break
else:
yield m
def ackermann_slow(m: int, n: int) -> int:
"""Ackermann number.
"""
if m == 0:
return n + 1
elif n == 0:
return ackermann_slow(m - 1, 1)
else:
return ackermann_slow(m - 1, ackermann_slow(m, n - 1))
def ackermann_naive(m: int) -> Iterator[int]:
"""Naive Ackermann encapsulated in a generator
"""
n = 0
while True:
yield ackermann_slow(m, n)
n += 1
def ackermann_fast(m: int, n: int) -> int:
"""Ackermann number.
"""
while m >= 4:
if n == 0:
n = 1
else:
n = ackermann_fast(m, n - 1)
m -= 1
if m == 3:
return (1 << n + 3) - 3
elif m == 2:
return (n << 1) + 3
elif m == 1:
return n + 2
else:
return n + 1
def ackermann(m: int) -> Iterator[int]:
"""Ackermann encapsulated in a generator.
"""
n = 0
while True:
yield ackermann_fast(m, n)
n += 1
def fibonacci() -> Iterator[int]:
"""Generate the sequence of Fibonacci.
https://oeis.org/A000045
"""
a, b = 1, 2
while True:
yield a
a, b = b, a + b
def log_gen() -> Iterator[int]:
"""Logarithmic generator.
"""
y = 1
while True:
adder = max(1, math.pow(10, int(math.log10(y))))
yield int(y)
y = y + int(adder)
polys = {
2: [2, 1],
3: [3, 1],
4: [4, 1],
5: [5, 2],
6: [6, 1],
7: [7, 1],
8: [8, 4, 3, 2],
9: [9, 4],
10: [10, 3],
11: [11, 2],
12: [12, 6, 4, 1],
13: [13, 4, 3, 1],
14: [14, 8, 6, 1],
15: [15, 1],
16: [16, 12, 3, 1],
17: [17, 3],
18: [18, 7],
19: [19, 5, 2, 1],
20: [20, 3],
21: [21, 2],
22: [22, 1],
23: [23, 5],
24: [24, 7, 2, 1],
25: [25, 3],
26: [26, 6, 2, 1],
27: [27, 5, 2, 1],
28: [28, 3],
29: [29, 2],
30: [30, 23, 2, 1],
31: [31, 3],
}
def LFSR(m: int) -> Iterator[int]:
"""LFSR generator of the given size
https://en.wikipedia.org/wiki/Linear-feedback_shift_register
"""
n: int = m.bit_length() - 1
# Set initial state to {1 0 0 ... 0}
state: List[int] = [0] * n
state[0] = 1
feedback: int = 0
poly: List[int] = polys[n]
while True:
# Compute the feedback bit
feedback = 0
for i in range(len(poly)):
feedback = feedback ^ state[poly[i] - 1]
# Roll the registers
state.pop()
# Add the feedback bit
state.insert(0, feedback)
# Convert the registers to an int
out = sum([e * (2 ** i) for i, e in enumerate(state)])
yield out
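# A quick consumption sketch: every generator above yields an unbounded stream of
# integers, so callers typically slice off as many values as they need.
if __name__ == "__main__":
    from itertools import islice
    print(list(islice(eratosthenes(), 10)))   # first 10 primes
    print(list(islice(fibonacci(), 10)))      # first 10 values of the Fibonacci-style sequence
    print(list(islice(LFSR(256), 10)))        # first 10 states of the 8-register LFSR (m=256 -> n=8)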
|
the-stack_0_18067 | #!/bin/python3
# Copyright 2022, Max Brueggemann, www.maxbrueggemann.de
# This program searches the current directory for python files
# that have a filename beginning with "rRun". These files are then
# executed as subprocesses. The subprocesses will be terminated when
# the corresponding file has vanished. Has a file has been modified,
# the subprocess will be restarted.
import os
import subprocess
import sys
import multiprocessing
import time
import signal
class Rule:
def __init__(self,filename):
self.filename=filename
self.fileExists=True
try:
self.time=os.path.getmtime(self.filename)
except:
self.time=0
self.subprocess=None
print("Found script "+filename)
self.start()
def kill(self):
print("Killing script "+self.filename)
self.subprocess.kill()
def start(self):
print("Starting script "+self.filename)
self.subprocess = subprocess.Popen([sys.executable, self.filename])
def modified(self):
time=self.time
try:
time=os.path.getmtime(self.filename)
except:
pass
if self.time != time:
self.time=time
print(self.filename+" changed. Reloading.")
self.kill()
self.start()
scriptList=[]
def signal_handler(signal, frame):
for x in scriptList:
x.kill()
sys.exit(0)
signal.signal(signal.SIGINT, signal_handler)
while True:
fileList=os.listdir(".")
for x in scriptList:
x.fileExists=False
for x in fileList:
found = False
if x.startswith("rRun") and x.endswith(".py"):
for y in scriptList:
if y.filename == x:
y.fileExists=True
found=True
if not found:
scriptList.append(Rule(x))
time.sleep(3)
for x in scriptList:
x.modified()
if not x.fileExists:
print("Script "+x.filename+" is gone.")
x.kill()
scriptList.remove(x)
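# Example of a script this runner would pick up (illustrative only; the file name and
# body are assumptions). Saved as e.g. rRunBlink.py next to this runner:
#
#     #!/bin/python3
#     import time
#     while True:
#         print("rRunBlink.py is alive")
#         time.sleep(10)
#
# Editing rRunBlink.py restarts its subprocess; deleting it terminates the subprocess.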
|
the-stack_0_18068 | import os
from shutil import copyfile
from datetime import datetime
import numpy as np
from mikeio.dfs3 import Dfs3
from mikeio.eum import EUMType, ItemInfo, TimeStep
def test_read_dfs3():
dfs = Dfs3()
ds = dfs.read("tests/testdata/Grid1.dfs3")
assert len(ds.data) == 2
assert len(ds.time) == 30
assert ds.data[0].shape == (30, 10, 10, 10) # t # z # y # x
assert ds.items[0].name == "Untitled"
def test_create_single_item(tmpdir):
outfilename = os.path.join(tmpdir.dirname, "simple.dfs3")
start_time = datetime(2012, 1, 1)
items = [ItemInfo(EUMType.Relative_moisture_content)]
data = []
# t , z, y, x
d = np.random.random([20, 2, 5, 10])
d[:, 0, 0, 0] = 0.0
data.append(d)
title = "test dfs3"
dfs = Dfs3()
dfs.create(
filename=outfilename,
data=data,
start_time=start_time,
timeseries_unit=TimeStep.SECOND,
dt=3600.0,
items=items,
coordinate=["UTM-33", 450000, 560000, 0],
length_x=0.1,
length_y=0.1,
length_z=10.0,
title=title,
)
def test_read_create(tmpdir):
dfs = Dfs3()
ds = dfs.read("tests/testdata/Grid1.dfs3")
outfilename = os.path.join(tmpdir.dirname, "rw.dfs3")
start_time = datetime(2012, 1, 1)
items = ds.items
data = ds.data
title = "test dfs3"
dfs = Dfs3()
dfs.create(
filename=outfilename,
data=data,
start_time=ds.time[0],
timeseries_unit=TimeStep.SECOND,
dt=(ds.time[1] - ds.time[0]).total_seconds(),
items=items,
coordinate=["LONG/LAT", 5, 10, 0],
length_x=0.1,
length_y=0.1,
title=title,
)
def test_write(tmpdir):
filename1 = "tests/testdata/Grid1.dfs3"
filename2 = os.path.join(tmpdir.dirname, "written.dfs3")
copyfile(filename1, filename2)
# read contents of original file
dfs = Dfs3()
res1 = dfs.read(filename1)
# overwrite
res1.data[0] = -2 * res1.data[0]
dfs.write(filename2, res1.data)
|
the-stack_0_18069 | #!/usr/bin/env python3
##
#######################################################################################################################
#
# Copyright (c) 2020-2022 Advanced Micro Devices, Inc. All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
#######################################################################################################################
import sys
import os
import json
import argparse
import warnings
import textwrap
from itertools import chain
from shaderProfileTemplate import *
outputFile = "g_shader_profile"
configFileName = "profile.json"
headerFileName = outputFile + ".h"
sourceFileName = outputFile + ".cpp"
###################################################################################################################
# Functions to parse app profiles from JSON files and convert to C++ structures and functions (AT COMPILE TIME)
###################################################################################################################
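# For reference, an illustrative profile.json that these parsers accept could look like
# the snippet below. The hash and limit values are assumptions for documentation only;
# the authoritative key lists live in shaderProfileTemplate.py.
#
#     {
#         "entries": [
#             {
#                 "pattern": {
#                     "ps": { "codeHash": "0x1234567890abcdef 1122334455667788" }
#                 },
#                 "action": {
#                     "ps": { "vgprLimit": 64 }
#                 }
#             }
#         ]
#     }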
# Parses stage patterns from the input json file and fetches code template from shaderProfileTemplate.py
def parseJsonProfilePatternShader(shaderPatterns):
success = checkValidKeys(shaderPatterns, SHADER_PATTERN)
codeShaderPattern = ""
if success:
for shaderPatternKey, shaderPatternValue in shaderPatterns.items():
if type(shaderPatternValue) in SHADER_PATTERN[shaderPatternKey]["type"]:
success &= True
else:
success &= False
warnings.warn("********** Warning: Type Mismatch for shader_pattern **********\n"
"Parsed Stage Pattern key: {0}\n"
"Parsed Stage Pattern value: {1}\n"
"Parsed Stage Pattern value type: {2}\n"
"Expected value type: {3}\n".format(shaderPatternKey,
shaderPatternValue,
type(shaderPatternValue),
ENTRIES_TEMPLATE["entries"]["pattern"][shaderPatternKey]["type"]))
cppCode = SHADER_PATTERN[shaderPatternKey]["codeTemplate"]
if shaderPatternKey == "stageActive":
cppCode = cppCode.replace("%Value%", str(shaderPatternValue).lower())
elif shaderPatternKey == "stageInactive":
cppCode = cppCode.replace("%Value%", str(shaderPatternValue).lower())
elif shaderPatternKey == "codeHash":
codeHash = str(shaderPatternValue).split(' ')
valueUpper = (codeHash[0][2:]).zfill(16).upper()
valueLower = codeHash[1].zfill(16).upper()
cppCode = cppCode.replace("%valueLower%", valueLower)
cppCode = cppCode.replace("%valueUpper%", valueUpper)
elif shaderPatternKey == "codeSizeLessThan":
cppCode = cppCode.replace("%Value%", str(shaderPatternValue).lower())
codeShaderPattern = codeShaderPattern + cppCode
return success, codeShaderPattern
else:
print("************Parsing failed****************")
return success, codeShaderPattern
# Parses patterns from the input json file and fetches code template from shaderProfileTemplate.py
def parseJsonProfileEntryPattern(pattern):
success = checkValidKeys(pattern, ENTRIES_TEMPLATE["entries"]["pattern"])
codePattern = ""
if success:
for patternKey, patternValue in pattern.items():
cppCode = ""
if type(patternValue) in ENTRIES_TEMPLATE["entries"]["pattern"][patternKey]["type"]:
success &= True
else:
success &= False
warnings.warn("********** Warning: Type Mismatch for pattern **********\n"
"Parsed Pattern key: {0}\n"
"Parsed Pattern value: {1}\n"
"Parsed Pattern value type: {2}\n"
"Expected value type: {3}\n".format(patternKey,
patternValue,
type(patternValue),
ENTRIES_TEMPLATE["entries"]["pattern"][patternKey]["type"]))
if patternKey == "always":
cppCode = ENTRIES_TEMPLATE["entries"]["pattern"][patternKey]["codeTemplate"]
cppCode = cppCode.replace("%Value%", str(patternValue).lower())
elif patternKey in ["vs",
"hs",
"ds",
"gs",
"ps",
"cs"]:
success, cppCode = parseJsonProfilePatternShader(patternValue)
shaderStage = ENTRIES_TEMPLATE["entries"]["pattern"][patternKey]["shaderStage"]
cppCode = cppCode.replace("%ShaderStage%", shaderStage)
codePattern = codePattern + cppCode
return success, codePattern
else:
print("************ Parsing failed ****************")
return success, codePattern
def parseJsonFlags(key, flags):
cppCode = ""
success = False
return success, cppCode
# Parses stage actions from the input json file and fetches code template from shaderProfileTemplate.py.
# Includes parsing options for
# [
# 'optStrategyFlags', 'optStrategyFlags2', 'vgprLimit', 'sgprLimit', 'ldsSpillLimitDwords',
# 'maxArraySizeForFastDynamicIndexing',
# 'userDataSpillThreshold', 'maxThreadGroupsPerComputeUnit', 'scOptions', 'scOptionsMask', 'trapPresent',
# 'debugMode', 'allowReZ', 'shaderReplaceEnabled', 'fpControlFlags', 'optimizationIntent', 'disableLoopUnrolls',
# 'enableSelectiveInline', 'maxOccupancyOptions', 'lowLatencyOptions', 'waveSize', 'wgpMode', 'waveBreakSize',
# 'nggDisable', 'nggFasterLaunchRate', 'nggVertexReuse', 'nggEnableFrustumCulling', 'nggEnableBoxFilterCulling',
# 'nggEnableSphereCulling', 'nggEnableBackfaceCulling', 'nggEnableSmallPrimFilter', 'enableSubvector',
# 'enableSubvectorSharedVgprs', 'maxWavesPerCu', 'cuEnableMask', 'maxThreadGroupsPerCu', 'useSiScheduler',
# 'reconfigWorkgroupLayout', 'forceLoopUnrollCount', 'enableLoadScalarizer', 'disableLicm', 'unrollThreshold'
# ]
def parseJsonProfileActionShader(shaderActions):
success = checkValidKeys(shaderActions, SHADER_ACTION)
result = {}
result['success'] = success
for branch in BRANCHES:
if branch not in result:
result[branch] = False
codeShaderAction = ""
if success:
for shaderActionKey, shaderActionValue in shaderActions.items():
cppCode = ""
if type(shaderActionValue) in SHADER_ACTION[shaderActionKey]["type"]:
success &= True
else:
success &= False
warnings.warn("********** Warning: Type Mismatch for shader action **********\n"
"Parsed Stage Action Key: {0}\n"
"Parsed Stage Action value: {1}\n"
"Parsed Stage Action Value type: {2}\n"
"Expected value type: {3}".format(shaderActionKey,
shaderActionValue,
type(shaderActionValue),
SHADER_ACTION[shaderActionKey]["type"]))
result['success'] |= success
if shaderActionKey in BRANCHES:
if shaderActionKey == 'optStrategyFlags':
result["optStrategyFlags"] = True
elif shaderActionKey == 'optStrategyFlags2':
result["optStrategyFlags2"] = True
elif shaderActionKey == 'fpControlFlags':
result["fpControlFlags"] = True
elif shaderActionKey == 'maxOccupancyOptions':
result["maxOccupancyOptions"] = True
elif shaderActionKey == 'lowLatencyOptions':
result["lowLatencyOptions"] = True
if (isinstance(shaderActionValue, int) or
isinstance(shaderActionValue, list) or
isinstance(shaderActionValue, str) or
isinstance(shaderActionValue, bool)):
if "codeTemplate" in SHADER_ACTION[shaderActionKey]:
cppCode = SHADER_ACTION[shaderActionKey]["codeTemplate"]
else:
cppCode = ""
continue
if "%FieldName%" in cppCode:
cppCode = cppCode.replace("%FieldName%", str(shaderActionKey))
if "%IntValue%" in cppCode:
cppCode = cppCode.replace("%IntValue%", str(shaderActionValue).lower())
if "%EnumValue%" in cppCode:
cppCode = cppCode.replace("%EnumValue%", str(SHADER_ACTION[shaderActionKey]["validValues"][shaderActionValue]))
if "%ListValue%" in cppCode:
cppCode = cppCode.replace("%ListValue%", convertToArray(str(shaderActionValue)))
if "%StrValue%" in cppCode:
cppCode = cppCode.replace("%StrValue%", str(shaderActionValue))
if "%BoolValue%" in cppCode:
cppCode = cppCode.replace("%BoolValue%", str(shaderActionValue).lower())
else:
# should be a dictionary type
success, cppCode = parseJsonFlags(shaderActionKey, shaderActionValue)
result['success'] |= success
# wrap with directive only if the buildType dictionary does not contain only a compiler related build type
if "buildTypes" in SHADER_ACTION[shaderActionKey] \
and len(SHADER_ACTION[shaderActionKey]["buildTypes"]) != 0 \
and not isCompilerOnlyBuildType(SHADER_ACTION[shaderActionKey]["buildTypes"]):
cppCode = wrapWithDirective(cppCode, SHADER_ACTION[shaderActionKey]["buildTypes"])
codeShaderAction = codeShaderAction + cppCode
return result, codeShaderAction
else:
print("************Parsing failed****************")
return result, codeShaderAction
# Parses actions from the input json file and fetches code template from shaderProfileTemplate.py
def parseJsonProfileEntryAction(action):
result = {}
result['success'] = False
for branch in BRANCHES:
if branch not in result:
result[branch] = False
success = True
for actionKey in action:
if actionKey in ENTRIES_TEMPLATE["entries"]["action"]:
success &= True
elif actionKey in PIPELINE_ACTION:
success &= True
else:
success = False
codeAction = ""
if success:
for actionKey, actionValue in action.items():
cppCode = ""
if actionKey in ENTRIES_TEMPLATE["entries"]["action"]:
if type(actionValue) in ENTRIES_TEMPLATE["entries"]["action"][actionKey]["type"]:
success &= True
elif actionKey in PIPELINE_ACTION:
if type(actionValue) in PIPELINE_ACTION[actionKey]["type"]:
success &= True
else:
success &= False
warnings.warn("********** Warning: Type Mismatch for action **********\n")
result['success'] |= success
if actionKey in [ "vs",
"hs",
"ds",
"gs",
"ps",
"cs"]:
actionResult, cppCode = parseJsonProfileActionShader(actionValue)
success = actionResult['success']
result = actionResult
shaderStage = ENTRIES_TEMPLATE["entries"]["action"][actionKey]["shaderStage"]
cppCode = cppCode.replace("%ShaderStage%", shaderStage)
else:
if actionKey in PIPELINE_ACTION:
cppCode = PIPELINE_ACTION[actionKey]["codeTemplate"]
if "validValues" in PIPELINE_ACTION[actionKey]:
value = PIPELINE_ACTION[actionKey]["validValues"][actionValue]
cppCode = cppCode.replace("%EnumValue%", value)
else:
cppCode = cppCode.replace("%Value%", str(actionValue))
codeAction = codeAction + cppCode
return result, codeAction
else:
print("************ Parsing failed ****************")
return result, codeAction
# Takes the entire json object as input, fetches corresponding code template from shaderProfileTemplate.py, manipulates
# it according to tuning parameters present in the json file and finally returns a block of code that is going to reside
# inside g_shader_profile.cpp . The block of code that is returned builds the shader profile in g_shader_profile.cpp
def genProfile(dict, compiler, gfxip):
entries = dict["entries"]
entryCount = 0
cppCode = ""
result = {}
result['success'] = False
for branch in BRANCHES:
if branch not in result:
result[branch] = False
if len(entries) != 0:
for entry in entries:
if checkValidKeys(entry, ENTRIES_TEMPLATE["entries"]):
pattern = entry["pattern"]
action = entry["action"]
success, cppPattern = parseJsonProfileEntryPattern(pattern)
actionResult, cppAction = parseJsonProfileEntryAction(action)
for branch in actionResult:
if actionResult[branch]:
result[branch] = True
if gfxip == "generic":
cppCode = cppCode + IncrementEntryTemplate + cppPattern + cppAction + "\n"
cppCode = cppCode.replace("%EntryNum%", 'i')
else:
cppCode = cppCode + cppPattern + cppAction + "\n"
cppCode = cppCode.replace("%EntryNum%", str(entryCount))
entryCount = entryCount + 1
else:
print("************ Parsing failed ****************")
if gfxip == "generic":
entryCountTemplate = ""
else:
entryCountTemplate = EntryCountTemplate.replace("%entryCount%", str(entryCount))
var = ""
varTemplate = ""
if gfxip == "generic":
var = InitializedVarTemplate.replace("%DataType%", 'uint32_t')
var = var.replace("%VarName%", 'i')
var = var.replace("%DefaultValue%", str(0))
varTemplate = varTemplate + var + "\n"
cppCode = varTemplate + entryCountTemplate + cppCode
return dedentAll(cppCode.rstrip("\n"))
# recursive function
def createStructAndVarDefinitions(dictObjects, parent = None):
contentAll = ''
if not isinstance(dictObjects, list):
dictObjects = [dictObjects]
for dictObject in dictObjects:
content = ''
for key, value in dictObject.items():
if "entityInfo" in value:
# fetch entityInfo with the given parent name
value = retrieveEntityInfo(value, parent)
if not value:
continue
if "entity" in value:
success = checkValidKeys(ValidKeysForEntity[value["entity"]], value)
template = ''
if success:
if value["entity"] == "struct":
if value["structName"] != "":
template = StructTemplate.replace("%StructName%", " " + value["structName"])
else:
template = StructTemplate.replace("%StructName%", value["structName"])
template = template.replace("%StructObj%", value["objectName"])
if value["buildTypes"]:
template = wrapWithDirective(template, value["buildTypes"])
if value["child"]:
structBody = createStructAndVarDefinitions(value["child"], parent=key)
else:
structBody = ''
template = template.replace("%StructDefs%", indent(structBody))
if value["entity"] == "union":
if value["unionName"] != "":
                            template = UnionTemplate.replace("%UnionName%", " " + value["unionName"])
else:
template = UnionTemplate.replace("%UnionName%", value["unionName"])
template = template.replace("%UnionObj%", value["objectName"])
if value["buildTypes"]:
template = wrapWithDirective(template, value["buildTypes"])
if value["child"]:
unionBody = createStructAndVarDefinitions(value["child"], parent=key)
else:
unionBody = ''
template = template.replace("%UnionDefs%", indent(unionBody))
if value["entity"] == "var":
# Initialized Variable
if value["defaultValue"]:
template = InitializedVarTemplate.replace("%DataType%", value["dataType"])
template = template.replace("%VarName%", value["varName"])
template = template.replace("%DefaultValue%", str(value["defaultValue"]))
# Uninitialized variable
else:
template = UninitializedVarTemplate.replace("%DataType%", value["dataType"])
template = template.replace("%VarName%", value["varName"])
if value["buildTypes"]:
template = wrapWithDirective(template, value["buildTypes"])
if value["entity"] == "bitField":
template = BitFieldVarTemplate.replace("%DataType%", value["dataType"])
template = template.replace("%VarName%", value["varName"])
template = template.replace("%DefaultValue%", str(value["defaultValue"]))
if value["buildTypes"]:
template = wrapWithDirective(template, value["buildTypes"])
if value["entity"] == "array":
# initialized array
if value["arrayValue"]:
template = InitializedArrTemplate.replace("%DataType%", value["dataType"])
template = template.replace("%VarName%", value["varName"])
template = template.replace("%ArrSize%", value["arraySize"])
template = template.replace("%ArrValue%", value["arrayValue"])
# Uninitialized array
else:
template = UnInitializedArrTemplate.replace("%DataType%", value["dataType"])
template = template.replace("%VarName%", value["varName"])
template = template.replace("%ArrSize%", value["arraySize"])
if value["buildTypes"]:
template = wrapWithDirective(template, value["buildTypes"])
if "description" in value:
if "secured" in value:
template = wrapWithComment(template, value["description"], value["secured"])
else:
template = wrapWithComment(template, value["description"], "false")
content = content + template
else:
print("************ Parsing failed ****************")
contentAll = contentAll + content
return contentAll.strip("\n")
# Reads app_profile.h from its location in the driver, and parses the AppProfile Class to retrieve the names of game
# titles and their build type. Returns a dictionary of the form
# {
# "released": {
# "gameTitles": [],
# "buildTypes": {"andType": []}
# },
# }
def getGameTitles(filePath):
content = open(filePath).readlines()
tmpContent = None
appProfileContent = None
start = -1
for i, line in enumerate(content):
if line.startswith("enum class AppProfile"):
start = i + 1
elif ("};" in line) and (start >= 0):
appProfileContent = content[start:i]
break
if appProfileContent is not None:
gameTitleInfo = {
"released": {
"gameTitles": [],
"buildTypes": {"andType": []}
}
}
hasBuildType = False
directive = ""
for i, title in enumerate(appProfileContent):
title = title.replace("\n", "")
title = title.replace(" ", "")
title = title.replace(",", "")
# removes comments
if "//" in title:
title = title.split("//")[0]
if title.startswith("Default"):
continue
if title.startswith("{"):
continue
if title == "":
continue
if "#if" in title:
hasBuildType = True
                directive = title.replace("#if", "", 1).strip()
gameTitleInfo[directive] = {
"gameTitles": [],
"buildTypes": {"andType": [directive]}
}
continue
elif ("#end" in title) or ("#else" in title):
hasBuildType = False
continue
if hasBuildType:
gameTitleInfo[directive]["gameTitles"].append(title)
else:
gameTitleInfo["released"]["gameTitles"].append(title)
return gameTitleInfo
else:
return {}
###################################################################################################################
# Build methods to dump Pipeline profile of a specific (currently running) app to a JSON file
###################################################################################################################
def buildProfileEntryPatternToJson():
cppCode = ""
conditionStr = ""
defs = ""
patternCount = 0
for pattern in SHADER_PATTERN:
if (patternCount < len(SHADER_PATTERN) - 1):
conditionStr = conditionStr + "shader.match." + pattern + " ||\n"
else:
conditionStr = conditionStr + "shader.match." + pattern
defs = defs + ConditionShaderMatchPattern.replace("%Pattern%", pattern)
defs = defs.replace("%Defs%", SHADER_PATTERN[pattern]["jsonWriterTemplate"])
patternCount += 1
cppCode = ProfileEntryPatternToJsonFunc.replace("%Condition%", indent(conditionStr,times=3))
cppCode = cppCode.replace("%Defs%", indent(defs, times=3))
return cppCode
# Iterates over SHADER_ACTION but dumps only the keys/actions that are declared in ShaderTuningOptions, dynamicShaderInfo
# and shaderCreate structures. This essentially means that this key in SHADER_ACTION should have at least one of
# these in the parent field in entityInfo
def buildProfileEntryActionToJson():
cppCode = ""
for action in PIPELINE_ACTION:
conditionStr = ""
for entity in PIPELINE_ACTION[action]["entityInfo"]:
if entity["parent"] == "createInfo.anonStruct":
conditionStr = ConditionCreateInfoApply.replace("%Defs%", PIPELINE_ACTION[action]["jsonWriterTemplate"])
conditionStr = conditionStr.replace("%Flag%", action)
cppCode = cppCode + conditionStr
funcDef = ProfileEntryActionToJsonFunc.replace("%CreateInfoApply%", indent(cppCode.strip("\n")))
cppCode = ""
for action in SHADER_ACTION:
conditionStr = ""
if "jsonWriterTemplate" in SHADER_ACTION[action]:
for entity in SHADER_ACTION[action]["entityInfo"]:
if "jsonWritable" in entity and entity["jsonWritable"]:
if entity["parent"] == "shaderCreate.anonStruct":
conditionStr = ConditionShaderCreateApply.replace("%Defs%", SHADER_ACTION[action]["jsonWriterTemplate"])
conditionStr = conditionStr.replace("%Flag%", action)
cppCode = cppCode + wrapWithDirective(conditionStr, SHADER_ACTION[action]["buildTypes"])
break
elif entity["parent"] == "dynamicShaderInfo.anonStruct":
conditionStr = ConditionDynamicShaderInfoApply.replace("%Defs%", SHADER_ACTION[action]["jsonWriterTemplate"])
conditionStr = conditionStr.replace("%Flag%", action)
cppCode = cppCode + wrapWithDirective(conditionStr, SHADER_ACTION[action]["buildTypes"])
break
elif entity["parent"] == "ShaderTuningOptions":
conditionStr = ConditionShaderCreateTuningOptions.replace("%Defs%", SHADER_ACTION[action]["jsonWriterTemplate"])
conditionStr = conditionStr.replace("%Flag%", action)
cppCode = cppCode + wrapWithDirective(conditionStr, entity["buildTypes"])
break
conditionStr = ""
patternCount = 0
for pattern in SHADER_PATTERN:
if (patternCount < len(SHADER_PATTERN) - 1):
conditionStr = conditionStr + "pattern.shaders[i].match." + pattern + " ||\n"
else:
conditionStr = conditionStr + "pattern.shaders[i].match." + pattern
patternCount += 1
funcDef = funcDef.replace("%Condition%", indent(conditionStr,times=3))
funcDef = funcDef.replace("%ShaderCreateApply%", indent(cppCode.strip("\n"), times=3))
return funcDef
###################################################################################################################
# Build methods to parse a JSON file and apply the read app profile to driver (AT RUNTIME)
###################################################################################################################
def parseJsonProfileEntryPatternRuntime():
cppCode = ""
validKeys = ""
defs = ""
for key in ENTRIES_TEMPLATE["entries"]["pattern"]:
validKeys = validKeys + '"' + key + '",\n'
defs = defs + ConditionParseJsonProfileEntryRuntime.replace("%Key%", key)
strValue = convertTypeToStrValue(ENTRIES_TEMPLATE["entries"]["pattern"][key]["type"][0])
if strValue == "dictValue":
shaderStage = ENTRIES_TEMPLATE["entries"]["pattern"][key]["shaderStage"]
parseJsonProfileEntryPatternTemplateCode = parseJsonProfileEntryPatternTemplate.replace("%ShaderStage%", shaderStage)
defs = defs.replace("%Defs%", parseJsonProfileEntryPatternTemplateCode)
else:
defs = defs.replace("%Defs%", ENTRIES_TEMPLATE["entries"]["pattern"][key]["jsonReaderTemplate"])
defs = defs.replace("%Value%", strValue)
cppCode = ParseJsonProfileEntryPatternFunc.replace("%FuncDefs%", ParseJsonProfileEntryRuntimeFunc)
cppCode = cppCode.replace("%ValidKeys%", indent(validKeys.rstrip("\n"), times=2))
cppCode = cppCode.replace("%Defs%", indent(defs.rstrip("\n")))
return cppCode
def parseJsonProfileEntryActionRuntime():
cppCode = ""
validKeys = ""
defs = ""
for key in chain(PIPELINE_ACTION, ENTRIES_TEMPLATE["entries"]["action"]):
validKeys = validKeys + '"' + key + '",\n'
defs = defs + ConditionParseJsonProfileEntryRuntime.replace("%Key%", key)
if key in PIPELINE_ACTION:
strValue = convertTypeToStrValue(PIPELINE_ACTION[key]["type"][0])
if strValue != "unknownValue":
defs = defs.replace("%Defs%", PIPELINE_ACTION[key]["jsonReaderTemplate"])
elif key in ENTRIES_TEMPLATE["entries"]["action"]:
strValue = convertTypeToStrValue(ENTRIES_TEMPLATE["entries"]["action"][key]["type"][0])
if strValue == "dictValue":
shaderStage = ENTRIES_TEMPLATE["entries"]["action"][key]["shaderStage"]
parseJsonProfileEntryActionTemplateCode = parseJsonProfileEntryActionTemplate.replace("%ShaderStage%", shaderStage)
defs = defs.replace("%Defs%", parseJsonProfileEntryActionTemplateCode)
else:
defs = defs.replace("%Defs%", ENTRIES_TEMPLATE["entries"]["action"][key]["jsonReaderTemplate"])
defs = defs.replace("%Value%", strValue)
cppCode = ParseJsonProfileEntryActionFunc.replace("%FuncDefs%", ParseJsonProfileEntryRuntimeFunc)
cppCode = cppCode.replace("%ValidKeys%", indent(validKeys.rstrip("\n"), times=2))
cppCode = cppCode.replace("%Defs%", indent(defs.rstrip("\n")))
return cppCode
def parseJsonProfilePatternShaderRuntime():
cppCode = ""
validKeys = ""
defs = ""
for pattern in SHADER_PATTERN:
validKeys = validKeys + '"' + pattern + '",\n'
defs = defs + ConditionParseJsonProfileEntryRuntime.replace("%Key%", pattern)
strValue = convertTypeToStrValue(SHADER_PATTERN[pattern]["type"][0])
conditionBody = SHADER_PATTERN[pattern]["jsonReaderTemplate"]
conditionBody = conditionBody.replace("%Value%", strValue)
defs = defs.replace("%Defs%", conditionBody)
cppCode = ParseJsonProfilePatternShaderFunc.replace("%FuncDefs%", ParseJsonProfileEntryRuntimeFunc)
cppCode = cppCode.replace("%ValidKeys%", indent(validKeys.rstrip("\n"), times=2))
cppCode = cppCode.replace("%Defs%", indent(defs.rstrip("\n")))
return cppCode
def parseJsonProfileActionShaderRuntime():
cppCode = ""
validKeys = ""
defs = ""
for action in SHADER_ACTION:
if "jsonReaderTemplate" in SHADER_ACTION[action] and SHADER_ACTION[action]["jsonReadable"]:
validKeys = validKeys + '"' + action + '",\n'
conditionBlock = ConditionParseJsonProfileEntryRuntime.replace("%Key%", action)
strValue = convertTypeToStrValue(SHADER_ACTION[action]["type"][0])
conditionBody = SHADER_ACTION[action]["jsonReaderTemplate"]
conditionBody = conditionBody.replace("%Action%", action).replace("%ValueType%", strValue)
if strValue in TypeValues:
conditionBody = conditionBody.replace("%Value%", TypeValues[strValue])
conditionBlock = conditionBlock.replace("%Defs%", conditionBody)
defs = defs + wrapWithDirective(conditionBlock, SHADER_ACTION[action]["buildTypes"])
cppCode = ParseJsonProfileActionShaderFunc.replace("%FuncDefs%", ParseJsonProfileEntryRuntimeFunc)
cppCode = cppCode.replace("%ValidKeys%", indent(validKeys.rstrip("\n"), times=2))
cppCode = cppCode.replace("%Defs%", indent(defs.rstrip("\n")))
return cppCode
###################################################################################################################
# Generic functions
###################################################################################################################
def writeToFile(text, filePath):
open(filePath, 'w').write(text)
def readFromFile(fileToRead):
try:
with open(fileToRead, 'r') as file:
content = file.read()
dictObj = json.loads(content)
return dictObj, True
except Exception as e:
print("\nException Occurred:\n{0} \nCould not read from file: {1}\n".format(e, fileToRead))
return "", False
def dedentAll(text):
tempText = ""
for line in text.splitlines(True):
tempText += textwrap.dedent(line)
return tempText
def convertTypeToStrValue(valType):
if valType == int:
return "integerValue"
elif valType == bool:
return "booleanValue"
elif valType == str:
return "pStringValue"
elif valType == dict:
return "dictValue"
elif valType == list:
return "listValue"
else:
warnings.warn("********** Warning: Type unknown for action. Check 'type' key for action **********\n")
return "unknownValue"
# Checks if the keys in obj1 are also present in obj2 (list or dict)
def checkValidKeys(obj1, obj2):
if isinstance(obj1, dict) and isinstance(obj2, dict):
for key in [*obj1]:
if key in [*obj2]:
pass
else:
return False
return True
elif isinstance(obj1, dict) and isinstance(obj2, list):
for key in [*obj1]:
if key in obj2:
pass
else:
return False
return True
elif isinstance(obj1, list) and isinstance(obj2, dict):
for key in obj1:
if key in [*obj2]:
pass
else:
return False
return True
def indent(text, **kwargs):
ch = ' '
if "n_spaces" in kwargs:
n_spaces = kwargs["n_spaces"]
else:
n_spaces = 4
if "times" in kwargs:
times = kwargs["times"]
else:
times = 1
if "width" in kwargs:
wrapper = textwrap.TextWrapper()
wrapper.width = kwargs["width"]
wrapper.initial_indent = n_spaces * times * ch
wrapper.subsequent_indent = n_spaces * times * ch
contentList = wrapper.wrap(text)
for i, line in enumerate(contentList):
dedentedLine = dedentAll(line)
if dedentedLine.startswith("#if") or dedentedLine.startswith("#else") or dedentedLine.startswith("#end"):
contentList[i] = dedentedLine
return '\n'.join(contentList)
else:
padding = n_spaces * times * ch
content = ''
for line in text.splitlines(True):
if line.startswith("#if") or line.startswith("#else") or line.startswith("#end") or line.isspace():
content = content + dedentAll(line)
else:
content = content + padding + line
return content
def convertToArray(txt):
txt = txt.replace("[", "{")
txt = txt.replace("]", "}")
return txt
def isCompilerOnlyBuildType(buildObj):
if isinstance(buildObj, dict):
if len(buildObj) == 1:
if "andType" in buildObj \
and len(buildObj["andType"]) == 1 \
and buildObj["andType"][0] == BuildTypesTemplate["llpc"]:
return True
return False
def wrapWithDirective(content, buildObj):
if isinstance(buildObj, str):
if buildObj:
content = "#if "+ buildObj + "\n" + content.strip("\n") + "\n#endif\n"
elif isinstance(buildObj, dict):
if "andType" in buildObj:
if buildObj["andType"]:
valueIfDefTmp = ""
valueEndDefTmp = ""
for directive in buildObj["andType"]:
valueIfDefTmp += "#if " + directive + "\n"
valueEndDefTmp += "#endif" + "\n"
content = valueIfDefTmp + content + valueEndDefTmp
if "orType" in buildObj:
if buildObj["orType"]:
valueIfDefTmp = "#if "
valueEndDefTmp = "#endif\n"
numOfBuildTypes = len(buildObj["orType"])
for i in range(numOfBuildTypes):
type = buildObj["orType"][i]
valueIfDefTmp += type
if i < (numOfBuildTypes) -1:
valueIfDefTmp += " || "
else:
valueIfDefTmp += "\n"
content = valueIfDefTmp + content + valueEndDefTmp
if "custom" in buildObj:
if buildObj["custom"]:
if "startWith" in buildObj["custom"]:
startWith = buildObj["custom"]["startWith"]
content = startWith + "\n" + content.strip("\n")
if "endWith" in buildObj["custom"]:
endWith = buildObj["custom"]["endWith"]
content = content + "\n" + endWith + "\n"
return content
def retrieveEntityInfo(value, parent):
success = False
entityInfo = {}
listOfEntityInfoObjs = value["entityInfo"]
if not isinstance(listOfEntityInfoObjs, list):
listOfEntityInfoObjs = [listOfEntityInfoObjs]
for entityInfo in listOfEntityInfoObjs:
if entityInfo["parent"] == parent:
success = True
break
if success:
return entityInfo
else:
return {}
def wrapWithComment(content, comment, secured):
comment = indent(comment, n_spaces=0, width=110)
if secured == "true":
comment = ''.join("//" + "# " + line for line in comment.splitlines(True))
else:
comment = ''.join("// " + line for line in comment.splitlines(True))
if content.startswith("\n"):
content = comment + content
else:
content = comment + "\n" + content
return content
###################################################################################################################
# Parse all files and generate code
###################################################################################################################
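# Expected on-disk layout, reconstructed from the directory walk below (the compiler,
# gfxip, asic and game-title names are examples, not an exhaustive list):
#
#     shader_profiles/
#         llpc/
#             generic/<GameTitle>/profile.json
#             gfx103/
#                 generic/<GameTitle>/profile.json
#                 navi21/<GameTitle>/profile.json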
def main():
shaderProfileDir = ''
outputDir = ''
genDir = ''
if len(sys.argv) >= 2:
shaderProfileDir = sys.argv[1]
# if genDir was specified by the user
if len(sys.argv) == 3:
genDir = sys.argv[2]
else:
print("Error: include directory path in the argument \n"
"usage: python3 genshaderprofile.py <vulkancodebase>\\xgl\\icd\\api\\appopt\\shader_profiles\\ genDir [optional]")
return -1
if not os.path.isabs(shaderProfileDir):
shaderProfileDir = os.path.abspath(shaderProfileDir)
splitShaderProfileDir = os.path.split(shaderProfileDir)
if splitShaderProfileDir[1] == '':
outputDir = os.path.split(splitShaderProfileDir[0])[0]
else:
outputDir = splitShaderProfileDir[0]
if genDir != "":
outputDir = genDir
headerDoxComment = HeaderFileDoxComment.replace("%FileName%", outputFile)
compilers = os.listdir(shaderProfileDir)
gameTitleInfo = getGameTitles(AppProfileHeaderFilePath)
if not gameTitleInfo:
print("Could Not read 'enum class AppProfile' from app_profile.h. Exiting Program")
return -1
funcSetAppProfileGroup = ""
classShaderProfileBodyDict = {}
ifGameTitleGroupDict = {}
for compiler in compilers:
compilerDir = os.path.join(shaderProfileDir, compiler)
gfxips = os.listdir(compilerDir)
gameTitlesList = []
ifGfxipGroupDict = {}
ifGenericDict = {}
if compiler not in classShaderProfileBodyDict:
classShaderProfileBodyDict[compiler] = ""
if compiler not in ifGameTitleGroupDict:
ifGameTitleGroupDict[compiler] = ""
for gfxip in gfxips:
gfxipDir = os.path.join(compilerDir, gfxip)
gameTitlesGfxList = []
ifAsicGroupDict = {}
ifAsicGenericDict = {}
if gfxip != "generic":
asics = os.listdir(os.path.join(gfxipDir))
else:
asics = [gfxip]
for asic in asics:
if gfxip != "generic":
asicDir = os.path.join(gfxipDir, asic)
else:
asicDir = gfxipDir
print("Parsing " + asicDir)
gameTitles = os.listdir(os.path.join(asicDir))
for title in gameTitles:
gameTitleDir = os.path.join(asicDir, title)
fileToRead = os.path.join(gfxip, gameTitleDir, configFileName)
content, readSuccess = readFromFile(fileToRead)
if readSuccess:
if title not in gameTitlesList:
gameTitlesList.append(title)
if title not in gameTitlesGfxList:
gameTitlesGfxList.append(title)
if title not in ifGfxipGroupDict:
ifGfxipGroupDict[title] = ""
if title not in ifGenericDict:
ifGenericDict[title] = ""
if title not in ifAsicGroupDict:
ifAsicGroupDict[title] = ""
if title not in ifAsicGenericDict:
ifAsicGenericDict[title] = ""
# for header file: g_shader_profile.h********************************************************
funcName = compiler.title() + title + gfxip[0].upper() + gfxip[1:]
if gfxip != "generic":
if asic != "generic":
funcName = compiler.title() + title + asic[0].upper() + asic[1:]
else:
funcName += asic[0].upper() + asic[1:]
funcCompGameGfxAsic = FuncDecSetAppProfile.replace("%FuncName%", funcName)
for buildType, obj in gameTitleInfo.items():
if title in obj["gameTitles"]:
funcCompGameGfxAsic = wrapWithDirective(funcCompGameGfxAsic, obj["buildTypes"])
if asic in BuildTypesTemplate:
funcCompGameGfxAsic = wrapWithDirective(funcCompGameGfxAsic, BuildTypesTemplate[asic])
if gfxip in BuildTypesTemplate:
funcCompGameGfxAsic = wrapWithDirective(funcCompGameGfxAsic, BuildTypesTemplate[gfxip])
classShaderProfileBodyDict[compiler] += funcCompGameGfxAsic
# ********************************************************************************************
# for cpp file: g_shader_profile.cpp *********************************************************
if asic == "generic":
ifAsicGeneric = GenericAsicAppProfile.replace("%FuncName%", funcName)
ifAsicGenericDict[title] = ifAsicGeneric
else:
ifAsic = ConditionAsic.replace("%Asic%", asic[0].upper() + asic[1:])
ifAsic = ifAsic.replace("%FuncName%", funcName)
if asic in BuildTypesTemplate:
ifAsic = wrapWithDirective(ifAsic, BuildTypesTemplate[asic])
ifAsicGroupDict[title] = ifAsicGroupDict[title] + ifAsic
if gfxip == "generic":
ifGeneric = GenericGfxIpAppProfile.replace("%FuncName%", funcName)
ifGenericDict[title] = ifGeneric
appProfile = genProfile(content, compiler, gfxip)
funcSetAppProfile = SetAppProfileFunc.replace("%FuncName%", funcName)
funcSetAppProfile = funcSetAppProfile.replace("%FuncDefs%", indent(appProfile))
if compiler in BuildTypesTemplate:
funcSetAppProfile = wrapWithDirective(funcSetAppProfile, BuildTypesTemplate[compiler])
for buildType, obj in gameTitleInfo.items():
if title in obj["gameTitles"]:
funcSetAppProfile = wrapWithDirective(funcSetAppProfile, obj["buildTypes"])
if asic in BuildTypesTemplate:
funcSetAppProfile = wrapWithDirective(funcSetAppProfile, BuildTypesTemplate[asic])
if gfxip in BuildTypesTemplate:
funcSetAppProfile = wrapWithDirective(funcSetAppProfile, BuildTypesTemplate[gfxip])
funcSetAppProfileGroup = funcSetAppProfileGroup + funcSetAppProfile
# ********************************************************************************************
# for cpp file: g_shader_profile.cpp******************************************************************
for title in gameTitlesGfxList:
if gfxip != "generic":
if ifAsicGenericDict[title]:
ifGfxipBody = indent(ifAsicGroupDict[title] + ifAsicGenericDict[title])
else:
ifGfxipBody = indent(ifAsicGroupDict[title])
ifGfxip = ConditionGfxIp.replace("%Gfxip%", gfxip[0].upper() + gfxip[1:])
ifGfxip = ifGfxip.replace("%Defs%", ifGfxipBody)
if gfxip in BuildTypesTemplate:
ifGfxip = wrapWithDirective(ifGfxip, BuildTypesTemplate[gfxip])
ifGfxipGroupDict[title] = ifGfxipGroupDict[title] + ifGfxip
# ****************************************************************************************************
# for cpp file: g_shader_profile.cpp******************************************************************
for title in gameTitlesList:
if ifGenericDict[title]:
ifGameTitleBody = indent(ifGfxipGroupDict[title] + ifGenericDict[title])
else:
ifGameTitleBody = indent(ifGfxipGroupDict[title])
ifGameTitle = ConditionGameTitle.replace("%GameTitle%", title)
ifGameTitle = ifGameTitle.replace("%Defs%", ifGameTitleBody)
for buildType, obj in gameTitleInfo.items():
if title in obj["gameTitles"]:
ifGameTitle = wrapWithDirective(ifGameTitle, obj["buildTypes"])
ifGameTitleGroupDict[compiler] += ifGameTitle
# ****************************************************************************************************
###################################################################################################################
# Build the Header File
###################################################################################################################
classShaderProfilePrivateDefs = ""
for compiler in classShaderProfileBodyDict:
if compiler in BuildTypesTemplate:
if classShaderProfileBodyDict[compiler] != "":
classShaderProfilePrivateDefs = classShaderProfilePrivateDefs + "\n" + \
wrapWithDirective(classShaderProfileBodyDict[compiler], BuildTypesTemplate[compiler])
else:
classShaderProfilePrivateDefs = classShaderProfilePrivateDefs + classShaderProfileBodyDict[compiler]
funcDecJsonReader = (
FuncDecJsonReader + "\n"
)
classShaderProfilePrivateBody = FuncDecJsonWriter + "\n" + \
wrapWithDirective(funcDecJsonReader, BuildTypesTemplate["icdRuntimeAppProfile"]) + \
classShaderProfilePrivateDefs
classShaderProfilePublicBody = ( FuncDecClassShaderProfilePublic + "\n" +
wrapWithDirective(FuncDecParseJsonProfile, BuildTypesTemplate["icdRuntimeAppProfile"]) + "\n" +
wrapWithDirective(FuncDecBuildAppProfileLlpc, BuildTypesTemplate["llpc"])
)
classShaderProfile = ClassTemplate.replace("%ClassName%", "ShaderProfile")
classShaderProfile = classShaderProfile.replace("%ClassPublicDefs%", indent(classShaderProfilePublicBody))
classShaderProfile = classShaderProfile.replace("%ClassPrivateDefs%", indent(classShaderProfilePrivateBody))
content = createStructAndVarDefinitions(ShaderTuningStructsAndVars)
namespaceBody = content + "\n" + classShaderProfile
headerBody = NamespaceVK.replace("%NamespaceDefs%", namespaceBody)
headerContent = CopyrightAndWarning + headerDoxComment + HeaderIncludes + "\n" + headerBody
headerFilePath = os.path.join(outputDir, headerFileName)
writeToFile(headerContent, headerFilePath)
###################################################################################################################
# Build the Source File
###################################################################################################################
if "llpc" in ifGameTitleGroupDict:
funcBuildAppProfileLlpc = BuildAppProfileLlpcFunc.replace("%FuncDefs%",
indent(ifGameTitleGroupDict["llpc"].rstrip("\n")))
funcBuildAppProfileLlpc = wrapWithDirective(funcBuildAppProfileLlpc, BuildTypesTemplate["llpc"])
else:
funcBuildAppProfileLlpc = ""
funcProfileEntryActionToJson = buildProfileEntryActionToJson()
funcProfileEntryPatternToJson = buildProfileEntryPatternToJson()
funcJsonWriter = JsonWriterGenericDef + \
"\n" + \
funcProfileEntryPatternToJson + \
"\n" + \
funcProfileEntryActionToJson
funcJsonReader = (JsonReaderGenericDef + "\n" +
parseJsonProfileEntryPatternRuntime() + "\n" +
parseJsonProfileEntryActionRuntime() + "\n" +
parseJsonProfilePatternShaderRuntime() + "\n" +
parseJsonProfileActionShaderRuntime() + "\n"
)
cppBody = NamespaceVK.replace("%NamespaceDefs%", funcBuildAppProfileLlpc
+ "\n"
+ funcSetAppProfileGroup
+ "\n"
+ funcJsonWriter
+ "\n"
+ wrapWithDirective(funcJsonReader,
BuildTypesTemplate["icdRuntimeAppProfile"])
)
includeStr = ""
CppIncludes = CppInclude.replace("%Includes%", includeStr)
cppContent = CopyrightAndWarning + CppIncludes + cppBody
cppFilePath = os.path.join(outputDir, sourceFileName)
writeToFile(cppContent, cppFilePath)
return 0
if __name__ == '__main__':
if sys.version_info[:2] < (3, 6):
raise Exception("Python 3.6 (CPython) or a more recent python version is required.")
print("Generating shader profiles code ")
result = main()
if not result:
print("Finished generating " + headerFileName + " and " + sourceFileName)
else:
print("Error: Exiting without code generation. Driver code compilation will fail.")
exit(1)
|
the-stack_0_18074 | import modin.pandas as pd
import numpy as np
import os
import sys
from utils import *
from models_classes import SklearnModel, KerasTFModel
__author__ = 'Nicolas de Montigny'
__all__ = ['bacterial_classification','classify']
# TODO: FINISH CONVERTING TO CLASSES FOR MODELS
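# Shape of the inputs below, inferred from how the fields are accessed in this module;
# treat it as an illustrative assumption rather than a formal spec:
#   database_k_mers = {
#       'profile': 'path/to/kmers_profile_parquet',  # read with ray.data.read_parquet
#       'classes': <array-like of labels, one column per taxonomic rank>,
#       'taxas': ['domain', ..., 'species'],         # ordered taxonomic ranks
#   }
#   classified_data = {'order': [], 'bacteria': {'profile': 'path/to/bacteria_parquet'}, ...}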
def bacterial_classification(classified_data, database_k_mers, k, outdirs, dataset, training_epochs, classifier = 'lstm_attention', batch_size = 32, threshold = 0.8, verbose = 1, cv = 1, n_jobs = 1):
previous_taxa_unclassified = None
taxas = database_k_mers['taxas'].copy()
for taxa in taxas:
train = False
classified_kmers_file = '{}Xy_classified_{}_K{}_{}_database_{}_data'.format(outdirs['data_dir'], taxa, k, classifier, dataset)
unclassified_kmers_file = '{}Xy_unclassified_{}_K{}_{}_database_{}_data'.format(outdirs['data_dir'], taxa, k, classifier, dataset)
if taxa == taxas[-1]:
classified_data[taxa] = previous_taxa_unclassified
classified_data['order'].append(taxa)
else:
if classifier in ['sgd','svm','mlr','mnb']:
                model = SklearnModel(classifier, clf_file, outdirs['results_dir'], batch_size, k, verbose)
elif classifier in ['lstm_attention','cnn','widecnn']:
                model = KerasTFModel(classifier, clf_file, outdirs['results_dir'], nb_classes, batch_size, k, verbose)
else:
print('Bacteria classifier type unknown !!!\n\tModels implemented at this moment are :\n\tLinear models : Ridge regressor (sgd), Linear SVM (svm), Multiple Logistic Regression (mlr)\n\tProbability classifier : Multinomial Bayes (mnb)\n\tNeural networks : Deep hybrid between LSTM and Attention (lstm_attention), CNN (cnn) and Wide CNN (widecnn)')
sys.exit()
if not os.path.isfile(model.clf_file):
train = True
# Load extracted data if already exists or train and classify bacteria depending on chosen method and taxonomic rank
if os.path.isfile(classified_kmers_file) and os.path.isfile(unclassified_kmers_file):
if verbose:
print('Bacteria sequences at {} level already classified'.format(taxa))
classified_data[taxa] = load_Xy_data(classified_kmers_file)
previous_taxa_unclassified = load_Xy_data(unclassified_kmers_file)
classified_data['order'].append(taxa)
else:
if verbose:
print('Training classifier with bacteria sequences at {} level'.format(taxa))
# If classifier exists load it or train if not
if train is True:
# Get training dataset and assign to variables
X_train = ray.data.read_parquet(database_k_mers['profile'])
y_train = pd.DataFrame(database_k_mers['classes'], columns = database_k_mers['taxas']).loc[:,taxa]
model.train(X_train, y_train, cv)
# Classify sequences into taxa and build k-mers profiles for classified and unclassified data
# Keep previous taxa to reclassify only unclassified reads at a higher taxonomic level
if previous_taxa_unclassified is None:
if verbose:
print('Classifying bacteria sequences at {} level'.format(taxa))
                    classified_data[taxa], previous_taxa_unclassified = classify(classified_data['bacteria']['profile'], model, taxa, classified_kmers_file, unclassified_kmers_file, threshold, verbose)
else:
if verbose:
print('Classifying bacteria sequences at {} level'.format(taxa))
                    classified_data[taxa], previous_taxa_unclassified = classify(previous_taxa_unclassified['profile'], model, taxa, classified_kmers_file, unclassified_kmers_file, threshold, verbose)
save_Xy_data(classified_data[taxa], classified_kmers_file)
save_Xy_data(previous_taxa_unclassified, unclassified_kmers_file)
classified_data['order'].append(taxa)
return classified_data
def classify(df_file, model, taxa, classified_kmers_file, unclassified_kmers_file, threshold = 0.8, verbose = True):
if verbose:
print('Extracting predicted sequences at {} taxonomic level'.format(taxa))
df = ray.data.read_parquet(df_file)
classified_data = {}
pred = model.predict(df, threshold)
# Make sure classes are writen in lowercase
pred = pred.str.lower()
    df_classified = df[pred != 'unknown']
    df_unclassified = df[pred == 'unknown']
# Save / add to classified/unclassified data
try:
df_classified.to_parquet(classified_kmers_file)
classified_data['classified'] = {}
classified_data['classified']['profile'] = str(classified_kmers_file)
except:
if verbose:
print('No classified data at {} taxonomic level, cannot save it to file or add it to classified data'.format(taxa))
try:
df_unclassified.to_parquet(unclassified_kmers_file)
classified_data['unclassified'] = {}
classified_data['unclassified']['profile'] = str(unclassified_kmers_file)
except:
if verbose:
print('No unclassified data at {} taxonomic level, cannot save it to file or add it to unclassified data'.format(taxa))
    return classified_data.get('classified'), classified_data.get('unclassified')
|
the-stack_0_18076 | import vaex
import pytest
class Foo(object):
def __init__(self, df):
self.df = df
class Spam(object):
def __init__(self, df):
self.df = df
class Egg(object):
def __init__(self, spam):
self.spam = spam
self.df = spam.df
def test_accessor_basic():
vaex._add_lazy_accessor('foo', lambda: Foo)
df = vaex.example()
assert isinstance(df.foo, Foo)
assert df.foo is df.foo
assert df.foo.df is df
def test_accessor_nested():
df = vaex.example()
vaex._add_lazy_accessor('spam.egg', lambda: Egg)
with pytest.raises(expected_exception=AttributeError):
a = df.spam
vaex._add_lazy_accessor('spam.egg.foo', lambda: Foo)
with pytest.raises(expected_exception=AttributeError):
a = df.spam
vaex._add_lazy_accessor('spam', lambda: Spam)
assert df.spam is df.spam
assert df.spam.df is df
assert isinstance(df.spam, Spam)
assert df.spam.egg is df.spam.egg
assert df.spam.egg.spam is df.spam
assert isinstance(df.spam.egg, Egg)
assert df.spam.egg.foo is df.spam.egg.foo
assert df.spam.egg.foo.df is df.spam.egg # abuse of foo
assert isinstance(df.spam.egg.foo, Foo)
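# Note on the behaviour exercised above: the factory passed to vaex._add_lazy_accessor is
# only called on first attribute access and the resulting instance is cached per DataFrame
# (hence the `is` identity checks), and a dotted name such as 'spam.egg' cannot be resolved
# until its parent accessor 'spam' has been registered, which is why AttributeError is raised.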
|
the-stack_0_18079 | import torch.nn as nn
from torchvision.models.resnet import Bottleneck, ResNet
def resnet_init(m):
""" Layer initialization used by ResNet
(https://github.com/pytorch/vision/blob/27278ec8887a511bd7d6f1202d50b0da7537fc3d/
torchvision/models/resnet.py#L160)
Parameters
----------
m : nn.Module
layer to initialize
"""
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode="fan_out", nonlinearity="relu")
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
def modify_resnet_model(model, in_channels, dim_z):
""" Modifies some layers of a given torchvision resnet model to
match the one used for the CIFAR-10 experiments in the SimCLR paper.
Modified from https://github.com/Spijkervet/SimCLR/blob/master/modules/resnet_hacks.py
by also adding the ability to change the output dimension of the last
bottleneck.
Parameters
----------
model : ResNet
Instance of a torchvision ResNet model.
in_channels : int
number of input channels (1 or 3)
dim_z : int
desired dimension of the output of the model.
Returns
-------
nn.Module
Modified ResNet model.
"""
assert isinstance(model, ResNet), "model must be a ResNet instance"
conv1 = nn.Conv2d(in_channels, 64, kernel_size=3, stride=1, padding=1, bias=False)
resnet_init(conv1)
model.conv1 = conv1
model.maxpool = nn.Identity()
for i in range(2, 5):
layer = getattr(model, "layer{}".format(i))
block = list(layer.children())[0]
if isinstance(block, Bottleneck):
assert block.conv1.kernel_size == (1, 1) and block.conv1.stride == (1, 1,)
assert block.conv2.kernel_size == (3, 3) and block.conv2.stride == (2, 2,)
assert block.conv2.dilation == (
1,
1,
), "Currently, only models with dilation=1 are supported"
block.conv1.stride = (2, 2)
block.conv2.stride = (1, 1)
out_dim = model.fc.in_features
model.fc = nn.Identity()
if out_dim == dim_z:
return model
return nn.Sequential(model, nn.Linear(out_dim, dim_z))
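# Minimal usage sketch (not part of the original module): adapt a torchvision ResNet-50
# for small 32x32 inputs with a 128-dimensional output, as in the SimCLR CIFAR-10 setup;
# the weight initialization is left at the torchvision default here.
if __name__ == "__main__":
    import torch
    from torchvision.models import resnet50
    encoder = modify_resnet_model(resnet50(), in_channels=3, dim_z=128)
    dummy = torch.randn(2, 3, 32, 32)
    print(encoder(dummy).shape)  # expected: torch.Size([2, 128])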
|
the-stack_0_18080 | from django.contrib.auth import get_user_model
from django.urls import reverse
from django.test import TestCase
from rest_framework import status
from rest_framework.test import APIClient
from core.models import Ingredient, Recipe
from recipe.serializers import IngredientSerializer
INGREDIENTS_URL = reverse('recipe:ingredient-list')
class PublicIngredientsApiTests(TestCase):
"""Tests for publicly available ingredients API"""
def setUp(self):
self.client = APIClient()
def test_login_required(self):
"""Tests that login is required to access endpoint"""
res = self.client.get(INGREDIENTS_URL)
self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)
class PrivateIngredientsApiTests(TestCase):
"""Test ingredients can be retrieved by auth user"""
def setUp(self):
self.client = APIClient()
self.user = get_user_model().objects.create_user(
'[email protected]',
'password1234',
)
self.client.force_authenticate(self.user)
def test_retrieve_ingredients_list(self):
"""Test retrieving list of ingredients"""
Ingredient.objects.create(user=self.user, name='Kale')
Ingredient.objects.create(user=self.user, name='Carrot')
res = self.client.get(INGREDIENTS_URL)
ingredients = Ingredient.objects.all().order_by('-name')
serializer = IngredientSerializer(ingredients, many=True)
self.assertEqual(res.data, serializer.data)
self.assertEqual(len(res.data), 2)
self.assertEqual(res.status_code, status.HTTP_200_OK)
def test_ingredients_limited_to_user(self):
"""Test that only ingredints of the auth user are returned"""
user2 = get_user_model().objects.create_user(
'[email protected]',
'password1234',
)
Ingredient.objects.create(name='apple', user=user2)
my = Ingredient.objects.create(name='pear', user=self.user)
res = self.client.get(INGREDIENTS_URL)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(len(res.data), 1)
self.assertEqual(my.name, res.data[0]['name'])
def test_create_ingredient_successful(self):
"""Test that aut user can create ingredient"""
payload = {'name': 'cabbage'}
self.client.post(INGREDIENTS_URL, payload)
exists = Ingredient.objects.filter(
user=self.user,
name=payload['name'],
).exists()
self.assertTrue(exists)
def test_create_ingredient_invalid(self):
"""Test that no ingredient created with invalid input"""
payload = {'name': ''}
res = self.client.post(INGREDIENTS_URL, payload)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
def test_retrieve_ingredients_assigned_to_recipe(self):
"""Test filtering ingredients by those assigned to recipes"""
ingredient1 = Ingredient.objects.create(
name='broccoli', user=self.user
)
ingredient2 = Ingredient.objects.create(
name='kale', user=self.user
)
recipe = Recipe.objects.create(
title='apple crumble',
time_minutes=209,
price=38,
user=self.user
)
recipe.ingredients.add(ingredient1)
res = self.client.get(INGREDIENTS_URL, {'assigned_only': 1})
serializer1 = IngredientSerializer(ingredient1)
serializer2 = IngredientSerializer(ingredient2)
self.assertIn(serializer1.data, res.data)
self.assertNotIn(serializer2.data, res.data)
def test_retrieve_ingredient_assigned_unique(self):
"""Test filter ingredients by assigned returns unique items"""
ingredient = Ingredient.objects.create(user=self.user, name='turkey')
Ingredient.objects.create(user=self.user, name='bok choi')
recipe1 = Recipe.objects.create(
title='eggs benny',
time_minutes=3,
price=3,
user=self.user
)
recipe1.ingredients.add(ingredient)
recipe2 = Recipe.objects.create(
title='turkey ham',
time_minutes=5,
price=6.66,
user=self.user
)
recipe2.ingredients.add(ingredient)
res = self.client.get(INGREDIENTS_URL, {'assigned_only': 1})
self.assertEqual(len(res.data), 1)
|
the-stack_0_18081 | import numpy as np
import matplotlib.pyplot as plt
import os
os.system ("g++ JuanVelasquez_Ejercicio27.cpp")
os.system ("./a.out")
a = np.loadtxt("potencial.dat")
fig = plt.figure(figsize = (30,20))
ax = fig.add_subplot(111,projection='3d')
ax.plot(a[:,0],a[:,1],a[:,2])
plt.savefig("Potencial.png")
|
the-stack_0_18082 | #!/usr/bin/env python3
"""
The entrypoint to the courthouse application
"""
import datetime
import json
import logging
import os
import random
import textwrap
from logging.handlers import RotatingFileHandler
from logging import StreamHandler
from os import path
from flask import Flask, render_template, send_from_directory, request, current_app
from flask_cors import CORS
from flask_debugtoolbar import DebugToolbarExtension
from flask_jwt_extended import JWTManager
from flask_login import LoginManager, current_user
from flask_sqlalchemy import get_debug_queries
import werkzeug
import model
import util
from database import db_session, init_db, engine
from views.main import main
from views.api import api
from views.admin.admin import admin
from views.admin.configurations import configurations
from views.admin.clarifications import clarifications
from views.admin.languages import languages
from views.admin.problems import problems
from views.admin.users import users
from views.admin.runs import runs
from views.admin.contests import contests
from views.defendant import defendant
from views.auth import auth
# turn down log level for werkzeug
logging.getLogger("werkzeug").setLevel(logging.INFO)
log_location = "logs/code_court.log"
app = Flask(__name__)
CODE_COURT_PRODUCTION_ENV_VAR = "CODE_COURT_PRODUCTION"
def create_app():
"""
Initializes the flask app object
Returns:
Flask: the initialized flask app
"""
app = Flask(__name__)
app.config["SEND_FILE_MAX_AGE_DEFAULT"] = 86400 # 1 day
app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = False
app.config["SQLALCHEMY_ECHO"] = False
app.config["SQLALCHEMY_RECORD_QUERIES"] = False
app.config["model"] = model
app.config[
"SECRET_KEY"
] = "2jrlkfjoi1j3kljekdlasjdklasjdk139999d9d" # TODO: put this in config
app.config["JWT_ACCESS_TOKEN_EXPIRES"] = datetime.timedelta(days=30)
if app.config.get("SSL"):
app.config.update(dict(PREFERRED_URL_SCHEME="https"))
app.config["RUNMODE"] = "PRODUCTION" if os.getenv(
CODE_COURT_PRODUCTION_ENV_VAR
) else "DEVELOPMENT"
# Add custom filters to Jinja2
# http://flask.pocoo.org/docs/0.12/templating/
app.jinja_env.filters["dt_to_str"] = util.dt_to_str
app.jinja_env.filters["dt_to_date_str"] = util.dt_to_date_str
app.jinja_env.filters["dt_to_time_str"] = util.dt_to_time_str
setup_logging(app)
app.logger.setLevel(logging.DEBUG)
init_db()
if not app.config["TESTING"]:
setup_database(app)
with app.app_context():
app.config["MAX_CONTENT_LENGTH"] = util.get_configuration(
"max_output_length"
) * 1024 # kilobytes
CORS(app, supports_credentials=True)
JWTManager(app)
login_manager = LoginManager()
login_manager.init_app(app)
login_manager.login_view = "auth.login_view"
DebugToolbarExtension(app)
app.logger.info("Setting up app")
@login_manager.user_loader
def load_user(username):
return model.User.query.filter_by(username=username).scalar()
app.register_blueprint(main, url_prefix="")
app.register_blueprint(api, url_prefix="/api")
app.register_blueprint(admin, url_prefix="/admin")
app.register_blueprint(configurations, url_prefix="/admin/configurations")
app.register_blueprint(clarifications, url_prefix="/admin/clarifications")
app.register_blueprint(languages, url_prefix="/admin/languages")
app.register_blueprint(problems, url_prefix="/admin/problems")
app.register_blueprint(users, url_prefix="/admin/users")
app.register_blueprint(runs, url_prefix="/admin/runs")
app.register_blueprint(contests, url_prefix="/admin/contests")
app.register_blueprint(defendant, url_prefix="/defendant")
app.register_blueprint(auth, url_prefix="/admin")
@app.context_processor
def inject_user():
return {}
@app.route("/")
def defendant_index():
return send_from_directory("static/defendant-frontend", "index.html")
@app.route("/<path:path>")
def all(path):
try:
return send_from_directory("static/defendant-frontend", path)
except werkzeug.exceptions.NotFound as e:
return send_from_directory("static/defendant-frontend", "index.html")
@app.errorhandler(404)
def page_not_found(e):
return render_template("404.html"), 404
@app.errorhandler(401)
@login_manager.unauthorized_handler
def unauthorized(callback=None):
if not current_user.is_authenticated:
return render_template("auth/login.html"), 401
return render_template("401.html"), 401
@app.teardown_appcontext
def teardown(exception=None):
db_session.remove()
@app.after_request
def after_request(resp):
if app.config.get("SQLALCHEMY_RECORD_QUERIES"):
with open("/home/ben/sql", "a+") as f:
f.write("=========\n{}:\n\n".format(request.url))
for q in get_debug_queries():
f.write("{}\n\n".format(q))
f.write("=========\n\n")
return resp
return app
def setup_database(app):
"""Creates the database tables on initial startup"""
with app.app_context():
if not is_db_inited():
populate_db()
if not app.config["TESTING"] and app.config["RUNMODE"] == "DEVELOPMENT":
dev_populate_db()
def is_db_inited():
"""Checks if the db is initialized
Returns:
bool: True if the database has been initialized
"""
if not engine.dialect.has_table(engine, "user_role"):
return False
return model.UserRole.query.count() > 0
def populate_db():
"""Performs the initial database setup for the application
"""
current_app.logger.info("Initializing db tables")
db_session.add_all(
[
model.UserRole("defendant"),
model.UserRole("operator"),
model.UserRole("judge"),
model.UserRole("executioner"),
model.UserRole("observer"),
]
)
# TODO: extract these out into a folder
db_session.add_all(
[
model.Language(
"python",
"python",
True,
textwrap.dedent(
"""
#!/bin/bash
cat $input_file | python3 $program_file
exit $?"""
).strip(),
),
model.Language(
"python2",
"python",
True,
textwrap.dedent(
"""
#!/bin/bash
cat $input_file | python2 $program_file
exit $?"""
).strip(),
),
model.Language(
"perl",
"perl",
True,
textwrap.dedent(
"""
#!/bin/bash
cat $input_file | perl $program_file
exit $?"""
).strip(),
),
model.Language(
"lua",
"lua",
True,
textwrap.dedent(
"""
#!/bin/bash
cat $input_file | lua $program_file
exit $?"""
).strip(),
),
model.Language(
"nodejs",
"javascript",
True,
textwrap.dedent(
"""
#!/bin/bash
cat $input_file | node $program_file
exit $?"""
).strip(),
),
model.Language(
"guile",
"scheme",
True,
textwrap.dedent(
"""
#!/bin/bash
cat $input_file | guile --no-auto-compile $program_file
exit $?"""
).strip(),
),
model.Language(
"fortran",
"fortran",
True,
textwrap.dedent(
"""
#!/bin/bash
cp /share/program $scratch_dir/program.f
cd $scratch_dir
gfortran -o program $scratch_dir/program.f
if [[ $? != 0 ]]; then
exit $?
fi
cat $input_file | ./program
exit $?"""
).strip(),
),
model.Language(
"c",
"clike",
True,
textwrap.dedent(
"""
#!/bin/bash
cp $program_file $scratch_dir/program.c
                    cd $scratch_dir
gcc -o program $scratch_dir/program.c
if [[ $? != 0 ]]; then
exit $?
fi
cat $input_file | ./program
exit $?"""
).strip(),
textwrap.dedent(
"""
#include <stdio.h>
int main(int argc, const char* argv[]) {
}"""
),
),
model.Language(
"c++",
"clike",
True,
textwrap.dedent(
"""
#!/bin/bash
cp $program_file $scratch_dir/program.cpp
cd $scratch_dir
g++ -o program $scratch_dir/program.cpp
if [[ $? != 0 ]]; then
exit $?
fi
cat $input_file | ./program
exit $?"""
).strip(),
textwrap.dedent(
"""
#include <iostream>
int main() {
std::cout << "Hello World!";
}"""
),
),
model.Language(
"java",
"clike",
True,
textwrap.dedent(
"""
#!/bin/bash
export PATH=$PATH:/usr/lib/jvm/java-1.8-openjdk/bin
cp $program_file $scratch_dir/Main.java
cd $scratch_dir
javac Main.java
if [[ $? != 0 ]]; then
exit $?
fi
cat $input_file | java Main
exit $?"""
).strip(),
textwrap.dedent(
"""
public class Main {
public static void main(String[] args) {
}
}"""
),
),
model.Language(
"ruby",
"ruby",
True,
textwrap.dedent(
"""
#!/bin/bash
cat $input_file | ruby $program_file
exit $?"""
).strip(),
),
model.Language(
"rust",
"rust",
True,
textwrap.dedent(
"""
#!/bin/bash
cp /share/program $scratch_dir/main.rs
cd $scratch_dir
rustc $scratch_dir/main.rs
if [[ $? != 0 ]]; then
exit $?
fi
cat $input_file | ./main
exit $?"""
).strip(),
textwrap.dedent(
"""
fn main() {
}
"""
).strip(),
),
]
)
db_session.add_all(
[
model.Configuration("strict_whitespace_diffing", "False", "bool", "admin"),
model.Configuration(
"contestants_see_sample_output", "True", "bool", "defendant"
),
model.Configuration("max_user_submissions", "5", "integer", "defendant"),
model.Configuration(
"user_submission_time_limit", "1", "integer", "defendant"
),
model.Configuration(
"max_output_length", str(10 * 1024), "integer", "defendant"
),
model.Configuration(
"run_refresh_interval_millseconds", 5000, "integer", "defendant"
),
model.Configuration(
"score_refresh_interval_millseconds", 30000, "integer", "defendant"
),
model.Configuration(
"misc_refresh_interval_millseconds", 12000, "integer", "defendant"
),
model.Configuration("extra_signup_fields", "[]", "json", "defendant"),
]
)
db_session.add_all(
[model.ProblemType("input-output", '#!/bin/bash\ntest "$1" = "$2"')]
)
db_session.commit()
roles = {x.name: x for x in model.UserRole.query.all()}
db_session.add_all(
[
model.User("admin", "Admin", "pass", user_roles=[roles["operator"]]),
model.User(
"exec", "Executioner", "epass", user_roles=[roles["executioner"]]
),
]
)
db_session.commit()
# Version scraper run
with open("init_data/printver.py", "r") as f:
src_code = "\n".join(f.readlines())
executioner_user = model.User.query.filter_by(username="exec").scalar()
python = model.Language.query.filter_by(name="python").scalar()
empty_input = ""
version_contest = model.Contest(
name="version_contest",
start_time=datetime.datetime.utcnow(),
end_time=datetime.datetime.utcnow() + datetime.timedelta(hours=1),
is_public=True,
activate_time=datetime.datetime.utcnow(),
freeze_time=None,
deactivate_time=None,
)
db_session.add(version_contest)
db_session.commit()
verscrape_run = model.Run(
executioner_user,
version_contest,
python,
None,
datetime.datetime.utcnow(),
src_code,
empty_input,
empty_input,
True,
None,
)
db_session.add(verscrape_run)
db_session.commit()
def dev_populate_db():
"""Performs the initial database setup for the application
"""
current_app.logger.info("Initializing tables with dev data")
roles = {x.name: x for x in model.UserRole.query.all()}
db_session.add_all(
[
model.User(
"superuser", "SuperUser", "pass", user_roles=list(roles.values())
),
model.User(
"observer", "ObserverUser", "pass", user_roles=[roles["observer"]]
),
]
)
contestants = []
names = [
"Fred", "George", "Jenny", "Sam", "Jo", "Joe", "Sarah", "Ben", "Josiah", "Micah"
]
for i in range(1, 5):
test_contestant = model.User(
"testuser{}".format(i),
names[i - 1],
"pass",
user_roles=[roles["defendant"]],
)
db_session.add(test_contestant)
contestants.append(test_contestant)
# create test contest
now = datetime.datetime.utcnow()
test_contest = model.Contest(
name="test_contest",
start_time=now,
end_time=now + datetime.timedelta(hours=2),
is_public=True,
activate_time=now,
freeze_time=None,
deactivate_time=None,
)
test_contest.users += contestants
db_session.add(test_contest)
io_problem_type = model.ProblemType.query.filter_by(name="input-output").one()
problems = []
hello_world = model.Problem(
io_problem_type,
"hello-world",
"Hello, World!",
'Print the string "Hello, World!"',
"",
"Hello, World!",
"",
"Hello, World!",
)
problems.append(hello_world)
test_contest.problems.append(hello_world)
db_session.add(hello_world)
n = 5000
hello_worlds = model.Problem(
io_problem_type,
"hello-worlds",
"Hello, Worlds!",
'Print the string "Hello, World!" n times',
"2",
"Hello, World!\nHello, World!",
str(n),
"Hello, World!\n" * n,
)
problems.append(hello_worlds)
test_contest.problems.append(hello_worlds)
db_session.add(hello_worlds)
fizzbuzz = model.Problem(
io_problem_type,
"fizzbuzz",
"FizzBuzz",
"Perform fizzbuzz up to the given number\n\nMore info can be found [here](https://en.wikipedia.org/wiki/Fizz_buzz)",
"3",
"1\n2\nFizz",
"15",
"1\n2\nFizz\n4\nBuzz\nFizz\n7\n8\nFizz\nBuzz\n11\nFizz\n13\n14\nFizzBuzz\n",
)
problems.append(fizzbuzz)
test_contest.problems.append(fizzbuzz)
db_session.add(fizzbuzz)
fibonacci = model.Problem(
io_problem_type,
"fibonoacci",
"Fibonacci",
"Give the nth number in the Fibonacci sequence",
"4",
"3",
"5",
"5",
)
problems.append(fibonacci)
test_contest.problems.append(fibonacci)
db_session.add(fibonacci)
ext_fibonacci = model.Problem(
io_problem_type,
"ext-fib",
"Extended Fibonacci",
"Give the the numbers of the Fibonacci sequence between 0 and n, inclusive.\nIf n is positive, the range is [0,n].\nIf n is negative, the range is [n,0].",
"-3",
"2\n-1\n1\n0",
"-5",
"5\n-3\n2\n-1\n1\n0",
)
problems.append(ext_fibonacci)
test_contest.problems.append(ext_fibonacci)
db_session.add(ext_fibonacci)
# insert submissions
python = model.Language.query.filter_by(name="python").one()
solutions = {
"Hello, World!": "print('Hello, World!')",
"Hello, Worlds!": "for i in range(int(input())):\n\tprint('Hello, World!')",
"FizzBuzz": 'print("\\n".join("Fizz"*(i%3==0)+"Buzz"*(i%5==0) or str(i) for i in range(1,int(input())+1)))',
"Fibonacci": "fib = lambda n: n if n < 2 else fib(n-1) + fib(n-2)\nprint(fib(int(input())))",
"Extended Fibonacci": "print('5\\n-3\\n2\\n-1\\n1\\n0')",
}
problem_subs = []
for problem in problems:
for user in contestants:
for _ in range(2):
problem_subs.append((problem, user))
random.shuffle(problem_subs)
for problem, user in problem_subs:
src_code = solutions[problem.name]
is_submission = random.randint(1, 7) != 5
is_priority = random.randint(1, 9) == 7
is_correct = random.randint(1, 2) == 2
if not is_correct:
src_code = src_code + "\nprint('Wait this isn\\'t correct')"
test_run = model.Run(
user,
test_contest,
python,
problem,
datetime.datetime.utcnow(),
src_code,
problem.secret_input,
problem.secret_output,
is_submission,
)
test_run.is_correct = is_correct
test_run.is_priority = is_priority
test_run.state = model.RunState.JUDGING
db_session.add(test_run)
util.set_configuration("extra_signup_fields", json.dumps(["email"]))
db_session.commit()
def setup_logging(app):
"""Sets up the flask app loggers"""
formatter = logging.Formatter(
"%(asctime)s - %(filename)s - %(levelname)s - %(message)s"
)
# remove existing handlers
handlers = app.logger.handlers
for handler in handlers:
app.logger.removeHandler(handler)
if not path.isdir(path.dirname(log_location)):
os.makedirs(path.dirname(log_location))
file_handler = RotatingFileHandler(log_location, maxBytes=10000, backupCount=2)
file_handler.setLevel(logging.DEBUG)
file_handler.setFormatter(formatter)
app.logger.addHandler(file_handler)
stdout_handler = StreamHandler()
stdout_handler.setLevel(logging.DEBUG)
stdout_handler.setFormatter(formatter)
app.logger.addHandler(stdout_handler)
app = create_app()
if __name__ == "__main__":
PORT = 9191
app.logger.info("Running on port %s", PORT)
app.run(host="0.0.0.0", port=PORT, debug=True)
|
the-stack_0_18083 | import sys
sys.path.append('PythonTools')
from Constants import CONST_IDX, LINR_IDX, KAPPA_IDX, CALPHA_IDX, SQRTPLUS_IDX
from LoewnerRunFactory import LoewnerRunFactory
# Declare final time for Loewner runs
start_time = 0
cubic_final_time = 10
outer_points = 1000
inner_points = 10
compile_modules = True
save_plot = True
save_data = True
# Create lists of kappa and alpha values
kappas = [i + 0.5 for i in range(1,10)]
alphas = [i * 0.1 for i in range(1,10)]
# Make a LoewnerRunFactory
cubic_factory = LoewnerRunFactory(start_time, cubic_final_time, outer_points, inner_points, compile_modules, save_plot, save_data)
# Create a list of cubic runs
cubic_runs = cubic_factory.create_standard_runs() + cubic_factory.vary_kappa(kappas) + cubic_factory.vary_alpha(alphas) + [cubic_factory.select_single_run(index=CONST_IDX,constant=1)]
# Solve the cubic runs
for run in cubic_runs:
print("Starting cubic forward for driving function " + str(run.name))
run.cubic_forward_loewner()
print("Finished cubic forward for driving function " + str(run.name))
if run.index == KAPPA_IDX:
print("Finished cubic forward for kappa = " + str(run.kappa)[:3])
# Create a list of cubic runs for which there is an exact solution
exact_cubic_runs = cubic_factory.create_exact_cubic()
# Solve for the exact solution
for run in exact_cubic_runs:
run.exact_cubic_forward_loewner()
print("Finished exact cubic forward for driving function " + str(run.name))
|
the-stack_0_18085 | # coding: utf-8
"""
Properties
All HubSpot objects store data in default and custom properties. These endpoints provide access to read and modify object properties in HubSpot. # noqa: E501
The version of the OpenAPI document: v3
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from hubspot.crm.properties.configuration import Configuration
class StandardError(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
"status": "str",
"id": "str",
"category": "ErrorCategory",
"sub_category": "object",
"message": "str",
"errors": "list[ErrorDetail]",
"context": "dict(str, list[str])",
"links": "dict(str, str)",
}
attribute_map = {"status": "status", "id": "id", "category": "category", "sub_category": "subCategory", "message": "message", "errors": "errors", "context": "context", "links": "links"}
def __init__(self, status=None, id=None, category=None, sub_category=None, message=None, errors=None, context=None, links=None, local_vars_configuration=None): # noqa: E501
"""StandardError - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._status = None
self._id = None
self._category = None
self._sub_category = None
self._message = None
self._errors = None
self._context = None
self._links = None
self.discriminator = None
self.status = status
if id is not None:
self.id = id
self.category = category
if sub_category is not None:
self.sub_category = sub_category
self.message = message
self.errors = errors
self.context = context
self.links = links
@property
def status(self):
"""Gets the status of this StandardError. # noqa: E501
:return: The status of this StandardError. # noqa: E501
:rtype: str
"""
return self._status
@status.setter
def status(self, status):
"""Sets the status of this StandardError.
:param status: The status of this StandardError. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and status is None: # noqa: E501
raise ValueError("Invalid value for `status`, must not be `None`") # noqa: E501
self._status = status
@property
def id(self):
"""Gets the id of this StandardError. # noqa: E501
:return: The id of this StandardError. # noqa: E501
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this StandardError.
:param id: The id of this StandardError. # noqa: E501
:type: str
"""
self._id = id
@property
def category(self):
"""Gets the category of this StandardError. # noqa: E501
:return: The category of this StandardError. # noqa: E501
:rtype: ErrorCategory
"""
return self._category
@category.setter
def category(self, category):
"""Sets the category of this StandardError.
:param category: The category of this StandardError. # noqa: E501
:type: ErrorCategory
"""
if self.local_vars_configuration.client_side_validation and category is None: # noqa: E501
raise ValueError("Invalid value for `category`, must not be `None`") # noqa: E501
self._category = category
@property
def sub_category(self):
"""Gets the sub_category of this StandardError. # noqa: E501
:return: The sub_category of this StandardError. # noqa: E501
:rtype: object
"""
return self._sub_category
@sub_category.setter
def sub_category(self, sub_category):
"""Sets the sub_category of this StandardError.
:param sub_category: The sub_category of this StandardError. # noqa: E501
:type: object
"""
self._sub_category = sub_category
@property
def message(self):
"""Gets the message of this StandardError. # noqa: E501
:return: The message of this StandardError. # noqa: E501
:rtype: str
"""
return self._message
@message.setter
def message(self, message):
"""Sets the message of this StandardError.
:param message: The message of this StandardError. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and message is None: # noqa: E501
raise ValueError("Invalid value for `message`, must not be `None`") # noqa: E501
self._message = message
@property
def errors(self):
"""Gets the errors of this StandardError. # noqa: E501
:return: The errors of this StandardError. # noqa: E501
:rtype: list[ErrorDetail]
"""
return self._errors
@errors.setter
def errors(self, errors):
"""Sets the errors of this StandardError.
:param errors: The errors of this StandardError. # noqa: E501
:type: list[ErrorDetail]
"""
if self.local_vars_configuration.client_side_validation and errors is None: # noqa: E501
raise ValueError("Invalid value for `errors`, must not be `None`") # noqa: E501
self._errors = errors
@property
def context(self):
"""Gets the context of this StandardError. # noqa: E501
:return: The context of this StandardError. # noqa: E501
:rtype: dict(str, list[str])
"""
return self._context
@context.setter
def context(self, context):
"""Sets the context of this StandardError.
:param context: The context of this StandardError. # noqa: E501
:type: dict(str, list[str])
"""
if self.local_vars_configuration.client_side_validation and context is None: # noqa: E501
raise ValueError("Invalid value for `context`, must not be `None`") # noqa: E501
self._context = context
@property
def links(self):
"""Gets the links of this StandardError. # noqa: E501
:return: The links of this StandardError. # noqa: E501
:rtype: dict(str, str)
"""
return self._links
@links.setter
def links(self, links):
"""Sets the links of this StandardError.
:param links: The links of this StandardError. # noqa: E501
:type: dict(str, str)
"""
if self.local_vars_configuration.client_side_validation and links is None: # noqa: E501
raise ValueError("Invalid value for `links`, must not be `None`") # noqa: E501
self._links = links
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items()))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, StandardError):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, StandardError):
return True
return self.to_dict() != other.to_dict()
|
the-stack_0_18087 | import math
import bisect
class Song:
DIFFICULTIES = {
"ExpertSingle": "Expert Guitar",
"HardSingle": "Hard Guitar",
"MediumSingle": "Medium Guitar",
"EasySingle": "Easy Guitar",
"ExpertDoubleBass": "Expert Bass",
"HardDoubleBass": "Hard Bass",
"MediumDoubleBass": "Medium Bass",
"EasyDoubleBass": "Easy Bass",
"ExpertDoubleRhythm": "Expert Rhythm",
"HardDoubleRhythm": "Hard Rhythm",
"MediumDoubleRhythm": "Medium Rhythm",
"EasyDoubleRhythm": "Easy Rhythm",
"ExpertKeyboard": "Expert Keys",
"HardKeyboard": "Hard Keys",
"MediumKeyboard": "Medium Keys",
"EasyKeyboard": "Easy Keys",
"ExpertDrums": "Expert Drums",
"HardDrums": "Hard Drums",
"MediumDrums": "Medium Drums",
"EasyDrums": "Easy Drums"
}
def __init__(self, name, charter, resolution=192):
self.name = name if name else "Unknown"
self.charter = charter if charter else "Unknown Charter"
self.resolution = resolution
self.bpms = []
self.time_signatures = []
self.sections = []
self.charts = []
def add_section(self, section):
self.sections.append(section)
def add_bpm(self, bpm):
self.bpms.append(bpm)
def add_time_signature(self, time_signature):
self.time_signatures.append(time_signature)
def add_chart(self, chart):
self.charts.append(chart) |
the-stack_0_18089 | from random import randint
def roll_dice():
""" simulate roll dice """
results = []
for num in range(times):
result = randint(1, sides)
results.append(result)
return results
def frequencies(x):
""" calculate frequency of each time """
results = []
for num in range(3, sides*3+1):
result = x.count(num)
results.append(result)
return results
# define the die sides and roll times
sides = 6
times = 1000000
# calculate results and the frequency of each side
roll_results_1 = roll_dice()
roll_results_2 = roll_dice()
roll_results_3 = roll_dice()
roll_results = [roll_results_1[i]+roll_results_2[i]+roll_results_3[i] \
for i in range(times)]
# print(roll_results)
frequencies = frequencies(roll_results)
print(frequencies)
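# Sanity check: sums of three fair six-sided dice range from 3 to 18 with a mean of 10.5,
# so these frequencies should rise toward the middle values and fall off roughly symmetrically.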
# visualize using pygal
import pygal
# plot the chart using bars
freq_visual = pygal.Bar()
# optimize the chart
freq_visual.title = 'Rolling Results of 1,000,000 times'
freq_visual.x_labels = [str(x) for x in range(3, 19)]
freq_visual.x_title = 'Results'
freq_visual.y_title = 'Frequency'
# plot and save to file
freq_visual.add('6-side Dice *3', frequencies)
freq_visual.render_to_file('dice_three.svg') |
the-stack_0_18092 | import scrapy
import datetime
import re
import json
from ZenCrawlerSource.items import ChannelItem, ArticleItem, GalleryItem, ZencrawlersourceItem
from tqdm import tqdm
non_arbitrage = ['instagram.com', 'twitter.com', 'wikipedia.org', 'google.ru', 'vimeo', 'youtube', 'vk',
"yandex.ru/news", "yandex.ru/images"]
class Galleries:
def __init__(self, created_at, modified_at, header, url, views=-1, reads=-1, arb_link='', arbitrage=False,
zen_related=False, has_bad_text=False, had_bad_image=False, dark_post=False, native_ads=False):
self.created_at = created_at
self.modified_at = modified_at
self.header = header
self.url = url
self.views = views
self.reads = reads
self.arb_link = arb_link
self.arbitrage = arbitrage
self.zen_related = zen_related
self.has_bad_text = has_bad_text
self.had_bad_image = had_bad_image
self.dark_post = dark_post
self.native_ads = native_ads
def get_static_stats(self, response):
my_data = response.css("script#all-data::text").get().encode('utf-8').strip().decode()
try:
my_ind = my_data.index("window._data = ")
my_ind_fin = my_data.index("window._uatraits =")
except ValueError:
my_ind = my_data.index("w._data = ")
my_ind_fin = my_data.index("w._uatraits =")
my_json = json.loads(my_data[my_data[my_ind:].index("{")+my_ind:my_data[:my_ind_fin].rfind(';')])
# print(json.dumps(my_json, indent=4, sort_keys=True)) # - a tangible output
try:
self.header = my_json["publication"]["content"]["preview"]["title"]
except KeyError:
self.header = "error"
self.header = self.header.replace("'", "")
try:
datestamp = datetime.date.fromtimestamp(int(int(my_json["publication"]["addTime"])/1000))
except KeyError:
datestamp = None
try:
mod_datestamp = datetime.date.fromtimestamp(int(int(my_json["publication"]["content"]["modTime"])/1000))
except KeyError:
mod_datestamp = None
try:
self.native_ads = my_json["publication"]["hasNativeAds"]
except KeyError:
pass
try:
self.dark_post = my_json["publication"]["darkPost"]
except KeyError:
pass
search_scope = json.loads(my_json["publication"]["content"]["articleContent"]["contentState"])
link = ""
tmp = False
for i in search_scope['items']:
try:
if i['has_bad_text']:
self.has_bad_text = True
if i['had_bad_image']:
self.had_bad_image = True
for j in i["rich_text"]["json"]:
if "attribs" in j.keys():
if "href" in j["attribs"].keys():
link = j["attribs"]["href"]
for i in non_arbitrage: # check if url aren't common urls so that author is actually using some custom shop url or whatever
if (link).find(i) != -1:
tmp = True
link = ""
break
except KeyError:
pass
if_link = None or link
if if_link and (not tmp):
self.arbitrage = True
self.arb_link = if_link.replace("'", "")
if self.arb_link.find("zen.yandex.ru") != -1:
self.zen_related = True
self.created_at = datestamp
self.modified_at = mod_datestamp
class Articles:
def __init__(self, created_at, modified_at, header, url, views=-1, reads=-1, arb_link='', arbitrage=False,
streaming=False, form=False, zen_related=False, using_direct=False, has_bad_text=False,
had_bad_image=False, native_ads=False, dark_post=False):
self.created_at = created_at
self.modified_at = modified_at
self.header = header
self.url = url
self.views = views
self.reads = reads
self.arb_link = arb_link
self.arbitrage = arbitrage
self.form = form
self.streaming = streaming
self.zen_related = zen_related
self.using_direct = using_direct
self.has_bad_text = has_bad_text
self.had_bad_image = had_bad_image
self.native_ads = native_ads
self.dark_post = dark_post
def __str__(self):
return f'{str(vars(self))}'
def __repr__(self):
return f'{str(vars(self))}'
def using_form(self, response):
forms = response.css("div.yandex-forms-embed").get()
streaming = response.css("div.yandex-efir-embed").get()
y_music = response.css("div.yandex-music-embed").get()
yt = response.css("div.youtube-embed").get()
kino = response.css("div.kinopoisk-embed").get()
y_direct = response.css("div.yandex-direct-embed").get()
donation = response.css("div.yandex-wallet-iframe").get()
other_embeds = response.css("div.article-render__block_embed").getall() # bug fixed
if y_direct:
self.using_direct = True
if len(other_embeds) == 1 and y_direct:
other_embeds = None
if streaming:
self.streaming = True
elif (not (y_music or kino or yt or donation)) and (forms or other_embeds):
self.form = True
self.arbitrage = True
def is_arbitrage(self, response): # checks straight-up links
if_p = response.css("p.article-render__block a.article-link::attr(href)").get() or ""
if_blockquote = response.css("blockquote.article-render__block a.article-link::attr(href)").get() or ""
if_header = response.css("h2.article-render__block a.article-link::attr(href)").get() or response.css("h3.article-render__block a.article-link::attr(href)").get() or ""
tmp = False
for i in non_arbitrage: # check that urls are not your common urls like twitter.com etc
if (if_p + if_blockquote + if_header).find(i) != -1:
tmp = True
break
if if_p or if_blockquote or if_header and (not tmp):
self.arbitrage = True
# питонно пиздец)
self.arb_link = if_p.replace("'", "") or if_blockquote.replace("'", "") or if_header.replace("'", "")
if self.arb_link.find("zen.yandex.ru") != -1:
self.zen_related = True
self.using_form(response)
class Channels:
def __init__(self, subs, audience, url, links=None):
self.subs = int(subs)
self.audience = int(audience)
self.url = url
self.links = links or []
def __str__(self):
my_dict = vars(self)
return f'{str(my_dict)}'
@staticmethod
def parse_description(response):
desc_links = response.css("div.zen-app div.channel-header-view-desktop__description-block a::attr(href)").getall()
desc_text = response.css("div.zen-app div.channel-header-view-desktop__description-block p::text").get()
emails = None
if desc_text:
emails = re.findall("[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+", desc_text)
if emails and desc_links:
return desc_links + emails
elif emails:
return emails
elif desc_links:
return desc_links
else:
return []
def get_contacts(self, response):
contacts = response.css("div.zen-app div.social-links-view__wrapper li a::attr(href)").getall()
contacts += Channels.parse_description(response)
if contacts:
self.links = contacts
class ExampleSpider(scrapy.Spider):
name = "nightcrawler"
allowed_domains = ["zen.yandex.ru", "zen.yandex.com"]
start_urls = ["https://zen.yandex.ru/media/zen/channels"]
def parse(self, response):
for a in tqdm(response.css("div.alphabet__list a.alphabet__item::attr(href)").getall()):
if "firstChars" in a:
yield response.follow(a, callback=self.parse_by_letter, dont_filter=True)
def parse_by_letter(self, response):
channel_top = response.css("a.channel-item__link").get()
if channel_top:
next_page = response.css(
"div.pagination-prev-next__button a.pagination-prev-next__link::attr(href)").getall()
if len(next_page) > 1:
yield response.follow(next_page[-1], callback=self.parse_by_letter)
chans = response.css("a.channel-item__link::attr(href)").getall()
for chan in chans:
yield response.follow(chan, callback=self.parse_channel)
elif len(next_page) == 1:
yield response.follow(next_page[0], callback=self.parse_by_letter)
chans = response.css("a.channel-item__link::attr(href)").getall()
for chan in chans:
yield response.follow(chan, callback=self.parse_channel)
def parse_channel(self, response):
self.logger.info("Channel name: " +
response.css("div.zen-app div.channel-header-view-desktop__info-block h1 span::text").get())
default_stats = response.css("div.zen-app div.channel-info-view__block div.channel-info-view__value::text").getall()
stat_kword = response.css("div.zen-app div.channel-info-view__block div.channel-info-view__name::text").get()
# DONE implemented PC UA
if len(default_stats) == 2:
audience = int("".join(("".join(default_stats[0].split("<"))).split(" ")))
subs = int("".join(("".join(default_stats[1].split("<"))).split(" ")))
else:
if stat_kword.find("одписч") != -1:
subs = int("".join(("".join(default_stats[0].split("<"))).split(" ")))
audience = 0
elif stat_kword.find("удитори") != -1:
audience = int("".join(("".join(default_stats[0].split("<"))).split(" ")))
subs = 0
else:
audience = 0
subs = 0
chan = Channels(subs, audience, response.url)
chan.get_contacts(response)
# can move that line to top and make if statement, so we only get channels w/ articles to bd
urls = response.css("div.card-wrapper__inner a.card-image-view__clickable::attr(href)").getall()[:5]
galls = response.css("div.card-wrapper__inner a.card-gallery-desktop-view__clickable::attr(href)").getall()[:5]
# CHANGE x in [:x] for different MAX amount of articles/galleries to be fetched
self.logger.info(f"Itemizing {chan}")
my_item = self.itemize_channel(chan)
yield my_item
if chan.audience < 10000: # TODO adjust this condition if there will be not that many channels
if urls:
if urls[0].find("zen.yandex.ru"): # checking that this channel isn't a website connected to zen
yield response.follow(urls[0],
callback=self.fetch_article,
cb_kwargs=dict(other_pubs=urls[1:])
)
if galls:
yield response.follow(galls[0],
callback=self.fetch_gallery,
cb_kwargs=dict(other_pubs=galls[1:])
)
def fetch_gallery(self, response, other_pubs=None):
base_date = datetime.date(1900, 12, 12)
title = ""
pub_id = ''.join(response.url.split("?")[0].split('-')[-1])
views_req_url = f"https://zen.yandex.ru/media-api/publication-view-stat?publicationId={pub_id}"
gall = Galleries(base_date, base_date, title, response.url)
gall.get_static_stats(response)
try:
yield response.follow(views_req_url, callback=self.get_reads,
cb_kwargs=dict(publication=gall))
except Exception:
gall_item = self.itemize_gallery(gall)
yield gall_item
if other_pubs and gall.created_at > (datetime.date.today() - datetime.timedelta(days=10)):
yield from response.follow_all(other_pubs,
callback=self.fetch_gallery
)
def fetch_article(self, response, other_pubs=None):
title = response.css("div#article__page-root h1.article__title::text").get().encode('utf-8').strip()
if title:
title = title.decode().replace("'", "")
else:
title = ""
base_date = datetime.date(1900, 12, 12)
article = Articles(base_date, base_date, title, response.url)
article.is_arbitrage(response)
ExampleSpider.get_date(article, response)
pub_id = ''.join(response.url.split("?")[0].split('-')[-1])
views_req_url = f"https://zen.yandex.ru/media-api/publication-view-stat?publicationId={pub_id}"
try:
yield response.follow(views_req_url, callback=self.get_reads, cb_kwargs=dict(publication=article))
except Exception:
art_item = self.itemize_article(article)
yield art_item
if other_pubs and article.created_at > (datetime.date.today() - datetime.timedelta(days=10)):
yield from response.follow_all(other_pubs,
callback=self.fetch_article
)
def get_reads(self, response, publication):
reads = -1
views = -1
resp_string = response.text
try:
my_dict = json.loads(resp_string)
if my_dict:
reads = my_dict["viewsTillEnd"]
views = my_dict["views"]
except json.decoder.JSONDecodeError:
self.logger.warning("Error in getting reads due to json.decoder.JSONDecodeError")
except NameError:
self.logger.warning("Error in getting reads due to NameError")
publication.reads = reads
publication.views = views
if isinstance(publication, Articles):
item = self.itemize_article(publication)
else:
item = self.itemize_gallery(publication)
yield item
@staticmethod
def itemize_channel(channel):
item = ChannelItem(
subs=channel.subs,
audience=channel.audience,
url=channel.url,
contacts=channel.links,
last_checked=datetime.datetime.now()
)
return item
@staticmethod
def itemize_article(article):
item = ArticleItem(
created_at=article.created_at,
modified_at=article.modified_at,
header=article.header,
url=article.url,
views=article.views,
reads=article.reads,
arb_link=article.arb_link,
arbitrage=article.arbitrage,
form=article.form,
streaming=article.streaming,
zen_related=article.zen_related,
has_bad_text=article.has_bad_text,
had_bad_image=article.had_bad_image,
native_ads=article.native_ads,
            dark_post=article.dark_post
)
return item
@staticmethod
def itemize_gallery(gallery):
item = GalleryItem(
created_at=gallery.created_at,
modified_at=gallery.modified_at,
header=gallery.header,
url=gallery.url,
views=gallery.views,
reads=gallery.reads,
arb_link=gallery.arb_link,
arbitrage=gallery.arbitrage,
zen_related=gallery.zen_related,
has_bad_text=gallery.has_bad_text,
had_bad_image=gallery.had_bad_image,
native_ads=gallery.native_ads,
            dark_post=gallery.dark_post
)
return item
@staticmethod
def get_date(publication, response):
try:
my_data = response.css("script#all-data::text").get().encode('utf-8').strip().decode()
try:
my_ind = my_data.index("window._data = ")
my_ind_fin = my_data.index("window._uatraits =")
except ValueError:
my_ind = my_data.index("w._data = ")
my_ind_fin = my_data.index("w._uatraits =")
my_json = json.loads(my_data[my_data[my_ind:].index("{") + my_ind:my_data[:my_ind_fin].rfind(';')])
publication.created_at = datetime.date.fromtimestamp(int(int(my_json["publication"]["addTime"]) / 1000))
publication.modified_at = datetime.date.fromtimestamp(int(int(my_json["publication"]["content"]["modTime"]) / 1000))
except Exception:
d_str = response.css("footer.article__statistics span.article-stat__date::text").get()
if not d_str:
d_str = response.css("footer.article__statistics div::text").get()
publication.created_at = publication.modified_at = ExampleSpider.get_date_old(d_str)
del d_str
else:
try:
publication.native_ads = my_json["publication"]["hasNativeAds"]
except KeyError:
pass
try:
publication.dark_post = my_json["publication"]["darkPost"]
except KeyError:
pass
search_scope = json.loads(my_json["publication"]["content"]["articleContent"]["contentState"])
try:
for i in search_scope['items']:
try:
if i['has_bad_text']:
publication.has_bad_text = True
if i['had_bad_image']:
publication.had_bad_image = True
except Exception:
pass
except KeyError:
publication.has_bad_text = False
publication.had_bad_image = False
@staticmethod
def get_date_old(datestring):
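        # Handles Russian date strings such as "12\xa0января\xa02020" (day, month name and an
        # optional year separated by non-breaking spaces), plus "сегодня", "вчера" and relative
        # forms like "3 дня назад"; unmatched strings fall back to the 1900-12-12 placeholder.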
elements = datestring.lower().split("\xa0")
final_date = datetime.datetime(1900, 12, 12)
# datestring.lower().find('ago') == -1 and datestring.lower().find('day') == -1 and
if datestring.lower().find('дня') == -1 and datestring.lower().find('чера') ==-1 and datestring.lower().find('назад') == -1:
        # yesterday, today, 3 days ago - all handled here
# months = ['january', 'february', 'march', 'april',
# 'may', 'june', 'july', 'august',
# 'september', 'october', 'november', 'december']
months = ["января", "февраля", "марта", "апреля", "мая", "июня", "июля", "августа", "сентября", "октября",
"ноября", "декабря"]
month = months.index(elements[1]) + 1
if len(elements) < 3:
                final_date = datetime.datetime(2020, month, int(elements[0]), 4, 20, 0, 0) # WARNING: year defaults to 2020 when missing
else:
final_date = datetime.datetime(int(elements[2]), month, int(elements[0]), 4, 20, 0, 0)
# datestring.lower().find('today') != -1 or
elif datestring.lower().find('егодня') != -1: # TODO fix default time
final_date = datetime.datetime.now()
# datestring.lower().find('yesterday') != -1 or
elif datestring.lower().find('чера') != -1:
tmp = datetime.datetime.now()
final_date = datetime.datetime(tmp.year, tmp.month, tmp.day - 1, 4, 20, 0, 0)
elif datestring.lower().find('назад') != -1:
tmp = datetime.datetime.now()
if elements[0] != "год":
shift = int(elements[0])
if elements[1] == 'дня' or elements[1] == 'дней':
final_date = datetime.datetime(tmp.year, tmp.month, tmp.day - shift)
elif elements[1] == 'года' or elements[1] == 'лет':
final_date = datetime.datetime(tmp.year - shift, tmp.month, tmp.day)
elif elements[1] == 'месяцев':
final_date = datetime.datetime(tmp.year, tmp.month - shift, tmp.day)
else:
final_date = datetime.datetime(tmp.year-1, tmp.month, tmp.day)
return final_date
def closed(self, reason="Done"):
self.logger.warning(reason)
class MySpider(scrapy.Spider): # a spider to check if proxies are actually hiding us
name = "ip_spider"
allowed_domains = ["httpbin.org"]
start_urls = ["https://httpbin.org/ip"]
def parse(self, response):
print("Visible IP: " + json.loads(response)["origin"])
|
the-stack_0_18093 | """Module __main__. Entry point."""
__author__ = 'Joan A. Pinol (japinol)'
__version__ = '0.0.1'
from argparse import ArgumentParser
import gc
import traceback
import pygame as pg
from codemaster.game_entry_point import Game
from codemaster.config.settings import logger
from codemaster import screens
def main():
"""Entry point of The CodeMaster program."""
# Parse optional arguments from the command line
parser = ArgumentParser(description="The CodeMaster. Nightmare on Bots' Island.",
prog="codemaster",
usage="%(prog)s [-h] [-d] [-t]")
parser.add_argument('-d', '--debug', default=None, action='store_true',
help='Debug actions, information and traces')
parser.add_argument('-t', '--debugtraces', default=None, action='store_true',
help='Show debug back traces information when something goes wrong')
args = parser.parse_args()
pg.init()
pg.mouse.set_visible(False)
is_music_paused = False
# Multiple games loop
while not Game.is_exit_game:
try:
game = Game(is_debug=args.debug)
game.is_music_paused = is_music_paused
screen_start_game = screens.StartGame(game)
while game.is_start_screen:
screen_start_game.start_up()
if not Game.is_exit_game:
game.start()
is_music_paused = game.is_music_paused
del screen_start_game
del game
gc.collect()
except FileNotFoundError as e:
if args.debugtraces or args.debug:
traceback.print_tb(e.__traceback__)
logger.critical(f'File not found error: {e}')
break
except Exception as e:
if args.debugtraces or args.debug:
traceback.print_tb(e.__traceback__)
logger.critical(f'Error: {e}')
break
pg.quit()
if __name__ == '__main__':
main()
|
the-stack_0_18097 | import os
from glob import glob
import json
root_path = "/SSD/ILSVRC_2012_ImageFolder/train" # 디렉토리 시작은 /, 디렉토리 끝은 아무것도 없는것으로 지정해줘야함
labels = os.listdir(root_path)
print(labels)
num_labels = len(labels)
print(num_labels)
label_check_split = len(root_path.split('/'))
print(label_check_split)
init_data = {"labels": []}
result = [y for x in os.walk(root_path) for y in glob(os.path.join(x[0], '*'))]
for i in result:
if '.jpg' in i or '.JPG' in i or '.png' in i or '.PNG' in i or '.jpeg' in i or '.JPEG' in i:
for j in range(num_labels):
if i.split('/')[label_check_split] == labels[j]:
init_data["labels"].append([i.replace(root_path+'/', ''), j])
with open (root_path+"/dataset.json", 'w') as json_file:
json.dump(init_data, json_file) |
the-stack_0_18098 | import random
import asyncio
import graphene
from graphql_ws.pubsub import AsyncioPubsub
pubsub = AsyncioPubsub()
class Query(graphene.ObjectType):
base = graphene.String()
async def resolve_base(root, info):
return 'Hello World!'
class MutationExample(graphene.Mutation):
class Arguments:
input_text = graphene.String()
output_text = graphene.String()
async def mutate(root, info, input_text):
await pubsub.publish('BASE', input_text)
return MutationExample(output_text=input_text)
class Mutations(graphene.ObjectType):
mutation_example = MutationExample.Field()
class RandomType(graphene.ObjectType):
seconds = graphene.Int()
random_int = graphene.Int()
class Subscription(graphene.ObjectType):
count_seconds = graphene.Float(up_to=graphene.Int())
random_int = graphene.Field(RandomType)
mutation_example = graphene.String()
async def resolve_mutation_example(root, info):
try:
sub_id, q = pubsub.subscribe_to_channel('BASE')
while True:
payload = await q.get()
yield payload
finally:
pubsub.unsubscribe('BASE', sub_id)
async def resolve_count_seconds(root, info, up_to=5):
for i in range(up_to):
print("YIELD SECOND", i)
yield i
await asyncio.sleep(1.)
yield up_to
async def resolve_random_int(root, info):
i = 0
while True:
yield RandomType(seconds=i, random_int=random.randint(0, 500))
await asyncio.sleep(1.)
i += 1
schema = graphene.Schema(query=Query, mutation=Mutations,
subscription=Subscription)
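# For reference, a client would exercise this schema with documents such as (illustrative;
# graphene exposes the snake_case fields above in camelCase by default):
#   subscription { countSeconds(upTo: 3) }
#   subscription { mutationExample }
#   mutation { mutationExample(inputText: "hello") { outputText } }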
|
the-stack_0_18102 | import pytest
numpy = pytest.importorskip("numpy")
scipy = pytest.importorskip("scipy")
import networkx as nx
from networkx.algorithms.centrality.subgraph_alg import (
estrada_index,
communicability_betweenness_centrality,
subgraph_centrality,
subgraph_centrality_exp,
)
from networkx.testing import almost_equal
class TestSubgraph:
def test_subgraph_centrality(self):
answer = {0: 1.5430806348152433, 1: 1.5430806348152433}
result = subgraph_centrality(nx.path_graph(2))
for k, v in result.items():
assert almost_equal(answer[k], result[k], places=7)
answer1 = {
"1": 1.6445956054135658,
"Albert": 2.4368257358712189,
"Aric": 2.4368257358712193,
"Dan": 3.1306328496328168,
"Franck": 2.3876142275231915,
}
G1 = nx.Graph(
[
("Franck", "Aric"),
("Aric", "Dan"),
("Dan", "Albert"),
("Albert", "Franck"),
("Dan", "1"),
("Franck", "Albert"),
]
)
result1 = subgraph_centrality(G1)
for k, v in result1.items():
assert almost_equal(answer1[k], result1[k], places=7)
result1 = subgraph_centrality_exp(G1)
for k, v in result1.items():
assert almost_equal(answer1[k], result1[k], places=7)
def test_subgraph_centrality_big_graph(self):
g199 = nx.complete_graph(199)
g200 = nx.complete_graph(200)
comm199 = nx.subgraph_centrality(g199)
comm199_exp = nx.subgraph_centrality_exp(g199)
comm200 = nx.subgraph_centrality(g200)
comm200_exp = nx.subgraph_centrality_exp(g200)
def test_communicability_betweenness_centrality(self):
answer = {
0: 0.07017447951484615,
1: 0.71565598701107991,
2: 0.71565598701107991,
3: 0.07017447951484615,
}
result = communicability_betweenness_centrality(nx.path_graph(4))
for k, v in result.items():
assert almost_equal(answer[k], result[k], places=7)
answer1 = {
"1": 0.060039074193949521,
"Albert": 0.315470761661372,
"Aric": 0.31547076166137211,
"Dan": 0.68297778678316201,
"Franck": 0.21977926617449497,
}
G1 = nx.Graph(
[
("Franck", "Aric"),
("Aric", "Dan"),
("Dan", "Albert"),
("Albert", "Franck"),
("Dan", "1"),
("Franck", "Albert"),
]
)
result1 = communicability_betweenness_centrality(G1)
for k, v in result1.items():
assert almost_equal(answer1[k], result1[k], places=7)
def test_estrada_index(self):
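        # The Estrada index is EE(G) = sum_j exp(lambda_j) over the eigenvalues lambda_j of the
        # adjacency matrix; the constant below is that sum for the 34-node karate club graph.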
answer = 1041.2470334195475
result = estrada_index(nx.karate_club_graph())
assert almost_equal(answer, result, places=7)
|
the-stack_0_18105 | from django.core.management import call_command
from feder.cases.factories import CaseFactory
from feder.letters.factories import IncomingLetterFactory, OutgoingLetterFactory
from django.test import TestCase
from feder.letters.models import Letter
from io import StringIO
class FixDuplicateMailTestCase(TestCase):
def test_delete_only_duplicated(self):
case = CaseFactory()
in1 = IncomingLetterFactory(record__case=case)
in2 = IncomingLetterFactory(record__case=case)
ou1 = OutgoingLetterFactory(record__case=case)
in_static_id = IncomingLetterFactory(
record__case=case, eml__msg_id="[email protected]"
)
in_dupe_id = IncomingLetterFactory(
record__case=case, eml__msg_id="[email protected]"
)
stdout = StringIO()
call_command(
"fix_duplicate_mail",
"--monitoring-pk={}".format(case.monitoring.pk),
"--delete",
stdout=stdout,
)
self.assertTrue(Letter.objects.filter(pk=in1.id).exists())
self.assertTrue(Letter.objects.filter(pk=in2.id).exists())
self.assertTrue(Letter.objects.filter(pk=ou1.id).exists())
self.assertTrue(Letter.objects.filter(pk=in_static_id.id).exists())
self.assertFalse(Letter.objects.filter(pk=in_dupe_id.id).exists())
def test_delete_only_when_force_delete(self):
case = CaseFactory()
IncomingLetterFactory(record__case=case, eml__msg_id="[email protected]")
in_dupe_id = IncomingLetterFactory(
record__case=case, eml__msg_id="[email protected]"
)
stdout = StringIO()
call_command(
"fix_duplicate_mail",
"--monitoring-pk={}".format(case.monitoring.pk),
stdout=stdout,
)
self.assertTrue(Letter.objects.filter(pk=in_dupe_id.id).exists())
call_command(
"fix_duplicate_mail",
"--monitoring-pk={}".format(case.monitoring.pk),
"--delete",
stdout=stdout,
)
self.assertFalse(Letter.objects.filter(pk=in_dupe_id.id).exists())
|
the-stack_0_18106 | # -*- coding: utf-8 -*-
"""
Inverse Logistic Regression Recommender
Created on 2019
@author: Alex Xu <[email protected]>
"""
import numpy as np
import pandas as pd
class InverseLogisticRegressionRecommender:
"""
Predict or recommend a select feature value, so that it can be classified
as a select class. Based on binary logistic regression coefficients, other
feature values.
Parameters
----------
df : pandas.dataframe
y : str
Dependent or Target Variable with binary positive and negative
class within 'df'
coefs : list
List of floats corresponding to the logistic regression binary
classification coefficients. Should correspond with non-target
column names of "df", i.e. Feature Column 0's regression
coefficient is coefs[0], and so on.
Attributes
----------
df : pandas.dataframe
From __init__
y : str
From __init__
coefs : list
From __init__
mean_feature_values : pandas.dataframe
Mean feature values by target variable class
interim_logits : list
Interim logits created by mean feature values matrix multiplied by
logistic regression coefficients. Used to create distinction between
classes to be used by recommender
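    Examples
    --------
    Illustrative sketch only; the column names, labels, and coefficients below
    are hypothetical, not taken from a fitted model.

    >>> import pandas as pd
    >>> df = pd.DataFrame({'f1': [0.2, 0.8], 'f2': [1.0, 3.0], 'label': [0, 1]})
    >>> rec = InverseLogisticRegressionRecommender(df, 'label', coefs=[0.5, 1.2])
    >>> f1_needed = rec.predict('f1', target_class=1, feature_values=[2.0])  # ~3.2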
"""
def __init__(self, df, y, coefs):
self.df = df
self.y = y
self.coefs = coefs
# Compute mean feature values by dependent variable class
self.mean_feature_values = df.groupby(y).mean()
# Compute interim logits using mean feature values of each dependent
# variable class
self.interim_logits = self.mean_feature_values.\
apply(lambda row: np.dot(row, coefs), axis=1).tolist()
def predict(self, predict_column, target_class, feature_values):
"""
Predict minimum/maximum column value, given a row of other fixed
feature values, to reach a desired class
Parameters
----------
predict_column : str
Name of feature value column to recommend and predict on
target_class : bool
Representing desired class, which 'predict_column' will be tuned
for. 1 or True is positive label. 0 or False is negative label.
feature_values : list
List of numerics representing fixed feature values, which
'predict_column' and 'target_class' will predict on
Returns
-------
prediction : float
Predicted column value to achieve desired class, when all other
features are held constant
"""
# Create a copy of coefficients list
coefs = self.coefs.copy()
# Get index position of column to predict on by feature values
# self.df column names
predict_column_index = self.df.drop(self.y, axis=1).columns.\
tolist().index(predict_column)
# Extract predict column coefficient, and remove it from feature
# value coefficients
predict_column_coef = coefs[predict_column_index]
del coefs[predict_column_index]
# Compute prediction of feature value given interim logits for
# specified class, feature values
prediction = (self.interim_logits[target_class] - np.dot(feature_values, coefs))/predict_column_coef
return(prediction)
def predict_df(self, original_target=True, rows='all', columns='all'):
"""
Approximate all or specified dataset feature values using inverse
logistic regression classification. Based on true target class labels
, and true feature values. Could be used to evaluate accuracy of
recommendations
OR
Predict all or specified dataset feature values using inverse logistic
regression classification to achieve opposite target class labels,
using true feature values.
Parameters
----------
self
original_target : bool, optional
if True:
'approximate' to make feature value recommendations
using the true label
if False:
'predict' to make feature value recommendations
                using the opposite of the true label
rows : str (default) or list, optional
Rows/Indices to include for approximation/prediction
columns : str (default) or list, optional
Columns to approximate or predict
Returns
-------
approximation : pandas.dataframe
Prediction/Approximation/Recommendation of every feature value
, given original other feature values, to achieve specified
class labels
"""
## Recommendation Strategy: Predicting or Approximating
# Predicting would be predicting feature values that would achieve
# opposite to original class labels
if original_target==False:
# Function for opposite of original class label
target = lambda row: int(not int(row[self.y]))
# Approximating would be predicting feature values that would achieve
# original class labels
elif original_target==True:
# Function for original class label
target = lambda row: int(row[self.y])
## Rows
if rows=='all':
# Rows to include will be all of them
rows_to_include = self.df.index.tolist()
else:
rows_to_include = rows
## Columns
if columns=='all':
# Columns to approximate will be all of them
col_to_approx = self.df.columns.tolist()
else:
# Columns to approximate will be only what is specified
col_to_approx = columns
# Create function that approximates feature values by the row
def predict_row(self, row):
# Iterate over all col_to_approx column names
# If column iterated is target variable, then return target value
col_predict = list(map(lambda col: int(row[self.y]) if col==self.y
# Else, run ilrc recommender to approximate features, based on
# Other features and specified class labels
else InverseLogisticRegressionRecommender.\
predict(self, col, target(row),
row.loc[(row.index != col) & (row.index != self.y)].tolist()),
col_to_approx))
return(col_predict)
# Iterate function above over specified or all rows of dataset
approximation = pd.DataFrame(self.df[self.df.index.isin(rows_to_include)].\
apply(lambda row: predict_row(self, row), axis=1).\
tolist())
approximation.columns = col_to_approx
return(approximation) |
the-stack_0_18108 | # ViT Online Class
# Author: Dr. Zhu
# Project: PaddleViT (https://github.com/BR-IDL/PaddleViT)
# 2021.11
import paddle
import paddle.nn as nn
import numpy as np
from PIL import Image
paddle.set_device('cpu')
class Identity(nn.Layer):
def __init__(self):
super().__init__()
def forward(self, x):
return x
class Mlp(nn.Layer):
def __init__(self, embed_dim, mlp_ratio=4.0, dropout=0.):
super().__init__()
self.fc1 = nn.Linear(embed_dim, int(embed_dim * mlp_ratio))
self.fc2 = nn.Linear(int(embed_dim * mlp_ratio), embed_dim)
self.act = nn.GELU()
self.dropout = nn.Dropout(dropout)
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.dropout(x)
x = self.fc2(x)
x = self.dropout(x)
return x
class PatchEmbedding(nn.Layer):
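    # Shape sketch (illustrative): with image_size=224, patch_size=7, in_channels=3
    # and embed_dim=16 (the values used by ViT below), an input [n, 3, 224, 224]
    # is mapped to [n, 32*32, 16] = [n, 1024, 16] patch tokens.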
def __init__(self, image_size, patch_size, in_channels, embed_dim, dropout=0.):
super().__init__()
self.patch_embedding = nn.Conv2D(in_channels=in_channels,
out_channels=embed_dim,
kernel_size=patch_size,
stride=patch_size,
weight_attr=paddle.ParamAttr(initializer=nn.initializer.Constant(1.0)),
bias_attr=False)
self.dropout = nn.Dropout(dropout)
def forward(self, x):
# [n, c, h, w]
x = self.patch_embedding(x) # [n, c', h', w']
x = x.flatten(2) # [n, c', h'*w']
x = x.transpose([0, 2, 1]) # [n, h'*w', c']
x = self.dropout(x)
return x
class Attention(nn.Layer):
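    # NOTE: placeholder layer -- forward() simply returns its input unchanged,
    # so this ViT sketch does not yet apply real self-attention.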
def __init__(self):
super().__init__()
def forward(self, x):
return x
class EncoderLayer(nn.Layer):
def __init__(self, embed_dim):
super().__init__()
self.attn_norm = nn.LayerNorm(embed_dim)
self.attn = Attention()
self.mlp_norm = nn.LayerNorm(embed_dim)
self.mlp = Mlp(embed_dim)
def forward(self, x):
h = x
x = self.attn_norm(x)
x = self.attn(x)
x = x + h
h = x
x = self.mlp_norm(x)
x = self.mlp(x)
x = x + h
return x
class ViT(nn.Layer):
def __init__(self):
super().__init__()
self.patch_embed = PatchEmbedding(224, 7, 3, 16)
layer_list = [EncoderLayer(16) for i in range(5)]
self.encoders = nn.LayerList(layer_list)
self.head = nn.Linear(16, 10)
self.avgpool = nn.AdaptiveAvgPool1D(1)
def forward(self, x):
x = self.patch_embed(x) # [n, h*w, c]: 4, 1024, 16
for encoder in self.encoders:
x = encoder(x)
# avg
x = x.transpose([0, 2, 1])
x = self.avgpool(x)
x = x.flatten(1)
x = self.head(x)
return x
def main():
t = paddle.randn([4, 3, 224, 224])
model = ViT()
out = model(t)
print(out.shape)
if __name__ == "__main__":
main()
|
the-stack_0_18109 | import numpy as np
# from openbox.utils.config_space import ConfigurationSpace, UniformFloatHyperparameter, UniformIntegerHyperparameter, Constant
from ConfigSpace import ConfigurationSpace, UniformFloatHyperparameter, UniformIntegerHyperparameter, \
Constant, CategoricalHyperparameter, InCondition, EqualsCondition, UnParametrizedHyperparameter, \
ForbiddenEqualsClause, ForbiddenInClause, ForbiddenAndConjunction
def get_problem(problem_str, **kwargs):
# problem_str = problem_str.lower() # dataset name may be uppercase
if problem_str == 'branin':
problem = Branin
elif problem_str.startswith('ackley'):
problem = Ackley
params = problem_str.split('-')
if len(params) == 1:
dim = 2
elif len(params) == 2:
dim = int(params[1])
else:
raise ValueError
kwargs['dim'] = dim
elif problem_str == 'beale':
problem = Beale
elif problem_str.startswith('hartmann'):
problem = Hartmann6d
elif 'lgb' in problem_str:
problem = lgb
kwargs['dataset'] = '_'.join(problem_str.split('_')[1:])
elif 'svc' in problem_str:
problem = svc
kwargs['dataset'] = '_'.join(problem_str.split('_')[1:])
else:
raise ValueError('Unknown problem_str %s.' % problem_str)
return problem(**kwargs)
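# Minimal usage sketch (illustrative; assumes the optional dependencies such as
# ConfigSpace are installed and the problem classes defined below are available):
#
#     prob = get_problem('branin')
#     cs = prob.get_configspace(optimizer='smac')
#     config = cs.sample_configuration()
#     y = prob.evaluate_config(config, optimizer='smac')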
class BaseSingleObjectiveProblem:
def __init__(self, dim, **kwargs):
self.dim = dim
def evaluate_config(self, config, optimizer='smac'):
raise NotImplementedError
def evaluate(self, X: np.ndarray):
raise NotImplementedError
@staticmethod
def get_config_dict(config, optimizer='smac'):
if optimizer == 'smac':
config_dict = config.get_dictionary()
elif optimizer == 'tpe':
config_dict = config
else:
raise ValueError('Unknown optimizer %s' % optimizer)
return config_dict
@staticmethod
def checkX(X: np.ndarray):
X = np.atleast_2d(X)
assert len(X.shape) == 2 and X.shape[0] == 1
X = X.flatten()
return X
def get_configspace(self, optimizer='smac'):
raise NotImplementedError
def load_data(self, **kwargs):
from test.reproduction.test_utils import load_data
from sklearn.model_selection import train_test_split
dataset = kwargs['dataset']
try:
data_dir = kwargs.get('data_dir', '../soln-ml/data/cls_datasets/')
x, y = load_data(dataset, data_dir)
except Exception as e:
data_dir = '../../soln-ml/data/cls_datasets/'
x, y = load_data(dataset, data_dir)
self.train_x, self.val_x, self.train_y, self.val_y = train_test_split(x, y, stratify=y, random_state=1,
test_size=0.3)
class Ackley(BaseSingleObjectiveProblem):
optimal_value = 0.0
def __init__(self, dim=2, lb=-15, ub=30, **kwargs):
super().__init__(dim=dim, **kwargs)
self.lb = lb
self.ub = ub
self.bounds = [(self.lb, self.ub)] * self.dim
def evaluate_config(self, config, optimizer='smac'):
config_dict = self.get_config_dict(config, optimizer)
x_list = [config_dict['x%d' % i] for i in range(self.dim)]
X = np.array(x_list)
return self.evaluate(X)
def evaluate(self, X: np.ndarray):
X = self.checkX(X)
a = 20
b = 0.2
c = 2 * np.pi
t1 = -a * np.exp(-b * np.sqrt(np.mean(X ** 2)))
t2 = -np.exp(np.mean(np.cos(c * X)))
t3 = a + np.exp(1)
y = t1 + t2 + t3
return y
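    # Analytic sanity check: at the origin t1 = -a, t2 = -e and t3 = a + e,
    # so evaluate(np.zeros(dim)) returns 0.0, matching optimal_value above.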
def get_configspace(self, optimizer='smac'):
if optimizer == 'smac':
cs = ConfigurationSpace()
for i in range(self.dim):
xi = UniformFloatHyperparameter("x%d" % i, self.lb, self.ub)
cs.add_hyperparameter(xi)
return cs
elif optimizer == 'tpe':
from hyperopt import hp
space = {'x%d' % i: hp.uniform('hp_x%d' % i, self.lb, self.ub) for i in range(self.dim)}
return space
elif optimizer == 'gpflowopt':
import gpflowopt
domain = np.sum([
gpflowopt.domain.ContinuousParameter('x%d' % i, self.lb, self.ub) for i in range(self.dim)
])
return domain
else:
raise ValueError('Unknown optimizer %s when getting configspace' % optimizer)
class Beale(BaseSingleObjectiveProblem):
optimal_value = 0.0
def __init__(self, lb=-4.5, ub=4.5, **kwargs):
super().__init__(dim=2, **kwargs)
self.lb = lb
self.ub = ub
self.bounds = [(self.lb, self.ub)] * self.dim
def evaluate_config(self, config, optimizer='smac'):
config_dict = self.get_config_dict(config, optimizer)
x_list = [config_dict['x%d' % i] for i in range(self.dim)]
X = np.array(x_list)
return self.evaluate(X)
def evaluate(self, X: np.ndarray):
X = self.checkX(X)
x1 = X[0]
x2 = X[1]
part1 = (1.5 - x1 + x1 * x2) ** 2
part2 = (2.25 - x1 + x1 * x2 ** 2) ** 2
part3 = (2.625 - x1 + x1 * x2 ** 3) ** 2
y = part1 + part2 + part3
return y
def get_configspace(self, optimizer='smac'):
if optimizer == 'smac':
cs = ConfigurationSpace()
for i in range(self.dim):
xi = UniformFloatHyperparameter("x%d" % i, self.lb, self.ub)
cs.add_hyperparameter(xi)
return cs
elif optimizer == 'tpe':
from hyperopt import hp
space = {'x%d' % i: hp.uniform('hp_x%d' % i, self.lb, self.ub) for i in range(self.dim)}
return space
elif optimizer == 'gpflowopt':
import gpflowopt
domain = (
gpflowopt.domain.ContinuousParameter('x0', self.lb, self.ub) +
gpflowopt.domain.ContinuousParameter('x1', self.lb, self.ub)
)
return domain
else:
raise ValueError('Unknown optimizer %s when getting configspace' % optimizer)
class Branin(BaseSingleObjectiveProblem):
"""
    y = (x2 - (5.1/(4*pi^2))*x1^2 + 5*x1/pi - 6)^2 + 10*(1 - 1/(8*pi))*cos(x1) + 10
"""
optimal_value = 0.397887
optimal_point = [(-np.pi, 12.275), (np.pi, 2.275), (9.42478, 2.475)]
def __init__(self, **kwargs):
super().__init__(dim=2, **kwargs)
self.bounds = [(-5.0, 10.0), (0.0, 15.0)]
def evaluate_config(self, config, optimizer='smac'):
config_dict = self.get_config_dict(config, optimizer)
x1 = config_dict['x1']
x2 = config_dict['x2']
X = np.array([x1, x2])
return self.evaluate(X)
def evaluate(self, X: np.ndarray):
X = self.checkX(X)
x1 = X[0]
x2 = X[1]
y = (x2 - (5.1 / (4 * np.pi ** 2)) * x1 ** 2 + 5 * x1 / np.pi - 6) ** 2 + 10 * (1 - 1 / (8 * np.pi)) * np.cos(
x1) + 10
return y
def get_configspace(self, optimizer='smac'):
if optimizer == 'smac':
cs = ConfigurationSpace()
x1 = UniformFloatHyperparameter("x1", -5, 10)
x2 = UniformFloatHyperparameter("x2", 0, 15)
cs.add_hyperparameters([x1, x2])
return cs
elif optimizer == 'tpe':
from hyperopt import hp
space = {'x1': hp.uniform('hp_x1', -5, 10),
'x2': hp.uniform('hp_x2', 0, 15),
}
return space
elif optimizer == 'gpflowopt':
import gpflowopt
domain = (
gpflowopt.domain.ContinuousParameter('x1', -5, 10) +
gpflowopt.domain.ContinuousParameter('x2', 0, 15)
)
return domain
else:
raise ValueError('Unknown optimizer %s when getting configspace' % optimizer)
class Hartmann6d(BaseSingleObjectiveProblem):
optimal_value = -3.86278
def __init__(self, **kwargs):
super().__init__(dim=6, **kwargs)
self.bounds = [(0.0, 1.0)] * self.dim
self.a = np.array([
[10, 3, 17, 3.5, 1.7, 8],
[0.05, 10, 17, 0.1, 8, 14],
[3, 3.5, 1.7, 10, 17, 8],
[17, 8, 0.05, 10, 0.1, 14],
])
self.c = np.array([1.0, 1.2, 3.0, 3.2])
self.p = np.array([
[0.1312, 0.1696, 0.5569, 0.0124, 0.8283, 0.5886],
[0.2329, 0.4135, 0.8307, 0.3736, 0.1004, 0.9991],
[0.2348, 0.1451, 0.3522, 0.2883, 0.3047, 0.6650],
[0.4047, 0.8828, 0.8732, 0.5743, 0.1091, 0.0381],
])
def evaluate_config(self, config, optimizer='smac'):
config_dict = self.get_config_dict(config, optimizer)
x_list = [config_dict['x%d' % i] for i in range(self.dim)]
X = np.array(x_list)
return self.evaluate(X)
def evaluate(self, X: np.ndarray):
X = self.checkX(X)
inner_sum = np.sum(self.a * (X - self.p) ** 2, axis=1)
y = -np.sum(self.c * np.exp(-inner_sum))
return y
def get_configspace(self, optimizer='smac'):
if optimizer == 'smac':
cs = ConfigurationSpace()
for i in range(self.dim):
xi = UniformFloatHyperparameter("x%d" % i, 0, 1)
cs.add_hyperparameter(xi)
return cs
elif optimizer == 'tpe':
from hyperopt import hp
space = {'x%d' % i: hp.uniform('hp_x%d' % i, 0, 1) for i in range(self.dim)}
return space
elif optimizer == 'gpflowopt':
import gpflowopt
domain = (
gpflowopt.domain.ContinuousParameter('x0', 0, 1) +
gpflowopt.domain.ContinuousParameter('x1', 0, 1) +
gpflowopt.domain.ContinuousParameter('x2', 0, 1) +
gpflowopt.domain.ContinuousParameter('x3', 0, 1) +
gpflowopt.domain.ContinuousParameter('x4', 0, 1) +
gpflowopt.domain.ContinuousParameter('x5', 0, 1)
)
return domain
else:
raise ValueError('Unknown optimizer %s when getting configspace' % optimizer)
class lgb(BaseSingleObjectiveProblem):
def __init__(self, n_jobs=3, **kwargs):
super().__init__(dim=7, **kwargs)
self.n_jobs = n_jobs
self.load_data(**kwargs)
self.bounds = [
(100, 1000),
(31, 2047),
(15, 16),
(1e-3, 0.3),
(5, 30),
(0.7, 1),
(0.7, 1),
]
def evaluate_config(self, config, optimizer='smac'):
config_dict = self.get_config_dict(config, optimizer)
n_estimators = int(config_dict['n_estimators'])
num_leaves = int(config_dict['num_leaves'])
max_depth = int(config_dict['max_depth'])
learning_rate = config_dict['learning_rate']
min_child_samples = config_dict['min_child_samples']
subsample = config_dict['subsample']
colsample_bytree = config_dict['colsample_bytree']
from lightgbm import LGBMClassifier
from sklearn.metrics.scorer import balanced_accuracy_scorer
lgbc = LGBMClassifier(n_estimators=n_estimators,
num_leaves=num_leaves,
max_depth=max_depth,
learning_rate=learning_rate,
min_child_samples=min_child_samples,
subsample=subsample,
colsample_bytree=colsample_bytree,
n_jobs=self.n_jobs)
lgbc.fit(self.train_x, self.train_y)
return -balanced_accuracy_scorer(lgbc, self.val_x, self.val_y)
def evaluate(self, x):
x = self.checkX(x)
from lightgbm import LGBMClassifier
from sklearn.metrics.scorer import balanced_accuracy_scorer
lgbc = LGBMClassifier(n_estimators=int(x[0]),
num_leaves=int(x[1]),
max_depth=int(x[2]),
learning_rate=x[3],
min_child_samples=int(x[4]),
subsample=x[5],
colsample_bytree=x[6],
n_jobs=self.n_jobs)
lgbc.fit(self.train_x, self.train_y)
return -balanced_accuracy_scorer(lgbc, self.val_x, self.val_y)
def get_configspace(self, optimizer='smac'):
if optimizer == 'smac':
cs = ConfigurationSpace()
n_estimators = UniformIntegerHyperparameter("n_estimators", 100, 1000, default_value=500, q=50)
num_leaves = UniformIntegerHyperparameter("num_leaves", 31, 2047, default_value=128)
max_depth = Constant('max_depth', 15)
learning_rate = UniformFloatHyperparameter("learning_rate", 1e-3, 0.3, default_value=0.1, log=True)
min_child_samples = UniformIntegerHyperparameter("min_child_samples", 5, 30, default_value=20)
subsample = UniformFloatHyperparameter("subsample", 0.7, 1, default_value=1, q=0.1)
colsample_bytree = UniformFloatHyperparameter("colsample_bytree", 0.7, 1, default_value=1, q=0.1)
cs.add_hyperparameters([n_estimators, num_leaves, max_depth, learning_rate, min_child_samples, subsample,
colsample_bytree])
return cs
elif optimizer == 'tpe':
from hyperopt import hp
space = {'n_estimators': (hp.randint('lgb_n_estimators', 19) + 2) * 50,
'num_leaves': hp.randint('lgb_num_leaves', 2017) + 31,
'max_depth': 15,
'learning_rate': hp.loguniform('lgb_learning_rate', np.log(1e-3), np.log(0.3)),
'min_child_samples': hp.randint('lgb_min_child_samples', 26) + 5,
'subsample': (hp.randint('lgb_subsample', 4) + 7) * 0.1,
'colsample_bytree': (hp.randint('lgb_colsample_bytree', 4) + 7) * 0.1,
}
return space
elif optimizer == 'gpflowopt':
from gpflowopt.domain import ContinuousParameter
domain = (
ContinuousParameter('n_estimators', 100, 1000) +
ContinuousParameter('num_leaves', 31, 2047) +
ContinuousParameter('max_depth', 15, 16) +
ContinuousParameter("learning_rate", 1e-3, 0.3) +
ContinuousParameter("min_child_samples", 5, 30) +
ContinuousParameter("subsample", 0.7, 1) +
ContinuousParameter("colsample_bytree", 0.7, 1)
)
return domain
else:
raise ValueError('Unknown optimizer %s when getting configspace' % optimizer)
class svc(BaseSingleObjectiveProblem):
def __init__(self, **kwargs):
super().__init__(dim=8, **kwargs)
self.load_data(**kwargs)
self.bounds = None
def evaluate_config(self, config, optimizer='smac'):
config_dict = self.get_config_dict(config, optimizer)
penalty = config_dict['penalty']
loss = config_dict.get('loss', None)
dual = config_dict.get('dual', None)
C = config_dict['C']
tol = config_dict['tol']
fit_intercept = config_dict['fit_intercept']
intercept_scaling = config_dict['intercept_scaling']
if isinstance(penalty, dict):
combination = penalty
penalty = combination['penalty']
loss = combination['loss']
dual = combination['dual']
from sklearn.svm import LinearSVC
from sklearn.metrics.scorer import balanced_accuracy_scorer
if dual == 'True':
dual = True
elif dual == 'False':
dual = False
svcc = LinearSVC(penalty=penalty,
loss=loss,
dual=dual,
tol=tol,
C=C,
fit_intercept=fit_intercept,
intercept_scaling=intercept_scaling,
multi_class='ovr',
random_state=1)
svcc.fit(self.train_x, self.train_y)
return -balanced_accuracy_scorer(svcc, self.val_x, self.val_y)
def get_configspace(self, optimizer='smac'):
if optimizer == 'smac':
cs = ConfigurationSpace()
penalty = CategoricalHyperparameter(
"penalty", ["l1", "l2"], default_value="l2")
loss = CategoricalHyperparameter(
"loss", ["hinge", "squared_hinge"], default_value="squared_hinge")
dual = CategoricalHyperparameter("dual", ['True', 'False'], default_value='True')
# This is set ad-hoc
tol = UniformFloatHyperparameter(
"tol", 1e-5, 1e-1, default_value=1e-4, log=True)
C = UniformFloatHyperparameter(
"C", 0.03125, 32768, log=True, default_value=1.0)
multi_class = Constant("multi_class", "ovr")
# These are set ad-hoc
fit_intercept = Constant("fit_intercept", "True")
intercept_scaling = Constant("intercept_scaling", 1)
cs.add_hyperparameters([penalty, loss, dual, tol, C, multi_class,
fit_intercept, intercept_scaling])
penalty_and_loss = ForbiddenAndConjunction(
ForbiddenEqualsClause(penalty, "l1"),
ForbiddenEqualsClause(loss, "hinge")
)
constant_penalty_and_loss = ForbiddenAndConjunction(
ForbiddenEqualsClause(dual, "False"),
ForbiddenEqualsClause(penalty, "l2"),
ForbiddenEqualsClause(loss, "hinge")
)
penalty_and_dual = ForbiddenAndConjunction(
ForbiddenEqualsClause(dual, "True"),
ForbiddenEqualsClause(penalty, "l1")
)
cs.add_forbidden_clause(penalty_and_loss)
cs.add_forbidden_clause(constant_penalty_and_loss)
cs.add_forbidden_clause(penalty_and_dual)
return cs
elif optimizer == 'tpe':
from hyperopt import hp
space = {'penalty': hp.choice('liblinear_combination',
[{'penalty': "l1", 'loss': "squared_hinge", 'dual': "False"},
{'penalty': "l2", 'loss': "hinge", 'dual': "True"},
{'penalty': "l2", 'loss': "squared_hinge", 'dual': "True"},
{'penalty': "l2", 'loss': "squared_hinge", 'dual': "False"}]),
'loss': None,
'dual': None,
'tol': hp.loguniform('liblinear_tol', np.log(1e-5), np.log(1e-1)),
'C': hp.loguniform('liblinear_C', np.log(0.03125), np.log(32768)),
'multi_class': hp.choice('liblinear_multi_class', ["ovr"]),
'fit_intercept': hp.choice('liblinear_fit_intercept', ["True"]),
'intercept_scaling': hp.choice('liblinear_intercept_scaling', [1])}
return space
else:
raise ValueError('Unknown optimizer %s when getting configspace' % optimizer)
|
the-stack_0_18110 | def get_list_authors(authors):
displayed_authors = []
for author in authors:
displayed_authors.append('{} <{}>'.format(
author['name'], author['email']))
return displayed_authors
def parse_authors(stdout):
authors = []
for author in stdout.strip().split('\n'):
author_info = author.split(';')
authors.append({
'name': author_info[0],
'email': author_info[1]
})
return authors
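# Illustrative round-trip (input format assumed from the parsing above):
#
#     parse_authors("Jane Doe;jane@example.com\nJohn Roe;john@example.com")
#     -> [{'name': 'Jane Doe', 'email': 'jane@example.com'},
#         {'name': 'John Roe', 'email': 'john@example.com'}]
#     get_list_authors(parse_authors(...))
#     -> ['Jane Doe <jane@example.com>', 'John Roe <john@example.com>']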
|
the-stack_0_18112 | import heapq
import io
import numpy as np
from numpy.linalg import norm
import zipfile
class Embedding:
"""
Base class for all embeddings. SGNS can be directly instantiated with it.
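    A minimal usage sketch (toy 2-d vectors, purely illustrative)::

        m = np.array([[1.0, 0.0], [0.0, 1.0], [1.0, 1.0]])
        iw = ['apple', 'banana', 'fruit']
        wi = {w: i for i, w in enumerate(iw)}
        emb = Embedding(matrix=m, vocabulary=iw, word2index=wi, normalize=True)
        emb.similarity('apple', 'fruit')   # ~0.71
        emb.closest('fruit', n=2)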
"""
def __init__(self, matrix, vocabulary, word2index, normalize):
'''
Args:
matrix A numpy array, words associated with rows
vocabulary List of strings
word2index Dictionary mapping word to its index in
"vocabulary".
normalized Boolean
'''
self.m=matrix
self.normalized=normalize
if normalize:
self.normalize()
self.dim=self.m.shape[1]
self.wi=word2index
self.iw=vocabulary
def normalize(self):
norm = np.sqrt(np.sum(self.m * self.m, axis=1))
self.m = self.m / norm[:, np.newaxis]
self.normalized=True
def represent(self, w):
if w in self.wi:
return self.m[self.wi[w], :]
else:
return np.zeros(self.dim)
def similarity(self, w1, w2):
"""
        Cosine similarity between w1 and w2. Uses a plain dot product when the
        matrix has been normalized, otherwise normalizes on the fly.
"""
if self.normalized:
return self.represent(w1).dot(self.represent(w2))
else:
e1=self.represent(w1)
e2=self.represent(w2)
return e1.dot(e2)/(norm(e1)*norm(e2))
def closest(self, w, n=10):
"""
Assumes the vectors have been normalized.
"""
scores = self.m.dot(self.represent(w))
return heapq.nlargest(n, zip(scores, self.iw))
def subsample(self, kept_words):
'''
Return new embeddings model where only the "kept_words" remain
represented.
'''
#keep all words present in both.
# new_iw=list(set(kept_words).intersection(self.iw))
new_iw=list(set(kept_words)) #it might be that my new embeddings space
                                     # should contain words that are actually
# not in the original one...
# print(kept_words)
new_wi={new_iw[i]:i for i in range(len(new_iw))}
new_m=np.zeros(shape=[len(new_iw), self.dim])
# print(new_m)
for i in range(len(new_iw)):
new_m[i,:]=self.represent(new_iw[i])
new_embeddings=Embedding( matrix=new_m,
vocabulary=new_iw,
word2index=new_wi,
normalize=self.normalized)
# print('New embeddings: ',new_embeddings.m)
# print(' old embeddings: \n', self.m)
# print('new embeddings: \n', new_embeddings.m)
return new_embeddings
@classmethod
def from_word2vec_bin(cls, path, vocab_limit=None, normalize=False):
from gensim.models.keyedvectors import KeyedVectors
model = KeyedVectors.load_word2vec_format(path, binary=True, limit=vocab_limit)
iw=list(model.vocab)
dim=len(model[iw[0]])
m=np.zeros([len(iw),dim])
for i in range(len(iw)):
m[i,]=model[iw[i]]
wi={iw[i]:i for i in range(len(iw))}
return cls( matrix=m,
vocabulary=iw,
word2index=wi,
normalize=normalize)
@classmethod
def from_raw_format( cls,
path,
vocab_limit=-1,
normalize=False,
delim=' '):
'''
Method to read embeddings from textfile. One word per line. Word is first
entry and is seperated by "delim". The vector components are also
separated by "delim".
'''
with io.open(path,'r', encoding='utf8') as f:
vectors=[]
wi={}
iw=[]
line='start'
count=0
# READ FIRST LINE TO FIND OUT DIMENSIONALITY
line=f.readline().strip()
parts=line.split(delim)
dim=len(parts)-1
word=' '.join(parts[:-dim])
vec=[float(x) for x in parts[-dim:]]
iw+=[word]
wi[word]=count
vectors.append(vec)
count+=1
# READ ALL THE OTHER LINES
while count<vocab_limit or vocab_limit==-1:
line=f.readline().strip()
# STOPS ITERATING END END OF FILE IS REACHED
if line=='':
break
parts=line.split(delim)
word=' '.join(parts[:-dim])
vec=[float(x) for x in parts[-dim:]]
iw+=[word]
wi[word]=count
vectors.append(vec)
count+=1
return cls( matrix=np.array(vectors),
vocabulary=iw,
word2index=wi,
normalize=normalize)
@classmethod
def from_fasttext_vec( cls,
path,
zipped=False,
file=None,
vocab_limit=None,
normalize=False):
'''
Method to read the plain text format of fasttext (usually ending with
.vec).
First line consists of <vocab_size><blank><dimensions>.
'''
if zipped==True and file is None:
raise ValueError('You are trying to load a file withing a ZIP '+\
'but have not indicated the name of this file.')
if not zipped:
with io.open(path,'r', encoding='utf8') as f:
vectors=[]
wi={}
iw=[]
# for line in f.readlines(vocab_limit+1)[1:]:
# count+=1
first_line=f.readline().split()
vocab_size=int(first_line[0])
dim=int(first_line[1])
if vocab_limit is None:
vocab_limit=vocab_size
for count in range(vocab_limit):
line=f.readline().strip()
# print(count)
parts=line.split()
word=' '.join(parts[:-dim])
# print(len(parts))
# print(word)
vec=[float(x) for x in parts[-dim:]]
iw+=[word]
wi[word]=count
vectors.append(vec)
elif zipped:
with zipfile.ZipFile(path) as z:
with z.open(file) as f:
vectors=[]
wi={}
iw=[]
# for line in f.readlines(vocab_limit+1)[1:]:
# count+=1
first_line=f.readline().split()
vocab_size=int(first_line[0])
dim=int(first_line[1])
if vocab_limit is None:
vocab_limit=vocab_size
for count in range(vocab_limit):
line=f.readline().decode('utf8').strip()
# print(count)
parts=line.split()
word=' '.join(parts[:-dim])
# print(len(parts))
# print(word)
vec=[float(x) for x in parts[-dim:]]
iw+=[word]
wi[word]=count
vectors.append(vec)
return cls( matrix=np.array(vectors),
vocabulary=iw,
word2index=wi,
normalize=normalize)
|
the-stack_0_18113 | import torch
from utils import *
from lie_group_utils import SO3, SE3_2
import matplotlib.pyplot as plt
plt.rcParams["legend.loc"] = "upper right"
plt.rcParams['axes.titlesize'] = 'x-large'
plt.rcParams['axes.labelsize'] = 'x-large'
plt.rcParams['legend.fontsize'] = 'x-large'
plt.rcParams['xtick.labelsize'] = 'x-large'
plt.rcParams['ytick.labelsize'] = 'x-large'
plt.rcParams['text.usetex'] = True
params = {'text.latex.preamble': [r'\usepackage{amsmath}',
                                  r'\usepackage{amssymb}']}
plt.rcParams.update(params)
import numpy as np
torch.set_default_dtype(torch.float64)
from preintegration_utils import *
def propagate(T0, Sigma, Upsilon, Q, method, dt, g, cholQ=0):
"""Propagate state for one time step"""
Gamma = f_Gamma(g, dt)
Phi = f_flux(T0, dt)
# propagate the mean
T = Gamma.mm(Phi).mm(Upsilon)
# Jacobian for propagating prior along time
F = torch.eye(9)
F[6:9, 3:6] = torch.eye(3)*dt
# compute Adjoint of right transformation mean
AdUps = SE3_2.uAd(SE3_2.uinv(Upsilon))
Sigma_tmp = axat(AdUps.mm(F), Sigma)
# compound the covariances based on the second-order method
Sigma_prop = Sigma_tmp + Q
if method == 1:
# add fourth-order method
Sigma_prop += four_order(Sigma_tmp, Q)
Sigma_prop = (Sigma_prop + Sigma_prop.t())/2 # symmetric
return T, Sigma_prop
def main(i_max, k_max, T0, Sigma0, Upsilon, Q, cholQ, dt, g):
# Generate some random samples
T = torch.zeros(i_max, k_max, 5, 5).cuda()
T[:, 0] = T0.cuda().repeat(i_max, 1, 1)
tmp = Sigma0.sqrt().cuda().expand(i_max, 9, 9) # Sigma0 assumed diagonal!
T[:, 0] = T[:, 0].bmm(SE3_2.exp(bmv(tmp, torch.randn(i_max, 9).cuda())))
Gamma = f_Gamma(g, dt).cuda().expand(i_max, 5, 5)
tmp = cholQ.cuda().expand(i_max, 9, 9)
tmp2 = Upsilon.cuda().expand(i_max, 5, 5)
for k in range(1, k_max):
T_k = SE3_2.exp(bmv(tmp, torch.randn(i_max, 9).cuda()))
Phi = f_flux(T[:, k-1], dt)
T[:, k] = Gamma.bmm(Phi).bmm(tmp2).bmm(T_k)
T = T.cpu()
# Propagate the uncertainty using second- and fourth-order methods
T_est = torch.zeros(k_max, 5, 5)
Sigma2th = torch.zeros(k_max, 9, 9) # second order covariance
Sigma4th = torch.zeros(k_max, 9, 9) # fourth order covariance
T_est[0] = T0
Sigma2th[0] = Sigma0.clone()
Sigma4th[0] = Sigma0.clone()
for k in range(1, k_max):
# Second-order method
T_est[k], Sigma2th[k] = propagate(T_est[k-1], Sigma2th[k-1], Upsilon, Q, 0, dt, g)
# Fourth-order method
_, Sigma4th[k] = propagate(T_est[k-1], Sigma4th[k-1], Upsilon, Q, 1, dt, g)
xi = SE3_2.log((T_est[-1].inverse().expand(i_max, 5, 5).bmm(T[:, -1])).cuda())
P_est_mc = bouter(xi, xi).sum(dim=0).cpu()/(i_max-1)
res = torch.zeros(3)
res[1] = fro_norm(P_est_mc[-1], Sigma2th[-1])
res[2] = fro_norm(P_est_mc[-1], Sigma4th[-1])
return res
if __name__ == '__main__':
path = 'figures/second_vs_four_order.txt'
### Parameters ###
i_max = 5000 # number of random points
k_max = 301 # number of compounded poses
g = torch.Tensor([0, 0, 9.81]) # gravity vector
dt = 0.05 # step time (s)
sigmas = 0.03*torch.Tensor([0.1, 0.3, 0.5, 1, 1.5, 2, 2.5, 3, 3.5, 4, 4.5, 5])
# Constant acceleration, noise on IMU
# Define a PDF over transformations (mean and covariance)
xibar = torch.Tensor([0, 0, 0, 1, 0, 0, 0, 0, 0]).cuda()*dt
Upsilon = SE3_2.uexp(xibar).cpu()
Upsilon[:3, 3] += -g*dt
Upsilon[:3, 4] = Upsilon[:3, 3]*dt/2
T0 = torch.eye(5)
Sigma0 = torch.zeros(9, 9)
res = torch.zeros(sigmas.shape[0], 3)
for i in range(sigmas.shape[0]):
# Define right perturbation noise
cholQ = torch.Tensor([0, 0, sigmas[i], 0, 0, 0, 0, 0, 0]).diag()
Q = cholQ.mm(cholQ.t())
res[i] = main(i_max, k_max, T0, Sigma0, Upsilon, Q, cholQ, dt, g)
res[:, 0] = sigmas
# np.savetxt(path, res.numpy(), comments="", header="sigma second four")
plt.plot(res[:, 0], res[:, 2], color='cyan')
plt.plot(res[:, 0], res[:, 1], color='green')
plt.xlabel(r'propagation noise $\sigma$ (rad/s)')
plt.ylabel(r'covariance error')
plt.legend(["fourth-order", "second-order"])
plt.grid()
plt.xlim(0, sigmas[-1])
plt.ylim(0)
plt.show()
|
the-stack_0_18114 | from django.test import SimpleTestCase
from localflavor.cl.forms import CLRegionSelect, CLRutField
class CLLocalFlavorTests(SimpleTestCase):
def test_CLRegionSelect(self):
f = CLRegionSelect()
out = '''<select name="foo">
<option value="RM">Regi\xf3n Metropolitana de Santiago</option>
<option value="I">Regi\xf3n de Tarapac\xe1</option>
<option value="II">Regi\xf3n de Antofagasta</option>
<option value="III">Regi\xf3n de Atacama</option>
<option value="IV">Regi\xf3n de Coquimbo</option>
<option value="V">Regi\xf3n de Valpara\xedso</option>
<option value="VI">Regi\xf3n del Libertador Bernardo O'Higgins</option>
<option value="VII">Regi\xf3n del Maule</option>
<option value="VIII">Regi\xf3n del B\xedo B\xedo</option>
<option value="IX">Regi\xf3n de la Araucan\xeda</option>
<option value="X">Regi\xf3n de los Lagos</option>
<option value="XI">Regi\xf3n de Ays\xe9n del General Carlos Ib\xe1\xf1ez del Campo</option>
<option value="XII">Regi\xf3n de Magallanes y la Ant\xe1rtica Chilena</option>
<option value="XIV">Regi\xf3n de Los R\xedos</option>
<option value="XV">Regi\xf3n de Arica-Parinacota</option>
</select>'''
self.assertHTMLEqual(f.render('foo', 'bar'), out)
def test_CLRutField(self):
error_invalid = ['The Chilean RUT is not valid.']
error_format = ['Enter a valid Chilean RUT. The format is XX.XXX.XXX-X.']
valid = {
'11-6': '11-6',
'116': '11-6',
'767484100': '76.748.410-0',
'78.412.790-7': '78.412.790-7',
'8.334.6043': '8.334.604-3',
'76793310-K': '76.793.310-K',
'76793310-k': '76.793.310-K',
}
invalid = {
'11.111.111-0': error_invalid,
'111': error_invalid,
}
self.assertFieldOutput(CLRutField, valid, invalid)
# deal with special "Strict Mode".
invalid = {
'11-6': error_format,
'767484100': error_format,
'8.334.6043': error_format,
'76793310-K': error_format,
'11.111.111-0': error_invalid
}
self.assertFieldOutput(CLRutField,
{}, invalid, field_kwargs={"strict": True}
)
|
the-stack_0_18118 | # SPDX-License-Identifier: Apache-2.0
'''
Blender 2.81
Save the alpha, metallic, and roughness values for all low_res collection materials
'''
import bpy
from xrs import automate as xra
working_dir = xra.get_dir();
xra.log_info("Saving Values for low_res materials")
# Collections
if "web" not in bpy.data.collections:
xra.log_error('Web Collection Not Found')
exit(1)
collection = bpy.data.collections["web"]
if len(collection.objects) == 0:
xra.log_error('No objects in the web collection')
exit(1)
for obj in collection.objects:
for slot in obj.material_slots:
# Alpha (only when no inputs are linked)
if (xra.get_material_alpha_link_count(slot.material) == 0):
alpha = 100 * xra.get_material_alpha_value(slot.material)
alpha = xra.round_to_fives(alpha)
# Only save if not 1.0 (100)
if alpha < 100:
with open(working_dir + slot.material.name + "_alpha.value", "w") as file:
file.write("%i" % alpha)
# Metallic
metallic = 100 * xra.get_material_metallic_value(slot.material)
metallic = xra.round_to_fives(metallic)
with open(working_dir + slot.material.name + "_metallic.value", "w") as file:
file.write("%i" % metallic)
# Roughness
roughness = 100 * xra.get_material_roughness_value(slot.material)
roughness = xra.round_to_fives(roughness)
with open(working_dir + slot.material.name + "_roughness.value", "w") as file:
file.write("%i" % roughness)
|
the-stack_0_18119 | import sys
from GPT_GNN.data import *
from GPT_GNN.model import *
from warnings import filterwarnings
filterwarnings("ignore")
import argparse
parser = argparse.ArgumentParser(description='Pre-training HGT on a given graph (heterogeneous / homogeneous)')
'''
GPT-GNN arguments
'''
parser.add_argument('--attr_ratio', type=float, default=0.5,
help='Ratio of attr-loss against link-loss, range: [0-1]')
parser.add_argument('--attr_type', type=str, default='text',
choices=['text', 'vec'],
help='The type of attribute decoder')
parser.add_argument('--neg_samp_num', type=int, default=255,
help='Maximum number of negative sample for each target node.')
parser.add_argument('--queue_size', type=int, default=256,
help='Max size of adaptive embedding queue.')
parser.add_argument('--w2v_dir', type=str, default='/datadrive/dataset/w2v_all',
help='The address of preprocessed graph.')
'''
Dataset arguments
'''
parser.add_argument('--data_dir', type=str, default='/datadrive/dataset/graph_CS.pk',
help='The address of preprocessed graph.')
parser.add_argument('--pretrain_model_dir', type=str, default='/datadrive/models/test',
help='The address for storing the models and optimization results.')
parser.add_argument('--cuda', type=int, default=2,
help='Avaiable GPU ID')
parser.add_argument('--sample_depth', type=int, default=6,
help='How many layers within a mini-batch subgraph')
parser.add_argument('--sample_width', type=int, default=128,
help='How many nodes to be sampled per layer per type')
'''
Model arguments
'''
parser.add_argument('--conv_name', type=str, default='hgt',
choices=['hgt', 'gcn', 'gat', 'rgcn', 'han', 'hetgnn'],
help='The name of GNN filter. By default is Heterogeneous Graph Transformer (hgt)')
parser.add_argument('--n_hid', type=int, default=400,
help='Number of hidden dimension')
parser.add_argument('--n_heads', type=int, default=8,
help='Number of attention head')
parser.add_argument('--n_layers', type=int, default=3,
help='Number of GNN layers')
parser.add_argument('--prev_norm', help='Whether to add layer-norm on the previous layers', action='store_true')
parser.add_argument('--last_norm', help='Whether to add layer-norm on the last layers', action='store_true')
parser.add_argument('--dropout', type=float, default=0.2,
help='Dropout ratio')
'''
Optimization arguments
'''
parser.add_argument('--max_lr', type=float, default=1e-3,
help='Maximum learning rate.')
parser.add_argument('--scheduler', type=str, default='cycle',
help='Name of learning rate scheduler.' , choices=['cycle', 'cosine'])
parser.add_argument('--n_epoch', type=int, default=20,
help='Number of epoch to run')
parser.add_argument('--n_pool', type=int, default=8,
help='Number of process to sample subgraph')
parser.add_argument('--n_batch', type=int, default=32,
help='Number of batch (sampled graphs) for each epoch')
parser.add_argument('--batch_size', type=int, default=256,
help='Number of output nodes for training')
parser.add_argument('--clip', type=float, default=0.5,
help='Gradient Norm Clipping')
args = parser.parse_args()
args_print(args)
if args.cuda != -1:
device = torch.device("cuda:" + str(args.cuda))
else:
device = torch.device("cpu")
print('Start Loading Graph Data...')
graph = renamed_load(open(args.data_dir, 'rb'))
print('Finish Loading Graph Data!')
pre_range = {t: True for t in graph.times if t != None and t < 2014}
train_range = {t: True for t in graph.times if t != None and t >= 2014 and t <= 2016}
valid_range = {t: True for t in graph.times if t != None and t > 2016 and t <= 2017}
test_range = {t: True for t in graph.times if t != None and t > 2017}
pre_target_nodes = []
train_target_nodes = []
target_type = 'paper'
rel_stop_list = ['self', 'rev_PF_in_L0', 'rev_PF_in_L5', 'rev_PV_Repository', 'rev_PV_Patent']
for p_id, _time in enumerate(list(graph.node_feature[target_type]['time'])):
if _time in pre_range:
pre_target_nodes += [[p_id, _time]]
elif _time in train_range:
train_target_nodes += [[p_id, _time]]
pre_target_nodes = np.array(pre_target_nodes)
train_target_nodes = np.array(train_target_nodes)
def GPT_sample(seed, target_nodes, time_range, batch_size, feature_extractor):
np.random.seed(seed)
samp_target_nodes = target_nodes[np.random.choice(len(target_nodes), batch_size)]
threshold = 0.5
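    # Eligible edges (non-stop relations whose target node falls inside the current
    # batch) are held out with probability 1 - threshold and moved to rem_edge_list;
    # these held-out edges become the link-reconstruction targets.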
feature, times, edge_list, _, attr = sample_subgraph(graph, time_range, \
inp = {target_type: samp_target_nodes}, feature_extractor = feature_extractor, \
sampled_depth = args.sample_depth, sampled_number = args.sample_width)
rem_edge_list = defaultdict( #source_type
lambda: defaultdict( #relation_type
lambda: [] # [target_id, source_id]
))
ori_list = {}
for source_type in edge_list[target_type]:
ori_list[source_type] = {}
for relation_type in edge_list[target_type][source_type]:
ori_list[source_type][relation_type] = np.array(edge_list[target_type][source_type][relation_type])
el = []
for target_ser, source_ser in edge_list[target_type][source_type][relation_type]:
if relation_type not in rel_stop_list and target_ser < batch_size and np.random.random() > threshold:
rem_edge_list[source_type][relation_type] += [[target_ser, source_ser]]
continue
el += [[target_ser, source_ser]]
el = np.array(el)
edge_list[target_type][source_type][relation_type] = el
if relation_type == 'self':
continue
else:
if 'rev_' in relation_type:
rev_relation = relation_type[4:]
else:
rev_relation = 'rev_' + relation_type
edge_list[source_type]['paper'][rev_relation] = list(np.stack((el[:,1], el[:,0])).T)
'''
Adding feature nodes:
'''
n_target_nodes = len(feature[target_type])
feature[target_type] = np.concatenate((feature[target_type], np.zeros([batch_size, feature[target_type].shape[1]])))
times[target_type] = np.concatenate((times[target_type], times[target_type][:batch_size]))
for source_type in edge_list[target_type]:
for relation_type in edge_list[target_type][source_type]:
el = []
for target_ser, source_ser in edge_list[target_type][source_type][relation_type]:
if target_ser < batch_size:
if relation_type == 'self':
el += [[target_ser + n_target_nodes, target_ser + n_target_nodes]]
else:
el += [[target_ser + n_target_nodes, source_ser]]
if len(el) > 0:
edge_list[target_type][source_type][relation_type] = \
np.concatenate((edge_list[target_type][source_type][relation_type], el))
rem_edge_lists = {}
for source_type in rem_edge_list:
rem_edge_lists[source_type] = {}
for relation_type in rem_edge_list[source_type]:
rem_edge_lists[source_type][relation_type] = np.array(rem_edge_list[source_type][relation_type])
del rem_edge_list
return to_torch(feature, times, edge_list, graph), rem_edge_lists, ori_list, \
attr[:batch_size], (n_target_nodes, n_target_nodes + batch_size)
def prepare_data(pool):
jobs = []
for _ in np.arange(args.n_batch - 1):
jobs.append(pool.apply_async(GPT_sample, args=(randint(), pre_target_nodes, pre_range, args.batch_size, feature_OAG)))
jobs.append(pool.apply_async(GPT_sample, args=(randint(), train_target_nodes, train_range, args.batch_size, feature_OAG)))
return jobs
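# Note on the sampling pipeline: prepare_data() launches n_batch asynchronous
# GPT_sample jobs. The first n_batch - 1 draw subgraphs from the pre-training
# range, while the last one draws from the fine-tuning range and is consumed
# in the training loop below as the validation batch (jobs[-1]).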
pool = mp.Pool(args.n_pool)
st = time.time()
jobs = prepare_data(pool)
repeat_num = int(len(pre_target_nodes) / args.batch_size // args.n_batch)
data, rem_edge_list, ori_edge_list, _, _ = GPT_sample(randint(), pre_target_nodes, pre_range, args.batch_size, feature_OAG)
node_feature, node_type, edge_time, edge_index, edge_type, node_dict, edge_dict = data
types = graph.get_types()
gnn = GNN(conv_name = args.conv_name, in_dim = len(graph.node_feature[target_type]['emb'].values[0]) + 401, n_hid = args.n_hid, \
n_heads = args.n_heads, n_layers = args.n_layers, dropout = args.dropout, num_types = len(types), \
num_relations = len(graph.get_meta_graph()) + 1, prev_norm = args.prev_norm, last_norm = args.last_norm)
if args.attr_type == 'text':
from gensim.models import Word2Vec
w2v_model = Word2Vec.load(args.w2v_dir)
n_tokens = len(w2v_model.wv.vocab)
attr_decoder = RNNModel(n_word = n_tokens, ninp = gnn.n_hid, \
nhid = w2v_model.vector_size, nlayers = 2)
attr_decoder.from_w2v(torch.FloatTensor(w2v_model.wv.vectors))
else:
attr_decoder = Matcher(gnn.n_hid, gnn.in_dim)
gpt_gnn = GPT_GNN(gnn = gnn, rem_edge_list = rem_edge_list, attr_decoder = attr_decoder, \
neg_queue_size = 0, types = types, neg_samp_num = args.neg_samp_num, device = device)
gpt_gnn.init_emb.data = node_feature[node_type == node_dict[target_type][1]].mean(dim=0).detach()
gpt_gnn = gpt_gnn.to(device)
best_val = 100000
train_step = 0
stats = []
optimizer = torch.optim.AdamW(gpt_gnn.parameters(), weight_decay = 1e-2, eps=1e-06, lr = args.max_lr)
if args.scheduler == 'cycle':
scheduler = torch.optim.lr_scheduler.OneCycleLR(optimizer, pct_start=0.02, anneal_strategy='linear', final_div_factor=100,\
max_lr = args.max_lr, total_steps = repeat_num * args.n_batch * args.n_epoch + 1)
elif args.scheduler == 'cosine':
scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, repeat_num * args.n_batch, eta_min=1e-6)
print('Start Pretraining...')
for epoch in np.arange(args.n_epoch) + 1:
gpt_gnn.neg_queue_size = args.queue_size * epoch // args.n_epoch
for batch in np.arange(repeat_num) + 1:
train_data = [job.get() for job in jobs[:-1]]
valid_data = jobs[-1].get()
pool.close()
pool.join()
pool = mp.Pool(args.n_pool)
jobs = prepare_data(pool)
et = time.time()
print('Data Preparation: %.1fs' % (et - st))
train_link_losses = []
train_attr_losses = []
gpt_gnn.train()
for data, rem_edge_list, ori_edge_list, attr, (start_idx, end_idx) in train_data:
node_feature, node_type, edge_time, edge_index, edge_type, node_dict, edge_dict = data
node_feature = node_feature.detach()
node_feature[start_idx : end_idx] = gpt_gnn.init_emb
node_emb = gpt_gnn.gnn(node_feature.to(device), node_type.to(device), edge_time.to(device), \
edge_index.to(device), edge_type.to(device))
loss_link, _ = gpt_gnn.link_loss(node_emb, rem_edge_list, ori_edge_list, node_dict, target_type, use_queue = True, update_queue=True)
if args.attr_type == 'text':
loss_attr = gpt_gnn.text_loss(node_emb[start_idx : end_idx], attr, w2v_model, device)
else:
loss_attr = gpt_gnn.feat_loss(node_emb[start_idx : end_idx], torch.FloatTensor(attr).to(device))
loss = loss_link + loss_attr * args.attr_ratio
optimizer.zero_grad()
loss.backward()
torch.nn.utils.clip_grad_norm_(gpt_gnn.parameters(), args.clip)
optimizer.step()
train_link_losses += [loss_link.item()]
train_attr_losses += [loss_attr.item()]
scheduler.step()
'''
Valid
'''
gpt_gnn.eval()
with torch.no_grad():
data, rem_edge_list, ori_edge_list, attr, (start_idx, end_idx) = valid_data
node_feature, node_type, edge_time, edge_index, edge_type, node_dict, edge_dict = data
node_feature = node_feature.detach()
node_feature[start_idx : end_idx] = gpt_gnn.init_emb
node_emb = gpt_gnn.gnn(node_feature.to(device), node_type.to(device), edge_time.to(device), \
edge_index.to(device), edge_type.to(device))
loss_link, ress = gpt_gnn.link_loss(node_emb, rem_edge_list, ori_edge_list, node_dict, target_type, use_queue = False, update_queue=True)
loss_link = loss_link.item()
if args.attr_type == 'text':
loss_attr = gpt_gnn.text_loss(node_emb[start_idx : end_idx], attr, w2v_model, device)
else:
loss_attr = gpt_gnn.feat_loss(node_emb[start_idx : end_idx], torch.FloatTensor(attr).to(device))
ndcgs = []
for i in ress:
ai = np.zeros(len(i[0]))
ai[0] = 1
ndcgs += [ndcg_at_k(ai[j.cpu().numpy()], len(j)) for j in i.argsort(descending = True)]
valid_loss = loss_link + loss_attr * args.attr_ratio
st = time.time()
print(("Epoch: %d, (%d / %d) %.1fs LR: %.5f Train Loss: (%.3f, %.3f) Valid Loss: (%.3f, %.3f) NDCG: %.3f Norm: %.3f queue: %d") % \
(epoch, batch, repeat_num, (st-et), optimizer.param_groups[0]['lr'], np.average(train_link_losses), np.average(train_attr_losses), \
loss_link, loss_attr, np.average(ndcgs), node_emb.norm(dim=1).mean(), gpt_gnn.neg_queue_size))
if valid_loss < best_val:
best_val = valid_loss
print('UPDATE!!!')
torch.save(gpt_gnn.state_dict(), args.pretrain_model_dir)
stats += [[np.average(train_link_losses), loss_link, loss_attr, valid_loss]]
|
the-stack_0_18120 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Classes to abstract 2D pose and orientation using matrices in SE(2) and SO(2)
To use::
from spatialmath.pose2d import *
T = SE2(1, 2, 0.3)
import spatialmath as sm
T = sm.SE2.Rx(1, 2, 0.3)
.. inheritance-diagram:: spatialmath.pose3d
:top-classes: collections.UserList
:parts: 1
"""
# pylint: disable=invalid-name
import math
import numpy as np
from spatialmath.base import argcheck
from spatialmath import base as base
from spatialmath.baseposematrix import BasePoseMatrix
import spatialmath.pose3d as p3
# ============================== SO2 =====================================#
class SO2(BasePoseMatrix):
"""
SO(2) matrix class
This subclass represents rotations in 2D space. Internally it is a 2x2 orthogonal matrix belonging
to the group SO(2).
.. inheritance-diagram:: spatialmath.pose2d.SO2
:top-classes: collections.UserList
:parts: 1
"""
# SO2() identity matrix
# SO2(angle, unit)
# SO2( obj ) # deep copy
# SO2( np ) # make numpy object
# SO2( nplist ) # make from list of numpy objects
# constructor needs to take ndarray -> SO2, or list of ndarray -> SO2
def __init__(self, arg=None, *, unit='rad', check=True):
"""
Construct new SO(2) object
:param unit: angular units 'deg' or 'rad' [default] if applicable
:type unit: str, optional
:param check: check for valid SO(2) elements if applicable, default to True
:type check: bool
:return: SO(2) rotation
:rtype: SO2 instance
- ``SO2()`` is an SO2 instance representing a null rotation -- the identity matrix.
- ``SO2(θ)`` is an SO2 instance representing a rotation by ``θ`` radians. If ``θ`` is array_like
`[θ1, θ2, ... θN]` then an SO2 instance containing a sequence of N rotations.
- ``SO2(θ, unit='deg')`` is an SO2 instance representing a rotation by ``θ`` degrees. If ``θ`` is array_like
`[θ1, θ2, ... θN]` then an SO2 instance containing a sequence of N rotations.
- ``SO2(R)`` is an SO2 instance with rotation described by the SO(2) matrix R which is a 2x2 numpy array. If ``check``
is ``True`` check the matrix belongs to SO(2).
- ``SO2([R1, R2, ... RN])`` is an SO2 instance containing a sequence of N rotations, each described by an SO(2) matrix
Ri which is a 2x2 numpy array. If ``check`` is ``True`` then check each matrix belongs to SO(2).
- ``SO2([X1, X2, ... XN])`` is an SO2 instance containing a sequence of N rotations, where each Xi is an SO2 instance.
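        Example (illustrative)::

            >>> R = SO2(0.3)                 # rotation by 0.3 rad
            >>> R = SO2(30, unit='deg')      # rotation by 30 degrees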
"""
super().__init__()
if isinstance(arg, SE2):
self.data = [base.t2r(x) for x in arg.data]
elif super().arghandler(arg, check=check):
return
elif argcheck.isscalar(arg):
self.data = [base.rot2(arg, unit=unit)]
elif argcheck.isvector(arg):
self.data = [base.rot2(x, unit=unit) for x in argcheck.getvector(arg)]
else:
raise ValueError('bad argument to constructor')
@staticmethod
def _identity():
return np.eye(2)
@property
def shape(self):
"""
        Shape of the object's internal matrix representation
:return: (2,2)
:rtype: tuple
"""
return (2, 2)
@classmethod
def Rand(cls, N=1, arange=(0, 2 * math.pi), unit='rad'):
r"""
Construct new SO(2) with random rotation
:param arange: rotation range, defaults to :math:`[0, 2\pi)`.
:type arange: 2-element array-like, optional
:param unit: angular units as 'deg or 'rad' [default]
:type unit: str, optional
:param N: number of random rotations, defaults to 1
:type N: int
:return: SO(2) rotation matrix
:rtype: SO2 instance
- ``SO2.Rand()`` is a random SO(2) rotation.
- ``SO2.Rand([-90, 90], unit='deg')`` is a random SO(2) rotation between
-90 and +90 degrees.
- ``SO2.Rand(N)`` is a sequence of N random rotations.
Rotations are uniform over the specified interval.
"""
rand = np.random.uniform(low=arange[0], high=arange[1], size=N) # random values in the range
return cls([base.rot2(x) for x in argcheck.getunit(rand, unit)])
@classmethod
def Exp(cls, S, check=True):
"""
Construct new SO(2) rotation matrix from so(2) Lie algebra
:param S: element of Lie algebra so(2)
:type S: numpy ndarray
:param check: check that passed matrix is valid so(2), default True
:type check: bool
:return: SO(2) rotation matrix
:rtype: SO2 instance
- ``SO2.Exp(S)`` is an SO(2) rotation defined by its Lie algebra
which is a 2x2 so(2) matrix (skew symmetric)
:seealso: :func:`spatialmath.base.transforms2d.trexp`, :func:`spatialmath.base.transformsNd.skew`
"""
if isinstance(S, (list, tuple)):
return cls([base.trexp2(s, check=check) for s in S])
else:
return cls(base.trexp2(S, check=check), check=False)
@staticmethod
def isvalid(x, check=True):
"""
Test if matrix is valid SO(2)
:param x: matrix to test
:type x: numpy.ndarray
:return: True if the matrix is a valid element of SO(2), ie. it is a 2x2
orthonormal matrix with determinant of +1.
:rtype: bool
        :seealso: :func:`~spatialmath.base.transforms2d.isrot2`
"""
return not check or base.isrot2(x, check=True)
def inv(self):
"""
Inverse of SO(2)
:return: inverse rotation
:rtype: SO2 instance
- ``x.inv()`` is the inverse of `x`.
Notes:
- for elements of SO(2) this is the transpose.
- if `x` contains a sequence, returns an `SO2` with a sequence of inverses
"""
if len(self) == 1:
return SO2(self.A.T)
else:
return SO2([x.T for x in self.A])
@property
def R(self):
"""
SO(2) or SE(2) as rotation matrix
:return: rotational component
:rtype: numpy.ndarray, shape=(2,2)
``x.R`` returns the rotation matrix, when `x` is `SO2` or `SE2`. If `len(x)` is:
- 1, return an ndarray with shape=(2,2)
- N>1, return ndarray with shape=(N,2,2)
"""
return self.A[:2, :2]
def theta(self, unit='rad'):
"""
SO(2) as a rotation angle
:param unit: angular units 'deg' or 'rad' [default]
:type unit: str, optional
:return: rotation angle
:rtype: float or list
``x.theta`` is the rotation angle such that `x` is `SO2(x.theta)`.
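        Example (illustrative)::

            >>> theta = SO2(0.3).theta()                 # ~0.3
            >>> theta_deg = SO2(0.3).theta(unit='deg')   # ~17.2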
"""
if unit == 'deg':
conv = 180.0 / math.pi
else:
conv = 1.0
if len(self) == 1:
return conv * math.atan2(self.A[1, 0], self.A[0, 0])
else:
return [conv * math.atan2(x.A[1, 0], x.A[0, 0]) for x in self]
def SE2(self):
"""
Create SE(2) from SO(2)
:return: SE(2) with same rotation but zero translation
:rtype: SE2 instance
"""
return SE2(base.rt2tr(self.A, [0, 0]))
# ============================== SE2 =====================================#
class SE2(SO2):
"""
SE(2) matrix class
This subclass represents rigid-body motion (pose) in 2D space. Internally
it is a 3x3 homogeneous transformation matrix belonging to the group SE(2).
.. inheritance-diagram:: spatialmath.pose2d.SE2
:top-classes: collections.UserList
:parts: 1
"""
# constructor needs to take ndarray -> SO2, or list of ndarray -> SO2
def __init__(self, x=None, y=None, theta=None, *, unit='rad', check=True):
"""
Construct new SE(2) object
        :param unit: angular units 'deg' or 'rad' [default] if applicable
        :type unit: str, optional
        :param check: check for valid SE(2) elements if applicable, default to True
        :type check: bool
        :return: homogeneous rigid-body transformation matrix
        :rtype: SE2 instance
- ``SE2()`` is an SE2 instance representing a null motion -- the
identity matrix
- ``SE2(θ)`` is an SE2 instance representing a pure rotation of
``θ`` radians
- ``SE2(θ, unit='deg')`` as above but ``θ`` in degrees
- ``SE2(x, y)`` is an SE2 instance representing a pure translation of
(``x``, ``y``)
- ``SE2(t)`` is an SE2 instance representing a pure translation of
(``x``, ``y``) where``t``=[x,y] is a 2-element array_like
- ``SE2(x, y, θ)`` is an SE2 instance representing a translation of
(``x``, ``y``) and a rotation of ``θ`` radians
- ``SE2(x, y, θ, unit='deg')`` as above but ``θ`` in degrees
- ``SE2(t)`` where ``t``=[x,y] is a 2-element array_like, is an SE2
instance representing a pure translation of (``x``, ``y``)
- ``SE2(q)`` where ``q``=[x,y,θ] is a 3-element array_like, is an SE2
instance representing a translation of (``x``, ``y``) and a rotation
of ``θ`` radians
- ``SE2(t, unit='deg')`` as above but ``θ`` in degrees
- ``SE2(T)`` is an SE2 instance with rigid-body motion described by the
SE(2) matrix T which is a 3x3 numpy array. If ``check`` is ``True``
check the matrix belongs to SE(2).
- ``SE2([T1, T2, ... TN])`` is an SE2 instance containing a sequence of
N rigid-body motions, each described by an SE(2) matrix Ti which is a
3x3 numpy array. If ``check`` is ``True`` then check each matrix
belongs to SE(2).
- ``SE2([X1, X2, ... XN])`` is an SE2 instance containing a sequence of
N rigid-body motions, where each Xi is an SE2 instance.
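        Example (illustrative)::

            >>> T = SE2(1, 2, 0.3)           # translate (1, 2) and rotate 0.3 rad
            >>> T = SE2(3, 4)                # pure translation of (3, 4)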
"""
if y is None and theta is None:
# just one argument passed
if super().arghandler(x, check=check):
return
if isinstance(x, SO2):
self.data = [base.r2t(_x) for _x in x.data]
elif argcheck.isscalar(x):
self.data = [base.trot2(x, unit=unit)]
elif len(x) == 2:
# SE2([x,y])
self.data = [base.transl2(x)]
elif len(x) == 3:
# SE2([x,y,theta])
self.data = [base.trot2(x[2], t=x[:2], unit=unit)]
else:
raise ValueError('bad argument to constructor')
elif x is not None:
if y is not None and theta is None:
# SE2(x, y)
self.data = [base.transl2(x, y)]
elif y is not None and theta is not None:
# SE2(x, y, theta)
self.data = [base.trot2(theta, t=[x, y], unit=unit)]
else:
raise ValueError('bad arguments to constructor')
@staticmethod
def _identity():
return np.eye(3)
@property
def shape(self):
"""
        Shape of the object's internal matrix representation
:return: (3,3)
:rtype: tuple
"""
return (3, 3)
@classmethod
def Rand(cls, N=1, xrange=(-1, 1), yrange=(-1, 1), arange=(0, 2 * math.pi), unit='rad'): # pylint: disable=arguments-differ
r"""
Construct a new random SE(2)
:param xrange: x-axis range [min,max], defaults to [-1, 1]
:type xrange: 2-element sequence, optional
:param yrange: y-axis range [min,max], defaults to [-1, 1]
:type yrange: 2-element sequence, optional
:param arange: angle range [min,max], defaults to :math:`[0, 2\pi)`
:type arange: 2-element sequence, optional
:param N: number of random rotations, defaults to 1
:type N: int
:param unit: angular units 'deg' or 'rad' [default] if applicable
:type unit: str, optional
:return: homogeneous rigid-body transformation matrix
:rtype: SE2 instance
Return an SE2 instance with random rotation and translation.
        - ``SE2.Rand()`` is a random SE(2) rigid-body motion.
- ``SE2.Rand(N)`` is an SE2 object containing a sequence of N random
poses.
        Example, create ten random vehicle poses in the xy-plane::
            >>> x = SE2.Rand(N=10, xrange=[-2,2], yrange=[-2,2])
>>> len(x)
10
"""
x = np.random.uniform(low=xrange[0], high=xrange[1], size=N) # random values in the range
y = np.random.uniform(low=yrange[0], high=yrange[1], size=N) # random values in the range
theta = np.random.uniform(low=arange[0], high=arange[1], size=N) # random values in the range
        return cls([base.trot2(t, t=[x, y]) for (x, y, t) in zip(x, y, argcheck.getunit(theta, unit))])
@classmethod
def Exp(cls, S, check=True): # pylint: disable=arguments-differ
"""
Construct a new SE(2) from se(2) Lie algebra
:param S: element of Lie algebra se(2)
:type S: numpy ndarray
:param check: check that passed matrix is valid se(2), default True
:type check: bool
:return: homogeneous transform matrix
:rtype: SE2 instance
        - ``SE2.Exp(S)`` is an SE(2) rigid-body motion defined by its Lie algebra
          which is a 3x3 se(2) matrix (augmented skew-symmetric)
        - ``SE2.Exp(t)`` is an SE(2) rigid-body motion defined by a 3-element twist
          vector array_like (the unique elements of the se(2) skew-symmetric matrix)
- ``SE2.Exp(T)`` is a sequence of SE(2) rigid-body motions defined by an Nx3 matrix of twist vectors, one per row.
        Note:
        - an input 3x3 matrix is ambiguous, it could be the first or third case above. This implementation treats a single 3x3 array as the first case (an se(2) matrix); pass a list or tuple of twist vectors to obtain a sequence.
:seealso: :func:`spatialmath.base.transforms2d.trexp`, :func:`spatialmath.base.transformsNd.skew`
"""
if isinstance(S, (list, tuple)):
return cls([base.trexp2(s) for s in S])
else:
return cls(base.trexp2(S), check=False)
@classmethod
def Tx(cls, x):
"""
Create an SE(2) translation along the X-axis
:param x: translation distance along the X-axis
:type x: float
:return: SE(2) matrix
:rtype: SE2 instance
        ``SE2.Tx(x)`` is an SE(2) translation of ``x`` along the x-axis
Example:
.. runblock:: pycon
>>> SE2.Tx(2)
>>> SE2.Tx([2,3])
        :seealso: :func:`~spatialmath.base.transforms2d.transl2`
:SymPy: supported
"""
return cls([base.transl2(_x, 0) for _x in base.getvector(x)], check=False)
@classmethod
def Ty(cls, y):
"""
Create an SE(2) translation along the Y-axis
:param y: translation distance along the Y-axis
:type y: float
:return: SE(2) matrix
:rtype: SE2 instance
        ``SE2.Ty(y)`` is an SE(2) translation of ``y`` along the y-axis
Example:
.. runblock:: pycon
>>> SE2.Ty(2)
>>> SE2.Ty([2,3])
        :seealso: :func:`~spatialmath.base.transforms2d.transl2`
:SymPy: supported
"""
return cls([base.transl2(0, _y) for _y in base.getvector(y)], check=False)
@staticmethod
def isvalid(x, check=True):
"""
Test if matrix is valid SE(2)
:param x: matrix to test
:type x: numpy.ndarray
        :return: true if the matrix is a valid element of SE(2), i.e. it is a
3x3 homogeneous rigid-body transformation matrix.
:rtype: bool
        :seealso: :func:`~spatialmath.base.transforms2d.ishom2`
"""
return not check or base.ishom2(x, check=True)
@property
def t(self):
"""
Translational component of SE(2)
:param self: SE(2)
:type self: SE2 instance
:return: translational component
:rtype: numpy.ndarray
``x.t`` is the translational vector component. If ``len(x)`` is:
- 1, return an ndarray with shape=(2,)
- N>1, return an ndarray with shape=(N,2)
"""
if len(self) == 1:
return self.A[:2, 2]
else:
return np.array([x[:2, 2] for x in self.A])
def xyt(self):
r"""
SE(2) as a configuration vector
        :return: an array :math:`[x, y, \theta]`
        :rtype: numpy.ndarray
        ``x.xyt`` is the rigid-body motion in minimal form as a translation and
rotation expressed in vector form as :math:`[x, y, \theta]`. If
``len(x)`` is:
- 1, return an ndarray with shape=(3,)
- N>1, return an ndarray with shape=(N,3)
"""
if len(self) == 1:
return base.tr2xyt(self.A)
else:
            return np.array([base.tr2xyt(x) for x in self.A])
def inv(self):
r"""
Inverse of SE(2)
:param self: pose
:type self: SE2 instance
:return: inverse
:rtype: SE2
Notes:
        - for elements of SE(2) this takes into account the matrix structure :math:`T = \left[ \begin{array}{cc} R & t \\ 0 & 1 \end{array} \right], \quad T^{-1} = \left[ \begin{array}{cc} R^T & -R^T t \\ 0 & 1 \end{array} \right]`
        - if ``x`` contains a sequence, returns an ``SE2`` with a sequence of inverses
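        Example (illustrative; assumes the usual ``*`` composition operator
        inherited from the pose base class):
        .. runblock:: pycon
            >>> T = SE2(1, 2, 0.3)
            >>> T.inv()
            >>> T * T.inv()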
"""
if len(self) == 1:
return SE2(base.rt2tr(self.R.T, -self.R.T @ self.t), check=False)
else:
return SE2([base.rt2tr(x.R.T, -x.R.T @ x.t) for x in self], check=False)
def SE3(self, z=0):
"""
Create SE(3) from SE(2)
:param z: default z coordinate, defaults to 0
:type z: float
        :return: SE(3) with the same rotation (about the z-axis) and translation, and settable z-coordinate
        :rtype: SE3 instance
        "Lifts" the 2D rigid-body motion to 3D: the rotation acts about the z-axis,
        the translation lies in the xy-plane, and the z-coordinate is settable.
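        Example (illustrative):
        .. runblock:: pycon
            >>> T2 = SE2(1, 2, 0.3)
            >>> T2.SE3()
            >>> T2.SE3(z=4)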
"""
def lift3(x):
y = np.eye(4)
y[:2, :2] = x.A[:2, :2]
y[:2, 3] = x.A[:2, 2]
y[2, 3] = z
return y
return p3.SE3([lift3(x) for x in self])
def Twist2(self):
from spatialmath.twist import Twist2
return Twist2(self.log(twist=True))
if __name__ == '__main__': # pragma: no cover
import pathlib
exec(open(pathlib.Path(__file__).parent.parent.absolute() / "tests" / "test_pose2d.py").read()) # pylint: disable=exec-used
|
the-stack_0_18121 | import discord, json
from discord.ext import commands
from QuentiumBot import HandleData, get_translations
# Basic command configs
cmd_name = "definition"
tran = get_translations()
aliases = [] if not tran[cmd_name]["fr"]["aliases"] else tran[cmd_name]["fr"]["aliases"].split("/")
with open("data/definitions_sneakers.json", "r", encoding="utf-8", errors="ignore") as file:
definitions_sneakers = json.loads(file.read(), strict=False)
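# Expected shape of data/definitions_sneakers.json, inferred from how the
# entries are used below; the example entry itself is purely illustrative:
#   {
#       "deadstock": {
#           "equivalent": "DS",
#           "url": "https://example.com/deadstock",
#           "definition": "A pair that has never been worn."
#       }
#   }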
class DefinitionSparse(commands.Cog):
"""Definition command in Sparse Sneakers section"""
def __init__(self, client):
self.client = client
@commands.command(
name=cmd_name,
aliases=aliases,
pass_context=True
)
@commands.guild_only()
async def definition_cmd(self, ctx, *, args="list"):
# Get specific server data
if isinstance(ctx.channel, discord.TextChannel):
await HandleData.retrieve_data(self, ctx.message.guild)
lang_server = "fr"
cmd_tran = tran[cmd_name][lang_server]
# Doesn't respond to bots
if not ctx.message.author.bot == True:
if ctx.guild.id == 798518855529005076: # Sparse Sneakers server ID
embed = discord.Embed(color=0x158DEE)
if any(args == x for x in ["list", "liste", "show"]):
content = "- " + "\n- ".join(definitions_sneakers.keys())
embed.title = cmd_tran["msg_def_list"]
embed.description = content
return await ctx.send(embed=embed)
for word, definition in definitions_sneakers.items():
if args.lower() == word.lower():
embed.set_author(name=word.title(), icon_url=tran[cmd_name]["logo_url"])
embed.title = definition["equivalent"]
embed.url = definition["url"]
embed.description = definition["definition"]
embed.set_footer(text=cmd_tran["msg_footer"])
await ctx.send(embed=embed)
def setup(client):
client.add_cog(DefinitionSparse(client))
|
the-stack_0_18123 | # -*- coding: utf-8 -*-
# Scrapy settings for proxyspider project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://doc.scrapy.org/en/latest/topics/settings.html
# https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
# https://doc.scrapy.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'proxyspider'
SPIDER_MODULES = ['proxyspider.spiders']
NEWSPIDER_MODULE = 'proxyspider.spiders'
LOG_FILE=r"./log.log"
LOG_ENCODING="utf-8"
LOG_ENABLED=True
#**********************CUSTOM SETTINGS START******************************
LOC_PROXY_LIST=r"./proxyspider/util/proxylist.csv"
LOC_USERAGENT_LIST=r"./proxyspider/util/useragentswitcher.xml"
USERAGENT_FILTER=r'Windows'#Windows Mac Linux Unix Mobile Spiders Miscellaneous
LOC_WRITE_JSON=r"./x.json"
LOC_WRITE_CSV=r"./x.csv"
MYSQL_DB={
"host":"192.168.63.131",
"port":"3306",
"username":"dev",
"passwd":"dev",
"database_name":"devdb",
"table_name":"tb_ip",
}
MONGO_DB={
"host":"192.168.63.133",
"port":"27017",
"username":"dev",
"passwd":"dev",
"database_name":"devdb",
"collection_name":"test",
}
REDIS_DB={
"host":"192.168.63.134",
"port":"6379",
"passwd":"dev",
"database_name":"0",
}
#usual_xpath
XICI_PATTERN=dict(
allowed_domains=["xicidaili.com"],
start_urls=["http://www.xicidaili.com/{}/{}".format(i,j) for i in ['nn','nt','wn','wt'] for j in range(1,10)],
xpath=r"//table/tr[position()>1]",
item_xpath={
'address':r'td[2]/text()',
'port':r'td[3]/text()',
'protocol':r'td[6]/text()',
}
)
_66IP_PATTERN=dict(
allowed_domains=["66ip.cn"],
start_urls=['http://www.66ip.cn/%s.html' % n for n in ['index'] + list(range(2, 34))] + ['http://www.66ip.cn/areaindex_%s/%s.html' % (m, n) for m in range(1, 34) for n in range(1, 10)],
xpath="//table/tr[position()>1]",
item_xpath={
'address':'td[1]/text()',
'port':'td[2]/text()',
'protocol':None,
}
)
CNPROXY_PATTERN=dict(
allowed_domains=["cn-proxy.com"],
start_urls=['http://cn-proxy.com/', 'http://cn-proxy.com/archives/218'],
xpath="//table[@class='sortable']/tbody/tr",
item_xpath={
'address':'td[1]/text()',
'port':'td[2]/text()',
'protocol':None,
}
)
MIMIIP_PATTERN=dict(
allowed_domains=["mimiip.com"],
start_urls=['http://www.mimiip.com/gngao/%s' % n for n in range(1, 10)],
xpath="//table[@class='list']/tr[position()>1]",
item_xpath={
'address':'td[1]/text()',
'port':'td[2]/text()',
'protocol':None,
}
)
INCLOAK_PATTERN=dict(
allowed_domains=["incloak.com"],
start_urls=['http://incloak.com/proxy-list/%s#list' % n for n in ([''] + ['?start=%s' % (64 * m) for m in range(1, 10)])],
xpath="//table[@class='proxy__t']/tbody/tr",
item_xpath={
'address':'td[1]/text()',
'port':'td[2]/text()',
'protocol':None,
}
)
KUAIDAILI_PATTERN=dict(
allowed_domains=["kuaidaili.com"],
start_urls=['http://www.kuaidaili.com/proxylist/%s/' % n for n in range(1, 11)] + ['http://www.kuaidaili.com/free/%s/%s/' %(m, n) for m in ['inha','intr','outha','outtr'] for n in range(1,11)],
xpath="//*[@id='freelist' or 'list']/table/tbody/tr",
item_xpath={
'address':'td[1]/text()',
'port':'td[2]/text()',
'protocol':'td[4]/text()',
}
)
CZ88_PATTERN=dict(
allowed_domains=["cz88.net"],
start_urls=['http://www.cz88.net/proxy/%s' % m for m in ['index.shtml'] + ['http_%s.shtml' % n for n in range(2, 11)]],
xpath="//*[@id='boxright']/div/ul/li[position()>1]",
item_xpath={
'address':'td[1]/text()',
'port':'td[2]/text()',
'protocol':'td[3]/text()',
}
)
IP181_PATTERN=dict(
allowed_domains=["ip181.com"],
start_urls=['http://www.ip181.com/daili/%s.html' % n for n in range(1, 11)],
xpath="//div[@class='row']/div[3]/table/tbody/tr[position()>1]",
item_xpath={
'address':'td[1]/text()',
'port':'td[2]/text()',
'protocol':'td[4]/text()',
}
)
#special
GBJ_PATTERN=dict(
allowed_domains=["goubanjia.com"],
start_urls=["http://www.goubanjia.com"],
)
HMA_PATTERN=dict(
allowed_domains=["proxylist.hidemyass.com"],
start_urls=['http://proxylist.hidemyass.com/'],
)
FPL_PATTERN=dict(
allowed_domains = ["freeproxylists.net"],
start_urls = ['http://freeproxylists.net/country/'],
)
MIMVP_PATTERN=dict(
allowed_domains = ["proxy.mimvp.com"],
start_urls = ['https://proxy.mimvp.com/free.php?proxy={}&sort=&page={}'.format(i,j) for i in ['in_hp','out_hp','in_socks','out_socks'] for j in range(1,10)],
xpath="//div[@class='free-list']/table/tbody/tr",
item_xpath={
'address':'td[2]/text()',
'port':None,
'protocol':'td[4]/text()',
}
)
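# Illustrative sketch (not part of this project): a generic spider could
# consume one of the *_PATTERN dicts above roughly as below; the spider name
# and item handling are hypothetical placeholders.
#   import scrapy
#   class GenericProxySpider(scrapy.Spider):
#       name = "generic_proxy"
#       pattern = XICI_PATTERN
#       start_urls = pattern["start_urls"]
#       def parse(self, response):
#           for row in response.xpath(self.pattern["xpath"]):
#               yield {field: (row.xpath(sel).get() if sel else None)
#                      for field, sel in self.pattern["item_xpath"].items()}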
#***********************CUSTOM SETTINGS END*********************************
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'proxyspider (+http://www.yourdomain.com)'
# Obey robots.txt rules
ROBOTSTXT_OBEY = False
# Configure maximum concurrent requests performed by Scrapy (default: 16)
CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See https://doc.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
DOWNLOAD_DELAY = 2
# The download delay setting will honor only one of:
CONCURRENT_REQUESTS_PER_DOMAIN = 16
CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
DEFAULT_REQUEST_HEADERS = {
'User-Agent' : 'Mozilla/5.0 (Windows; Windows NT 5.1; en-US; rv:1.7.8) Gecko/20050511 Firefox/1.0.4',
'Accept' : 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8'
}
# Enable or disable spider middlewares
# See https://doc.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'proxyspider.middlewares.ProxyspiderSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
DOWNLOADER_MIDDLEWARES = {
'proxyspider.middlewares.randomUseragent.randomUseragent': 543,
# 'proxyspider.middlewares.randomProxy.randomProxy': 544,
}
# Enable or disable extensions
# See https://doc.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See https://doc.scrapy.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
'proxyspider.pipelines.json.dumpTo': 300,
# 'proxyspider.pipelines.mysql.writeTo': 301,
# 'proxyspider.pipelines.csv.writeTo': 302,
# 'proxyspider.pipelines.csv.hashWriteTo': 303,
# 'proxyspider.pipelines.mongo.writeTo': 304,
# 'proxyspider.pipelines.redis.writeToSet': 305,
}
# Enable and configure the AutoThrottle extension (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
HTTPCACHE_ENABLED = True
HTTPCACHE_EXPIRATION_SECS = 0
HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
|
the-stack_0_18124 | '''
Python Logging Blueprint for Multiple Loggers.
'''
import argparse
import logging
from blueprints.loggers import basic_logger
from blueprints.loggers import logging_utils as lu
def create_multiple_loggers():
"""
    Create a parent logger together with its child loggers, as described by
    logging_config.json: a 'parent' entry plus a list of 'children' entries,
    each giving a logger name and a logging level.
    Each logger is created via basic_logger.create_basic_logger, i.e. with a
    StreamHandler that sends messages to stdout and whose level is set to the
    configured value. The logging level must be numeric. Typically it is one
    of the constants found in the logging module (ex. logging.INFO) but it can
    be any number; for example, setting it to logging.CRITICAL + 1 will turn
    off the handler.
    Child names are prefixed with the parent name (parent.child), which is how
    the logging module establishes the parent/child relationship, so messages
    propagate from the children to the parent's handlers.
    IMPORTANT: If the logger is set to NOTSET then the logger will propagate
    to the parent regardless of how the propagate property is set.
"""
logging_config = lu.get_logging_metadata('logging_config.json')
# Create the parent logger.
parent_name = logging_config['parent']['name']
parent_level = lu.convert_logging_level(logging_config['parent']['level'])
basic_logger.create_basic_logger(parent_name, parent_level)
#parent_logger = logging.getLogger(parent_name)
#parent_logger.setLevel(parent_level)
# Create the child loggers. Note that the parent name is added as a prefix to the child names.
# This is a requirement of the logging module. It establishes a parent/child relationship within
# the logger.
for child in logging_config['children']:
child_name = parent_name + '.' + child['name']
child_level = lu.convert_logging_level(child['level'])
basic_logger.create_basic_logger(child_name, child_level)
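# For reference, create_multiple_loggers expects logging_config.json to look
# roughly like the sketch below. The key names come from the code above; the
# concrete logger names and levels are illustrative, and the accepted level
# format depends on logging_utils.convert_logging_level.
#   {
#       "parent": {"name": "app", "level": "INFO"},
#       "children": [
#           {"name": "db", "level": "DEBUG"},
#           {"name": "api", "level": "WARNING"}
#       ]
#   }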
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-pl', '--print_levels',
help='Print all log levels and their numeric values.',
action='store_true')
parser.add_argument('-ph', '--print_handlers',
help='Print all handlers within all loggers.',
action='store_true')
parser.add_argument('-pal', '--print_all_loggers',
help='Print all loggers.',
action='store_true')
parser.add_argument('-s', '--sample_logger',
help='Logger to use for sending sample messages.')
args = parser.parse_args()
# Create the parent logger.
create_multiple_loggers()
if args.print_all_loggers:
lu.print_all_loggers()
if args.print_handlers:
lu.print_handlers()
if args.print_levels:
lu.print_logging_levels()
# Send sample messages to the specified logger.
if args.sample_logger:
lu.log_sample_messages(args.sample_logger)
|
the-stack_0_18125 | from keycloak import KeycloakOpenID
import pprint
keycloak_openid = KeycloakOpenID(server_url="https://localhost:8181/auth/",
client_id="example_client",
realm_name="example_realm",
client_secret_key="0f0f6195-41bb-4077-bd30-469964328ba0",
verify=False)
config_well_know = keycloak_openid.well_know()
token = keycloak_openid.token("example_client", "0f0f6195-41bb-4077-bd30-469964328ba0", grant_type="client_credentials")
pprint.pprint(token)
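# Follow-up sketch (hedged): with python-keycloak the access token obtained
# above can typically be introspected, e.g.
#   token_info = keycloak_openid.introspect(token["access_token"])
#   pprint.pprint(token_info)
# Whether further calls such as userinfo succeed for a client_credentials
# token depends on the realm and client configuration.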
|
the-stack_0_18127 | """
The model atmosphere
====================
"""
import numpy as np
from artssat.atmosphere.cloud_box import CloudBox
from artssat.jacobian import JacobianBase
from artssat.retrieval import RetrievalBase, RetrievalQuantity
from artssat.atmosphere.catalogs import LineCatalog, Perrin
class TemperatureJacobian(JacobianBase):
def __init__(self,
quantity,
index,
p_grid = [],
lat_grid = [],
lon_grid = [],
hse = "on"):
super().__init__(quantity, index)
self.p_grid = p_grid
self.lat_grid = lat_grid
self.lon_grid = lon_grid
self.hse = hse
def _make_setup_kwargs(self, ws):
if self.p_grid.size == 0:
g1 = ws.p_grid
else:
g1 = self.p_grid
if self.lat_grid.size == 0:
g2 = ws.lat_grid
else:
g2 = self.lat_grid
if self.lon_grid.size == 0:
g3 = ws.lon_grid
else:
g3 = self.lon_grid
kwargs = {"g1" : g1, "g2" : g2, "g3" : g3,
"hse" : self.hse}
return kwargs
def setup(self, ws, data_provider, *args, **kwargs):
kwargs = self._make_setup_kwargs(ws)
ws.jacobianAddTemperature(**kwargs)
class TemperatureRetrieval(RetrievalBase, TemperatureJacobian):
def __init__(self,
quantity,
index,
p_grid = [],
lat_grid = [],
lon_grid = [],
hse = "on"):
RetrievalBase.__init__(self)
TemperatureJacobian.__init__(self, quantity, index,
p_grid, lat_grid, lon_grid, hse)
def add(self, ws):
ws.retrievalAddTemperature(**self._make_setup_kwargs(ws))
class Temperature(RetrievalQuantity):
def __init__(self, atmosphere):
super().__init__()
self.atmosphere = atmosphere
def get_data(self, ws, data_provider, *args, **kwargs):
t = data_provider.get_temperature(*args, **kwargs)
self.atmosphere.__check_dimensions__(t, "temperature")
ws.t_field = self.atmosphere.__reshape__(t)
def set_from_x(self, ws, xa):
x = self.transformation.invert(xa)
self.t_field = x
@property
def name(self):
return "temperature"
@property
def jacobian_class(self):
return TemperatureJacobian
@property
def retrieval_class(self):
return TemperatureRetrieval
class Atmosphere:
def __init__(self,
dimensions,
absorbers = [],
scatterers = [],
surface = None,
catalog = None):
self.__set_dimensions__(dimensions)
self._required_data = [("p_grid", dimensions[:1], False),
("temperature", dimensions, False),
("altitude", dimensions, False),
("surface_altitude", dimensions[1:], True)]
self.absorbers = absorbers
self.scatterers = scatterers
self.scattering = len(scatterers) > 0
self._dimensions = dimensions
self._cloud_box = CloudBox(n_dimensions = len(dimensions),
scattering = self.scattering)
self._surface_data_indices = []
self._surface = surface
self.temperature = Temperature(self)
if not surface is None:
nd = len(self._required_data)
self._required_data += surface.required_data
self.surface_data_indices = range(nd, len(self._required_data))
self._catalog = catalog
#
# Dimensions
#
def __set_dimensions__(self, dimensions):
if not type(dimensions) == tuple or not type(dimensions[0]) == int:
raise Exception("Dimensions of atmosphere must be given as a tuple "
"of integers.")
if not len(dimensions) in [1, 2, 3]:
raise Exception("The number of dimensions of the atmosphere "
"must be 1, 2 or 3.")
if not all([n >= 0 for n in dimensions]):
raise Exception("The dimension tuple must contain only positive "
"integers.")
else:
self._dimensions = dimensions
@property
def dimensions(self):
return self._dimensions
#
# Absorbers
#
@property
def absorbers(self):
return self._absorbers
@absorbers.setter
def absorbers(self, absorbers):
for a in absorbers:
self.__dict__[a.name] = a
self._required_data += [(a.name, self._dimensions, False)]
self._absorbers = absorbers
def add_absorber(self, absorber):
self.__dict__[absorber.name] = absorber
self._required_data += [(absorber.name, self._dimensions, False)]
self._absorbers += absorber
#
# Cloud box
#
@property
def cloud_box(self):
return self._cloud_box
#
# Catalog
#
@property
def catalog(self):
"""
Line catalog from which to read absorption line data.
"""
return self._catalog
@catalog.setter
def catalog(self, c):
if isinstance(c, LineCatalog) or c is None:
self._catalog = c
else:
raise ValueError("Line catalog must be of type LineCatalog.")
#
# Jacobian
#
def has_jacobian(self):
for a in self.absorbers:
if not a.jacobian is None:
return True
for b in self.scatterers:
for m in b.moments:
                if m.jacobian is not None:
                    return True
        return False
#
# Scatterers
#
@property
def scatterers(self):
return self._scatterers
@scatterers.setter
def scatterers(self, scatterers):
if not type(scatterers) is list:
raise ValueError("The 'scatterers' property can only be set to a list.")
for s in scatterers:
self.__dict__[s.name] = s
self._required_data += [(n, self._dimensions, False) \
for n in s.moment_names]
self._scatterers = scatterers
self.scattering = True
self._cloud_box = CloudBox(n_dimensions = len(self.dimensions),
scattering = self.scattering)
def add_scatterer(self, scatterer):
self.__dict__[scatterer.name] = scatterer
self._required_data += [(n, self._dimensions, False) \
for n in scatterer.moment_names]
self._scatterers += [scatterer]
self.scattering = True
self._cloud_box = CloudBox(n_dimensions = len(self.dimensions),
scattering = self.scattering)
#
# Surface
#
@property
def surface(self):
return self._surface
@surface.setter
    def surface(self, s):
        if self._surface is not None:
            rd = [d for i, d in enumerate(self._required_data)
                  if i not in self.surface_data_indices]
            nd = len(rd)
            rd += s.required_data
            self._required_data = rd
            self.surface_data_indices = range(nd, len(rd))
        self._surface = s
@property
def required_data(self):
return self._required_data
#
# Setup
#
def __setup_absorption__(self, ws, sensors):
species = []
lineshapes = []
normalizations = []
cutoffs = []
for i, a in enumerate(self._absorbers):
a.setup(ws, i)
species += [a.get_tag_string()]
ws.abs_speciesSet(species = species)
# Set the line shape
if not self.catalog is None:
self.catalog.setup(ws, sensors)
ws.abs_lines_per_speciesCreateFromLines()
ws.abs_lines_per_speciesSetMirroring(option = "Same")
else:
for a in self._absorbers:
if a.from_catalog:
raise Exception("Absorber {} has from_catalog set to true "
"but no catalog is provided".format(a.name))
ws.abs_lines_per_speciesSetEmpty()
for i, a in enumerate(self._absorbers):
tag = a.get_tag_string()
cutoff = np.float32(a.cutoff)
cutoff_type = a.cutoff_type
#ws.abs_lines_per_speciesSetCutoffForSpecies(option = cutoff_type,
# value = cutoff,
# species_tag = tag)
lineshape = a.lineshape
ws.abs_lines_per_speciesSetLineShapeTypeForSpecies(option = lineshape,
species_tag = tag)
normalization = a.normalization
ws.abs_lines_per_speciesSetNormalizationForSpecies(option = normalization,
species_tag = tag)
ws.Copy(ws.abs_xsec_agenda, ws.abs_xsec_agenda__noCIA)
ws.Copy(ws.propmat_clearsky_agenda,
ws.propmat_clearsky_agenda__OnTheFly)
ws.lbl_checkedCalc()
def __setup_scattering__(self, ws):
ws.ScatSpeciesInit()
pb_names = []
for s in self._scatterers:
s.setup(ws, len(pb_names))
pb_names += s.moment_names
ws.particle_bulkprop_names = pb_names
def setup(self, ws, sensors):
if len(self.dimensions) == 1:
ws.AtmosphereSet1D()
if len(self.dimensions) == 2:
ws.AtmosphereSet2D()
if len(self.dimensions) == 3:
ws.AtmosphereSet3D()
self.__setup_absorption__(ws, sensors)
self.__setup_scattering__(ws)
self.surface.setup(ws)
self.cloud_box.setup(ws)
def setup_jacobian(self, ws):
for a in self.absorbers:
a.setup_jacobian(ws)
for s in self.scatterers:
for m in s.moments:
m.setup_jacobian(ws)
#
# Data
#
def __check_dimensions__(self, f, name):
s = f.shape
err = "Provided atmospheric " + name + " field"
err += " is inconsistent with the dimensions of the atmosphere."
if len(s) != len(self.dimensions):
raise Exception(err)
if not all([i == j or j == 0 for i,j \
in zip(s, self.dimensions)]):
raise Exception(err)
def __reshape__(self, f):
s = [1, 1, 1]
j = 0
for i in range(len(self.dimensions)):
if self.dimensions[0] > 0:
s[i] = self.dimensions[i]
else:
s[i] = f.shape[i]
return np.reshape(f, tuple(s))
def __get_pressure__(self, ws, provider, *args, **kwargs):
p = provider.get_pressure(*args, **kwargs).ravel()
if self.dimensions[0] != 0 and p.size != self.dimensions[0]:
raise Exception("Provided pressure grid is inconsistent with"
" dimensions of the atmosphere.")
ws.p_grid = p
def __get_altitude__(self, ws, provider, *args, **kwargs):
dimensions = ws.t_field.value.shape
z = provider.get_altitude(*args, **kwargs)
self.__check_dimensions__(z, "altitude")
z = self.__reshape__(z)
if not z.shape == dimensions:
raise Exception("Dimensions of altitude field inconsistent"
" with dimensions of temperature field.")
ws.z_field = z
# Surface altitude
dimensions = ws.t_field.value.shape
if hasattr(provider, "get_surface_altitude"):
zs = provider.get_surface_altitude(*args, **kwargs)
try:
zs = zs.reshape(dimensions[1:])
ws.z_surface = zs
except:
raise Exception("Shape " + str(zs.shape) + "of provided "
"surface altitude is inconsistent with "
"the horizontal dimensions of the "
"atmosphere " + str(dimensions) + ".")
else:
ws.z_surface = ws.z_field.value[0, :, :]
def __get_latitude__(self, ws, provider, *args, **kwargs):
if len(self.dimensions) > 1:
dimensions = ws.t_field.value.shape
lats = provider.get_latitude(*args, **kwargs)
ws.lat_grid = np.arange(lats.size)
ws.lat_true = lats
def __get_longitude__(self, ws, provider, *args, **kwargs):
if len(self.dimensions) > 1:
dimensions = ws.t_field.value.shape
lons = provider.get_longitude(*args, **kwargs)
ws.lon_true = lons
if len(self.dimensions) < 3:
ws.lon_grid = []
def __get_absorbers__(self, ws, provider, *args, **kwargs):
dimensions = ws.t_field.value.shape
ws.vmr_field = np.zeros((len(self.absorbers),) + dimensions)
for i, a in enumerate(self.absorbers):
if a.retrieval is None:
fname = "get_" + a.name
f = provider.__getattribute__(fname)
x = f(*args, **kwargs)
self.__check_dimensions__(x, a.name)
x = self.__reshape__(x)
if not x.shape == dimensions:
raise Exception("Dimensions of " + a.name + " VMR field "
"inconcistent with dimensions of temperature "
"field.")
ws.vmr_field.value[i, :, :, :] = x
i = 0
n_moments = sum([len(s.moment_names) for s in self.scatterers])
ws.particle_bulkprop_field = np.zeros(((n_moments,)
+ ws.t_field.value.shape))
def __get_scatterers__(self, ws, provider, *args, **kwargs):
dimensions = ws.t_field.value.shape
ws.cloudbox_on = 1
ws.cloudbox_limits = [0, dimensions[0] - 1,
0, dimensions[1] - 1,
0, dimensions[2] - 1]
#if not self.scatterers is None and len(self.scatterers) > 0:
# ws.cloudboxSetFullAtm()
for s in self.scatterers:
s.get_data(ws, provider, *args, **kwargs)
def get_data(self, ws, provider, *args, **kwargs):
self.__get_pressure__(ws, provider, *args, **kwargs)
self.temperature.get_data(ws, provider, *args, **kwargs)
self.__get_altitude__(ws, provider, *args, **kwargs)
self.__get_latitude__(ws, provider, *args, **kwargs)
self.__get_longitude__(ws, provider, *args, **kwargs)
self.__get_absorbers__(ws, provider, *args, **kwargs)
self.__get_scatterers__(ws, provider, *args, **kwargs)
self.cloud_box.get_data(ws, provider, *args, **kwargs)
self.surface.get_data(ws, provider, *args, **kwargs)
#
# Checks
#
def run_checks(self, ws):
ws.atmgeom_checkedCalc()
ws.atmfields_checkedCalc(bad_partition_functions_ok = 1)
ws.propmat_clearsky_agenda_checkedCalc()
ws.propmat_clearsky_agenda_checkedCalc()
ws.abs_xsec_agenda_checkedCalc()
self.cloud_box.run_checks(ws)
self.surface.run_checks(ws)
class Atmosphere1D(Atmosphere):
def __init__(self,
absorbers = [],
scatterers = [],
surface = None,
levels = None,
catalog = None):
if levels is None:
dimensions = (0,)
else:
if not type(levels) == int:
raise Exception("The number of levels of the 1D atmosphere "
"must be given by an integer.")
else:
                dimensions = (levels,)
super().__init__(dimensions,
absorbers = absorbers,
scatterers = scatterers,
surface = surface,
catalog = catalog)
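# Illustrative construction sketch (hedged: `o2`, `h2o`, `ice` and `surface`
# stand for absorber, scatterer and surface objects defined elsewhere in
# artssat; they are placeholders, not names from this module):
#   atmosphere = Atmosphere1D(absorbers=[o2, h2o],
#                             scatterers=[ice],
#                             surface=surface,
#                             levels=60)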
class Atmosphere2D(Atmosphere):
def __init__(self,
absorbers=[],
scatterers=[],
surface=None,
levels=None,
catalog=None):
if levels is None:
dimensions = (0, 0)
super().__init__(dimensions,
absorbers=absorbers,
scatterers=scatterers,
surface=surface,
catalog=catalog)
|
the-stack_0_18128 | from random import randint
from PIL import Image
from pathlib import Path
import tempfile
import os
def insert_image_to_other(base_image, insert_image, coord=None):
"""
    Insert one image on top of another, in order to optimize space usage and reduce the number of images to maintain.
    :param base_image: path to the source image which will be sent to the back. \
        The final image will use the suffix of this file.
    :param insert_image: path to the image to be added on top of the base image
    :param coord: Tuple with x and y coordinates. If given, it forces the image to be inserted at that coordinate.
        If no coord is given, a random one will be generated.
    :return: Tuple with the path to the final image and the coordinates of the top-left point of insertion.
"""
_base_image = Image.open(base_image)
_insert_image = Image.open(insert_image)
if not coord:
coord = (
randint(0, _base_image.size[0] - _insert_image.size[0]),
randint(0, _base_image.size[1] - _insert_image.size[1])
)
_base_image.paste(_insert_image, coord)
file = tempfile.TemporaryFile().name+Path(base_image).suffix
_base_image.save(os.path.abspath(file))
return file, coord
def generate_similar_images(base_image, image_1, image_2):
"""
Will insert 2 images into a base image at the same location.
:param base_image: path to image which both imates will be inserted.
:param image_1: path to first image to be inserted
:param image_2: path to second image to be inserted
:return: tuple with path to image_1 and image_2 inserted to baseline.
"""
file_1, coord = insert_image_to_other(base_image, image_1)
file_2, _ = insert_image_to_other(base_image, image_2, coord)
return file_1, file_2
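# Usage sketch (hedged: the file names below are placeholders):
#   img_a, img_b = generate_similar_images("background.png", "icon_a.png", "icon_b.png")
#   # img_a and img_b now differ only in the inserted icon, which is placed at
#   # the same random coordinates in both images.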
|
the-stack_0_18130 | import glob
import os
import subprocess
import sys
from distutils.version import LooseVersion
from typing import Iterable, List, Optional, Tuple
from scripts.lib.zulip_tools import get_dev_uuid_var_path
from version import PROVISION_VERSION
ZULIP_PATH = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
def get_major_version(v: str) -> int:
return int(v.split(".")[0])
def get_version_file() -> str:
uuid_var_path = get_dev_uuid_var_path()
return os.path.join(uuid_var_path, "provision_version")
PREAMBLE = """
Before we run tests, we make sure your provisioning version
is correct by looking at var/provision_version, which is at
version {}, and we compare it to the version in source
control (version.py), which is {}.
"""
def preamble(version: str) -> str:
text = PREAMBLE.format(version, PROVISION_VERSION)
text += "\n"
return text
NEED_TO_DOWNGRADE = """
It looks like you checked out a branch that expects an older
version of dependencies than the version you provisioned last.
This may be ok, but it's likely that you either want to rebase
your branch on top of upstream/master or re-provision your VM.
Do this: `./tools/provision`
"""
NEED_TO_UPGRADE = """
It looks like you checked out a branch that has added
dependencies beyond what you last provisioned. Your command
is likely to fail until you add dependencies by provisioning.
Do this: `./tools/provision`
"""
def get_provisioning_status() -> Tuple[bool, Optional[str]]:
version_file = get_version_file()
if not os.path.exists(version_file):
# If the developer doesn't have a version_file written by
# a previous provision, then we don't do any safety checks
# here on the assumption that the developer is managing
# their own dependencies and not running provision.
return True, None
with open(version_file) as f:
version = f.read().strip()
# Normal path for people that provision--we're all good!
if version == PROVISION_VERSION:
return True, None
# We may be more provisioned than the branch we just moved to. As
# long as the major version hasn't changed, then we should be ok.
if LooseVersion(version) > LooseVersion(PROVISION_VERSION):
if get_major_version(version) == get_major_version(PROVISION_VERSION):
return True, None
else:
return False, preamble(version) + NEED_TO_DOWNGRADE
return False, preamble(version) + NEED_TO_UPGRADE
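# Illustrative reading of the logic above (version strings are hypothetical):
# - provisioned "49.2" vs. branch PROVISION_VERSION "49.1": same major version
#   and the local provision is newer, so the check passes.
# - provisioned "50.0" vs. "49.1": major versions differ, so NEED_TO_DOWNGRADE
#   is shown.
# - provisioned "49.0" vs. "49.1": the branch expects newer dependencies, so
#   NEED_TO_UPGRADE is shown.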
def assert_provisioning_status_ok(force: bool) -> None:
if not force:
ok, msg = get_provisioning_status()
if not ok:
print(msg)
print("If you really know what you are doing, use --force to run anyway.")
sys.exit(1)
def find_js_test_files(test_dir: str, files: Iterable[str]) -> List[str]:
test_files = []
for file in files:
for file_name in os.listdir(test_dir):
if file_name.startswith(file):
file = file_name
break
if not os.path.exists(file):
file = os.path.join(test_dir, file)
test_files.append(os.path.abspath(file))
if not test_files:
test_files = sorted(glob.glob(os.path.join(test_dir, "*.js")))
return test_files
def prepare_puppeteer_run() -> None:
os.chdir(ZULIP_PATH)
subprocess.check_call(["node", "node_modules/puppeteer/install.js"])
os.makedirs("var/puppeteer", exist_ok=True)
for f in glob.glob("var/puppeteer/puppeteer-failure*.png"):
os.remove(f)
|
the-stack_0_18131 | # This file is part of Indico.
# Copyright (C) 2002 - 2022 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from flask import flash, redirect, request, session
from sqlalchemy.orm import joinedload
from werkzeug.datastructures import MultiDict
from werkzeug.exceptions import Forbidden, NotFound
from indico.core.db import db
from indico.core.db.sqlalchemy.util.session import no_autoflush
from indico.modules.auth.util import redirect_to_login
from indico.modules.events.controllers.base import RHDisplayEventBase
from indico.modules.events.models.events import EventType
from indico.modules.events.surveys.models.anonymous_submissions import AnonymousSurveySubmission
from indico.modules.events.surveys.models.submissions import SurveyAnswer, SurveySubmission
from indico.modules.events.surveys.models.surveys import Survey, SurveyState
from indico.modules.events.surveys.util import (is_submission_in_progress, make_survey_form, query_active_surveys,
save_submitted_survey_to_session, was_survey_submitted)
from indico.modules.events.surveys.views import WPDisplaySurveyConference, WPDisplaySurveySimpleEvent
from indico.util.date_time import now_utc
from indico.util.i18n import _
from indico.web.flask.util import url_for
def _can_redirect_to_single_survey(surveys):
# Make sure redirection to first survey does not happen before login if user is required.
if not session.user and len(surveys) == 1 and surveys[0].is_active and surveys[0].require_user:
return False
return len(surveys) == 1 and surveys[0].is_active and not was_survey_submitted(surveys[0])
class RHSurveyBaseDisplay(RHDisplayEventBase):
@property
def view_class(self):
return WPDisplaySurveyConference if self.event.type_ == EventType.conference else WPDisplaySurveySimpleEvent
class RHSurveyList(RHSurveyBaseDisplay):
def _process(self):
surveys = (query_active_surveys(self.event)
.options(joinedload('questions'),
joinedload('submissions'))
.all())
if _can_redirect_to_single_survey(surveys):
return redirect(url_for('.display_survey_form', surveys[0]))
return self.view_class.render_template('display/survey_list.html', self.event,
surveys=surveys, states=SurveyState,
is_submission_in_progress=is_submission_in_progress,
was_survey_submitted=was_survey_submitted)
class RHSubmitSurveyBase(RHSurveyBaseDisplay):
normalize_url_spec = {
'locators': {
lambda self: self.survey
}
}
def _check_access(self):
RHSurveyBaseDisplay._check_access(self)
if self.survey.require_user and not session.user:
raise Forbidden(response=redirect_to_login(reason=_('You are trying to answer a survey '
'that requires you to be logged in')))
if self.survey.private and request.args.get('token') != self.survey.uuid and not self.submission:
# We don't use forbidden since that would redirect to login - but logging in won't help here
raise NotFound
def _process_args(self):
RHSurveyBaseDisplay._process_args(self)
self.survey = (Survey.query
.filter(Survey.id == request.view_args['survey_id'], Survey.is_visible)
.options(joinedload('submissions'),
joinedload('sections').joinedload('children'))
.one())
self.submission = (session.user.survey_submissions.filter_by(survey=self.survey, is_submitted=False).first()
if session.user else None)
if not self.survey.is_active:
flash(_('This survey is not active'), 'error')
return redirect(url_for('.display_survey_list', self.event))
elif was_survey_submitted(self.survey):
flash(_('You have already answered this survey'), 'error')
return redirect(url_for('.display_survey_list', self.event))
class RHSubmitSurvey(RHSubmitSurveyBase):
def _process(self):
form = self._make_form()
if form.validate_on_submit():
submission = self._save_answers(form)
if submission.is_anonymous and submission.user:
submission.user = None
self.survey.anonymous_submissions.append(AnonymousSurveySubmission(user=session.user))
submission.submitted_dt = now_utc()
submission.is_submitted = True
submission.pending_answers = {}
db.session.flush()
save_submitted_survey_to_session(submission)
self.survey.send_submission_notification(submission)
flash(_('The survey has been submitted'), 'success')
return redirect(url_for('.display_survey_list', self.event))
surveys = Survey.query.with_parent(self.event).filter(Survey.is_visible).all()
if not _can_redirect_to_single_survey(surveys):
back_button_endpoint = '.display_survey_list'
elif self.event.type_ != EventType.conference:
back_button_endpoint = 'events.display'
else:
back_button_endpoint = None
return self.view_class.render_template('display/survey_questionnaire.html', self.event,
form=form, survey=self.survey,
back_button_endpoint=back_button_endpoint,
partial_completion=self.survey.partial_completion)
def _make_form(self):
survey_form_class = make_survey_form(self.survey)
if self.submission and request.method == 'GET':
return survey_form_class(formdata=MultiDict(self.submission.pending_answers))
else:
return survey_form_class()
@no_autoflush
def _save_answers(self, form):
survey = self.survey
if not self.submission:
self.submission = SurveySubmission(survey=survey, user=session.user)
self.submission.is_anonymous = survey.anonymous
for question in survey.questions:
answer = SurveyAnswer(question=question, data=getattr(form, f'question_{question.id}').data)
self.submission.answers.append(answer)
return self.submission
class RHSaveSurveyAnswers(RHSubmitSurveyBase):
def _check_access(self):
RHSubmitSurveyBase._check_access(self)
if not self.survey.partial_completion or not session.user:
raise Forbidden
def _process(self):
pending_answers = {k: v for k, v in request.form.lists() if k.startswith('question_')}
if not self.submission:
self.submission = SurveySubmission(survey=self.survey, user=session.user)
self.submission.pending_answers = pending_answers
self.submission.is_anonymous = self.survey.anonymous
|
the-stack_0_18132 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Antidote documentation build configuration file, created by
# sphinx-quickstart on Sat Oct 14 22:07:20 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# sys.path.insert(0, os.path.abspath('..'))
# sys.path.insert(0, os.path.abspath('_themes'))
import antidote
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.coverage',
'sphinx.ext.githubpages',
'sphinx.ext.napoleon',
'sphinx_autodoc_typehints',
'sphinx.ext.autosectionlabel',
'sphinx.ext.intersphinx'
]
# Python code that is treated like it were put in a testsetup directive for
# every file that is tested, and for every group; the cleanup counterpart
# below behaves like a testcleanup directive.
doctest_global_setup = """
from antidote._internal import state
state.init()
"""
doctest_global_cleanup = """
from antidote._internal import state
state.reset()
"""
autodoc_member_order = 'bysource'
autoclass_content = "both"
# This config value contains the locations and names of other projects
# that should be linked to in this documentation.
intersphinx_mapping = {
'python': ('https://docs.python.org/3', None)
}
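# With this mapping, cross-references such as :class:`python:dict` or
# :func:`python:len` resolve to the linked Python documentation.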
# Prefix each section label with the name of the document it is in.
autosectionlabel_prefix_document = True
autosectionlabel_maxdepth = 2
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'Antidote'
copyright = '2017, Benjamin Rabier'
author = 'Benjamin Rabier'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The full version, including alpha/beta/rc tag.
release = antidote.__version__
# The short X.Y version.
version = release.rsplit(".", 1)[0]
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
# pygments_style = 'flask_theme_support.FlaskyStyle'
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#
add_module_names = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# html_theme = "sphinx_rtd_theme"
# html_theme = 'alabaster'
html_theme = "sphinx_rtd_theme"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {
# 'show_powered_by': False,
# 'github_user': 'Finistere',
# 'github_repo': 'antidote',
# 'github_banner': True,
# 'show_related': False,
# 'note_bg': '#FFF59C'
# }
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# This is required for the alabaster theme
# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
html_sidebars = {
'**': [
'about.html',
'navigation.html',
'relations.html', # needs 'show_related': True theme option to display
'searchbox.html',
'donate.html',
]
}
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'Antidotedoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'Antidote.tex', 'Antidote Documentation',
'Benjamin Rabier', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'dependency manager', 'Antidote Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'Antidote', 'Antidote Documentation',
author, 'Antidote', 'One line description of project.',
'Miscellaneous'),
]
# def do_not_skip_antidote(app, what, name, obj, skip, options):
# return (name != "__antidote__") and skip
def setup(app):
# Fix image path
with open('../README.rst', 'r') as readme, \
open('_build/README.rst', 'w') as build_readme:
build_readme.write(readme.read()
.replace('docs/_static/img', '_static/img')
.replace('.. code-block:: python',
'.. testcode:: readme'))
app.add_css_file('css/style.css') # may also be an URL
|
the-stack_0_18133 | from PyFreatboard.song_xml import parse_song_xml
from PyFreatboard.build_shape import BuildShape
from PyFreatboard.draw_freatboard import DrawFreatboard
from os.path import join
class Song:
"""Song class"""
def __init__(self, xml_file_path):
"""Parse XML song file into Song Object"""
self.title, self.author, self.sections = parse_song_xml(xml_file_path)
self.scale_shapes = Song.__get_scales__(self.sections)
self.arpeggio_shapes = Song.__get_arpeggios__(self.sections)
self.drop2_shapes = Song.__get_drops2__(self.sections)
def get_scales(self):
return self.scale_shapes
def get_arpeggios(self):
return self.arpeggio_shapes
def get_drops2(self):
return self.drop2_shapes
def get_melody(self, melody_id):
melody = []
for s in self.sections:
for c in s.chords:
for m in c.melody.keys():
if m == melody_id:
melody += c.melody[m]
return melody
def __str__(self):
return "{} by {}".format(self.title, self.author) + "\n- " + "\n- ".join(str(s) for s in self.sections)
@staticmethod
def __get_scales__(sections):
"""Get all scale shapes from song"""
shapes = {}
for section in sections:
key = section.root + section.type
if key not in shapes:
shapes[key] = BuildShape(section.root, section.type).all_shapes
return shapes
@staticmethod
def __get_arpeggios__(sections):
"""Get all shapes from song"""
shapes = {}
for section in sections:
for chord in section.chords:
key = chord.root + chord.type
if key not in shapes:
shapes[key] = BuildShape(chord.root, chord.type).all_shapes
return shapes
@staticmethod
def __get_drops2__(sections):
shapes = {}
for section in sections:
for chord in section.chords:
key = chord.root + chord.type
if key not in shapes:
sh = BuildShape(chord.root, chord.type)
shapes[key] = sh.build_drop(drop=2, bass_string='D')
return shapes
@staticmethod
def draw_shapes(shapes, path='.', init_freat=None, vertical=False):
draw = DrawFreatboard()
for shape_name, all_shapes in zip(shapes.keys(), shapes.values()):
for e, shape in enumerate(all_shapes):
if shape.valid:
min_freat = shape.get_max_min_freat()[1]
if init_freat is None or min_freat == init_freat or (min_freat + 1) == init_freat:
if vertical:
f = draw.draw_shape_vertical(shape, shape_name=shape_name, return_fig=True)
else:
f = draw.draw_shape(shape, shape_name=shape_name, return_fig=True)
f.savefig(join(path, '__{}_{}.png'.format(shape_name, e)), bbox_inches='tight')
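# Usage sketch (hedged: the XML path and output directory are placeholders):
#   song = Song("my_song.xml")
#   Song.draw_shapes(song.get_arpeggios(), path="shapes", init_freat=5, vertical=True)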
|
the-stack_0_18134 | """Support for MySensors sensors."""
from typing import Callable
from awesomeversion import AwesomeVersion
from homeassistant.components import mysensors
from homeassistant.components.mysensors import on_unload
from homeassistant.components.mysensors.const import MYSENSORS_DISCOVERY
from homeassistant.components.sensor import DOMAIN, SensorEntity
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
CONDUCTIVITY,
DEGREE,
ELECTRICAL_CURRENT_AMPERE,
ELECTRICAL_VOLT_AMPERE,
ENERGY_KILO_WATT_HOUR,
FREQUENCY_HERTZ,
LENGTH_METERS,
LIGHT_LUX,
MASS_KILOGRAMS,
PERCENTAGE,
POWER_WATT,
TEMP_CELSIUS,
TEMP_FAHRENHEIT,
VOLT,
VOLUME_CUBIC_METERS,
)
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.typing import HomeAssistantType
SENSORS = {
"V_TEMP": [None, "mdi:thermometer"],
"V_HUM": [PERCENTAGE, "mdi:water-percent"],
"V_DIMMER": [PERCENTAGE, "mdi:percent"],
"V_PERCENTAGE": [PERCENTAGE, "mdi:percent"],
"V_PRESSURE": [None, "mdi:gauge"],
"V_FORECAST": [None, "mdi:weather-partly-cloudy"],
"V_RAIN": [None, "mdi:weather-rainy"],
"V_RAINRATE": [None, "mdi:weather-rainy"],
"V_WIND": [None, "mdi:weather-windy"],
"V_GUST": [None, "mdi:weather-windy"],
"V_DIRECTION": [DEGREE, "mdi:compass"],
"V_WEIGHT": [MASS_KILOGRAMS, "mdi:weight-kilogram"],
"V_DISTANCE": [LENGTH_METERS, "mdi:ruler"],
"V_IMPEDANCE": ["ohm", None],
"V_WATT": [POWER_WATT, None],
"V_KWH": [ENERGY_KILO_WATT_HOUR, None],
"V_LIGHT_LEVEL": [PERCENTAGE, "mdi:white-balance-sunny"],
"V_FLOW": [LENGTH_METERS, "mdi:gauge"],
"V_VOLUME": [f"{VOLUME_CUBIC_METERS}", None],
"V_LEVEL": {
"S_SOUND": ["dB", "mdi:volume-high"],
"S_VIBRATION": [FREQUENCY_HERTZ, None],
"S_LIGHT_LEVEL": [LIGHT_LUX, "mdi:white-balance-sunny"],
},
"V_VOLTAGE": [VOLT, "mdi:flash"],
"V_CURRENT": [ELECTRICAL_CURRENT_AMPERE, "mdi:flash-auto"],
"V_PH": ["pH", None],
"V_ORP": ["mV", None],
"V_EC": [CONDUCTIVITY, None],
"V_VAR": ["var", None],
"V_VA": [ELECTRICAL_VOLT_AMPERE, None],
}
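# Note: most entries map a value type directly to [unit, icon]; "V_LEVEL" is
# nested one level deeper and is resolved per presentation type (e.g.
# S_SOUND -> dB) in MySensorsSensor._get_sensor_type below.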
async def async_setup_entry(
hass: HomeAssistantType, config_entry: ConfigEntry, async_add_entities: Callable
):
"""Set up this platform for a specific ConfigEntry(==Gateway)."""
async def async_discover(discovery_info):
"""Discover and add a MySensors sensor."""
mysensors.setup_mysensors_platform(
hass,
DOMAIN,
discovery_info,
MySensorsSensor,
async_add_entities=async_add_entities,
)
await on_unload(
hass,
config_entry,
async_dispatcher_connect(
hass,
MYSENSORS_DISCOVERY.format(config_entry.entry_id, DOMAIN),
async_discover,
),
)
class MySensorsSensor(mysensors.device.MySensorsEntity, SensorEntity):
"""Representation of a MySensors Sensor child node."""
@property
def force_update(self):
"""Return True if state updates should be forced.
If True, a state change will be triggered anytime the state property is
updated, not just when the value changes.
"""
return True
@property
def state(self):
"""Return the state of the device."""
return self._values.get(self.value_type)
@property
def icon(self):
"""Return the icon to use in the frontend, if any."""
icon = self._get_sensor_type()[1]
return icon
@property
def unit_of_measurement(self):
"""Return the unit of measurement of this entity."""
set_req = self.gateway.const.SetReq
if (
AwesomeVersion(self.gateway.protocol_version) >= AwesomeVersion("1.5")
and set_req.V_UNIT_PREFIX in self._values
):
return self._values[set_req.V_UNIT_PREFIX]
unit = self._get_sensor_type()[0]
return unit
def _get_sensor_type(self):
"""Return list with unit and icon of sensor type."""
pres = self.gateway.const.Presentation
set_req = self.gateway.const.SetReq
SENSORS[set_req.V_TEMP.name][0] = (
TEMP_CELSIUS if self.hass.config.units.is_metric else TEMP_FAHRENHEIT
)
sensor_type = SENSORS.get(set_req(self.value_type).name, [None, None])
if isinstance(sensor_type, dict):
sensor_type = sensor_type.get(pres(self.child_type).name, [None, None])
return sensor_type
|
the-stack_0_18135 | import copy
import logging
import os
import math
import pprint
import time
from typing import Union
import numpy as np
import pandas as pd
import networkx as nx
from autogluon.core.data.label_cleaner import LabelCleanerMulticlassToBinary
from autogluon.core.dataset import TabularDataset
from autogluon.core.scheduler.scheduler_factory import scheduler_factory
from autogluon.core.constants import BINARY, MULTICLASS, REGRESSION, QUANTILE, AUTO_WEIGHT, BALANCE_WEIGHT
from autogluon.core.utils import plot_performance_vs_trials, plot_summary_of_models, plot_tabular_models
from autogluon.core.utils import get_pred_from_proba_df, set_logger_verbosity
from autogluon.core.utils.loaders import load_pkl
from autogluon.core.utils.savers import save_pkl
from autogluon.core.utils.utils import setup_outputdir, default_holdout_frac, get_approximate_df_mem_usage
from autogluon.core.utils.decorators import apply_presets
from ..configs.hyperparameter_configs import get_hyperparameter_config
from ..configs.feature_generator_presets import get_default_feature_generator
from ..configs.presets_configs import tabular_presets_dict
from ..learner import AbstractLearner, DefaultLearner
from ..trainer import AbstractTrainer
logger = logging.getLogger() # return root logger
# TODO: num_bag_sets -> ag_args
# Extra TODOs (Stretch): Can occur post v0.1
# TODO: make core_kwargs a kwargs argument to predictor.fit
# TODO: add aux_kwargs to predictor.fit
# TODO: add pip freeze + python version output after fit + log file, validate that same pip freeze on load as cached
# TODO: predictor.clone()
# TODO: Add logging comments that models are serialized on disk after fit
# TODO: consider adding kwarg option for data which has already been preprocessed by feature generator to skip feature generation.
# TODO: Resolve raw text feature usage in default feature generator
# Done for Tabular
# TODO: Remove all `time_limits` in project, replace with `time_limit`
class TabularPredictor:
"""
AutoGluon TabularPredictor predicts values in a column of a tabular dataset (classification or regression).
Parameters
----------
label : str
Name of the column that contains the target variable to predict.
problem_type : str, default = None
Type of prediction problem, i.e. is this a binary/multiclass classification or regression problem (options: 'binary', 'multiclass', 'regression', 'quantile').
If `problem_type = None`, the prediction problem type is inferred based on the label-values in provided dataset.
eval_metric : function or str, default = None
Metric by which predictions will be ultimately evaluated on test data.
AutoGluon tunes factors such as hyperparameters, early-stopping, ensemble-weights, etc. in order to improve this metric on validation data.
If `eval_metric = None`, it is automatically chosen based on `problem_type`.
Defaults to 'accuracy' for binary and multiclass classification, 'root_mean_squared_error' for regression, and 'pinball_loss' for quantile.
Otherwise, options for classification:
['accuracy', 'balanced_accuracy', 'f1', 'f1_macro', 'f1_micro', 'f1_weighted',
'roc_auc', 'roc_auc_ovo_macro', 'average_precision', 'precision', 'precision_macro', 'precision_micro',
'precision_weighted', 'recall', 'recall_macro', 'recall_micro', 'recall_weighted', 'log_loss', 'pac_score']
Options for regression:
['root_mean_squared_error', 'mean_squared_error', 'mean_absolute_error', 'median_absolute_error', 'r2']
For more information on these options, see `sklearn.metrics`: https://scikit-learn.org/stable/modules/classes.html#sklearn-metrics-metrics
You can also pass your own evaluation function here as long as it follows formatting of the functions defined in folder `autogluon.core.metrics`.
path : str, default = None
Path to directory where models and intermediate outputs should be saved.
If unspecified, a time-stamped folder called "AutogluonModels/ag-[TIMESTAMP]" will be created in the working directory to store all models.
Note: To call `fit()` twice and save all results of each fit, you must specify different `path` locations or don't specify `path` at all.
Otherwise files from first `fit()` will be overwritten by second `fit()`.
verbosity : int, default = 2
Verbosity levels range from 0 to 4 and control how much information is printed.
Higher levels correspond to more detailed print statements (you can set verbosity = 0 to suppress warnings).
If using logging, you can alternatively control amount of information printed via `logger.setLevel(L)`,
where `L` ranges from 0 to 50 (Note: higher values of `L` correspond to fewer print statements, opposite of verbosity levels).
sample_weight : str, default = None
If specified, this column-name indicates which column of the data should be treated as sample weights. This column will NOT be considered as a predictive feature.
Sample weights should be non-negative (and cannot be nan), with larger values indicating which rows are more important than others.
If you want your usage of sample weights to match results obtained outside of this Predictor, then ensure sample weights for your training (or tuning) data sum to the number of rows in the training (or tuning) data.
You may also specify two special strings: 'auto_weight' (automatically choose a weighting strategy based on the data) or 'balance_weight' (equally weight classes in classification, no effect in regression). If specifying your own sample_weight column, make sure its name does not match these special strings.
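Example (illustrative; the column name 'weight' is a placeholder): `TabularPredictor(label='class', sample_weight='weight')`, where the 'weight' column of `train_data` holds non-negative row weights and is automatically excluded from the predictive features.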
weight_evaluation : bool, default = False
Only considered when `sample_weight` column is not None. Determines whether sample weights should be taken into account when computing evaluation metrics on validation/test data.
If True, then weighted metrics will be reported based on the sample weights provided in the specified `sample_weight` (in which case `sample_weight` column must also be present in test data).
In this case, the 'best' model used by default for prediction will also be decided based on a weighted version of evaluation metric.
Note: we do not recommend specifying `weight_evaluation` when `sample_weight` is 'auto_weight' or 'balance_weight', instead specify appropriate `eval_metric`.
**kwargs :
learner_type : AbstractLearner, default = DefaultLearner
A class which inherits from `AbstractLearner`. This dictates the inner logic of predictor.
If you don't know what this is, keep it as the default.
learner_kwargs : dict, default = None
Kwargs to send to the learner. Options include:
positive_class : str or int, default = None
Used to determine the positive class in binary classification.
This is used for certain metrics such as 'f1' which produce different scores depending on which class is considered the positive class.
If not set, will be inferred as the second element of the existing unique classes after sorting them.
If classes are [0, 1], then 1 will be selected as the positive class.
If classes are ['def', 'abc'], then 'def' will be selected as the positive class.
If classes are [True, False], then True will be selected as the positive class.
ignored_columns : list, default = None
Banned subset of column names that predictor may not use as predictive features (e.g. unique identifier to a row or user-ID).
These columns are ignored during `fit()`.
label_count_threshold : int, default = 10
For multi-class classification problems, this is the minimum number of times a label must appear in dataset in order to be considered an output class.
AutoGluon will ignore any classes whose labels do not appear at least this many times in the dataset (i.e. will never predict them).
cache_data : bool, default = True
When enabled, the training and validation data are saved to disk for future reuse.
Enables advanced functionality in predictor such as `fit_extra()` and feature importance calculation on the original data.
trainer_type : AbstractTrainer, default = AutoTrainer
A class inheriting from `AbstractTrainer` that controls training/ensembling of many models.
If you don't know what this is, keep it as the default.
Attributes
----------
path : str
Path to directory where all models used by this Predictor are stored.
problem_type : str
What type of prediction problem this Predictor has been trained for.
eval_metric : function or str
What metric is used to evaluate predictive performance.
label : str
Name of table column that contains data from the variable to predict (often referred to as: labels, response variable, target variable, dependent variable, Y, etc).
feature_metadata : :class:`autogluon.core.features.feature_metadata.FeatureMetadata`
Inferred data type of each predictive variable after preprocessing transformation (i.e. column of training data table used to predict `label`).
Contains both raw dtype and special dtype information. Each feature has exactly 1 raw dtype (such as 'int', 'float', 'category') and zero to many special dtypes (such as 'datetime_as_int', 'text', 'text_ngram').
Special dtypes are AutoGluon specific feature types that are used to identify features with meaning beyond what the raw dtype can convey.
`feature_metadata.type_map_raw`: Dictionary of feature name -> raw dtype mappings.
`feature_metadata.type_group_map_special`: Dictionary of lists of special feature names, grouped by special feature dtype.
positive_class : str or int
Returns the positive class name in binary classification. Useful for computing metrics such as F1 which require a positive and negative class.
In binary classification, :meth:`TabularPredictor.predict_proba` returns the estimated probability that each row belongs to the positive class.
Will print a warning and return None if called when `predictor.problem_type != 'binary'`.
class_labels : list
For multiclass problems, this list contains the class labels in sorted order of `predict_proba()` output.
For binary problems, this list contains the class labels in sorted order of `predict_proba(as_multiclass=True)` output.
`class_labels[0]` corresponds to internal label = 0 (negative class), `class_labels[1]` corresponds to internal label = 1 (positive class).
This is relevant for certain metrics such as F1 where True and False labels impact the metric score differently.
For other problem types, will equal None.
For example if `pred = predict_proba(x, as_multiclass=True)`, then ith index of `pred` provides predicted probability that `x` belongs to class given by `class_labels[i]`.
class_labels_internal : list
For multiclass problems, this list contains the internal class labels in sorted order of internal `predict_proba()` output.
For binary problems, this list contains the internal class labels in sorted order of internal `predict_proba(as_multiclass=True)` output.
The value will always be `class_labels_internal=[0, 1]` for binary problems, with 0 as the negative class, and 1 as the positive class.
For other problem types, will equal None.
class_labels_internal_map : dict
For binary and multiclass classification problems, this dictionary contains the mapping of the original labels to the internal labels.
For example, in binary classification, label values of 'True' and 'False' will be mapped to the internal representation `1` and `0`.
Therefore, class_labels_internal_map would equal {'True': 1, 'False': 0}
For other problem types, will equal None.
For multiclass, it is possible for not all of the label values to have a mapping.
This indicates that the internal models will never predict those missing labels, and training rows associated with the missing labels were dropped.
"""
Dataset = TabularDataset
predictor_file_name = 'predictor.pkl'
def __init__(
self,
label,
problem_type=None,
eval_metric=None,
path=None,
verbosity=2,
sample_weight=None,
weight_evaluation=False,
**kwargs
):
self.verbosity = verbosity
set_logger_verbosity(self.verbosity, logger=logger)
if sample_weight == AUTO_WEIGHT: # TODO: update auto_weight strategy and make it the default
sample_weight = None
logger.log(15, f"{AUTO_WEIGHT} currently does not use any sample weights.")
self.sample_weight = sample_weight
self.weight_evaluation = weight_evaluation # TODO: sample_weight and weight_evaluation can both be properties that link to self._learner.sample_weight, self._learner.weight_evaluation
if self.sample_weight in [AUTO_WEIGHT, BALANCE_WEIGHT] and self.weight_evaluation:
logger.warning(f"We do not recommend specifying weight_evaluation when sample_weight='{self.sample_weight}', instead specify appropriate eval_metric.")
self._validate_init_kwargs(kwargs)
path = setup_outputdir(path)
learner_type = kwargs.pop('learner_type', DefaultLearner)
learner_kwargs = kwargs.pop('learner_kwargs', dict())
quantile_levels = kwargs.get('quantile_levels', None)
self._learner: AbstractLearner = learner_type(path_context=path, label=label, feature_generator=None, eval_metric=eval_metric, problem_type=problem_type,
quantile_levels=quantile_levels,
sample_weight=self.sample_weight, weight_evaluation=self.weight_evaluation, **learner_kwargs)
self._learner_type = type(self._learner)
self._trainer = None
@property
def class_labels(self):
return self._learner.class_labels
@property
def class_labels_internal(self):
return self._learner.label_cleaner.ordered_class_labels_transformed
@property
def class_labels_internal_map(self):
return self._learner.label_cleaner.inv_map
@property
def quantile_levels(self):
return self._learner.quantile_levels
@property
def eval_metric(self):
return self._learner.eval_metric
@property
def problem_type(self):
return self._learner.problem_type
@property
def feature_metadata(self):
return self._trainer.feature_metadata
@property
def feature_metadata_in(self):
return self._learner.feature_generator.feature_metadata_in
@property
def label(self):
return self._learner.label
@property
def path(self):
return self._learner.path
@apply_presets(tabular_presets_dict)
def fit(self,
train_data,
tuning_data=None,
time_limit=None,
presets=None,
hyperparameters=None,
feature_metadata='infer',
**kwargs):
"""
Fit models to predict a column of a data table (label) based on the other columns (features).
Parameters
----------
train_data : str or :class:`TabularDataset` or :class:`pd.DataFrame`
Table of the training data, which is similar to a pandas DataFrame.
If str is passed, `train_data` will be loaded using the str value as the file path.
tuning_data : str or :class:`TabularDataset` or :class:`pd.DataFrame`, default = None
Another dataset containing validation data reserved for tuning processes such as early stopping and hyperparameter tuning.
This dataset should be in the same format as `train_data`.
If str is passed, `tuning_data` will be loaded using the str value as the file path.
Note: final model returned may be fit on `tuning_data` as well as `train_data`. Do not provide your evaluation test data here!
In particular, when `num_bag_folds` > 0 or `num_stack_levels` > 0, models will be trained on both `tuning_data` and `train_data`.
If `tuning_data = None`, `fit()` will automatically hold out some random validation examples from `train_data`.
time_limit : int, default = None
Approximately how long `fit()` should run for (wallclock time in seconds).
If not specified, `fit()` will run until all models have completed training, but will not repeatedly bag models unless `num_bag_sets` is specified.
presets : list or str or dict, default = ['medium_quality_faster_train']
List of preset configurations for various arguments in `fit()`. Can significantly impact predictive accuracy, memory-footprint, and inference latency of trained models, and various other properties of the returned `predictor`.
It is recommended to specify presets and avoid specifying most other `fit()` arguments or model hyperparameters prior to becoming familiar with AutoGluon.
As an example, to get the most accurate overall predictor (regardless of its efficiency), set `presets='best_quality'`.
To get good quality with minimal disk usage, set `presets=['good_quality_faster_inference_only_refit', 'optimize_for_deployment']`
Any user-specified arguments in `fit()` will override the values used by presets.
If specifying a list of presets, later presets will override earlier presets if they alter the same argument.
For precise definitions of the provided presets, see file: `autogluon/tabular/configs/presets_configs.py`.
Users can specify custom presets by passing in a dictionary of argument values as an element to the list.
Available Presets: ['best_quality', 'high_quality_fast_inference_only_refit', 'good_quality_faster_inference_only_refit', 'medium_quality_faster_train', 'optimize_for_deployment', 'ignore_text']
It is recommended to only use one `quality`-based preset in a given call to `fit()` as they alter many of the same arguments and are not compatible with each other.
In-depth Preset Info:
best_quality={'auto_stack': True}
Best predictive accuracy with little consideration to inference time or disk usage. Achieve even better results by specifying a large time_limit value.
Recommended for applications that benefit from the best possible model accuracy.
high_quality_fast_inference_only_refit={'auto_stack': True, 'refit_full': True, 'set_best_to_refit_full': True, '_save_bag_folds': False}
High predictive accuracy with fast inference. ~10x-200x faster inference and ~10x-200x lower disk usage than `best_quality`.
Recommended for applications that require reasonable inference speed and/or model size.
good_quality_faster_inference_only_refit={'auto_stack': True, 'refit_full': True, 'set_best_to_refit_full': True, '_save_bag_folds': False, 'hyperparameters': 'light'}
Good predictive accuracy with very fast inference. ~4x faster inference and ~4x lower disk usage than `high_quality_fast_inference_only_refit`.
Recommended for applications that require fast inference speed.
medium_quality_faster_train={'auto_stack': False}
Medium predictive accuracy with very fast inference and very fast training time. ~20x faster training than `good_quality_faster_inference_only_refit`.
This is the default preset in AutoGluon, but should generally only be used for quick prototyping, as `good_quality_faster_inference_only_refit` results in significantly better predictive accuracy and faster inference time.
optimize_for_deployment={'keep_only_best': True, 'save_space': True}
Optimizes result immediately for deployment by deleting unused models and removing training artifacts.
Often can reduce disk usage by ~2-4x with no negatives to model accuracy or inference speed.
This will disable numerous advanced functionality, but has no impact on inference.
This will make certain functionality less informative, such as `predictor.leaderboard()` and `predictor.fit_summary()`.
Because unused models will be deleted under this preset, methods like `predictor.leaderboard()` and `predictor.fit_summary()` will no longer show the full set of models that were trained during `fit()`.
Recommended for applications where the inner details of AutoGluon's training is not important and there is no intention of manually choosing between the final models.
This preset pairs well with the other presets such as `good_quality_faster_inference_only_refit` to make a very compact final model.
Identical to calling `predictor.delete_models(models_to_keep='best', dry_run=False)` and `predictor.save_space()` directly after `fit()`.
ignore_text={'_feature_generator_kwargs': {'enable_text_ngram_features': False, 'enable_text_special_features': False, 'enable_raw_text_features': False}}
Disables automated feature generation when text features are detected.
This is useful to determine how beneficial text features are to the end result, as well as to ensure features are not mistaken for text when they are not.
Ignored if `feature_generator` was also specified.
hyperparameters : str or dict, default = 'default'
Determines the hyperparameters used by the models.
If `str` is passed, will use a preset hyperparameter configuration.
Valid `str` options: ['default', 'light', 'very_light', 'toy', 'multimodal']
'default': Default AutoGluon hyperparameters intended to maximize accuracy without significant regard to inference time or disk usage.
'light': Results in smaller models. Generally will make inference speed much faster and disk usage much lower, but with worse accuracy.
'very_light': Results in much smaller models. Behaves similarly to 'light', but in many cases with over 10x less disk usage and a further reduction in accuracy.
'toy': Results in extremely small models. Only use this when prototyping, as the model quality will be severely reduced.
'multimodal': [EXPERIMENTAL] Trains a multimodal transformer model alongside tabular models. Requires that some text columns appear in the data, a GPU, and CUDA-enabled MXNet.
When combined with 'best_quality' `presets` option, this can achieve extremely strong results in multimodal data tables that contain columns with text in addition to numeric/categorical columns.
Reference `autogluon/tabular/configs/hyperparameter_configs.py` for information on the hyperparameters associated with each preset.
Keys are strings that indicate which model types to train.
Stable model options include:
'GBM' (LightGBM)
'CAT' (CatBoost)
'XGB' (XGBoost)
'RF' (random forest)
'XT' (extremely randomized trees)
'KNN' (k-nearest neighbors)
'LR' (linear regression)
'NN' (neural network with MXNet backend)
'FASTAI' (neural network with FastAI backend)
Experimental model options include:
'FASTTEXT' (FastText)
'AG_TEXT_NN' (Multimodal Text+Tabular model, GPU is required)
'TRANSF' (Tabular Transformer, GPU is recommended)
If a certain key is missing from hyperparameters, then `fit()` will not train any models of that type. Omitting a model key from hyperparameters is equivalent to including this model key in `excluded_model_types`.
For example, set `hyperparameters = { 'NN':{...} }` if say you only want to train neural networks and no other types of models.
Values = dict of hyperparameter settings for each model type, or list of dicts.
Each hyperparameter can either be a single fixed value or a search space containing many possible values.
Unspecified hyperparameters will be set to default values (or default search spaces if hyperparameter tuning is enabled via `hyperparameter_tune_kwargs`).
Caution: Any provided search spaces will be overridden by fixed defaults if `hyperparameter_tune_kwargs=None`.
To train multiple models of a given type, set the value to a list of hyperparameter dictionaries.
For example, `hyperparameters = {'RF': [{'criterion': 'gini'}, {'criterion': 'entropy'}]}` will result in 2 random forest models being trained with separate hyperparameters.
Advanced functionality: Custom models
`hyperparameters` can also take special string values instead of a dictionary of model parameters which maps to a pre-configured model configuration (currently supported options = ['GBMLarge']).
These additional models will be trained using custom pre-specified hyperparameter settings that are known to work well.
Advanced functionality: Custom stack levels
By default, AutoGluon re-uses the same models and model hyperparameters at each level during stack ensembling.
To customize this behaviour, create a hyperparameters dictionary separately for each stack level, and then add them as values to a new dictionary, with keys equal to the stack level.
Example: `hyperparameters = {1: {'RF': rf_params1}, 2: {'CAT': [cat_params1, cat_params2], 'NN': {}}}`
This will result in a stack ensemble that has one custom random forest in level 1 followed by two CatBoost models with custom hyperparameters and a default neural network in level 2, for a total of 4 models.
If a level is not specified in `hyperparameters`, it will default to using the highest specified level to train models. This can also be explicitly controlled by adding a 'default' key.
Default:
hyperparameters = {
'NN': {},
'GBM': [
{'extra_trees': True, 'ag_args': {'name_suffix': 'XT'}},
{},
'GBMLarge',
],
'CAT': {},
'XGB': {},
'FASTAI': {},
'RF': [
{'criterion': 'gini', 'ag_args': {'name_suffix': 'Gini', 'problem_types': ['binary', 'multiclass']}},
{'criterion': 'entropy', 'ag_args': {'name_suffix': 'Entr', 'problem_types': ['binary', 'multiclass']}},
{'criterion': 'mse', 'ag_args': {'name_suffix': 'MSE', 'problem_types': ['regression']}},
],
'XT': [
{'criterion': 'gini', 'ag_args': {'name_suffix': 'Gini', 'problem_types': ['binary', 'multiclass']}},
{'criterion': 'entropy', 'ag_args': {'name_suffix': 'Entr', 'problem_types': ['binary', 'multiclass']}},
{'criterion': 'mse', 'ag_args': {'name_suffix': 'MSE', 'problem_types': ['regression']}},
],
'KNN': [
{'weights': 'uniform', 'ag_args': {'name_suffix': 'Unif'}},
{'weights': 'distance', 'ag_args': {'name_suffix': 'Dist'}},
],
}
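For example (an illustrative sketch), passing `hyperparameters = {'GBM': {}, 'CAT': {}}` trains only LightGBM and CatBoost models with their default settings; all other model types are skipped.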
Details regarding the hyperparameters you can specify for each model are provided in the following files:
NN: `autogluon.tabular.models.tabular_nn.hyperparameters.parameters`
Note: certain hyperparameter settings may cause these neural networks to train much slower.
GBM: `autogluon.tabular.models.lgb.hyperparameters.parameters`
See also the lightGBM docs: https://lightgbm.readthedocs.io/en/latest/Parameters.html
CAT: `autogluon.tabular.models.catboost.hyperparameters.parameters`
See also the CatBoost docs: https://catboost.ai/docs/concepts/parameter-tuning.html
XGB: `autogluon.tabular.models.xgboost.hyperparameters.parameters`
See also the XGBoost docs: https://xgboost.readthedocs.io/en/latest/parameter.html
FASTAI: `autogluon.tabular.models.fastainn.hyperparameters.parameters`
See also the FastAI docs: https://docs.fast.ai/tabular.models.html
RF: See sklearn documentation: https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestClassifier.html
Note: Hyperparameter tuning is disabled for this model.
XT: See sklearn documentation: https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.ExtraTreesClassifier.html
Note: Hyperparameter tuning is disabled for this model.
KNN: See sklearn documentation: https://scikit-learn.org/stable/modules/generated/sklearn.neighbors.KNeighborsClassifier.html
Note: Hyperparameter tuning is disabled for this model.
LR: `autogluon.tabular.models.lr.hyperparameters.parameters`
Note: Hyperparameter tuning is disabled for this model.
Note: 'penalty' parameter can be used for regression to specify regularization method: 'L1' and 'L2' values are supported.
Advanced functionality: Custom AutoGluon model arguments
These arguments are optional and can be specified in any model's hyperparameters.
Example: `hyperparameters = {'RF': {..., 'ag_args': {'name_suffix': 'CustomModelSuffix', 'disable_in_hpo': True}}}`
ag_args: Dictionary of customization options related to meta properties of the model such as its name, the order it is trained, the problem types it is valid for, and the type of HPO it utilizes.
Valid keys:
name: (str) The name of the model. This overrides AutoGluon's naming logic and all other name arguments if present.
name_main: (str) The main name of the model. Example: 'RandomForest'.
name_prefix: (str) Add a custom prefix to the model name. Unused by default.
name_suffix: (str) Add a custom suffix to the model name. Unused by default.
priority: (int) Determines the order in which the model is trained. Larger values result in the model being trained earlier. Default values range from 100 (KNN) to 0 (custom), dictated by model type. If you want this model to be trained first, set priority = 999.
problem_types: (list) List of valid problem types for the model. `problem_types=['binary']` will result in the model only being trained if `problem_type` is 'binary'.
disable_in_hpo: (bool) If True, the model will only be trained if `hyperparameter_tune_kwargs=None`.
valid_stacker: (bool) If False, the model will not be trained as a level 2 or higher stacker model.
valid_base: (bool) If False, the model will not be trained as a level 1 (base) model.
hyperparameter_tune_kwargs: (dict) Refer to :meth:`TabularPredictor.fit` hyperparameter_tune_kwargs argument. If specified here, will override global HPO settings for this model.
Reference the default hyperparameters for example usage of these options.
ag_args_fit: Dictionary of model fit customization options related to how and with what constraints the model is trained. These parameters affect stacker fold models, but not stacker models themselves.
Clarification: `time_limit` is the internal time in seconds given to a particular model to train, which is dictated in part by the `time_limit` argument given during `predictor.fit()` but is not the same.
Valid keys:
stopping_metric: (str or :class:`autogluon.core.metrics.Scorer`, default=None) The metric to use for early stopping of the model. If None, model will decide.
max_memory_usage_ratio: (float, default=1.0) The ratio of memory usage relative to the default to allow before early stopping or killing the model. Values greater than 1.0 will be increasingly prone to out-of-memory errors.
max_time_limit_ratio: (float, default=1.0) The ratio of the provided time_limit to use during model `fit()`. If `time_limit=10` and `max_time_limit_ratio=0.3`, time_limit would be changed to 3. Does not alter max_time_limit or min_time_limit values.
max_time_limit: (float, default=None) Maximum amount of time to allow this model to train for (in sec). If the provided time_limit is greater than this value, it will be replaced by max_time_limit.
min_time_limit: (float, default=0) Allow this model to train for at least this long (in sec), regardless of the time limit it would otherwise be granted.
If `min_time_limit >= max_time_limit`, time_limit will be set to min_time_limit.
If `min_time_limit=None`, time_limit will be set to None and the model will have no training time restriction.
num_cpus : (int or str, default='auto')
How many CPUs to use during model fit.
If 'auto', model will decide.
num_gpus : (int or str, default='auto')
How many GPUs to use during model fit.
If 'auto', model will decide. Some models can use GPUs but don't by default due to differences in model quality.
Set to 0 to disable usage of GPUs.
ag_args_ensemble: Dictionary of hyperparameters shared by all models that control how they are ensembled, if bag mode is enabled.
Valid keys:
use_orig_features: (bool) Whether a stack model will use the original features along with the stack features to train (akin to skip-connections). If the model has no stack features (no base models), this value is ignored and the stack model will use the original features.
max_base_models: (int, default=25) Maximum number of base models whose predictions form the features input to this stacker model. If more than `max_base_models` base models are available, only the top `max_base_models` models with highest validation score are used.
max_base_models_per_type: (int, default=5) Similar to `max_base_models`. If more than `max_base_models_per_type` of any particular model type are available, only the top `max_base_models_per_type` of that type are used. This occurs before the `max_base_models` filter.
save_bag_folds: (bool, default=True)
If True, bagged models will save their fold models (the models from each individual fold of bagging). This is required to use bagged models for prediction.
If False, bagged models will not save their fold models. This means that bagged models will not be valid models during inference.
This should only be set to False when planning to call `predictor.refit_full()` or when `refit_full` is set and `set_best_to_refit_full=True`.
Particularly useful if disk usage is a concern. By not saving the fold models, bagged models will use only very small amounts of disk space during training.
In many training runs, this will reduce peak disk usage by >10x.
feature_metadata : :class:`autogluon.tabular.FeatureMetadata` or str, default = 'infer'
The feature metadata used in various inner logic in feature preprocessing.
If 'infer', will automatically construct a FeatureMetadata object based on the properties of `train_data`.
In this case, `train_data` is input into :meth:`autogluon.tabular.FeatureMetadata.from_df` to infer `feature_metadata`.
If 'infer' incorrectly assumes the dtypes of features, consider explicitly specifying `feature_metadata`.
**kwargs :
auto_stack : bool, default = False
Whether AutoGluon should automatically utilize bagging and multi-layer stack ensembling to boost predictive accuracy.
Set this = True if you are willing to tolerate longer training times in order to maximize predictive accuracy!
Automatically sets `num_bag_folds` and `num_stack_levels` arguments based on dataset properties.
Note: Setting `num_bag_folds` and `num_stack_levels` arguments will override `auto_stack`.
Note: This can increase training time (and inference time) by up to 20x, but can greatly improve predictive performance.
num_bag_folds : int, default = None
Number of folds used for bagging of models. When `num_bag_folds = k`, training time is roughly increased by a factor of `k` (set = 0 to disable bagging).
Disabled by default (0), but we recommend values between 5-10 to maximize predictive performance.
Increasing num_bag_folds will result in models with lower bias but that are more prone to overfitting.
`num_bag_folds = 1` is an invalid value, and will raise a ValueError.
Values > 10 may produce diminishing returns, and can even harm overall results due to overfitting.
To further improve predictions, avoid increasing `num_bag_folds` much beyond 10 and instead increase `num_bag_sets`.
num_bag_sets : int, default = None
Number of repeats of kfold bagging to perform (values must be >= 1). Total number of models trained during bagging = `num_bag_folds * num_bag_sets`.
Defaults to 1 if `time_limit` is not specified, otherwise 20 (always disabled if `num_bag_folds` is not specified).
Values greater than 1 will result in superior predictive performance, especially on smaller problems and with stacking enabled (reduces overall variance).
num_stack_levels : int, default = None
Number of stacking levels to use in stack ensemble. Roughly increases model training time by factor of `num_stack_levels+1` (set = 0 to disable stack ensembling).
Disabled by default (0), but we recommend values between 1-3 to maximize predictive performance.
To prevent overfitting, `num_bag_folds >= 2` must also be set or else a ValueError will be raised.
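Example (rough arithmetic): with `num_bag_folds=5`, `num_bag_sets=1`, and `num_stack_levels=1`, each model type is trained as 5 bagged fold models at the base level and 5 more at the stack level, so training time is roughly 5 * (1 + 1) = 10x that of a single unbagged, unstacked fit.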
holdout_frac : float, default = None
Fraction of train_data to holdout as tuning data for optimizing hyperparameters (ignored unless `tuning_data = None`, ignored if `num_bag_folds != 0` unless `use_bag_holdout == True`).
Default value (if None) is selected based on the number of rows in the training data. Default values range from 0.2 at 2,500 rows to 0.01 at 250,000 rows.
Default value is doubled if `hyperparameter_tune_kwargs` is set, up to a maximum of 0.2.
Disabled if `num_bag_folds >= 2` unless `use_bag_holdout == True`.
use_bag_holdout : bool, default = False
If True, a `holdout_frac` portion of the data is held-out from model bagging.
This held-out data is only used to score models and determine weighted ensemble weights.
Enable this if there is a large gap between score_val and score_test in stack models.
Note: If `tuning_data` was specified, `tuning_data` is used as the holdout data.
Disabled if not bagging.
hyperparameter_tune_kwargs : str or dict, default = None
Hyperparameter tuning strategy and kwargs (for example, how many HPO trials to run).
If None, then hyperparameter tuning will not be performed.
Valid preset values:
'auto': Uses the 'bayesopt' preset.
'random': Performs HPO via random search using local scheduler.
'bayesopt': Performs HPO via bayesian optimization using local scheduler.
For valid dictionary keys, refer to :class:`autogluon.core.scheduler.FIFOScheduler` documentation.
The 'searcher' key is required when providing a dict.
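Example (an illustrative sketch; 'searcher' is the only key documented as required here, and additional keys such as 'num_trials' are assumed to follow the scheduler's documented options): `hyperparameter_tune_kwargs = {'searcher': 'random', 'num_trials': 5}`.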
ag_args : dict, default = None
Keyword arguments to pass to all models (i.e. common hyperparameters shared by all AutoGluon models).
See the `ag_args` argument from "Advanced functionality: Custom AutoGluon model arguments" in the `hyperparameters` argument documentation for valid values.
Identical to specifying `ag_args` parameter for all models in `hyperparameters`.
If a key in `ag_args` is already specified for a model in `hyperparameters`, it will not be altered through this argument.
ag_args_fit : dict, default = None
Keyword arguments to pass to all models.
See the `ag_args_fit` argument from "Advanced functionality: Custom AutoGluon model arguments" in the `hyperparameters` argument documentation for valid values.
Identical to specifying `ag_args_fit` parameter for all models in `hyperparameters`.
If a key in `ag_args_fit` is already specified for a model in `hyperparameters`, it will not be altered through this argument.
ag_args_ensemble : dict, default = None
Keyword arguments to pass to all models.
See the `ag_args_ensemble` argument from "Advanced functionality: Custom AutoGluon model arguments" in the `hyperparameters` argument documentation for valid values.
Identical to specifying `ag_args_ensemble` parameter for all models in `hyperparameters`.
If a key in `ag_args_ensemble` is already specified for a model in `hyperparameters`, it will not be altered through this argument.
excluded_model_types : list, default = None
Banned subset of model types to avoid training during `fit()`, even if present in `hyperparameters`.
Reference `hyperparameters` documentation for what models correspond to each value.
Useful when a particular model type such as 'KNN' or 'custom' is not desired but altering the `hyperparameters` dictionary is difficult or time-consuming.
Example: To exclude both 'KNN' and 'custom' models, specify `excluded_model_types=['KNN', 'custom']`.
refit_full : bool or str, default = False
Whether to retrain all models on all of the data (training + validation) after the normal training procedure.
This is equivalent to calling `predictor.refit_full(model=refit_full)` after fit.
If `refit_full=True`, it will be treated as `refit_full='all'`.
If `refit_full=False`, refitting will not occur.
Valid str values:
`all`: refits all models.
`best`: refits only the best model (and its ancestors if it is a stacker model).
`{model_name}`: refits only the specified model (and its ancestors if it is a stacker model).
For bagged models:
Reduces a model's inference time by collapsing bagged ensembles into a single model fit on all of the training data.
This process will typically result in a slight accuracy reduction and a large inference speedup.
The inference speedup will generally be between 10-200x faster than the original bagged ensemble model.
The inference speedup factor is equivalent to (k * n), where k is the number of folds (`num_bag_folds`) and n is the number of finished repeats (`num_bag_sets`) in the bagged ensemble.
The runtime is generally 10% or less of the original fit runtime.
The runtime can be roughly estimated as 1 / (k * n) of the original fit runtime, with k and n defined above.
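Example (rough arithmetic): a bagged ensemble trained with `num_bag_folds=8` and `num_bag_sets=2` contains 16 fold models; refitting collapses them into one model, giving roughly a 16x inference speedup and a refit runtime of roughly 1/16 of the original fit time.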
For non-bagged models:
Optimizes a model's accuracy by retraining on 100% of the data without using a validation set.
Will typically result in a slight accuracy increase and no change to inference time.
The runtime will be approximately equal to the original fit runtime.
This process does not alter the original models, but instead adds additional models.
If stacker models are refit by this process, they will use the refit_full versions of the ancestor models during inference.
Models produced by this process will not have validation scores, as they use all of the data for training.
Therefore, it is up to the user to determine if the models are of sufficient quality by including test data in `predictor.leaderboard(test_data)`.
If the user does not have additional test data, they should reference the original model's score for an estimate of the performance of the refit_full model.
Warning: Be aware that utilizing refit_full models without separately verifying on test data means that the model is untested, and has no guarantee of being consistent with the original model.
The time taken by this process is not enforced by `time_limit`.
set_best_to_refit_full : bool, default = False
If True, will change the default model that Predictor uses for prediction when model is not specified to the refit_full version of the model that exhibited the highest validation score.
Only valid if `refit_full` is set.
keep_only_best : bool, default = False
If True, only the best model and its ancestor models are saved in the outputted `predictor`. All other models are deleted.
If you only care about deploying the most accurate predictor with the smallest file-size and no longer need any of the other trained models or functionality beyond prediction on new data, then set: `keep_only_best=True`, `save_space=True`.
This is equivalent to calling `predictor.delete_models(models_to_keep='best', dry_run=False)` directly after `fit()`.
If used with `refit_full` and `set_best_to_refit_full`, the best model will be the refit_full model, and the original bagged best model will be deleted.
`refit_full` will be automatically set to 'best' in this case to avoid training models which will be later deleted.
save_space : bool, default = False
If True, reduces the memory and disk size of predictor by deleting auxiliary model files that aren't needed for prediction on new data.
This is equivalent to calling `predictor.save_space()` directly after `fit()`.
This has NO impact on inference accuracy.
It is recommended if the only goal is to use the trained model for prediction.
Certain advanced functionality may no longer be available if `save_space=True`. Refer to `predictor.save_space()` documentation for more details.
feature_generator : :class:`autogluon.features.generators.AbstractFeatureGenerator`, default = :class:`autogluon.features.generators.AutoMLPipelineFeatureGenerator`
The feature generator used by AutoGluon to process the input data to the form sent to the models. This often includes automated feature generation and data cleaning.
It is generally recommended to keep the default feature generator unless handling an advanced use-case.
To control aspects of the default feature generation process, you can pass in an :class:`AutoMLPipelineFeatureGenerator` object constructed using some of these kwargs:
enable_numeric_features : bool, default True
Whether to keep features of 'int' and 'float' raw types.
These features are passed without alteration to the models.
Appends IdentityFeatureGenerator(infer_features_in_args=dict(valid_raw_types=['int', 'float'])) to the generator group.
enable_categorical_features : bool, default True
Whether to keep features of 'object' and 'category' raw types.
These features are processed into memory optimized 'category' features.
Appends CategoryFeatureGenerator() to the generator group.
enable_datetime_features : bool, default True
Whether to keep features of 'datetime' raw type and 'object' features identified as 'datetime_as_object' features.
These features will be converted to 'int' features representing milliseconds since epoch.
Appends DatetimeFeatureGenerator() to the generator group.
enable_text_special_features : bool, default True
Whether to use 'object' features identified as 'text' features to generate 'text_special' features such as word count, capital letter ratio, and symbol counts.
Appends TextSpecialFeatureGenerator() to the generator group.
enable_text_ngram_features : bool, default True
Whether to use 'object' features identified as 'text' features to generate 'text_ngram' features.
Appends TextNgramFeatureGenerator(vectorizer=vectorizer) to the generator group.
enable_raw_text_features : bool, default False
Whether to keep the raw text features.
Appends IdentityFeatureGenerator(infer_features_in_args=dict(required_special_types=['text'])) to the generator group.
vectorizer : CountVectorizer, default CountVectorizer(min_df=30, ngram_range=(1, 3), max_features=10000, dtype=np.uint8)
sklearn CountVectorizer object to use in TextNgramFeatureGenerator.
Only used if `enable_text_ngram_features=True`.
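Example (an illustrative sketch): to keep the default pipeline but disable n-gram text features, pass `feature_generator=AutoMLPipelineFeatureGenerator(enable_text_ngram_features=False)`, importing the class via `from autogluon.features.generators import AutoMLPipelineFeatureGenerator`.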
unlabeled_data : pd.DataFrame, default = None
[Experimental Parameter]
Collection of data without labels that we can use to pretrain on. This is the same schema as train_data, except
without the labels. Currently, unlabeled_data is only used for pretraining a TabTransformer model.
If you do not specify 'TRANSF' with unlabeled_data, then no pretraining will occur and unlabeled_data will be ignored!
After the pretraining step, the TabTransformer model is also fine-tuned on the labeled data. If TabTransformer is ensembled
with other models, as is typical in AutoGluon, then the output of this pretrain/fine-tune step is ensembled
with models that do not use the unlabeled_data. This pretrain/fine-tune flow is also known as semi-supervised learning.
The typical use case for unlabeled_data is to add signal when labeled training data is scarce,
e.g. 500 hand-labeled samples (perhaps a difficult human labeling task) alongside thousands or millions of unlabeled rows.
However, this isn't the only use case: given enough unlabeled data (millions of rows), you may see improvements
regardless of how much labeled data you have.
verbosity : int
If specified, overrides the existing `predictor.verbosity` value.
Returns
-------
:class:`TabularPredictor` object. Returns self.
Examples
--------
>>> from autogluon.tabular import TabularDataset, TabularPredictor
>>> train_data = TabularDataset('https://autogluon.s3.amazonaws.com/datasets/Inc/train.csv')
>>> label = 'class'
>>> predictor = TabularPredictor(label=label).fit(train_data)
>>> test_data = TabularDataset('https://autogluon.s3.amazonaws.com/datasets/Inc/test.csv')
>>> leaderboard = predictor.leaderboard(test_data)
>>> y_test = test_data[label]
>>> test_data = test_data.drop(columns=[label])
>>> y_pred = predictor.predict(test_data)
>>> perf = predictor.evaluate_predictions(y_true=y_test, y_pred=y_pred)
To maximize predictive performance, use the following:
>>> eval_metric = 'roc_auc' # set this to the metric you ultimately care about
>>> time_limit = 3600 # set as long as you are willing to wait (in sec)
>>> predictor = TabularPredictor(label=label, eval_metric=eval_metric).fit(train_data, presets=['best_quality'], time_limit=time_limit)
"""
if self._learner.is_fit:
raise AssertionError('Predictor is already fit! To fit additional models, refer to `predictor.fit_extra`, or create a new `Predictor`.')
kwargs_orig = kwargs.copy()
kwargs = self._validate_fit_kwargs(kwargs)
verbosity = kwargs.get('verbosity', self.verbosity)
set_logger_verbosity(verbosity, logger=logger)
if presets:
if not isinstance(presets, list):
presets = [presets]
logger.log(20, f'Presets specified: {presets}')
if verbosity >= 3:
logger.log(20, '============ fit kwarg info ============')
logger.log(20, 'User Specified kwargs:')
logger.log(20, f'{pprint.pformat(kwargs_orig)}')
logger.log(20, 'Full kwargs:')
logger.log(20, f'{pprint.pformat(kwargs)}')
logger.log(20, '========================================')
holdout_frac = kwargs['holdout_frac']
num_bag_folds = kwargs['num_bag_folds']
num_bag_sets = kwargs['num_bag_sets']
num_stack_levels = kwargs['num_stack_levels']
auto_stack = kwargs['auto_stack']
feature_generator = kwargs['feature_generator']
unlabeled_data = kwargs['unlabeled_data']
ag_args = kwargs['ag_args']
ag_args_fit = kwargs['ag_args_fit']
ag_args_ensemble = kwargs['ag_args_ensemble']
excluded_model_types = kwargs['excluded_model_types']
use_bag_holdout = kwargs['use_bag_holdout']
if ag_args is None:
ag_args = {}
ag_args = self._set_hyperparameter_tune_kwargs_in_ag_args(kwargs['hyperparameter_tune_kwargs'], ag_args, time_limit=time_limit)
feature_generator_init_kwargs = kwargs['_feature_generator_kwargs']
if feature_generator_init_kwargs is None:
feature_generator_init_kwargs = dict()
train_data, tuning_data, unlabeled_data = self._validate_fit_data(train_data=train_data, tuning_data=tuning_data, unlabeled_data=unlabeled_data)
if hyperparameters is None:
hyperparameters = 'default'
if isinstance(hyperparameters, str):
hyperparameters = get_hyperparameter_config(hyperparameters)
###################################
# FIXME: v0.1 This section is a hack
if 'enable_raw_text_features' not in feature_generator_init_kwargs:
if 'AG_TEXT_NN' in hyperparameters:
feature_generator_init_kwargs['enable_raw_text_features'] = True
else:
for key in hyperparameters:
if isinstance(key, int) or key == 'default':
if 'AG_TEXT_NN' in hyperparameters[key]:
feature_generator_init_kwargs['enable_raw_text_features'] = True
break
###################################
if feature_metadata is not None and isinstance(feature_metadata, str) and feature_metadata == 'infer':
feature_metadata = None
self._set_feature_generator(feature_generator=feature_generator, feature_metadata=feature_metadata, init_kwargs=feature_generator_init_kwargs)
num_bag_folds, num_bag_sets, num_stack_levels = self._sanitize_stack_args(
num_bag_folds=num_bag_folds, num_bag_sets=num_bag_sets, num_stack_levels=num_stack_levels,
time_limit=time_limit, auto_stack=auto_stack, num_train_rows=len(train_data),
)
if holdout_frac is None:
holdout_frac = default_holdout_frac(len(train_data), ag_args.get('hyperparameter_tune_kwargs', None) is not None)
if kwargs['_save_bag_folds'] is not None:
if ag_args_ensemble is None:
ag_args_ensemble = {}
ag_args_ensemble['save_bag_folds'] = kwargs['_save_bag_folds']
if time_limit is None:
mb_mem_usage_train_data = get_approximate_df_mem_usage(train_data, sample_ratio=0.2).sum() / 1e6
num_rows_train = len(train_data)
if mb_mem_usage_train_data >= 50 or num_rows_train >= 100000:
logger.log(20, f'Warning: Training may take a very long time because `time_limit` was not specified and `train_data` is large ({num_rows_train} samples, {round(mb_mem_usage_train_data, 2)} MB).')
logger.log(20, f'\tConsider setting `time_limit` to ensure training finishes within an expected duration or experiment with a small portion of `train_data` to identify an ideal `presets` and `hyperparameters` configuration.')
core_kwargs = {
'ag_args': ag_args,
'ag_args_ensemble': ag_args_ensemble,
'ag_args_fit': ag_args_fit,
'excluded_model_types': excluded_model_types,
}
self._learner.fit(X=train_data, X_val=tuning_data, X_unlabeled=unlabeled_data,
holdout_frac=holdout_frac, num_bag_folds=num_bag_folds, num_bag_sets=num_bag_sets, num_stack_levels=num_stack_levels,
hyperparameters=hyperparameters, core_kwargs=core_kwargs, time_limit=time_limit, verbosity=verbosity, use_bag_holdout=use_bag_holdout)
self._set_post_fit_vars()
self._post_fit(
keep_only_best=kwargs['keep_only_best'],
refit_full=kwargs['refit_full'],
set_best_to_refit_full=kwargs['set_best_to_refit_full'],
save_space=kwargs['save_space'],
)
self.save()
return self
def _post_fit(self, keep_only_best=False, refit_full=False, set_best_to_refit_full=False, save_space=False):
if refit_full is True:
if keep_only_best is True:
if set_best_to_refit_full is True:
refit_full = 'best'
else:
logger.warning(f'refit_full was set to {refit_full}, but keep_only_best=True and set_best_to_refit_full=False. Disabling refit_full to avoid training models which would be automatically deleted.')
refit_full = False
else:
refit_full = 'all'
if refit_full is not False:
trainer_model_best = self._trainer.get_model_best()
self.refit_full(model=refit_full)
if set_best_to_refit_full:
if trainer_model_best in self._trainer.model_full_dict.keys():
self._trainer.model_best = self._trainer.model_full_dict[trainer_model_best]
# Note: model_best will be overwritten if additional training is done with new models, since model_best will have validation score of None and any new model will have a better validation score.
# This has the side-effect of having the possibility of model_best being overwritten by a worse model than the original model_best.
self._trainer.save()
else:
logger.warning(f'Best model ({trainer_model_best}) is not present in refit_full dictionary. Training may have failed on the refit model. AutoGluon will default to using {trainer_model_best} for predictions.')
if keep_only_best:
self.delete_models(models_to_keep='best', dry_run=False)
if save_space:
self.save_space()
def fit_extra(self, hyperparameters, time_limit=None, base_model_names=None, **kwargs):
"""
Fits additional models after the original :meth:`TabularPredictor.fit` call.
The original train_data and tuning_data will be used to train the models.
Parameters
----------
hyperparameters : str or dict
Refer to argument documentation in :meth:`TabularPredictor.fit`.
If `base_model_names` is specified and hyperparameters is using the level-based key notation,
the key of the level which directly uses the base models should be 1. The level in the hyperparameters
dictionary is relative, not absolute.
time_limit : int, default = None
Refer to argument documentation in :meth:`TabularPredictor.fit`.
base_model_names : list, default = None
The names of the models to use as base models for this fit call.
Base models will provide their out-of-fold predictions as additional features to the models in `hyperparameters`.
If specified, all models trained will be stack ensembles.
If None, models will be trained as if they were specified in :meth:`TabularPredictor.fit`, without depending on existing models.
Only valid if bagging is enabled.
**kwargs :
Refer to kwargs documentation in :meth:`TabularPredictor.fit`.
Note that the following kwargs are not available in `fit_extra` as they cannot be changed from their values set in `fit()`:
[`holdout_frac`, `num_bag_folds`, `auto_stack`, `feature_generator`, `unlabeled_data`]
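Examples
--------
An illustrative sketch: after an initial `fit()`, train two extra LightGBM configurations for up to 10 minutes.
>>> predictor.fit_extra(hyperparameters={'GBM': [{}, {'extra_trees': True, 'ag_args': {'name_suffix': 'XT'}}]}, time_limit=600)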
"""
time_start = time.time()
kwargs_orig = kwargs.copy()
kwargs = self._validate_fit_extra_kwargs(kwargs)
verbosity = kwargs.get('verbosity', self.verbosity)
set_logger_verbosity(verbosity, logger=logger)
if verbosity >= 3:
logger.log(20, '============ fit kwarg info ============')
logger.log(20, 'User Specified kwargs:')
logger.log(20, f'{pprint.pformat(kwargs_orig)}')
logger.log(20, 'Full kwargs:')
logger.log(20, f'{pprint.pformat(kwargs)}')
logger.log(20, '========================================')
# TODO: Allow disable aux (default to disabled)
# TODO: num_bag_sets
# num_bag_sets = kwargs['num_bag_sets']
num_stack_levels = kwargs['num_stack_levels']
# save_bag_folds = kwargs['save_bag_folds'] # TODO: Enable
ag_args = kwargs['ag_args']
ag_args_fit = kwargs['ag_args_fit']
ag_args_ensemble = kwargs['ag_args_ensemble']
excluded_model_types = kwargs['excluded_model_types']
if ag_args is None:
ag_args = {}
ag_args = self._set_hyperparameter_tune_kwargs_in_ag_args(kwargs['hyperparameter_tune_kwargs'], ag_args, time_limit=time_limit)
fit_new_weighted_ensemble = False # TODO: Add as option
aux_kwargs = None # TODO: Add as option
if isinstance(hyperparameters, str):
hyperparameters = get_hyperparameter_config(hyperparameters)
if num_stack_levels is None:
hyperparameter_keys = list(hyperparameters.keys())
highest_level = 1
for key in hyperparameter_keys:
if isinstance(key, int):
highest_level = max(key, highest_level)
num_stack_levels = highest_level
# TODO: make core_kwargs a kwargs argument to predictor.fit, add aux_kwargs to predictor.fit
core_kwargs = {'ag_args': ag_args, 'ag_args_ensemble': ag_args_ensemble, 'ag_args_fit': ag_args_fit, 'excluded_model_types': excluded_model_types}
# TODO: Add special error message if called and training/val data was not cached.
X, y, X_val, y_val = self._trainer.load_data()
fit_models = self._trainer.train_multi_levels(
X=X, y=y, hyperparameters=hyperparameters, X_val=X_val, y_val=y_val,
base_model_names=base_model_names, time_limit=time_limit, relative_stack=True, level_end=num_stack_levels,
core_kwargs=core_kwargs, aux_kwargs=aux_kwargs
)
if time_limit is not None:
time_limit = time_limit - (time.time() - time_start)
if fit_new_weighted_ensemble:
if time_limit is not None:
time_limit_weighted = max(time_limit, 60)
else:
time_limit_weighted = None
fit_models += self.fit_weighted_ensemble(time_limit=time_limit_weighted)
self._post_fit(
keep_only_best=kwargs['keep_only_best'],
refit_full=kwargs['refit_full'],
set_best_to_refit_full=kwargs['set_best_to_refit_full'],
save_space=kwargs['save_space'],
)
self.save()
return self
def predict(self, data, model=None, as_pandas=True):
"""
Use trained models to produce predictions of `label` column values for new data.
Parameters
----------
data : str or :class:`TabularDataset` or :class:`pd.DataFrame`
The data to make predictions for. Should contain same column names as training Dataset and follow same format
(may contain extra columns that won't be used by Predictor, including the label-column itself).
If str is passed, `data` will be loaded using the str value as the file path.
model : str (optional)
The name of the model to get predictions from. Defaults to None, which uses the highest scoring model on the validation set.
Valid models are listed in this `predictor` by calling `predictor.get_model_names()`
as_pandas : bool, default = True
Whether to return the output as a :class:`pd.Series` (True) or :class:`np.ndarray` (False).
Returns
-------
Array of predictions, one corresponding to each row in given dataset. Either :class:`np.ndarray` or :class:`pd.Series` depending on `as_pandas` argument.
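Examples
--------
An illustrative sketch (the model name below is a placeholder; list valid names via `predictor.get_model_names()`):
>>> y_pred = predictor.predict(test_data)
>>> y_pred_rf = predictor.predict(test_data, model='RandomForestGini')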
"""
data = self.__get_dataset(data)
return self._learner.predict(X=data, model=model, as_pandas=as_pandas)
def predict_proba(self, data, model=None, as_pandas=True, as_multiclass=True):
"""
Use trained models to produce predicted class probabilities rather than class-labels (if task is classification).
If `predictor.problem_type` is regression, this functions identically to `predict`, returning the same output.
Parameters
----------
data : str or :class:`TabularDataset` or :class:`pd.DataFrame`
The data to make predictions for. Should contain same column names as training dataset and follow same format
(may contain extra columns that won't be used by Predictor, including the label-column itself).
If str is passed, `data` will be loaded using the str value as the file path.
model : str (optional)
The name of the model to get prediction probabilities from. Defaults to None, which uses the highest scoring model on the validation set.
Valid models are listed in this `predictor` by calling `predictor.get_model_names()`.
as_pandas : bool, default = True
Whether to return the output as a pandas object (True) or numpy array (False).
Pandas object is a DataFrame if this is a multiclass problem or `as_multiclass=True`, otherwise it is a Series.
If the output is a DataFrame, the column order will be equivalent to `predictor.class_labels`.
as_multiclass : bool, default = True
Whether to return binary classification probabilities as if they were for multiclass classification.
Output will contain two columns, and if `as_pandas=True`, the column names will correspond to the binary class labels.
The columns will be the same order as `predictor.class_labels`.
If False, output will contain only 1 column for the positive class (get positive_class name via `predictor.positive_class`).
Only impacts output for binary classification problems.
Returns
-------
Array of predicted class-probabilities, corresponding to each row in the given data.
May be a :class:`np.ndarray` or :class:`pd.DataFrame` / :class:`pd.Series` depending on `as_pandas` and `as_multiclass` arguments and the type of prediction problem.
For binary classification problems, the output contains for each datapoint the predicted probabilities of the negative and positive classes, unless you specify `as_multiclass=False`.
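Examples
--------
An illustrative sketch, assuming `predictor` was fit on a binary classification task:
>>> pred_probs = predictor.predict_proba(test_data)  # DataFrame with one column per class, ordered as `predictor.class_labels`
>>> positive_probs = predictor.predict_proba(test_data, as_multiclass=False)  # Series of probabilities for the positive class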
"""
data = self.__get_dataset(data)
return self._learner.predict_proba(X=data, model=model, as_pandas=as_pandas, as_multiclass=as_multiclass)
def evaluate(self, data, model=None, silent=False, auxiliary_metrics=True, detailed_report=False) -> dict:
"""
Report the predictive performance evaluated over a given dataset.
This is basically a shortcut for: `pred_proba = predict_proba(data); evaluate_predictions(data[label], pred_proba)`.
Parameters
----------
data : str or :class:`TabularDataset` or :class:`pd.DataFrame`
This dataset must also contain the label-column with the same column-name as previously specified.
If str is passed, `data` will be loaded using the str value as the file path.
model : str (optional)
The name of the model to get prediction probabilities from. Defaults to None, which uses the highest scoring model on the validation set.
Valid models are listed in this `predictor` by calling `predictor.get_model_names()`.
silent : bool, default = False
If False, performance results are printed.
auxiliary_metrics: bool, default = True
Should we compute other (`problem_type` specific) metrics in addition to the default metric?
detailed_report : bool, default = False
Should we compute more detailed versions of the `auxiliary_metrics`? (requires `auxiliary_metrics = True`)
Returns
-------
Returns dict where keys = metrics, values = performance along each metric. To get the `eval_metric` score, do `output[predictor.eval_metric.name]`
NOTE: Metric scores are always shown in higher-is-better form.
This means that metrics such as log_loss and root_mean_squared_error will have their signs FLIPPED, and values will be negative.
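Examples
--------
A minimal sketch, assuming `predictor` is already fit and 'test.csv' is a placeholder path to labeled data in the training format:
>>> perf = predictor.evaluate('test.csv')
>>> perf[predictor.eval_metric.name]  # score on the predictor's eval_metric, in higher-is-better form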
"""
data = self.__get_dataset(data)
y_pred_proba = self.predict_proba(data=data, model=model)
return self.evaluate_predictions(y_true=data[self.label], y_pred=y_pred_proba, silent=silent, auxiliary_metrics=auxiliary_metrics, detailed_report=detailed_report)
def evaluate_predictions(self, y_true, y_pred, silent=False, auxiliary_metrics=True, detailed_report=False) -> dict:
"""
Evaluate the provided prediction probabilities against ground truth labels.
Evaluation is based on the `eval_metric` previously specified in init, or the default metric if none was specified.
Parameters
----------
y_true : :class:`np.ndarray` or :class:`pd.Series`
The ordered collection of ground-truth labels.
y_pred : :class:`pd.Series` or :class:`pd.DataFrame`
The ordered collection of prediction probabilities or predictions.
Obtainable via the output of `predictor.predict_proba`.
Caution: For certain types of `eval_metric` (such as 'roc_auc'), `y_pred` must be predicted-probabilities rather than predicted labels.
silent : bool, default = False
If False, performance results are printed.
auxiliary_metrics: bool, default = True
Should we compute other (`problem_type` specific) metrics in addition to the default metric?
detailed_report : bool, default = False
Should we compute more detailed versions of the `auxiliary_metrics`? (requires `auxiliary_metrics = True`)
Returns
-------
Returns dict where keys = metrics, values = performance along each metric.
NOTE: Metric scores are always shown in higher-is-better form.
This means that metrics such as log_loss and root_mean_squared_error will have their signs FLIPPED, and values will be negative.
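Examples
--------
A minimal sketch, assuming `test_data` is a placeholder labeled DataFrame in the training format:
>>> y_pred_proba = predictor.predict_proba(test_data)
>>> perf = predictor.evaluate_predictions(y_true=test_data[predictor.label], y_pred=y_pred_proba)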
"""
return self._learner.evaluate_predictions(y_true=y_true, y_pred=y_pred, silent=silent,
auxiliary_metrics=auxiliary_metrics, detailed_report=detailed_report)
def leaderboard(self, data=None, extra_info=False, extra_metrics=None, only_pareto_frontier=False, silent=False):
"""
Output summary of information about models produced during `fit()` as a :class:`pd.DataFrame`.
Includes information on test and validation scores for all models, model training times, inference times, and stack levels.
Output DataFrame columns include:
'model': The name of the model.
'score_val': The validation score of the model on the 'eval_metric'.
NOTE: Metric scores are always shown in higher-is-better form.
This means that metrics such as log_loss and root_mean_squared_error will have their signs FLIPPED, and values will be negative.
This is necessary so that users do not need to know the metric to tell whether higher is better when reading the leaderboard.
'pred_time_val': The inference time required to compute predictions on the validation data end-to-end.
Equivalent to the sum of all 'pred_time_val_marginal' values for the model and all of its base models.
'fit_time': The fit time required to train the model end-to-end (Including base models if the model is a stack ensemble).
Equivalent to the sum of all 'fit_time_marginal' values for the model and all of its base models.
'pred_time_val_marginal': The inference time required to compute predictions on the validation data (Ignoring inference times for base models).
Note that this ignores the time required to load the model into memory when bagging is disabled.
'fit_time_marginal': The fit time required to train the model (Ignoring base models).
'stack_level': The stack level of the model.
A model with stack level N can take any set of models with stack level less than N as input, with stack level 1 models having no model inputs.
'can_infer': If model is able to perform inference on new data. If False, then the model either was not saved, was deleted, or an ancestor of the model cannot infer.
`can_infer` is often False when `save_bag_folds=False` was specified in initial `fit()`.
'fit_order': The order in which models were fit. The first model fit has `fit_order=1`, and the Nth model fit has `fit_order=N`. The order corresponds to the first child model fit in the case of bagged ensembles.
Parameters
----------
data : str or :class:`TabularDataset` or :class:`pd.DataFrame` (optional)
This Dataset must also contain the label-column with the same column-name as specified during fit().
If specified, then the leaderboard returned will contain additional columns 'score_test', 'pred_time_test', and 'pred_time_test_marginal'.
'score_test': The score of the model on the 'eval_metric' for the data provided.
NOTE: Metric scores are always shown in higher-is-better form.
This means that metrics such as log_loss and root_mean_squared_error will have their signs FLIPPED, and values will be negative.
This is necessary so that users do not need to know the metric to tell whether higher is better when reading the leaderboard.
'pred_time_test': The true end-to-end wall-clock inference time of the model for the data provided.
Equivalent to the sum of all 'pred_time_test_marginal' values for the model and all of its base models.
'pred_time_test_marginal': The inference time of the model for the data provided, minus the inference time for the model's base models, if it has any.
Note that this ignores the time required to load the model into memory when bagging is disabled.
If str is passed, `data` will be loaded using the str value as the file path.
extra_info : bool, default = False
If `True`, will return extra columns with advanced info.
This requires additional computation as advanced info data is calculated on demand.
Additional output columns when `extra_info=True` include:
'num_features': Number of input features used by the model.
Some models may ignore certain features in the preprocessed data.
'num_models': Number of models that actually make up this "model" object.
For non-bagged models, this is 1. For bagged models, this is equal to the number of child models (models trained on bagged folds) the bagged ensemble contains.
'num_models_w_ancestors': Equivalent to the sum of 'num_models' values for the model and its ancestors (see below).
'memory_size': The amount of memory in bytes the model requires when persisted in memory. This is not equivalent to the amount of memory the model may use during inference.
For bagged models, this is the sum of the 'memory_size' of all child models.
'memory_size_w_ancestors': Equivalent to the sum of 'memory_size' values for the model and its ancestors.
This is the amount of memory required to avoid loading any models in-between inference calls to get predictions from this model.
For online-inference, this is critical. The machine performing online inference should have more than twice this amount of memory available so that models can stay persisted in memory instead of being loaded for every inference call.
'memory_size_min': The amount of memory in bytes the model minimally requires to perform inference.
For non-bagged models, this is equivalent to 'memory_size'.
For bagged models, this is equivalent to the largest child model's 'memory_size_min'.
To minimize memory usage, child models can be loaded and un-persisted one by one to infer. This is the default behavior if a bagged model was not already persisted in memory prior to inference.
'memory_size_min_w_ancestors': Equivalent to the max of the 'memory_size_min' values for the model and its ancestors.
This is the minimum required memory to infer with the model by only loading one model at a time, as each of its ancestors will also have to be loaded into memory.
For offline-inference where latency is not a concern, this should be used to determine the required memory for a machine if 'memory_size_w_ancestors' is too large.
'num_ancestors': Number of ancestor models for the given model.
'num_descendants': Number of descendant models for the given model.
'model_type': The type of the given model.
If the model is an ensemble type, 'child_model_type' will indicate the inner model type. A stack ensemble of bagged LightGBM models would have 'StackerEnsembleModel' as its model type.
'child_model_type': The child model type. None if the model is not an ensemble. A stack ensemble of bagged LightGBM models would have 'LGBModel' as its child type.
child models are models which are used as a group to generate a given bagged ensemble model's predictions. These are the models trained on each fold of a bagged ensemble.
For 10-fold bagging, the bagged ensemble model would have 10 child models.
For 10-fold bagging with 3 repeats, the bagged ensemble model would have 30 child models.
Note that child models are distinct from ancestors and descendants.
'hyperparameters': The hyperparameter values specified for the model.
All hyperparameters that do not appear in this dict remained at their default values.
'hyperparameters_fit': The hyperparameters set by the model during fit.
This overrides the 'hyperparameters' value for a particular key if present in 'hyperparameters_fit' to determine the fit model's final hyperparameters.
This is most commonly set for hyperparameters that indicate model training iterations or epochs, as early stopping can find a different value from what 'hyperparameters' indicated.
In these cases, the provided hyperparameter in 'hyperparameters' is used as a maximum for the model, but the model is still able to early stop at a smaller value during training to achieve a better validation score or to satisfy time constraints.
For example, if a NN model was given `epochs=500` as a hyperparameter, but found during training that `epochs=60` resulted in the optimal validation score, it would use `epochs=60`, and `hyperparameters_fit={'epochs': 60}` would be set.
'ag_args_fit': Special AutoGluon arguments that influence model fit.
See the documentation of the `hyperparameters` argument in `TabularPredictor.fit()` for more information.
'features': List of feature names used by the model.
'child_hyperparameters': Equivalent to 'hyperparameters', but for the model's children.
'child_hyperparameters_fit': Equivalent to 'hyperparameters_fit', but for the model's children.
'child_ag_args_fit': Equivalent to 'ag_args_fit', but for the model's children.
'ancestors': The model's ancestors. Ancestor models are the models which are required to make predictions during the construction of the model's input features.
If A is an ancestor of B, then B is a descendant of A.
If a model's ancestor is deleted, the model is no longer able to infer on new data, and its 'can_infer' value will be False.
A model can only have ancestor models whose 'stack_level' is lower than its own.
'stack_level'=1 models have no ancestors.
'descendants': The model's descendants. Descendant models are the models which require this model to make predictions during the construction of their input features.
If A is a descendant of B, then B is an ancestor of A.
If this model is deleted, then all descendant models will no longer be able to infer on new data, and their 'can_infer' values will be False.
A model can only have descendant models whose 'stack_level' is higher than its own.
extra_metrics : list, default = None
A list of metrics to calculate scores for and include in the output DataFrame.
Only valid when `data` is specified. The scores refer to the scores on `data` (same data as used to calculate the `score_test` column).
This list can contain any values which would also be valid for `eval_metric` in predictor init.
For example, `extra_metrics=['accuracy', 'roc_auc', 'log_loss']` would be valid in binary classification.
This example would return 3 additional columns in the output DataFrame, whose column names match the names of the metrics.
Passing `extra_metrics=[predictor.eval_metric]` would return an extra column in the name of the eval metric that has identical values to `score_test`.
This also works with custom metrics. If passing an object instead of a string, the column name will be equal to the `.name` attribute of the object.
NOTE: Metric scores are always shown in higher-is-better form.
This means that metrics such as log_loss and root_mean_squared_error will have their signs FLIPPED, and values will be negative.
This is necessary so that users do not need to know the metric to tell whether higher is better when reading the leaderboard.
only_pareto_frontier : bool, default = False
If `True`, only return model information of models in the Pareto frontier of the accuracy/latency trade-off (models which achieve the highest score within their end-to-end inference time).
At minimum this will include the model with the highest score and the model with the lowest inference time.
This is useful when deciding which model to use during inference if inference time is a consideration.
Models filtered out by this process would never be optimal choices for a user that only cares about model inference time and score.
silent : bool, default = False
Should leaderboard DataFrame be printed?
Returns
-------
:class:`pd.DataFrame` of model performance summary information.
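Examples
--------
A minimal sketch, assuming `predictor` is already fit; 'test.csv' is a placeholder path and the extra metric name assumes a classification problem:
>>> lb = predictor.leaderboard(silent=True)  # validation scores only
>>> lb_test = predictor.leaderboard('test.csv', extra_metrics=['accuracy'], silent=True)
>>> lb_test[['model', 'score_test', 'score_val', 'pred_time_test']]  # compare test vs validation scores and inference times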
"""
data = self.__get_dataset(data) if data is not None else data
return self._learner.leaderboard(X=data, extra_info=extra_info, extra_metrics=extra_metrics,
only_pareto_frontier=only_pareto_frontier, silent=silent)
def fit_summary(self, verbosity=3, show_plot=False):
"""
Output summary of information about models produced during `fit()`.
May create various generated summary plots and store them in folder: `predictor.path`.
Parameters
----------
verbosity : int, default = 3
Controls how detailed of a summary to output.
Set <= 0 for no output printing, 1 to print just high-level summary,
2 to print summary and create plots, >= 3 to print all information produced during `fit()`.
show_plot : bool, default = False
If True, shows the model summary plot in browser when verbosity > 1.
Returns
-------
Dict containing various detailed information. We do not recommend directly printing this dict as it may be very large.
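Examples
--------
A minimal sketch, assuming `predictor` is already fit:
>>> results = predictor.fit_summary(verbosity=1)  # print only the high-level summary
>>> results['model_best']  # name of the best model on validation data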
"""
# hpo_used = len(self._trainer.hpo_results) > 0
hpo_used = False # Disabled until a more memory efficient hpo_results object is implemented.
model_types = self._trainer.get_models_attribute_dict(attribute='type')
model_inner_types = self._trainer.get_models_attribute_dict(attribute='type_inner')
model_typenames = {key: model_types[key].__name__ for key in model_types}
model_innertypenames = {key: model_inner_types[key].__name__ for key in model_types if key in model_inner_types}
MODEL_STR = 'Model'
ENSEMBLE_STR = 'Ensemble'
for model in model_typenames:
if (model in model_innertypenames) and (ENSEMBLE_STR not in model_innertypenames[model]) and (ENSEMBLE_STR in model_typenames[model]):
new_model_typename = model_typenames[model] + "_" + model_innertypenames[model]
if new_model_typename.endswith(MODEL_STR):
new_model_typename = new_model_typename[:-len(MODEL_STR)]
model_typenames[model] = new_model_typename
unique_model_types = set(model_typenames.values()) # no more class info
# all fit() information that is returned:
results = {
'model_types': model_typenames, # dict with key = model-name, value = type of model (class-name)
'model_performance': self._trainer.get_models_attribute_dict('val_score'), # dict with key = model-name, value = validation performance
'model_best': self._trainer.model_best, # the name of the best model (on validation data)
'model_paths': self._trainer.get_models_attribute_dict('path'), # dict with key = model-name, value = path to model file
'model_fit_times': self._trainer.get_models_attribute_dict('fit_time'),
'model_pred_times': self._trainer.get_models_attribute_dict('predict_time'),
'num_bag_folds': self._trainer.k_fold,
'max_stack_level': self._trainer.get_max_level(),
}
if self.problem_type == QUANTILE:
results['num_quantiles'] = len(self.quantile_levels)
elif self.problem_type != REGRESSION:
results['num_classes'] = self._trainer.num_classes
# if hpo_used:
# results['hpo_results'] = self._trainer.hpo_results
# get dict mapping model name to final hyperparameter values for each model:
model_hyperparams = {}
for model_name in self._trainer.get_model_names():
model_obj = self._trainer.load_model(model_name)
model_hyperparams[model_name] = model_obj.params
results['model_hyperparams'] = model_hyperparams
if verbosity > 0: # print stuff
print("*** Summary of fit() ***")
print("Estimated performance of each model:")
results['leaderboard'] = self._learner.leaderboard(silent=False)
# self._summarize('model_performance', 'Validation performance of individual models', results)
# self._summarize('model_best', 'Best model (based on validation performance)', results)
# self._summarize('hyperparameter_tune', 'Hyperparameter-tuning used', results)
print("Number of models trained: %s" % len(results['model_performance']))
print("Types of models trained:")
print(unique_model_types)
num_fold_str = ""
bagging_used = results['num_bag_folds'] > 0
if bagging_used:
num_fold_str = f" (with {results['num_bag_folds']} folds)"
print("Bagging used: %s %s" % (bagging_used, num_fold_str))
num_stack_str = ""
stacking_used = results['max_stack_level'] > 2
if stacking_used:
num_stack_str = f" (with {results['max_stack_level']} levels)"
print("Multi-layer stack-ensembling used: %s %s" % (stacking_used, num_stack_str))
hpo_str = ""
# if hpo_used and verbosity <= 2:
# hpo_str = " (call fit_summary() with verbosity >= 3 to see detailed HPO info)"
# print("Hyperparameter-tuning used: %s %s" % (hpo_used, hpo_str))
# TODO: uncomment once feature_prune is functional: self._summarize('feature_prune', 'feature-selection used', results)
print("Feature Metadata (Processed):")
print("(raw dtype, special dtypes):")
print(self.feature_metadata)
if verbosity > 1: # create plots
plot_tabular_models(results, output_directory=self.path,
save_file="SummaryOfModels.html",
plot_title="Models produced during fit()",
show_plot=show_plot)
if hpo_used:
for model_type in results['hpo_results']:
if 'trial_info' in results['hpo_results'][model_type]:
plot_summary_of_models(
results['hpo_results'][model_type],
output_directory=self.path, save_file=model_type + "_HPOmodelsummary.html",
plot_title=f"Models produced during {model_type} HPO", show_plot=show_plot)
plot_performance_vs_trials(
results['hpo_results'][model_type],
output_directory=self.path, save_file=model_type + "_HPOperformanceVStrials.png",
plot_title=f"HPO trials for {model_type} models", show_plot=show_plot)
if verbosity > 2: # print detailed information
if hpo_used:
hpo_results = results['hpo_results']
print("*** Details of Hyperparameter optimization ***")
for model_type in hpo_results:
hpo_model = hpo_results[model_type]
if 'trial_info' in hpo_model:
print(f"HPO for {model_type} model: Num. configurations tried = {len(hpo_model['trial_info'])}, Time spent = {hpo_model['total_time']}s, Search strategy = {hpo_model['search_strategy']}")
print(f"Best hyperparameter-configuration (validation-performance: {self.eval_metric} = {hpo_model['validation_performance']}):")
print(hpo_model['best_config'])
"""
if bagging_used:
pass # TODO: print detailed bagging info
if stacking_used:
pass # TODO: print detailed stacking info, like how much it improves validation performance
if results['feature_prune']:
pass # TODO: print detailed feature-selection info once feature-selection is functional.
"""
if verbosity > 0:
print("*** End of fit() summary ***")
return results
def transform_features(self, data=None, model=None, base_models=None, return_original_features=True):
"""
Transforms data features through the AutoGluon feature generator.
This is useful to gain an understanding of how AutoGluon interprets the data features.
The output of this function can be used to train further models, even outside of AutoGluon.
This can be useful for training your own models on the same data representation as AutoGluon.
Individual AutoGluon models like the neural network may apply additional feature transformations that are not reflected in this method.
This method only applies universal transforms employed by all AutoGluon models.
When `data=None`, `base_models=[{best_model}]`, and bagging was enabled during fit():
This returns the out-of-fold predictions of the best model, which can be used as training input to a custom user stacker model.
Parameters
----------
data : str or :class:`TabularDataset` or :class:`pd.DataFrame` (optional)
The data to apply feature transformation to.
This data does not require the label column.
If str is passed, `data` will be loaded using the str value as the file path.
If not specified, the original data used during fit() will be used if fit() was previously called with `cache_data=True`. Otherwise, an exception will be raised.
For non-bagged mode predictors:
The data used when not specified is the validation set.
This can either be an automatically generated validation set or the user-defined `tuning_data` if passed during fit().
If all parameters are unspecified, then the output is equivalent to `predictor.load_data_internal(data='val', return_X=True, return_y=False)[0]`.
To get the label values of the output, call `predictor.load_data_internal(data='val', return_X=False, return_y=True)[1]`.
If the original training set is desired, it can be passed in through `data`.
Warning: Do not pass the original training set if `model` or `base_models` are set. This will result in overfit feature transformation.
For bagged mode predictors:
The data used when not specified is the full training set.
If all parameters are unspecified, then the output is equivalent to `predictor.load_data_internal(data='train', return_X=True, return_y=False)[0]`.
To get the label values of the output, call `predictor.load_data_internal(data='train', return_X=False, return_y=True)[1]`.
`base_model` features generated in this instance will be from out-of-fold predictions.
Note that the training set may differ from the training set originally passed during fit(), as AutoGluon may choose to drop or duplicate rows during training.
Warning: Do not pass the original training set through `data` if `model` or `base_models` are set. This will result in overfit feature transformation. Instead set `data=None`.
model : str, default = None
Model to generate input features for.
The output data will be equivalent to the input data that would be sent into `model.predict_proba(data)`.
Note: This only applies to cases where `data` is not the training data.
If `None`, then only return generically preprocessed features prior to any model fitting.
Valid models are listed in this `predictor` by calling `predictor.get_model_names()`.
Specifying a `refit_full` model will cause an exception if `data=None`.
`base_models=None` is a requirement when specifying `model`.
base_models : list, default = None
List of model names to use as base_models for a hypothetical stacker model when generating input features.
If `None`, then only return generically preprocessed features prior to any model fitting.
Valid models are listed in this `predictor` by calling `predictor.get_model_names()`.
If a stacker model S exists with `base_models=M`, then setting `base_models=M` is equivalent to setting `model=S`.
`model=None` is a requirement when specifying `base_models`.
return_original_features : bool, default = True
Whether to return the original features.
If False, only returns the additional output columns from specifying `model` or `base_models`.
This is useful to set to False if the intent is to use the output as input to further stacker models without the original features.
Returns
-------
:class:`pd.DataFrame` of the provided `data` after feature transformation has been applied.
This output does not include the label column, and will remove it if present in the supplied `data`.
If a transformed label column is desired, use `predictor.transform_labels`.
Examples
--------
>>> from autogluon.tabular import TabularPredictor
>>> predictor = TabularPredictor(label='class').fit('train.csv', auto_stack=True)  # predictor is in bagged mode.
>>> model = 'WeightedEnsemble_L2'
>>> train_data_transformed = predictor.transform_features(model=model) # Internal training DataFrame used as input to `model.fit()` for each model trained in `predictor.fit()`
>>> test_data_transformed = predictor.transform_features('test.csv', model=model) # Internal test DataFrame used as input to `model.predict_proba()` during `predictor.predict_proba(test_data, model=model)`
"""
data = self.__get_dataset(data) if data is not None else data
return self._learner.get_inputs_to_stacker(dataset=data, model=model, base_models=base_models, use_orig_features=return_original_features)
def transform_labels(self, labels, inverse=False, proba=False):
"""
Transforms data labels to the internal label representation.
This can be useful for training your own models on the same data label representation as AutoGluon.
Regression problems do not differ between original and internal representation, and thus this method will return the provided labels.
Warning: When `inverse=False`, it is possible for the output to contain NaN label values in multiclass problems if the provided label was dropped during training.
Parameters
----------
labels : :class:`np.ndarray` or :class:`pd.Series`
Labels to transform.
If `proba=False`, an example input would be the output of `predictor.predict(test_data)`.
If `proba=True`, an example input would be the output of `predictor.predict_proba(test_data, as_multiclass=False)`.
inverse : boolean, default = False
When `True`, the input labels are treated as being in the internal representation and the original representation is outputted.
proba : boolean, default = False
When `True`, the input labels are treated as probabilities and the output will be the internal representation of probabilities.
In this case, it is expected that `labels` be a :class:`pd.DataFrame` or :class:`np.ndarray`.
If the `problem_type` is multiclass:
The input column order must be equal to `predictor.class_labels`.
The output column order will be equal to `predictor.class_labels_internal`.
If `inverse=True`, the same logic applies, but with input and output columns interchanged.
When `False`, the input labels are treated as actual labels and the output will be the internal representation of the labels.
In this case, it is expected that `labels` be a :class:`pd.Series` or :class:`np.ndarray`.
Returns
-------
:class:`pd.Series` of labels if `proba=False` or :class:`pd.DataFrame` of label probabilities if `proba=True`.
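Examples
--------
A minimal round-trip sketch, assuming `test_data` is a placeholder DataFrame in the training format:
>>> y_pred = predictor.predict(test_data)
>>> y_internal = predictor.transform_labels(y_pred)  # original -> internal label representation
>>> y_original = predictor.transform_labels(y_internal, inverse=True)  # back to the original labels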
"""
if inverse:
if proba:
labels_transformed = self._learner.label_cleaner.inverse_transform_proba(y=labels, as_pandas=True)
else:
labels_transformed = self._learner.label_cleaner.inverse_transform(y=labels)
else:
if proba:
labels_transformed = self._learner.label_cleaner.transform_proba(y=labels, as_pandas=True)
else:
labels_transformed = self._learner.label_cleaner.transform(y=labels)
return labels_transformed
def feature_importance(self, data=None, model=None, features=None, feature_stage='original', subsample_size=1000, time_limit=None, num_shuffle_sets=None, include_confidence_band=True, silent=False):
"""
Calculates feature importance scores for the given model via permutation importance. Refer to https://explained.ai/rf-importance/ for an explanation of permutation importance.
A feature's importance score represents the performance drop that results when the model makes predictions on a perturbed copy of the data where this feature's values have been randomly shuffled across rows.
A feature score of 0.01 would indicate that the predictive performance dropped by 0.01 when the feature was randomly shuffled.
The higher the score a feature has, the more important it is to the model's performance.
If a feature has a negative score, this means that the feature is likely harmful to the final model, and a model trained with the feature removed would be expected to achieve a better predictive performance.
Note that calculating feature importance can be a very computationally expensive process, particularly if the model uses hundreds or thousands of features. In many cases, this can take longer than the original model training.
As a rough estimate, `feature_importance(model, data, features)` takes about as long as `predict_proba(data, model)` multiplied by the number of features.
Note: For highly accurate importance and p_value estimates, it is recommended to set `subsample_size` to at least 5,000 if possible and `num_shuffle_sets` to at least 10.
Parameters
----------
data : str or :class:`TabularDataset` or :class:`pd.DataFrame` (optional)
This data must also contain the label-column with the same column-name as specified during `fit()`.
If specified, then the data is used to calculate the feature importance scores.
If str is passed, `data` will be loaded using the str value as the file path.
If not specified, the original data used during `fit()` will be used if `cache_data=True`. Otherwise, an exception will be raised.
Do not pass the training data through this argument, as the feature importance scores calculated will be biased due to overfitting.
More accurate feature importances will be obtained from new data that was held-out during `fit()`.
model : str, default = None
Model to get feature importances for, if None the best model is chosen.
Valid models are listed in this `predictor` by calling `predictor.get_model_names()`
features : list, default = None
List of str feature names that feature importances are calculated for and returned, specify None to get all feature importances.
If you only want to compute feature importances for some of the features, you can pass their names in as a list of str.
Valid feature names change depending on the `feature_stage`.
To get the list of feature names for `feature_stage='original'`, call `predictor.feature_metadata_in.get_features()`.
To get the list of feature names for `feature_stage='transformed'`, call `list(predictor.transform_features().columns)`.
To get the list of feature names for `feature_stage='transformed_model'`, call `list(predictor.transform_features(model={model_name}).columns)`.
[Advanced] Can also contain tuples as elements of (feature_name, feature_list) form.
feature_name can be any string so long as it is unique with all other feature names / features in the list.
feature_list can be any list of valid features in the data.
This will compute importance of the combination of features in feature_list, naming the set of features in the returned DataFrame feature_name.
This importance will differ from adding the individual importances of each feature in feature_list, and will be more accurate to the overall group importance.
Example: ['featA', 'featB', 'featC', ('featBC', ['featB', 'featC'])]
In this example, the importance of 'featBC' will be calculated by jointly permuting 'featB' and 'featC' together as if they were a single two-dimensional feature.
feature_stage : str, default = 'original'
What stage of feature-processing should importances be computed for.
Options:
'original':
Compute importances of the original features.
Warning: `data` must be specified with this option, otherwise an exception will be raised.
'transformed':
Compute importances of the post-internal-transformation features (after automated feature engineering). These features may be missing some original features, or add new features entirely.
An example of new features would be ngram features generated from a text column.
Warning: For bagged models, feature importance calculation is not yet supported with this option when `data=None`. Doing so will raise an exception.
'transformed_model':
Compute importances of the post-model-transformation features. These features are the internal features used by the requested model. They may differ greatly from the original features.
If the model is a stack ensemble, this will include stack ensemble features such as the prediction probability features of the stack ensemble's base (ancestor) models.
subsample_size : int, default = 1000
The number of rows to sample from `data` when computing feature importance.
If `subsample_size=None` or `data` contains fewer than `subsample_size` rows, all rows will be used during computation.
Larger values increase the accuracy of the feature importance scores.
Runtime linearly scales with `subsample_size`.
time_limit : float, default = None
Time in seconds to limit the calculation of feature importance.
If None, feature importance will be computed without early stopping.
A minimum of 1 full shuffle set will always be evaluated. If evaluating a single shuffle set takes longer than `time_limit`, the method will still run for the duration of one full shuffle-set evaluation before returning, regardless of `time_limit`.
num_shuffle_sets : int, default = None
The number of different permutation shuffles of the data that are evaluated.
Larger values will increase the quality of the importance evaluation.
It is generally recommended to increase `subsample_size` before increasing `num_shuffle_sets`.
Defaults to 3 if `time_limit` is None or 10 if `time_limit` is specified.
Runtime linearly scales with `num_shuffle_sets`.
include_confidence_band: bool, default = True
If True, will include output columns 'p99_high' and 'p99_low' which indicates that the true feature importance will be between 'p99_high' and 'p99_low' 99% of the time (99% confidence interval).
Increasing `subsample_size` and `num_shuffle_sets` will tighten the band.
silent : bool, default = False
Whether to suppress logging output.
Returns
-------
:class:`pd.DataFrame` of feature importance scores with 6 columns:
index: The feature name.
'importance': The estimated feature importance score.
'stddev': The standard deviation of the feature importance score. If NaN, then not enough num_shuffle_sets were used to calculate a variance.
'p_value': P-value for a statistical t-test of the null hypothesis: importance = 0, vs the (one-sided) alternative: importance > 0.
Features with low p-values appear confidently useful to the predictor, while the other features may be useless to the predictor (or even harmful to include in its training data).
A low p-value (e.g. 0.01) indicates strong evidence that the feature improves the model's performance.
A high p-value (e.g. 0.99) indicates little to no evidence that the feature is useful, suggesting it may be useless or even harmful.
'n': The number of shuffles performed to estimate importance score (corresponds to sample-size used to determine confidence interval for true score).
'p99_high': Upper end of 99% confidence interval for true feature importance score.
'p99_low': Lower end of 99% confidence interval for true feature importance score.
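Examples
--------
A minimal sketch, assuming `predictor` is already fit and 'test.csv' is a placeholder path to held-out labeled data:
>>> fi = predictor.feature_importance('test.csv', subsample_size=5000, num_shuffle_sets=10)
>>> fi[['importance', 'p_value', 'p99_low', 'p99_high']].head()  # inspect importance estimates and their confidence bounds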
"""
data = self.__get_dataset(data) if data is not None else data
if (data is None) and (not self._trainer.is_data_saved):
raise AssertionError('No data was provided and there is no cached data to load for feature importance calculation. `cache_data=True` must be set in the `TabularPredictor` init `learner_kwargs` argument to enable this functionality when data is not specified.')
if num_shuffle_sets is None:
num_shuffle_sets = 10 if time_limit else 3
fi_df = self._learner.get_feature_importance(model=model, X=data, features=features, feature_stage=feature_stage,
subsample_size=subsample_size, time_limit=time_limit, num_shuffle_sets=num_shuffle_sets, silent=silent)
if include_confidence_band:
import scipy.stats
num_features = len(fi_df)
confidence_threshold = 0.99
p99_low_dict = dict()
p99_high_dict = dict()
for i in range(num_features):
fi = fi_df.iloc[i]
mean = fi['importance']
stddev = fi['stddev']
n = fi['n']
if np.isnan(stddev) or np.isnan(n) or np.isnan(mean) or n == 1:  # note: `x == np.nan` is always False, so use np.isnan to detect missing values
p99_high = np.nan
p99_low = np.nan
else:
t_val_99 = scipy.stats.t.ppf(1 - (1 - confidence_threshold) / 2, n - 1)
p99_high = mean + t_val_99 * stddev / math.sqrt(n)
p99_low = mean - t_val_99 * stddev / math.sqrt(n)
p99_high_dict[fi.name] = p99_high
p99_low_dict[fi.name] = p99_low
fi_df['p99_high'] = pd.Series(p99_high_dict)
fi_df['p99_low'] = pd.Series(p99_low_dict)
return fi_df
def persist_models(self, models='best', with_ancestors=True, max_memory=0.1) -> list:
"""
Persist models in memory for reduced inference latency. This is particularly important if the models are being used for online-inference where low latency is critical.
If models are not persisted in memory, they are loaded from disk every time they are asked to make predictions.
Parameters
----------
models : list of str or str, default = 'best'
Model names of models to persist.
If 'best' then the model with the highest validation score is persisted (this is the model used for prediction by default).
If 'all' then all models are persisted.
Valid models are listed in this `predictor` by calling `predictor.get_model_names()`.
with_ancestors : bool, default = True
If True, all ancestor models of the provided models will also be persisted.
If False, stacker models will not have the models they depend on persisted unless those models were specified in `models`. This will slow down inference as the ancestor models will still need to be loaded from disk for each predict call.
Only relevant for stacker models.
max_memory : float, default = 0.1
Proportion of total available memory to allow for the persisted models to use.
If the models' summed memory usage requires a larger proportion of memory than max_memory, they are not persisted. In this case, the output will be an empty list.
If None, then models are persisted regardless of estimated memory usage. This can cause out-of-memory errors.
Returns
-------
List of persisted model names.
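Examples
--------
A minimal sketch for low-latency online inference; `row_df` is a placeholder DataFrame of rows to score:
>>> persisted = predictor.persist_models(models='best', max_memory=0.5)  # keep the best model and its ancestors in memory
>>> y_pred = predictor.predict(row_df)  # no disk loads per call while models stay persisted
>>> predictor.unpersist_models()  # release the memory when finished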
"""
return self._learner.persist_trainer(low_memory=False, models=models, with_ancestors=with_ancestors, max_memory=max_memory)
def unpersist_models(self, models='all') -> list:
"""
Unpersist models in memory for reduced memory usage.
If models are not persisted in memory, they are loaded from disk every time they are asked to make predictions.
Note: Another way to reset the predictor and unpersist models is to reload the predictor from disk via `predictor = TabularPredictor.load(predictor.path)`.
Parameters
----------
models : list of str or str, default = 'all'
Model names of models to unpersist.
If 'all' then all models are unpersisted.
Valid models are listed in this `predictor` by calling `predictor.get_model_names_persisted()`.
Returns
-------
List of unpersisted model names.
"""
return self._learner.load_trainer().unpersist_models(model_names=models)
def refit_full(self, model='all'):
"""
Retrain model on all of the data (training + validation).
For bagged models:
Optimizes a model's inference time by collapsing bagged ensembles into a single model fit on all of the training data.
This process will typically result in a slight accuracy reduction and a large inference speedup.
The inference speedup will generally be between 10-200x faster than the original bagged ensemble model.
The inference speedup factor is equivalent to (k * n), where k is the number of folds (`num_bag_folds`) and n is the number of finished repeats (`num_bag_sets`) in the bagged ensemble.
The runtime is generally 10% or less of the original fit runtime.
The runtime can be roughly estimated as 1 / (k * n) of the original fit runtime, with k and n defined above.
For non-bagged models:
Optimizes a model's accuracy by retraining on 100% of the data without using a validation set.
Will typically result in a slight accuracy increase and no change to inference time.
The runtime will be approximately equal to the original fit runtime.
This process does not alter the original models, but instead adds additional models.
If stacker models are refit by this process, they will use the refit_full versions of the ancestor models during inference.
Models produced by this process will not have validation scores, as they use all of the data for training.
Therefore, it is up to the user to determine if the models are of sufficient quality by including test data in `predictor.leaderboard(test_data)`.
If the user does not have additional test data, they should reference the original model's score for an estimate of the performance of the refit_full model.
Warning: Be aware that utilizing refit_full models without separately verifying on test data means that the model is untested, and has no guarantee of being consistent with the original model.
`cache_data` must have been set to `True` during the original training to enable this functionality.
Parameters
----------
model : str, default = 'all'
Model name of model to refit.
If 'all' then all models are refitted.
If 'best' then the model with the highest validation score is refit.
All ancestor models will also be refit in the case that the selected model is a weighted or stacker ensemble.
Valid models are listed in this `predictor` by calling `predictor.get_model_names()`.
Returns
-------
Dictionary of original model names -> refit_full model names.
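Examples
--------
A minimal sketch (requires `cache_data=True` during the original fit); 'test.csv' is a placeholder path:
>>> best_name = predictor.get_model_best()
>>> refit_map = predictor.refit_full(model=best_name)  # dict of {original model name: refit_full model name}
>>> y_pred = predictor.predict('test.csv', model=refit_map[best_name])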
"""
refit_full_dict = self._learner.refit_ensemble_full(model=model)
return refit_full_dict
def get_model_best(self):
"""
Returns the string model name of the best model by validation score.
This is typically the same model used during inference when `predictor.predict` is called without specifying a model.
Returns
-------
String model name of the best model
"""
return self._trainer.get_model_best(can_infer=True)
def get_model_full_dict(self):
"""
Returns a dictionary of original model name -> refit full model name.
Empty unless `refit_full=True` was set during fit or `predictor.refit_full()` was called.
This can be useful when determining the best model based off of `predictor.leaderboard()`, then getting the _FULL version of the model by passing its name as the key to this dictionary.
Returns
-------
Dictionary of original model name -> refit full model name.
"""
return copy.deepcopy(self._trainer.model_full_dict)
def info(self):
"""
[EXPERIMENTAL] Returns a dictionary of `predictor` metadata.
Warning: This functionality is currently in preview mode.
The metadata information returned may change in structure in future versions without warning.
The definitions of various metadata values are not yet documented.
The output of this function should not be used for programmatic decisions.
Contains information such as row count, column count, model training time, validation scores, hyperparameters, and much more.
Returns
-------
Dictionary of `predictor` metadata.
"""
return self._learner.get_info(include_model_info=True)
# TODO: Add data argument
# TODO: Add option to disable OOF generation of newly fitted models
# TODO: Move code logic to learner/trainer
# TODO: Add fit() arg to perform this automatically at end of training
# TODO: Consider adding cutoff arguments such as top-k models
def fit_weighted_ensemble(self, base_models: list = None, name_suffix='Best', expand_pareto_frontier=False, time_limit=None):
"""
Fits new weighted ensemble models to combine predictions of previously-trained models.
`cache_data` must have been set to `True` during the original training to enable this functionality.
Parameters
----------
base_models : list, default = None
List of model names the weighted ensemble can consider as candidates.
If None, all previously trained models are considered except for weighted ensemble models.
As an example, to train a weighted ensemble that can only have weights assigned to the models 'model_a' and 'model_b', set `base_models=['model_a', 'model_b']`
name_suffix : str, default = 'Best'
Name suffix to add to the name of the newly fitted ensemble model.
expand_pareto_frontier : bool, default = False
If True, will train N-1 weighted ensemble models instead of 1, where `N=len(base_models)`.
The final model trained when True is equivalent to the model trained when False.
These weighted ensemble models will attempt to expand the pareto frontier.
This will create many different weighted ensembles which have different accuracy/memory/inference-speed trade-offs.
This is particularly useful when inference speed is an important consideration.
time_limit : int, default = None
Time in seconds each weighted ensemble model is allowed to train for. If `expand_pareto_frontier=True`, the `time_limit` value is applied to each model.
If None, the ensemble models train without time restriction.
Returns
-------
List of newly trained weighted ensemble model names.
If an exception is encountered while training an ensemble model, that model's name will be absent from the list.
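Examples
--------
A minimal sketch; the base model names are hypothetical placeholders and `cache_data=True` is required during the original fit:
>>> new_models = predictor.fit_weighted_ensemble(base_models=['model_a', 'model_b'], name_suffix='Custom')
>>> predictor.leaderboard(silent=True)  # the new weighted ensemble(s) appear alongside the existing models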
"""
trainer = self._learner.load_trainer()
if trainer.bagged_mode:
X = trainer.load_X()
y = trainer.load_y()
fit = True
else:
X = trainer.load_X_val()
y = trainer.load_y_val()
fit = False
stack_name = 'aux1'
if base_models is None:
base_models = trainer.get_model_names(stack_name='core')
X_stack_preds = trainer.get_inputs_to_stacker(X=X, base_models=base_models, fit=fit, use_orig_features=False)
models = []
if expand_pareto_frontier:
leaderboard = self.leaderboard(silent=True)
leaderboard = leaderboard[leaderboard['model'].isin(base_models)]
leaderboard = leaderboard.sort_values(by='pred_time_val')
models_to_check = leaderboard['model'].tolist()
for i in range(1, len(models_to_check) - 1):
models_to_check_now = models_to_check[:i + 1]
max_base_model_level = max([trainer.get_model_level(base_model) for base_model in models_to_check_now])
weighted_ensemble_level = max_base_model_level + 1
models += trainer.generate_weighted_ensemble(X=X_stack_preds, y=y, level=weighted_ensemble_level, stack_name=stack_name, base_model_names=models_to_check_now, name_suffix=name_suffix + '_Pareto' + str(i), time_limit=time_limit)
max_base_model_level = max([trainer.get_model_level(base_model) for base_model in base_models])
weighted_ensemble_level = max_base_model_level + 1
models += trainer.generate_weighted_ensemble(X=X_stack_preds, y=y, level=weighted_ensemble_level, stack_name=stack_name, base_model_names=base_models, name_suffix=name_suffix, time_limit=time_limit)
return models
def get_oof_pred(self, model: str = None, transformed=False, train_data=None, internal_oof=False) -> pd.Series:
"""
Note: This is advanced functionality not intended for normal usage.
Returns the out-of-fold (OOF) predictions for every row in the training data.
For more information, refer to `get_oof_pred_proba()` documentation.
Parameters
----------
model : str (optional)
Refer to `get_oof_pred_proba()` documentation.
transformed : bool, default = False
Refer to `get_oof_pred_proba()` documentation.
train_data : pd.DataFrame, default = None
Refer to `get_oof_pred_proba()` documentation.
internal_oof : bool, default = False
Refer to `get_oof_pred_proba()` documentation.
Returns
-------
:class:`pd.Series` object of the out-of-fold training predictions of the model.
"""
y_pred_proba_oof = self.get_oof_pred_proba(model=model,
transformed=transformed,
as_multiclass=True,
train_data=train_data,
internal_oof=internal_oof)
return get_pred_from_proba_df(y_pred_proba_oof, problem_type=self.problem_type)
# TODO: Improve error messages when trying to get oof from refit_full and distilled models.
# TODO: v0.1 add tutorial related to this method, as it is very powerful.
# TODO: Remove train_data argument once we start caching the raw original data: Can just load that instead.
def get_oof_pred_proba(self, model: str = None, transformed=False, as_multiclass=True, train_data=None, internal_oof=False) -> Union[pd.DataFrame, pd.Series]:
"""
Note: This is advanced functionality not intended for normal usage.
Returns the out-of-fold (OOF) predicted class probabilities for every row in the training data.
OOF prediction probabilities may provide unbiased estimates of generalization accuracy (reflecting how predictions will behave on new data).
Predictions for each row are only made using models that were fit to a subset of data where this row was held-out.
Warning: This method will raise an exception if called on a model that is not a bagged ensemble. Only bagged models (such as stacker models) can produce OOF predictions.
This also means that refit_full models and distilled models will raise an exception.
Warning: If intending to join the output of this method with the original training data, be aware that a rare edge-case issue exists:
Multiclass problems with rare classes combined with the use of the 'log_loss' eval_metric may have forced AutoGluon to duplicate rows in the training data to satisfy minimum class counts in the data.
If this has occurred, then the indices and row counts of the returned :class:`pd.Series` in this method may not align with the training data.
In this case, consider fetching the processed training data using `predictor.load_data_internal()` instead of using the original training data.
A more benign version of this issue occurs when 'log_loss' wasn't specified as the eval_metric but rare classes were dropped by AutoGluon.
In this case, not all of the original training data rows will have an OOF prediction. It is recommended to either drop these rows during the join or to get direct predictions on the missing rows via :meth:`TabularPredictor.predict_proba`.
Parameters
----------
model : str (optional)
The name of the model to get out-of-fold predictions from. Defaults to None, which uses the highest scoring model on the validation set.
Valid models are listed in this `predictor` by calling `predictor.get_model_names()`
transformed : bool, default = False
Whether the output values should be of the original label representation (False) or the internal label representation (True).
The internal representation for binary and multiclass classification are integers numbering the k possible classes from 0 to k-1, while the original representation is identical to the label classes provided during fit.
Generally, most users will want the original representation and keep `transformed=False`.
as_multiclass : bool, default = True
Whether to return binary classification probabilities as if they were for multiclass classification.
Output will contain two columns, and if `transformed=False`, the column names will correspond to the binary class labels.
The columns will be the same order as `predictor.class_labels`.
If False, output will contain only 1 column for the positive class (get positive_class name via `predictor.positive_class`).
Only impacts output for binary classification problems.
train_data : pd.DataFrame, default = None
Specify the original `train_data` to ensure that any training rows that were originally dropped internally are properly handled.
If None, then output will not contain all rows if training rows were dropped internally during fit.
internal_oof : bool, default = False
[Advanced Option] Return the internal OOF preds rather than the externally facing OOF preds.
Internal OOF preds may have more/fewer rows than was provided in train_data, and are incompatible with external data.
If you don't know what this does, keep it as False.
Returns
-------
:class:`pd.Series` or :class:`pd.DataFrame` object of the out-of-fold training prediction probabilities of the model.
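Examples
--------
A minimal sketch, assuming `train_data` is the same placeholder DataFrame that was used during a bagged fit (e.g. with `num_bag_folds=5`):
>>> oof_pred_proba = predictor.get_oof_pred_proba(train_data=train_data)
>>> perf = predictor.evaluate_predictions(y_true=train_data[predictor.label], y_pred=oof_pred_proba)  # estimate of generalization performance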
"""
if model is None:
model = self.get_model_best()
if not self._trainer.bagged_mode:
raise AssertionError('Predictor must be in bagged mode to get out-of-fold predictions.')
if model in self._trainer._model_full_dict_val_score:
# FIXME: This is a hack, add refit tag in a nicer way than via the _model_full_dict_val_score
# TODO: bagged-with-holdout refit to bagged-no-holdout should still be able to return out-of-fold predictions
raise AssertionError('_FULL models do not have out-of-fold predictions.')
if self._trainer.get_model_attribute_full(model=model, attribute='val_in_fit', func=max):
raise AssertionError(f'Model {model} does not have out-of-fold predictions because it used a validation set during training.')
y_pred_proba_oof_transformed = self.transform_features(base_models=[model], return_original_features=False)
if not internal_oof:
is_duplicate_index = y_pred_proba_oof_transformed.index.duplicated(keep='first')
if is_duplicate_index.any():
logger.log(20, 'Detected duplicate indices... This means that data rows may have been duplicated during training. '
'Removing all duplicates except for the first instance.')
y_pred_proba_oof_transformed = y_pred_proba_oof_transformed[~is_duplicate_index]
if self._learner._pre_X_rows is not None and len(y_pred_proba_oof_transformed) < self._learner._pre_X_rows:
len_diff = self._learner._pre_X_rows - len(y_pred_proba_oof_transformed)
if train_data is None:
logger.warning(f'WARNING: {len_diff} rows of training data were dropped internally during fit. '
f'The output will not contain all original training rows.\n'
f'If attempting to get `oof_pred_proba`, DO NOT pass `train_data` into `predictor.predict_proba` or `predictor.transform_features`!\n'
f'Instead this can be done by the following '
f'(Ensure `train_data` is identical to when it was used in fit):\n'
f'oof_pred_proba = predictor.get_oof_pred_proba(train_data=train_data)\n'
f'oof_pred = predictor.get_oof_pred(train_data=train_data)\n')
else:
missing_idx = list(train_data.index.difference(y_pred_proba_oof_transformed.index))
missing_idx_data = train_data.loc[missing_idx]
missing_pred_proba = self.transform_features(data=missing_idx_data, base_models=[model], return_original_features=False)
y_pred_proba_oof_transformed = pd.concat([y_pred_proba_oof_transformed, missing_pred_proba])
y_pred_proba_oof_transformed = y_pred_proba_oof_transformed.reindex(list(train_data.index))
if self.problem_type == MULTICLASS and self._learner.label_cleaner.problem_type_transform == MULTICLASS:
y_pred_proba_oof_transformed.columns = copy.deepcopy(self._learner.label_cleaner.ordered_class_labels_transformed)
elif self.problem_type == QUANTILE:
y_pred_proba_oof_transformed.columns = self.quantile_levels
else:
y_pred_proba_oof_transformed.columns = [self.label]
y_pred_proba_oof_transformed = y_pred_proba_oof_transformed[self.label]
if as_multiclass and self.problem_type == BINARY:
y_pred_proba_oof_transformed = LabelCleanerMulticlassToBinary.convert_binary_proba_to_multiclass_proba(y_pred_proba_oof_transformed, as_pandas=True)
elif self.problem_type == MULTICLASS:
if transformed:
y_pred_proba_oof_transformed = LabelCleanerMulticlassToBinary.convert_binary_proba_to_multiclass_proba(y_pred_proba_oof_transformed, as_pandas=True)
y_pred_proba_oof_transformed.columns = copy.deepcopy(self._learner.label_cleaner.ordered_class_labels_transformed)
if transformed:
return y_pred_proba_oof_transformed
else:
return self.transform_labels(labels=y_pred_proba_oof_transformed, inverse=True, proba=True)
@property
def positive_class(self):
"""
Returns the positive class name in binary classification. Useful for computing metrics such as F1 which require a positive and negative class.
In binary classification, :class:`TabularPredictor.predict_proba(as_multiclass=False)` returns the estimated probability that each row belongs to the positive class.
Will print a warning and return None if called when `predictor.problem_type != 'binary'`.
Returns
-------
The positive class name in binary classification or None if the problem is not binary classification.
"""
return self._learner.positive_class
def load_data_internal(self, data='train', return_X=True, return_y=True):
"""
Loads the internal data representation used during model training.
Individual AutoGluon models like the neural network may apply additional feature transformations that are not reflected in this method.
This method only applies universal transforms employed by all AutoGluon models.
Warning, the internal representation may:
Have different features compared to the original data.
Have different row counts compared to the original data.
Have indices which do not align with the original data.
Have label values which differ from those in the original data.
Internal data representations should NOT be combined with the original data, in most cases this is not possible.
Parameters
----------
data : str, default = 'train'
The data to load.
Valid values are:
'train':
Load the training data used during model training.
This is a transformed and augmented version of the `train_data` passed in `fit()`.
'val':
Load the validation data used during model training.
This is a transformed and augmented version of the `tuning_data` passed in `fit()`.
If `tuning_data=None` was set in `fit()`, then `tuning_data` is an automatically generated validation set created by splitting `train_data`.
Warning: Will raise an exception if called by a bagged predictor, as bagged predictors have no validation data.
return_X : bool, default = True
Whether to return the internal data features
If set to `False`, then the first element in the returned tuple will be None.
return_y : bool, default = True
Whether to return the internal data labels
If set to `False`, then the second element in the returned tuple will be None.
Returns
-------
Tuple of (:class:`pd.DataFrame`, :class:`pd.Series`) corresponding to the internal data features and internal data labels, respectively.
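Examples
--------
Illustrative sketch (assumes the predictor was fit with `cache_data=True`, the default):
>>> X_train, y_train = predictor.load_data_internal(data='train')
>>> X_train_only, _ = predictor.load_data_internal(data='train', return_y=False)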
"""
if data == 'train':
load_X = self._trainer.load_X
load_y = self._trainer.load_y
elif data == 'val':
load_X = self._trainer.load_X_val
load_y = self._trainer.load_y_val
else:
raise ValueError(f'data must be one of: [\'train\', \'val\'], but was \'{data}\'.')
X = load_X() if return_X else None
y = load_y() if return_y else None
return X, y
def save_space(self, remove_data=True, remove_fit_stack=True, requires_save=True, reduce_children=False):
"""
Reduces the memory and disk size of predictor by deleting auxiliary model files that aren't needed for prediction on new data.
This function has NO impact on inference accuracy.
It is recommended to invoke this method if the only goal is to use the trained model for prediction.
However, certain advanced functionality may no longer be available after `save_space()` has been called.
Parameters
----------
remove_data : bool, default = True
Whether to remove cached files of the original training and validation data.
Only reduces disk usage, it has no impact on memory usage.
This is especially useful when the original data was large.
This is equivalent to setting `cache_data=False` during the original `fit()`.
Will disable all advanced functionality that requires `cache_data=True`.
remove_fit_stack : bool, default = True
Whether to remove information required to fit new stacking models and continue fitting bagged models with new folds.
Only reduces disk usage, it has no impact on memory usage.
This includes:
out-of-fold (OOF) predictions
This is useful for multiclass problems with many classes, as OOF predictions can become very large on disk. (1 GB per model in extreme cases)
This disables `predictor.refit_full()` for stacker models.
requires_save : bool, default = True
Whether to remove information that requires the model to be saved again to disk.
Typically this only includes flag variables that don't have significant impact on memory or disk usage, but should technically be updated due to the removal of more important information.
An example is the `is_data_saved` boolean variable in `trainer`, which should be updated to `False` if `remove_data=True` was set.
reduce_children : bool, default = False
Whether to apply the reduction rules to bagged ensemble children models. These are the models trained for each fold of the bagged ensemble.
This should generally be kept as `False` since the most important memory and disk reduction techniques are automatically applied to these models during the original `fit()` call.
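Examples
--------
Illustrative usage after training, when the predictor will only be used for inference:
>>> predictor.save_space()
>>> predictor.save_space(remove_data=False)  # keep cached training data but still remove fit-stack artifacts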
"""
self._trainer.reduce_memory_size(remove_data=remove_data, remove_fit_stack=remove_fit_stack, remove_fit=True, remove_info=False, requires_save=requires_save, reduce_children=reduce_children)
def delete_models(self, models_to_keep=None, models_to_delete=None, allow_delete_cascade=False, delete_from_disk=True, dry_run=True):
"""
Deletes models from `predictor`.
This can be helpful to minimize memory usage and disk usage, particularly for model deployment.
This will remove all references to the models in `predictor`.
For example, removed models will not appear in `predictor.leaderboard()`.
WARNING: If `delete_from_disk=True`, this will DELETE ALL FILES in the deleted model directories, regardless if they were created by AutoGluon or not.
DO NOT STORE FILES INSIDE OF THE MODEL DIRECTORY THAT ARE UNRELATED TO AUTOGLUON.
Parameters
----------
models_to_keep : str or list, default = None
Name of model or models to not delete.
All models that are not specified and are also not required as a dependency of any model in `models_to_keep` will be deleted.
Specify `models_to_keep='best'` to keep only the best model and its model dependencies.
`models_to_delete` must be None if `models_to_keep` is set.
To see the list of possible model names, use: `predictor.get_model_names()` or `predictor.leaderboard()`.
models_to_delete : str or list, default = None
Name of model or models to delete.
All models that are not specified but depend on a model in `models_to_delete` will also be deleted.
`models_to_keep` must be None if `models_to_delete` is set.
allow_delete_cascade : bool, default = False
If `False`, if unspecified dependent models of models in `models_to_delete` exist an exception will be raised instead of deletion occurring.
An example of a dependent model is m1 if m2 is a stacker model and takes predictions from m1 as inputs. In this case, m1 would be a dependent model of m2.
If `True`, all dependent models of models in `models_to_delete` will be deleted.
Has no effect if `models_to_delete=None`.
delete_from_disk : bool, default = True
If `True`, deletes the models from disk if they were persisted.
WARNING: This deletes the entire directory for the deleted models, and ALL FILES located there.
It is highly recommended to first run with `dry_run=True` to understand which directories will be deleted.
dry_run : bool, default = True
If `True`, then deletions don't occur, and logging statements are printed describing what would have occurred.
Set `dry_run=False` to perform the deletions.
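Examples
--------
Illustrative sketch (preview the deletions with `dry_run=True` before committing to them):
>>> predictor.delete_models(models_to_keep='best', dry_run=True)
>>> predictor.delete_models(models_to_keep='best', dry_run=False)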
"""
if models_to_keep == 'best':
models_to_keep = self._trainer.model_best
if models_to_keep is None:
models_to_keep = self._trainer.get_model_best()
self._trainer.delete_models(models_to_keep=models_to_keep, models_to_delete=models_to_delete, allow_delete_cascade=allow_delete_cascade, delete_from_disk=delete_from_disk, dry_run=dry_run)
# TODO: v0.1 add documentation for arguments
def get_model_names(self, stack_name=None, level=None, can_infer: bool = None, models: list = None) -> list:
"""Returns the list of model names trained in this `predictor` object."""
return self._trainer.get_model_names(stack_name=stack_name, level=level, can_infer=can_infer, models=models)
def get_model_names_persisted(self) -> list:
"""Returns the list of model names which are persisted in memory."""
return list(self._learner.load_trainer().models.keys())
def distill(self, train_data=None, tuning_data=None, augmentation_data=None, time_limit=None, hyperparameters=None, holdout_frac=None,
teacher_preds='soft', augment_method='spunge', augment_args={'size_factor': 5, 'max_size': int(1e5)}, models_name_suffix=None, verbosity=None):
"""
Distill AutoGluon's most accurate ensemble-predictor into single models which are simpler/faster and require less memory/compute.
Distillation can produce a model that is more accurate than the same model fit directly on the original training data.
After calling `distill()`, there will be more models available in this Predictor, which can be evaluated using `predictor.leaderboard(test_data)` and deployed with: `predictor.predict(test_data, model=MODEL_NAME)`.
This will raise an exception if `cache_data=False` was previously set in `fit()`.
NOTE: Until catboost v0.24 is released, `distill()` with CatBoost students in multiclass classification requires you to first install catboost-dev: `pip install catboost-dev`
Parameters
----------
train_data : str or :class:`TabularDataset` or :class:`pd.DataFrame`, default = None
Same as `train_data` argument of `fit()`.
If None, the same training data will be loaded from `fit()` call used to produce this Predictor.
tuning_data : str or :class:`TabularDataset` or :class:`pd.DataFrame`, default = None
Same as `tuning_data` argument of `fit()`.
If `tuning_data = None` and `train_data = None`: the same training/validation splits will be loaded from `fit()` call used to produce this Predictor,
unless bagging/stacking was previously used in which case a new training/validation split is performed.
augmentation_data : :class:`TabularDataset` or :class:`pd.DataFrame`, default = None
An optional extra dataset of unlabeled rows that can be used for augmenting the dataset used to fit student models during distillation (ignored if None).
time_limit : int, default = None
Approximately how long (in seconds) the distillation process should run for.
If None, no time-constraint will be enforced allowing the distilled models to fully train.
hyperparameters : dict or str, default = None
Specifies which models to use as students and what hyperparameter-values to use for them.
Same as `hyperparameters` argument of `fit()`.
If = None, then student models will use the same hyperparameters from `fit()` used to produce this Predictor.
Note: distillation is currently only supported for ['GBM','NN','RF','CAT'] student models, other models and their hyperparameters are ignored here.
holdout_frac : float
Same as `holdout_frac` argument of :meth:`TabularPredictor.fit`.
teacher_preds : str, default = 'soft'
What form of teacher predictions to distill from (teacher refers to the most accurate AutoGluon ensemble-predictor).
If None, we only train with original labels (no data augmentation).
If 'hard', labels are hard teacher predictions given by: `teacher.predict()`
If 'soft', labels are soft teacher predictions given by: `teacher.predict_proba()`
Note: 'hard' and 'soft' are equivalent for regression problems.
If `augment_method` is not None, teacher predictions are only used to label augmented data (training data keeps original labels).
To apply label-smoothing: `teacher_preds='onehot'` will use original training data labels converted to one-hot vectors for multiclass problems (no data augmentation).
augment_method : str, default='spunge'
Specifies method to use for generating augmented data for distilling student models.
Options include:
None : no data augmentation performed.
'munge' : The MUNGE algorithm (https://www.cs.cornell.edu/~caruana/compression.kdd06.pdf).
'spunge' : A simpler, more efficient variant of the MUNGE algorithm.
augment_args : dict, default = {'size_factor':5, 'max_size': int(1e5)}
Contains the following kwargs that control the chosen `augment_method` (these are ignored if `augment_method=None`):
'num_augmented_samples': int, number of augmented datapoints used during distillation. Overrides 'size_factor', 'max_size' if specified.
'max_size': float, the maximum number of augmented datapoints to add (ignored if 'num_augmented_samples' specified).
'size_factor': float, if n = training data sample-size, we add int(n * size_factor) augmented datapoints, up to 'max_size'.
Larger values in `augment_args` will slow down the runtime of distill(), and may produce worse results if the provided time_limit is too small.
You can also pass in kwargs for the `spunge_augment`, `munge_augment` functions in `autogluon.tabular.augmentation.distill_utils`.
models_name_suffix : str, default = None
Optional suffix that can be appended at the end of all distilled student models' names.
Note: all distilled models will contain '_DSTL' substring in their name by default.
verbosity : int, default = None
Controls amount of printed output during distillation (4 = highest, 0 = lowest).
Same as `verbosity` parameter of :class:`TabularPredictor`.
If None, the same `verbosity` used in previous fit is employed again.
Returns
-------
List of names (str) corresponding to the distilled models.
Examples
--------
>>> from autogluon.tabular import TabularDataset, TabularPredictor
>>> train_data = TabularDataset('train.csv')
>>> predictor = TabularPredictor(label='class').fit(train_data, auto_stack=True)
>>> distilled_model_names = predictor.distill()
>>> test_data = TabularDataset('test.csv')
>>> ldr = predictor.leaderboard(test_data)
>>> model_to_deploy = distilled_model_names[0]
>>> predictor.predict(test_data, model=model_to_deploy)
"""
if isinstance(hyperparameters, str):
hyperparameters = get_hyperparameter_config(hyperparameters)
return self._learner.distill(X=train_data, X_val=tuning_data, time_limit=time_limit, hyperparameters=hyperparameters, holdout_frac=holdout_frac,
verbosity=verbosity, models_name_suffix=models_name_suffix, teacher_preds=teacher_preds,
augmentation_data=augmentation_data, augment_method=augment_method, augment_args=augment_args)
def plot_ensemble_model(self, prune_unused_nodes=True) -> str:
"""
Output the visualized stack ensemble architecture of a model trained by `fit()`.
The plot is stored to a file, `ensemble_model.png` in folder `predictor.path`
This function requires `graphviz` and `pygraphviz` to be installed because the visualization depends on those packages.
Otherwise, this function will raise an `ImportError` and will not be able to generate the visual of the ensemble model.
To install the required packages, run the commands below (for Ubuntu Linux):
$ sudo apt-get install graphviz
$ pip install pygraphviz
For other platforms, refer to https://graphviz.org/ for Graphviz install, and https://pygraphviz.github.io/documentation.html for PyGraphviz.
Parameters
----------
Returns
-------
The file name with the full path to the saved graphic
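Examples
--------
Illustrative sketch (assumes graphviz and pygraphviz are installed and the predictor has been fit):
>>> path_to_png = predictor.plot_ensemble_model()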
"""
try:
import pygraphviz
except ImportError:
raise ImportError('Visualizing ensemble network architecture requires the pygraphviz library')
G = self._trainer.model_graph.copy()
if prune_unused_nodes:
nodes_without_outedge = [node for node, degree in dict(G.degree()).items() if degree < 1]
else:
nodes_without_outedge = []
nodes_no_val_score = [node for node in G if G.nodes[node]['val_score'] is None]
G.remove_nodes_from(nodes_without_outedge)
G.remove_nodes_from(nodes_no_val_score)
root_node = [n for n, d in G.out_degree() if d == 0]
best_model_node = self.get_model_best()
A = nx.nx_agraph.to_agraph(G)
A.graph_attr.update(rankdir='BT')
A.node_attr.update(fontsize=10)
A.node_attr.update(shape='rectangle')
for node in A.iternodes():
node.attr['label'] = f"{node.name}\nVal score: {float(node.attr['val_score']):.4f}"
if node.name == best_model_node:
node.attr['style'] = 'filled'
node.attr['fillcolor'] = '#ff9900'
node.attr['shape'] = 'box3d'
elif nx.has_path(G, node.name, best_model_node):
node.attr['style'] = 'filled'
node.attr['fillcolor'] = '#ffcc00'
model_image_fname = os.path.join(self.path, 'ensemble_model.png')
A.draw(model_image_fname, format='png', prog='dot')
return model_image_fname
@staticmethod
def _summarize(key, msg, results):
if key in results:
print(msg + ": " + str(results[key]))
@staticmethod
def __get_dataset(data):
if isinstance(data, TabularDataset):
return data
elif isinstance(data, pd.DataFrame):
return TabularDataset(data)
elif isinstance(data, str):
return TabularDataset(data)
elif isinstance(data, pd.Series):
raise TypeError("data must be TabularDataset or pandas.DataFrame, not pandas.Series. \
To predict on just single example (ith row of table), use data.iloc[[i]] rather than data.iloc[i]")
else:
raise TypeError("data must be TabularDataset or pandas.DataFrame or str file path to data")
def _validate_hyperparameter_tune_kwargs(self, hyperparameter_tune_kwargs, time_limit=None):
"""
Returns True if hyperparameter_tune_kwargs is None or can construct a valid scheduler.
Returns False if hyperparameter_tune_kwargs results in an invalid scheduler.
"""
if hyperparameter_tune_kwargs is None:
return True
scheduler_cls, scheduler_params = scheduler_factory(hyperparameter_tune_kwargs=hyperparameter_tune_kwargs, time_out=time_limit,
nthreads_per_trial='auto', ngpus_per_trial='auto')
assert scheduler_params['searcher'] != 'bayesopt_hyperband', "searcher == 'bayesopt_hyperband' not yet supported"
if scheduler_params.get('dist_ip_addrs', None):
logger.warning('Warning: dist_ip_addrs does not currently work for Tabular. Distributed instances will not be utilized.')
if scheduler_params['num_trials'] == 1:
logger.warning('Warning: Specified num_trials == 1 for hyperparameter tuning, disabling HPO. This can occur if time_limit was not specified in `fit()`.')
return False
scheduler_ngpus = scheduler_params['resource'].get('num_gpus', 0)
if scheduler_ngpus is not None and isinstance(scheduler_ngpus, int) and scheduler_ngpus > 1:
logger.warning(f"Warning: TabularPredictor currently doesn't use >1 GPU per training run. Detected {scheduler_ngpus} GPUs.")
return True
def _set_hyperparameter_tune_kwargs_in_ag_args(self, hyperparameter_tune_kwargs, ag_args, time_limit):
if hyperparameter_tune_kwargs is not None:
if 'hyperparameter_tune_kwargs' in ag_args:
raise AssertionError('hyperparameter_tune_kwargs was specified in both ag_args and in kwargs. Please only specify once.')
else:
ag_args['hyperparameter_tune_kwargs'] = hyperparameter_tune_kwargs
if not self._validate_hyperparameter_tune_kwargs(ag_args.get('hyperparameter_tune_kwargs', None), time_limit):
ag_args.pop('hyperparameter_tune_kwargs', None)
if ag_args.get('hyperparameter_tune_kwargs', None) is not None:
logger.log(30, 'Warning: hyperparameter tuning is currently experimental and may cause the process to hang.')
return ag_args
def _set_post_fit_vars(self, learner: AbstractLearner = None):
if learner is not None:
self._learner: AbstractLearner = learner
self._learner_type = type(self._learner)
if self._learner.trainer_path is not None:
self._learner.persist_trainer(low_memory=True)
self._trainer: AbstractTrainer = self._learner.load_trainer() # Trainer object
def save(self):
"""
Save this Predictor to file in directory specified by this Predictor's `path`.
Note that :meth:`TabularPredictor.fit` already saves the predictor object automatically
(we do not recommend modifying the Predictor object yourself as it tracks many trained models).
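Examples
--------
Illustrative round-trip sketch (the path is taken from the predictor itself):
>>> predictor.save()
>>> predictor = TabularPredictor.load(predictor.path)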
"""
path = self.path
tmp_learner = self._learner
tmp_trainer = self._trainer
self._learner.save()
self._learner = None
self._trainer = None
save_pkl.save(path=path + self.predictor_file_name, object=self)
self._learner = tmp_learner
self._trainer = tmp_trainer
logger.log(20, f'TabularPredictor saved. To load, use: predictor = TabularPredictor.load("{self.path}")')
@classmethod
def load(cls, path: str, verbosity: int = None):
"""
Load a TabularPredictor object previously produced by `fit()` from file and returns this object. It is highly recommended the predictor be loaded with the exact AutoGluon version it was fit with.
Parameters
----------
path : str
The path to directory in which this Predictor was previously saved.
verbosity : int, default = None
Sets the verbosity level of this Predictor after it is loaded.
Valid values range from 0 (least verbose) to 4 (most verbose).
If None, logging verbosity is not changed from existing values.
Specify larger values to see more information printed when using Predictor during inference, smaller values to see less information.
Refer to TabularPredictor init for more information.
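Examples
--------
Illustrative sketch ('agModels-predictClass' is a placeholder for the directory used when the predictor was fit):
>>> from autogluon.tabular import TabularPredictor
>>> predictor = TabularPredictor.load('agModels-predictClass', verbosity=2)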
"""
if verbosity is not None:
set_logger_verbosity(verbosity, logger=logger) # Reset logging after load (may be in new Python session)
if path is None:
raise ValueError("path cannot be None in load()")
path = setup_outputdir(path, warn_if_exist=False) # replace ~ with absolute path if it exists
predictor: TabularPredictor = load_pkl.load(path=path + cls.predictor_file_name)
learner = predictor._learner_type.load(path)
predictor._set_post_fit_vars(learner=learner)
try:
from ..version import __version__
version_inference = __version__
except ImportError:
version_inference = None
try:
version_fit = predictor._learner.version
except AttributeError:
version_fit = None
if version_fit is None:
version_fit = 'Unknown (Likely <=0.0.11)'
if version_inference != version_fit:
logger.warning('')
logger.warning('############################## WARNING ##############################')
logger.warning('WARNING: AutoGluon version differs from the version used during the original model fit! This may lead to instability and it is highly recommended the model be loaded with the exact AutoGluon version it was fit with.')
logger.warning(f'\tFit Version: {version_fit}')
logger.warning(f'\tCurrent Version: {version_inference}')
logger.warning('############################## WARNING ##############################')
logger.warning('')
return predictor
@staticmethod
def _validate_init_kwargs(kwargs):
valid_kwargs = {
'learner_type',
'learner_kwargs',
'quantile_levels',
}
invalid_keys = []
for key in kwargs:
if key not in valid_kwargs:
invalid_keys.append(key)
if invalid_keys:
raise ValueError(f'Invalid kwargs passed: {invalid_keys}\nValid kwargs: {list(valid_kwargs)}')
def _validate_fit_kwargs(self, kwargs):
# TODO:
# Valid core_kwargs values:
# ag_args, ag_args_fit, ag_args_ensemble, stack_name, ensemble_type, name_suffix, time_limit
# Valid aux_kwargs values:
# name_suffix, time_limit, stack_name, aux_hyperparameters, ag_args, ag_args_ensemble
# TODO: Remove features from models option for fit_extra
# TODO: Constructor?
fit_kwargs_default = dict(
# data split / ensemble architecture kwargs -> Don't nest but have nested documentation -> Actually do nesting
holdout_frac=None, # TODO: Potentially error if num_bag_folds is also specified
num_bag_folds=None, # TODO: Potentially move to fit_extra, raise exception if value too large / invalid in fit_extra.
auto_stack=False,
use_bag_holdout=False,
# other
feature_generator='auto',
unlabeled_data=None,
_feature_generator_kwargs=None
)
kwargs = self._validate_fit_extra_kwargs(kwargs, extra_valid_keys=list(fit_kwargs_default.keys()))
kwargs_sanitized = fit_kwargs_default.copy()
kwargs_sanitized.update(kwargs)
return kwargs_sanitized
def _validate_fit_extra_kwargs(self, kwargs, extra_valid_keys=None):
fit_extra_kwargs_default = dict(
# data split / ensemble architecture kwargs -> Don't nest but have nested documentation -> Actually do nesting
num_bag_sets=None,
num_stack_levels=None,
hyperparameter_tune_kwargs=None,
# core_kwargs -> +1 nest
ag_args=None,
ag_args_fit=None,
ag_args_ensemble=None,
excluded_model_types=None,
# aux_kwargs -> +1 nest
# post_fit_kwargs -> +1 nest
set_best_to_refit_full=False,
keep_only_best=False,
save_space=False,
refit_full=False,
# other
verbosity=self.verbosity,
# private
_save_bag_folds=None,
# quantile levels
quantile_levels=None,
)
allowed_kwarg_names = list(fit_extra_kwargs_default.keys())
if extra_valid_keys is not None:
allowed_kwarg_names += extra_valid_keys
for kwarg_name in kwargs.keys():
if kwarg_name not in allowed_kwarg_names:
public_kwarg_options = [kwarg for kwarg in allowed_kwarg_names if kwarg[0] != '_']
public_kwarg_options.sort()
raise ValueError(f"Unknown keyword argument specified: {kwarg_name}\nValid kwargs: {public_kwarg_options}")
kwargs_sanitized = fit_extra_kwargs_default.copy()
kwargs_sanitized.update(kwargs)
# Deepcopy args to avoid altering outer context
deepcopy_args = ['ag_args', 'ag_args_fit', 'ag_args_ensemble', 'excluded_model_types']
for deepcopy_arg in deepcopy_args:
kwargs_sanitized[deepcopy_arg] = copy.deepcopy(kwargs_sanitized[deepcopy_arg])
refit_full = kwargs_sanitized['refit_full']
set_best_to_refit_full = kwargs_sanitized['set_best_to_refit_full']
if refit_full and not self._learner.cache_data:
raise ValueError('`refit_full=True` is only available when `cache_data=True`. Set `cache_data=True` to utilize `refit_full`.')
if set_best_to_refit_full and not refit_full:
raise ValueError('`set_best_to_refit_full=True` is only available when `refit_full=True`. Set `refit_full=True` to utilize `set_best_to_refit_full`.')
return kwargs_sanitized
def _validate_fit_data(self, train_data, tuning_data=None, unlabeled_data=None):
if isinstance(train_data, str):
train_data = TabularDataset(train_data)
if tuning_data is not None and isinstance(tuning_data, str):
tuning_data = TabularDataset(tuning_data)
if unlabeled_data is not None and isinstance(unlabeled_data, str):
unlabeled_data = TabularDataset(unlabeled_data)
if not isinstance(train_data, pd.DataFrame):
raise AssertionError(f'train_data is required to be a pandas DataFrame, but was instead: {type(train_data)}')
if len(set(train_data.columns)) < len(train_data.columns):
raise ValueError("Column names are not unique, please change duplicated column names (in pandas: train_data.rename(columns={'current_name':'new_name'})")
if tuning_data is not None:
if not isinstance(tuning_data, pd.DataFrame):
raise AssertionError(f'tuning_data is required to be a pandas DataFrame, but was instead: {type(tuning_data)}')
train_features = [column for column in train_data.columns if column != self.label]
tuning_features = [column for column in tuning_data.columns if column != self.label]
if self.sample_weight is not None:
if self.sample_weight in train_features:
train_features.remove(self.sample_weight)
if self.sample_weight in tuning_features:
tuning_features.remove(self.sample_weight)
train_features = np.array(train_features)
tuning_features = np.array(tuning_features)
if np.any(train_features != tuning_features):
raise ValueError("Column names must match between training and tuning data")
if unlabeled_data is not None:
if not isinstance(unlabeled_data, pd.DataFrame):
raise AssertionError(f'unlabeled_data is required to be a pandas DataFrame, but was instead: {type(unlabeled_data)}')
train_features = [column for column in train_data.columns if column != self.label]
unlabeled_features = [column for column in unlabeled_data.columns]
if self.sample_weight is not None:
if self.sample_weight in train_features:
train_features.remove(self.sample_weight)
if self.sample_weight in unlabeled_features:
unlabeled_features.remove(self.sample_weight)
train_features = sorted(np.array(train_features))
unlabeled_features = sorted(np.array(unlabeled_features))
if np.any(train_features != unlabeled_features):
raise ValueError("Column names must match between training and unlabeled data.\n"
"Unlabeled data must have not the label column specified in it.\n")
return train_data, tuning_data, unlabeled_data
def _set_feature_generator(self, feature_generator='auto', feature_metadata=None, init_kwargs=None):
if self._learner.feature_generator is not None:
if isinstance(feature_generator, str) and feature_generator == 'auto':
feature_generator = self._learner.feature_generator
else:
raise AssertionError('FeatureGenerator already exists!')
self._learner.feature_generator = get_default_feature_generator(feature_generator=feature_generator, feature_metadata=feature_metadata, init_kwargs=init_kwargs)
def _sanitize_stack_args(self, num_bag_folds, num_bag_sets, num_stack_levels, time_limit, auto_stack, num_train_rows):
if auto_stack:
# TODO: What about datasets that are 100k+? At a certain point should we not bag?
# TODO: What about time_limit? Metalearning can tell us expected runtime of each model, then we can select optimal folds + stack levels to fit time constraint
if num_bag_folds is None:
num_bag_folds = min(10, max(5, math.floor(num_train_rows / 100)))
if num_stack_levels is None:
num_stack_levels = min(1, max(0, math.floor(num_train_rows / 750)))
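# Worked example (illustrative): with num_train_rows=1000 -> num_bag_folds=min(10, max(5, 10))=10, num_stack_levels=min(1, max(0, 1))=1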
if num_bag_folds is None:
num_bag_folds = 0
if num_stack_levels is None:
num_stack_levels = 0
if not isinstance(num_bag_folds, int):
raise ValueError(f'num_bag_folds must be an integer. (num_bag_folds={num_bag_folds})')
if not isinstance(num_stack_levels, int):
raise ValueError(f'num_stack_levels must be an integer. (num_stack_levels={num_stack_levels})')
if num_bag_folds < 2 and num_bag_folds != 0:
raise ValueError(f'num_bag_folds must be equal to 0 or >=2. (num_bag_folds={num_bag_folds})')
if num_stack_levels != 0 and num_bag_folds == 0:
raise ValueError(f'num_stack_levels must be 0 if num_bag_folds is 0. (num_stack_levels={num_stack_levels}, num_bag_folds={num_bag_folds})')
if num_bag_sets is None:
if num_bag_folds >= 2:
if time_limit is not None:
num_bag_sets = 20 # TODO: v0.1 Reduce to 5 or 3 as 20 is unnecessarily extreme as a default.
else:
num_bag_sets = 1
else:
num_bag_sets = 1
if not isinstance(num_bag_sets, int):
raise ValueError(f'num_bag_sets must be an integer. (num_bag_sets={num_bag_sets})')
return num_bag_folds, num_bag_sets, num_stack_levels
# Location to store WIP functionality that will be later added to TabularPredictor
class _TabularPredictorExperimental(TabularPredictor):
# TODO: Documentation, flesh out capabilities
# TODO: Rename feature_generator -> feature_pipeline for users?
# TODO: Return transformed data?
# TODO: feature_generator_kwargs?
def fit_feature_generator(self, data: pd.DataFrame, feature_generator='auto', feature_metadata=None):
self._set_feature_generator(feature_generator=feature_generator, feature_metadata=feature_metadata)
self._learner.fit_transform_features(data)
# TODO: rename to `advice`
# TODO: Add documentation
def _advice(self):
is_feature_generator_fit = self._learner.feature_generator.is_fit()
is_learner_fit = self._learner.trainer_path is not None
exists_trainer = self._trainer is not None
advice_dict = dict(
is_feature_generator_fit=is_feature_generator_fit,
is_learner_fit=is_learner_fit,
exists_trainer=exists_trainer,
# TODO
)
advice_list = []
if not advice_dict['is_feature_generator_fit']:
advice_list.append('FeatureGenerator has not been fit, consider calling `predictor.fit_feature_generator(data)`.')
if not advice_dict['is_learner_fit']:
advice_list.append('Learner is not fit, consider calling `predictor.fit(...)`')
if not advice_dict['exists_trainer']:
advice_list.append('Trainer is not initialized, consider calling `predictor.fit(...)`')
# TODO: Advice on unused features (if no model uses a feature)
# TODO: Advice on fit_extra
# TODO: Advice on distill
# TODO: Advice on leaderboard
# TODO: Advice on persist
# TODO: Advice on refit_full
# TODO: Advice on feature_importance
# TODO: Advice on dropping poor models
logger.log(20, '======================= AutoGluon Advice =======================')
if advice_list:
for advice in advice_list:
logger.log(20, advice)
else:
logger.log(20, 'No further advice found.')
logger.log(20, '================================================================')
@classmethod
def from_learner(cls, learner: AbstractLearner):
predictor = cls(label=learner.label, path=learner.path)
predictor._set_post_fit_vars(learner=learner)
return predictor
|
the-stack_0_18136 | """ Test script for the Unicode implementation.
Written by Marc-Andre Lemburg ([email protected]).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
"""
import _string
import codecs
import itertools
import operator
import struct
import sys
import textwrap
import unicodedata
import unittest
import warnings
from test import support, string_tests
from test.support.script_helper import assert_python_failure
# Error handling (bad decoder return)
def search_function(encoding):
def decode1(input, errors="strict"):
return 42 # not a tuple
def encode1(input, errors="strict"):
return 42 # not a tuple
def encode2(input, errors="strict"):
return (42, 42) # no unicode
def decode2(input, errors="strict"):
return (42, 42) # no unicode
if encoding=="test.unicode1":
return (encode1, decode1, None, None)
elif encoding=="test.unicode2":
return (encode2, decode2, None, None)
else:
return None
codecs.register(search_function)
def duplicate_string(text):
"""
Try to get a fresh clone of the specified text:
new object with a reference count of 1.
This is a best-effort: latin1 single letters and the empty
string ('') are singletons and cannot be cloned.
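Illustrative example:
>>> original = 'some text'
>>> clone = duplicate_string(original)
>>> clone == original
True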
"""
return text.encode().decode()
class StrSubclass(str):
pass
class UnicodeTest(string_tests.CommonTest,
string_tests.MixinStrUnicodeUserStringTest,
string_tests.MixinStrUnicodeTest,
unittest.TestCase):
type2test = str
def checkequalnofix(self, result, object, methodname, *args):
method = getattr(object, methodname)
realresult = method(*args)
self.assertEqual(realresult, result)
self.assertTrue(type(realresult) is type(result))
# if the original is returned make sure that
# this doesn't happen with subclasses
if realresult is object:
class usub(str):
def __repr__(self):
return 'usub(%r)' % str.__repr__(self)
object = usub(object)
method = getattr(object, methodname)
realresult = method(*args)
self.assertEqual(realresult, result)
self.assertTrue(object is not realresult)
def test_literals(self):
self.assertEqual('\xff', '\u00ff')
self.assertEqual('\uffff', '\U0000ffff')
self.assertRaises(SyntaxError, eval, '\'\\Ufffffffe\'')
self.assertRaises(SyntaxError, eval, '\'\\Uffffffff\'')
self.assertRaises(SyntaxError, eval, '\'\\U%08x\'' % 0x110000)
# raw strings should not have unicode escapes
self.assertNotEqual(r"\u0020", " ")
def test_ascii(self):
if not sys.platform.startswith('java'):
# Test basic sanity of repr()
self.assertEqual(ascii('abc'), "'abc'")
self.assertEqual(ascii('ab\\c'), "'ab\\\\c'")
self.assertEqual(ascii('ab\\'), "'ab\\\\'")
self.assertEqual(ascii('\\c'), "'\\\\c'")
self.assertEqual(ascii('\\'), "'\\\\'")
self.assertEqual(ascii('\n'), "'\\n'")
self.assertEqual(ascii('\r'), "'\\r'")
self.assertEqual(ascii('\t'), "'\\t'")
self.assertEqual(ascii('\b'), "'\\x08'")
self.assertEqual(ascii("'\""), """'\\'"'""")
self.assertEqual(ascii("'\""), """'\\'"'""")
self.assertEqual(ascii("'"), '''"'"''')
self.assertEqual(ascii('"'), """'"'""")
latin1repr = (
"'\\x00\\x01\\x02\\x03\\x04\\x05\\x06\\x07\\x08\\t\\n\\x0b\\x0c\\r"
"\\x0e\\x0f\\x10\\x11\\x12\\x13\\x14\\x15\\x16\\x17\\x18\\x19\\x1a"
"\\x1b\\x1c\\x1d\\x1e\\x1f !\"#$%&\\'()*+,-./0123456789:;<=>?@ABCDEFGHI"
"JKLMNOPQRSTUVWXYZ[\\\\]^_`abcdefghijklmnopqrstuvwxyz{|}~\\x7f"
"\\x80\\x81\\x82\\x83\\x84\\x85\\x86\\x87\\x88\\x89\\x8a\\x8b\\x8c\\x8d"
"\\x8e\\x8f\\x90\\x91\\x92\\x93\\x94\\x95\\x96\\x97\\x98\\x99\\x9a\\x9b"
"\\x9c\\x9d\\x9e\\x9f\\xa0\\xa1\\xa2\\xa3\\xa4\\xa5\\xa6\\xa7\\xa8\\xa9"
"\\xaa\\xab\\xac\\xad\\xae\\xaf\\xb0\\xb1\\xb2\\xb3\\xb4\\xb5\\xb6\\xb7"
"\\xb8\\xb9\\xba\\xbb\\xbc\\xbd\\xbe\\xbf\\xc0\\xc1\\xc2\\xc3\\xc4\\xc5"
"\\xc6\\xc7\\xc8\\xc9\\xca\\xcb\\xcc\\xcd\\xce\\xcf\\xd0\\xd1\\xd2\\xd3"
"\\xd4\\xd5\\xd6\\xd7\\xd8\\xd9\\xda\\xdb\\xdc\\xdd\\xde\\xdf\\xe0\\xe1"
"\\xe2\\xe3\\xe4\\xe5\\xe6\\xe7\\xe8\\xe9\\xea\\xeb\\xec\\xed\\xee\\xef"
"\\xf0\\xf1\\xf2\\xf3\\xf4\\xf5\\xf6\\xf7\\xf8\\xf9\\xfa\\xfb\\xfc\\xfd"
"\\xfe\\xff'")
testrepr = ascii(''.join(map(chr, range(256))))
self.assertEqual(testrepr, latin1repr)
# Test ascii works on wide unicode escapes without overflow.
self.assertEqual(ascii("\U00010000" * 39 + "\uffff" * 4096),
ascii("\U00010000" * 39 + "\uffff" * 4096))
class WrongRepr:
def __repr__(self):
return b'byte-repr'
self.assertRaises(TypeError, ascii, WrongRepr())
def test_repr(self):
if not sys.platform.startswith('java'):
# Test basic sanity of repr()
self.assertEqual(repr('abc'), "'abc'")
self.assertEqual(repr('ab\\c'), "'ab\\\\c'")
self.assertEqual(repr('ab\\'), "'ab\\\\'")
self.assertEqual(repr('\\c'), "'\\\\c'")
self.assertEqual(repr('\\'), "'\\\\'")
self.assertEqual(repr('\n'), "'\\n'")
self.assertEqual(repr('\r'), "'\\r'")
self.assertEqual(repr('\t'), "'\\t'")
self.assertEqual(repr('\b'), "'\\x08'")
self.assertEqual(repr("'\""), """'\\'"'""")
self.assertEqual(repr("'\""), """'\\'"'""")
self.assertEqual(repr("'"), '''"'"''')
self.assertEqual(repr('"'), """'"'""")
latin1repr = (
"'\\x00\\x01\\x02\\x03\\x04\\x05\\x06\\x07\\x08\\t\\n\\x0b\\x0c\\r"
"\\x0e\\x0f\\x10\\x11\\x12\\x13\\x14\\x15\\x16\\x17\\x18\\x19\\x1a"
"\\x1b\\x1c\\x1d\\x1e\\x1f !\"#$%&\\'()*+,-./0123456789:;<=>?@ABCDEFGHI"
"JKLMNOPQRSTUVWXYZ[\\\\]^_`abcdefghijklmnopqrstuvwxyz{|}~\\x7f"
"\\x80\\x81\\x82\\x83\\x84\\x85\\x86\\x87\\x88\\x89\\x8a\\x8b\\x8c\\x8d"
"\\x8e\\x8f\\x90\\x91\\x92\\x93\\x94\\x95\\x96\\x97\\x98\\x99\\x9a\\x9b"
"\\x9c\\x9d\\x9e\\x9f\\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7\xa8\xa9"
"\xaa\xab\xac\\xad\xae\xaf\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7"
"\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf\xc0\xc1\xc2\xc3\xc4\xc5"
"\xc6\xc7\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf\xd0\xd1\xd2\xd3"
"\xd4\xd5\xd6\xd7\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf\xe0\xe1"
"\xe2\xe3\xe4\xe5\xe6\xe7\xe8\xe9\xea\xeb\xec\xed\xee\xef"
"\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7\xf8\xf9\xfa\xfb\xfc\xfd"
"\xfe\xff'")
testrepr = repr(''.join(map(chr, range(256))))
self.assertEqual(testrepr, latin1repr)
# Test repr works on wide unicode escapes without overflow.
self.assertEqual(repr("\U00010000" * 39 + "\uffff" * 4096),
repr("\U00010000" * 39 + "\uffff" * 4096))
class WrongRepr:
def __repr__(self):
return b'byte-repr'
self.assertRaises(TypeError, repr, WrongRepr())
def test_iterators(self):
# Make sure unicode objects have an __iter__ method
it = "\u1111\u2222\u3333".__iter__()
self.assertEqual(next(it), "\u1111")
self.assertEqual(next(it), "\u2222")
self.assertEqual(next(it), "\u3333")
self.assertRaises(StopIteration, next, it)
def test_count(self):
string_tests.CommonTest.test_count(self)
# check mixed argument types
self.checkequalnofix(3, 'aaa', 'count', 'a')
self.checkequalnofix(0, 'aaa', 'count', 'b')
self.checkequalnofix(3, 'aaa', 'count', 'a')
self.checkequalnofix(0, 'aaa', 'count', 'b')
self.checkequalnofix(0, 'aaa', 'count', 'b')
self.checkequalnofix(1, 'aaa', 'count', 'a', -1)
self.checkequalnofix(3, 'aaa', 'count', 'a', -10)
self.checkequalnofix(2, 'aaa', 'count', 'a', 0, -1)
self.checkequalnofix(0, 'aaa', 'count', 'a', 0, -10)
# test mixed kinds
self.checkequal(10, '\u0102' + 'a' * 10, 'count', 'a')
self.checkequal(10, '\U00100304' + 'a' * 10, 'count', 'a')
self.checkequal(10, '\U00100304' + '\u0102' * 10, 'count', '\u0102')
self.checkequal(0, 'a' * 10, 'count', '\u0102')
self.checkequal(0, 'a' * 10, 'count', '\U00100304')
self.checkequal(0, '\u0102' * 10, 'count', '\U00100304')
self.checkequal(10, '\u0102' + 'a_' * 10, 'count', 'a_')
self.checkequal(10, '\U00100304' + 'a_' * 10, 'count', 'a_')
self.checkequal(10, '\U00100304' + '\u0102_' * 10, 'count', '\u0102_')
self.checkequal(0, 'a' * 10, 'count', 'a\u0102')
self.checkequal(0, 'a' * 10, 'count', 'a\U00100304')
self.checkequal(0, '\u0102' * 10, 'count', '\u0102\U00100304')
def test_find(self):
string_tests.CommonTest.test_find(self)
# test implementation details of the memchr fast path
self.checkequal(100, 'a' * 100 + '\u0102', 'find', '\u0102')
self.checkequal(-1, 'a' * 100 + '\u0102', 'find', '\u0201')
self.checkequal(-1, 'a' * 100 + '\u0102', 'find', '\u0120')
self.checkequal(-1, 'a' * 100 + '\u0102', 'find', '\u0220')
self.checkequal(100, 'a' * 100 + '\U00100304', 'find', '\U00100304')
self.checkequal(-1, 'a' * 100 + '\U00100304', 'find', '\U00100204')
self.checkequal(-1, 'a' * 100 + '\U00100304', 'find', '\U00102004')
# check mixed argument types
self.checkequalnofix(0, 'abcdefghiabc', 'find', 'abc')
self.checkequalnofix(9, 'abcdefghiabc', 'find', 'abc', 1)
self.checkequalnofix(-1, 'abcdefghiabc', 'find', 'def', 4)
self.assertRaises(TypeError, 'hello'.find)
self.assertRaises(TypeError, 'hello'.find, 42)
# test mixed kinds
self.checkequal(100, '\u0102' * 100 + 'a', 'find', 'a')
self.checkequal(100, '\U00100304' * 100 + 'a', 'find', 'a')
self.checkequal(100, '\U00100304' * 100 + '\u0102', 'find', '\u0102')
self.checkequal(-1, 'a' * 100, 'find', '\u0102')
self.checkequal(-1, 'a' * 100, 'find', '\U00100304')
self.checkequal(-1, '\u0102' * 100, 'find', '\U00100304')
self.checkequal(100, '\u0102' * 100 + 'a_', 'find', 'a_')
self.checkequal(100, '\U00100304' * 100 + 'a_', 'find', 'a_')
self.checkequal(100, '\U00100304' * 100 + '\u0102_', 'find', '\u0102_')
self.checkequal(-1, 'a' * 100, 'find', 'a\u0102')
self.checkequal(-1, 'a' * 100, 'find', 'a\U00100304')
self.checkequal(-1, '\u0102' * 100, 'find', '\u0102\U00100304')
def test_rfind(self):
string_tests.CommonTest.test_rfind(self)
# test implementation details of the memrchr fast path
self.checkequal(0, '\u0102' + 'a' * 100 , 'rfind', '\u0102')
self.checkequal(-1, '\u0102' + 'a' * 100 , 'rfind', '\u0201')
self.checkequal(-1, '\u0102' + 'a' * 100 , 'rfind', '\u0120')
self.checkequal(-1, '\u0102' + 'a' * 100 , 'rfind', '\u0220')
self.checkequal(0, '\U00100304' + 'a' * 100, 'rfind', '\U00100304')
self.checkequal(-1, '\U00100304' + 'a' * 100, 'rfind', '\U00100204')
self.checkequal(-1, '\U00100304' + 'a' * 100, 'rfind', '\U00102004')
# check mixed argument types
self.checkequalnofix(9, 'abcdefghiabc', 'rfind', 'abc')
self.checkequalnofix(12, 'abcdefghiabc', 'rfind', '')
self.checkequalnofix(12, 'abcdefghiabc', 'rfind', '')
# test mixed kinds
self.checkequal(0, 'a' + '\u0102' * 100, 'rfind', 'a')
self.checkequal(0, 'a' + '\U00100304' * 100, 'rfind', 'a')
self.checkequal(0, '\u0102' + '\U00100304' * 100, 'rfind', '\u0102')
self.checkequal(-1, 'a' * 100, 'rfind', '\u0102')
self.checkequal(-1, 'a' * 100, 'rfind', '\U00100304')
self.checkequal(-1, '\u0102' * 100, 'rfind', '\U00100304')
self.checkequal(0, '_a' + '\u0102' * 100, 'rfind', '_a')
self.checkequal(0, '_a' + '\U00100304' * 100, 'rfind', '_a')
self.checkequal(0, '_\u0102' + '\U00100304' * 100, 'rfind', '_\u0102')
self.checkequal(-1, 'a' * 100, 'rfind', '\u0102a')
self.checkequal(-1, 'a' * 100, 'rfind', '\U00100304a')
self.checkequal(-1, '\u0102' * 100, 'rfind', '\U00100304\u0102')
def test_index(self):
string_tests.CommonTest.test_index(self)
self.checkequalnofix(0, 'abcdefghiabc', 'index', '')
self.checkequalnofix(3, 'abcdefghiabc', 'index', 'def')
self.checkequalnofix(0, 'abcdefghiabc', 'index', 'abc')
self.checkequalnofix(9, 'abcdefghiabc', 'index', 'abc', 1)
self.assertRaises(ValueError, 'abcdefghiabc'.index, 'hib')
self.assertRaises(ValueError, 'abcdefghiab'.index, 'abc', 1)
self.assertRaises(ValueError, 'abcdefghi'.index, 'ghi', 8)
self.assertRaises(ValueError, 'abcdefghi'.index, 'ghi', -1)
# test mixed kinds
self.checkequal(100, '\u0102' * 100 + 'a', 'index', 'a')
self.checkequal(100, '\U00100304' * 100 + 'a', 'index', 'a')
self.checkequal(100, '\U00100304' * 100 + '\u0102', 'index', '\u0102')
self.assertRaises(ValueError, ('a' * 100).index, '\u0102')
self.assertRaises(ValueError, ('a' * 100).index, '\U00100304')
self.assertRaises(ValueError, ('\u0102' * 100).index, '\U00100304')
self.checkequal(100, '\u0102' * 100 + 'a_', 'index', 'a_')
self.checkequal(100, '\U00100304' * 100 + 'a_', 'index', 'a_')
self.checkequal(100, '\U00100304' * 100 + '\u0102_', 'index', '\u0102_')
self.assertRaises(ValueError, ('a' * 100).index, 'a\u0102')
self.assertRaises(ValueError, ('a' * 100).index, 'a\U00100304')
self.assertRaises(ValueError, ('\u0102' * 100).index, '\u0102\U00100304')
def test_rindex(self):
string_tests.CommonTest.test_rindex(self)
self.checkequalnofix(12, 'abcdefghiabc', 'rindex', '')
self.checkequalnofix(3, 'abcdefghiabc', 'rindex', 'def')
self.checkequalnofix(9, 'abcdefghiabc', 'rindex', 'abc')
self.checkequalnofix(0, 'abcdefghiabc', 'rindex', 'abc', 0, -1)
self.assertRaises(ValueError, 'abcdefghiabc'.rindex, 'hib')
self.assertRaises(ValueError, 'defghiabc'.rindex, 'def', 1)
self.assertRaises(ValueError, 'defghiabc'.rindex, 'abc', 0, -1)
self.assertRaises(ValueError, 'abcdefghi'.rindex, 'ghi', 0, 8)
self.assertRaises(ValueError, 'abcdefghi'.rindex, 'ghi', 0, -1)
# test mixed kinds
self.checkequal(0, 'a' + '\u0102' * 100, 'rindex', 'a')
self.checkequal(0, 'a' + '\U00100304' * 100, 'rindex', 'a')
self.checkequal(0, '\u0102' + '\U00100304' * 100, 'rindex', '\u0102')
self.assertRaises(ValueError, ('a' * 100).rindex, '\u0102')
self.assertRaises(ValueError, ('a' * 100).rindex, '\U00100304')
self.assertRaises(ValueError, ('\u0102' * 100).rindex, '\U00100304')
self.checkequal(0, '_a' + '\u0102' * 100, 'rindex', '_a')
self.checkequal(0, '_a' + '\U00100304' * 100, 'rindex', '_a')
self.checkequal(0, '_\u0102' + '\U00100304' * 100, 'rindex', '_\u0102')
self.assertRaises(ValueError, ('a' * 100).rindex, '\u0102a')
self.assertRaises(ValueError, ('a' * 100).rindex, '\U00100304a')
self.assertRaises(ValueError, ('\u0102' * 100).rindex, '\U00100304\u0102')
def test_maketrans_translate(self):
# these work with plain translate()
self.checkequalnofix('bbbc', 'abababc', 'translate',
{ord('a'): None})
self.checkequalnofix('iiic', 'abababc', 'translate',
{ord('a'): None, ord('b'): ord('i')})
self.checkequalnofix('iiix', 'abababc', 'translate',
{ord('a'): None, ord('b'): ord('i'), ord('c'): 'x'})
self.checkequalnofix('c', 'abababc', 'translate',
{ord('a'): None, ord('b'): ''})
self.checkequalnofix('xyyx', 'xzx', 'translate',
{ord('z'): 'yy'})
# this needs maketrans()
self.checkequalnofix('abababc', 'abababc', 'translate',
{'b': '<i>'})
tbl = self.type2test.maketrans({'a': None, 'b': '<i>'})
self.checkequalnofix('<i><i><i>c', 'abababc', 'translate', tbl)
# test alternative way of calling maketrans()
tbl = self.type2test.maketrans('abc', 'xyz', 'd')
self.checkequalnofix('xyzzy', 'abdcdcbdddd', 'translate', tbl)
# various tests switching from ASCII to latin1 or the opposite;
# same length, remove a letter, or replace with a longer string.
self.assertEqual("[a]".translate(str.maketrans('a', 'X')),
"[X]")
self.assertEqual("[a]".translate(str.maketrans({'a': 'X'})),
"[X]")
self.assertEqual("[a]".translate(str.maketrans({'a': None})),
"[]")
self.assertEqual("[a]".translate(str.maketrans({'a': 'XXX'})),
"[XXX]")
self.assertEqual("[a]".translate(str.maketrans({'a': '\xe9'})),
"[\xe9]")
self.assertEqual('axb'.translate(str.maketrans({'a': None, 'b': '123'})),
"x123")
self.assertEqual('axb'.translate(str.maketrans({'a': None, 'b': '\xe9'})),
"x\xe9")
# test non-ASCII (don't take the fast-path)
self.assertEqual("[a]".translate(str.maketrans({'a': '<\xe9>'})),
"[<\xe9>]")
self.assertEqual("[\xe9]".translate(str.maketrans({'\xe9': 'a'})),
"[a]")
self.assertEqual("[\xe9]".translate(str.maketrans({'\xe9': None})),
"[]")
self.assertEqual("[\xe9]".translate(str.maketrans({'\xe9': '123'})),
"[123]")
self.assertEqual("[a\xe9]".translate(str.maketrans({'a': '<\u20ac>'})),
"[<\u20ac>\xe9]")
# invalid Unicode characters
invalid_char = 0x10ffff+1
for before in "a\xe9\u20ac\U0010ffff":
mapping = str.maketrans({before: invalid_char})
text = "[%s]" % before
self.assertRaises(ValueError, text.translate, mapping)
# errors
self.assertRaises(TypeError, self.type2test.maketrans)
self.assertRaises(ValueError, self.type2test.maketrans, 'abc', 'defg')
self.assertRaises(TypeError, self.type2test.maketrans, 2, 'def')
self.assertRaises(TypeError, self.type2test.maketrans, 'abc', 2)
self.assertRaises(TypeError, self.type2test.maketrans, 'abc', 'def', 2)
self.assertRaises(ValueError, self.type2test.maketrans, {'xy': 2})
self.assertRaises(TypeError, self.type2test.maketrans, {(1,): 2})
self.assertRaises(TypeError, 'hello'.translate)
self.assertRaises(TypeError, 'abababc'.translate, 'abc', 'xyz')
def test_split(self):
string_tests.CommonTest.test_split(self)
# test mixed kinds
for left, right in ('ba', '\u0101\u0100', '\U00010301\U00010300'):
left *= 9
right *= 9
for delim in ('c', '\u0102', '\U00010302'):
self.checkequal([left + right],
left + right, 'split', delim)
self.checkequal([left, right],
left + delim + right, 'split', delim)
self.checkequal([left + right],
left + right, 'split', delim * 2)
self.checkequal([left, right],
left + delim * 2 + right, 'split', delim *2)
def test_rsplit(self):
string_tests.CommonTest.test_rsplit(self)
# test mixed kinds
for left, right in ('ba', '\u0101\u0100', '\U00010301\U00010300'):
left *= 9
right *= 9
for delim in ('c', '\u0102', '\U00010302'):
self.checkequal([left + right],
left + right, 'rsplit', delim)
self.checkequal([left, right],
left + delim + right, 'rsplit', delim)
self.checkequal([left + right],
left + right, 'rsplit', delim * 2)
self.checkequal([left, right],
left + delim * 2 + right, 'rsplit', delim *2)
def test_partition(self):
string_tests.MixinStrUnicodeUserStringTest.test_partition(self)
# test mixed kinds
self.checkequal(('ABCDEFGH', '', ''), 'ABCDEFGH', 'partition', '\u4200')
for left, right in ('ba', '\u0101\u0100', '\U00010301\U00010300'):
left *= 9
right *= 9
for delim in ('c', '\u0102', '\U00010302'):
self.checkequal((left + right, '', ''),
left + right, 'partition', delim)
self.checkequal((left, delim, right),
left + delim + right, 'partition', delim)
self.checkequal((left + right, '', ''),
left + right, 'partition', delim * 2)
self.checkequal((left, delim * 2, right),
left + delim * 2 + right, 'partition', delim * 2)
def test_rpartition(self):
string_tests.MixinStrUnicodeUserStringTest.test_rpartition(self)
# test mixed kinds
self.checkequal(('', '', 'ABCDEFGH'), 'ABCDEFGH', 'rpartition', '\u4200')
for left, right in ('ba', '\u0101\u0100', '\U00010301\U00010300'):
left *= 9
right *= 9
for delim in ('c', '\u0102', '\U00010302'):
self.checkequal(('', '', left + right),
left + right, 'rpartition', delim)
self.checkequal((left, delim, right),
left + delim + right, 'rpartition', delim)
self.checkequal(('', '', left + right),
left + right, 'rpartition', delim * 2)
self.checkequal((left, delim * 2, right),
left + delim * 2 + right, 'rpartition', delim * 2)
def test_join(self):
string_tests.MixinStrUnicodeUserStringTest.test_join(self)
class MyWrapper:
def __init__(self, sval): self.sval = sval
def __str__(self): return self.sval
# mixed arguments
self.checkequalnofix('a b c d', ' ', 'join', ['a', 'b', 'c', 'd'])
self.checkequalnofix('abcd', '', 'join', ('a', 'b', 'c', 'd'))
self.checkequalnofix('w x y z', ' ', 'join', string_tests.Sequence('wxyz'))
self.checkequalnofix('a b c d', ' ', 'join', ['a', 'b', 'c', 'd'])
self.checkequalnofix('a b c d', ' ', 'join', ['a', 'b', 'c', 'd'])
self.checkequalnofix('abcd', '', 'join', ('a', 'b', 'c', 'd'))
self.checkequalnofix('w x y z', ' ', 'join', string_tests.Sequence('wxyz'))
self.checkraises(TypeError, ' ', 'join', ['1', '2', MyWrapper('foo')])
self.checkraises(TypeError, ' ', 'join', ['1', '2', '3', bytes()])
self.checkraises(TypeError, ' ', 'join', [1, 2, 3])
self.checkraises(TypeError, ' ', 'join', ['1', '2', 3])
@unittest.skipIf(sys.maxsize > 2**32,
'needs too much memory on a 64-bit platform')
def test_join_overflow(self):
size = int(sys.maxsize**0.5) + 1
seq = ('A' * size,) * size
self.assertRaises(OverflowError, ''.join, seq)
def test_replace(self):
string_tests.CommonTest.test_replace(self)
# method call forwarded from str implementation because of unicode argument
self.checkequalnofix('one@two!three!', 'one!two!three!', 'replace', '!', '@', 1)
self.assertRaises(TypeError, 'replace'.replace, "r", 42)
# test mixed kinds
for left, right in ('ba', '\u0101\u0100', '\U00010301\U00010300'):
left *= 9
right *= 9
for delim in ('c', '\u0102', '\U00010302'):
for repl in ('d', '\u0103', '\U00010303'):
self.checkequal(left + right,
left + right, 'replace', delim, repl)
self.checkequal(left + repl + right,
left + delim + right,
'replace', delim, repl)
self.checkequal(left + right,
left + right, 'replace', delim * 2, repl)
self.checkequal(left + repl + right,
left + delim * 2 + right,
'replace', delim * 2, repl)
@support.cpython_only
def test_replace_id(self):
pattern = 'abc'
text = 'abc def'
self.assertIs(text.replace(pattern, pattern), text)
def test_bytes_comparison(self):
with support.check_warnings():
warnings.simplefilter('ignore', BytesWarning)
self.assertEqual('abc' == b'abc', False)
self.assertEqual('abc' != b'abc', True)
self.assertEqual('abc' == bytearray(b'abc'), False)
self.assertEqual('abc' != bytearray(b'abc'), True)
def test_comparison(self):
# Comparisons:
self.assertEqual('abc', 'abc')
self.assertTrue('abcd' > 'abc')
self.assertTrue('abc' < 'abcd')
if 0:
# Move these tests to a Unicode collation module test...
# Testing UTF-16 code point order comparisons...
# No surrogates, no fixup required.
self.assertTrue('\u0061' < '\u20ac')
# Non surrogate below surrogate value, no fixup required
self.assertTrue('\u0061' < '\ud800\udc02')
# Non surrogate above surrogate value, fixup required
def test_lecmp(s, s2):
self.assertTrue(s < s2)
def test_fixup(s):
s2 = '\ud800\udc01'
test_lecmp(s, s2)
s2 = '\ud900\udc01'
test_lecmp(s, s2)
s2 = '\uda00\udc01'
test_lecmp(s, s2)
s2 = '\udb00\udc01'
test_lecmp(s, s2)
s2 = '\ud800\udd01'
test_lecmp(s, s2)
s2 = '\ud900\udd01'
test_lecmp(s, s2)
s2 = '\uda00\udd01'
test_lecmp(s, s2)
s2 = '\udb00\udd01'
test_lecmp(s, s2)
s2 = '\ud800\ude01'
test_lecmp(s, s2)
s2 = '\ud900\ude01'
test_lecmp(s, s2)
s2 = '\uda00\ude01'
test_lecmp(s, s2)
s2 = '\udb00\ude01'
test_lecmp(s, s2)
s2 = '\ud800\udfff'
test_lecmp(s, s2)
s2 = '\ud900\udfff'
test_lecmp(s, s2)
s2 = '\uda00\udfff'
test_lecmp(s, s2)
s2 = '\udb00\udfff'
test_lecmp(s, s2)
test_fixup('\ue000')
test_fixup('\uff61')
# Surrogates on both sides, no fixup required
self.assertTrue('\ud800\udc02' < '\ud84d\udc56')
def test_islower(self):
super().test_islower()
self.checkequalnofix(False, '\u1FFc', 'islower')
self.assertFalse('\u2167'.islower())
self.assertTrue('\u2177'.islower())
# non-BMP, uppercase
self.assertFalse('\U00010401'.islower())
self.assertFalse('\U00010427'.islower())
# non-BMP, lowercase
self.assertTrue('\U00010429'.islower())
self.assertTrue('\U0001044E'.islower())
# non-BMP, non-cased
self.assertFalse('\U0001F40D'.islower())
self.assertFalse('\U0001F46F'.islower())
def test_isupper(self):
super().test_isupper()
if not sys.platform.startswith('java'):
self.checkequalnofix(False, '\u1FFc', 'isupper')
self.assertTrue('\u2167'.isupper())
self.assertFalse('\u2177'.isupper())
# non-BMP, uppercase
self.assertTrue('\U00010401'.isupper())
self.assertTrue('\U00010427'.isupper())
# non-BMP, lowercase
self.assertFalse('\U00010429'.isupper())
self.assertFalse('\U0001044E'.isupper())
# non-BMP, non-cased
self.assertFalse('\U0001F40D'.isupper())
self.assertFalse('\U0001F46F'.isupper())
def test_istitle(self):
super().test_istitle()
self.checkequalnofix(True, '\u1FFc', 'istitle')
self.checkequalnofix(True, 'Greek \u1FFcitlecases ...', 'istitle')
# non-BMP, uppercase + lowercase
self.assertTrue('\U00010401\U00010429'.istitle())
self.assertTrue('\U00010427\U0001044E'.istitle())
# apparently there are no titlecased (Lt) non-BMP chars in Unicode 6
for ch in ['\U00010429', '\U0001044E', '\U0001F40D', '\U0001F46F']:
self.assertFalse(ch.istitle(), '{!a} is not title'.format(ch))
def test_isspace(self):
super().test_isspace()
self.checkequalnofix(True, '\u2000', 'isspace')
self.checkequalnofix(True, '\u200a', 'isspace')
self.checkequalnofix(False, '\u2014', 'isspace')
# There are no non-BMP whitespace chars as of Unicode 12.
for ch in ['\U00010401', '\U00010427', '\U00010429', '\U0001044E',
'\U0001F40D', '\U0001F46F']:
self.assertFalse(ch.isspace(), '{!a} is not space.'.format(ch))
@support.requires_resource('cpu')
def test_isspace_invariant(self):
for codepoint in range(sys.maxunicode + 1):
char = chr(codepoint)
bidirectional = unicodedata.bidirectional(char)
category = unicodedata.category(char)
self.assertEqual(char.isspace(),
(bidirectional in ('WS', 'B', 'S')
or category == 'Zs'))
def test_isalnum(self):
super().test_isalnum()
for ch in ['\U00010401', '\U00010427', '\U00010429', '\U0001044E',
'\U0001D7F6', '\U00011066', '\U000104A0', '\U0001F107']:
self.assertTrue(ch.isalnum(), '{!a} is alnum.'.format(ch))
def test_isalpha(self):
super().test_isalpha()
self.checkequalnofix(True, '\u1FFc', 'isalpha')
# non-BMP, cased
self.assertTrue('\U00010401'.isalpha())
self.assertTrue('\U00010427'.isalpha())
self.assertTrue('\U00010429'.isalpha())
self.assertTrue('\U0001044E'.isalpha())
# non-BMP, non-cased
self.assertFalse('\U0001F40D'.isalpha())
self.assertFalse('\U0001F46F'.isalpha())
def test_isascii(self):
super().test_isascii()
self.assertFalse("\u20ac".isascii())
self.assertFalse("\U0010ffff".isascii())
def test_isdecimal(self):
self.checkequalnofix(False, '', 'isdecimal')
self.checkequalnofix(False, 'a', 'isdecimal')
self.checkequalnofix(True, '0', 'isdecimal')
self.checkequalnofix(False, '\u2460', 'isdecimal') # CIRCLED DIGIT ONE
self.checkequalnofix(False, '\xbc', 'isdecimal') # VULGAR FRACTION ONE QUARTER
self.checkequalnofix(True, '\u0660', 'isdecimal') # ARABIC-INDIC DIGIT ZERO
self.checkequalnofix(True, '0123456789', 'isdecimal')
self.checkequalnofix(False, '0123456789a', 'isdecimal')
self.checkraises(TypeError, 'abc', 'isdecimal', 42)
for ch in ['\U00010401', '\U00010427', '\U00010429', '\U0001044E',
'\U0001F40D', '\U0001F46F', '\U00011065', '\U0001F107']:
self.assertFalse(ch.isdecimal(), '{!a} is not decimal.'.format(ch))
for ch in ['\U0001D7F6', '\U00011066', '\U000104A0']:
self.assertTrue(ch.isdecimal(), '{!a} is decimal.'.format(ch))
def test_isdigit(self):
super().test_isdigit()
self.checkequalnofix(True, '\u2460', 'isdigit')
self.checkequalnofix(False, '\xbc', 'isdigit')
self.checkequalnofix(True, '\u0660', 'isdigit')
for ch in ['\U00010401', '\U00010427', '\U00010429', '\U0001044E',
'\U0001F40D', '\U0001F46F', '\U00011065']:
self.assertFalse(ch.isdigit(), '{!a} is not a digit.'.format(ch))
for ch in ['\U0001D7F6', '\U00011066', '\U000104A0', '\U0001F107']:
self.assertTrue(ch.isdigit(), '{!a} is a digit.'.format(ch))
def test_isnumeric(self):
self.checkequalnofix(False, '', 'isnumeric')
self.checkequalnofix(False, 'a', 'isnumeric')
self.checkequalnofix(True, '0', 'isnumeric')
self.checkequalnofix(True, '\u2460', 'isnumeric')
self.checkequalnofix(True, '\xbc', 'isnumeric')
self.checkequalnofix(True, '\u0660', 'isnumeric')
self.checkequalnofix(True, '0123456789', 'isnumeric')
self.checkequalnofix(False, '0123456789a', 'isnumeric')
self.assertRaises(TypeError, "abc".isnumeric, 42)
for ch in ['\U00010401', '\U00010427', '\U00010429', '\U0001044E',
'\U0001F40D', '\U0001F46F']:
self.assertFalse(ch.isnumeric(), '{!a} is not numeric.'.format(ch))
for ch in ['\U00011065', '\U0001D7F6', '\U00011066',
'\U000104A0', '\U0001F107']:
self.assertTrue(ch.isnumeric(), '{!a} is numeric.'.format(ch))
def test_isidentifier(self):
self.assertTrue("a".isidentifier())
self.assertTrue("Z".isidentifier())
self.assertTrue("_".isidentifier())
self.assertTrue("b0".isidentifier())
self.assertTrue("bc".isidentifier())
self.assertTrue("b_".isidentifier())
self.assertTrue("µ".isidentifier())
self.assertTrue("𝔘𝔫𝔦𝔠𝔬𝔡𝔢".isidentifier())
self.assertFalse(" ".isidentifier())
self.assertFalse("[".isidentifier())
self.assertFalse("©".isidentifier())
self.assertFalse("0".isidentifier())
@support.cpython_only
def test_isidentifier_legacy(self):
import _testcapi
u = '𝖀𝖓𝖎𝖈𝖔𝖉𝖊'
self.assertTrue(u.isidentifier())
self.assertTrue(_testcapi.unicode_legacy_string(u).isidentifier())
def test_isprintable(self):
self.assertTrue("".isprintable())
self.assertTrue(" ".isprintable())
self.assertTrue("abcdefg".isprintable())
self.assertFalse("abcdefg\n".isprintable())
# some defined Unicode character
self.assertTrue("\u0374".isprintable())
# undefined character
self.assertFalse("\u0378".isprintable())
# single surrogate character
self.assertFalse("\ud800".isprintable())
self.assertTrue('\U0001F46F'.isprintable())
self.assertFalse('\U000E0020'.isprintable())
def test_surrogates(self):
for s in ('a\uD800b\uDFFF', 'a\uDFFFb\uD800',
'a\uD800b\uDFFFa', 'a\uDFFFb\uD800a'):
self.assertTrue(s.islower())
self.assertFalse(s.isupper())
self.assertFalse(s.istitle())
for s in ('A\uD800B\uDFFF', 'A\uDFFFB\uD800',
'A\uD800B\uDFFFA', 'A\uDFFFB\uD800A'):
self.assertFalse(s.islower())
self.assertTrue(s.isupper())
self.assertTrue(s.istitle())
for meth_name in ('islower', 'isupper', 'istitle'):
meth = getattr(str, meth_name)
for s in ('\uD800', '\uDFFF', '\uD800\uD800', '\uDFFF\uDFFF'):
self.assertFalse(meth(s), '%a.%s() is False' % (s, meth_name))
for meth_name in ('isalpha', 'isalnum', 'isdigit', 'isspace',
'isdecimal', 'isnumeric',
'isidentifier', 'isprintable'):
meth = getattr(str, meth_name)
for s in ('\uD800', '\uDFFF', '\uD800\uD800', '\uDFFF\uDFFF',
'a\uD800b\uDFFF', 'a\uDFFFb\uD800',
'a\uD800b\uDFFFa', 'a\uDFFFb\uD800a'):
self.assertFalse(meth(s), '%a.%s() is False' % (s, meth_name))
def test_lower(self):
string_tests.CommonTest.test_lower(self)
self.assertEqual('\U00010427'.lower(), '\U0001044F')
self.assertEqual('\U00010427\U00010427'.lower(),
'\U0001044F\U0001044F')
self.assertEqual('\U00010427\U0001044F'.lower(),
'\U0001044F\U0001044F')
self.assertEqual('X\U00010427x\U0001044F'.lower(),
'x\U0001044Fx\U0001044F')
self.assertEqual('fi'.lower(), 'fi')
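# U+0130 (LATIN CAPITAL LETTER I WITH DOT ABOVE) has no single-character
# lowercase form; per Unicode SpecialCasing it lowers to 'i' + U+0307.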
self.assertEqual('\u0130'.lower(), '\u0069\u0307')
# Special case for GREEK CAPITAL LETTER SIGMA U+03A3
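# str.lower() applies the Final_Sigma rule: sigma becomes U+03C2 when it
# ends a word (preceded by a cased character and not followed by one),
# and U+03C3 elsewhere -- hence the differing results below.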
self.assertEqual('\u03a3'.lower(), '\u03c3')
self.assertEqual('\u0345\u03a3'.lower(), '\u0345\u03c3')
self.assertEqual('A\u0345\u03a3'.lower(), 'a\u0345\u03c2')
self.assertEqual('A\u0345\u03a3a'.lower(), 'a\u0345\u03c3a')
self.assertEqual('A\u0345\u03a3'.lower(), 'a\u0345\u03c2')
self.assertEqual('A\u03a3\u0345'.lower(), 'a\u03c2\u0345')
self.assertEqual('\u03a3\u0345 '.lower(), '\u03c3\u0345 ')
self.assertEqual('\U0008fffe'.lower(), '\U0008fffe')
self.assertEqual('\u2177'.lower(), '\u2177')
def test_casefold(self):
self.assertEqual('hello'.casefold(), 'hello')
self.assertEqual('hELlo'.casefold(), 'hello')
self.assertEqual('ß'.casefold(), 'ss')
self.assertEqual('fi'.casefold(), 'fi')
self.assertEqual('\u03a3'.casefold(), '\u03c3')
self.assertEqual('A\u0345\u03a3'.casefold(), 'a\u03b9\u03c3')
self.assertEqual('\u00b5'.casefold(), '\u03bc')
def test_upper(self):
string_tests.CommonTest.test_upper(self)
self.assertEqual('\U0001044F'.upper(), '\U00010427')
self.assertEqual('\U0001044F\U0001044F'.upper(),
'\U00010427\U00010427')
self.assertEqual('\U00010427\U0001044F'.upper(),
'\U00010427\U00010427')
self.assertEqual('X\U00010427x\U0001044F'.upper(),
'X\U00010427X\U00010427')
self.assertEqual('fi'.upper(), 'FI')
self.assertEqual('\u0130'.upper(), '\u0130')
self.assertEqual('\u03a3'.upper(), '\u03a3')
self.assertEqual('ß'.upper(), 'SS')
self.assertEqual('\u1fd2'.upper(), '\u0399\u0308\u0300')
self.assertEqual('\U0008fffe'.upper(), '\U0008fffe')
self.assertEqual('\u2177'.upper(), '\u2167')
def test_capitalize(self):
string_tests.CommonTest.test_capitalize(self)
self.assertEqual('\U0001044F'.capitalize(), '\U00010427')
self.assertEqual('\U0001044F\U0001044F'.capitalize(),
'\U00010427\U0001044F')
self.assertEqual('\U00010427\U0001044F'.capitalize(),
'\U00010427\U0001044F')
self.assertEqual('\U0001044F\U00010427'.capitalize(),
'\U00010427\U0001044F')
self.assertEqual('X\U00010427x\U0001044F'.capitalize(),
'X\U0001044Fx\U0001044F')
self.assertEqual('h\u0130'.capitalize(), 'H\u0069\u0307')
exp = '\u0399\u0308\u0300\u0069\u0307'
self.assertEqual('\u1fd2\u0130'.capitalize(), exp)
self.assertEqual('finnish'.capitalize(), 'Finnish')
self.assertEqual('A\u0345\u03a3'.capitalize(), 'A\u0345\u03c2')
def test_title(self):
super().test_title()
self.assertEqual('\U0001044F'.title(), '\U00010427')
self.assertEqual('\U0001044F\U0001044F'.title(),
'\U00010427\U0001044F')
self.assertEqual('\U0001044F\U0001044F \U0001044F\U0001044F'.title(),
'\U00010427\U0001044F \U00010427\U0001044F')
self.assertEqual('\U00010427\U0001044F \U00010427\U0001044F'.title(),
'\U00010427\U0001044F \U00010427\U0001044F')
self.assertEqual('\U0001044F\U00010427 \U0001044F\U00010427'.title(),
'\U00010427\U0001044F \U00010427\U0001044F')
self.assertEqual('X\U00010427x\U0001044F X\U00010427x\U0001044F'.title(),
'X\U0001044Fx\U0001044F X\U0001044Fx\U0001044F')
self.assertEqual('fiNNISH'.title(), 'Finnish')
self.assertEqual('A\u03a3 \u1fa1xy'.title(), 'A\u03c2 \u1fa9xy')
self.assertEqual('A\u03a3A'.title(), 'A\u03c3a')
def test_swapcase(self):
string_tests.CommonTest.test_swapcase(self)
self.assertEqual('\U0001044F'.swapcase(), '\U00010427')
self.assertEqual('\U00010427'.swapcase(), '\U0001044F')
self.assertEqual('\U0001044F\U0001044F'.swapcase(),
'\U00010427\U00010427')
self.assertEqual('\U00010427\U0001044F'.swapcase(),
'\U0001044F\U00010427')
self.assertEqual('\U0001044F\U00010427'.swapcase(),
'\U00010427\U0001044F')
self.assertEqual('X\U00010427x\U0001044F'.swapcase(),
'x\U0001044FX\U00010427')
self.assertEqual('fi'.swapcase(), 'FI')
self.assertEqual('\u0130'.swapcase(), '\u0069\u0307')
# Special case for GREEK CAPITAL LETTER SIGMA U+03A3
self.assertEqual('\u03a3'.swapcase(), '\u03c3')
self.assertEqual('\u0345\u03a3'.swapcase(), '\u0399\u03c3')
self.assertEqual('A\u0345\u03a3'.swapcase(), 'a\u0399\u03c2')
self.assertEqual('A\u0345\u03a3a'.swapcase(), 'a\u0399\u03c3A')
self.assertEqual('A\u0345\u03a3'.swapcase(), 'a\u0399\u03c2')
self.assertEqual('A\u03a3\u0345'.swapcase(), 'a\u03c2\u0399')
self.assertEqual('\u03a3\u0345 '.swapcase(), '\u03c3\u0399 ')
self.assertEqual('\u03a3'.swapcase(), '\u03c3')
self.assertEqual('ß'.swapcase(), 'SS')
self.assertEqual('\u1fd2'.swapcase(), '\u0399\u0308\u0300')
def test_center(self):
string_tests.CommonTest.test_center(self)
self.assertEqual('x'.center(2, '\U0010FFFF'),
'x\U0010FFFF')
self.assertEqual('x'.center(3, '\U0010FFFF'),
'\U0010FFFFx\U0010FFFF')
self.assertEqual('x'.center(4, '\U0010FFFF'),
'\U0010FFFFx\U0010FFFF\U0010FFFF')
@unittest.skipUnless(sys.maxsize == 2**31 - 1, "requires 32-bit system")
@support.cpython_only
def test_case_operation_overflow(self):
# Issue #22643
size = 2**32//12 + 1
try:
s = "ü" * size
except MemoryError:
self.skipTest('not enough memory (%.0f MiB required)' % (size / 2**20))
try:
self.assertRaises(OverflowError, s.upper)
finally:
del s
def test_contains(self):
# Testing Unicode contains method
self.assertIn('a', 'abdb')
self.assertIn('a', 'bdab')
self.assertIn('a', 'bdaba')
self.assertIn('a', 'bdba')
self.assertNotIn('a', 'bdb')
self.assertIn('a', 'bdba')
self.assertIn('a', ('a',1,None))
self.assertIn('a', (1,None,'a'))
self.assertIn('a', ('a',1,None))
self.assertIn('a', (1,None,'a'))
self.assertNotIn('a', ('x',1,'y'))
self.assertNotIn('a', ('x',1,None))
self.assertNotIn('abcd', 'abcxxxx')
self.assertIn('ab', 'abcd')
self.assertIn('ab', 'abc')
self.assertIn('ab', (1,None,'ab'))
self.assertIn('', 'abc')
self.assertIn('', '')
self.assertIn('', 'abc')
self.assertNotIn('\0', 'abc')
self.assertIn('\0', '\0abc')
self.assertIn('\0', 'abc\0')
self.assertIn('a', '\0abc')
self.assertIn('asdf', 'asdf')
self.assertNotIn('asdf', 'asd')
self.assertNotIn('asdf', '')
self.assertRaises(TypeError, "abc".__contains__)
# test mixed kinds
for fill in ('a', '\u0100', '\U00010300'):
fill *= 9
for delim in ('c', '\u0102', '\U00010302'):
self.assertNotIn(delim, fill)
self.assertIn(delim, fill + delim)
self.assertNotIn(delim * 2, fill)
self.assertIn(delim * 2, fill + delim * 2)
def test_issue18183(self):
'\U00010000\U00100000'.lower()
'\U00010000\U00100000'.casefold()
'\U00010000\U00100000'.upper()
'\U00010000\U00100000'.capitalize()
'\U00010000\U00100000'.title()
'\U00010000\U00100000'.swapcase()
'\U00100000'.center(3, '\U00010000')
'\U00100000'.ljust(3, '\U00010000')
'\U00100000'.rjust(3, '\U00010000')
def test_format(self):
self.assertEqual(''.format(), '')
self.assertEqual('a'.format(), 'a')
self.assertEqual('ab'.format(), 'ab')
self.assertEqual('a{{'.format(), 'a{')
self.assertEqual('a}}'.format(), 'a}')
self.assertEqual('{{b'.format(), '{b')
self.assertEqual('}}b'.format(), '}b')
self.assertEqual('a{{b'.format(), 'a{b')
# examples from the PEP:
import datetime
self.assertEqual("My name is {0}".format('Fred'), "My name is Fred")
self.assertEqual("My name is {0[name]}".format(dict(name='Fred')),
"My name is Fred")
self.assertEqual("My name is {0} :-{{}}".format('Fred'),
"My name is Fred :-{}")
d = datetime.date(2007, 8, 18)
self.assertEqual("The year is {0.year}".format(d),
"The year is 2007")
# classes we'll use for testing
class C:
def __init__(self, x=100):
self._x = x
def __format__(self, spec):
return spec
class D:
def __init__(self, x):
self.x = x
def __format__(self, spec):
return str(self.x)
# class with __str__, but no __format__
class E:
def __init__(self, x):
self.x = x
def __str__(self):
return 'E(' + self.x + ')'
# class with __repr__, but no __format__ or __str__
class F:
def __init__(self, x):
self.x = x
def __repr__(self):
return 'F(' + self.x + ')'
# class with __format__ that forwards to string, for some format_spec's
class G:
def __init__(self, x):
self.x = x
def __str__(self):
return "string is " + self.x
def __format__(self, format_spec):
if format_spec == 'd':
return 'G(' + self.x + ')'
return object.__format__(self, format_spec)
class I(datetime.date):
def __format__(self, format_spec):
return self.strftime(format_spec)
class J(int):
def __format__(self, format_spec):
return int.__format__(self * 2, format_spec)
class M:
def __init__(self, x):
self.x = x
def __repr__(self):
return 'M(' + self.x + ')'
__str__ = None
class N:
def __init__(self, x):
self.x = x
def __repr__(self):
return 'N(' + self.x + ')'
__format__ = None
self.assertEqual(''.format(), '')
self.assertEqual('abc'.format(), 'abc')
self.assertEqual('{0}'.format('abc'), 'abc')
self.assertEqual('{0:}'.format('abc'), 'abc')
# self.assertEqual('{ 0 }'.format('abc'), 'abc')
self.assertEqual('X{0}'.format('abc'), 'Xabc')
self.assertEqual('{0}X'.format('abc'), 'abcX')
self.assertEqual('X{0}Y'.format('abc'), 'XabcY')
self.assertEqual('{1}'.format(1, 'abc'), 'abc')
self.assertEqual('X{1}'.format(1, 'abc'), 'Xabc')
self.assertEqual('{1}X'.format(1, 'abc'), 'abcX')
self.assertEqual('X{1}Y'.format(1, 'abc'), 'XabcY')
self.assertEqual('{0}'.format(-15), '-15')
self.assertEqual('{0}{1}'.format(-15, 'abc'), '-15abc')
self.assertEqual('{0}X{1}'.format(-15, 'abc'), '-15Xabc')
self.assertEqual('{{'.format(), '{')
self.assertEqual('}}'.format(), '}')
self.assertEqual('{{}}'.format(), '{}')
self.assertEqual('{{x}}'.format(), '{x}')
self.assertEqual('{{{0}}}'.format(123), '{123}')
self.assertEqual('{{{{0}}}}'.format(), '{{0}}')
self.assertEqual('}}{{'.format(), '}{')
self.assertEqual('}}x{{'.format(), '}x{')
# weird field names
self.assertEqual("{0[foo-bar]}".format({'foo-bar':'baz'}), 'baz')
self.assertEqual("{0[foo bar]}".format({'foo bar':'baz'}), 'baz')
self.assertEqual("{0[ ]}".format({' ':3}), '3')
self.assertEqual('{foo._x}'.format(foo=C(20)), '20')
self.assertEqual('{1}{0}'.format(D(10), D(20)), '2010')
self.assertEqual('{0._x.x}'.format(C(D('abc'))), 'abc')
self.assertEqual('{0[0]}'.format(['abc', 'def']), 'abc')
self.assertEqual('{0[1]}'.format(['abc', 'def']), 'def')
self.assertEqual('{0[1][0]}'.format(['abc', ['def']]), 'def')
self.assertEqual('{0[1][0].x}'.format(['abc', [D('def')]]), 'def')
# strings
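# Rough shape of a string format spec (see the Format Specification
# Mini-Language): [[fill]align][sign][#][0][width][,][.precision][type];
# for 's', '.N' truncates to N characters and fill/align pad the result.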
self.assertEqual('{0:.3s}'.format('abc'), 'abc')
self.assertEqual('{0:.3s}'.format('ab'), 'ab')
self.assertEqual('{0:.3s}'.format('abcdef'), 'abc')
self.assertEqual('{0:.0s}'.format('abcdef'), '')
self.assertEqual('{0:3.3s}'.format('abc'), 'abc')
self.assertEqual('{0:2.3s}'.format('abc'), 'abc')
self.assertEqual('{0:2.2s}'.format('abc'), 'ab')
self.assertEqual('{0:3.2s}'.format('abc'), 'ab ')
self.assertEqual('{0:x<0s}'.format('result'), 'result')
self.assertEqual('{0:x<5s}'.format('result'), 'result')
self.assertEqual('{0:x<6s}'.format('result'), 'result')
self.assertEqual('{0:x<7s}'.format('result'), 'resultx')
self.assertEqual('{0:x<8s}'.format('result'), 'resultxx')
self.assertEqual('{0: <7s}'.format('result'), 'result ')
self.assertEqual('{0:<7s}'.format('result'), 'result ')
self.assertEqual('{0:>7s}'.format('result'), ' result')
self.assertEqual('{0:>8s}'.format('result'), ' result')
self.assertEqual('{0:^8s}'.format('result'), ' result ')
self.assertEqual('{0:^9s}'.format('result'), ' result ')
self.assertEqual('{0:^10s}'.format('result'), ' result ')
self.assertEqual('{0:10000}'.format('a'), 'a' + ' ' * 9999)
self.assertEqual('{0:10000}'.format(''), ' ' * 10000)
self.assertEqual('{0:10000000}'.format(''), ' ' * 10000000)
# issue 12546: use \x00 as a fill character
self.assertEqual('{0:\x00<6s}'.format('foo'), 'foo\x00\x00\x00')
self.assertEqual('{0:\x01<6s}'.format('foo'), 'foo\x01\x01\x01')
self.assertEqual('{0:\x00^6s}'.format('foo'), '\x00foo\x00\x00')
self.assertEqual('{0:^6s}'.format('foo'), ' foo ')
self.assertEqual('{0:\x00<6}'.format(3), '3\x00\x00\x00\x00\x00')
self.assertEqual('{0:\x01<6}'.format(3), '3\x01\x01\x01\x01\x01')
self.assertEqual('{0:\x00^6}'.format(3), '\x00\x003\x00\x00\x00')
self.assertEqual('{0:<6}'.format(3), '3 ')
self.assertEqual('{0:\x00<6}'.format(3.14), '3.14\x00\x00')
self.assertEqual('{0:\x01<6}'.format(3.14), '3.14\x01\x01')
self.assertEqual('{0:\x00^6}'.format(3.14), '\x003.14\x00')
self.assertEqual('{0:^6}'.format(3.14), ' 3.14 ')
self.assertEqual('{0:\x00<12}'.format(3+2.0j), '(3+2j)\x00\x00\x00\x00\x00\x00')
self.assertEqual('{0:\x01<12}'.format(3+2.0j), '(3+2j)\x01\x01\x01\x01\x01\x01')
self.assertEqual('{0:\x00^12}'.format(3+2.0j), '\x00\x00\x00(3+2j)\x00\x00\x00')
self.assertEqual('{0:^12}'.format(3+2.0j), ' (3+2j) ')
# format specifiers for user defined type
self.assertEqual('{0:abc}'.format(C()), 'abc')
# !r, !s and !a coercions
self.assertEqual('{0!s}'.format('Hello'), 'Hello')
self.assertEqual('{0!s:}'.format('Hello'), 'Hello')
self.assertEqual('{0!s:15}'.format('Hello'), 'Hello ')
self.assertEqual('{0!s:15s}'.format('Hello'), 'Hello ')
self.assertEqual('{0!r}'.format('Hello'), "'Hello'")
self.assertEqual('{0!r:}'.format('Hello'), "'Hello'")
self.assertEqual('{0!r}'.format(F('Hello')), 'F(Hello)')
self.assertEqual('{0!r}'.format('\u0378'), "'\\u0378'") # nonprintable
self.assertEqual('{0!r}'.format('\u0374'), "'\u0374'") # printable
self.assertEqual('{0!r}'.format(F('\u0374')), 'F(\u0374)')
self.assertEqual('{0!a}'.format('Hello'), "'Hello'")
self.assertEqual('{0!a}'.format('\u0378'), "'\\u0378'") # nonprintable
self.assertEqual('{0!a}'.format('\u0374'), "'\\u0374'") # printable
self.assertEqual('{0!a:}'.format('Hello'), "'Hello'")
self.assertEqual('{0!a}'.format(F('Hello')), 'F(Hello)')
self.assertEqual('{0!a}'.format(F('\u0374')), 'F(\\u0374)')
# test fallback to object.__format__
self.assertEqual('{0}'.format({}), '{}')
self.assertEqual('{0}'.format([]), '[]')
self.assertEqual('{0}'.format([1]), '[1]')
self.assertEqual('{0:d}'.format(G('data')), 'G(data)')
self.assertEqual('{0!s}'.format(G('data')), 'string is data')
self.assertRaises(TypeError, '{0:^10}'.format, E('data'))
self.assertRaises(TypeError, '{0:^10s}'.format, E('data'))
self.assertRaises(TypeError, '{0:>15s}'.format, G('data'))
self.assertEqual("{0:date: %Y-%m-%d}".format(I(year=2007,
month=8,
day=27)),
"date: 2007-08-27")
# test deriving from a builtin type and overriding __format__
self.assertEqual("{0}".format(J(10)), "20")
# string format specifiers
self.assertEqual('{0:}'.format('a'), 'a')
# computed format specifiers
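# Nested replacement fields inside the spec (e.g. '{0:.{1}}') are expanded
# first, so width/precision can come from arguments; only one level of
# nesting is allowed (see the recursion-depth checks further down).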
self.assertEqual("{0:.{1}}".format('hello world', 5), 'hello')
self.assertEqual("{0:.{1}s}".format('hello world', 5), 'hello')
self.assertEqual("{0:.{precision}s}".format('hello world', precision=5), 'hello')
self.assertEqual("{0:{width}.{precision}s}".format('hello world', width=10, precision=5), 'hello ')
self.assertEqual("{0:{width}.{precision}s}".format('hello world', width='10', precision='5'), 'hello ')
# test various errors
self.assertRaises(ValueError, '{'.format)
self.assertRaises(ValueError, '}'.format)
self.assertRaises(ValueError, 'a{'.format)
self.assertRaises(ValueError, 'a}'.format)
self.assertRaises(ValueError, '{a'.format)
self.assertRaises(ValueError, '}a'.format)
self.assertRaises(IndexError, '{0}'.format)
self.assertRaises(IndexError, '{1}'.format, 'abc')
self.assertRaises(KeyError, '{x}'.format)
self.assertRaises(ValueError, "}{".format)
self.assertRaises(ValueError, "abc{0:{}".format)
self.assertRaises(ValueError, "{0".format)
self.assertRaises(IndexError, "{0.}".format)
self.assertRaises(ValueError, "{0.}".format, 0)
self.assertRaises(ValueError, "{0[}".format)
self.assertRaises(ValueError, "{0[}".format, [])
self.assertRaises(KeyError, "{0]}".format)
self.assertRaises(ValueError, "{0.[]}".format, 0)
self.assertRaises(ValueError, "{0..foo}".format, 0)
self.assertRaises(ValueError, "{0[0}".format, 0)
self.assertRaises(ValueError, "{0[0:foo}".format, 0)
self.assertRaises(KeyError, "{c]}".format)
self.assertRaises(ValueError, "{{ {{{0}}".format, 0)
self.assertRaises(ValueError, "{0}}".format, 0)
self.assertRaises(KeyError, "{foo}".format, bar=3)
self.assertRaises(ValueError, "{0!x}".format, 3)
self.assertRaises(ValueError, "{0!}".format, 0)
self.assertRaises(ValueError, "{0!rs}".format, 0)
self.assertRaises(ValueError, "{!}".format)
self.assertRaises(IndexError, "{:}".format)
self.assertRaises(IndexError, "{:s}".format)
self.assertRaises(IndexError, "{}".format)
big = "23098475029384702983476098230754973209482573"
self.assertRaises(ValueError, ("{" + big + "}").format)
self.assertRaises(ValueError, ("{[" + big + "]}").format, [0])
# issue 6089
self.assertRaises(ValueError, "{0[0]x}".format, [None])
self.assertRaises(ValueError, "{0[0](10)}".format, [None])
# can't have a replacement on the field name portion
self.assertRaises(TypeError, '{0[{1}]}'.format, 'abcdefg', 4)
# exceed maximum recursion depth
self.assertRaises(ValueError, "{0:{1:{2}}}".format, 'abc', 's', '')
self.assertRaises(ValueError, "{0:{1:{2:{3:{4:{5:{6}}}}}}}".format,
0, 1, 2, 3, 4, 5, 6, 7)
# string format spec errors
sign_msg = "Sign not allowed in string format specifier"
self.assertRaisesRegex(ValueError, sign_msg, "{0:-s}".format, '')
self.assertRaisesRegex(ValueError, sign_msg, format, "", "-")
space_msg = "Space not allowed in string format specifier"
self.assertRaisesRegex(ValueError, space_msg, "{: }".format, '')
self.assertRaises(ValueError, "{0:=s}".format, '')
# Alternate formatting is not supported
self.assertRaises(ValueError, format, '', '#')
self.assertRaises(ValueError, format, '', '#20')
# Non-ASCII
self.assertEqual("{0:s}{1:s}".format("ABC", "\u0410\u0411\u0412"),
'ABC\u0410\u0411\u0412')
self.assertEqual("{0:.3s}".format("ABC\u0410\u0411\u0412"),
'ABC')
self.assertEqual("{0:.0s}".format("ABC\u0410\u0411\u0412"),
'')
self.assertEqual("{[{}]}".format({"{}": 5}), "5")
self.assertEqual("{[{}]}".format({"{}" : "a"}), "a")
self.assertEqual("{[{]}".format({"{" : "a"}), "a")
self.assertEqual("{[}]}".format({"}" : "a"}), "a")
self.assertEqual("{[[]}".format({"[" : "a"}), "a")
self.assertEqual("{[!]}".format({"!" : "a"}), "a")
self.assertRaises(ValueError, "{a{}b}".format, 42)
self.assertRaises(ValueError, "{a{b}".format, 42)
self.assertRaises(ValueError, "{[}".format, 42)
self.assertEqual("0x{:0{:d}X}".format(0x0,16), "0x0000000000000000")
# Blocking fallback
m = M('data')
self.assertEqual("{!r}".format(m), 'M(data)')
self.assertRaises(TypeError, "{!s}".format, m)
self.assertRaises(TypeError, "{}".format, m)
n = N('data')
self.assertEqual("{!r}".format(n), 'N(data)')
self.assertEqual("{!s}".format(n), 'N(data)')
self.assertRaises(TypeError, "{}".format, n)
def test_format_map(self):
self.assertEqual(''.format_map({}), '')
self.assertEqual('a'.format_map({}), 'a')
self.assertEqual('ab'.format_map({}), 'ab')
self.assertEqual('a{{'.format_map({}), 'a{')
self.assertEqual('a}}'.format_map({}), 'a}')
self.assertEqual('{{b'.format_map({}), '{b')
self.assertEqual('}}b'.format_map({}), '}b')
self.assertEqual('a{{b'.format_map({}), 'a{b')
# using mappings
class Mapping(dict):
def __missing__(self, key):
return key
self.assertEqual('{hello}'.format_map(Mapping()), 'hello')
self.assertEqual('{a} {world}'.format_map(Mapping(a='hello')), 'hello world')
class InternalMapping:
def __init__(self):
self.mapping = {'a': 'hello'}
def __getitem__(self, key):
return self.mapping[key]
self.assertEqual('{a}'.format_map(InternalMapping()), 'hello')
class C:
def __init__(self, x=100):
self._x = x
def __format__(self, spec):
return spec
self.assertEqual('{foo._x}'.format_map({'foo': C(20)}), '20')
# test various errors
self.assertRaises(TypeError, ''.format_map)
self.assertRaises(TypeError, 'a'.format_map)
self.assertRaises(ValueError, '{'.format_map, {})
self.assertRaises(ValueError, '}'.format_map, {})
self.assertRaises(ValueError, 'a{'.format_map, {})
self.assertRaises(ValueError, 'a}'.format_map, {})
self.assertRaises(ValueError, '{a'.format_map, {})
self.assertRaises(ValueError, '}a'.format_map, {})
# issue #12579: can't supply positional params to format_map
self.assertRaises(ValueError, '{}'.format_map, {'a' : 2})
self.assertRaises(ValueError, '{}'.format_map, 'a')
self.assertRaises(ValueError, '{a} {}'.format_map, {"a" : 2, "b" : 1})
class BadMapping:
def __getitem__(self, key):
return 1/0
self.assertRaises(KeyError, '{a}'.format_map, {})
self.assertRaises(TypeError, '{a}'.format_map, [])
self.assertRaises(ZeroDivisionError, '{a}'.format_map, BadMapping())
def test_format_huge_precision(self):
format_string = ".{}f".format(sys.maxsize + 1)
with self.assertRaises(ValueError):
result = format(2.34, format_string)
def test_format_huge_width(self):
format_string = "{}f".format(sys.maxsize + 1)
with self.assertRaises(ValueError):
result = format(2.34, format_string)
def test_format_huge_item_number(self):
format_string = "{{{}:.6f}}".format(sys.maxsize + 1)
with self.assertRaises(ValueError):
result = format_string.format(2.34)
def test_format_auto_numbering(self):
class C:
def __init__(self, x=100):
self._x = x
def __format__(self, spec):
return spec
self.assertEqual('{}'.format(10), '10')
self.assertEqual('{:5}'.format('s'), 's ')
self.assertEqual('{!r}'.format('s'), "'s'")
self.assertEqual('{._x}'.format(C(10)), '10')
self.assertEqual('{[1]}'.format([1, 2]), '2')
self.assertEqual('{[a]}'.format({'a':4, 'b':2}), '4')
self.assertEqual('a{}b{}c'.format(0, 1), 'a0b1c')
self.assertEqual('a{:{}}b'.format('x', '^10'), 'a x b')
self.assertEqual('a{:{}x}b'.format(20, '#'), 'a0x14b')
# can't mix and match numbering and auto-numbering
self.assertRaises(ValueError, '{}{1}'.format, 1, 2)
self.assertRaises(ValueError, '{1}{}'.format, 1, 2)
self.assertRaises(ValueError, '{:{1}}'.format, 1, 2)
self.assertRaises(ValueError, '{0:{}}'.format, 1, 2)
# can mix and match auto-numbering and named
self.assertEqual('{f}{}'.format(4, f='test'), 'test4')
self.assertEqual('{}{f}'.format(4, f='test'), '4test')
self.assertEqual('{:{f}}{g}{}'.format(1, 3, g='g', f=2), ' 1g3')
self.assertEqual('{f:{}}{}{g}'.format(2, 4, f=1, g='g'), ' 14g')
def test_formatting(self):
string_tests.MixinStrUnicodeUserStringTest.test_formatting(self)
# Testing Unicode formatting strings...
self.assertEqual("%s, %s" % ("abc", "abc"), 'abc, abc')
self.assertEqual("%s, %s, %i, %f, %5.2f" % ("abc", "abc", 1, 2, 3), 'abc, abc, 1, 2.000000, 3.00')
self.assertEqual("%s, %s, %i, %f, %5.2f" % ("abc", "abc", 1, -2, 3), 'abc, abc, 1, -2.000000, 3.00')
self.assertEqual("%s, %s, %i, %f, %5.2f" % ("abc", "abc", -1, -2, 3.5), 'abc, abc, -1, -2.000000, 3.50')
self.assertEqual("%s, %s, %i, %f, %5.2f" % ("abc", "abc", -1, -2, 3.57), 'abc, abc, -1, -2.000000, 3.57')
self.assertEqual("%s, %s, %i, %f, %5.2f" % ("abc", "abc", -1, -2, 1003.57), 'abc, abc, -1, -2.000000, 1003.57')
if not sys.platform.startswith('java'):
self.assertEqual("%r, %r" % (b"abc", "abc"), "b'abc', 'abc'")
self.assertEqual("%r" % ("\u1234",), "'\u1234'")
self.assertEqual("%a" % ("\u1234",), "'\\u1234'")
self.assertEqual("%(x)s, %(y)s" % {'x':"abc", 'y':"def"}, 'abc, def')
self.assertEqual("%(x)s, %(\xfc)s" % {'x':"abc", '\xfc':"def"}, 'abc, def')
self.assertEqual('%c' % 0x1234, '\u1234')
self.assertEqual('%c' % 0x21483, '\U00021483')
self.assertRaises(OverflowError, "%c".__mod__, (0x110000,))
self.assertEqual('%c' % '\U00021483', '\U00021483')
self.assertRaises(TypeError, "%c".__mod__, "aa")
self.assertRaises(ValueError, "%.1\u1032f".__mod__, (1.0/3))
self.assertRaises(TypeError, "%i".__mod__, "aa")
# formatting jobs delegated from the string implementation:
self.assertEqual('...%(foo)s...' % {'foo':"abc"}, '...abc...')
self.assertEqual('...%(foo)s...' % {'foo':"abc"}, '...abc...')
self.assertEqual('...%(foo)s...' % {'foo':"abc"}, '...abc...')
self.assertEqual('...%(foo)s...' % {'foo':"abc"}, '...abc...')
self.assertEqual('...%(foo)s...' % {'foo':"abc",'def':123}, '...abc...')
self.assertEqual('...%(foo)s...' % {'foo':"abc",'def':123}, '...abc...')
self.assertEqual('...%s...%s...%s...%s...' % (1,2,3,"abc"), '...1...2...3...abc...')
self.assertEqual('...%%...%%s...%s...%s...%s...%s...' % (1,2,3,"abc"), '...%...%s...1...2...3...abc...')
self.assertEqual('...%s...' % "abc", '...abc...')
self.assertEqual('%*s' % (5,'abc',), ' abc')
self.assertEqual('%*s' % (-5,'abc',), 'abc ')
self.assertEqual('%*.*s' % (5,2,'abc',), ' ab')
self.assertEqual('%*.*s' % (5,3,'abc',), ' abc')
self.assertEqual('%i %*.*s' % (10, 5,3,'abc',), '10 abc')
self.assertEqual('%i%s %*.*s' % (10, 3, 5, 3, 'abc',), '103 abc')
self.assertEqual('%c' % 'a', 'a')
class Wrapper:
def __str__(self):
return '\u1234'
self.assertEqual('%s' % Wrapper(), '\u1234')
# issue 3382
NAN = float('nan')
INF = float('inf')
self.assertEqual('%f' % NAN, 'nan')
self.assertEqual('%F' % NAN, 'NAN')
self.assertEqual('%f' % INF, 'inf')
self.assertEqual('%F' % INF, 'INF')
# PEP 393
self.assertEqual('%.1s' % "a\xe9\u20ac", 'a')
self.assertEqual('%.2s' % "a\xe9\u20ac", 'a\xe9')
# issue 19995
class PseudoInt:
def __init__(self, value):
self.value = int(value)
def __int__(self):
return self.value
def __index__(self):
return self.value
class PseudoFloat:
def __init__(self, value):
self.value = float(value)
def __int__(self):
return int(self.value)
pi = PseudoFloat(3.1415)
letter_m = PseudoInt(109)
self.assertEqual('%x' % 42, '2a')
self.assertEqual('%X' % 15, 'F')
self.assertEqual('%o' % 9, '11')
self.assertEqual('%c' % 109, 'm')
self.assertEqual('%x' % letter_m, '6d')
self.assertEqual('%X' % letter_m, '6D')
self.assertEqual('%o' % letter_m, '155')
self.assertEqual('%c' % letter_m, 'm')
self.assertRaisesRegex(TypeError, '%x format: an integer is required, not float', operator.mod, '%x', 3.14)
self.assertRaisesRegex(TypeError, '%X format: an integer is required, not float', operator.mod, '%X', 2.11)
self.assertRaisesRegex(TypeError, '%o format: an integer is required, not float', operator.mod, '%o', 1.79)
self.assertRaisesRegex(TypeError, '%x format: an integer is required, not PseudoFloat', operator.mod, '%x', pi)
self.assertRaises(TypeError, operator.mod, '%c', pi)
def test_formatting_with_enum(self):
# issue18780
import enum
class Float(float, enum.Enum):
PI = 3.1415926
class Int(enum.IntEnum):
IDES = 15
class Str(str, enum.Enum):
ABC = 'abc'
# Testing Unicode formatting strings...
self.assertEqual("%s, %s" % (Str.ABC, Str.ABC),
'Str.ABC, Str.ABC')
self.assertEqual("%s, %s, %d, %i, %u, %f, %5.2f" %
(Str.ABC, Str.ABC,
Int.IDES, Int.IDES, Int.IDES,
Float.PI, Float.PI),
'Str.ABC, Str.ABC, 15, 15, 15, 3.141593, 3.14')
# formatting jobs delegated from the string implementation:
self.assertEqual('...%(foo)s...' % {'foo':Str.ABC},
'...Str.ABC...')
self.assertEqual('...%(foo)s...' % {'foo':Int.IDES},
'...Int.IDES...')
self.assertEqual('...%(foo)i...' % {'foo':Int.IDES},
'...15...')
self.assertEqual('...%(foo)d...' % {'foo':Int.IDES},
'...15...')
self.assertEqual('...%(foo)u...' % {'foo':Int.IDES, 'def':Float.PI},
'...15...')
self.assertEqual('...%(foo)f...' % {'foo':Float.PI,'def':123},
'...3.141593...')
def test_formatting_huge_precision(self):
format_string = "%.{}f".format(sys.maxsize + 1)
with self.assertRaises(ValueError):
result = format_string % 2.34
def test_issue28598_strsubclass_rhs(self):
# A subclass of str with an __rmod__ method should be able to hook
# into the % operator
class SubclassedStr(str):
def __rmod__(self, other):
return 'Success, self.__rmod__({!r}) was called'.format(other)
self.assertEqual('lhs %% %r' % SubclassedStr('rhs'),
"Success, self.__rmod__('lhs %% %r') was called")
@support.cpython_only
def test_formatting_huge_precision_c_limits(self):
from _testcapi import INT_MAX
format_string = "%.{}f".format(INT_MAX + 1)
with self.assertRaises(ValueError):
result = format_string % 2.34
def test_formatting_huge_width(self):
format_string = "%{}f".format(sys.maxsize + 1)
with self.assertRaises(ValueError):
result = format_string % 2.34
def test_startswith_endswith_errors(self):
for meth in ('foo'.startswith, 'foo'.endswith):
with self.assertRaises(TypeError) as cm:
meth(['f'])
exc = str(cm.exception)
self.assertIn('str', exc)
self.assertIn('tuple', exc)
@support.run_with_locale('LC_ALL', 'de_DE', 'fr_FR')
def test_format_float(self):
# should not format with a comma, but always with C locale
self.assertEqual('1.0', '%.1f' % 1.0)
def test_constructor(self):
# unicode(obj) tests (this maps to PyObject_Unicode() at C level)
self.assertEqual(
str('unicode remains unicode'),
'unicode remains unicode'
)
for text in ('ascii', '\xe9', '\u20ac', '\U0010FFFF'):
subclass = StrSubclass(text)
self.assertEqual(str(subclass), text)
self.assertEqual(len(subclass), len(text))
if text == 'ascii':
self.assertEqual(subclass.encode('ascii'), b'ascii')
self.assertEqual(subclass.encode('utf-8'), b'ascii')
self.assertEqual(
str('strings are converted to unicode'),
'strings are converted to unicode'
)
class StringCompat:
def __init__(self, x):
self.x = x
def __str__(self):
return self.x
self.assertEqual(
str(StringCompat('__str__ compatible objects are recognized')),
'__str__ compatible objects are recognized'
)
# unicode(obj) is compatible to str():
o = StringCompat('unicode(obj) is compatible to str()')
self.assertEqual(str(o), 'unicode(obj) is compatible to str()')
self.assertEqual(str(o), 'unicode(obj) is compatible to str()')
for obj in (123, 123.45, 123):
self.assertEqual(str(obj), str(str(obj)))
# unicode(obj, encoding, error) tests (this maps to
# PyUnicode_FromEncodedObject() at C level)
if not sys.platform.startswith('java'):
self.assertRaises(
TypeError,
str,
'decoding unicode is not supported',
'utf-8',
'strict'
)
self.assertEqual(
str(b'strings are decoded to unicode', 'utf-8', 'strict'),
'strings are decoded to unicode'
)
if not sys.platform.startswith('java'):
self.assertEqual(
str(
memoryview(b'character buffers are decoded to unicode'),
'utf-8',
'strict'
),
'character buffers are decoded to unicode'
)
self.assertRaises(TypeError, str, 42, 42, 42)
def test_constructor_keyword_args(self):
"""Pass various keyword argument combinations to the constructor."""
# The object argument can be passed as a keyword.
self.assertEqual(str(object='foo'), 'foo')
self.assertEqual(str(object=b'foo', encoding='utf-8'), 'foo')
# The errors argument without encoding triggers "decode" mode.
self.assertEqual(str(b'foo', errors='strict'), 'foo') # not "b'foo'"
self.assertEqual(str(object=b'foo', errors='strict'), 'foo')
def test_constructor_defaults(self):
"""Check the constructor argument defaults."""
# The object argument defaults to '' or b''.
self.assertEqual(str(), '')
self.assertEqual(str(errors='strict'), '')
utf8_cent = '¢'.encode('utf-8')
# The encoding argument defaults to utf-8.
self.assertEqual(str(utf8_cent, errors='strict'), '¢')
# The errors argument defaults to strict.
self.assertRaises(UnicodeDecodeError, str, utf8_cent, encoding='ascii')
def test_codecs_utf7(self):
utfTests = [
('A\u2262\u0391.', b'A+ImIDkQ.'), # RFC2152 example
('Hi Mom -\u263a-!', b'Hi Mom -+Jjo--!'), # RFC2152 example
('\u65E5\u672C\u8A9E', b'+ZeVnLIqe-'), # RFC2152 example
('Item 3 is \u00a31.', b'Item 3 is +AKM-1.'), # RFC2152 example
('+', b'+-'),
('+-', b'+--'),
('+?', b'+-?'),
(r'\?', b'+AFw?'),
('+?', b'+-?'),
(r'\\?', b'+AFwAXA?'),
(r'\\\?', b'+AFwAXABc?'),
(r'++--', b'+-+---'),
('\U000abcde', b'+2m/c3g-'), # surrogate pairs
('/', b'/'),
]
for (x, y) in utfTests:
self.assertEqual(x.encode('utf-7'), y)
# Unpaired surrogates are passed through
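# In UTF-7, '+' opens a modified-base64 run of UTF-16 code units and '-'
# closes it, so the lone surrogate U+D801 (bytes D8 01) becomes '2AE'.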
self.assertEqual('\uD801'.encode('utf-7'), b'+2AE-')
self.assertEqual('\uD801x'.encode('utf-7'), b'+2AE-x')
self.assertEqual('\uDC01'.encode('utf-7'), b'+3AE-')
self.assertEqual('\uDC01x'.encode('utf-7'), b'+3AE-x')
self.assertEqual(b'+2AE-'.decode('utf-7'), '\uD801')
self.assertEqual(b'+2AE-x'.decode('utf-7'), '\uD801x')
self.assertEqual(b'+3AE-'.decode('utf-7'), '\uDC01')
self.assertEqual(b'+3AE-x'.decode('utf-7'), '\uDC01x')
self.assertEqual('\uD801\U000abcde'.encode('utf-7'), b'+2AHab9ze-')
self.assertEqual(b'+2AHab9ze-'.decode('utf-7'), '\uD801\U000abcde')
# Issue #2242: crash on some Windows/MSVC versions
self.assertEqual(b'+\xc1'.decode('utf-7', 'ignore'), '')
# Direct encoded characters
set_d = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789'(),-./:?"
# Optional direct characters
set_o = '!"#$%&*;<=>@[]^_`{|}'
for c in set_d:
self.assertEqual(c.encode('utf7'), c.encode('ascii'))
self.assertEqual(c.encode('ascii').decode('utf7'), c)
for c in set_o:
self.assertEqual(c.encode('ascii').decode('utf7'), c)
with self.assertRaisesRegex(UnicodeDecodeError,
'ill-formed sequence'):
b'+@'.decode('utf-7')
def test_codecs_utf8(self):
self.assertEqual(''.encode('utf-8'), b'')
self.assertEqual('\u20ac'.encode('utf-8'), b'\xe2\x82\xac')
self.assertEqual('\U00010002'.encode('utf-8'), b'\xf0\x90\x80\x82')
self.assertEqual('\U00023456'.encode('utf-8'), b'\xf0\xa3\x91\x96')
self.assertEqual('\ud800'.encode('utf-8', 'surrogatepass'), b'\xed\xa0\x80')
self.assertEqual('\udc00'.encode('utf-8', 'surrogatepass'), b'\xed\xb0\x80')
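# 'surrogatepass' allows lone surrogates to be encoded in their three-byte
# (CESU-8-like) forms; a strict UTF-8 codec would reject them.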
self.assertEqual(('\U00010002'*10).encode('utf-8'),
b'\xf0\x90\x80\x82'*10)
self.assertEqual(
'\u6b63\u78ba\u306b\u8a00\u3046\u3068\u7ffb\u8a33\u306f'
'\u3055\u308c\u3066\u3044\u307e\u305b\u3093\u3002\u4e00'
'\u90e8\u306f\u30c9\u30a4\u30c4\u8a9e\u3067\u3059\u304c'
'\u3001\u3042\u3068\u306f\u3067\u305f\u3089\u3081\u3067'
'\u3059\u3002\u5b9f\u969b\u306b\u306f\u300cWenn ist das'
' Nunstuck git und'.encode('utf-8'),
b'\xe6\xad\xa3\xe7\xa2\xba\xe3\x81\xab\xe8\xa8\x80\xe3\x81'
b'\x86\xe3\x81\xa8\xe7\xbf\xbb\xe8\xa8\xb3\xe3\x81\xaf\xe3'
b'\x81\x95\xe3\x82\x8c\xe3\x81\xa6\xe3\x81\x84\xe3\x81\xbe'
b'\xe3\x81\x9b\xe3\x82\x93\xe3\x80\x82\xe4\xb8\x80\xe9\x83'
b'\xa8\xe3\x81\xaf\xe3\x83\x89\xe3\x82\xa4\xe3\x83\x84\xe8'
b'\xaa\x9e\xe3\x81\xa7\xe3\x81\x99\xe3\x81\x8c\xe3\x80\x81'
b'\xe3\x81\x82\xe3\x81\xa8\xe3\x81\xaf\xe3\x81\xa7\xe3\x81'
b'\x9f\xe3\x82\x89\xe3\x82\x81\xe3\x81\xa7\xe3\x81\x99\xe3'
b'\x80\x82\xe5\xae\x9f\xe9\x9a\x9b\xe3\x81\xab\xe3\x81\xaf'
b'\xe3\x80\x8cWenn ist das Nunstuck git und'
)
# UTF-8 specific decoding tests
self.assertEqual(str(b'\xf0\xa3\x91\x96', 'utf-8'), '\U00023456' )
self.assertEqual(str(b'\xf0\x90\x80\x82', 'utf-8'), '\U00010002' )
self.assertEqual(str(b'\xe2\x82\xac', 'utf-8'), '\u20ac' )
# Other possible utf-8 test cases:
# * strict decoding testing for all of the
# UTF8_ERROR cases in PyUnicode_DecodeUTF8
def test_utf8_decode_valid_sequences(self):
sequences = [
# single byte
(b'\x00', '\x00'), (b'a', 'a'), (b'\x7f', '\x7f'),
# 2 bytes
(b'\xc2\x80', '\x80'), (b'\xdf\xbf', '\u07ff'),
# 3 bytes
(b'\xe0\xa0\x80', '\u0800'), (b'\xed\x9f\xbf', '\ud7ff'),
(b'\xee\x80\x80', '\uE000'), (b'\xef\xbf\xbf', '\uffff'),
# 4 bytes
(b'\xF0\x90\x80\x80', '\U00010000'),
(b'\xf4\x8f\xbf\xbf', '\U0010FFFF')
]
for seq, res in sequences:
self.assertEqual(seq.decode('utf-8'), res)
def test_utf8_decode_invalid_sequences(self):
# continuation bytes in a sequence of 2, 3, or 4 bytes
continuation_bytes = [bytes([x]) for x in range(0x80, 0xC0)]
# start bytes of a 2-byte sequence that would encode code points <= 0x7F (overlong)
invalid_2B_seq_start_bytes = [bytes([x]) for x in range(0xC0, 0xC2)]
# start bytes of a 4-byte sequence equivalent to code points > 0x10FFFF
invalid_4B_seq_start_bytes = [bytes([x]) for x in range(0xF5, 0xF8)]
invalid_start_bytes = (
continuation_bytes + invalid_2B_seq_start_bytes +
invalid_4B_seq_start_bytes + [bytes([x]) for x in range(0xF7, 0x100)]
)
for byte in invalid_start_bytes:
self.assertRaises(UnicodeDecodeError, byte.decode, 'utf-8')
for sb in invalid_2B_seq_start_bytes:
for cb in continuation_bytes:
self.assertRaises(UnicodeDecodeError, (sb+cb).decode, 'utf-8')
for sb in invalid_4B_seq_start_bytes:
for cb1 in continuation_bytes[:3]:
for cb3 in continuation_bytes[:3]:
self.assertRaises(UnicodeDecodeError,
(sb+cb1+b'\x80'+cb3).decode, 'utf-8')
for cb in [bytes([x]) for x in range(0x80, 0xA0)]:
self.assertRaises(UnicodeDecodeError,
(b'\xE0'+cb+b'\x80').decode, 'utf-8')
self.assertRaises(UnicodeDecodeError,
(b'\xE0'+cb+b'\xBF').decode, 'utf-8')
# surrogates
for cb in [bytes([x]) for x in range(0xA0, 0xC0)]:
self.assertRaises(UnicodeDecodeError,
(b'\xED'+cb+b'\x80').decode, 'utf-8')
self.assertRaises(UnicodeDecodeError,
(b'\xED'+cb+b'\xBF').decode, 'utf-8')
for cb in [bytes([x]) for x in range(0x80, 0x90)]:
self.assertRaises(UnicodeDecodeError,
(b'\xF0'+cb+b'\x80\x80').decode, 'utf-8')
self.assertRaises(UnicodeDecodeError,
(b'\xF0'+cb+b'\xBF\xBF').decode, 'utf-8')
for cb in [bytes([x]) for x in range(0x90, 0xC0)]:
self.assertRaises(UnicodeDecodeError,
(b'\xF4'+cb+b'\x80\x80').decode, 'utf-8')
self.assertRaises(UnicodeDecodeError,
(b'\xF4'+cb+b'\xBF\xBF').decode, 'utf-8')
def test_issue8271(self):
# Issue #8271: during the decoding of an invalid UTF-8 byte sequence,
# only the start byte and the continuation byte(s) are now considered
# invalid, instead of the number of bytes specified by the start byte.
# See https://www.unicode.org/versions/Unicode5.2.0/ch03.pdf (page 95,
# table 3-8, Row 2) for more information about the algorithm used.
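# In short (maximal-subpart behavior): each maximal prefix that could still
# begin a valid sequence is replaced by a single U+FFFD, and decoding then
# resumes at the first byte that made it invalid.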
FFFD = '\ufffd'
sequences = [
# invalid start bytes
(b'\x80', FFFD), # continuation byte
(b'\x80\x80', FFFD*2), # 2 continuation bytes
(b'\xc0', FFFD),
(b'\xc0\xc0', FFFD*2),
(b'\xc1', FFFD),
(b'\xc1\xc0', FFFD*2),
(b'\xc0\xc1', FFFD*2),
# with start byte of a 2-byte sequence
(b'\xc2', FFFD), # only the start byte
(b'\xc2\xc2', FFFD*2), # 2 start bytes
(b'\xc2\xc2\xc2', FFFD*3), # 3 start bytes
(b'\xc2\x41', FFFD+'A'), # invalid continuation byte
# with start byte of a 3-byte sequence
(b'\xe1', FFFD), # only the start byte
(b'\xe1\xe1', FFFD*2), # 2 start bytes
(b'\xe1\xe1\xe1', FFFD*3), # 3 start bytes
(b'\xe1\xe1\xe1\xe1', FFFD*4), # 4 start bytes
(b'\xe1\x80', FFFD), # only 1 continuation byte
(b'\xe1\x41', FFFD+'A'), # invalid continuation byte
(b'\xe1\x41\x80', FFFD+'A'+FFFD), # invalid cb followed by valid cb
(b'\xe1\x41\x41', FFFD+'AA'), # 2 invalid continuation bytes
(b'\xe1\x80\x41', FFFD+'A'), # only 1 valid continuation byte
(b'\xe1\x80\xe1\x41', FFFD*2+'A'), # 1 valid and the other invalid
(b'\xe1\x41\xe1\x80', FFFD+'A'+FFFD), # 1 invalid and the other valid
# with start byte of a 4-byte sequence
(b'\xf1', FFFD), # only the start byte
(b'\xf1\xf1', FFFD*2), # 2 start bytes
(b'\xf1\xf1\xf1', FFFD*3), # 3 start bytes
(b'\xf1\xf1\xf1\xf1', FFFD*4), # 4 start bytes
(b'\xf1\xf1\xf1\xf1\xf1', FFFD*5), # 5 start bytes
(b'\xf1\x80', FFFD), # only 1 continuation byte
(b'\xf1\x80\x80', FFFD), # only 2 continuation bytes
(b'\xf1\x80\x41', FFFD+'A'), # 1 valid cb and 1 invalid
(b'\xf1\x80\x41\x41', FFFD+'AA'), # 1 valid cb and 1 invalid
(b'\xf1\x80\x80\x41', FFFD+'A'), # 2 valid cb and 1 invalid
(b'\xf1\x41\x80', FFFD+'A'+FFFD), # 1 invalid cb and 1 valid
(b'\xf1\x41\x80\x80', FFFD+'A'+FFFD*2), # 1 invalid cb and 2 invalid
(b'\xf1\x41\x80\x41', FFFD+'A'+FFFD+'A'), # 2 invalid cb and 1 invalid
(b'\xf1\x41\x41\x80', FFFD+'AA'+FFFD), # 1 valid cb and 1 invalid
(b'\xf1\x41\xf1\x80', FFFD+'A'+FFFD),
(b'\xf1\x41\x80\xf1', FFFD+'A'+FFFD*2),
(b'\xf1\xf1\x80\x41', FFFD*2+'A'),
(b'\xf1\x41\xf1\xf1', FFFD+'A'+FFFD*2),
# with invalid start byte of a 4-byte sequence (rfc2279)
(b'\xf5', FFFD), # only the start byte
(b'\xf5\xf5', FFFD*2), # 2 start bytes
(b'\xf5\x80', FFFD*2), # only 1 continuation byte
(b'\xf5\x80\x80', FFFD*3), # only 2 continuation bytes
(b'\xf5\x80\x80\x80', FFFD*4), # 3 continuation bytes
(b'\xf5\x80\x41', FFFD*2+'A'), # 1 valid cb and 1 invalid
(b'\xf5\x80\x41\xf5', FFFD*2+'A'+FFFD),
(b'\xf5\x41\x80\x80\x41', FFFD+'A'+FFFD*2+'A'),
# with invalid start byte of a 5-byte sequence (rfc2279)
(b'\xf8', FFFD), # only the start byte
(b'\xf8\xf8', FFFD*2), # 2 start bytes
(b'\xf8\x80', FFFD*2), # only one continuation byte
(b'\xf8\x80\x41', FFFD*2 + 'A'), # 1 valid cb and 1 invalid
(b'\xf8\x80\x80\x80\x80', FFFD*5), # invalid 5 bytes seq with 5 bytes
# with invalid start byte of a 6-byte sequence (rfc2279)
(b'\xfc', FFFD), # only the start byte
(b'\xfc\xfc', FFFD*2), # 2 start bytes
(b'\xfc\x80\x80', FFFD*3), # only 2 continuation bytes
(b'\xfc\x80\x80\x80\x80\x80', FFFD*6), # 6 continuation bytes
# invalid start byte
(b'\xfe', FFFD),
(b'\xfe\x80\x80', FFFD*3),
# other sequences
(b'\xf1\x80\x41\x42\x43', '\ufffd\x41\x42\x43'),
(b'\xf1\x80\xff\x42\x43', '\ufffd\ufffd\x42\x43'),
(b'\xf1\x80\xc2\x81\x43', '\ufffd\x81\x43'),
(b'\x61\xF1\x80\x80\xE1\x80\xC2\x62\x80\x63\x80\xBF\x64',
'\x61\uFFFD\uFFFD\uFFFD\x62\uFFFD\x63\uFFFD\uFFFD\x64'),
]
for n, (seq, res) in enumerate(sequences):
self.assertRaises(UnicodeDecodeError, seq.decode, 'utf-8', 'strict')
self.assertEqual(seq.decode('utf-8', 'replace'), res)
self.assertEqual((seq+b'b').decode('utf-8', 'replace'), res+'b')
self.assertEqual(seq.decode('utf-8', 'ignore'),
res.replace('\uFFFD', ''))
def assertCorrectUTF8Decoding(self, seq, res, err):
"""
Check that an invalid UTF-8 sequence raises a UnicodeDecodeError when
'strict' is used, decodes to res when 'replace' is used, and decodes to
res with the U+FFFD replacements removed when 'ignore' is used.
"""
with self.assertRaises(UnicodeDecodeError) as cm:
seq.decode('utf-8')
exc = cm.exception
self.assertIn(err, str(exc))
self.assertEqual(seq.decode('utf-8', 'replace'), res)
self.assertEqual((b'aaaa' + seq + b'bbbb').decode('utf-8', 'replace'),
'aaaa' + res + 'bbbb')
res = res.replace('\ufffd', '')
self.assertEqual(seq.decode('utf-8', 'ignore'), res)
self.assertEqual((b'aaaa' + seq + b'bbbb').decode('utf-8', 'ignore'),
'aaaa' + res + 'bbbb')
def test_invalid_start_byte(self):
"""
Test that an 'invalid start byte' error is raised when the first byte
is not in the ASCII range or is not a valid start byte of a 2-, 3-, or
4-bytes sequence. The invalid start byte is replaced with a single
U+FFFD when errors='replace'.
E.g. <80> is a continuation byte and can appear only after a start byte.
"""
FFFD = '\ufffd'
for byte in b'\x80\xA0\x9F\xBF\xC0\xC1\xF5\xFF':
self.assertCorrectUTF8Decoding(bytes([byte]), '\ufffd',
'invalid start byte')
def test_unexpected_end_of_data(self):
"""
Test that an 'unexpected end of data' error is raised when the string
ends after a start byte of a 2-, 3-, or 4-bytes sequence without having
enough continuation bytes. The incomplete sequence is replaced with a
single U+FFFD when errors='replace'.
E.g. in the sequence <F3 80 80>, F3 is the start byte of a 4-bytes
sequence, but it's followed by only 2 valid continuation bytes and the
last continuation byte is missing.
Note: the continuation bytes must all be valid; if one of them is
invalid, another error will be raised.
"""
sequences = [
'C2', 'DF',
'E0 A0', 'E0 BF', 'E1 80', 'E1 BF', 'EC 80', 'EC BF',
'ED 80', 'ED 9F', 'EE 80', 'EE BF', 'EF 80', 'EF BF',
'F0 90', 'F0 BF', 'F0 90 80', 'F0 90 BF', 'F0 BF 80', 'F0 BF BF',
'F1 80', 'F1 BF', 'F1 80 80', 'F1 80 BF', 'F1 BF 80', 'F1 BF BF',
'F3 80', 'F3 BF', 'F3 80 80', 'F3 80 BF', 'F3 BF 80', 'F3 BF BF',
'F4 80', 'F4 8F', 'F4 80 80', 'F4 80 BF', 'F4 8F 80', 'F4 8F BF'
]
FFFD = '\ufffd'
for seq in sequences:
self.assertCorrectUTF8Decoding(bytes.fromhex(seq), '\ufffd',
'unexpected end of data')
def test_invalid_cb_for_2bytes_seq(self):
"""
Test that an 'invalid continuation byte' error is raised when the
continuation byte of a 2-bytes sequence is invalid. The start byte
is replaced by a single U+FFFD and the second byte is handled
separately when errors='replace'.
E.g. in the sequence <C2 41>, C2 is the start byte of a 2-bytes
sequence, but 41 is not a valid continuation byte because it's the
ASCII letter 'A'.
"""
FFFD = '\ufffd'
FFFDx2 = FFFD * 2
sequences = [
('C2 00', FFFD+'\x00'), ('C2 7F', FFFD+'\x7f'),
('C2 C0', FFFDx2), ('C2 FF', FFFDx2),
('DF 00', FFFD+'\x00'), ('DF 7F', FFFD+'\x7f'),
('DF C0', FFFDx2), ('DF FF', FFFDx2),
]
for seq, res in sequences:
self.assertCorrectUTF8Decoding(bytes.fromhex(seq), res,
'invalid continuation byte')
def test_invalid_cb_for_3bytes_seq(self):
"""
Test that an 'invalid continuation byte' error is raised when the
continuation byte(s) of a 3-bytes sequence are invalid. When
errors='replace', if the first continuation byte is valid, the first
two bytes (start byte + 1st cb) are replaced by a single U+FFFD and the
third byte is handled separately; otherwise only the start byte is
replaced with a U+FFFD and the other continuation bytes are handled
separately.
E.g. in the sequence <E1 80 41>, E1 is the start byte of a 3-bytes
sequence, 80 is a valid continuation byte, but 41 is not a valid cb
because it's the ASCII letter 'A'.
Note: when the start byte is E0 or ED, the valid ranges for the first
continuation byte are limited to A0..BF and 80..9F respectively.
Python 2 used to consider all the bytes in range 80..BF valid when the
start byte was ED. This is fixed in Python 3.
"""
FFFD = '\ufffd'
FFFDx2 = FFFD * 2
sequences = [
('E0 00', FFFD+'\x00'), ('E0 7F', FFFD+'\x7f'), ('E0 80', FFFDx2),
('E0 9F', FFFDx2), ('E0 C0', FFFDx2), ('E0 FF', FFFDx2),
('E0 A0 00', FFFD+'\x00'), ('E0 A0 7F', FFFD+'\x7f'),
('E0 A0 C0', FFFDx2), ('E0 A0 FF', FFFDx2),
('E0 BF 00', FFFD+'\x00'), ('E0 BF 7F', FFFD+'\x7f'),
('E0 BF C0', FFFDx2), ('E0 BF FF', FFFDx2), ('E1 00', FFFD+'\x00'),
('E1 7F', FFFD+'\x7f'), ('E1 C0', FFFDx2), ('E1 FF', FFFDx2),
('E1 80 00', FFFD+'\x00'), ('E1 80 7F', FFFD+'\x7f'),
('E1 80 C0', FFFDx2), ('E1 80 FF', FFFDx2),
('E1 BF 00', FFFD+'\x00'), ('E1 BF 7F', FFFD+'\x7f'),
('E1 BF C0', FFFDx2), ('E1 BF FF', FFFDx2), ('EC 00', FFFD+'\x00'),
('EC 7F', FFFD+'\x7f'), ('EC C0', FFFDx2), ('EC FF', FFFDx2),
('EC 80 00', FFFD+'\x00'), ('EC 80 7F', FFFD+'\x7f'),
('EC 80 C0', FFFDx2), ('EC 80 FF', FFFDx2),
('EC BF 00', FFFD+'\x00'), ('EC BF 7F', FFFD+'\x7f'),
('EC BF C0', FFFDx2), ('EC BF FF', FFFDx2), ('ED 00', FFFD+'\x00'),
('ED 7F', FFFD+'\x7f'),
('ED A0', FFFDx2), ('ED BF', FFFDx2), # see note ^
('ED C0', FFFDx2), ('ED FF', FFFDx2), ('ED 80 00', FFFD+'\x00'),
('ED 80 7F', FFFD+'\x7f'), ('ED 80 C0', FFFDx2),
('ED 80 FF', FFFDx2), ('ED 9F 00', FFFD+'\x00'),
('ED 9F 7F', FFFD+'\x7f'), ('ED 9F C0', FFFDx2),
('ED 9F FF', FFFDx2), ('EE 00', FFFD+'\x00'),
('EE 7F', FFFD+'\x7f'), ('EE C0', FFFDx2), ('EE FF', FFFDx2),
('EE 80 00', FFFD+'\x00'), ('EE 80 7F', FFFD+'\x7f'),
('EE 80 C0', FFFDx2), ('EE 80 FF', FFFDx2),
('EE BF 00', FFFD+'\x00'), ('EE BF 7F', FFFD+'\x7f'),
('EE BF C0', FFFDx2), ('EE BF FF', FFFDx2), ('EF 00', FFFD+'\x00'),
('EF 7F', FFFD+'\x7f'), ('EF C0', FFFDx2), ('EF FF', FFFDx2),
('EF 80 00', FFFD+'\x00'), ('EF 80 7F', FFFD+'\x7f'),
('EF 80 C0', FFFDx2), ('EF 80 FF', FFFDx2),
('EF BF 00', FFFD+'\x00'), ('EF BF 7F', FFFD+'\x7f'),
('EF BF C0', FFFDx2), ('EF BF FF', FFFDx2),
]
for seq, res in sequences:
self.assertCorrectUTF8Decoding(bytes.fromhex(seq), res,
'invalid continuation byte')
def test_invalid_cb_for_4bytes_seq(self):
"""
Test that an 'invalid continuation byte' error is raised when the
continuation byte(s) of a 4-bytes sequence are invalid. When
errors='replace', the start byte and all the following valid
continuation bytes are replaced with a single U+FFFD, and all the bytes
starting from the first invalid continuation byte (included) are
handled separately.
E.g. in the sequence <F1 80 41>, F1 is the start byte of a 4-bytes
sequence, 80 is a valid continuation byte, but 41 is not a valid cb
because it's the ASCII letter 'A'.
Note: when the start byte is F0 or F4, the valid ranges for the first
continuation byte are limited to 90..BF and 80..8F respectively.
"""
FFFD = '\ufffd'
FFFDx2 = FFFD * 2
sequences = [
('F0 00', FFFD+'\x00'), ('F0 7F', FFFD+'\x7f'), ('F0 80', FFFDx2),
('F0 8F', FFFDx2), ('F0 C0', FFFDx2), ('F0 FF', FFFDx2),
('F0 90 00', FFFD+'\x00'), ('F0 90 7F', FFFD+'\x7f'),
('F0 90 C0', FFFDx2), ('F0 90 FF', FFFDx2),
('F0 BF 00', FFFD+'\x00'), ('F0 BF 7F', FFFD+'\x7f'),
('F0 BF C0', FFFDx2), ('F0 BF FF', FFFDx2),
('F0 90 80 00', FFFD+'\x00'), ('F0 90 80 7F', FFFD+'\x7f'),
('F0 90 80 C0', FFFDx2), ('F0 90 80 FF', FFFDx2),
('F0 90 BF 00', FFFD+'\x00'), ('F0 90 BF 7F', FFFD+'\x7f'),
('F0 90 BF C0', FFFDx2), ('F0 90 BF FF', FFFDx2),
('F0 BF 80 00', FFFD+'\x00'), ('F0 BF 80 7F', FFFD+'\x7f'),
('F0 BF 80 C0', FFFDx2), ('F0 BF 80 FF', FFFDx2),
('F0 BF BF 00', FFFD+'\x00'), ('F0 BF BF 7F', FFFD+'\x7f'),
('F0 BF BF C0', FFFDx2), ('F0 BF BF FF', FFFDx2),
('F1 00', FFFD+'\x00'), ('F1 7F', FFFD+'\x7f'), ('F1 C0', FFFDx2),
('F1 FF', FFFDx2), ('F1 80 00', FFFD+'\x00'),
('F1 80 7F', FFFD+'\x7f'), ('F1 80 C0', FFFDx2),
('F1 80 FF', FFFDx2), ('F1 BF 00', FFFD+'\x00'),
('F1 BF 7F', FFFD+'\x7f'), ('F1 BF C0', FFFDx2),
('F1 BF FF', FFFDx2), ('F1 80 80 00', FFFD+'\x00'),
('F1 80 80 7F', FFFD+'\x7f'), ('F1 80 80 C0', FFFDx2),
('F1 80 80 FF', FFFDx2), ('F1 80 BF 00', FFFD+'\x00'),
('F1 80 BF 7F', FFFD+'\x7f'), ('F1 80 BF C0', FFFDx2),
('F1 80 BF FF', FFFDx2), ('F1 BF 80 00', FFFD+'\x00'),
('F1 BF 80 7F', FFFD+'\x7f'), ('F1 BF 80 C0', FFFDx2),
('F1 BF 80 FF', FFFDx2), ('F1 BF BF 00', FFFD+'\x00'),
('F1 BF BF 7F', FFFD+'\x7f'), ('F1 BF BF C0', FFFDx2),
('F1 BF BF FF', FFFDx2), ('F3 00', FFFD+'\x00'),
('F3 7F', FFFD+'\x7f'), ('F3 C0', FFFDx2), ('F3 FF', FFFDx2),
('F3 80 00', FFFD+'\x00'), ('F3 80 7F', FFFD+'\x7f'),
('F3 80 C0', FFFDx2), ('F3 80 FF', FFFDx2),
('F3 BF 00', FFFD+'\x00'), ('F3 BF 7F', FFFD+'\x7f'),
('F3 BF C0', FFFDx2), ('F3 BF FF', FFFDx2),
('F3 80 80 00', FFFD+'\x00'), ('F3 80 80 7F', FFFD+'\x7f'),
('F3 80 80 C0', FFFDx2), ('F3 80 80 FF', FFFDx2),
('F3 80 BF 00', FFFD+'\x00'), ('F3 80 BF 7F', FFFD+'\x7f'),
('F3 80 BF C0', FFFDx2), ('F3 80 BF FF', FFFDx2),
('F3 BF 80 00', FFFD+'\x00'), ('F3 BF 80 7F', FFFD+'\x7f'),
('F3 BF 80 C0', FFFDx2), ('F3 BF 80 FF', FFFDx2),
('F3 BF BF 00', FFFD+'\x00'), ('F3 BF BF 7F', FFFD+'\x7f'),
('F3 BF BF C0', FFFDx2), ('F3 BF BF FF', FFFDx2),
('F4 00', FFFD+'\x00'), ('F4 7F', FFFD+'\x7f'), ('F4 90', FFFDx2),
('F4 BF', FFFDx2), ('F4 C0', FFFDx2), ('F4 FF', FFFDx2),
('F4 80 00', FFFD+'\x00'), ('F4 80 7F', FFFD+'\x7f'),
('F4 80 C0', FFFDx2), ('F4 80 FF', FFFDx2),
('F4 8F 00', FFFD+'\x00'), ('F4 8F 7F', FFFD+'\x7f'),
('F4 8F C0', FFFDx2), ('F4 8F FF', FFFDx2),
('F4 80 80 00', FFFD+'\x00'), ('F4 80 80 7F', FFFD+'\x7f'),
('F4 80 80 C0', FFFDx2), ('F4 80 80 FF', FFFDx2),
('F4 80 BF 00', FFFD+'\x00'), ('F4 80 BF 7F', FFFD+'\x7f'),
('F4 80 BF C0', FFFDx2), ('F4 80 BF FF', FFFDx2),
('F4 8F 80 00', FFFD+'\x00'), ('F4 8F 80 7F', FFFD+'\x7f'),
('F4 8F 80 C0', FFFDx2), ('F4 8F 80 FF', FFFDx2),
('F4 8F BF 00', FFFD+'\x00'), ('F4 8F BF 7F', FFFD+'\x7f'),
('F4 8F BF C0', FFFDx2), ('F4 8F BF FF', FFFDx2)
]
for seq, res in sequences:
self.assertCorrectUTF8Decoding(bytes.fromhex(seq), res,
'invalid continuation byte')
def test_codecs_idna(self):
# Test whether trailing dot is preserved
self.assertEqual("www.python.org.".encode("idna"), b"www.python.org.")
def test_codecs_errors(self):
# Error handling (encoding)
self.assertRaises(UnicodeError, 'Andr\202 x'.encode, 'ascii')
self.assertRaises(UnicodeError, 'Andr\202 x'.encode, 'ascii','strict')
self.assertEqual('Andr\202 x'.encode('ascii','ignore'), b"Andr x")
self.assertEqual('Andr\202 x'.encode('ascii','replace'), b"Andr? x")
self.assertEqual('Andr\202 x'.encode('ascii', 'replace'),
'Andr\202 x'.encode('ascii', errors='replace'))
self.assertEqual('Andr\202 x'.encode('ascii', 'ignore'),
'Andr\202 x'.encode(encoding='ascii', errors='ignore'))
# Error handling (decoding)
self.assertRaises(UnicodeError, str, b'Andr\202 x', 'ascii')
self.assertRaises(UnicodeError, str, b'Andr\202 x', 'ascii', 'strict')
self.assertEqual(str(b'Andr\202 x', 'ascii', 'ignore'), "Andr x")
self.assertEqual(str(b'Andr\202 x', 'ascii', 'replace'), 'Andr\uFFFD x')
self.assertEqual(str(b'\202 x', 'ascii', 'replace'), '\uFFFD x')
# Error handling (unknown character names)
self.assertEqual(b"\\N{foo}xx".decode("unicode-escape", "ignore"), "xx")
# Error handling (truncated escape sequence)
self.assertRaises(UnicodeError, b"\\".decode, "unicode-escape")
self.assertRaises(TypeError, b"hello".decode, "test.unicode1")
self.assertRaises(TypeError, str, b"hello", "test.unicode2")
self.assertRaises(TypeError, "hello".encode, "test.unicode1")
self.assertRaises(TypeError, "hello".encode, "test.unicode2")
# Error handling (wrong arguments)
self.assertRaises(TypeError, "hello".encode, 42, 42, 42)
# Error handling (lone surrogate in
# _PyUnicode_TransformDecimalAndSpaceToASCII())
self.assertRaises(ValueError, int, "\ud800")
self.assertRaises(ValueError, int, "\udf00")
self.assertRaises(ValueError, float, "\ud800")
self.assertRaises(ValueError, float, "\udf00")
self.assertRaises(ValueError, complex, "\ud800")
self.assertRaises(ValueError, complex, "\udf00")
def test_codecs(self):
# Encoding
self.assertEqual('hello'.encode('ascii'), b'hello')
self.assertEqual('hello'.encode('utf-7'), b'hello')
self.assertEqual('hello'.encode('utf-8'), b'hello')
self.assertEqual('hello'.encode('utf-8'), b'hello')
self.assertEqual('hello'.encode('utf-16-le'), b'h\000e\000l\000l\000o\000')
self.assertEqual('hello'.encode('utf-16-be'), b'\000h\000e\000l\000l\000o')
self.assertEqual('hello'.encode('latin-1'), b'hello')
# Default encoding is utf-8
self.assertEqual('\u2603'.encode(), b'\xe2\x98\x83')
# Roundtrip safety for BMP (just the first 1024 chars)
for c in range(1024):
u = chr(c)
for encoding in ('utf-7', 'utf-8', 'utf-16', 'utf-16-le',
'utf-16-be', 'raw_unicode_escape',
'unicode_escape'):
self.assertEqual(str(u.encode(encoding),encoding), u)
# Roundtrip safety for BMP (just the first 256 chars)
for c in range(256):
u = chr(c)
for encoding in ('latin-1',):
self.assertEqual(str(u.encode(encoding),encoding), u)
# Roundtrip safety for BMP (just the first 128 chars)
for c in range(128):
u = chr(c)
for encoding in ('ascii',):
self.assertEqual(str(u.encode(encoding),encoding), u)
# Roundtrip safety for non-BMP (just a few chars)
with warnings.catch_warnings():
u = '\U00010001\U00020002\U00030003\U00040004\U00050005'
for encoding in ('utf-8', 'utf-16', 'utf-16-le', 'utf-16-be',
'raw_unicode_escape', 'unicode_escape'):
self.assertEqual(str(u.encode(encoding),encoding), u)
# UTF-8 must be roundtrip safe for all code points
# (except surrogates, which are forbidden).
u = ''.join(map(chr, list(range(0, 0xd800)) +
list(range(0xe000, 0x110000))))
for encoding in ('utf-8',):
self.assertEqual(str(u.encode(encoding),encoding), u)
def test_codecs_charmap(self):
# 0-127
s = bytes(range(128))
for encoding in (
'cp037', 'cp1026', 'cp273',
'cp437', 'cp500', 'cp720', 'cp737', 'cp775', 'cp850',
'cp852', 'cp855', 'cp858', 'cp860', 'cp861', 'cp862',
'cp863', 'cp865', 'cp866', 'cp1125',
'iso8859_10', 'iso8859_13', 'iso8859_14', 'iso8859_15',
'iso8859_2', 'iso8859_3', 'iso8859_4', 'iso8859_5', 'iso8859_6',
'iso8859_7', 'iso8859_9',
'koi8_r', 'koi8_t', 'koi8_u', 'kz1048', 'latin_1',
'mac_cyrillic', 'mac_latin2',
'cp1250', 'cp1251', 'cp1252', 'cp1253', 'cp1254', 'cp1255',
'cp1256', 'cp1257', 'cp1258',
'cp856', 'cp857', 'cp864', 'cp869', 'cp874',
'mac_greek', 'mac_iceland','mac_roman', 'mac_turkish',
'cp1006', 'iso8859_8',
### These have undefined mappings:
#'cp424',
### These fail the round-trip:
#'cp875'
):
self.assertEqual(str(s, encoding).encode(encoding), s)
# 128-255
s = bytes(range(128, 256))
for encoding in (
'cp037', 'cp1026', 'cp273',
'cp437', 'cp500', 'cp720', 'cp737', 'cp775', 'cp850',
'cp852', 'cp855', 'cp858', 'cp860', 'cp861', 'cp862',
'cp863', 'cp865', 'cp866', 'cp1125',
'iso8859_10', 'iso8859_13', 'iso8859_14', 'iso8859_15',
'iso8859_2', 'iso8859_4', 'iso8859_5',
'iso8859_9', 'koi8_r', 'koi8_u', 'latin_1',
'mac_cyrillic', 'mac_latin2',
### These have undefined mappings:
#'cp1250', 'cp1251', 'cp1252', 'cp1253', 'cp1254', 'cp1255',
#'cp1256', 'cp1257', 'cp1258',
#'cp424', 'cp856', 'cp857', 'cp864', 'cp869', 'cp874',
#'iso8859_3', 'iso8859_6', 'iso8859_7', 'koi8_t', 'kz1048',
#'mac_greek', 'mac_iceland','mac_roman', 'mac_turkish',
### These fail the round-trip:
#'cp1006', 'cp875', 'iso8859_8',
):
self.assertEqual(str(s, encoding).encode(encoding), s)
def test_concatenation(self):
self.assertEqual(("abc" "def"), "abcdef")
self.assertEqual(("abc" "def"), "abcdef")
self.assertEqual(("abc" "def"), "abcdef")
self.assertEqual(("abc" "def" "ghi"), "abcdefghi")
self.assertEqual(("abc" "def" "ghi"), "abcdefghi")
def test_printing(self):
class BitBucket:
def write(self, text):
pass
out = BitBucket()
print('abc', file=out)
print('abc', 'def', file=out)
print('abc', 'def', file=out)
print('abc', 'def', file=out)
print('abc\n', file=out)
print('abc\n', end=' ', file=out)
print('abc\n', end=' ', file=out)
print('def\n', file=out)
print('def\n', file=out)
def test_ucs4(self):
x = '\U00100000'
y = x.encode("raw-unicode-escape").decode("raw-unicode-escape")
self.assertEqual(x, y)
y = br'\U00100000'
x = y.decode("raw-unicode-escape").encode("raw-unicode-escape")
self.assertEqual(x, y)
y = br'\U00010000'
x = y.decode("raw-unicode-escape").encode("raw-unicode-escape")
self.assertEqual(x, y)
try:
br'\U11111111'.decode("raw-unicode-escape")
except UnicodeDecodeError as e:
self.assertEqual(e.start, 0)
self.assertEqual(e.end, 10)
else:
self.fail("Should have raised UnicodeDecodeError")
def test_conversion(self):
# Make sure __str__() works properly
class ObjectToStr:
def __str__(self):
return "foo"
class StrSubclassToStr(str):
def __str__(self):
return "foo"
class StrSubclassToStrSubclass(str):
def __new__(cls, content=""):
return str.__new__(cls, 2*content)
def __str__(self):
return self
self.assertEqual(str(ObjectToStr()), "foo")
self.assertEqual(str(StrSubclassToStr("bar")), "foo")
s = str(StrSubclassToStrSubclass("foo"))
self.assertEqual(s, "foofoo")
self.assertIs(type(s), StrSubclassToStrSubclass)
s = StrSubclass(StrSubclassToStrSubclass("foo"))
self.assertEqual(s, "foofoo")
self.assertIs(type(s), StrSubclass)
def test_unicode_repr(self):
class s1:
def __repr__(self):
return '\\n'
class s2:
def __repr__(self):
return '\\n'
self.assertEqual(repr(s1()), '\\n')
self.assertEqual(repr(s2()), '\\n')
def test_printable_repr(self):
self.assertEqual(repr('\U00010000'), "'%c'" % (0x10000,)) # printable
self.assertEqual(repr('\U00014000'), "'\\U00014000'") # nonprintable
# This test only affects 32-bit platforms because expandtabs can only take
# an int as the max value, not a 64-bit C long. If expandtabs is changed
# to take a 64-bit long, this test should apply to all platforms.
@unittest.skipIf(sys.maxsize > (1 << 32) or struct.calcsize('P') != 4,
'only applies to 32-bit platforms')
def test_expandtabs_overflows_gracefully(self):
self.assertRaises(OverflowError, 't\tt\t'.expandtabs, sys.maxsize)
@support.cpython_only
def test_expandtabs_optimization(self):
s = 'abc'
self.assertIs(s.expandtabs(), s)
def test_raiseMemError(self):
if struct.calcsize('P') == 8:
# 64 bits pointers
ascii_struct_size = 48
compact_struct_size = 72
else:
# 32 bits pointers
ascii_struct_size = 24
compact_struct_size = 36
for char in ('a', '\xe9', '\u20ac', '\U0010ffff'):
code = ord(char)
if code < 0x100:
char_size = 1 # sizeof(Py_UCS1)
struct_size = ascii_struct_size
elif code < 0x10000:
char_size = 2 # sizeof(Py_UCS2)
struct_size = compact_struct_size
else:
char_size = 4 # sizeof(Py_UCS4)
struct_size = compact_struct_size
# Note: sys.maxsize is half of the actual max allocation because of
# the signedness of Py_ssize_t. Strings of maxlen-1 should in principle
# be allocatable, given enough memory.
maxlen = ((sys.maxsize - struct_size) // char_size)
alloc = lambda: char * maxlen
self.assertRaises(MemoryError, alloc)
self.assertRaises(MemoryError, alloc)
def test_format_subclass(self):
class S(str):
def __str__(self):
return '__str__ overridden'
s = S('xxx')
self.assertEqual("%s" % s, '__str__ overridden')
self.assertEqual("{}".format(s), '__str__ overridden')
def test_subclass_add(self):
class S(str):
def __add__(self, o):
return "3"
self.assertEqual(S("4") + S("5"), "3")
class S(str):
def __iadd__(self, o):
return "3"
s = S("1")
s += "4"
self.assertEqual(s, "3")
def test_getnewargs(self):
text = 'abc'
args = text.__getnewargs__()
self.assertIsNot(args[0], text)
self.assertEqual(args[0], text)
self.assertEqual(len(args), 1)
@support.cpython_only
def test_resize(self):
from _testcapi import getargs_u
for length in range(1, 100, 7):
# generate a fresh string (refcount=1)
text = 'a' * length + 'b'
# fill wstr internal field
abc = getargs_u(text)
self.assertEqual(abc, text)
# resize text: wstr field must be cleared and then recomputed
text += 'c'
abcdef = getargs_u(text)
self.assertNotEqual(abc, abcdef)
self.assertEqual(abcdef, text)
def test_compare(self):
# Issue #17615
N = 10
ascii = 'a' * N
ascii2 = 'z' * N
latin = '\x80' * N
latin2 = '\xff' * N
bmp = '\u0100' * N
bmp2 = '\uffff' * N
astral = '\U00100000' * N
astral2 = '\U0010ffff' * N
strings = (
ascii, ascii2,
latin, latin2,
bmp, bmp2,
astral, astral2)
for text1, text2 in itertools.combinations(strings, 2):
equal = (text1 is text2)
self.assertEqual(text1 == text2, equal)
self.assertEqual(text1 != text2, not equal)
if equal:
self.assertTrue(text1 <= text2)
self.assertTrue(text1 >= text2)
# text1 is text2: duplicate strings to skip the "str1 == str2"
# optimization in unicode_compare_eq() and really compare
# character per character
copy1 = duplicate_string(text1)
copy2 = duplicate_string(text2)
self.assertIsNot(copy1, copy2)
self.assertTrue(copy1 == copy2)
self.assertFalse(copy1 != copy2)
self.assertTrue(copy1 <= copy2)
self.assertTrue(copy2 >= copy2)
self.assertTrue(ascii < ascii2)
self.assertTrue(ascii < latin)
self.assertTrue(ascii < bmp)
self.assertTrue(ascii < astral)
self.assertFalse(ascii >= ascii2)
self.assertFalse(ascii >= latin)
self.assertFalse(ascii >= bmp)
self.assertFalse(ascii >= astral)
self.assertFalse(latin < ascii)
self.assertTrue(latin < latin2)
self.assertTrue(latin < bmp)
self.assertTrue(latin < astral)
self.assertTrue(latin >= ascii)
self.assertFalse(latin >= latin2)
self.assertFalse(latin >= bmp)
self.assertFalse(latin >= astral)
self.assertFalse(bmp < ascii)
self.assertFalse(bmp < latin)
self.assertTrue(bmp < bmp2)
self.assertTrue(bmp < astral)
self.assertTrue(bmp >= ascii)
self.assertTrue(bmp >= latin)
self.assertFalse(bmp >= bmp2)
self.assertFalse(bmp >= astral)
self.assertFalse(astral < ascii)
self.assertFalse(astral < latin)
self.assertFalse(astral < bmp2)
self.assertTrue(astral < astral2)
self.assertTrue(astral >= ascii)
self.assertTrue(astral >= latin)
self.assertTrue(astral >= bmp2)
self.assertFalse(astral >= astral2)
def test_free_after_iterating(self):
support.check_free_after_iterating(self, iter, str)
support.check_free_after_iterating(self, reversed, str)
def test_check_encoding_errors(self):
# bpo-37388: str(bytes) and str.decode() must check encoding and errors
# arguments in dev mode
encodings = ('ascii', 'utf8', 'latin1')
invalid = 'Boom, Shaka Laka, Boom!'
code = textwrap.dedent(f'''
import sys
encodings = {encodings!r}
for data in (b'', b'short string'):
try:
str(data, encoding={invalid!r})
except LookupError:
pass
else:
sys.exit(21)
try:
str(data, errors={invalid!r})
except LookupError:
pass
else:
sys.exit(22)
for encoding in encodings:
try:
str(data, encoding, errors={invalid!r})
except LookupError:
pass
else:
sys.exit(22)
for data in ('', 'short string'):
try:
data.encode(encoding={invalid!r})
except LookupError:
pass
else:
sys.exit(23)
try:
data.encode(errors={invalid!r})
except LookupError:
pass
else:
sys.exit(24)
for encoding in encodings:
try:
data.encode(encoding, errors={invalid!r})
except LookupError:
pass
else:
sys.exit(24)
sys.exit(10)
''')
proc = assert_python_failure('-X', 'dev', '-c', code)
self.assertEqual(proc.rc, 10, proc)
class CAPITest(unittest.TestCase):
# Test PyUnicode_FromFormat()
def test_from_format(self):
support.import_module('ctypes')
from ctypes import (
c_char_p,
pythonapi, py_object, sizeof,
c_int, c_long, c_longlong, c_ssize_t,
c_uint, c_ulong, c_ulonglong, c_size_t, c_void_p)
name = "PyUnicode_FromFormat"
_PyUnicode_FromFormat = getattr(pythonapi, name)
_PyUnicode_FromFormat.argtypes = (c_char_p,)
_PyUnicode_FromFormat.restype = py_object
def PyUnicode_FromFormat(format, *args):
cargs = tuple(
py_object(arg) if isinstance(arg, str) else arg
for arg in args)
return _PyUnicode_FromFormat(format, *cargs)
def check_format(expected, format, *args):
text = PyUnicode_FromFormat(format, *args)
self.assertEqual(expected, text)
# ascii format, non-ascii argument
check_format('ascii\x7f=unicode\xe9',
b'ascii\x7f=%U', 'unicode\xe9')
# non-ascii format, ascii argument: ensure that PyUnicode_FromFormatV()
# raises an error
self.assertRaisesRegex(ValueError,
r'^PyUnicode_FromFormatV\(\) expects an ASCII-encoded format '
'string, got a non-ASCII byte: 0xe9$',
PyUnicode_FromFormat, b'unicode\xe9=%s', 'ascii')
# test "%c"
check_format('\uabcd',
b'%c', c_int(0xabcd))
check_format('\U0010ffff',
b'%c', c_int(0x10ffff))
with self.assertRaises(OverflowError):
PyUnicode_FromFormat(b'%c', c_int(0x110000))
# Issue #18183
check_format('\U00010000\U00100000',
b'%c%c', c_int(0x10000), c_int(0x100000))
# test "%"
check_format('%',
b'%')
check_format('%',
b'%%')
check_format('%s',
b'%%s')
check_format('[%]',
b'[%%]')
check_format('%abc',
b'%%%s', b'abc')
# truncated string
check_format('abc',
b'%.3s', b'abcdef')
check_format('abc[\ufffd',
b'%.5s', 'abc[\u20ac]'.encode('utf8'))
check_format("'\\u20acABC'",
b'%A', '\u20acABC')
check_format("'\\u20",
b'%.5A', '\u20acABCDEF')
check_format("'\u20acABC'",
b'%R', '\u20acABC')
check_format("'\u20acA",
b'%.3R', '\u20acABCDEF')
check_format('\u20acAB',
b'%.3S', '\u20acABCDEF')
check_format('\u20acAB',
b'%.3U', '\u20acABCDEF')
check_format('\u20acAB',
b'%.3V', '\u20acABCDEF', None)
check_format('abc[\ufffd',
b'%.5V', None, 'abc[\u20ac]'.encode('utf8'))
        # the following tests come from #7330
# test width modifier and precision modifier with %S
check_format("repr= abc",
b'repr=%5S', 'abc')
check_format("repr=ab",
b'repr=%.2S', 'abc')
check_format("repr= ab",
b'repr=%5.2S', 'abc')
# test width modifier and precision modifier with %R
check_format("repr= 'abc'",
b'repr=%8R', 'abc')
check_format("repr='ab",
b'repr=%.3R', 'abc')
check_format("repr= 'ab",
b'repr=%5.3R', 'abc')
# test width modifier and precision modifier with %A
check_format("repr= 'abc'",
b'repr=%8A', 'abc')
check_format("repr='ab",
b'repr=%.3A', 'abc')
check_format("repr= 'ab",
b'repr=%5.3A', 'abc')
# test width modifier and precision modifier with %s
check_format("repr= abc",
b'repr=%5s', b'abc')
check_format("repr=ab",
b'repr=%.2s', b'abc')
check_format("repr= ab",
b'repr=%5.2s', b'abc')
# test width modifier and precision modifier with %U
check_format("repr= abc",
b'repr=%5U', 'abc')
check_format("repr=ab",
b'repr=%.2U', 'abc')
check_format("repr= ab",
b'repr=%5.2U', 'abc')
# test width modifier and precision modifier with %V
check_format("repr= abc",
b'repr=%5V', 'abc', b'123')
check_format("repr=ab",
b'repr=%.2V', 'abc', b'123')
check_format("repr= ab",
b'repr=%5.2V', 'abc', b'123')
check_format("repr= 123",
b'repr=%5V', None, b'123')
check_format("repr=12",
b'repr=%.2V', None, b'123')
check_format("repr= 12",
b'repr=%5.2V', None, b'123')
# test integer formats (%i, %d, %u)
check_format('010',
b'%03i', c_int(10))
check_format('0010',
b'%0.4i', c_int(10))
check_format('-123',
b'%i', c_int(-123))
check_format('-123',
b'%li', c_long(-123))
check_format('-123',
b'%lli', c_longlong(-123))
check_format('-123',
b'%zi', c_ssize_t(-123))
check_format('-123',
b'%d', c_int(-123))
check_format('-123',
b'%ld', c_long(-123))
check_format('-123',
b'%lld', c_longlong(-123))
check_format('-123',
b'%zd', c_ssize_t(-123))
check_format('123',
b'%u', c_uint(123))
check_format('123',
b'%lu', c_ulong(123))
check_format('123',
b'%llu', c_ulonglong(123))
check_format('123',
b'%zu', c_size_t(123))
# test long output
min_longlong = -(2 ** (8 * sizeof(c_longlong) - 1))
max_longlong = -min_longlong - 1
check_format(str(min_longlong),
b'%lld', c_longlong(min_longlong))
check_format(str(max_longlong),
b'%lld', c_longlong(max_longlong))
max_ulonglong = 2 ** (8 * sizeof(c_ulonglong)) - 1
check_format(str(max_ulonglong),
b'%llu', c_ulonglong(max_ulonglong))
PyUnicode_FromFormat(b'%p', c_void_p(-1))
# test padding (width and/or precision)
check_format('123'.rjust(10, '0'),
b'%010i', c_int(123))
check_format('123'.rjust(100),
b'%100i', c_int(123))
check_format('123'.rjust(100, '0'),
b'%.100i', c_int(123))
check_format('123'.rjust(80, '0').rjust(100),
b'%100.80i', c_int(123))
check_format('123'.rjust(10, '0'),
b'%010u', c_uint(123))
check_format('123'.rjust(100),
b'%100u', c_uint(123))
check_format('123'.rjust(100, '0'),
b'%.100u', c_uint(123))
check_format('123'.rjust(80, '0').rjust(100),
b'%100.80u', c_uint(123))
check_format('123'.rjust(10, '0'),
b'%010x', c_int(0x123))
check_format('123'.rjust(100),
b'%100x', c_int(0x123))
check_format('123'.rjust(100, '0'),
b'%.100x', c_int(0x123))
check_format('123'.rjust(80, '0').rjust(100),
b'%100.80x', c_int(0x123))
# test %A
check_format(r"%A:'abc\xe9\uabcd\U0010ffff'",
b'%%A:%A', 'abc\xe9\uabcd\U0010ffff')
# test %V
check_format('repr=abc',
b'repr=%V', 'abc', b'xyz')
# Test string decode from parameter of %s using utf-8.
# b'\xe4\xba\xba\xe6\xb0\x91' is utf-8 encoded byte sequence of
# '\u4eba\u6c11'
check_format('repr=\u4eba\u6c11',
b'repr=%V', None, b'\xe4\xba\xba\xe6\xb0\x91')
        # Test the replace error handler.
check_format('repr=abc\ufffd',
b'repr=%V', None, b'abc\xff')
# not supported: copy the raw format string. these tests are just here
# to check for crashes and should not be considered as specifications
check_format('%s',
b'%1%s', b'abc')
check_format('%1abc',
b'%1abc')
check_format('%+i',
b'%+i', c_int(10))
check_format('%.%s',
b'%.%s', b'abc')
# Issue #33817: empty strings
check_format('',
b'')
check_format('',
b'%s', b'')
# Test PyUnicode_AsWideChar()
@support.cpython_only
def test_aswidechar(self):
from _testcapi import unicode_aswidechar
support.import_module('ctypes')
from ctypes import c_wchar, sizeof
wchar, size = unicode_aswidechar('abcdef', 2)
self.assertEqual(size, 2)
self.assertEqual(wchar, 'ab')
wchar, size = unicode_aswidechar('abc', 3)
self.assertEqual(size, 3)
self.assertEqual(wchar, 'abc')
wchar, size = unicode_aswidechar('abc', 4)
self.assertEqual(size, 3)
self.assertEqual(wchar, 'abc\0')
wchar, size = unicode_aswidechar('abc', 10)
self.assertEqual(size, 3)
self.assertEqual(wchar, 'abc\0')
wchar, size = unicode_aswidechar('abc\0def', 20)
self.assertEqual(size, 7)
self.assertEqual(wchar, 'abc\0def\0')
nonbmp = chr(0x10ffff)
if sizeof(c_wchar) == 2:
buflen = 3
nchar = 2
else: # sizeof(c_wchar) == 4
buflen = 2
nchar = 1
wchar, size = unicode_aswidechar(nonbmp, buflen)
self.assertEqual(size, nchar)
self.assertEqual(wchar, nonbmp + '\0')
# Test PyUnicode_AsWideCharString()
@support.cpython_only
def test_aswidecharstring(self):
from _testcapi import unicode_aswidecharstring
support.import_module('ctypes')
from ctypes import c_wchar, sizeof
wchar, size = unicode_aswidecharstring('abc')
self.assertEqual(size, 3)
self.assertEqual(wchar, 'abc\0')
wchar, size = unicode_aswidecharstring('abc\0def')
self.assertEqual(size, 7)
self.assertEqual(wchar, 'abc\0def\0')
nonbmp = chr(0x10ffff)
if sizeof(c_wchar) == 2:
nchar = 2
else: # sizeof(c_wchar) == 4
nchar = 1
wchar, size = unicode_aswidecharstring(nonbmp)
self.assertEqual(size, nchar)
self.assertEqual(wchar, nonbmp + '\0')
# Test PyUnicode_AsUCS4()
@support.cpython_only
def test_asucs4(self):
from _testcapi import unicode_asucs4
for s in ['abc', '\xa1\xa2', '\u4f60\u597d', 'a\U0001f600',
'a\ud800b\udfffc', '\ud834\udd1e']:
l = len(s)
self.assertEqual(unicode_asucs4(s, l, True), s+'\0')
self.assertEqual(unicode_asucs4(s, l, False), s+'\uffff')
self.assertEqual(unicode_asucs4(s, l+1, True), s+'\0\uffff')
self.assertEqual(unicode_asucs4(s, l+1, False), s+'\0\uffff')
self.assertRaises(SystemError, unicode_asucs4, s, l-1, True)
self.assertRaises(SystemError, unicode_asucs4, s, l-2, False)
s = '\0'.join([s, s])
self.assertEqual(unicode_asucs4(s, len(s), True), s+'\0')
self.assertEqual(unicode_asucs4(s, len(s), False), s+'\uffff')
# Test PyUnicode_AsUTF8()
@support.cpython_only
def test_asutf8(self):
from _testcapi import unicode_asutf8
bmp = '\u0100'
bmp2 = '\uffff'
nonbmp = chr(0x10ffff)
self.assertEqual(unicode_asutf8(bmp), b'\xc4\x80')
self.assertEqual(unicode_asutf8(bmp2), b'\xef\xbf\xbf')
self.assertEqual(unicode_asutf8(nonbmp), b'\xf4\x8f\xbf\xbf')
self.assertRaises(UnicodeEncodeError, unicode_asutf8, 'a\ud800b\udfffc')
# Test PyUnicode_AsUTF8AndSize()
@support.cpython_only
def test_asutf8andsize(self):
from _testcapi import unicode_asutf8andsize
bmp = '\u0100'
bmp2 = '\uffff'
nonbmp = chr(0x10ffff)
self.assertEqual(unicode_asutf8andsize(bmp), (b'\xc4\x80', 2))
self.assertEqual(unicode_asutf8andsize(bmp2), (b'\xef\xbf\xbf', 3))
self.assertEqual(unicode_asutf8andsize(nonbmp), (b'\xf4\x8f\xbf\xbf', 4))
self.assertRaises(UnicodeEncodeError, unicode_asutf8andsize, 'a\ud800b\udfffc')
# Test PyUnicode_FindChar()
@support.cpython_only
def test_findchar(self):
from _testcapi import unicode_findchar
for str in "\xa1", "\u8000\u8080", "\ud800\udc02", "\U0001f100\U0001f1f1":
for i, ch in enumerate(str):
self.assertEqual(unicode_findchar(str, ord(ch), 0, len(str), 1), i)
self.assertEqual(unicode_findchar(str, ord(ch), 0, len(str), -1), i)
str = "!>_<!"
self.assertEqual(unicode_findchar(str, 0x110000, 0, len(str), 1), -1)
self.assertEqual(unicode_findchar(str, 0x110000, 0, len(str), -1), -1)
# start < end
self.assertEqual(unicode_findchar(str, ord('!'), 1, len(str)+1, 1), 4)
self.assertEqual(unicode_findchar(str, ord('!'), 1, len(str)+1, -1), 4)
# start >= end
self.assertEqual(unicode_findchar(str, ord('!'), 0, 0, 1), -1)
self.assertEqual(unicode_findchar(str, ord('!'), len(str), 0, 1), -1)
# negative
self.assertEqual(unicode_findchar(str, ord('!'), -len(str), -1, 1), 0)
self.assertEqual(unicode_findchar(str, ord('!'), -len(str), -1, -1), 0)
# Test PyUnicode_CopyCharacters()
@support.cpython_only
def test_copycharacters(self):
from _testcapi import unicode_copycharacters
strings = [
'abcde', '\xa1\xa2\xa3\xa4\xa5',
'\u4f60\u597d\u4e16\u754c\uff01',
'\U0001f600\U0001f601\U0001f602\U0001f603\U0001f604'
]
for idx, from_ in enumerate(strings):
# wide -> narrow: exceed maxchar limitation
for to in strings[:idx]:
self.assertRaises(
SystemError,
unicode_copycharacters, to, 0, from_, 0, 5
)
# same kind
for from_start in range(5):
self.assertEqual(
unicode_copycharacters(from_, 0, from_, from_start, 5),
(from_[from_start:from_start+5].ljust(5, '\0'),
5-from_start)
)
for to_start in range(5):
self.assertEqual(
unicode_copycharacters(from_, to_start, from_, to_start, 5),
(from_[to_start:to_start+5].rjust(5, '\0'),
5-to_start)
)
# narrow -> wide
# Tests omitted since this creates invalid strings.
s = strings[0]
self.assertRaises(IndexError, unicode_copycharacters, s, 6, s, 0, 5)
self.assertRaises(IndexError, unicode_copycharacters, s, -1, s, 0, 5)
self.assertRaises(IndexError, unicode_copycharacters, s, 0, s, 6, 5)
self.assertRaises(IndexError, unicode_copycharacters, s, 0, s, -1, 5)
self.assertRaises(SystemError, unicode_copycharacters, s, 1, s, 0, 5)
self.assertRaises(SystemError, unicode_copycharacters, s, 0, s, 0, -1)
self.assertRaises(SystemError, unicode_copycharacters, s, 0, b'', 0, 0)
@support.cpython_only
def test_encode_decimal(self):
from _testcapi import unicode_encodedecimal
self.assertEqual(unicode_encodedecimal('123'),
b'123')
self.assertEqual(unicode_encodedecimal('\u0663.\u0661\u0664'),
b'3.14')
self.assertEqual(unicode_encodedecimal("\N{EM SPACE}3.14\N{EN SPACE}"),
b' 3.14 ')
self.assertRaises(UnicodeEncodeError,
unicode_encodedecimal, "123\u20ac", "strict")
self.assertRaisesRegex(
ValueError,
"^'decimal' codec can't encode character",
unicode_encodedecimal, "123\u20ac", "replace")
@support.cpython_only
def test_transform_decimal(self):
from _testcapi import unicode_transformdecimaltoascii as transform_decimal
self.assertEqual(transform_decimal('123'),
'123')
self.assertEqual(transform_decimal('\u0663.\u0661\u0664'),
'3.14')
self.assertEqual(transform_decimal("\N{EM SPACE}3.14\N{EN SPACE}"),
"\N{EM SPACE}3.14\N{EN SPACE}")
self.assertEqual(transform_decimal('123\u20ac'),
'123\u20ac')
@support.cpython_only
def test_pep393_utf8_caching_bug(self):
# Issue #25709: Problem with string concatenation and utf-8 cache
from _testcapi import getargs_s_hash
for k in 0x24, 0xa4, 0x20ac, 0x1f40d:
s = ''
for i in range(5):
# Due to CPython specific optimization the 's' string can be
# resized in-place.
s += chr(k)
# Parsing with the "s#" format code calls indirectly
# PyUnicode_AsUTF8AndSize() which creates the UTF-8
# encoded string cached in the Unicode object.
self.assertEqual(getargs_s_hash(s), chr(k).encode() * (i + 1))
# Check that the second call returns the same result
self.assertEqual(getargs_s_hash(s), chr(k).encode() * (i + 1))
class StringModuleTest(unittest.TestCase):
def test_formatter_parser(self):
def parse(format):
return list(_string.formatter_parser(format))
formatter = parse("prefix {2!s}xxx{0:^+10.3f}{obj.attr!s} {z[0]!s:10}")
self.assertEqual(formatter, [
('prefix ', '2', '', 's'),
('xxx', '0', '^+10.3f', None),
('', 'obj.attr', '', 's'),
(' ', 'z[0]', '10', 's'),
])
formatter = parse("prefix {} suffix")
self.assertEqual(formatter, [
('prefix ', '', '', None),
(' suffix', None, None, None),
])
formatter = parse("str")
self.assertEqual(formatter, [
('str', None, None, None),
])
formatter = parse("")
self.assertEqual(formatter, [])
formatter = parse("{0}")
self.assertEqual(formatter, [
('', '0', '', None),
])
self.assertRaises(TypeError, _string.formatter_parser, 1)
def test_formatter_field_name_split(self):
def split(name):
items = list(_string.formatter_field_name_split(name))
items[1] = list(items[1])
return items
self.assertEqual(split("obj"), ["obj", []])
self.assertEqual(split("obj.arg"), ["obj", [(True, 'arg')]])
self.assertEqual(split("obj[key]"), ["obj", [(False, 'key')]])
self.assertEqual(split("obj.arg[key1][key2]"), [
"obj",
[(True, 'arg'),
(False, 'key1'),
(False, 'key2'),
]])
self.assertRaises(TypeError, _string.formatter_field_name_split, 1)
if __name__ == "__main__":
unittest.main()
|
the-stack_0_18137 | from pyrogram.types.bots_and_keyboards.inline_keyboard_button import InlineKeyboardButton
from pyrogram.types.bots_and_keyboards.inline_keyboard_markup import InlineKeyboardMarkup
from nksama import bot
from pyrogram import filters
from nksama.plugins.stats import col
from nksama.plugins.stats import users_db , grps
from nksama import help_message
@bot.on_message(filters.command('start') | filters.command('start@FlameXbot'))
def start(_,message):
try:
if message.chat.type == "private":
users = col.find({})
mfs = []
for x in users:
mfs.append(x['user_id'])
if message.from_user.id not in mfs:
user = {"type": "user" , "user_id": message.from_user.id}
col.insert_one(user)
else:
users = grps.find({})
mfs = []
for x in users:
mfs.append(x['chat_id'])
if message.chat.id not in mfs:
grp = {"type": "group" , "chat_id": message.chat.id}
grps.insert_one(grp)
except Exception as e:
bot.send_message(-1001646296281 , f"error in adding stats:\n\n{e}")
    if message.chat.type == "private" and "help" not in message.text:
bot.send_message(message.chat.id , "Hello there I'm Rengoku\nI'll help you to manage your groups" , reply_markup=InlineKeyboardMarkup([
[InlineKeyboardButton('help' , callback_data="help")]
]))
if "help" in message.text:
bot.send_message(message.chat.id , "Help" , reply_markup=InlineKeyboardMarkup([
[InlineKeyboardButton('help' , callback_data="help")]
]))
if not message.chat.type == "private":
message.reply("Hello there I'm Rengoku")
|
the-stack_0_18138 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# --------------------------------------------------------------------------
# MIT License
#
# Copyright (c) 2020, Solace Corporation, Ricardo Gomez-Ulmke ([email protected])
# Copyright (c) 2020, Solace Corporation, Swen-Helge Huber <[email protected]>
# Copyright (c) 2019, Mark Street <[email protected]>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# --------------------------------------------------------------------------
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
import ansible.module_utils.network.solace.solace_utils as su
from ansible.module_utils.basic import AnsibleModule
DOCUMENTATION = '''
---
module: solace_queue
short_description: Configure a queue object on a message vpn.
description:
- "Configure a queue object on a message vpn. Allows addition, removal and configuration of queue objects in an idempotent manner."
- "Reference: U(https://docs.solace.com/API-Developer-Online-Ref-Documentation/swagger-ui/config/index.html#/queue)."
options:
name:
description: Name of the queue. Maps to 'queueName' in the API.
required: true
type: str
aliases: [queue, queue_name]
extends_documentation_fragment:
- solace.broker
- solace.vpn
- solace.settings
- solace.state
seealso:
- module: solace_get_queues
author:
- Mark Street ([email protected])
- Swen-Helge Huber ([email protected])
- Ricardo Gomez-Ulmke ([email protected])
'''
EXAMPLES = '''
- name: Playbook to add a queue named 'bar'
hosts: localhost
tasks:
- name: Remove 'bar' queue from 'foo' VPN
solace_queue:
name: bar
msg_vpn: foo
state: absent
- name: Add 'bar' queue to 'foo' VPN
solace_queue:
name: bar
msg_vpn: foo
state: present
register: testout
- name: dump output
debug:
msg: '{{ testout }}'
'''
RETURN = '''
response:
description: The response from the Solace Sempv2 request.
type: dict
'''
class SolaceQueueTask(su.SolaceTask):
LOOKUP_ITEM_KEY = 'queueName'
def __init__(self, module):
su.SolaceTask.__init__(self, module)
def lookup_item(self):
return self.module.params['name']
def get_args(self):
return [self.module.params['msg_vpn']]
def get_func(self, solace_config, vpn, lookup_item_value):
"""Pull configuration for all Queues associated with a given VPN"""
# GET /msgVpns/{msgVpnName}/queues/{queueName}
path_array = [su.SEMP_V2_CONFIG, su.MSG_VPNS, vpn, su.QUEUES, lookup_item_value]
return su.get_configuration(solace_config, path_array, self.LOOKUP_ITEM_KEY)
def create_func(self, solace_config, vpn, queue, settings=None):
"""Create a Queue"""
defaults = {}
mandatory = {
'msgVpnName': vpn,
'queueName': queue
}
data = su.merge_dicts(defaults, mandatory, settings)
path_array = [su.SEMP_V2_CONFIG, su.MSG_VPNS, vpn, su.QUEUES]
return su.make_post_request(solace_config, path_array, data)
def update_func(self, solace_config, vpn, lookup_item_value, settings):
"""Update an existing Queue"""
path_array = [su.SEMP_V2_CONFIG, su.MSG_VPNS, vpn, su.QUEUES, lookup_item_value]
return su.make_patch_request(solace_config, path_array, settings)
def delete_func(self, solace_config, vpn, lookup_item_value):
"""Delete a Queue"""
path_array = [su.SEMP_V2_CONFIG, su.MSG_VPNS, vpn, su.QUEUES, lookup_item_value]
return su.make_delete_request(solace_config, path_array)
def run_module():
"""Entrypoint to module"""
"""Compose module arguments"""
module_args = dict(
name=dict(type='str', aliases=['queue', 'queue_name'], required=True)
)
arg_spec = su.arg_spec_broker()
arg_spec.update(su.arg_spec_vpn())
arg_spec.update(su.arg_spec_crud())
# module_args override standard arg_specs
arg_spec.update(module_args)
module = AnsibleModule(
argument_spec=arg_spec,
supports_check_mode=True
)
solace_task = SolaceQueueTask(module)
result = solace_task.do_task()
module.exit_json(**result)
def main():
"""Standard boilerplate"""
run_module()
if __name__ == '__main__':
main()
|
the-stack_0_18139 | '''
706B:Interesting drink
url: https://codeforces.com/problemset/problem/706/B
'''
shops = int(input())
costs = list(map(int, input().split(' ')))
days = int(input())
costs.sort()
# find the cheapest and most expensive prices
min_cost = costs[0]
max_cost = costs[-1]
# Cache, per possible budget, how many shops are affordable; budgets above the maximum price need separate handling.
availabe_shops = [-1] * (max_cost + 1)
for i in range(min_cost):
availabe_shops[i] = 0
# processed max cost
current_max = min_cost-1
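# Sketch of what the cache holds (the numbers are illustrative, not from the problem input):
# with costs == [3, 5, 8], a query with 4 coins fills availabe_shops[4] with 1
# (only the 3-coin drink is affordable), and a query with 8 coins fills
# availabe_shops[8] with 3; entries below min_cost are already 0.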
for i in range(days):
coin = int(input())
# coins even larger than max price offered!
if coin > max_cost:
print(shops)
# not prepared yet
else:
if availabe_shops[coin] == -1:
for spend in range(current_max+1, coin + 1):
if spend >= max_cost:
availabe_shops[spend] = shops
continue
'''
for index in range(shops):
if costs[index] > spend and costs[index-1] <= spend:
availabe_shops[spend] = index
break
'''
# use binary search instead
left = 0
right = shops - 1
while left != right:
mid = (left+right)//2
if costs[mid] <= spend:
left = mid
# only need to find if latter shop is smaller
# mind the exceeding problem
if mid == shops-1:
availabe_shops[spend] = shops
break
if costs[mid+1] > spend:
availabe_shops[spend] = mid + 1
break
elif costs[mid] > spend:
# shops >= spend
right = mid
#update the current max
current_max = coin
# get the info, yeah!
print(availabe_shops[coin]) |
the-stack_0_18140 | from typing import Any, Dict, List, Union
from typing_extensions import TypedDict
from .flags import MessageType, Trace
from .types import (
ClientCapabilities,
CompletionContext,
CompletionItem,
ConfigurationItem,
DocumentUri,
FileEvent,
MessageActionItem,
Position,
ProgressToken,
Registration,
TextDocumentIdentifier,
Unregistration,
WorkspaceEdit,
WorkspaceFolder,
WorkspaceFoldersChangeEvent,
)
class WorkDoneProgressParams(TypedDict, total=False):
workDoneToken: ProgressToken
class PartialResultParams(TypedDict, total=False):
partialResultToken: ProgressToken
class _InitializeParams__Optional(WorkDoneProgressParams, total=False):
clientInfo: Dict[str, Any] # lazy
rootPath: Union[str, None]
initializationOptions: Any
trace: Trace
workspaceFolders: Union[List[WorkspaceFolder], None]
class InitializeParams(_InitializeParams__Optional):
processId: Union[int, None]
rootUri: Union[DocumentUri, None]
capabilities: ClientCapabilities
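# Illustrative only (values invented, not taken from the LSP spec): a minimal
# InitializeParams payload built against the class above could look like
#   params: InitializeParams = {
#       "processId": 1234,
#       "rootUri": "file:///tmp/project",
#       "capabilities": {},
#   }
# the empty capabilities object is used purely for illustration.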
class InitializedParams(TypedDict):
pass
class ShutdownParams(TypedDict):
pass # This might actually be null..
class ExitParams(TypedDict):
pass # This might actually be null..
class ShowMessageParams(TypedDict):
type: MessageType
message: str
class _ShowMessageRequestParams__Optional(TypedDict, total=False):
actions: List[MessageActionItem]
class ShowMessageRequestParams(_ShowMessageRequestParams__Optional):
type: MessageType
message: str
class LogMessageParams(TypedDict):
type: MessageType
message: str
class WorkDoneProgressCreateParams(TypedDict):
token: ProgressToken
class WorkDoneProgressCancelParams(TypedDict):
token: ProgressToken
class RegistrationParams(TypedDict):
registrations: List[Registration]
class UnregistrationParams(TypedDict):
# purposeful typo!
unregisterations: List[Unregistration]
WorkspaceFoldersParams = Union[None]
class DidChangeWorkspaceFoldersParams(TypedDict):
event: WorkspaceFoldersChangeEvent
class DidChangeConfigurationParams(TypedDict):
settings: Any
class ConfigurationParams(TypedDict):
items: List[ConfigurationItem]
class DidChangeWatchedFilesParams(TypedDict):
changes: List[FileEvent]
class WorkspaceSymbolParams(WorkDoneProgressParams, PartialResultParams):
query: str
class _ExecuteCommandParams__Optional(WorkDoneProgressParams, total=False):
arguments: List[Any]
class ExecuteCommandParams(_ExecuteCommandParams__Optional):
command: str
class _ApplyWorkspaceEditParams__Optional(TypedDict, total=False):
label: str
class ApplyWorkspaceEditParams(_ApplyWorkspaceEditParams__Optional):
edit: WorkspaceEdit
class TextDocumentPositionParams(TypedDict):
textDocument: TextDocumentIdentifier
position: Position
class CompletionParams(
TextDocumentPositionParams, WorkDoneProgressParams, PartialResultParams, total=False
):
context: CompletionContext
CompletionItemResolveParams = CompletionItem
class HoverParams(TextDocumentPositionParams, WorkDoneProgressParams):
pass
|
the-stack_0_18141 | import os
from django.views.generic.edit import BaseDetailView
from django.http import HttpResponse, Http404
from django.core.exceptions import PermissionDenied
from django.contrib.contenttypes.models import ContentType
from django.shortcuts import get_object_or_404
from django.conf import settings
from django.views import static
from mapentity.decorators import view_cache_response_content
from mapentity.views import JSONResponseMixin, LastModifiedMixin
from geotrek.common.permissions import PublicOrReadPermMixin
from .models import AltimetryMixin
class HttpSVGResponse(HttpResponse):
content_type = 'image/svg+xml'
def __init__(self, content='', **kwargs):
kwargs['content_type'] = self.content_type
super().__init__(content, **kwargs)
class ElevationChart(LastModifiedMixin, PublicOrReadPermMixin, BaseDetailView):
def render_to_response(self, context, **response_kwargs):
return HttpSVGResponse(self.get_object().get_elevation_profile_svg(self.kwargs['lang']),
**response_kwargs)
class ElevationProfile(LastModifiedMixin, JSONResponseMixin,
PublicOrReadPermMixin, BaseDetailView):
"""Extract elevation profile from a path and return it as JSON"""
def get_context_data(self, **kwargs):
"""
Put elevation profile into response context.
"""
data = {}
elevation_profile = self.object.get_elevation_profile()
# Formatted as distance, elevation, [lng, lat]
for step in elevation_profile:
formatted = step[0], step[3], step[1:3]
data.setdefault('profile', []).append(formatted)
data['limits'] = dict(zip(['ceil', 'floor'], self.object.get_elevation_limits()))
return data
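    # Sketch of the JSON this context serializes to (numbers are illustrative):
    #   {"profile": [[0.0, 1200.5, [6.56, 45.12]], ...],   # [distance, elevation, [lng, lat]]
    #    "limits": {"ceil": 1300, "floor": 1100}}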
class ElevationArea(LastModifiedMixin, JSONResponseMixin, PublicOrReadPermMixin,
BaseDetailView):
"""Extract elevation profile on an area and return it as JSON"""
def view_cache_key(self):
"""Used by the ``view_cache_response_content`` decorator.
"""
obj = self.get_object()
return 'altimetry_dem_area_%s' % obj.pk
@view_cache_response_content()
def dispatch(self, *args, **kwargs):
return super().dispatch(*args, **kwargs)
def get_context_data(self, **kwargs):
return self.object.get_elevation_area()
def serve_elevation_chart(request, model_name, pk, from_command=False):
model = get_object_or_404(ContentType, model=model_name).model_class()
if not issubclass(model, AltimetryMixin):
raise Http404
obj = get_object_or_404(model, pk=pk)
if not obj.is_public():
if not request.user.is_authenticated:
raise PermissionDenied
if not request.user.has_perm('%s.read_%s' % (model._meta.app_label, model_name)):
raise PermissionDenied
language = request.LANGUAGE_CODE
obj.prepare_elevation_chart(language, request.build_absolute_uri('/'))
path = obj.get_elevation_chart_path(language).replace(settings.MEDIA_ROOT, '').lstrip('/')
if settings.DEBUG or from_command:
response = static.serve(request, path, settings.MEDIA_ROOT)
else:
response = HttpResponse()
response['X-Accel-Redirect'] = os.path.join(settings.MEDIA_URL_SECURE, path)
response["Content-Type"] = 'image/png'
return response
|
the-stack_0_18143 | # You are given an n x n 2D matrix representing an image.
# Rotate the image by 90 degrees (clockwise).
# Follow up:
# Could you do this in-place?
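# Worked example of a clockwise 90-degree rotation (for illustration only):
#   [[1, 2],        [[3, 1],
#    [3, 4]]   -->   [4, 2]]
# Solution().rotate(m) mutates m in place and returns nothing; the commented-out
# "return matrix" below is only a testing aid.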
class Solution:
# @param {integer[][]} matrix
# @return {void} Do not return anything, modify matrix in-place instead.
def rotate(self, matrix):
if not matrix:
return
m, n = 0, len(matrix)
for i in xrange(n, 0, -2):
for ps in self.calPairs(i, m):
self.swap(matrix, ps)
m += 1
# return matrix #for test
def calPairs(self, n, m):
l = []
for i in xrange(n-1):
l.append([(x+m, y+m) for (x, y) in [(0, i), (i, n-1), (n-1, n-1-i), (n-1-i, 0)]])
return l
def swap(self, mx, ps):
mx[ps[0][0]][ps[0][1]], mx[ps[1][0]][ps[1][1]], mx[ps[2][0]][ps[2][1]], mx[ps[3][0]][ps[3][1]] = mx[ps[3][0]][ps[3][1]], mx[ps[0][0]][ps[0][1]], mx[ps[1][0]][ps[1][1]], mx[ps[2][0]][ps[2][1]] |
the-stack_0_18144 | # This is the Python adaptation and derivative work of Myia (https://github.com/mila-iqia/myia/).
#
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""The names of functional part are summarized here."""
from mindspore.common._register_for_tensor import tensor_operator_registry
from .primitive import Primitive
from . import operations as P
from .operations import _grad_ops
typeof = Primitive('typeof')
hastype = Primitive('hastype')
cast = P.Cast()
dtype = P.DType()
isconstant = Primitive('is_constant')
isconstant.set_const_prim(True)
issubclass_ = P.IsSubClass()
isinstance_ = P.IsInstance()
fill = P.Fill()
tile = P.Tile()
select = P.Select()
size = P.Size()
ones_like = P.OnesLike()
shape = P.Shape()
rank = P.Rank()
reshape = P.Reshape()
# control_depend: represent dependency between two operators
control_depend = P.ControlDepend()
merge = P.Merge()
geswitch = P.GeSwitch()
addn = P.AddN()
tensor_add = P.TensorAdd()
neg_tensor = P.Neg()
tensor_lt = P.Less()
tensor_le = P.LessEqual()
tensor_gt = P.Greater()
tensor_ge = P.GreaterEqual()
tensor_sub = P.Sub()
tensor_mul = P.Mul()
tensor_div = P.RealDiv()
tensor_floordiv = P.FloorDiv()
tensor_pow = P.Pow()
tensor_mod = P.FloorMod()
strided_slice = P.StridedSlice()
same_type_shape = P.SameTypeShape()
check_bprop = P.CheckBprop()
equal = P.Equal()
not_equal = P.NotEqual()
assign_sub = P.AssignSub()
assign_add = P.AssignAdd()
assign = P.Assign()
square = P.Square()
sqrt = P.Sqrt()
scalar_to_array = P.ScalarToArray()
scalar_to_tensor = P.ScalarToTensor()
tuple_to_array = P.TupleToArray()
scalar_cast = P.ScalarCast()
print_ = P.Print()
expand_dims = P.ExpandDims()
scatter_nd = P.ScatterNd()
gather = P.GatherV2()
gather_nd = P.GatherNd()
scatter_update = P.ScatterUpdate()
scatter_nd_update = P.ScatterNdUpdate()
pack = P.Pack()
partial = P.Partial()
# depend: mount a node to another node
depend = P.Depend()
identity = P.identity()
tuple_setitem = Primitive('tuple_setitem')
tuple_getitem = Primitive('tuple_getitem')
list_getitem = Primitive('list_getitem')
list_setitem = Primitive('list_setitem')
dict_getitem = Primitive('dict_getitem')
dict_setitem = Primitive('dict_setitem')
tuple_div = Primitive("tuple_div")
tuple_len = Primitive("tuple_len")
list_len = Primitive("list_len")
tuple_reversed = Primitive("tuple_reversed")
make_range = Primitive("make_range")
make_tuple = Primitive('make_tuple')
make_dict = Primitive('make_dict')
make_list = Primitive('make_list')
make_slice = Primitive('make_slice')
tuple_equal = Primitive("tuple_equal")
list_equal = Primitive("list_equal")
make_ref = Primitive("make_ref")
scalar_add = Primitive('scalar_add')
scalar_mul = Primitive('scalar_mul')
scalar_sub = Primitive('scalar_sub')
scalar_div = Primitive('scalar_div')
scalar_floordiv = Primitive('scalar_floordiv')
scalar_log = Primitive('scalar_log')
scalar_pow = Primitive('scalar_pow')
scalar_gt = Primitive('scalar_gt')
scalar_ge = Primitive('scalar_ge')
scalar_le = Primitive('scalar_le')
scalar_lt = Primitive('scalar_lt')
scalar_eq = Primitive('scalar_eq')
scalar_ne = Primitive('scalar_ne')
scalar_uadd = Primitive('scalar_uadd')
scalar_usub = Primitive('scalar_usub')
scalar_mod = Primitive('scalar_mod')
string_eq = Primitive('string_equal')
string_concat = Primitive('string_concat')
bool_not = Primitive("bool_not")
bool_or = Primitive("bool_or")
bool_and = Primitive("bool_and")
bool_eq = Primitive("bool_eq")
logical_and = P.LogicalAnd()
logical_or = P.LogicalOr()
logical_not = P.LogicalNot()
array_to_scalar = Primitive('array_to_scalar')
is_ = Primitive("is_")
is_not = Primitive("is_not")
in_dict = Primitive("in_dict")
not_in_dict = Primitive("not_in_dict")
mixed_precision_cast = Primitive("mixed_precision_cast")
broadcast_gradient_args = Primitive('BroadcastGradientArgs')
dot = Primitive('dot')
array_reduce = Primitive('array_reduce')
zeros_like = P.ZerosLike()
distribute = Primitive('distribute')
embed = Primitive('embed')
ref_to_embed = _grad_ops.RefToEmbed()
env_setitem = Primitive('env_setitem')
env_getitem = Primitive('env_getitem')
env_add = Primitive('env_add')
J = Primitive('J')
switch = Primitive('switch')
switch_layer = Primitive('switch_layer')
# for sum bprop
reduced_shape = Primitive("reduced_shape")
# shape_mul: input must be a shape; it multiplies the elements in tuple(shape)
shape_mul = Primitive("shape_mul")
# a primitive that stops gradient propagation through its input
stop_gradient = Primitive("stop_gradient")
make_row_tensor = Primitive('MakeRowTensor')
row_tensor_get_values = Primitive('RowTensorGetValues')
row_tensor_get_indices = Primitive('RowTensorGetIndices')
row_tensor_get_dense_shape = Primitive('RowTensorGetDenseShape')
make_sparse_tensor = Primitive('MakeSparseTensor')
sparse_tensor_get_values = Primitive('SparseTensorGetValues')
sparse_tensor_get_indices = Primitive('SparseTensorGetIndices')
sparse_tensor_get_dense_shape = Primitive('SparseTensorGetDenseShape')
tensor_operator_registry.register('__add__', tensor_add)
tensor_operator_registry.register('__sub__', tensor_sub)
tensor_operator_registry.register('__mul__', tensor_mul)
tensor_operator_registry.register('__truediv__', tensor_div)
tensor_operator_registry.register('__mod__', tensor_mod)
tensor_operator_registry.register('__pow__', tensor_pow)
tensor_operator_registry.register('__floordiv__', tensor_floordiv)
tensor_operator_registry.register('all', P.ReduceAll)
tensor_operator_registry.register('any', P.ReduceAny)
# MindSpore cannot support comparison with Tensor(True)
tensor_operator_registry.register('__eq__', equal)
tensor_operator_registry.register('__ne__', not_equal)
tensor_operator_registry.register('__neg__', neg_tensor)
tensor_operator_registry.register('__lt__', tensor_lt)
tensor_operator_registry.register('__le__', tensor_le)
tensor_operator_registry.register('__gt__', tensor_gt)
tensor_operator_registry.register('__ge__', tensor_ge)
tensor_operator_registry.register('shape', shape)
# support the GE backend for non-comparison operators
tensor_operator_registry.register('cast', cast)
__all__ = [name for name in dir() if name[0] != "_"]
__all__.remove('Primitive')
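# Illustrative usage, assuming this module is imported as `F` (the tensors and the
# `mstype` alias for mindspore.common.dtype below are hypothetical, not defined here):
#   out = F.tensor_add(x, y)          # element-wise addition
#   shp = F.shape(out)                # tuple with the tensor's dimensions
#   half = F.cast(out, mstype.float16)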
|
the-stack_0_18145 | # -*- coding: utf-8 -*-
import random
import time
from types import SimpleNamespace
import pyautogui
import lushi
import main_gui
import unittest
import os
import cv2
import numpy as np
import yaml
from utils.util import find_lushi_window, find_icon_location, restart_game, tuple_add, find_relative_loc
class TestLushi(unittest.TestCase):
def setUp(self):
print("setUp")
def tearDown(self) -> None:
return super().tearDown()
def get_save_image(self, idx=0, to_gray=True):
        # screenshots of: the Rokara hero, the hero list, the treasure list, the banana-hero list
imageNames = ["reward_2021-11-15_05-23.png", "reward_2021-11-15_04-14.png", "reward_2021-11-15_20-28.png",
"reward_2021-11-15_10-20.png"]
imgPath = os.path.join(".", "resource", "imgs_eng_1024x768", "img", imageNames[idx])
src = cv2.imread(imgPath)
if to_gray:
image = cv2.cvtColor(src, cv2.COLOR_BGR2GRAY)
else:
image = np.array(src)
return image
def get_config(self, lang='eng'):
config = {}
try:
with open(f'config/locs_{lang}.yaml', 'r', encoding='utf-8') as f:
config = yaml.safe_load(f)
except:
return ""
config['title'] = 'hearthstone'
config['lang'] = 'eng'
config['loc_file'] = 'config/locs_eng.yaml'
config['img_folder'] = 'resource/imgs_eng_1024x768'
config['screenshot_reward'] = True
config['lang'] = 'EN-1024x768'
config['treasure_blacklist'] = {}
config['lang'] = 'EN-1024x768'
config['reward_count_dropdown'] = 3
config['hero'] = {}
config['hs_path'] = 'C:/Program Files (x86)/Hearthstone/Hearthstone.exe'
config['delay'] = 0.5
config['confidence'] = 0.8
return config
def test_treasure(self):
config = self.get_config()
locs = SimpleNamespace(**config['location'])
print(locs.rewards.get(config['reward_count_dropdown']))
def get_screen(self, title):
return find_lushi_window(title)
def test_screen_record(self):
config = self.get_config()
agent = lushi.Agent(config)
agent.screen_record("test")
self.assertEqual(True, True)
def test_pick_treasure(self):
config = self.get_config()
agent = lushi.Agent(config)
image = self.get_save_image(2)
re = agent.pick_treasure(image)
print(re)
self.assertEqual(True, True)
def test_pick_visitor(self):
config = self.get_config()
agent = lushi.Agent(config)
image = self.get_save_image(3)
re = agent.pick_visitor(image)
print(re)
self.assertEqual(True, True)
def test_loc_click(self):
config = self.get_config(lang='chs')
self.locs = SimpleNamespace(**config['location'])
        rect, img = find_lushi_window('炉石传说')  # '炉石传说' is the Chinese window title for Hearthstone
pyautogui.PAUSE = 0.5
pyautogui.click(tuple_add(rect, self.locs.boss_page_right))
pyautogui.click(tuple_add(rect, self.locs.boss_page_left))
def test_hero_pos(self):
a = [680, 810, 630, 240] # 680, 810
b = [465, 575] # 偶数
first_x = a[0]
mid_x = a[1]
n_my_hero = 6
is_even = n_my_hero % 2 == 0
for i in range(n_my_hero):
x_offset = (mid_x - first_x) * (-n_my_hero // 2 + i + 1)
if is_even:
x_offset -= 65
print(x_offset + mid_x)
# 297 550
# 410 410 680
# 523 523 523 810
# 636 636 940
# 749 1070
# 242 485
# 355 355 615
# 468 468 468 745
# 581 581 581 875
# 694 694 1005
# 807 1135
def test_submit_task(self):
config = self.get_config(lang='eng')
self.locs = SimpleNamespace(**config['location'])
rect, img = find_lushi_window('hearthstone')
pyautogui.PAUSE = 0.5
for y in self.locs.tasks_y:
for x in self.locs.tasks_x:
# do task
pyautogui.click(tuple_add(rect, (x, y)))
pyautogui.click(tuple_add(rect, self.locs.tasks_abandon))
pyautogui.click(tuple_add(rect, self.locs.tasks_abandon))
pyautogui.click(tuple_add(rect, self.locs.tasks_abandon_cancel))
pyautogui.click(tuple_add(rect, self.locs.tasks_abandon_cancel))
pyautogui.click(tuple_add(rect, self.locs.campfire_exit))
pyautogui.click(tuple_add(rect, self.locs.campfire_exit))
# exit the campfire
pyautogui.click(tuple_add(rect, self.locs.empty))
# select first first boss of map
pyautogui.click(tuple_add(rect, self.locs.first_boss))
def test_scan_surprise_loc(self):
return None
def test_any(self):
a = [1, 2, 3]
print(f'asdadasd sd {a}')
if __name__ == "__main__":
unittest.main()
|
the-stack_0_18146 | # Copyright 2019 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional, Any, Iterable, Union, List, TYPE_CHECKING
import abc
import asyncio
import numpy as np
from cirq import circuits, study, value
from cirq.work import work_pool
if TYPE_CHECKING:
import cirq
@value.value_equality(unhashable=True)
class CircuitSampleJob:
"""Describes a sampling task."""
def __init__(self,
circuit: circuits.Circuit,
*,
repetitions: int,
tag: Any = None):
"""
Args:
circuit: The circuit to sample from.
repetitions: How many times to sample the circuit.
tag: An arbitrary value associated with the job. This value is used
so that when a job completes and is handed back, it is possible
to tell what the job was for. For example, the key could be a
string like "main_run" or "calibration_run", or it could be set
to the component of the Hamiltonian (e.g. a PauliString) that
the circuit is supposed to be helping to estimate.
"""
self.circuit = circuit
self.repetitions = repetitions
self.tag = tag
def _value_equality_values_(self):
return self.circuit, self.repetitions, self.tag
def __repr__(self):
return ('cirq.CircuitSampleJob('
'tag={!r}, repetitions={!r}, circuit={!r})').format(
self.tag, self.repetitions, self.circuit)
CIRCUIT_SAMPLE_JOB_TREE = Union[CircuitSampleJob, Iterable[Any]]
class Collector(metaclass=abc.ABCMeta):
"""Collects data from a sampler, in parallel, towards some purpose.
Child classes must override the `next_job` and `on_job_result` methods,
which respectively determine what to sample and how to process the results.
Utility methods on the base class such as `collect` and `collect_async` can
then be given a sampler to collect from, and will request samples with some
specified amount of parallelism.
"""
@abc.abstractmethod
def next_job(self) -> Optional[CIRCUIT_SAMPLE_JOB_TREE]:
"""Determines what to sample next.
This method is called by driving code when more samples can be
requested.
Returns:
A CircuitSampleJob describing the circuit to sample, how many
samples to take, and a key value that can be used in the
`on_job_result` method to recognize which job this is.
Can also return a nested iterable of such jobs.
Returning None, an empty list, or any other result which flattens
into an empty list of work, indicates that the driving code should
await more results (and pass them into on_job_results) before
bothering to ask for more jobs again.
"""
@abc.abstractmethod
def on_job_result(self, job: CircuitSampleJob,
result: study.TrialResult) -> None:
"""Incorporates sampled results.
This method is called by driving code when sample results have become
available.
The results should be incorporated into the collector's state.
"""
def collect(self,
sampler: 'cirq.Sampler',
*,
concurrency: int = 2,
max_total_samples: Optional[int] = None) -> None:
"""Collects needed samples from a sampler.
Examples:
```
collector = cirq.PauliStringCollector(...)
sampler.collect(collector, concurrency=3)
print(collector.estimated_energy())
```
Args:
sampler: The simulator or service to collect samples from.
concurrency: Desired number of sampling jobs to have in flight at
any given time.
max_total_samples: Optional limit on the maximum number of samples
to collect.
Returns:
The collector's result after all desired samples have been
collected.
See Also:
Python 3 documentation "Coroutines and Tasks"
https://docs.python.org/3/library/asyncio-task.html
"""
return asyncio.get_event_loop().run_until_complete(
self.collect_async(sampler,
concurrency=concurrency,
max_total_samples=max_total_samples))
async def collect_async(self,
sampler: 'cirq.Sampler',
*,
concurrency: int = 2,
max_total_samples: Optional[int] = None) -> None:
"""Asynchronously collects needed samples from a sampler.
Examples:
```
collector = cirq.PauliStringCollector(...)
await sampler.collect_async(collector, concurrency=3)
print(collector.estimated_energy())
```
Args:
sampler: The simulator or service to collect samples from.
concurrency: Desired number of sampling jobs to have in flight at
any given time.
max_total_samples: Optional limit on the maximum number of samples
to collect.
Returns:
The collector's result after all desired samples have been
collected.
See Also:
Python 3 documentation "Coroutines and Tasks"
https://docs.python.org/3/library/asyncio-task.html
"""
pool = work_pool.CompletionOrderedAsyncWorkPool()
queued_jobs: List[CircuitSampleJob] = []
remaining_samples = (np.infty if max_total_samples is None else
max_total_samples)
async def _start_async_job(job):
return job, await sampler.run_async(job.circuit,
repetitions=job.repetitions)
# Keep dispatching and processing work.
while True:
# Fill up the work pool.
while remaining_samples > 0 and pool.num_uncollected < concurrency:
if not queued_jobs:
queued_jobs.extend(_flatten_jobs(self.next_job()))
# If no jobs were given, stop asking until something completes.
if not queued_jobs:
break
# Start new sampling job.
new_job = queued_jobs.pop(0)
remaining_samples -= new_job.repetitions
pool.include_work(_start_async_job(new_job))
# If no jobs were started or running, we're in a steady state. Halt.
if not pool.num_uncollected:
break
# Forward next job result from pool.
done_job, done_val = await pool.__anext__()
self.on_job_result(done_job, done_val)
def _flatten_jobs(given: Optional[CIRCUIT_SAMPLE_JOB_TREE]
) -> List[CircuitSampleJob]:
out: List[CircuitSampleJob] = []
if given is not None:
_flatten_jobs_helper(given, out=out)
return out
def _flatten_jobs_helper(given: CIRCUIT_SAMPLE_JOB_TREE, *,
out: List[CircuitSampleJob]) -> None:
if isinstance(given, CircuitSampleJob):
out.append(given)
elif given is not None:
for item in given:
_flatten_jobs_helper(item, out=out)
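# Minimal sketch of a Collector subclass (illustrative only; the class below and its
# attribute names are invented, not part of this module):
#
#   class CountingCollector(Collector):
#       def __init__(self, circuit, total=1000):
#           self.circuit, self.remaining, self.sampled = circuit, total, 0
#       def next_job(self):
#           if self.remaining <= 0:
#               return None  # nothing left to request; wait for pending results
#           self.remaining -= 100
#           return CircuitSampleJob(self.circuit, repetitions=100, tag='count')
#       def on_job_result(self, job, result):
#           self.sampled += job.repetitions
#
# It could then be driven with `CountingCollector(circuit).collect(sampler)`.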
|
the-stack_0_18147 | import datetime
_origin_time_ = datetime.datetime(2020, 6, 7, 23, 45, 39)
_time_since_origin_ = datetime.datetime.now() - _origin_time_
_seconds_since_origin_ = _time_since_origin_.days * 3600*24 + _time_since_origin_.seconds
BRUTE_SLOT_SIZE = 10**6
CACHE_PATH = './commissions.json'
DEFAULT_SLOT = 73500891 # Default to Oct '22
VERBOSITY = 9*10**5
MAX_AFTER = int(_seconds_since_origin_ * 0.95) # Bruting mustn't exceed the current time
|
the-stack_0_18148 | ## Function definitions ##
def quickSort(ary):
    n = len(ary)
    if n <= 1:  # a list with one element or fewer is already sorted
        return ary
    pivot = ary[n // 2]  # choose the middle element as the pivot value
    leftAry, midAry, rightAry = [], [], []
    for num in ary:
        if num < pivot:
            leftAry.append(num)
        elif num > pivot:
            rightAry.append(num)
        else:
            midAry.append(num)
    return quickSort(leftAry) + midAry + quickSort(rightAry)
## Global variable declarations ##
dataAry = [120, 120, 188, 150, 168, 50, 50, 162, 105, 120, 177, 50]
## Main code ##
print('Before sorting -->', dataAry)
dataAry = quickSort(dataAry)
print('After sorting -->', dataAry)
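# Expected output of the run above (the input list is fixed, so this is deterministic):
#   Before sorting --> [120, 120, 188, 150, 168, 50, 50, 162, 105, 120, 177, 50]
#   After sorting --> [50, 50, 50, 105, 120, 120, 120, 150, 162, 168, 177, 188]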
|
the-stack_0_18151 | """
blockmedian - Block average (x,y,z) data tables by median estimation.
"""
import pandas as pd
from pygmt.clib import Session
from pygmt.exceptions import GMTInvalidInput
from pygmt.helpers import (
GMTTempFile,
build_arg_string,
data_kind,
dummy_context,
fmt_docstring,
kwargs_to_strings,
use_alias,
)
@fmt_docstring
@use_alias(I="spacing", R="region", V="verbose")
@kwargs_to_strings(R="sequence")
def blockmedian(table, outfile=None, **kwargs):
r"""
Block average (x,y,z) data tables by median estimation.
Reads arbitrarily located (x,y,z) triples [or optionally weighted
quadruples (x,y,z,w)] from a table and writes to the output a median
position and value for every non-empty block in a grid region defined by
the region and spacing arguments.
Full option list at :gmt-docs:`blockmedian.html`
{aliases}
Parameters
----------
table : pandas.DataFrame or str
Either a pandas dataframe with (x, y, z) or (longitude, latitude,
elevation) values in the first three columns, or a file name to an
ASCII data table.
spacing : str
*xinc*\[\ *unit*\][**+e**\|\ **n**]
[/*yinc*\ [*unit*][**+e**\|\ **n**]].
*xinc* [and optionally *yinc*] is the grid spacing.
region : str or list
*xmin/xmax/ymin/ymax*\[\ **+r**\][**+u**\ *unit*].
Specify the region of interest.
outfile : str
Required if ``table`` is a file. The file name for the output ASCII
file.
{V}
Returns
-------
output : pandas.DataFrame or None
Return type depends on whether the ``outfile`` parameter is set:
- :class:`pandas.DataFrame` table with (x, y, z) columns if ``outfile``
is not set
- None if ``outfile`` is set (filtered output will be stored in file
set by ``outfile``)
"""
kind = data_kind(table)
with GMTTempFile(suffix=".csv") as tmpfile:
with Session() as lib:
if kind == "matrix":
if not hasattr(table, "values"):
raise GMTInvalidInput(f"Unrecognized data type: {type(table)}")
file_context = lib.virtualfile_from_matrix(table.values)
elif kind == "file":
if outfile is None:
raise GMTInvalidInput("Please pass in a str to 'outfile'")
file_context = dummy_context(table)
else:
raise GMTInvalidInput(f"Unrecognized data type: {type(table)}")
with file_context as infile:
if outfile is None:
outfile = tmpfile.name
arg_str = " ".join([infile, build_arg_string(kwargs), "->" + outfile])
lib.call_module(module="blockmedian", args=arg_str)
# Read temporary csv output to a pandas table
if outfile == tmpfile.name: # if user did not set outfile, return pd.DataFrame
result = pd.read_csv(tmpfile.name, sep="\t", names=table.columns)
elif outfile != tmpfile.name: # return None if outfile set, output in outfile
result = None
return result
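# ---------------------------------------------------------------------------
# Illustrative usage sketch (added for clarity; not part of the original
# module). The column names, region and spacing values below are placeholders.
#
#     import pandas as pd
#     import pygmt
#
#     df = pd.DataFrame({"x": [1.2, 1.4, 2.1],
#                        "y": [5.0, 5.1, 6.3],
#                        "z": [10.0, 12.0, 9.0]})
#     out = pygmt.blockmedian(table=df, region=[0, 3, 4, 7], spacing="1/1")
#     # `out` is a pandas.DataFrame holding one median (x, y, z) row per block.
# ---------------------------------------------------------------------------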
|
the-stack_0_18153 | # qubit number=4
# total number=41
import cirq
import qiskit
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2
import numpy as np
import networkx as nx
def bitwise_xor(s: str, t: str) -> str:
length = len(s)
res = []
for i in range(length):
res.append(str(int(s[i]) ^ int(t[i])))
return ''.join(res[::-1])
def bitwise_dot(s: str, t: str) -> str:
length = len(s)
res = 0
for i in range(length):
res += int(s[i]) * int(t[i])
return str(res % 2)
def build_oracle(n: int, f) -> QuantumCircuit:
# implement the oracle O_f
# NOTE: use multi_control_toffoli_gate ('noancilla' mode)
# https://qiskit.org/documentation/_modules/qiskit/aqua/circuits/gates/multi_control_toffoli_gate.html
# https://quantumcomputing.stackexchange.com/questions/3943/how-do-you-implement-the-toffoli-gate-using-only-single-qubit-and-cnot-gates
# https://quantumcomputing.stackexchange.com/questions/2177/how-can-i-implement-an-n-bit-toffoli-gate
controls = QuantumRegister(n, "ofc")
target = QuantumRegister(1, "oft")
oracle = QuantumCircuit(controls, target, name="Of")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
oracle.mct(controls, target[0], None, mode='noancilla')
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.barrier()
return oracle
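# ---------------------------------------------------------------------------
# Illustrative sketch (added for clarity; not part of the original script):
# building the oracle for a toy 2-bit Boolean function. `toy_f` is a
# hypothetical placeholder.
#
#     toy_f = lambda rep: "1" if rep == "11" else "0"
#     toy_oracle = build_oracle(2, toy_f)  # 2 control qubits + 1 target qubit
#     print(toy_oracle)                    # inspect the resulting sub-circuit
# ---------------------------------------------------------------------------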
def make_circuit(n:int,f) -> QuantumCircuit:
# circuit begin
input_qubit = QuantumRegister(n,"qc")
classical = ClassicalRegister(n, "qm")
prog = QuantumCircuit(input_qubit, classical)
prog.cx(input_qubit[0],input_qubit[3]) # number=13
prog.cx(input_qubit[0],input_qubit[3]) # number=17
prog.x(input_qubit[3]) # number=18
prog.cx(input_qubit[0],input_qubit[3]) # number=19
prog.cx(input_qubit[0],input_qubit[3]) # number=15
prog.h(input_qubit[1]) # number=2
prog.h(input_qubit[1]) # number=31
prog.cz(input_qubit[2],input_qubit[1]) # number=32
prog.h(input_qubit[1]) # number=33
prog.h(input_qubit[2]) # number=3
prog.h(input_qubit[3]) # number=4
prog.y(input_qubit[3]) # number=12
prog.h(input_qubit[0]) # number=5
oracle = build_oracle(n-1, f)
prog.append(oracle.to_gate(),[input_qubit[i] for i in range(n-1)]+[input_qubit[n-1]])
prog.h(input_qubit[1]) # number=6
prog.h(input_qubit[2]) # number=7
prog.h(input_qubit[0]) # number=24
prog.cz(input_qubit[3],input_qubit[0]) # number=25
prog.h(input_qubit[0]) # number=26
prog.h(input_qubit[0]) # number=38
prog.cz(input_qubit[3],input_qubit[0]) # number=39
prog.h(input_qubit[0]) # number=40
prog.z(input_qubit[3]) # number=29
prog.cx(input_qubit[3],input_qubit[0]) # number=30
prog.x(input_qubit[2]) # number=23
prog.cx(input_qubit[3],input_qubit[0]) # number=22
prog.h(input_qubit[3]) # number=8
prog.h(input_qubit[0]) # number=9
prog.y(input_qubit[2]) # number=10
prog.y(input_qubit[2]) # number=11
prog.x(input_qubit[3]) # number=36
prog.cx(input_qubit[3],input_qubit[0]) # number=34
prog.cx(input_qubit[3],input_qubit[0]) # number=35
prog.x(input_qubit[2]) # number=37
# circuit end
for i in range(n):
prog.measure(input_qubit[i], classical[i])
return prog
if __name__ == '__main__':
a = "111"
b = "0"
f = lambda rep: bitwise_xor(bitwise_dot(a, rep), b)
prog = make_circuit(4,f)
backend = BasicAer.get_backend('qasm_simulator')
    sample_shot = 8000
info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
backend = FakeVigo()
circuit1 = transpile(prog,backend,optimization_level=2)
writefile = open("../data/startQiskit3049.csv","w")
print(info,file=writefile)
print("results end", file=writefile)
print(circuit1.__len__(),file=writefile)
print(circuit1,file=writefile)
writefile.close()
|
the-stack_0_18154 | """Project: Eskapade - A python-based package for data analysis.
Macro: esk402_roodatahist_fill
Created: 2017/03/28
Description:
This macro illustrates how to fill a N-dimensional roodatahist from a
pandas dataframe. (A roodatahist can be filled iteratively, while looping
over multiple pandas dataframes.) The roodatahist can be used to create
a roofit histogram-pdf (roohistpdf).
Authors:
KPMG Advanced Analytics & Big Data team, Amstelveen, The Netherlands
Redistribution and use in source and binary forms, with or without
modification, are permitted according to the terms listed in the file
LICENSE.
"""
from eskapade import ConfigObject, Chain, process_manager
from eskapade import core_ops, analysis
from eskapade.logger import Logger, LogLevel
from esroofit import resources
from esroofit.links import RooDataHistFiller
logger = Logger()
logger.debug('Now parsing configuration file esk402_roodatahist_fill')
#########################################################################################
# --- minimal analysis information
settings = process_manager.service(ConfigObject)
settings['analysisName'] = 'esk402_roodatahist_fill'
settings['version'] = 0
#########################################################################################
# --- Analysis values, settings, helper functions, configuration flags.
input_files = [resources.fixture('mock_accounts.csv.gz')]
#########################################################################################
# --- now set up the chains and links based on configuration flags
ch = Chain('Data')
# --- 0. readdata keeps on opening the next file in the file list.
# all kwargs are passed on to pandas file reader.
read_data = analysis.ReadToDf(name='dflooper', key='accounts', reader='csv')
read_data.path = input_files
# readdata.itr_over_files = True
ch.add(read_data)
# --- 1. add the record factorizer to convert categorical observables into integers
# Here the columns dummy and loc of the input dataset are factorized
# e.g. x = ['apple', 'tree', 'pear', 'apple', 'pear'] becomes the column:
# x = [0, 1, 2, 0, 2]
# By default, the mapping is stored in a dict under key: 'map_'+store_key+'_to_original'
fact = analysis.RecordFactorizer(name='rf1')
fact.columns = ['isActive', 'eyeColor', 'favoriteFruit', 'gender']
fact.read_key = 'accounts'
fact.inplace = True
# factorizer stores a dict with the mappings that have been applied to all observables
fact.sk_map_to_original = 'to_original'
# factorizer also stores a dict with the mappings back to the original observables
fact.sk_map_to_factorized = 'to_factorized'
fact.logger.log_level = LogLevel.DEBUG
ch.add(fact)
# --- 2. Fill a roodatahist
df2rdh = RooDataHistFiller()
df2rdh.read_key = read_data.key
df2rdh.store_key = 'rdh_' + read_data.key
# the observables in this map are treated as categorical observables by roofit (roocategories)
df2rdh.map_to_factorized = 'to_factorized'
df2rdh.columns = ['transaction', 'latitude', 'longitude', 'age', 'eyeColor', 'favoriteFruit']
# df2rdh.into_ws = True
ch.add(df2rdh)
# --- print contents of the datastore
overview = Chain('Overview')
pds = core_ops.PrintDs()
pds.keys = ['n_rdh_accounts', 'n_accounts']
overview.add(pds)
#########################################################################################
logger.debug('Done parsing configuration file esk402_roodatahist_fill')
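# ---------------------------------------------------------------------------
# Illustrative note (added; not part of the original macro): with Eskapade
# installed, a macro like this one is typically executed via the command-line
# runner, e.g. `eskapade_run esk402_roodatahist_fill.py` (the exact path and
# runner options may differ per installation).
# ---------------------------------------------------------------------------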
|
the-stack_0_18157 | from mediocre_lux.game import Game
from mediocre_lux.game_map import Position
import mediocre_lux.game_objects as go
import sys
# print("SOME RANDOM NUMBER:", random.random(), file=sys.stderr)
from mediocre_lux.constants import ValidActions, log, print, StrategyTypes, LogicGlobals, ALL_DIRECTIONS, ResourceTypes, STRATEGY_HYPERPARAMETERS, GAME_CONSTANTS
from mediocre_lux.strategies import starter_strategy, time_based_strategy, research_based_strategy
from mediocre_lux.strategy_utils import compute_tbs_com
from collections import deque, Counter, UserDict
from itertools import chain
from mediocre_lux import annotate
from random import seed
import getpass
import math
seed(69420)
### Define helper functions
def set_unit_task(unit, player):
max_turn = STRATEGY_HYPERPARAMETERS[f"END_GAME_{LogicGlobals.game_state.map.width}X{LogicGlobals.game_state.map.height}"]
if LogicGlobals.game_state.turn >= max_turn:
if player.research_points < GAME_CONSTANTS["PARAMETERS"]["RESEARCH_REQUIREMENTS"]["COAL"]:
time_based_strategy(unit, player)
else:
research_based_strategy(unit, player)
else:
starter_strategy(unit, player)
# if player.researched_coal():
# research_based_strategy(unit, player)
# else:
# starter_strategy(unit, player)
# if LogicGlobals.game_state.turn < 200:
# starter_strategy(unit, player)
# else:
# time_based_strategy(unit, player)
def set_unit_strategy(player):
return
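    # NOTE: the early return above short-circuits this function, so the
    # strategy-switching logic below is currently unreachable (kept for reference).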
max_turn = STRATEGY_HYPERPARAMETERS[
f"END_GAME_{LogicGlobals.game_state.map.width}X{LogicGlobals.game_state.map.height}"]
if player.current_strategy != StrategyTypes.STARTER or (LogicGlobals.game_state.turn >= max_turn and len(player.units) >= 2):
if player.researched_coal():
if player.current_strategy != StrategyTypes.RESEARCH_BASED:
player.current_strategy = StrategyTypes.RESEARCH_BASED
else:
if player.current_strategy != StrategyTypes.TIME_BASED:
player.current_strategy = StrategyTypes.TIME_BASED
if LogicGlobals.TBS_COM is None:
LogicGlobals.TBS_COM = compute_tbs_com(LogicGlobals.game_state.map)
print(f"New TBS COM is: {LogicGlobals.TBS_COM}")
NEW_STRATEGY_UNITS = {
u.id for u in player.units if u.current_strategy != StrategyTypes.STARTER
}
cutoff = STRATEGY_HYPERPARAMETERS[f'QUADRATIC_CUTOFF_{LogicGlobals.game_state.map.width}X{LogicGlobals.game_state.map.height}']
if LogicGlobals.game_state.turn <= cutoff:
num_starter = max(0, math.ceil(((LogicGlobals.game_state.turn - cutoff) / cutoff) ** 2 * (len(player.units) - 2)))
else:
num_starter = 0
num_new_strat = len(player.units) - num_starter # TODO: THIS BREAKS IF PLAYER HAS CARTS?
print("NUMBER OF STARTER UNITS:", num_starter, "NUMBER OF TIME BASED UNITS:", num_new_strat, "LENGTH OF already time-based:", len(NEW_STRATEGY_UNITS))
if num_new_strat > len(NEW_STRATEGY_UNITS):
if player.researched_coal():
closest_units = [u for u in player.units if u.current_strategy == StrategyTypes.STARTER]
# print(closest_units)
for unit in closest_units[:len(NEW_STRATEGY_UNITS) - num_new_strat]:
# unit.current_strategy = StrategyTypes.TIME_BASED
if unit.current_strategy != StrategyTypes.RESEARCH_BASED:
unit.current_strategy = StrategyTypes.RESEARCH_BASED
unit.reset() # TODO: CHECK FOR THINGS THAT THE UNIT IS BUILDING AND REMOVE THEM?
# UNIT_TASK_FROM_STRATEGY[unit.id] = research_based_strategy
# unit.set_task_from_strategy = types.MethodType(time_based_strategy, unit)
# TIME_BASED_STRATEGY_UNITS.add(unit.ID)
return
else:
closest_units = sorted(
[u for u in player.units if u.current_strategy == StrategyTypes.STARTER], key=lambda u: (u.pos.distance_to(LogicGlobals.TBS_COM),u.id if getpass.getuser() == 'Paul' else 0)
)
# print(closest_units)
for unit in closest_units[:len(NEW_STRATEGY_UNITS) - num_new_strat]:
# unit.current_strategy = StrategyTypes.TIME_BASED
if unit.current_strategy != StrategyTypes.TIME_BASED:
unit.current_strategy = StrategyTypes.TIME_BASED
unit.reset() # TODO: CHECK FOR THINGS THAT THE UNIT IS BUILDING AND REMOVE THEM?
# UNIT_TASK_FROM_STRATEGY[unit.id] = time_based_strategy
# unit.set_task_from_strategy = types.MethodType(time_based_strategy, unit)
# TIME_BASED_STRATEGY_UNITS.add(unit.ID)
return
# for unit in player.units:
# if unit.current_strategy == StrategyTypes.STARTER:
# UNIT_TASK_FROM_STRATEGY[unit.id] = starter_strategy
# unit.set_task_from_strategy = types.MethodType(starter_strategy, unit)
def update_logic_globals(player):
if LogicGlobals.game_state.turn == 0:
for unit in player.units:
unit.has_colonized = True
if LogicGlobals.radius_for_clusters == 0:
LogicGlobals.radius_for_clusters = max(
[
player.units[0].pos.distance_to(Position(0, 0)),
player.units[0].pos.distance_to(Position(0, LogicGlobals.game_state.map.height)),
player.units[0].pos.distance_to(Position(LogicGlobals.game_state.map.width, 0)),
player.units[0].pos.distance_to(Position(LogicGlobals.game_state.map.width, LogicGlobals.game_state.map.height))
]
)
LogicGlobals.unlocked_coal = player.researched_coal()
LogicGlobals.unlocked_uranium = player.researched_uranium()
LogicGlobals.unlock_coal_memory.append(LogicGlobals.unlocked_coal)
LogicGlobals.unlock_uranium_memory.append(LogicGlobals.unlocked_uranium)
LogicGlobals.cities = player.cities
for city_id, city in player.cities.items():
city.update_resource_positions(LogicGlobals.game_state.map)
LogicGlobals.clusters_to_colonize = set()
for cluster in LogicGlobals.game_state.map.resource_clusters:
if cluster.total_amount >= 0:
if cluster.type == ResourceTypes.URANIUM:
if LogicGlobals.unlocked_uranium:
LogicGlobals.clusters_to_colonize.add(cluster)
elif cluster.type == ResourceTypes.COAL:
if LogicGlobals.unlocked_coal:
LogicGlobals.clusters_to_colonize.add(cluster)
else:
LogicGlobals.clusters_to_colonize.add(cluster)
# if cluster.type == Constants.RESOURCE_TYPES.WOOD and cluster.n_defended == 0:
# LogicGlobals.clusters_to_colonize.add(cluster)
units_lost = set(go.UNIT_CACHE) - player.unit_ids
for id_ in units_lost:
go.UNIT_CACHE.pop(id_)
for k, v in LogicGlobals.CLUSTER_ID_TO_BUILDERS.items():
LogicGlobals.CLUSTER_ID_TO_BUILDERS[k] = LogicGlobals.CLUSTER_ID_TO_BUILDERS.get(k, set()) & player.unit_ids
for k, v in LogicGlobals.CLUSTER_ID_TO_MANAGERS.items():
LogicGlobals.CLUSTER_ID_TO_MANAGERS[k] = LogicGlobals.CLUSTER_ID_TO_MANAGERS.get(k, set()) & player.unit_ids
# print("Builders:", ", ".join([f"{LogicGlobals.game_state.map.get_cluster_by_id(k)}: {v}" for k, v in LogicGlobals.CLUSTER_ID_TO_BUILDERS.items()]))
# print("Managers:", ", ".join([f"{LogicGlobals.game_state.map.get_cluster_by_id(k)}: {v}" for k, v in LogicGlobals.CLUSTER_ID_TO_MANAGERS.items()]))
LogicGlobals.RBS_cluster_carts = {}
for unit in LogicGlobals.player.units:
if unit.is_cart():
if unit.id not in go.UNIT_CACHE:
unit.cluster_to_defend_id = LogicGlobals.game_state.map.get_cell_by_pos(unit.pos).citytile.cluster_to_defend_id
            LogicGlobals.RBS_cluster_carts.setdefault(unit.cluster_to_defend_id, set()).add(unit.id)
def gather_turn_information(player, opponent):
blocked_positions = set()
enemy_blocked_positions = set()
# Not sure if this is good or not
# for cell in LogicGlobals.game_state.map.cells():
# if cell.has_resource() and cell.resource.type == Constants.RESOURCE_TYPES.WOOD:
# if cell.resource.amount < 500:
# blocked_positions = blocked_positions | cell.pos.adjacent_positions()
# for unit in opponent.units:
# # TODO: This may cause issues in the endgame. This may have to depend on map size
# # if LogicGlobals.game_state.turn < 200:
# # blocked_positions = blocked_positions | unit.pos.adjacent_positions()
# enemy_blocked_positions = enemy_blocked_positions | unit.pos.adjacent_positions()
for __, city in opponent.cities.items():
for tile in city.citytiles:
blocked_positions.add(tile.pos)
enemy_blocked_positions.add(tile.pos)
LogicGlobals.pos_being_built = set()
LogicGlobals.RESOURCES_BEING_COLLECTED = {}
for unit in player.units:
unit.check_for_task_completion(game_map=LogicGlobals.game_state.map, player=player)
blocked_positions.add(unit.pos)
for task, target in chain([unit.current_task if unit.current_task is not None else (None, None)], unit.task_q):
if task == ValidActions.BUILD:
LogicGlobals.pos_being_built.add(target)
elif task == ValidActions.MANAGE:
if target in player.cities:
player.cities[target].managers.add(unit.id)
else:
unit.current_task = None
elif task == ValidActions.COLLECT:
LogicGlobals.RESOURCES_BEING_COLLECTED[target] = LogicGlobals.RESOURCES_BEING_COLLECTED.get(target, set()) | {unit.id}
    clusters_already_being_colonized = {
        c for c in LogicGlobals.clusters_to_colonize
        if any(p in LogicGlobals.pos_being_built for p in c.pos_to_defend)
        or any(p in player.city_pos for p in c.pos_defended)
    }
    LogicGlobals.clusters_to_colonize = LogicGlobals.clusters_to_colonize - clusters_already_being_colonized
LogicGlobals.max_resource_cluster_amount = max(
[1] + [c.total_amount for c in LogicGlobals.clusters_to_colonize]
)
for c in LogicGlobals.clusters_to_colonize:
c.calculate_score(player, opponent, scaling_factor=LogicGlobals.max_resource_cluster_amount)
for city_id, city in LogicGlobals.player.cities.items():
log(f"Turn {LogicGlobals.game_state.turn} city {city_id} managers: {city.managers}")
for __, city in player.cities.items():
for tile in city.citytiles:
blocked_positions.discard(tile.pos)
deleted_cities = set()
for p in LogicGlobals.TBS_citytiles:
if LogicGlobals.game_state.map.get_cell_by_pos(p).citytile is None:
deleted_cities.add(p)
LogicGlobals.TBS_citytiles = LogicGlobals.TBS_citytiles - deleted_cities
deleted_cities = set()
for p in LogicGlobals.RBS_citytiles:
if LogicGlobals.game_state.map.get_cell_by_pos(p).citytile is None:
deleted_cities.add(p)
LogicGlobals.RBS_citytiles = LogicGlobals.RBS_citytiles - deleted_cities
# for __, city in player.cities.items():
# for tile in city.citytiles:
# if LogicGlobals.game_state.turns_until_next_night > 3: TODO: This fails for managing positions. his may have to depend on cluster resource amount
# blocked_positions.discard(tile.pos)
# else:
# blocked_positions.add(tile.pos)
# log(f"Turn {LogicGlobals.game_state.turn} - City {city.cityid} managers: {city.managers}")
return blocked_positions, enemy_blocked_positions
def unit_action_resolution(player, opponent):
actions = []
blocked_positions, enemy_blocked_positions = gather_turn_information(player, opponent)
set_unit_strategy(player)
for unit in sorted(player.units, key=lambda u: (u.cargo.wood, u.id if getpass.getuser() == 'Paul' else 0))[::-1]:
if unit.current_task is None:
unit.get_task_from_strategy(player)
# unit.set_task_from_strategy(player)
# set_unit_task(unit, player)
for cluster_id, builders in LogicGlobals.CLUSTER_ID_TO_BUILDERS.items():
pos_should_be_built = set()
cluster_to_defend = LogicGlobals.game_state.map.get_cluster_by_id(cluster_id)
if cluster_to_defend is None:
continue
        for pos in cluster_to_defend.pos_to_defend:
if len(pos_should_be_built) >= len(builders):
break
cell = LogicGlobals.game_state.map.get_cell_by_pos(pos)
if cell.is_empty():
pos_should_be_built.add(cell.pos)
# if len(pos_should_be_built) < len(builders):
# continue
units_that_should_switch_builds = set()
for unit_id in builders:
unit = LogicGlobals.player.get_unit_by_id(unit_id)
if unit is None:
continue
current_build_pos = None
if unit.current_task and unit.current_task[0] == ValidActions.BUILD:
current_build_pos = unit.current_task[1]
else:
for task in unit.task_q:
if task[0] == ValidActions.BUILD:
current_build_pos = task[1]
break
if current_build_pos is None:
pass
# print(f"BUILDER {unit.id} assigned to cluster {unit.cluster_to_defend_id} has no build task!!!")
else:
if current_build_pos in pos_should_be_built:
pos_should_be_built.discard(current_build_pos)
else:
units_that_should_switch_builds.add(unit)
for unit in units_that_should_switch_builds:
if pos_should_be_built:
new_target = min(pos_should_be_built, key=lambda p: (unit.pos.distance_to(p), p.x, p.y))
pos_should_be_built.discard(new_target)
# print(f"Switching BUILDER {unit.id} target to {new_target}")
unit.remove_next_build_action()
unit.set_task(ValidActions.BUILD, new_target)
# for unit in player.units:
# action, target = unit.propose_action(player, LogicGlobals.game_state)
# log(f"Turn {LogicGlobals.game_state.turn}: Unit {unit.id} at {unit.pos} proposed action {action} with target {target}")
# if action == ValidActions.MOVE:
# blocked_positions.discard(unit.pos)
debug_info = []
proposed_positions = {}
units_wanting_to_move = set()
for unit in player.units:
action, target, *extra = unit.propose_action(
player, LogicGlobals.game_state
)
log(
f"Unit {unit.id} at {unit.pos} proposed action {action} with target {target}",
)
if action is None:
continue
elif action == ValidActions.BUILD:
actions.append(unit.build_city(logs=debug_info))
# if player.current_strategy == StrategyTypes.TIME_BASED:
if unit.current_strategy == StrategyTypes.TIME_BASED:
LogicGlobals.TBS_citytiles.add(unit.pos)
# elif player.current_strategy == StrategyTypes.RESEARCH_BASED:
elif unit.current_strategy == StrategyTypes.RESEARCH_BASED:
LogicGlobals.RBS_citytiles.add(unit.pos)
elif action == ValidActions.TRANSFER:
actions.append(unit.transfer(*extra, logs=debug_info))
unit.did_just_transfer = True
elif action == ValidActions.PILLAGE:
actions.append(unit.pillage(logs=debug_info))
elif action == ValidActions.MOVE:
if target == unit.pos:
continue
blocked_positions.discard(unit.pos)
pos_to_check = {}
for direction in ALL_DIRECTIONS:
new_pos = unit.pos.translate(direction, 1)
# if new_pos in enemy_blocked_positions:
# continue
# if new_pos in player.city_pos and LogicGlobals.game_state.turns_until_next_night < 3:
# continue
if new_pos in enemy_blocked_positions:
continue
if not LogicGlobals.game_state.map.is_within_bounds(new_pos):
continue
if new_pos in unit.previous_pos:
continue
if LogicGlobals.game_state.map.get_cell_by_pos(unit.pos).citytile is not None and LogicGlobals.game_state.turns_until_next_night <= 1 and LogicGlobals.game_state.map.get_cell_by_pos(new_pos).citytile is None and LogicGlobals.game_state.map.num_adjacent_resources(new_pos, include_center=True, include_wood_that_is_growing=True) == 0:
continue
new_pos_contains_citytile = LogicGlobals.game_state.map.get_cell_by_pos(new_pos).citytile is not None
if new_pos_contains_citytile and unit.should_avoid_citytiles:
tiles_not_blocked = {unit.pos, target}
for p in [unit.pos, target]:
cell = LogicGlobals.game_state.map.get_cell_by_pos(p)
if cell.citytile is not None:
city_id = cell.citytile.cityid
if city_id in LogicGlobals.player.cities:
tiles_not_blocked = tiles_not_blocked | {c.pos for c in LogicGlobals.player.cities[city_id].citytiles}
if new_pos not in tiles_not_blocked and unit.turns_spent_waiting_to_move < 5:
continue
pos_to_check[direction] = new_pos
if not pos_to_check:
unit.turns_spent_waiting_to_move += 1
if unit.pos not in player.city_pos:
blocked_positions.add(unit.pos)
continue
unit.dirs_to_move = unit.pos.sort_directions_by_turn_distance(
target, LogicGlobals.game_state.map, cooldown=GAME_CONSTANTS['PARAMETERS']['UNIT_ACTION_COOLDOWN'][unit.type_str],
pos_to_check=pos_to_check, tolerance=max(0, unit.turns_spent_waiting_to_move - 3),
avoid_own_cities=unit.should_avoid_citytiles
)
unit.dirs_to_move = deque((d, pos_to_check[d]) for d in unit.dirs_to_move)
if not unit.dirs_to_move:
unit.turns_spent_waiting_to_move += 1
if unit.pos not in player.city_pos:
blocked_positions.add(unit.pos)
continue
unit.move_target = target
units_wanting_to_move.add(unit)
proposed_positions = dict()
while any(unit.dirs_to_move for unit in units_wanting_to_move) and (not proposed_positions or any(len(units) > 2 for units in proposed_positions.values())):
units_with_movement_resolved = set()
for unit in units_wanting_to_move:
if not proposed_positions or not any(unit.id in [u[0].id for u in units] for units in proposed_positions.values()):
if not unit.dirs_to_move:
units_with_movement_resolved.add(unit)
unit.turns_spent_waiting_to_move += 1
if unit.pos not in player.city_pos:
blocked_positions.add(unit.pos)
continue
dir_to_move, new_pos = unit.dirs_to_move.popleft()
proposed_positions.setdefault(new_pos, []).append((unit, dir_to_move))
pos_to_discard = []
for pos, units in sorted(proposed_positions.items(), key=lambda tup: (-len(tup[1]), tup[0].x, tup[0].y)):
# for pos, units in proposed_positions.items():
if pos in blocked_positions:
pos_to_discard.append(pos)
continue
if pos in player.city_pos:
for unit, direction in units:
unit.turns_spent_waiting_to_move = 0
actions.append(unit.move(direction, logs=debug_info))
units_with_movement_resolved.add(unit)
pos_to_discard.append(pos)
elif len(units) > 1:
unit, direction = max(
units,
key=lambda pair: (
pair[0].pos in proposed_positions and any(u[0].pos == pos for u in proposed_positions[pair[0].pos]),
not pos == pair[0].move_target,
pair[0].turns_spent_waiting_to_move,
pair[0].is_building(),
pair[0].id if getpass.getuser() == 'Paul' else 0
)
)
unit.turns_spent_waiting_to_move = 0
actions.append(unit.move(direction, logs=debug_info))
units_with_movement_resolved.add(unit)
blocked_positions.add(pos)
pos_to_discard.append(pos)
for pos in pos_to_discard:
proposed_positions.pop(pos)
units_wanting_to_move = units_wanting_to_move - units_with_movement_resolved
units_with_movement_resolved = set()
for unit in units_wanting_to_move:
if not proposed_positions or not any(unit.id in [u[0].id for u in units] for units in proposed_positions.values()):
if not unit.dirs_to_move:
units_with_movement_resolved.add(unit)
unit.turns_spent_waiting_to_move += 1
if unit.pos not in player.city_pos:
blocked_positions.add(unit.pos)
continue
dir_to_move, new_pos = unit.dirs_to_move.popleft()
proposed_positions.setdefault(new_pos, []).append((unit, dir_to_move))
for pos, units in proposed_positions.items():
if pos in blocked_positions:
continue
if pos in player.city_pos:
for unit, direction in units:
unit.turns_spent_waiting_to_move = 0
actions.append(unit.move(direction, logs=debug_info))
units_with_movement_resolved.add(unit)
else:
unit, direction = max(
units,
key=lambda pair: (
pair[0].pos in proposed_positions and any(u[0].pos == pos for u in proposed_positions[pair[0].pos]),
not pos == pair[0].move_target,
pair[0].turns_spent_waiting_to_move,
pair[0].is_building(),
pair[0].id if getpass.getuser() == 'Paul' else 0
)
)
unit.turns_spent_waiting_to_move = 0
actions.append(unit.move(direction, logs=debug_info))
units_with_movement_resolved.add(unit)
blocked_positions.add(pos)
units_wanting_to_move = units_wanting_to_move - units_with_movement_resolved
for unit in units_wanting_to_move:
unit.turns_spent_waiting_to_move += 1
return actions, debug_info
def agent(observation, configuration, include_debug_for_vis=True):
### Do not edit ###
if observation["step"] == 0:
LogicGlobals.game_state = Game(*observation["updates"][:2])
LogicGlobals.game_state.update(observation["updates"][2:], observation.player)
LogicGlobals.game_state.id = observation.player
LogicGlobals.player.current_strategy = StrategyTypes.STARTER
else:
LogicGlobals.game_state.update(observation["updates"], observation.player)
### AI Code goes down here! ###
update_logic_globals(LogicGlobals.player)
# actions, debug_info = old_unit_action_resolution(player, opponent)
actions, debug_info = unit_action_resolution(LogicGlobals.player, LogicGlobals.opponent)
spawned_this_round = 0
for _, city in sorted(LogicGlobals.player.cities.items(), key=lambda pair: -int(pair[0].split("_")[1])):
for city_tile in city.citytiles:
if city_tile.can_act():
if len(LogicGlobals.player.units) + spawned_this_round < LogicGlobals.player.city_tile_count:
if LogicGlobals.player.current_strategy == StrategyTypes.STARTER:
actions.append(city_tile.build_worker())
spawned_this_round += 1
cluster = LogicGlobals.game_state.map.position_to_cluster(city_tile.pos)
if cluster is not None:
cluster.n_workers_spawned += 1
else:
if LogicGlobals.player.current_strategy == StrategyTypes.STARTER and not LogicGlobals.player.researched_uranium():
actions.append(city_tile.research())
LogicGlobals.player.research_points += 1
# for _, city in LogicGlobals.player.cities.items():
# for city_tile in city.citytiles:
# if city_tile.can_act():
# if len(LogicGlobals.player.units) < LogicGlobals.player.city_tile_count:
# if LogicGlobals.player.current_strategy == StrategyTypes.STARTER:
# actions.append(city_tile.build_worker())
# cluster = LogicGlobals.game_state.map.position_to_cluster(city_tile.pos)
# if cluster is not None:
# cluster.n_workers_spawned += 1
# elif LogicGlobals.player.current_strategy == StrategyTypes.TIME_BASED:
# if city_tile.pos in LogicGlobals.TBS_citytiles:
# actions.append(city_tile.build_worker())
# elif LogicGlobals.player.current_strategy == StrategyTypes.RESEARCH_BASED:
# if city_tile.cluster_to_defend_id is not None:
# existing_carts = LogicGlobals.RBS_cluster_carts.get(city_tile.cluster_to_defend_id, set())
# if len(existing_carts) < STRATEGY_HYPERPARAMETERS['RBS'][LogicGlobals.RBS_rtype.upper()]['MAX_CARTS_PER_CLUSTER']:
# actions.append(city_tile.build_cart())
# existing_carts.add(f"Pending_cart_{city_tile.pos}")
# LogicGlobals.RBS_cluster_carts[city_tile.cluster_to_defend_id] = existing_carts
# continue
# if city_tile.pos in LogicGlobals.RBS_citytiles:
# actions.append(city_tile.build_worker())
# else:
# if LogicGlobals.player.current_strategy == StrategyTypes.STARTER and not LogicGlobals.player.researched_uranium():
# actions.append(city_tile.research())
# LogicGlobals.player.research_points += 1
# for _, city in LogicGlobals.player.cities.items():
# for city_tile in city.citytiles:
# if city_tile.can_act():
# if len(LogicGlobals.player.units) < LogicGlobals.player.city_tile_count:
# if LogicGlobals.player.current_strategy == StrategyTypes.STARTER:
# cluster = LogicGlobals.game_state.map.get_cluster_by_id(city_tile.cluster_to_defend_id)
# if cluster is not None:
# if len(LogicGlobals.CLUSTER_ID_TO_BUILDERS[cluster.id]) + len(LogicGlobals.CLUSTER_ID_TO_MANAGERS[cluster.id]) < len(cluster.pos_defended_by_player):
# actions.append(city_tile.build_worker())
# cluster.n_workers_spawned += 1
# continue
# if LogicGlobals.player.current_strategy == StrategyTypes.STARTER and not LogicGlobals.player.researched_uranium():
# actions.append(city_tile.research())
# LogicGlobals.player.research_points += 1
# DEBUG STUFF
if include_debug_for_vis:
actions.append(
annotate.sidetext(
f"Current Strategy: {LogicGlobals.player.current_strategy}",
)
)
actions.append(
annotate.sidetext(
f"Found {len(LogicGlobals.game_state.map.resource_clusters)} clusters",
)
)
actions.append(
annotate.sidetext(
"Cluster - N_resource - N_defend - Score",
)
)
for cluster in sorted(LogicGlobals.game_state.map.resource_clusters, key=lambda c: (c.center_pos.x, c.center_pos.y)):
actions.append(
annotate.sidetext(
annotate.format_message(f"{cluster.center_pos} - {cluster.total_amount:4d} - {cluster.n_to_block:1d} - {cluster.current_score:0.5f}"),
)
)
# for pos in cluster.resource_positions:
# actions.append(annotate.circle(pos.x, pos.y))
# for pos in cluster.pos_to_defend:
# actions.append(annotate.x(pos.x, pos.y))
# actions.append(annotate.sidetext("STRATEGIES"))
# for unit in LogicGlobals.player.units:
# actions.append(
# annotate.sidetext(
# f"{unit.id}: {unit.current_strategy}"
# )
# )
actions.append(annotate.sidetext("GOAL TASKS"))
for unit in LogicGlobals.player.units:
# if unit.current_task is not None:
# __, target = unit.current_task
# if type(target) is Position:
# actions.append(
# annotate.line(unit.pos.x, unit.pos.y, target.x, target.y)
# )
if unit.task_q:
actions.append(
annotate.sidetext(
annotate.format_message(f"{unit.id}: {unit.task_q[-1][0]} at {unit.task_q[-1][1]} ")
)
)
else:
actions.append(
annotate.sidetext(
f"{unit.id}: None"
)
)
actions.append(annotate.sidetext("CURRENT TASK"))
for unit in LogicGlobals.player.units:
if unit.current_task is None:
actions.append(
annotate.sidetext(
f"{unit.id}: None"
)
)
else:
actions.append(
annotate.sidetext(
annotate.format_message(f"{unit.id}: {unit.current_task[0]} at {unit.current_task[1]} ")
)
)
actions.append(annotate.sidetext("TASK QUEUE"))
for unit in LogicGlobals.player.units:
if unit.task_q:
actions.append(
annotate.sidetext(
# annotate.format_message(f"{unit.id}: {unit.task_q[-1][0]} at {unit.task_q[-1][1]} ")
annotate.format_message(
f"{unit.id}: " +
" - ".join(
[f"{t[0]} to {t[1]}"
if t[0] == 'move' else f"{t[0]} at {t[1]}"
for t in unit.task_q
]
)
)
)
)
else:
actions.append(
annotate.sidetext(
f"{unit.id}: None"
)
)
actions.append(annotate.sidetext("ACTIONS"))
for uid, action, target in debug_info:
actions.append(
annotate.sidetext(
annotate.format_message(f"{uid}: {action} with target {target}")
)
)
actions.append(annotate.text(15, 15, "A"))
return actions
|
the-stack_0_18158 | from __future__ import print_function
import pickle
import json
import csv
import sys
# Allow us to import the deepmoji directory
from os.path import dirname, abspath
sys.path.insert(0, dirname(dirname(abspath(__file__))))
from deepmoji.sentence_tokenizer import SentenceTokenizer, coverage
OUTPUT_PATH = 'coverage.csv'
DATASET_PATHS = [
'../data/Olympic/raw.pickle',
'../data/PsychExp/raw.pickle',
'../data/SCv1/raw.pickle',
'../data/SCv2-GEN/raw.pickle',
'../data/SE0714/raw.pickle',
#'../data/SE1604/raw.pickle', # Excluded due to Twitter's ToS
'../data/SS-Twitter/raw.pickle',
'../data/SS-Youtube/raw.pickle',
]
with open('../model/vocabulary.json', 'r') as f:
vocab = json.load(f)
results = []
for p in DATASET_PATHS:
coverage_result = [p]
print('Calculating coverage for {}'.format(p))
    with open(p, 'rb') as f:
s = pickle.load(f)
# Decode data
try:
s['texts'] = [unicode(x) for x in s['texts']]
except UnicodeDecodeError:
s['texts'] = [x.decode('utf-8') for x in s['texts']]
# Own
st = SentenceTokenizer({}, 30)
tests, dicts, _ = st.split_train_val_test(s['texts'], s['info'],
[s['train_ind'],
s['val_ind'],
s['test_ind']],
extend_with=10000)
coverage_result.append(coverage(tests[2]))
# Last
st = SentenceTokenizer(vocab, 30)
tests, dicts, _ = st.split_train_val_test(s['texts'], s['info'],
[s['train_ind'],
s['val_ind'],
s['test_ind']],
extend_with=0)
coverage_result.append(coverage(tests[2]))
# Full
st = SentenceTokenizer(vocab, 30)
tests, dicts, _ = st.split_train_val_test(s['texts'], s['info'],
[s['train_ind'],
s['val_ind'],
s['test_ind']],
extend_with=10000)
coverage_result.append(coverage(tests[2]))
results.append(coverage_result)
with open(OUTPUT_PATH, 'wb') as csvfile:
writer = csv.writer(csvfile, delimiter='\t', lineterminator='\n')
writer.writerow(['Dataset', 'Own', 'Last', 'Full'])
for i, row in enumerate(results):
try:
writer.writerow(row)
        except Exception:
print("Exception at row {}!".format(i))
print('Saved to {}'.format(OUTPUT_PATH))
|
the-stack_0_18159 | #!/usr/bin/python3
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import division, unicode_literals, print_function
import cmd
import codecs
import csv
import errno
import getpass
import optparse
import os
import platform
import re
import stat
import sys
import traceback
import warnings
import webbrowser
from contextlib import contextmanager
from glob import glob
from uuid import UUID
if sys.version_info < (3, 6) and sys.version_info[0:2] != (2, 7):
sys.exit("\ncqlsh requires Python 3.6+\n")
# see CASSANDRA-10428
if platform.python_implementation().startswith('Jython'):
sys.exit("\nCQL Shell does not run on Jython\n")
UTF8 = 'utf-8'
CP65001 = 'cp65001' # Win utf-8 variant
description = "CQL Shell for Apache Cassandra"
version = "6.0.0"
readline = None
try:
# check if tty first, cause readline doesn't check, and only cares
# about $TERM. we don't want the funky escape code stuff to be
# output if not a tty.
if sys.stdin.isatty():
import readline
except ImportError:
pass
CQL_LIB_PREFIX = 'cassandra-driver-internal-only-'
CASSANDRA_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..')
CASSANDRA_CQL_HTML_FALLBACK = 'https://cassandra.apache.org/doc/latest/cql/index.html'
# default location of local CQL.html
if os.path.exists(CASSANDRA_PATH + '/doc/cql3/CQL.html'):
# default location of local CQL.html
CASSANDRA_CQL_HTML = 'file://' + CASSANDRA_PATH + '/doc/cql3/CQL.html'
elif os.path.exists('/usr/share/doc/cassandra/CQL.html'):
# fallback to package file
CASSANDRA_CQL_HTML = 'file:///usr/share/doc/cassandra/CQL.html'
else:
# fallback to online version
CASSANDRA_CQL_HTML = CASSANDRA_CQL_HTML_FALLBACK
# On Linux, the Python webbrowser module uses the 'xdg-open' executable
# to open a file/URL. But that only works, if the current session has been
# opened from _within_ a desktop environment. I.e. 'xdg-open' will fail,
# if the session's been opened via ssh to a remote box.
#
try:
webbrowser.register_standard_browsers() # registration is otherwise lazy in Python3
except AttributeError:
pass
if webbrowser._tryorder and webbrowser._tryorder[0] == 'xdg-open' and os.environ.get('XDG_DATA_DIRS', '') == '':
# only on Linux (some OS with xdg-open)
webbrowser._tryorder.remove('xdg-open')
webbrowser._tryorder.append('xdg-open')
# use bundled lib for python-cql if available. if there
# is a ../lib dir, use bundled libs there preferentially.
ZIPLIB_DIRS = [os.path.join(CASSANDRA_PATH, 'lib')]
myplatform = platform.system()
is_win = myplatform == 'Windows'
# Workaround for supporting CP65001 encoding on python < 3.3 (https://bugs.python.org/issue13216)
if is_win and sys.version_info < (3, 3):
codecs.register(lambda name: codecs.lookup(UTF8) if name == CP65001 else None)
if myplatform == 'Linux':
ZIPLIB_DIRS.append('/usr/share/cassandra/lib')
if os.environ.get('CQLSH_NO_BUNDLED', ''):
ZIPLIB_DIRS = ()
def find_zip(libprefix):
for ziplibdir in ZIPLIB_DIRS:
zips = glob(os.path.join(ziplibdir, libprefix + '*.zip'))
if zips:
return max(zips) # probably the highest version, if multiple
cql_zip = find_zip(CQL_LIB_PREFIX)
if cql_zip:
ver = os.path.splitext(os.path.basename(cql_zip))[0][len(CQL_LIB_PREFIX):]
sys.path.insert(0, os.path.join(cql_zip, 'cassandra-driver-' + ver))
third_parties = ('futures-', 'six-')
for lib in third_parties:
lib_zip = find_zip(lib)
if lib_zip:
sys.path.insert(0, lib_zip)
# We cannot import six until we add its location to sys.path so the Python
# interpreter can find it. Do not move this to the top.
import six
from six.moves import configparser, input
from six import StringIO, ensure_text, ensure_str
warnings.filterwarnings("ignore", r".*blist.*")
try:
import cassandra
except ImportError as e:
sys.exit("\nPython Cassandra driver not installed, or not on PYTHONPATH.\n"
'You might try "pip install cassandra-driver".\n\n'
'Python: %s\n'
'Module load path: %r\n\n'
'Error: %s\n' % (sys.executable, sys.path, e))
from cassandra.auth import PlainTextAuthProvider
from cassandra.cluster import Cluster
from cassandra.cqltypes import cql_typename
from cassandra.marshal import int64_unpack
from cassandra.metadata import (ColumnMetadata, KeyspaceMetadata,
TableMetadata, protect_name, protect_names)
from cassandra.policies import WhiteListRoundRobinPolicy
from cassandra.query import SimpleStatement, ordered_dict_factory, TraceUnavailable
from cassandra.util import datetime_from_timestamp
# cqlsh should run correctly when run out of a Cassandra source tree,
# out of an unpacked Cassandra tarball, and after a proper package install.
cqlshlibdir = os.path.join(CASSANDRA_PATH, 'pylib')
if os.path.isdir(cqlshlibdir):
sys.path.insert(0, cqlshlibdir)
from cqlshlib import cql3handling, cqlhandling, pylexotron, sslhandling, cqlshhandling
from cqlshlib.copyutil import ExportTask, ImportTask
from cqlshlib.displaying import (ANSI_RESET, BLUE, COLUMN_NAME_COLORS, CYAN,
RED, WHITE, FormattedValue, colorme)
from cqlshlib.formatting import (DEFAULT_DATE_FORMAT, DEFAULT_NANOTIME_FORMAT,
DEFAULT_TIMESTAMP_FORMAT, CqlType, DateTimeFormat,
format_by_type, formatter_for)
from cqlshlib.tracing import print_trace, print_trace_session
from cqlshlib.util import get_file_encoding_bomsize, trim_if_present
DEFAULT_HOST = '127.0.0.1'
DEFAULT_PORT = 9042
DEFAULT_SSL = False
DEFAULT_CONNECT_TIMEOUT_SECONDS = 5
DEFAULT_REQUEST_TIMEOUT_SECONDS = 10
DEFAULT_FLOAT_PRECISION = 5
DEFAULT_DOUBLE_PRECISION = 5
DEFAULT_MAX_TRACE_WAIT = 10
if readline is not None and readline.__doc__ is not None and 'libedit' in readline.__doc__:
DEFAULT_COMPLETEKEY = '\t'
else:
DEFAULT_COMPLETEKEY = 'tab'
cqldocs = None
cqlruleset = None
epilog = """Connects to %(DEFAULT_HOST)s:%(DEFAULT_PORT)d by default. These
defaults can be changed by setting $CQLSH_HOST and/or $CQLSH_PORT. When a
host (and optional port number) are given on the command line, they take
precedence over any defaults.""" % globals()
parser = optparse.OptionParser(description=description, epilog=epilog,
usage="Usage: %prog [options] [host [port]]",
version='cqlsh ' + version)
parser.add_option("-C", "--color", action='store_true', dest='color',
help='Always use color output')
parser.add_option("--no-color", action='store_false', dest='color',
help='Never use color output')
parser.add_option("--browser", dest='browser', help="""The browser to use to display CQL help, where BROWSER can be:
- one of the supported browsers in https://docs.python.org/3/library/webbrowser.html.
- browser path followed by %s, example: /usr/bin/google-chrome-stable %s""")
parser.add_option('--ssl', action='store_true', help='Use SSL', default=False)
parser.add_option("-u", "--username", help="Authenticate as user.")
parser.add_option("-p", "--password", help="Authenticate using password.")
parser.add_option('-k', '--keyspace', help='Authenticate to the given keyspace.')
parser.add_option("-f", "--file", help="Execute commands from FILE, then exit")
parser.add_option('--debug', action='store_true',
help='Show additional debugging information')
parser.add_option('--coverage', action='store_true',
help='Collect coverage data')
parser.add_option("--encoding", help="Specify a non-default encoding for output."
" (Default: %s)" % (UTF8,))
parser.add_option("--cqlshrc", help="Specify an alternative cqlshrc file location.")
parser.add_option("--credentials", help="Specify an alternative credentials file location.")
parser.add_option('--cqlversion', default=None,
help='Specify a particular CQL version, '
'by default the highest version supported by the server will be used.'
' Examples: "3.0.3", "3.1.0"')
parser.add_option("--protocol-version", type="int", default=None,
                  help='Specify a specific protocol version otherwise the client will default and downgrade as necessary')
parser.add_option("-e", "--execute", help='Execute the statement and quit.')
parser.add_option("--connect-timeout", default=DEFAULT_CONNECT_TIMEOUT_SECONDS, dest='connect_timeout',
help='Specify the connection timeout in seconds (default: %default seconds).')
parser.add_option("--request-timeout", default=DEFAULT_REQUEST_TIMEOUT_SECONDS, dest='request_timeout',
help='Specify the default request timeout in seconds (default: %default seconds).')
parser.add_option("-t", "--tty", action='store_true', dest='tty',
help='Force tty mode (command prompt).')
# This is a hidden option to suppress the warning when the -p/--password command line option is used.
# Power users may use this option if they know no other people has access to the system where cqlsh is run or don't care about security.
# Use of this option in scripting is discouraged. Please use a (temporary) credentials file where possible.
# The Cassandra distributed tests (dtests) also use this option in some tests when a well-known password is supplied via the command line.
parser.add_option("--insecure-password-without-warning", action='store_true', dest='insecure_password_without_warning',
help=optparse.SUPPRESS_HELP)
optvalues = optparse.Values()
(options, arguments) = parser.parse_args(sys.argv[1:], values=optvalues)
# BEGIN history/config definition
HISTORY_DIR = os.path.expanduser(os.path.join('~', '.cassandra'))
if hasattr(options, 'cqlshrc'):
CONFIG_FILE = options.cqlshrc
if not os.path.exists(CONFIG_FILE):
print('\nWarning: Specified cqlshrc location `%s` does not exist. Using `%s` instead.\n' % (CONFIG_FILE, HISTORY_DIR))
CONFIG_FILE = os.path.join(HISTORY_DIR, 'cqlshrc')
else:
CONFIG_FILE = os.path.join(HISTORY_DIR, 'cqlshrc')
HISTORY = os.path.join(HISTORY_DIR, 'cqlsh_history')
if not os.path.exists(HISTORY_DIR):
try:
os.mkdir(HISTORY_DIR)
except OSError:
print('\nWarning: Cannot create directory at `%s`. Command history will not be saved.\n' % HISTORY_DIR)
OLD_CONFIG_FILE = os.path.expanduser(os.path.join('~', '.cqlshrc'))
if os.path.exists(OLD_CONFIG_FILE):
if os.path.exists(CONFIG_FILE):
print('\nWarning: cqlshrc config files were found at both the old location ({0})'
' and the new location ({1}), the old config file will not be migrated to the new'
' location, and the new location will be used for now. You should manually'
' consolidate the config files at the new location and remove the old file.'
.format(OLD_CONFIG_FILE, CONFIG_FILE))
else:
os.rename(OLD_CONFIG_FILE, CONFIG_FILE)
OLD_HISTORY = os.path.expanduser(os.path.join('~', '.cqlsh_history'))
if os.path.exists(OLD_HISTORY):
os.rename(OLD_HISTORY, HISTORY)
# END history/config definition
CQL_ERRORS = (
cassandra.AlreadyExists, cassandra.AuthenticationFailed, cassandra.CoordinationFailure,
cassandra.InvalidRequest, cassandra.Timeout, cassandra.Unauthorized, cassandra.OperationTimedOut,
cassandra.cluster.NoHostAvailable,
cassandra.connection.ConnectionBusy, cassandra.connection.ProtocolError, cassandra.connection.ConnectionException,
cassandra.protocol.ErrorMessage, cassandra.protocol.InternalError, cassandra.query.TraceUnavailable
)
debug_completion = bool(os.environ.get('CQLSH_DEBUG_COMPLETION', '') == 'YES')
class NoKeyspaceError(Exception):
pass
class KeyspaceNotFound(Exception):
pass
class ColumnFamilyNotFound(Exception):
pass
class IndexNotFound(Exception):
pass
class MaterializedViewNotFound(Exception):
pass
class ObjectNotFound(Exception):
pass
class VersionNotSupported(Exception):
pass
class UserTypeNotFound(Exception):
pass
class FunctionNotFound(Exception):
pass
class AggregateNotFound(Exception):
pass
class DecodeError(Exception):
verb = 'decode'
def __init__(self, thebytes, err, colname=None):
self.thebytes = thebytes
self.err = err
self.colname = colname
def __str__(self):
return str(self.thebytes)
def message(self):
what = 'value %r' % (self.thebytes,)
if self.colname is not None:
what = 'value %r (for column %r)' % (self.thebytes, self.colname)
return 'Failed to %s %s : %s' \
% (self.verb, what, self.err)
def __repr__(self):
return '<%s %s>' % (self.__class__.__name__, self.message())
def maybe_ensure_text(val):
return ensure_text(val) if val else val
class FormatError(DecodeError):
verb = 'format'
def full_cql_version(ver):
while ver.count('.') < 2:
ver += '.0'
ver_parts = ver.split('-', 1) + ['']
vertuple = tuple(list(map(int, ver_parts[0].split('.'))) + [ver_parts[1]])
return ver, vertuple
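# Illustrative examples (added; not from the original source):
#   full_cql_version('3.1')         -> ('3.1.0', (3, 1, 0, ''))
#   full_cql_version('3.4.5-beta1') -> ('3.4.5-beta1', (3, 4, 5, 'beta1'))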
def format_value(val, cqltype, encoding, addcolor=False, date_time_format=None,
float_precision=None, colormap=None, nullval=None):
if isinstance(val, DecodeError):
if addcolor:
return colorme(repr(val.thebytes), colormap, 'error')
else:
return FormattedValue(repr(val.thebytes))
return format_by_type(val, cqltype=cqltype, encoding=encoding, colormap=colormap,
addcolor=addcolor, nullval=nullval, date_time_format=date_time_format,
float_precision=float_precision)
def show_warning_without_quoting_line(message, category, filename, lineno, file=None, line=None):
if file is None:
file = sys.stderr
try:
file.write(warnings.formatwarning(message, category, filename, lineno, line=''))
except IOError:
pass
warnings.showwarning = show_warning_without_quoting_line
warnings.filterwarnings('always', category=cql3handling.UnexpectedTableStructure)
def insert_driver_hooks():
class DateOverFlowWarning(RuntimeWarning):
pass
# Native datetime types blow up outside of datetime.[MIN|MAX]_YEAR. We will fall back to an int timestamp
def deserialize_date_fallback_int(byts, protocol_version):
timestamp_ms = int64_unpack(byts)
try:
return datetime_from_timestamp(timestamp_ms / 1000.0)
except OverflowError:
warnings.warn(DateOverFlowWarning("Some timestamps are larger than Python datetime can represent. "
"Timestamps are displayed in milliseconds from epoch."))
return timestamp_ms
cassandra.cqltypes.DateType.deserialize = staticmethod(deserialize_date_fallback_int)
if hasattr(cassandra, 'deserializers'):
del cassandra.deserializers.DesDateType
# Return cassandra.cqltypes.EMPTY instead of None for empty values
cassandra.cqltypes.CassandraType.support_empty_values = True
class Shell(cmd.Cmd):
custom_prompt = ensure_text(os.getenv('CQLSH_PROMPT', ''))
if custom_prompt != '':
custom_prompt += "\n"
default_prompt = custom_prompt + "cqlsh> "
continue_prompt = " ... "
keyspace_prompt = custom_prompt + "cqlsh:{}> "
keyspace_continue_prompt = "{} ... "
show_line_nums = False
debug = False
coverage = False
coveragerc_path = None
stop = False
last_hist = None
shunted_query_out = None
use_paging = True
default_page_size = 100
def __init__(self, hostname, port, color=False,
username=None, password=None, encoding=None, stdin=None, tty=True,
completekey=DEFAULT_COMPLETEKEY, browser=None, use_conn=None,
cqlver=None, keyspace=None,
tracing_enabled=False, expand_enabled=False,
display_nanotime_format=DEFAULT_NANOTIME_FORMAT,
display_timestamp_format=DEFAULT_TIMESTAMP_FORMAT,
display_date_format=DEFAULT_DATE_FORMAT,
display_float_precision=DEFAULT_FLOAT_PRECISION,
display_double_precision=DEFAULT_DOUBLE_PRECISION,
display_timezone=None,
max_trace_wait=DEFAULT_MAX_TRACE_WAIT,
ssl=False,
single_statement=None,
request_timeout=DEFAULT_REQUEST_TIMEOUT_SECONDS,
protocol_version=None,
connect_timeout=DEFAULT_CONNECT_TIMEOUT_SECONDS,
is_subshell=False):
cmd.Cmd.__init__(self, completekey=completekey)
self.hostname = hostname
self.port = port
self.auth_provider = None
if username:
if not password:
password = getpass.getpass()
self.auth_provider = PlainTextAuthProvider(username=username, password=password)
self.username = username
self.keyspace = keyspace
self.ssl = ssl
self.tracing_enabled = tracing_enabled
self.page_size = self.default_page_size
self.expand_enabled = expand_enabled
if use_conn:
self.conn = use_conn
else:
kwargs = {}
if protocol_version is not None:
kwargs['protocol_version'] = protocol_version
self.conn = Cluster(contact_points=(self.hostname,), port=self.port, cql_version=cqlver,
auth_provider=self.auth_provider,
ssl_options=sslhandling.ssl_settings(hostname, CONFIG_FILE) if ssl else None,
load_balancing_policy=WhiteListRoundRobinPolicy([self.hostname]),
control_connection_timeout=connect_timeout,
connect_timeout=connect_timeout,
**kwargs)
self.owns_connection = not use_conn
if keyspace:
self.session = self.conn.connect(keyspace)
else:
self.session = self.conn.connect()
if browser == "":
browser = None
self.browser = browser
self.color = color
self.display_nanotime_format = display_nanotime_format
self.display_timestamp_format = display_timestamp_format
self.display_date_format = display_date_format
self.display_float_precision = display_float_precision
self.display_double_precision = display_double_precision
self.display_timezone = display_timezone
self.session.default_timeout = request_timeout
self.session.row_factory = ordered_dict_factory
self.session.default_consistency_level = cassandra.ConsistencyLevel.ONE
self.get_connection_versions()
self.set_expanded_cql_version(self.connection_versions['cql'])
self.current_keyspace = keyspace
self.max_trace_wait = max_trace_wait
self.session.max_trace_wait = max_trace_wait
self.tty = tty
self.encoding = encoding
self.check_windows_encoding()
self.output_codec = codecs.lookup(encoding)
self.statement = StringIO()
self.lineno = 1
self.in_comment = False
self.prompt = ''
if stdin is None:
stdin = sys.stdin
if tty:
self.reset_prompt()
self.report_connection()
print('Use HELP for help.')
else:
self.show_line_nums = True
self.stdin = stdin
self.query_out = sys.stdout
self.consistency_level = cassandra.ConsistencyLevel.ONE
self.serial_consistency_level = cassandra.ConsistencyLevel.SERIAL
self.empty_lines = 0
self.statement_error = False
self.single_statement = single_statement
self.is_subshell = is_subshell
@property
def batch_mode(self):
return not self.tty
@property
def is_using_utf8(self):
# utf8 encodings from https://docs.python.org/{2,3}/library/codecs.html
return self.encoding.replace('-', '_').lower() in ['utf', 'utf_8', 'u8', 'utf8', CP65001]
def check_windows_encoding(self):
if is_win and os.name == 'nt' and self.tty and \
self.is_using_utf8 and sys.stdout.encoding != CP65001:
self.printerr("\nWARNING: console codepage must be set to cp65001 "
"to support {} encoding on Windows platforms.\n"
"If you experience encoding problems, change your console"
" codepage with 'chcp 65001' before starting cqlsh.\n".format(self.encoding))
def set_expanded_cql_version(self, ver):
ver, vertuple = full_cql_version(ver)
self.cql_version = ver
self.cql_ver_tuple = vertuple
def cqlver_atleast(self, major, minor=0, patch=0):
return self.cql_ver_tuple[:3] >= (major, minor, patch)
def myformat_value(self, val, cqltype=None, **kwargs):
if isinstance(val, DecodeError):
self.decoding_errors.append(val)
try:
dtformats = DateTimeFormat(timestamp_format=self.display_timestamp_format,
date_format=self.display_date_format, nanotime_format=self.display_nanotime_format,
timezone=self.display_timezone)
precision = self.display_double_precision if cqltype is not None and cqltype.type_name == 'double' \
else self.display_float_precision
return format_value(val, cqltype=cqltype, encoding=self.output_codec.name,
addcolor=self.color, date_time_format=dtformats,
float_precision=precision, **kwargs)
except Exception as e:
err = FormatError(val, e)
self.decoding_errors.append(err)
return format_value(err, cqltype=cqltype, encoding=self.output_codec.name, addcolor=self.color)
def myformat_colname(self, name, table_meta=None):
column_colors = COLUMN_NAME_COLORS.copy()
# check column role and color appropriately
if table_meta:
if name in [col.name for col in table_meta.partition_key]:
column_colors.default_factory = lambda: RED
elif name in [col.name for col in table_meta.clustering_key]:
column_colors.default_factory = lambda: CYAN
elif name in table_meta.columns and table_meta.columns[name].is_static:
column_colors.default_factory = lambda: WHITE
return self.myformat_value(name, colormap=column_colors)
def report_connection(self):
self.show_host()
self.show_version()
def show_host(self):
print("Connected to {0} at {1}:{2}"
.format(self.applycolor(self.get_cluster_name(), BLUE),
self.hostname,
self.port))
def show_version(self):
vers = self.connection_versions.copy()
vers['shver'] = version
# system.Versions['cql'] apparently does not reflect changes with
# set_cql_version.
vers['cql'] = self.cql_version
print("[cqlsh %(shver)s | Cassandra %(build)s | CQL spec %(cql)s | Native protocol v%(protocol)s]" % vers)
def show_session(self, sessionid, partial_session=False):
print_trace_session(self, self.session, sessionid, partial_session)
def get_connection_versions(self):
result, = self.session.execute("select * from system.local where key = 'local'")
vers = {
'build': result['release_version'],
'protocol': self.conn.protocol_version,
'cql': result['cql_version'],
}
self.connection_versions = vers
def get_keyspace_names(self):
return list(self.conn.metadata.keyspaces)
def get_columnfamily_names(self, ksname=None):
if ksname is None:
ksname = self.current_keyspace
return list(self.get_keyspace_meta(ksname).tables)
def get_materialized_view_names(self, ksname=None):
if ksname is None:
ksname = self.current_keyspace
return list(self.get_keyspace_meta(ksname).views)
def get_index_names(self, ksname=None):
if ksname is None:
ksname = self.current_keyspace
return list(self.get_keyspace_meta(ksname).indexes)
def get_column_names(self, ksname, cfname):
if ksname is None:
ksname = self.current_keyspace
layout = self.get_table_meta(ksname, cfname)
return list(layout.columns)
def get_usertype_names(self, ksname=None):
if ksname is None:
ksname = self.current_keyspace
return list(self.get_keyspace_meta(ksname).user_types)
def get_usertype_layout(self, ksname, typename):
if ksname is None:
ksname = self.current_keyspace
ks_meta = self.get_keyspace_meta(ksname)
try:
user_type = ks_meta.user_types[typename]
except KeyError:
raise UserTypeNotFound("User type {!r} not found".format(typename))
return list(zip(user_type.field_names, user_type.field_types))
def get_userfunction_names(self, ksname=None):
if ksname is None:
ksname = self.current_keyspace
return [f.name for f in list(self.get_keyspace_meta(ksname).functions.values())]
def get_useraggregate_names(self, ksname=None):
if ksname is None:
ksname = self.current_keyspace
return [f.name for f in list(self.get_keyspace_meta(ksname).aggregates.values())]
def get_cluster_name(self):
return self.conn.metadata.cluster_name
def get_partitioner(self):
return self.conn.metadata.partitioner
def get_keyspace_meta(self, ksname):
if ksname in self.conn.metadata.keyspaces:
return self.conn.metadata.keyspaces[ksname]
raise KeyspaceNotFound('Keyspace %r not found.' % ksname)
def get_keyspaces(self):
return list(self.conn.metadata.keyspaces.values())
def get_ring(self, ks):
self.conn.metadata.token_map.rebuild_keyspace(ks, build_if_absent=True)
return self.conn.metadata.token_map.tokens_to_hosts_by_ks[ks]
def get_table_meta(self, ksname, tablename):
if ksname is None:
ksname = self.current_keyspace
ksmeta = self.get_keyspace_meta(ksname)
if tablename not in ksmeta.tables:
if ksname == 'system_auth' and tablename in ['roles', 'role_permissions']:
self.get_fake_auth_table_meta(ksname, tablename)
else:
raise ColumnFamilyNotFound("Column family {} not found".format(tablename))
else:
return ksmeta.tables[tablename]
def get_fake_auth_table_meta(self, ksname, tablename):
# may be using external auth implementation so internal tables
# aren't actually defined in schema. In this case, we'll fake
# them up
if tablename == 'roles':
ks_meta = KeyspaceMetadata(ksname, True, None, None)
table_meta = TableMetadata(ks_meta, 'roles')
table_meta.columns['role'] = ColumnMetadata(table_meta, 'role', cassandra.cqltypes.UTF8Type)
table_meta.columns['is_superuser'] = ColumnMetadata(table_meta, 'is_superuser', cassandra.cqltypes.BooleanType)
table_meta.columns['can_login'] = ColumnMetadata(table_meta, 'can_login', cassandra.cqltypes.BooleanType)
elif tablename == 'role_permissions':
ks_meta = KeyspaceMetadata(ksname, True, None, None)
table_meta = TableMetadata(ks_meta, 'role_permissions')
table_meta.columns['role'] = ColumnMetadata(table_meta, 'role', cassandra.cqltypes.UTF8Type)
table_meta.columns['resource'] = ColumnMetadata(table_meta, 'resource', cassandra.cqltypes.UTF8Type)
table_meta.columns['permission'] = ColumnMetadata(table_meta, 'permission', cassandra.cqltypes.UTF8Type)
else:
raise ColumnFamilyNotFound("Column family {} not found".format(tablename))
def get_index_meta(self, ksname, idxname):
if ksname is None:
ksname = self.current_keyspace
ksmeta = self.get_keyspace_meta(ksname)
if idxname not in ksmeta.indexes:
raise IndexNotFound("Index {} not found".format(idxname))
return ksmeta.indexes[idxname]
def get_view_meta(self, ksname, viewname):
if ksname is None:
ksname = self.current_keyspace
ksmeta = self.get_keyspace_meta(ksname)
if viewname not in ksmeta.views:
raise MaterializedViewNotFound("Materialized view '{}' not found".format(viewname))
return ksmeta.views[viewname]
def get_object_meta(self, ks, name):
if name is None:
if ks and ks in self.conn.metadata.keyspaces:
return self.conn.metadata.keyspaces[ks]
elif self.current_keyspace is None:
raise ObjectNotFound("'{}' not found in keyspaces".format(ks))
else:
name = ks
ks = self.current_keyspace
if ks is None:
ks = self.current_keyspace
ksmeta = self.get_keyspace_meta(ks)
if name in ksmeta.tables:
return ksmeta.tables[name]
elif name in ksmeta.indexes:
return ksmeta.indexes[name]
elif name in ksmeta.views:
return ksmeta.views[name]
raise ObjectNotFound("'{}' not found in keyspace '{}'".format(name, ks))
def get_usertypes_meta(self):
data = self.session.execute("select * from system.schema_usertypes")
if not data:
return cql3handling.UserTypesMeta({})
return cql3handling.UserTypesMeta.from_layout(data)
def get_trigger_names(self, ksname=None):
if ksname is None:
ksname = self.current_keyspace
return [trigger.name
for table in list(self.get_keyspace_meta(ksname).tables.values())
for trigger in list(table.triggers.values())]
def reset_statement(self):
self.reset_prompt()
self.statement.truncate(0)
self.statement.seek(0)
self.empty_lines = 0
def reset_prompt(self):
if self.current_keyspace is None:
self.set_prompt(self.default_prompt, True)
else:
self.set_prompt(self.keyspace_prompt.format(self.current_keyspace), True)
def set_continue_prompt(self):
if self.empty_lines >= 3:
self.set_prompt("Statements are terminated with a ';'. You can press CTRL-C to cancel an incomplete statement.")
self.empty_lines = 0
return
if self.current_keyspace is None:
self.set_prompt(self.continue_prompt)
else:
spaces = ' ' * len(str(self.current_keyspace))
self.set_prompt(self.keyspace_continue_prompt.format(spaces))
self.empty_lines = self.empty_lines + 1 if not self.lastcmd else 0
@contextmanager
def prepare_loop(self):
readline = None
if self.tty and self.completekey:
try:
import readline
except ImportError:
if is_win:
print("WARNING: pyreadline dependency missing. Install to enable tab completion.")
pass
else:
old_completer = readline.get_completer()
readline.set_completer(self.complete)
if readline.__doc__ is not None and 'libedit' in readline.__doc__:
readline.parse_and_bind("bind -e")
readline.parse_and_bind("bind '" + self.completekey + "' rl_complete")
readline.parse_and_bind("bind ^R em-inc-search-prev")
else:
readline.parse_and_bind(self.completekey + ": complete")
# start coverage collection if requested, unless in subshell
if self.coverage and not self.is_subshell:
# check for coveragerc file, write it if missing
if os.path.exists(HISTORY_DIR):
self.coveragerc_path = os.path.join(HISTORY_DIR, '.coveragerc')
covdata_path = os.path.join(HISTORY_DIR, '.coverage')
if not os.path.isfile(self.coveragerc_path):
with open(self.coveragerc_path, 'w') as f:
f.writelines(["[run]\n",
"concurrency = multiprocessing\n",
"data_file = {}\n".format(covdata_path),
"parallel = true\n"]
)
# start coverage
import coverage
self.cov = coverage.Coverage(config_file=self.coveragerc_path)
self.cov.start()
try:
yield
finally:
if readline is not None:
readline.set_completer(old_completer)
if self.coverage and not self.is_subshell:
self.stop_coverage()
def get_input_line(self, prompt=''):
if self.tty:
self.lastcmd = input(ensure_str(prompt))
line = ensure_text(self.lastcmd) + '\n'
else:
self.lastcmd = ensure_text(self.stdin.readline())
line = self.lastcmd
if not len(line):
raise EOFError
self.lineno += 1
line = ensure_text(line)
return line
def use_stdin_reader(self, until='', prompt=''):
until += '\n'
while True:
try:
newline = self.get_input_line(prompt=prompt)
except EOFError:
return
if newline == until:
return
yield newline
def cmdloop(self):
"""
Adapted from cmd.Cmd's version, because there is literally no way with
cmd.Cmd.cmdloop() to tell the difference between "EOF" showing up in
input and an actual EOF.
"""
with self.prepare_loop():
while not self.stop:
try:
if self.single_statement:
line = self.single_statement
self.stop = True
else:
line = self.get_input_line(self.prompt)
self.statement.write(line)
if self.onecmd(self.statement.getvalue()):
self.reset_statement()
except EOFError:
self.handle_eof()
except CQL_ERRORS as cqlerr:
self.printerr(cqlerr.message)
except KeyboardInterrupt:
self.reset_statement()
print('')
def strip_comment_blocks(self, statementtext):
comment_block_in_literal_string = re.search('["].*[/][*].*[*][/].*["]', statementtext)
if not comment_block_in_literal_string:
result = re.sub('[/][*].*[*][/]', "", statementtext)
if '*/' in result and '/*' not in result and not self.in_comment:
raise SyntaxError("Encountered comment block terminator without being in comment block")
if '/*' in result:
result = re.sub('[/][*].*', "", result)
self.in_comment = True
if '*/' in result:
result = re.sub('.*[*][/]', "", result)
self.in_comment = False
if self.in_comment and not re.findall('[/][*]|[*][/]', statementtext):
result = ''
return result
return statementtext
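# Illustrative behaviour of the stripping above (assumed inputs):
#   "SELECT 1; /* inline note */"           -> "SELECT 1; "
#   'INSERT ... "/* not a comment */" ...'  is returned unchanged, because the
#   block sits inside a double-quoted literal.
#   An unterminated "/* ..." sets self.in_comment, and subsequent lines are
#   dropped until a closing "*/" is seen.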
def onecmd(self, statementtext):
"""
Returns true if the statement is complete and was handled (meaning it
can be reset).
"""
statementtext = ensure_text(statementtext)
statementtext = self.strip_comment_blocks(statementtext)
try:
statements, endtoken_escaped = cqlruleset.cql_split_statements(statementtext)
except pylexotron.LexingError as e:
if self.show_line_nums:
self.printerr('Invalid syntax at line {0}, char {1}'
.format(e.linenum, e.charnum))
else:
self.printerr('Invalid syntax at char {0}'.format(e.charnum))
statementline = statementtext.split('\n')[e.linenum - 1]
self.printerr(' {0}'.format(statementline))
self.printerr(' {0}^'.format(' ' * e.charnum))
return True
while statements and not statements[-1]:
statements = statements[:-1]
if not statements:
return True
if endtoken_escaped or statements[-1][-1][0] != 'endtoken':
self.set_continue_prompt()
return
for st in statements:
try:
self.handle_statement(st, statementtext)
except Exception as e:
if self.debug:
traceback.print_exc()
else:
self.printerr(e)
return True
def handle_eof(self):
if self.tty:
print('')
statement = self.statement.getvalue()
if statement.strip():
if not self.onecmd(statement):
self.printerr('Incomplete statement at end of file')
self.do_exit()
def handle_statement(self, tokens, srcstr):
# Concat multi-line statements and insert into history
if readline is not None:
nl_count = srcstr.count("\n")
new_hist = ensure_str(srcstr.replace("\n", " ").rstrip())
if nl_count > 1 and self.last_hist != new_hist:
readline.add_history(new_hist)
self.last_hist = new_hist
cmdword = tokens[0][1]
if cmdword == '?':
cmdword = 'help'
custom_handler = getattr(self, 'do_' + cmdword.lower(), None)
if custom_handler:
parsed = cqlruleset.cql_whole_parse_tokens(tokens, srcstr=srcstr,
startsymbol='cqlshCommand')
if parsed and not parsed.remainder:
# successful complete parse
return custom_handler(parsed)
else:
return self.handle_parse_error(cmdword, tokens, parsed, srcstr)
return self.perform_statement(cqlruleset.cql_extract_orig(tokens, srcstr))
def handle_parse_error(self, cmdword, tokens, parsed, srcstr):
if cmdword.lower() in ('select', 'insert', 'update', 'delete', 'truncate',
'create', 'drop', 'alter', 'grant', 'revoke',
'batch', 'list'):
# hey, maybe they know about some new syntax we don't. type
# assumptions won't work, but maybe the query will.
return self.perform_statement(cqlruleset.cql_extract_orig(tokens, srcstr))
if parsed:
self.printerr('Improper %s command (problem at %r).' % (cmdword, parsed.remainder[0]))
else:
self.printerr('Improper %s command.' % cmdword)
def do_use(self, parsed):
ksname = parsed.get_binding('ksname')
success, _ = self.perform_simple_statement(SimpleStatement(parsed.extract_orig()))
if success:
if ksname[0] == '"' and ksname[-1] == '"':
self.current_keyspace = self.cql_unprotect_name(ksname)
else:
self.current_keyspace = ksname.lower()
def do_select(self, parsed):
tracing_was_enabled = self.tracing_enabled
ksname = parsed.get_binding('ksname')
stop_tracing = ksname == 'system_traces' or (ksname is None and self.current_keyspace == 'system_traces')
self.tracing_enabled = self.tracing_enabled and not stop_tracing
statement = parsed.extract_orig()
self.perform_statement(statement)
self.tracing_enabled = tracing_was_enabled
def perform_statement(self, statement):
statement = ensure_text(statement)
stmt = SimpleStatement(statement, consistency_level=self.consistency_level, serial_consistency_level=self.serial_consistency_level, fetch_size=self.page_size if self.use_paging else None)
success, future = self.perform_simple_statement(stmt)
if future:
if future.warnings:
self.print_warnings(future.warnings)
if self.tracing_enabled:
try:
for trace in future.get_all_query_traces(max_wait_per=self.max_trace_wait, query_cl=self.consistency_level):
print_trace(self, trace)
except TraceUnavailable:
msg = "Statement trace did not complete within %d seconds; trace data may be incomplete." % (self.session.max_trace_wait,)
self.writeresult(msg, color=RED)
for trace_id in future.get_query_trace_ids():
self.show_session(trace_id, partial_session=True)
except Exception as err:
self.printerr("Unable to fetch query trace: %s" % (str(err),))
return success
def parse_for_select_meta(self, query_string):
try:
parsed = cqlruleset.cql_parse(query_string)[1]
except IndexError:
return None
ks = self.cql_unprotect_name(parsed.get_binding('ksname', None))
name = self.cql_unprotect_name(parsed.get_binding('cfname', None))
try:
return self.get_table_meta(ks, name)
except ColumnFamilyNotFound:
try:
return self.get_view_meta(ks, name)
except MaterializedViewNotFound:
raise ObjectNotFound("'{}' not found in keyspace '{}'".format(name, ks))
def parse_for_update_meta(self, query_string):
try:
parsed = cqlruleset.cql_parse(query_string)[1]
except IndexError:
return None
ks = self.cql_unprotect_name(parsed.get_binding('ksname', None))
cf = self.cql_unprotect_name(parsed.get_binding('cfname'))
return self.get_table_meta(ks, cf)
def perform_simple_statement(self, statement):
if not statement:
return False, None
future = self.session.execute_async(statement, trace=self.tracing_enabled)
result = None
try:
result = future.result()
except CQL_ERRORS as err:
err_msg = ensure_text(err.message if hasattr(err, 'message') else str(err))
self.printerr(str(err.__class__.__name__) + ": " + err_msg)
except Exception:
import traceback
self.printerr(traceback.format_exc())
# Even if statement failed we try to refresh schema if not agreed (see CASSANDRA-9689)
if not future.is_schema_agreed:
try:
self.conn.refresh_schema_metadata(5) # will throw exception if there is a schema mismatch
except Exception:
self.printerr("Warning: schema version mismatch detected; check the schema versions of your "
"nodes in system.local and system.peers.")
self.conn.refresh_schema_metadata(-1)
if result is None:
return False, None
if statement.query_string[:6].lower() == 'select':
self.print_result(result, self.parse_for_select_meta(statement.query_string))
elif statement.query_string.lower().startswith("list users") or statement.query_string.lower().startswith("list roles"):
self.print_result(result, self.get_table_meta('system_auth', 'roles'))
elif statement.query_string.lower().startswith("list"):
self.print_result(result, self.get_table_meta('system_auth', 'role_permissions'))
elif result:
# CAS INSERT/UPDATE
self.writeresult("")
self.print_static_result(result, self.parse_for_update_meta(statement.query_string), with_header=True, tty=self.tty)
self.flush_output()
return True, future
def print_result(self, result, table_meta):
self.decoding_errors = []
self.writeresult("")
def print_all(result, table_meta, tty):
# Return the number of rows in total
num_rows = 0
isFirst = True
while True:
# Always print for the first page even if it is empty
if result.current_rows or isFirst:
with_header = isFirst or tty
self.print_static_result(result, table_meta, with_header, tty, num_rows)
num_rows += len(result.current_rows)
if result.has_more_pages:
if self.shunted_query_out is None and tty:
# Only pause when not capturing.
input("---MORE---")
result.fetch_next_page()
else:
if not tty:
self.writeresult("")
break
isFirst = False
return num_rows
num_rows = print_all(result, table_meta, self.tty)
self.writeresult("(%d rows)" % num_rows)
if self.decoding_errors:
for err in self.decoding_errors[:2]:
self.writeresult(err.message(), color=RED)
if len(self.decoding_errors) > 2:
self.writeresult('%d more decoding errors suppressed.'
% (len(self.decoding_errors) - 2), color=RED)
def print_static_result(self, result, table_meta, with_header, tty, row_count_offset=0):
if not result.column_names and not table_meta:
return
column_names = result.column_names or list(table_meta.columns.keys())
formatted_names = [self.myformat_colname(name, table_meta) for name in column_names]
if not result.current_rows:
# print header only
self.print_formatted_result(formatted_names, None, with_header=True, tty=tty)
return
cql_types = []
if result.column_types:
ks_name = table_meta.keyspace_name if table_meta else self.current_keyspace
ks_meta = self.conn.metadata.keyspaces.get(ks_name, None)
cql_types = [CqlType(cql_typename(t), ks_meta) for t in result.column_types]
formatted_values = [list(map(self.myformat_value, [row[c] for c in column_names], cql_types)) for row in result.current_rows]
if self.expand_enabled:
self.print_formatted_result_vertically(formatted_names, formatted_values, row_count_offset)
else:
self.print_formatted_result(formatted_names, formatted_values, with_header, tty)
def print_formatted_result(self, formatted_names, formatted_values, with_header, tty):
# determine column widths
widths = [n.displaywidth for n in formatted_names]
if formatted_values is not None:
for fmtrow in formatted_values:
for num, col in enumerate(fmtrow):
widths[num] = max(widths[num], col.displaywidth)
# print header
if with_header:
header = ' | '.join(hdr.ljust(w, color=self.color) for (hdr, w) in zip(formatted_names, widths))
self.writeresult(' ' + header.rstrip())
self.writeresult('-%s-' % '-+-'.join('-' * w for w in widths))
# stop if there are no rows
if formatted_values is None:
self.writeresult("")
return
# print row data
for row in formatted_values:
line = ' | '.join(col.rjust(w, color=self.color) for (col, w) in zip(row, widths))
self.writeresult(' ' + line)
if tty:
self.writeresult("")
def print_formatted_result_vertically(self, formatted_names, formatted_values, row_count_offset):
max_col_width = max([n.displaywidth for n in formatted_names])
max_val_width = max([n.displaywidth for row in formatted_values for n in row])
# for each row returned, list all the column-value pairs
for i, row in enumerate(formatted_values):
self.writeresult("@ Row %d" % (row_count_offset + i + 1))
self.writeresult('-%s-' % '-+-'.join(['-' * max_col_width, '-' * max_val_width]))
for field_id, field in enumerate(row):
column = formatted_names[field_id].ljust(max_col_width, color=self.color)
value = field.ljust(field.displaywidth, color=self.color)
self.writeresult(' ' + " | ".join([column, value]))
self.writeresult('')
def print_warnings(self, warnings):
if warnings is None or len(warnings) == 0:
return
self.writeresult('')
self.writeresult('Warnings :')
for warning in warnings:
self.writeresult(warning)
self.writeresult('')
def emptyline(self):
pass
def parseline(self, line):
# this shouldn't be needed
raise NotImplementedError
def complete(self, text, state):
if readline is None:
return
if state == 0:
try:
self.completion_matches = self.find_completions(text)
except Exception:
if debug_completion:
import traceback
traceback.print_exc()
else:
raise
try:
return self.completion_matches[state]
except IndexError:
return None
def find_completions(self, text):
curline = readline.get_line_buffer()
prevlines = self.statement.getvalue()
wholestmt = prevlines + curline
begidx = readline.get_begidx() + len(prevlines)
stuff_to_complete = wholestmt[:begidx]
return cqlruleset.cql_complete(stuff_to_complete, text, cassandra_conn=self,
debug=debug_completion, startsymbol='cqlshCommand')
def set_prompt(self, prompt, prepend_user=False):
if prepend_user and self.username:
self.prompt = "{0}@{1}".format(self.username, prompt)
return
self.prompt = prompt
def cql_unprotect_name(self, namestr):
if namestr is None:
return
return cqlruleset.dequote_name(namestr)
def cql_unprotect_value(self, valstr):
if valstr is not None:
return cqlruleset.dequote_value(valstr)
def _columnize_unicode(self, name_list):
"""
Used when columnizing identifiers that may contain unicode
"""
names = [n for n in name_list]
cmd.Cmd.columnize(self, names)
print('')
def do_describe(self, parsed):
"""
DESCRIBE [cqlsh only]
(DESC may be used as a shorthand.)
Outputs information about the connected Cassandra cluster, or about
the data objects stored in the cluster. Use in one of the following ways:
DESCRIBE KEYSPACES
Output the names of all keyspaces.
DESCRIBE KEYSPACE [<keyspacename>]
Output CQL commands that could be used to recreate the given keyspace,
and the objects in it (such as tables, types, functions, etc.).
In some cases, as the CQL interface matures, there will be some metadata
about a keyspace that is not representable with CQL. That metadata will not be shown.
The '<keyspacename>' argument may be omitted, in which case the current
keyspace will be described.
DESCRIBE TABLES
Output the names of all tables in the current keyspace, or in all
keyspaces if there is no current keyspace.
DESCRIBE TABLE [<keyspace>.]<tablename>
Output CQL commands that could be used to recreate the given table.
In some cases, as above, there may be table metadata which is not
representable and which will not be shown.
DESCRIBE INDEX <indexname>
Output the CQL command that could be used to recreate the given index.
In some cases, there may be index metadata which is not representable
and which will not be shown.
DESCRIBE MATERIALIZED VIEW <viewname>
Output the CQL command that could be used to recreate the given materialized view.
In some cases, there may be materialized view metadata which is not representable
and which will not be shown.
DESCRIBE CLUSTER
Output information about the connected Cassandra cluster, such as the
cluster name, and the partitioner and snitch in use. When you are
connected to a non-system keyspace, also shows endpoint-range
ownership information for the Cassandra ring.
DESCRIBE [FULL] SCHEMA
Output CQL commands that could be used to recreate the entire (non-system) schema.
Works as though "DESCRIBE KEYSPACE k" was invoked for each non-system keyspace
k. Use DESCRIBE FULL SCHEMA to include the system keyspaces.
DESCRIBE TYPES
Output the names of all user-defined-types in the current keyspace, or in all
keyspaces if there is no current keyspace.
DESCRIBE TYPE [<keyspace>.]<type>
Output the CQL command that could be used to recreate the given user-defined-type.
DESCRIBE FUNCTIONS
Output the names of all user-defined-functions in the current keyspace, or in all
keyspaces if there is no current keyspace.
DESCRIBE FUNCTION [<keyspace>.]<function>
Output the CQL command that could be used to recreate the given user-defined-function.
DESCRIBE AGGREGATES
Output the names of all user-defined-aggregates in the current keyspace, or in all
keyspaces if there is no current keyspace.
DESCRIBE AGGREGATE [<keyspace>.]<aggregate>
Output the CQL command that could be used to recreate the given user-defined-aggregate.
DESCRIBE <objname>
Output CQL commands that could be used to recreate the entire object schema,
where object can be either a keyspace or a table or an index or a materialized
view (in this order).
"""
stmt = SimpleStatement(parsed.extract_orig(), consistency_level=cassandra.ConsistencyLevel.LOCAL_ONE, fetch_size=self.page_size if self.use_paging else None)
future = self.session.execute_async(stmt)
if self.connection_versions['build'][0] < '4':
print('\nWARN: DESCRIBE|DESC was moved to server side in Cassandra 4.0. As a consequence DESCRIBE|DESC '
'will not work in cqlsh %r connected to Cassandra %r, the version that you are connected to. '
'DESCRIBE does not exist server side prior to Cassandra 4.0.'
% (version, self.connection_versions['build']))
else:
try:
result = future.result()
what = parsed.matched[1][1].lower()
if what in ('columnfamilies', 'tables', 'types', 'functions', 'aggregates'):
self.describe_list(result)
elif what == 'keyspaces':
self.describe_keyspaces(result)
elif what == 'cluster':
self.describe_cluster(result)
elif what:
self.describe_element(result)
except CQL_ERRORS as err:
err_msg = ensure_text(err.message if hasattr(err, 'message') else str(err))
self.printerr(err_msg.partition("message=")[2].strip('"'))
except Exception:
import traceback
self.printerr(traceback.format_exc())
if future:
if future.warnings:
self.print_warnings(future.warnings)
do_desc = do_describe
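# Usage sketch (hypothetical keyspace/table names):
#   DESCRIBE KEYSPACES
#   DESCRIBE KEYSPACE my_ks
#   DESCRIBE TABLE my_ks.users
# On Cassandra 4.0+ the statement is executed server side and the result rows are
# routed to describe_keyspaces(), describe_list(), describe_element() or
# describe_cluster() below, depending on what was described.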
def describe_keyspaces(self, rows):
"""
Print the output for a DESCRIBE KEYSPACES query
"""
names = [ensure_str(r['name']) for r in rows]
print('')
cmd.Cmd.columnize(self, names)
print('')
def describe_list(self, rows):
"""
Print the output for all the DESCRIBE queries for element names (e.g. DESCRIBE TABLES, DESCRIBE FUNCTIONS ...)
"""
keyspace = None
names = list()
for row in rows:
if row['keyspace_name'] != keyspace:
if keyspace is not None:
self.print_keyspace_element_names(keyspace, names)
keyspace = row['keyspace_name']
names = list()
names.append(ensure_str(row['name']))
if keyspace is not None:
self.print_keyspace_element_names(keyspace, names)
print('')
def print_keyspace_element_names(self, keyspace, names):
print('')
if self.current_keyspace is None:
print('Keyspace %s' % (keyspace))
print('---------%s' % ('-' * len(keyspace)))
cmd.Cmd.columnize(self, names)
def describe_element(self, rows):
"""
Print the output for all the DESCRIBE queries where an element name has been specified (e.g. DESCRIBE TABLE, DESCRIBE INDEX ...)
"""
for row in rows:
print('')
self.query_out.write(row['create_statement'])
print('')
def describe_cluster(self, rows):
"""
Print the output for a DESCRIBE CLUSTER query.
If a keyspace is in use, the returned ResultSet will contain a 'range_ownership' column;
otherwise it will not.
"""
for row in rows:
print('\nCluster: %s' % row['cluster'])
print('Partitioner: %s' % row['partitioner'])
print('Snitch: %s\n' % row['snitch'])
if 'range_ownership' in row:
print("Range ownership:")
for entry in list(row['range_ownership'].items()):
print(' %39s [%s]' % (entry[0], ', '.join([host for host in entry[1]])))
print('')
def do_copy(self, parsed):
r"""
COPY [cqlsh only]
COPY x FROM: Imports CSV data into a Cassandra table
COPY x TO: Exports data from a Cassandra table in CSV format.
COPY <table_name> [ ( column [, ...] ) ]
FROM ( '<file_pattern_1, file_pattern_2, ... file_pattern_n>' | STDIN )
[ WITH <option>='value' [AND ...] ];
File patterns are either file names or valid python glob expressions, e.g. *.csv or folder/*.csv.
COPY <table_name> [ ( column [, ...] ) ]
TO ( '<filename>' | STDOUT )
[ WITH <option>='value' [AND ...] ];
Available common COPY options and defaults:
DELIMITER=',' - character that appears between records
QUOTE='"' - quoting character to be used to quote fields
ESCAPE='\' - character to appear before the QUOTE char when quoted
HEADER=false - whether to ignore the first line
NULL='' - string that represents a null value
DATETIMEFORMAT= - timestamp strftime format
'%Y-%m-%d %H:%M:%S%z' defaults to time_format value in cqlshrc
MAXATTEMPTS=5 - the maximum number of attempts per batch or range
REPORTFREQUENCY=0.25 - the frequency with which we display status updates in seconds
DECIMALSEP='.' - the separator for decimal values
THOUSANDSSEP='' - the separator for thousands digit groups
BOOLSTYLE='True,False' - the representation for booleans, case insensitive, specify true followed by false,
for example yes,no or 1,0
NUMPROCESSES=n - the number of worker processes, by default the number of cores minus one
capped at 16
CONFIGFILE='' - a configuration file with the same format as .cqlshrc (see the Python ConfigParser
documentation) where you can specify WITH options under the following optional
sections: [copy], [copy-to], [copy-from], [copy:ks.table], [copy-to:ks.table],
[copy-from:ks.table], where <ks> is your keyspace name and <table> is your table
name. Options are read from these sections, in the order specified
above, and command line options always override options in configuration files.
Depending on the COPY direction, only the relevant copy-from or copy-to sections
are used. If no configfile is specified then .cqlshrc is searched instead.
RATEFILE='' - an optional file where to print the output statistics
Available COPY FROM options and defaults:
CHUNKSIZE=5000 - the size of chunks passed to worker processes
INGESTRATE=100000 - an approximate ingest rate in rows per second
MINBATCHSIZE=10 - the minimum size of an import batch
MAXBATCHSIZE=20 - the maximum size of an import batch
MAXROWS=-1 - the maximum number of rows, -1 means no maximum
SKIPROWS=0 - the number of rows to skip
SKIPCOLS='' - a comma separated list of column names to skip
MAXPARSEERRORS=-1 - the maximum global number of parsing errors, -1 means no maximum
MAXINSERTERRORS=1000 - the maximum global number of insert errors, -1 means no maximum
ERRFILE='' - a file where to store all rows that could not be imported, by default this is
import_ks_table.err where <ks> is your keyspace and <table> is your table name.
PREPAREDSTATEMENTS=True - whether to use prepared statements when importing, by default True. Set this to
False if you don't mind shifting data parsing to the cluster. The cluster will also
have to compile every batch statement. For large and oversized clusters
this will result in a faster import but for smaller clusters it may generate
timeouts.
TTL=3600 - the time to live in seconds, by default data will not expire
Available COPY TO options and defaults:
ENCODING='utf8' - encoding for CSV output
PAGESIZE='1000' - the page size for fetching results
PAGETIMEOUT=10 - the page timeout in seconds for fetching results
BEGINTOKEN='' - the minimum token string to consider when exporting data
ENDTOKEN='' - the maximum token string to consider when exporting data
MAXREQUESTS=6 - the maximum number of requests each worker process can work on in parallel
MAXOUTPUTSIZE='-1' - the maximum size of the output file measured in number of lines,
beyond this maximum the output file will be split into segments,
-1 means unlimited.
FLOATPRECISION=5 - the number of digits displayed after the decimal point for cql float values
DOUBLEPRECISION=12 - the number of digits displayed after the decimal point for cql double values
When entering CSV data on STDIN, you can use the sequence "\."
on a line by itself to end the data input.
"""
ks = self.cql_unprotect_name(parsed.get_binding('ksname', None))
if ks is None:
ks = self.current_keyspace
if ks is None:
raise NoKeyspaceError("Not in any keyspace.")
table = self.cql_unprotect_name(parsed.get_binding('cfname'))
columns = parsed.get_binding('colnames', None)
if columns is not None:
columns = list(map(self.cql_unprotect_name, columns))
else:
# default to all known columns
columns = self.get_column_names(ks, table)
fname = parsed.get_binding('fname', None)
if fname is not None:
fname = self.cql_unprotect_value(fname)
copyoptnames = list(map(six.text_type.lower, parsed.get_binding('optnames', ())))
copyoptvals = list(map(self.cql_unprotect_value, parsed.get_binding('optvals', ())))
opts = dict(list(zip(copyoptnames, copyoptvals)))
direction = parsed.get_binding('dir').upper()
if direction == 'FROM':
task = ImportTask(self, ks, table, columns, fname, opts, self.conn.protocol_version, CONFIG_FILE)
elif direction == 'TO':
task = ExportTask(self, ks, table, columns, fname, opts, self.conn.protocol_version, CONFIG_FILE)
else:
raise SyntaxError("Unknown direction %s" % direction)
task.run()
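# Usage sketch (hypothetical keyspace/table and file names):
#   COPY my_ks.users (id, name) TO 'users.csv' WITH HEADER=true;
#   COPY my_ks.users FROM 'users*.csv' WITH CHUNKSIZE=1000 AND INGESTRATE=50000;
# The WITH options are collected into the `opts` dict above and handed to
# ImportTask / ExportTask unchanged.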
def do_show(self, parsed):
"""
SHOW [cqlsh only]
Displays information about the current cqlsh session. Can be called in
the following ways:
SHOW VERSION
Shows the version and build of the connected Cassandra instance, as
well as the version of the CQL spec that the connected Cassandra
instance understands.
SHOW HOST
Shows where cqlsh is currently connected.
SHOW SESSION <sessionid>
Pretty-prints the requested tracing session.
"""
showwhat = parsed.get_binding('what').lower()
if showwhat == 'version':
self.get_connection_versions()
self.show_version()
elif showwhat == 'host':
self.show_host()
elif showwhat.startswith('session'):
session_id = parsed.get_binding('sessionid').lower()
self.show_session(UUID(session_id))
else:
self.printerr('Wait, how do I show %r?' % (showwhat,))
def do_source(self, parsed):
"""
SOURCE [cqlsh only]
Executes a file containing CQL statements. Gives the output for each
statement in turn, if any, or any errors that occur along the way.
Errors do NOT abort execution of the CQL source file.
Usage:
SOURCE '<file>';
That is, the path to the file to be executed must be given inside a
string literal. The path is interpreted relative to the current working
directory. The tilde shorthand notation ('~/mydir') is supported for
referring to $HOME.
See also the --file option to cqlsh.
"""
fname = parsed.get_binding('fname')
fname = os.path.expanduser(self.cql_unprotect_value(fname))
try:
encoding, bom_size = get_file_encoding_bomsize(fname)
f = codecs.open(fname, 'r', encoding)
f.seek(bom_size)
except IOError as e:
self.printerr('Could not open %r: %s' % (fname, e))
return
username = self.auth_provider.username if self.auth_provider else None
password = self.auth_provider.password if self.auth_provider else None
subshell = Shell(self.hostname, self.port, color=self.color,
username=username, password=password,
encoding=self.encoding, stdin=f, tty=False, use_conn=self.conn,
cqlver=self.cql_version, keyspace=self.current_keyspace,
tracing_enabled=self.tracing_enabled,
display_nanotime_format=self.display_nanotime_format,
display_timestamp_format=self.display_timestamp_format,
display_date_format=self.display_date_format,
display_float_precision=self.display_float_precision,
display_double_precision=self.display_double_precision,
display_timezone=self.display_timezone,
max_trace_wait=self.max_trace_wait, ssl=self.ssl,
request_timeout=self.session.default_timeout,
connect_timeout=self.conn.connect_timeout,
is_subshell=True)
# duplicate coverage related settings in subshell
if self.coverage:
subshell.coverage = True
subshell.coveragerc_path = self.coveragerc_path
subshell.cmdloop()
f.close()
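# Usage sketch (hypothetical path): SOURCE '~/scripts/setup.cql';
# The file runs in a nested Shell with tty=False on the same connection, so each
# statement's output and errors are shown, but errors do not abort the rest of
# the file.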
def do_capture(self, parsed):
"""
CAPTURE [cqlsh only]
Begins capturing command output and appending it to a specified file.
Output will not be shown at the console while it is captured.
Usage:
CAPTURE '<file>';
CAPTURE OFF;
CAPTURE;
That is, the path to the file to be appended to must be given inside a
string literal. The path is interpreted relative to the current working
directory. The tilde shorthand notation ('~/mydir') is supported for
referring to $HOME.
Only query result output is captured. Errors and output from cqlsh-only
commands will still be shown in the cqlsh session.
To stop capturing output and show it in the cqlsh session again, use
CAPTURE OFF.
To inspect the current capture configuration, use CAPTURE with no
arguments.
"""
fname = parsed.get_binding('fname')
if fname is None:
if self.shunted_query_out is not None:
print("Currently capturing query output to %r." % (self.query_out.name,))
else:
print("Currently not capturing query output.")
return
if fname.upper() == 'OFF':
if self.shunted_query_out is None:
self.printerr('Not currently capturing output.')
return
self.query_out.close()
self.query_out = self.shunted_query_out
self.color = self.shunted_color
self.shunted_query_out = None
del self.shunted_color
return
if self.shunted_query_out is not None:
self.printerr('Already capturing output to %s. Use CAPTURE OFF'
' to disable.' % (self.query_out.name,))
return
fname = os.path.expanduser(self.cql_unprotect_value(fname))
try:
f = open(fname, 'a')
except IOError as e:
self.printerr('Could not open %r for append: %s' % (fname, e))
return
self.shunted_query_out = self.query_out
self.shunted_color = self.color
self.query_out = f
self.color = False
print('Now capturing query output to %r.' % (fname,))
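# Usage sketch (hypothetical path):
#   CAPTURE '~/out/query_results.txt';   (query output now appends to that file)
#   CAPTURE;                             (report the current capture target)
#   CAPTURE OFF;                         (restore console output and color)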
def do_tracing(self, parsed):
"""
TRACING [cqlsh]
Enables or disables request tracing.
TRACING ON
Enables tracing for all further requests.
TRACING OFF
Disables tracing.
TRACING
TRACING with no arguments shows the current tracing status.
"""
self.tracing_enabled = SwitchCommand("TRACING", "Tracing").execute(self.tracing_enabled, parsed, self.printerr)
def do_expand(self, parsed):
"""
EXPAND [cqlsh]
Enables or disables expanded (vertical) output.
EXPAND ON
Enables expanded (vertical) output.
EXPAND OFF
Disables expanded (vertical) output.
EXPAND
EXPAND with no arguments shows the current value of expand setting.
"""
self.expand_enabled = SwitchCommand("EXPAND", "Expanded output").execute(self.expand_enabled, parsed, self.printerr)
def do_consistency(self, parsed):
"""
CONSISTENCY [cqlsh only]
Overrides default consistency level (default level is ONE).
CONSISTENCY <level>
Sets consistency level for future requests.
Valid consistency levels:
ANY, ONE, TWO, THREE, QUORUM, ALL, LOCAL_ONE, LOCAL_QUORUM, EACH_QUORUM, SERIAL and LOCAL_SERIAL.
SERIAL and LOCAL_SERIAL may be used only for SELECTs; will be rejected with updates.
CONSISTENCY
CONSISTENCY with no arguments shows the current consistency level.
"""
level = parsed.get_binding('level')
if level is None:
print('Current consistency level is %s.' % (cassandra.ConsistencyLevel.value_to_name[self.consistency_level]))
return
self.consistency_level = cassandra.ConsistencyLevel.name_to_value[level.upper()]
print('Consistency level set to %s.' % (level.upper(),))
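# Usage sketch: CONSISTENCY QUORUM
# Sets self.consistency_level for subsequent statements (see perform_statement
# above); CONSISTENCY with no argument reports the current level.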
def do_serial(self, parsed):
"""
SERIAL CONSISTENCY [cqlsh only]
Overrides serial consistency level (default level is SERIAL).
SERIAL CONSISTENCY <level>
Sets consistency level for future conditional updates.
Valid consistency levels:
SERIAL, LOCAL_SERIAL.
SERIAL CONSISTENCY
SERIAL CONSISTENCY with no arguments shows the current consistency level.
"""
level = parsed.get_binding('level')
if level is None:
print('Current serial consistency level is %s.' % (cassandra.ConsistencyLevel.value_to_name[self.serial_consistency_level]))
return
self.serial_consistency_level = cassandra.ConsistencyLevel.name_to_value[level.upper()]
print('Serial consistency level set to %s.' % (level.upper(),))
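# Usage sketch: SERIAL CONSISTENCY LOCAL_SERIAL
# Affects only conditional (LWT) updates, via the serial_consistency_level passed
# to SimpleStatement in perform_statement.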
def do_login(self, parsed):
"""
LOGIN [cqlsh only]
Changes login information without requiring restart.
LOGIN <username> (<password>)
Login using the specified username. If a password is specified, it will be used;
otherwise, you will be prompted to enter it.
"""
username = parsed.get_binding('username')
password = parsed.get_binding('password')
if password is None:
password = getpass.getpass()
else:
password = password[1:-1]
auth_provider = PlainTextAuthProvider(username=username, password=password)
conn = Cluster(contact_points=(self.hostname,), port=self.port, cql_version=self.conn.cql_version,
protocol_version=self.conn.protocol_version,
auth_provider=auth_provider,
ssl_options=self.conn.ssl_options,
load_balancing_policy=WhiteListRoundRobinPolicy([self.hostname]),
control_connection_timeout=self.conn.connect_timeout,
connect_timeout=self.conn.connect_timeout)
if self.current_keyspace:
session = conn.connect(self.current_keyspace)
else:
session = conn.connect()
# Copy session properties
session.default_timeout = self.session.default_timeout
session.row_factory = self.session.row_factory
session.default_consistency_level = self.session.default_consistency_level
session.max_trace_wait = self.session.max_trace_wait
# Update after we've connected in case we fail to authenticate
self.conn = conn
self.auth_provider = auth_provider
self.username = username
self.session = session
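# Usage sketch (hypothetical credentials): LOGIN alice 'wonderland'
# A quoted password is unwrapped above (password[1:-1]); omitting it triggers a
# getpass prompt. A new Cluster/Session is established before the shell's own
# connection attributes are replaced, so a failed login leaves the current
# session intact.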
def do_exit(self, parsed=None):
"""
EXIT/QUIT [cqlsh only]
Exits cqlsh.
"""
self.stop = True
if self.owns_connection:
self.conn.shutdown()
do_quit = do_exit
def do_clear(self, parsed):
"""
CLEAR/CLS [cqlsh only]
Clears the console.
"""
import subprocess
subprocess.call(['clear', 'cls'][is_win], shell=True)
do_cls = do_clear
def do_debug(self, parsed):
import pdb
pdb.set_trace()
def get_help_topics(self):
topics = [t[3:] for t in dir(self) if t.startswith('do_') and getattr(self, t, None).__doc__]
for hide_from_help in ('quit',):
topics.remove(hide_from_help)
return topics
def columnize(self, slist, *a, **kw):
return cmd.Cmd.columnize(self, sorted([u.upper() for u in slist]), *a, **kw)
def do_help(self, parsed):
"""
HELP [cqlsh only]
Gives information about cqlsh commands. To see available topics,
enter "HELP" without any arguments. To see help on a topic,
use "HELP <topic>".
"""
topics = parsed.get_binding('topic', ())
if not topics:
shell_topics = [t.upper() for t in self.get_help_topics()]
self.print_topics("\nDocumented shell commands:", shell_topics, 15, 80)
cql_topics = [t.upper() for t in cqldocs.get_help_topics()]
self.print_topics("CQL help topics:", cql_topics, 15, 80)
return
for t in topics:
if t.lower() in self.get_help_topics():
doc = getattr(self, 'do_' + t.lower()).__doc__
self.stdout.write(doc + "\n")
elif t.lower() in cqldocs.get_help_topics():
urlpart = cqldocs.get_help_topic(t)
if urlpart is not None:
url = "%s#%s" % (CASSANDRA_CQL_HTML, urlpart)
if self.browser is not None:
opened = webbrowser.get(self.browser).open_new_tab(url)
else:
opened = webbrowser.open_new_tab(url)
if not opened:
self.printerr("*** No browser to display CQL help. URL for help topic %s : %s" % (t, url))
else:
self.printerr("*** No help on %s" % (t,))
def do_unicode(self, parsed):
"""
Textual input/output
When control characters, or other characters which can't be encoded
in your current locale, are found in values of 'text' or 'ascii'
types, it will be shown as a backslash escape. If color is enabled,
any such backslash escapes will be shown in a different color from
the surrounding text.
Unicode code points in your data will be output intact, if the
encoding for your locale is capable of decoding them. If you prefer
that non-ascii characters be shown with Python-style "\\uABCD"
escape sequences, invoke cqlsh with an ASCII locale (for example,
by setting the $LANG environment variable to "C").
"""
def do_paging(self, parsed):
"""
PAGING [cqlsh]
Enables or disables query paging.
PAGING ON
Enables query paging for all further queries.
PAGING OFF
Disables paging.
PAGING
PAGING with no arguments shows the current query paging status.
"""
(self.use_paging, requested_page_size) = SwitchCommandWithValue(
"PAGING", "Query paging", value_type=int).execute(self.use_paging, parsed, self.printerr)
if self.use_paging and requested_page_size is not None:
self.page_size = requested_page_size
if self.use_paging:
print(("Page size: {}".format(self.page_size)))
else:
self.page_size = self.default_page_size
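# Usage sketch: PAGING ON / PAGING 100 / PAGING OFF
# Passing a number both enables paging and sets self.page_size (handled by
# SwitchCommandWithValue below); turning paging OFF restores the default page size.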
def applycolor(self, text, color=None):
if not color or not self.color:
return text
return color + text + ANSI_RESET
def writeresult(self, text, color=None, newline=True, out=None):
if out is None:
out = self.query_out
# convert Exceptions, etc to text
if not isinstance(text, six.text_type):
text = "{}".format(text)
to_write = self.applycolor(text, color) + ('\n' if newline else '')
to_write = ensure_str(to_write)
out.write(to_write)
def flush_output(self):
self.query_out.flush()
def printerr(self, text, color=RED, newline=True, shownum=None):
self.statement_error = True
if shownum is None:
shownum = self.show_line_nums
if shownum:
text = '%s:%d:%s' % (self.stdin.name, self.lineno, text)
self.writeresult(text, color, newline=newline, out=sys.stderr)
def stop_coverage(self):
if self.coverage and self.cov is not None:
self.cov.stop()
self.cov.save()
self.cov = None
class SwitchCommand(object):
command = None
description = None
def __init__(self, command, desc):
self.command = command
self.description = desc
def execute(self, state, parsed, printerr):
switch = parsed.get_binding('switch')
if switch is None:
if state:
print("%s is currently enabled. Use %s OFF to disable"
% (self.description, self.command))
else:
print("%s is currently disabled. Use %s ON to enable."
% (self.description, self.command))
return state
if switch.upper() == 'ON':
if state:
printerr('%s is already enabled. Use %s OFF to disable.'
% (self.description, self.command))
return state
print('Now %s is enabled' % (self.description,))
return True
if switch.upper() == 'OFF':
if not state:
printerr('%s is not enabled.' % (self.description,))
return state
print('Disabled %s.' % (self.description,))
return False
class SwitchCommandWithValue(SwitchCommand):
"""The same as SwitchCommand except it also accepts a value in place of ON.
This returns a tuple of the form: (SWITCH_VALUE, PASSED_VALUE)
eg: PAGING 50 returns (True, 50)
PAGING OFF returns (False, None)
PAGING ON returns (True, None)
The value_type must match for the PASSED_VALUE, otherwise it will return None.
"""
def __init__(self, command, desc, value_type=int):
SwitchCommand.__init__(self, command, desc)
self.value_type = value_type
def execute(self, state, parsed, printerr):
binary_switch_value = SwitchCommand.execute(self, state, parsed, printerr)
switch = parsed.get_binding('switch')
try:
value = self.value_type(switch)
binary_switch_value = True
except (ValueError, TypeError):
value = None
return (binary_switch_value, value)
def option_with_default(cparser_getter, section, option, default=None):
try:
return cparser_getter(section, option)
except configparser.Error:
return default
def raw_option_with_default(configs, section, option, default=None):
"""
Same (almost) as option_with_default() but won't do any string interpolation.
Useful for config values that include '%' symbol, e.g. time format string.
"""
try:
return configs.get(section, option, raw=True)
except configparser.Error:
return default
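# Illustrative use (mirrors read_options() below):
#   color = option_with_default(configs.getboolean, 'ui', 'color')
#   time_format = raw_option_with_default(configs, 'ui', 'time_format', DEFAULT_TIMESTAMP_FORMAT)
# The raw variant matters for values containing '%', which ConfigParser would
# otherwise try to interpolate.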
def should_use_color():
if not sys.stdout.isatty():
return False
if os.environ.get('TERM', '') in ('dumb', ''):
return False
try:
import subprocess
p = subprocess.Popen(['tput', 'colors'], stdout=subprocess.PIPE)
stdout, _ = p.communicate()
if int(stdout.strip()) < 8:
return False
except (OSError, ImportError, ValueError):
# oh well, we tried. at least we know there's a $TERM and it's
# not "dumb".
pass
return True
def is_file_secure(filename):
if is_win:
# We simply cannot tell whether the file is secure on Windows,
# because os.stat().st_uid is always 0 and os.stat().st_mode is meaningless
return True
try:
st = os.stat(filename)
except OSError as e:
if e.errno != errno.ENOENT:
raise
return True  # the file doesn't exist, so its security is irrelevant
uid = os.getuid()
# Skip enforcing the file owner and UID matching for the root user (uid == 0).
# This is to allow "sudo cqlsh" to work with user owned credentials file.
return (uid == 0 or st.st_uid == uid) and stat.S_IMODE(st.st_mode) & (stat.S_IRGRP | stat.S_IROTH) == 0
def read_options(cmdlineargs, environment):
configs = configparser.SafeConfigParser() if sys.version_info < (3, 2) else configparser.ConfigParser()
configs.read(CONFIG_FILE)
rawconfigs = configparser.RawConfigParser()
rawconfigs.read(CONFIG_FILE)
username_from_cqlshrc = option_with_default(configs.get, 'authentication', 'username')
password_from_cqlshrc = option_with_default(rawconfigs.get, 'authentication', 'password')
if username_from_cqlshrc or password_from_cqlshrc:
if password_from_cqlshrc and not is_file_secure(CONFIG_FILE):
print("\nWarning: Password is found in an insecure cqlshrc file. The file is owned or readable by other users on the system.",
end='', file=sys.stderr)
print("\nNotice: Credentials in the cqlshrc file are deprecated and will be ignored in the future."
"\nPlease use a credentials file to specify the username and password.\n", file=sys.stderr)
optvalues = optparse.Values()
optvalues.username = None
optvalues.password = None
optvalues.credentials = os.path.expanduser(option_with_default(configs.get, 'authentication', 'credentials',
os.path.join(HISTORY_DIR, 'credentials')))
optvalues.keyspace = option_with_default(configs.get, 'authentication', 'keyspace')
optvalues.browser = option_with_default(configs.get, 'ui', 'browser', None)
optvalues.completekey = option_with_default(configs.get, 'ui', 'completekey',
DEFAULT_COMPLETEKEY)
optvalues.color = option_with_default(configs.getboolean, 'ui', 'color')
optvalues.time_format = raw_option_with_default(configs, 'ui', 'time_format',
DEFAULT_TIMESTAMP_FORMAT)
optvalues.nanotime_format = raw_option_with_default(configs, 'ui', 'nanotime_format',
DEFAULT_NANOTIME_FORMAT)
optvalues.date_format = raw_option_with_default(configs, 'ui', 'date_format',
DEFAULT_DATE_FORMAT)
optvalues.float_precision = option_with_default(configs.getint, 'ui', 'float_precision',
DEFAULT_FLOAT_PRECISION)
optvalues.double_precision = option_with_default(configs.getint, 'ui', 'double_precision',
DEFAULT_DOUBLE_PRECISION)
optvalues.field_size_limit = option_with_default(configs.getint, 'csv', 'field_size_limit', csv.field_size_limit())
optvalues.max_trace_wait = option_with_default(configs.getfloat, 'tracing', 'max_trace_wait',
DEFAULT_MAX_TRACE_WAIT)
optvalues.timezone = option_with_default(configs.get, 'ui', 'timezone', None)
optvalues.debug = False
optvalues.coverage = False
if 'CQLSH_COVERAGE' in environment.keys():
optvalues.coverage = True
optvalues.file = None
optvalues.ssl = option_with_default(configs.getboolean, 'connection', 'ssl', DEFAULT_SSL)
optvalues.encoding = option_with_default(configs.get, 'ui', 'encoding', UTF8)
optvalues.tty = option_with_default(configs.getboolean, 'ui', 'tty', sys.stdin.isatty())
optvalues.protocol_version = option_with_default(configs.getint, 'protocol', 'version', None)
optvalues.cqlversion = option_with_default(configs.get, 'cql', 'version', None)
optvalues.connect_timeout = option_with_default(configs.getint, 'connection', 'timeout', DEFAULT_CONNECT_TIMEOUT_SECONDS)
optvalues.request_timeout = option_with_default(configs.getint, 'connection', 'request_timeout', DEFAULT_REQUEST_TIMEOUT_SECONDS)
optvalues.execute = None
optvalues.insecure_password_without_warning = False
(options, arguments) = parser.parse_args(cmdlineargs, values=optvalues)
if not is_file_secure(options.credentials):
print("\nWarning: Credentials file '{0}' exists but is not used, because:"
"\n a. the file owner is not the current user; or"
"\n b. the file is readable by group or other."
"\nPlease ensure the file is owned by the current user and is not readable by group or other."
"\nOn a Linux or UNIX-like system, you often can do this by using the `chown` and `chmod` commands:"
"\n chown YOUR_USERNAME credentials"
"\n chmod 600 credentials\n".format(options.credentials),
file=sys.stderr)
options.credentials = '' # ConfigParser.read() will ignore unreadable files
if not options.username:
credentials = configparser.SafeConfigParser() if sys.version_info < (3, 2) else configparser.ConfigParser()
credentials.read(options.credentials)
# use the username from the credentials file but fall back to cqlshrc if the username is absent from the command line parameters
options.username = option_with_default(credentials.get, 'plain_text_auth', 'username', username_from_cqlshrc)
if not options.password:
rawcredentials = configparser.RawConfigParser()
rawcredentials.read(options.credentials)
# handling password in the same way as username, priority cli > credentials > cqlshrc
options.password = option_with_default(rawcredentials.get, 'plain_text_auth', 'password', password_from_cqlshrc)
elif not options.insecure_password_without_warning:
print("\nWarning: Using a password on the command line interface can be insecure."
"\nRecommendation: use the credentials file to securely provide the password.\n", file=sys.stderr)
# Make sure some user values read from the command line are in unicode
options.execute = maybe_ensure_text(options.execute)
options.username = maybe_ensure_text(options.username)
options.password = maybe_ensure_text(options.password)
options.keyspace = maybe_ensure_text(options.keyspace)
hostname = option_with_default(configs.get, 'connection', 'hostname', DEFAULT_HOST)
port = option_with_default(configs.get, 'connection', 'port', DEFAULT_PORT)
try:
options.connect_timeout = int(options.connect_timeout)
except ValueError:
parser.error('"%s" is not a valid connect timeout.' % (options.connect_timeout,))
options.connect_timeout = DEFAULT_CONNECT_TIMEOUT_SECONDS
try:
options.request_timeout = int(options.request_timeout)
except ValueError:
parser.error('"%s" is not a valid request timeout.' % (options.request_timeout,))
options.request_timeout = DEFAULT_REQUEST_TIMEOUT_SECONDS
hostname = environment.get('CQLSH_HOST', hostname)
port = environment.get('CQLSH_PORT', port)
if len(arguments) > 0:
hostname = arguments[0]
if len(arguments) > 1:
port = arguments[1]
if options.file or options.execute:
options.tty = False
if options.execute and not options.execute.endswith(';'):
options.execute += ';'
if optvalues.color in (True, False):
options.color = optvalues.color
else:
if options.file is not None:
options.color = False
else:
options.color = should_use_color()
if options.cqlversion is not None:
options.cqlversion, cqlvertup = full_cql_version(options.cqlversion)
if cqlvertup[0] < 3:
parser.error('%r is not a supported CQL version.' % options.cqlversion)
options.cqlmodule = cql3handling
try:
port = int(port)
except ValueError:
parser.error('%r is not a valid port number.' % port)
return options, hostname, port
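# Illustrative cqlshrc fragment covering options read above (assumed values):
#   [authentication]
#   credentials = ~/.cassandra/credentials
#   keyspace = my_ks
#   [connection]
#   hostname = 127.0.0.1
#   port = 9042
#   request_timeout = 10
#   [ui]
#   color = on
# and, in the separate credentials file:
#   [plain_text_auth]
#   username = alice
#   password = wonderland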
def setup_cqlruleset(cqlmodule):
global cqlruleset
cqlruleset = cqlmodule.CqlRuleSet
cqlruleset.append_rules(cqlshhandling.cqlsh_extra_syntax_rules)
for rulename, termname, func in cqlshhandling.cqlsh_syntax_completers:
cqlruleset.completer_for(rulename, termname)(func)
cqlruleset.commands_end_with_newline.update(cqlshhandling.my_commands_ending_with_newline)
def setup_cqldocs(cqlmodule):
global cqldocs
cqldocs = cqlmodule.cqldocs
def init_history():
if readline is not None:
try:
readline.read_history_file(HISTORY)
except IOError:
pass
delims = readline.get_completer_delims()
delims = delims.replace("'", "")
delims += '.'
readline.set_completer_delims(delims)
def save_history():
if readline is not None:
try:
readline.write_history_file(HISTORY)
except IOError:
pass
def main(options, hostname, port):
setup_cqlruleset(options.cqlmodule)
setup_cqldocs(options.cqlmodule)
init_history()
csv.field_size_limit(options.field_size_limit)
if options.file is None:
stdin = None
else:
try:
encoding, bom_size = get_file_encoding_bomsize(options.file)
stdin = codecs.open(options.file, 'r', encoding)
stdin.seek(bom_size)
except IOError as e:
sys.exit("Can't open %r: %s" % (options.file, e))
if options.debug:
sys.stderr.write("Using CQL driver: %s\n" % (cassandra,))
sys.stderr.write("Using connect timeout: %s seconds\n" % (options.connect_timeout,))
sys.stderr.write("Using '%s' encoding\n" % (options.encoding,))
sys.stderr.write("Using ssl: %s\n" % (options.ssl,))
# create timezone based on settings, environment or auto-detection
timezone = None
if options.timezone or 'TZ' in os.environ:
try:
import pytz
if options.timezone:
try:
timezone = pytz.timezone(options.timezone)
except Exception:
sys.stderr.write("Warning: could not recognize timezone '%s' specified in cqlshrc\n\n" % (options.timezone))
if 'TZ' in os.environ:
try:
timezone = pytz.timezone(os.environ['TZ'])
except Exception:
sys.stderr.write("Warning: could not recognize timezone '%s' from environment value TZ\n\n" % (os.environ['TZ']))
except ImportError:
sys.stderr.write("Warning: a timezone is defined, but the 'pytz' module needed for timezone conversion is not installed. Timestamps will be displayed in the UTC timezone.\n\n")
# try auto-detect timezone if tzlocal is installed
if not timezone:
try:
from tzlocal import get_localzone
timezone = get_localzone()
except ImportError:
# we silently ignore and fall back to UTC unless a custom timestamp format (which likely
# does contain a TZ part) was specified
if options.time_format != DEFAULT_TIMESTAMP_FORMAT:
sys.stderr.write("Warning: custom timestamp format specified in cqlshrc, "
"but local timezone could not be detected.\n"
"Either install Python 'tzlocal' module for auto-detection "
"or specify client timezone in your cqlshrc.\n\n")
try:
shell = Shell(hostname,
port,
color=options.color,
username=options.username,
password=options.password,
stdin=stdin,
tty=options.tty,
completekey=options.completekey,
browser=options.browser,
protocol_version=options.protocol_version,
cqlver=options.cqlversion,
keyspace=options.keyspace,
display_timestamp_format=options.time_format,
display_nanotime_format=options.nanotime_format,
display_date_format=options.date_format,
display_float_precision=options.float_precision,
display_double_precision=options.double_precision,
display_timezone=timezone,
max_trace_wait=options.max_trace_wait,
ssl=options.ssl,
single_statement=options.execute,
request_timeout=options.request_timeout,
connect_timeout=options.connect_timeout,
encoding=options.encoding)
except KeyboardInterrupt:
sys.exit('Connection aborted.')
except CQL_ERRORS as e:
sys.exit('Connection error: %s' % (e,))
except VersionNotSupported as e:
sys.exit('Unsupported CQL version: %s' % (e,))
if options.debug:
shell.debug = True
if options.coverage:
shell.coverage = True
import signal
        def handle_sighup(signum, frame):
shell.stop_coverage()
shell.do_exit()
signal.signal(signal.SIGHUP, handle_sighup)
shell.cmdloop()
save_history()
if shell.batch_mode and shell.statement_error:
sys.exit(2)
# always call this regardless of module name: when a sub-process is spawned
# on Windows then the module name is not __main__, see CASSANDRA-9304
insert_driver_hooks()
if __name__ == '__main__':
main(*read_options(sys.argv[1:], os.environ))
# vim: set ft=python et ts=4 sw=4 :
|
the-stack_0_18160 | # from flask import Flask, Blueprint
# from flask_sqlalchemy import SQLAlchemy
# from flask_login import LoginManager
# import os
from flask import Flask, jsonify, request, make_response, redirect, url_for
import jwt
import datetime
import os
from functools import wraps
from flask_sqlalchemy import SQLAlchemy
import uuid
from werkzeug.security import generate_password_hash, check_password_hash
from werkzeug.utils import secure_filename
from sqlalchemy import select
from flask_migrate import Migrate, migrate
from flask_cors import CORS
from sqlalchemy import inspect
from sqlalchemy import Table, Column, MetaData, Integer, Computed
from numpy import array
import json
from bson import json_util
#import pandas as pd
app = Flask(__name__)
app.config['SECRET_KEY'] = 'secretollave'
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///todo.db'
ABSOLUTE_PATH_TO_YOUR_FOLDER ='/home/dani/flask/static/fotosPerfil'
ABSOLUTE_PATH_TO_YOUR_PDF_FOLDER ='/home/dani/flask/static/pdf'
CORS(app)
db = SQLAlchemy(app)
migrate = Migrate(app, db)
# Models
class Usuario(db.Model):
nick = db.Column(db.String(20), primary_key=True)
Nombre_de_usuario = db.Column(db.String(50))
password = db.Column(db.String(50))
e_mail = db.Column(db.String(50), unique=True, nullable=False)
descripcion = db.Column(db.String(1000))
link = db.Column(db.String(200))
foto_de_perfil = db.Column(db.String(400))
class Sigue(db.Model):
#id = db.Column(db.Integer, primary_key=True )
Usuario_Nicka = db.Column(db.String(20), db.ForeignKey('usuario.nick'),primary_key=True)
Usuario_Nickb = db.Column(db.String(20), db.ForeignKey('usuario.nick'),primary_key=True)
class Chat(db.Model):
#Column('timestamp', TIMESTAMP(timezone=False), nullable=False, default=datetime.now())
timestamp = db.Column(db.TIMESTAMP, nullable=False,
server_default=db.func.now(),
onupdate=db.func.now())
mensaje = db.Column(db.String(1000))
Usuario_Nicka = db.Column(db.String(20), db.ForeignKey('usuario.nick'),primary_key=True)
Usuario_Nickb = db.Column(db.String(20), db.ForeignKey('usuario.nick'),primary_key=True)
class Publicacion(db.Model):
id = db.Column(Integer,primary_key=True)
#id = db.Sequence('id', start=1, increment=1)
descripcion = db.Column(db.String(1000))
#Column('timestamp', TIMESTAMP(timezone=False), nullable=False, default=datetime.now())
timestamp = db.Column(db.TIMESTAMP, nullable=False,
server_default=db.func.now(),
onupdate=db.func.now())
Usuario_Nicka = db.Column(db.String(20), db.ForeignKey('usuario.nick'))
class Propia(db.Model):
pdf = db.Column(db.String(400))
id = db.Column(db.String(20), db.ForeignKey('publicacion.id'),primary_key=True)
class Recomendacion(db.Model):
link = db.Column(db.String(200),nullable=False)
titulo = db.Column(db.String(200),nullable=False)
autor = db.Column(db.String(200),nullable=False)
id = db.Column(db.String(20), db.ForeignKey('publicacion.id'),primary_key=True)
class Tematica(db.Model):
tema = db.Column(db.String(50), primary_key=True )
class Notificaciones(db.Model):
id = db.Column(db.Integer, primary_key=True )
fecha = db.Column(db.Date)
Usuario_Nicka = db.Column(db.String(20), db.ForeignKey('usuario.nick'),primary_key=True)
class Prefiere(db.Model):
Usuario_Nicka = db.Column(db.String(20), db.ForeignKey('usuario.nick'),primary_key=True)
tema = db.Column(db.String(50), db.ForeignKey('tematica.tema'),primary_key=True)
class Trata_pub_del_tema(db.Model):
id = db.Column(db.Integer, db.ForeignKey('publicacion.id'),primary_key=True)
tema = db.Column(db.String(50), db.ForeignKey('tematica.tema'),primary_key=True)
class Gusta(db.Model):
id = db.Column(db.Integer, db.ForeignKey('publicacion.id'),primary_key=True)
Usuario_Nicka = db.Column(db.String(20), db.ForeignKey('usuario.nick'),primary_key=True)
class Comenta(db.Model):
id = db.Column(db.Integer, db.ForeignKey('publicacion.id'),primary_key=True)
Usuario_Nicka = db.Column(db.String(20), db.ForeignKey('usuario.nick'),primary_key=True)
comentario = db.Column(db.String(1000))
class Guarda(db.Model):
id = db.Column(db.Integer, db.ForeignKey('publicacion.id'),primary_key=True)
Usuario_Nicka = db.Column(db.String(20), db.ForeignKey('usuario.nick'),primary_key=True)
class Trata(db.Model):
id_publi = db.Column(db.Integer, db.ForeignKey('publicacion.id'),primary_key=True)
id_notif = db.Column(db.String(20), db.ForeignKey('notificaciones.id'),primary_key=True)
class Genera(db.Model):
id = db.Column(db.Integer, db.ForeignKey('publicacion.id'),primary_key=True)
Usuario_Nicka = db.Column(db.String(20), db.ForeignKey('usuario.nick'),primary_key=True)
class PublicacionRecomandacion:
def __init__(self,id, tipo, titulo,autor,descripcion,link,usuario,foto_de_perfil,nlikes,ncomentarios,nguardados,likemio,guardadomio):
self.id = id
self.tipo = tipo
self.titulo = titulo
self.autor = autor
self.descripcion = descripcion
self.link = link
self.usuario = usuario
self.foto_de_perfil = foto_de_perfil
self.nlikes = nlikes
self.ncomentarios = ncomentarios
self.nguardados = nguardados
self.likemio = likemio
self.guardadomio = guardadomio
def token_required(f):
@wraps(f)
def decorated(*args, **kwargs):
#token = request.args.get('token') #http://127.0.0.1:5000/route?token=djsnvidnoffofn
#data = request.get_json()
token = request.headers['token']
#token = data['token']
if not token:
return jsonify({'error': 'Token no existe'}), 403
try:
data = jwt.decode(token, app.config['SECRET_KEY'])
current_user = Usuario.query.filter_by(nick=data['nick']).first()
current_user = data['nick']
except:
return jsonify({'error': 'Token no valido'}), 403
return f(current_user,*args, **kwargs)
return decorated
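# Added note (illustrative): endpoints wrapped with @token_required expect the JWT in a
# 'token' request header, e.g. a client sends GET /protected with header  token: <jwt from /login>.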
def token_required_id(f):
@wraps(f)
def decorated(*args, **kwargs):
#token = request.args.get('token') #http://127.0.0.1:5000/route?token=djsnvidnoffofn
#data = request.get_json()
token = request.headers['token']
#token = data['token']
if not token:
return jsonify({'error': 'Token no existe'}), 403
try:
print(token)
data = jwt.decode(token, app.config['SECRET_KEY'])
current_user = Usuario.query.filter_by(nick=data['nick']).first()
current_user = data['nick']
current_id = Publicacion.query.filter_by(id=data['id']).first()
_id = data['id']
except:
return jsonify({'error': 'Token no valido'}), 403
return f(current_user,_id,*args, **kwargs)
return decorated
@app.route('/unprotected')
def unprotected():
return jsonify({'message': 'Puede entrar tol mundo'})
@app.route('/protected')
@token_required
def protected(current_user):
print(current_user)
return jsonify({'message': 'Puedes entrar si puedes'})
# Routes for registration and login
@app.route('/register', methods=['POST'])
def add_data():
data= request.get_json()
#nick = request.form.get("nick")
#password = request.form.get("password")
#e_mail = request.form.get("e_mail")
user = Usuario.query.filter_by(e_mail=data['e_mail']).first()
nick = Usuario.query.filter_by(nick=data['nick']).first()
    if user:  # if this returns something, the email already exists
        return jsonify({'error': 'Existe correo'})  # json error: email already exists
if nick:
return jsonify({'error': 'Existe nick'})
#if (check_email(e_mail) == True and check_password(data['password']) == True ):
register = Usuario(nick=data['nick'],password=generate_password_hash(data['password']), e_mail=data['e_mail'],foto_de_perfil="platon.jpg")
db.session.add(register)
db.session.commit()
token = jwt.encode({'nick' : data['nick'], 'exp': datetime.datetime.utcnow() + datetime.timedelta(minutes=30)}, app.config['SECRET_KEY'])
return jsonify({'token' : token.decode('UTF-8')})
@app.route('/login', methods=['POST'])
def login():
    # auth = request.authorization  # use this instead if you authenticate with HTTP basic auth
data= request.get_json()
if '@' in data['nickOcorreo']:
user = Usuario.query.filter_by(e_mail=data['nickOcorreo']).first()
else:
user = Usuario.query.filter_by(nick=data['nickOcorreo']).first()
if not user:
        return jsonify({'error': 'No existe ese usuario'})  # error: user does not exist
if not check_password_hash(user.password, data['password']):
        return jsonify({'error': 'Mal contraseña'})  # error: wrong password
token = jwt.encode({'nick' : data['nickOcorreo'], 'exp': datetime.datetime.utcnow() + datetime.timedelta(minutes=9999999)}, app.config['SECRET_KEY'])
return jsonify({'token' : token.decode('UTF-8')})
@app.route('/editarPerfil', methods=['GET'])
@token_required
def editarPerfilget(current_user):
s = select([Usuario.Nombre_de_usuario, Usuario.descripcion,Usuario.link, Usuario.foto_de_perfil]).where((Usuario.nick == current_user))
result = db.session.execute(s)
seguidos= db.session.query(Sigue).filter(Sigue.Usuario_Nicka == current_user ).count()
seguidores= db.session.query(Sigue).filter(Sigue.Usuario_Nickb == current_user ).count()
nposts= db.session.query(Publicacion).filter(Publicacion.Usuario_Nicka == current_user ).count()
tema = select([Prefiere.tema]).where((Prefiere.Usuario_Nicka == current_user))
temas = db.session.execute(tema)
vector = []
for row in temas:
vector += row
for row in result:
fila = {
"nick": current_user,
"nombre_de_usuario":row[0],
"descripcion":row[1],
"link":row[2],
"foto_de_perfil": 'http://51.255.50.207:5000/display/' + row[3],
"nsiguiendo": seguidos,
"nseguidores": seguidores,
"nposts": nposts,
"tematicas": vector
#"foto_de_perfil" :url_for('static', filename='fotosPerfil/' + row[3])
}
return fila
@app.route('/display/<filename>')
def foto(filename):
return redirect(url_for('static', filename='fotosPerfil/' + filename),code = 301)
@app.route('/editarPerfil', methods=['POST'])
@token_required
def editarPerfilpost(current_user):
data= request.get_json()
user = Usuario.query.filter_by(nick=current_user).first()
user.Nombre_de_usuario = data['nombre_de_usuario']
print(data['nombre_de_usuario'])
print(data['descripcion'])
print(data['link'])
print(data['tematicas'])
user.descripcion = data['descripcion']
user.link = data['link']
tematicas = data['tematicas']
for temas in tematicas:
tema = Prefiere.query.filter_by(tema=temas).first()
if not tema:
tema = Prefiere(Usuario_Nicka=current_user, tema = temas)
db.session.add(tema)
#db.session.commit()
#cambia_foto
db.session.commit()
token = jwt.encode({'nick' : current_user, 'exp': datetime.datetime.utcnow() + datetime.timedelta(minutes=30)}, app.config['SECRET_KEY'])
return jsonify({'token' : token.decode('UTF-8')})
@app.route('/actualizarImagen', methods=['POST'])
@token_required
def actualizarImagen(current_user):
user = Usuario.query.filter_by(nick=current_user).first()
if request.files['nueva_foto'] is not None: #data['cambia_foto']:
file = request.files['nueva_foto']
print(request.files['nueva_foto'])
filename = secure_filename(file.filename)
file.save(os.path.join(ABSOLUTE_PATH_TO_YOUR_FOLDER, filename))
user.foto_de_perfil = filename
db.session.commit()
token = jwt.encode({'nick' : current_user, 'exp': datetime.datetime.utcnow() + datetime.timedelta(minutes=30)}, app.config['SECRET_KEY'])
return jsonify({'token' : token.decode('UTF-8')})
@app.route('/subirPost', methods=['POST'])
@token_required
def subirPost(current_user):
data= request.get_json()
    publicacion = Publicacion(descripcion=data['descripcion'], Usuario_Nicka=current_user)  # grab the generated id
db.session.add(publicacion)
db.session.commit()
tematicas = data['tematicas']
for temas in tematicas:
temita = Tematica.query.filter_by(tema=temas).first()
if temita:
nuevo = Trata_pub_del_tema(id=publicacion.id, tema = temita.tema)
db.session.add(nuevo)
db.session.commit()
if (data['tipo']=="1"): # articulo
return jsonify({'id' : publicacion.id})
#guardarPDF(request.files['pdf'], publicacion.id)
elif(data['tipo']=="2"): # recomendacion
recomendacion = Recomendacion(link=data['link'],titulo=data['titulo'], autor = data['autor'], id = publicacion.id)
db.session.add(recomendacion)
db.session.commit()
token = jwt.encode({'nick' : current_user, 'exp': datetime.datetime.utcnow() + datetime.timedelta(minutes=30)}, app.config['SECRET_KEY'])
return jsonify({'token' : token.decode('UTF-8')})
@app.route('/subirPdf', methods=['POST'])
@token_required
def guardarPDF(current_user):
_id=request.headers['id']
propia = Propia( id = _id)
db.session.add(propia)
db.session.commit()
propia = Propia.query.filter_by(id=_id).first()
if request.files['pdf'] is not None:
file = request.files['pdf']
#print(pdf)
filename = secure_filename(file.filename)
file.save(os.path.join(ABSOLUTE_PATH_TO_YOUR_PDF_FOLDER, filename))
propia.pdf = filename
db.session.add(propia)
db.session.commit()
else:
print("pdf nulo")
token = jwt.encode({'nick' : current_user, 'exp': datetime.datetime.utcnow() + datetime.timedelta(minutes=30)}, app.config['SECRET_KEY'])
return jsonify({'token' : token.decode('UTF-8')})
# @app.route('/misArticulos', methods=['GET'])
# @token_required
# def misArticulos(current_user):
# data= request.get_json()
# x = select([Usuario.Nombre_de_usuario, Usuario.foto_de_perfil]).where((Usuario.nick == current_user))
# resultb = db.session.execute(x)
# Nombre_de_usuario = ""
# foto_de_perfil= 'http://51.255.50.207:5000/display/'
# for b in resultb:
# Nombre_de_usuario=b.Nombre_de_usuario
# foto_de_perfil += b.foto_de_perfil
# id = select([Publicacion.id]).where(Publicacion.Usuario_Nicka == current_user ).order_by(Publicacion.id.desc())
# descripcion = select( [Publicacion.descripcion]).where(Publicacion.Usuario_Nicka == current_user ).order_by(Publicacion.id.desc())
# timestamp = select([Publicacion.timestamp]).where(Publicacion.Usuario_Nicka == current_user).order_by(Publicacion.id.desc())
# results = db.session.execute(id)
# resultss = db.session.execute(descripcion)
# resultsss = db.session.execute(timestamp)
# #ver si ese ID existe en recomendacion sino es un post propio
# vector0 = []
# vector1 = []
# vector2 = []
# Gustas = []
# Comentarios= []
# Guardados= []
# for r in results:
# #print(str(r))
# vector0 += r
# Gustas += str(db.session.query(Gusta).filter(Gusta.id == 'r' ).count())
# Comentarios += str(db.session.query(Comenta).filter(Comenta.id == 'r' ).count())
# Guardados += str(db.session.query(Guarda).filter(Guarda.id == 'r').count())
# for r in resultss:
# vector1 += r
# for r in resultsss:
# vector2 += r
# vector3 = []
# vector4 = []
# vector5 = []
# for r in vector0:
# link = select([Recomendacion.link]).where((Recomendacion.id == r))
# titulo = select([Recomendacion.titulo]).where((Recomendacion.id == r))
# autor = select([Recomendacion.autor]).where((Recomendacion.id == r))
# resulta = db.session.execute(link)
# resultaa = db.session.execute(titulo)
# resultaaa = db.session.execute(autor)
# for a in resulta:
# vector3 +=a
# for a in resultaa:
# vector4 +=a
# for a in resultaaa:
# vector5 +=a
# #def __init__(self,id, tipo, titulo,autor,descripcion,link,usuario,foto_de_perfil,nlikes,ncomentarios,nguardados,likemio,guardadomio):
# finalDictionary = {}
# i=0
# x=0
# for x in range(len(vector0)):
# print("EL ID ES: ", vector0[x])
# existe = db.session.query(Recomendacion).filter(Recomendacion.id == vector0[x] ).count()
# print("EXISTE ES: ", existe)
# if bool(existe):
# print(i , "bool: " , existe, " e x= ", vector0[x])
# GustaMio = db.session.query(Gusta).filter(Gusta.Usuario_Nicka == current_user, Gusta.id == vector0[x] ).count()
# GuardadoMio = db.session.query(Guarda).filter(Guarda.Usuario_Nicka == current_user, Guarda.id == vector0[x]).count()
# #post = PublicacionRecomandacion(vector0[i],2,vector4[i],vector5[i],vector1[i],vector3[i], Nombre_de_usuario, foto_de_perfil,Gustas[i],Comentarios[i],Guardados[i],bool(GustaMio),bool(GuardadoMio))
# #vectorFinal +=post
# #dictionary = {'Apple': 3, 'Grapes': 1}
# #array = {'key' : vector0[x], 'value' : post}
# #finalDictionary[vector0[x]] = [ str(vector0[i]) , str(vector4[i]) ] ## ESTO VA
# finalDictionary[vector0[x]] = { 'tipo' : 2 ,'titulo' : str(vector4[i]), 'autor' : str(vector5[i]),'descripcion' : str(vector1[i]),'link' : str(vector3[i]),'usuario' : Nombre_de_usuario,'foto_de_perfil' : foto_de_perfil,'nlikes' : str(Gustas[i]),'nlikemio' : bool(GustaMio),'ncomentarios' : str(Comentarios[i]),'nguardados' : str(Guardados[i]),'guardadomio' : bool(GuardadoMio) }
# #print(result)
# #vectorFinal.append(post)
# i = i + 1
# #return json.dumps(vectorFinal)
# #finalDictionary[vector0[x]] = [str(vector0[i]) +'value' : str(vector4[i])}]
# return json.dumps(finalDictionary, indent = i)
@app.route('/display2/<filename>')
def pdf(filename):
return redirect(url_for('static', filename='pdf/' + filename),code = 301)
@app.route('/misRecomendaciones', methods=['GET'])
@token_required
def getPostsRecomendados(current_user):
data= request.get_json()
x = select([Usuario.Nombre_de_usuario, Usuario.foto_de_perfil]).where((Usuario.nick == current_user))
resultb = db.session.execute(x)
Nombre_de_usuario = ""
foto_de_perfil= 'http://51.255.50.207:5000/display/'
for b in resultb:
Nombre_de_usuario=b.Nombre_de_usuario
foto_de_perfil += b.foto_de_perfil
id = select([Publicacion.id]).where(Publicacion.Usuario_Nicka == current_user ).order_by(Publicacion.id.desc())
descripcion = select( [Publicacion.descripcion]).where(Publicacion.Usuario_Nicka == current_user ).order_by(Publicacion.id.desc())
timestamp = select([Publicacion.timestamp]).where(Publicacion.Usuario_Nicka == current_user).order_by(Publicacion.id.desc())
results = db.session.execute(id)
resultss = db.session.execute(descripcion)
resultsss = db.session.execute(timestamp)
vector0 = []
vector1 = []
vector2 = []
Gustas = []
Comentarios= []
Guardados= []
    for r in results:
        vector0 += r
        # r is a one-element row holding the publication id; count likes/comments/saves for it
        Gustas.append(db.session.query(Gusta).filter(Gusta.id == r[0]).count())
        Comentarios.append(db.session.query(Comenta).filter(Comenta.id == r[0]).count())
        Guardados.append(db.session.query(Guarda).filter(Guarda.id == r[0]).count())
for r in resultss:
vector1 += r
for r in resultsss:
vector2 += r
vector3 = []
vector4 = []
vector5 = []
for r in vector0:
link = select([Recomendacion.link]).where((Recomendacion.id == r))
titulo = select([Recomendacion.titulo]).where((Recomendacion.id == r))
autor = select([Recomendacion.autor]).where((Recomendacion.id == r))
resulta = db.session.execute(link)
resultaa = db.session.execute(titulo)
resultaaa = db.session.execute(autor)
for a in resulta:
vector3 +=a
for a in resultaa:
vector4 +=a
for a in resultaaa:
vector5 +=a
#def __init__(self,id, tipo, titulo,autor,descripcion,link,usuario,foto_de_perfil,nlikes,ncomentarios,nguardados,likemio,guardadomio):
finalDictionary = {}
i=0
x=0
for x in range(len(vector0)):
print("EL ID ES: ", vector0[x])
existe = db.session.query(Recomendacion).filter(Recomendacion.id == vector0[x] ).count()
print("EXISTE ES: ", existe)
        # check whether this ID exists in Recomendacion; otherwise it is an own (article) post
if bool(existe):
print(i , "bool: " , existe, " e x= ", vector0[x])
GustaMio = db.session.query(Gusta).filter(Gusta.Usuario_Nicka == current_user, Gusta.id == vector0[x] ).count()
GuardadoMio = db.session.query(Guarda).filter(Guarda.Usuario_Nicka == current_user, Guarda.id == vector0[x]).count()
#post = PublicacionRecomandacion(vector0[i],2,vector4[i],vector5[i],vector1[i],vector3[i], Nombre_de_usuario, foto_de_perfil,Gustas[i],Comentarios[i],Guardados[i],bool(GustaMio),bool(GuardadoMio))
#vectorFinal +=post
#dictionary = {'Apple': 3, 'Grapes': 1}
#array = {'key' : vector0[x], 'value' : post}
#finalDictionary[vector0[x]] = [ str(vector0[i]) , str(vector4[i]) ] ## ESTO VA
            finalDictionary[vector0[x]] = { 'tipo' : 2 ,'titulo' : str(vector4[i]), 'autor' : str(vector5[i]),'descripcion' : str(vector1[x]),'link' : str(vector3[i]),'usuario' : Nombre_de_usuario,'foto_de_perfil' : foto_de_perfil,'nlikes' : str(Gustas[x]),'nlikemio' : bool(GustaMio),'ncomentarios' : str(Comentarios[x]),'nguardados' : str(Guardados[x]),'guardadomio' : bool(GuardadoMio) }
#print(result)
#vectorFinal.append(post)
i = i + 1
#return json.dumps(vectorFinal)
#finalDictionary[vector0[x]] = [str(vector0[i]) +'value' : str(vector4[i])}]
    return json.dumps(finalDictionary, indent=2)
#return json.dumps(vectorFinal, default=json_util.default)
#json_docs.append(json_doc)
# Demo endpoint with a static payload; renamed so it does not clash with the real
# /misRecomendaciones route and view function defined above.
@app.route('/misRecomendacionesDemo', methods=['GET'])
def getPostsRecomendadosDemo():
    finalDictionary = {}
    finalDictionary[0] = { 'tipo' : 2 ,'titulo' : "su", 'autor' : "su",'descripcion' : "su",'link' : "su",'usuario' : "su",'foto_de_perfil' : "su",'nlikes' : 0,'nlikemio' : 0,'ncomentarios' : 3,'nguardados' : 2,'guardadomio' : 0 }
    return finalDictionary
def check_email(email):
    regex = r'^[a-z0-9]+[\._]?[a-z0-9]+[@]\w+[.]\w{2,3}$'
    if re.search(regex, email):
        return True
    else:
        return False
# Passwords must be between 8 and 32 characters and contain a digit,
# a lowercase letter, an uppercase letter and a special character.
def check_password(password):
    regex = r'^(?=.*[0-9])(?=.*[a-z])(?=.*[A-Z])(?=.*[*.!@$%^&(){}\[\]:;<>,.?/~_+\-=|\\]).{8,32}$'
    if re.search(regex, password):
        return True
    else:
        return False
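# Illustrative check (added note, not part of the original code): with the rule above,
# "Str0ng!pass" is accepted (11 characters with a digit, lower case, upper case and '!'),
# while "password" is rejected because it lacks a digit, an uppercase letter and a symbol.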
if __name__ == '__main__':
app.run(debug=True)
|
the-stack_0_18161 | """Enhance TAC with UD attributes based on CoreNLP parse
Usage:
corenlp_enrichment.py <corenlp_server> <corenlp_port> [--lines] [--input=<input-file>] [--output=<output-file>]
corenlp_enrichment.py (-h | --help)
Options:
-h --help Show this screen.
"""
from docopt import docopt
import sys
import ijson
import jsonlines
from tacred_enrichment.internal.core_nlp_client import CoreNlpClient
def split_keep_delimiter(tokens, delimiter):
fix_tokens = []
for token in tokens:
subtokens = token.split(delimiter)
if len(subtokens) == 1:
fix_tokens.append(token)
else:
fix_tokens.append(subtokens[0])
for subtoken in subtokens[1:]:
fix_tokens.append(delimiter)
fix_tokens.append(subtoken)
return fix_tokens
#return [item for token in tokens for subtoken in token.split('.') for item in [subtoken, '.'] ]
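# Worked example (illustrative, not from the original file): with delimiter '.',
#   split_keep_delimiter(['U.S.', 'economy'], '.')
# returns ['U', '.', 'S', '.', '', 'economy'] -- every delimiter is kept as its own token and
# an empty string appears when a token ends with the delimiter.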
args = docopt(__doc__)
corenlp_server = args['<corenlp_server>']
corenlp_port = int(args['<corenlp_port>'])
input_stream = open(args['--input'], encoding='utf-8') if args['--input'] is not None else sys.stdin
output_stream = open(args['--output'], 'w', encoding='utf-8', newline='', buffering=1) if args['--output'] is not None else sys.stdout
lines = True if args['--lines'] else False
reader = jsonlines.Reader(input_stream) if lines else ijson.items(input_stream, 'item')
with jsonlines.Writer(output_stream) as json_write:
core_nlp = CoreNlpClient(corenlp_server, corenlp_port, 15000)
for item in reader:
retokenized = item['ucca_tokens']
parse = core_nlp.get_all(retokenized, False)
sentences = parse['sentences']
item['corenlp_ner'] = []
item['corenlp_pos'] = []
item['corenlp_heads'] = []
item['corenlp_coref'] = []
for sentence in sentences:
current_heads = [b for (a, b) in sorted([(dep_set['dependent'], dep_set['governor']) for dep_set in sentence['basicDependencies']], key=lambda x: x[0])]
current_heads = [head + len(item['corenlp_heads']) if head > 0 else head for head in current_heads]
current_pos = [token['pos'] for token in sentence['tokens']]
current_ner = [token['ner'] for token in sentence['tokens']]
item['corenlp_heads'] += current_heads
item['corenlp_pos'] += current_pos
item['corenlp_ner'] += current_ner
        sentence_adj = [0]
        for sentence in sentences[:-1]:
            # cumulative token offset of each sentence within the whole document
            sentence_adj.append(sentence_adj[-1] + len(sentence['tokens']))
corefs = parse['corefs'].values()
for coref in corefs:
anchor = next(x for x in coref if x['isRepresentativeMention'])
refs = [x for x in coref if not x['isRepresentativeMention']]
anchor_coords = [anchor['startIndex']+sentence_adj[anchor['sentNum']-1], \
anchor['endIndex']+sentence_adj[anchor['sentNum']-1]]
refs_coords = [[ref['startIndex']+sentence_adj[ref['sentNum']-1], \
ref['endIndex']+sentence_adj[ref['sentNum']-1]] for ref in refs]
item['corenlp_coref'].append([anchor_coords, refs_coords])
json_write.write(item)
|
the-stack_0_18162 | from google.cloud import language_v1
def sample_analyze_sentiment(text_content):
"""
Analyzing Sentiment in a String
Args:
text_content The text content to analyze
"""
client = language_v1.LanguageServiceClient.from_service_account_json('googlecreds.json')
# text_content = 'I am so happy and joyful.'
# Available types: PLAIN_TEXT, HTML
type_ = language_v1.Document.Type.PLAIN_TEXT
# Optional. If not specified, the language is automatically detected.
# For list of supported languages:
# https://cloud.google.com/natural-language/docs/languages
language = "en"
document = {"content": text_content, "type_": type_, "language": language}
# Available values: NONE, UTF8, UTF16, UTF32
encoding_type = language_v1.EncodingType.UTF8
results = {}
response = client.analyze_sentiment(request = {'document': document, 'encoding_type': encoding_type})
# Get overall sentiment of the input document
print(u"Document sentiment score: {}".format(response.document_sentiment.score))
print(
u"Document sentiment magnitude: {}".format(
response.document_sentiment.magnitude
)
)
results['docsentimentscore'] = response.document_sentiment.score
    results['docsentimentmagnitude'] = response.document_sentiment.magnitude
# Get sentiment for all sentences in the document
sentencescores = []
for sentence in response.sentences:
print(u"Sentence text: {}".format(sentence.text.content))
print(u"Sentence sentiment score: {}".format(sentence.sentiment.score))
print(u"Sentence sentiment magnitude: {}".format(sentence.sentiment.magnitude))
s = {}
s['text'] = sentence.text.content
s['score'] = sentence.sentiment.score
s['magnitude'] = sentence.sentiment.magnitude
sentencescores.append(s)
# Get the language of the text, which will be the same as
# the language specified in the request or, if not specified,
# the automatically-detected language.
print(u"Language of the text: {}".format(response.language))
results['sentences'] = sentencescores
return results
# sample_analyze_sentiment("this is a very stressful time for me") |
the-stack_0_18163 | from .. import OutputtingScript, run_safely
from ...routines import fetch_resource
class SeeResultScript(OutputtingScript):
def configure(self, argument_subparser):
super().configure(argument_subparser)
argument_subparser.add_argument(
'result_id', metavar='RESULT_ID', nargs='?')
def run(self, args, argv):
super().run(args, argv)
is_quiet = args.is_quiet
as_json = args.as_json
run_safely(fetch_resource, {
'resource_name': 'results',
'resource_id': args.result_id,
}, is_quiet, as_json)
|
the-stack_0_18164 | #!/usr/bin/env python
#
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example creates new custom targeting keys and values.
To determine which custom targeting keys and values exist, run
get_all_custom_targeting_keys_and_values.py. To target these custom targeting
keys and values, run target_custom_criteria_example.py.
The LoadFromStorage method is pulling credentials and properties from a
"googleads.yaml" file. By default, it looks for this file in your home
directory. For more information, see the "Caching authentication information"
section of our README.
"""
# Import appropriate modules from the client library.
from googleads import ad_manager
def main(client):
# Initialize appropriate service.
custom_targeting_service = client.GetService(
'CustomTargetingService', version='v201902')
# Create custom targeting key objects.
keys = [
{
'displayName': 'gender',
'name': 'g',
'type': 'PREDEFINED'
},
{
'displayName': 'car model',
'name': 'c',
'type': 'FREEFORM'
},
# Add predefined key that may be use for content targeting.
{
'displayName': 'genre',
'name': 'genre',
'type': 'PREDEFINED'
}
]
# Add custom targeting keys.
keys = custom_targeting_service.createCustomTargetingKeys(keys)
# Display results.
if keys:
for key in keys:
print('A custom targeting key with id "%s", name "%s", and display '
'name "%s" was created.' % (key['id'], key['name'],
key['displayName']))
else:
print('No keys were created.')
# Create custom targeting value objects.
values = [
{
'customTargetingKeyId': keys[0]['id'],
'displayName': 'male',
# Name is set to 1 so that the actual name can be hidden from website
# users.
'name': '1',
'matchType': 'EXACT'
},
{
'customTargetingKeyId': keys[0]['id'],
'displayName': 'female',
# Name is set to 2 so that the actual name can be hidden from website
# users.
'name': '2',
'matchType': 'EXACT'
},
{
'customTargetingKeyId': keys[1]['id'],
'displayName': 'honda civic',
'name': 'honda civic',
'matchType': 'EXACT'
},
{
'customTargetingKeyId': keys[1]['id'],
'displayName': 'toyota',
'name': 'toyota',
'matchType': 'EXACT'
},
{
'customTargetingKeyId': keys[2]['id'],
'displayName': 'comedy',
'name': 'comedy',
'matchType': 'EXACT'
},
{
'customTargetingKeyId': keys[2]['id'],
'displayName': 'drama',
'name': 'drama',
'matchType': 'EXACT'
}
]
# Add custom targeting values.
values = custom_targeting_service.createCustomTargetingValues(values)
# Display results.
if values:
for value in values:
print('A custom targeting value with id "%s", belonging to key with id'
' "%s", name "%s", and display name "%s" was created.'
% (value['id'], value['customTargetingKeyId'], value['name'],
value['displayName']))
else:
print('No values were created.')
if __name__ == '__main__':
# Initialize client object.
ad_manager_client = ad_manager.AdManagerClient.LoadFromStorage()
main(ad_manager_client)
|
the-stack_0_18166 | import gym, assistive_gym
import pybullet as p
import numpy as np
env = gym.make('DressingSawyer-v1')
env.render()
observation = env.reset()
# Map keys to position and orientation end effector movements
pos_keys_actions = {ord('j'): np.array([-0.01, 0, 0]), ord('l'): np.array([0.01, 0, 0]),
ord('u'): np.array([0, -0.01, 0]), ord('o'): np.array([0, 0.01, 0]),
ord('k'): np.array([0, 0, -0.01]), ord('i'): np.array([0, 0, 0.01])}
rpy_keys_actions = {ord('k'): np.array([-0.05, 0, 0]), ord('i'): np.array([0.05, 0, 0]),
ord('u'): np.array([0, -0.05, 0]), ord('o'): np.array([0, 0.05, 0]),
ord('j'): np.array([0, 0, -0.05]), ord('l'): np.array([0, 0, 0.05])}
start_pos, orient = env.robot.get_pos_orient(env.robot.right_end_effector)
start_rpy = env.get_euler(orient)
target_pos_offset = np.zeros(3)
target_rpy_offset = np.zeros(3)
while True:
keys = p.getKeyboardEvents()
# Process position movement keys ('u', 'i', 'o', 'j', 'k', 'l')
for key, action in pos_keys_actions.items():
if p.B3G_SHIFT not in keys and key in keys and keys[key] & p.KEY_IS_DOWN:
target_pos_offset += action
# Process rpy movement keys (shift + movement keys)
for key, action in rpy_keys_actions.items():
if p.B3G_SHIFT in keys and keys[p.B3G_SHIFT] & p.KEY_IS_DOWN and (key in keys and keys[key] & p.KEY_IS_DOWN):
target_rpy_offset += action
# print('Target position offset:', target_pos_offset, 'Target rpy offset:', target_rpy_offset)
target_pos = start_pos + target_pos_offset
target_rpy = start_rpy + target_rpy_offset
# Use inverse kinematics to compute the joint angles for the robot's arm
# so that its end effector moves to the target position.
target_joint_angles = env.robot.ik(env.robot.right_end_effector, target_pos, env.get_quaternion(target_rpy), env.robot.right_arm_ik_indices, max_iterations=200, use_current_as_rest=True)
# Get current joint angles of the robot's arm
current_joint_angles = env.robot.get_joint_angles(env.robot.right_arm_joint_indices)
# Compute the action as the difference between target and current joint angles.
action = (target_joint_angles - current_joint_angles)
# Step the simulation forward
observation, reward, done, info = env.step(action)
|
the-stack_0_18167 | import numpy as np
import matplotlib.pyplot as plt
import math
data = [4.035101890563965, 2.142939567565918, 1.300379753112793]
baseline = 4.168240785598755
x = [1,2,4]
h1 = plt.scatter(x=x, y=data,marker='v',alpha=0.6,color='green')
h4 = plt.hlines(xmin=x[0]-0.5, xmax=x[-1]+0.5, y=baseline,linestyles='--',color='black')
plt.legend([h1, h4],['MPI','Naive'])
ew_list = range(math.floor(min(x)), math.ceil(max(x))+1)
plt.xticks(ew_list)
plt.xlabel('processes')
plt.ylabel('time [s]')
plt.grid()
plt.savefig('perf.png')
|
the-stack_0_18168 | from dataclasses import dataclass, field
from typing import Optional
from bindings.csw.anim_add_accum_attrs_accumulate import AnimAddAccumAttrsAccumulate
from bindings.csw.anim_add_accum_attrs_additive import AnimAddAccumAttrsAdditive
from bindings.csw.anim_named_target_attrs_attribute_type import (
AnimNamedTargetAttrsAttributeType,
)
__NAMESPACE__ = "http://www.w3.org/2001/SMIL20/"
@dataclass
class AnimatePrototype:
class Meta:
name = "animatePrototype"
attribute_name: Optional[str] = field(
default=None,
metadata={
"name": "attributeName",
"type": "Attribute",
"required": True,
},
)
attribute_type: AnimNamedTargetAttrsAttributeType = field(
default=AnimNamedTargetAttrsAttributeType.AUTO,
metadata={
"name": "attributeType",
"type": "Attribute",
},
)
additive: AnimAddAccumAttrsAdditive = field(
default=AnimAddAccumAttrsAdditive.REPLACE,
metadata={
"type": "Attribute",
},
)
accumulate: AnimAddAccumAttrsAccumulate = field(
default=AnimAddAccumAttrsAccumulate.NONE,
metadata={
"type": "Attribute",
},
)
to: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
},
)
from_value: Optional[str] = field(
default=None,
metadata={
"name": "from",
"type": "Attribute",
},
)
by: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
},
)
values: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
},
)
|
the-stack_0_18169 | #!/usr/bin/env python
#-------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#--------------------------------------------------------------------------
import re
import os.path
from io import open
from setuptools import find_packages, setup # type: ignore
# Change the PACKAGE_NAME only to change folder and different name
PACKAGE_NAME = "azure-data-tables"
PACKAGE_PPRINT_NAME = "Azure Data Tables"
# a-b-c => a/b/c
package_folder_path = PACKAGE_NAME.replace('-', '/')
# a-b-c => a.b.c
namespace_name = PACKAGE_NAME.replace('-', '.')
# Version extraction inspired from 'requests'
with open(os.path.join(package_folder_path, '_version.py'), 'r') as fd:
version = re.search(r'^VERSION\s*=\s*[\'"]([^\'"]*)[\'"]', # type: ignore
fd.read(), re.MULTILINE).group(1)
if not version:
raise RuntimeError('Cannot find version information')
with open("README.md", encoding="utf-8") as f:
readme = f.read()
with open("CHANGELOG.md", encoding="utf-8") as f:
changelog = f.read()
setup(
name=PACKAGE_NAME,
version=version,
include_package_data=True,
description='Microsoft Azure {} Client Library for Python'.format(PACKAGE_PPRINT_NAME),
long_description=readme + '\n\n' + changelog,
long_description_content_type='text/markdown',
license='MIT License',
author='Microsoft Corporation',
author_email='[email protected]',
url='https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/table/azure-table',
classifiers=[
"Development Status :: 5 - Production/Stable",
'Programming Language :: Python',
"Programming Language :: Python :: 3 :: Only",
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'License :: OSI Approved :: MIT License',
],
zip_safe=False,
packages=find_packages(exclude=[
# Exclude packages that will be covered by PEP420 or nspkg
'azure',
'tests',
'azure.data',
]),
python_requires=">=3.7",
install_requires=[
"azure-core<2.0.0,>=1.14.0",
"msrest>=0.6.21"
],
) |
the-stack_0_18170 | # Copyright (c) 2014 The Bitcoin Core developers
# Copyright (c) 2014-2015 The Dash developers
# Copyright (c) 2015-2017 The Pulse developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Helpful routines for regression testing
#
# Add python-bitcoinrpc to module search path:
import os
import sys
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), "python-bitcoinrpc"))
from decimal import Decimal, ROUND_DOWN
import json
import random
import shutil
import subprocess
import time
import re
from bitcoinrpc.authproxy import AuthServiceProxy, JSONRPCException
from util import *
def p2p_port(n):
return 11000 + n + os.getpid()%999
def rpc_port(n):
return 12000 + n + os.getpid()%999
def check_json_precision():
"""Make sure json library being used does not lose precision converting BTC values"""
n = Decimal("20000000.00000003")
satoshis = int(json.loads(json.dumps(float(n)))*1.0e8)
if satoshis != 2000000000000003:
raise RuntimeError("JSON encode/decode loses precision")
def sync_blocks(rpc_connections):
"""
Wait until everybody has the same block count
"""
while True:
counts = [ x.getblockcount() for x in rpc_connections ]
if counts == [ counts[0] ]*len(counts):
break
time.sleep(1)
def sync_mempools(rpc_connections):
"""
Wait until everybody has the same transactions in their memory
pools
"""
while True:
pool = set(rpc_connections[0].getrawmempool())
num_match = 1
for i in range(1, len(rpc_connections)):
if set(rpc_connections[i].getrawmempool()) == pool:
num_match = num_match+1
if num_match == len(rpc_connections):
break
time.sleep(1)
bitcoind_processes = {}
def initialize_datadir(dirname, n):
datadir = os.path.join(dirname, "node"+str(n))
if not os.path.isdir(datadir):
os.makedirs(datadir)
with open(os.path.join(datadir, "pulse.conf"), 'w') as f:
f.write("regtest=1\n");
f.write("rpcuser=rt\n");
f.write("rpcpassword=rt\n");
f.write("port="+str(p2p_port(n))+"\n");
f.write("rpcport="+str(rpc_port(n))+"\n");
return datadir
def initialize_chain(test_dir):
"""
Create (or copy from cache) a 200-block-long chain and
4 wallets.
pulsed and pulse-cli must be in search path.
"""
if not os.path.isdir(os.path.join("cache", "node0")):
devnull = open("/dev/null", "w+")
# Create cache directories, run pulsed:
for i in range(4):
datadir=initialize_datadir("cache", i)
args = [ os.getenv("BITCOIND", "pulsed"), "-keypool=1", "-datadir="+datadir, "-discover=0" ]
if i > 0:
args.append("-connect=127.0.0.1:"+str(p2p_port(0)))
bitcoind_processes[i] = subprocess.Popen(args)
subprocess.check_call([ os.getenv("BITCOINCLI", "pulse-cli"), "-datadir="+datadir,
"-rpcwait", "getblockcount"], stdout=devnull)
devnull.close()
rpcs = []
for i in range(4):
try:
url = "http://rt:[email protected]:%d"%(rpc_port(i),)
rpcs.append(AuthServiceProxy(url))
except:
sys.stderr.write("Error connecting to "+url+"\n")
sys.exit(1)
# Create a 200-block-long chain; each of the 4 nodes
# gets 25 mature blocks and 25 immature.
# blocks are created with timestamps 10 minutes apart, starting
# at 1 Jan 2014
block_time = 1388534400
for i in range(2):
for peer in range(4):
for j in range(25):
set_node_times(rpcs, block_time)
rpcs[peer].setgenerate(True, 1)
block_time += 10*60
# Must sync before next peer starts generating blocks
sync_blocks(rpcs)
# Shut them down, and clean up cache directories:
stop_nodes(rpcs)
wait_bitcoinds()
for i in range(4):
os.remove(log_filename("cache", i, "debug.log"))
os.remove(log_filename("cache", i, "db.log"))
os.remove(log_filename("cache", i, "peers.dat"))
os.remove(log_filename("cache", i, "fee_estimates.dat"))
for i in range(4):
from_dir = os.path.join("cache", "node"+str(i))
to_dir = os.path.join(test_dir, "node"+str(i))
shutil.copytree(from_dir, to_dir)
initialize_datadir(test_dir, i) # Overwrite port/rpcport in pulse.conf
def initialize_chain_clean(test_dir, num_nodes):
"""
Create an empty blockchain and num_nodes wallets.
Useful if a test case wants complete control over initialization.
"""
for i in range(num_nodes):
datadir=initialize_datadir(test_dir, i)
def _rpchost_to_args(rpchost):
'''Convert optional IP:port spec to rpcconnect/rpcport args'''
if rpchost is None:
return []
match = re.match('(\[[0-9a-fA-f:]+\]|[^:]+)(?::([0-9]+))?$', rpchost)
if not match:
raise ValueError('Invalid RPC host spec ' + rpchost)
rpcconnect = match.group(1)
rpcport = match.group(2)
if rpcconnect.startswith('['): # remove IPv6 [...] wrapping
rpcconnect = rpcconnect[1:-1]
rv = ['-rpcconnect=' + rpcconnect]
if rpcport:
rv += ['-rpcport=' + rpcport]
return rv
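# Illustrative behaviour (added note; the example values are assumptions, not from the test suite):
#   _rpchost_to_args('127.0.0.1:18332') -> ['-rpcconnect=127.0.0.1', '-rpcport=18332']
#   _rpchost_to_args('[::1]:18332')     -> ['-rpcconnect=::1', '-rpcport=18332']
#   _rpchost_to_args(None)              -> []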
def start_node(i, dirname, extra_args=None, rpchost=None):
"""
Start a pulsed and return RPC connection to it
"""
datadir = os.path.join(dirname, "node"+str(i))
args = [ os.getenv("BITCOIND", "pulsed"), "-datadir="+datadir, "-keypool=1", "-discover=0", "-rest" ]
if extra_args is not None: args.extend(extra_args)
bitcoind_processes[i] = subprocess.Popen(args)
devnull = open("/dev/null", "w+")
subprocess.check_call([ os.getenv("BITCOINCLI", "pulse-cli"), "-datadir="+datadir] +
_rpchost_to_args(rpchost) +
["-rpcwait", "getblockcount"], stdout=devnull)
devnull.close()
url = "http://rt:rt@%s:%d" % (rpchost or '127.0.0.1', rpc_port(i))
proxy = AuthServiceProxy(url)
proxy.url = url # store URL on proxy for info
return proxy
def start_nodes(num_nodes, dirname, extra_args=None, rpchost=None):
"""
Start multiple pulseds, return RPC connections to them
"""
if extra_args is None: extra_args = [ None for i in range(num_nodes) ]
return [ start_node(i, dirname, extra_args[i], rpchost) for i in range(num_nodes) ]
def log_filename(dirname, n_node, logname):
return os.path.join(dirname, "node"+str(n_node), "regtest", logname)
def stop_node(node, i):
node.stop()
bitcoind_processes[i].wait()
del bitcoind_processes[i]
def stop_nodes(nodes):
for node in nodes:
node.stop()
del nodes[:] # Emptying array closes connections as a side effect
def set_node_times(nodes, t):
for node in nodes:
node.setmocktime(t)
def wait_bitcoinds():
# Wait for all bitcoinds to cleanly exit
for bitcoind in bitcoind_processes.values():
bitcoind.wait()
bitcoind_processes.clear()
def connect_nodes(from_connection, node_num):
ip_port = "127.0.0.1:"+str(p2p_port(node_num))
from_connection.addnode(ip_port, "onetry")
# poll until version handshake complete to avoid race conditions
# with transaction relaying
while any(peer['version'] == 0 for peer in from_connection.getpeerinfo()):
time.sleep(0.1)
def connect_nodes_bi(nodes, a, b):
connect_nodes(nodes[a], b)
connect_nodes(nodes[b], a)
def find_output(node, txid, amount):
"""
Return index to output of txid with value amount
Raises exception if there is none.
"""
txdata = node.getrawtransaction(txid, 1)
for i in range(len(txdata["vout"])):
if txdata["vout"][i]["value"] == amount:
return i
raise RuntimeError("find_output txid %s : %s not found"%(txid,str(amount)))
def gather_inputs(from_node, amount_needed, confirmations_required=1):
"""
Return a random set of unspent txouts that are enough to pay amount_needed
"""
assert(confirmations_required >=0)
utxo = from_node.listunspent(confirmations_required)
random.shuffle(utxo)
inputs = []
total_in = Decimal("0.00000000")
while total_in < amount_needed and len(utxo) > 0:
t = utxo.pop()
total_in += t["amount"]
inputs.append({ "txid" : t["txid"], "vout" : t["vout"], "address" : t["address"] } )
if total_in < amount_needed:
raise RuntimeError("Insufficient funds: need %d, have %d"%(amount_needed, total_in))
return (total_in, inputs)
def make_change(from_node, amount_in, amount_out, fee):
"""
Create change output(s), return them
"""
outputs = {}
amount = amount_out+fee
change = amount_in - amount
if change > amount*2:
# Create an extra change output to break up big inputs
change_address = from_node.getnewaddress()
# Split change in two, being careful of rounding:
outputs[change_address] = Decimal(change/2).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN)
change = amount_in - amount - outputs[change_address]
if change > 0:
outputs[from_node.getnewaddress()] = change
return outputs
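# Worked example (illustrative, assumed values): with amount_in=10, amount_out=2 and fee=0.1,
# amount is 2.1 and change is 7.9 > 2*amount, so the change is split: 3.95 goes to a fresh
# self-address and the remaining 10 - 2.1 - 3.95 = 3.95 goes to a second self-address.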
def send_zeropri_transaction(from_node, to_node, amount, fee):
"""
Create&broadcast a zero-priority transaction.
Returns (txid, hex-encoded-txdata)
Ensures transaction is zero-priority by first creating a send-to-self,
then using it's output
"""
# Create a send-to-self with confirmed inputs:
self_address = from_node.getnewaddress()
(total_in, inputs) = gather_inputs(from_node, amount+fee*2)
outputs = make_change(from_node, total_in, amount+fee, fee)
outputs[self_address] = float(amount+fee)
self_rawtx = from_node.createrawtransaction(inputs, outputs)
self_signresult = from_node.signrawtransaction(self_rawtx)
self_txid = from_node.sendrawtransaction(self_signresult["hex"], True)
vout = find_output(from_node, self_txid, amount+fee)
# Now immediately spend the output to create a 1-input, 1-output
# zero-priority transaction:
inputs = [ { "txid" : self_txid, "vout" : vout } ]
outputs = { to_node.getnewaddress() : float(amount) }
rawtx = from_node.createrawtransaction(inputs, outputs)
signresult = from_node.signrawtransaction(rawtx)
txid = from_node.sendrawtransaction(signresult["hex"], True)
return (txid, signresult["hex"])
def random_zeropri_transaction(nodes, amount, min_fee, fee_increment, fee_variants):
"""
Create a random zero-priority transaction.
Returns (txid, hex-encoded-transaction-data, fee)
"""
from_node = random.choice(nodes)
to_node = random.choice(nodes)
fee = min_fee + fee_increment*random.randint(0,fee_variants)
(txid, txhex) = send_zeropri_transaction(from_node, to_node, amount, fee)
return (txid, txhex, fee)
def random_transaction(nodes, amount, min_fee, fee_increment, fee_variants):
"""
Create a random transaction.
Returns (txid, hex-encoded-transaction-data, fee)
"""
from_node = random.choice(nodes)
to_node = random.choice(nodes)
fee = min_fee + fee_increment*random.randint(0,fee_variants)
(total_in, inputs) = gather_inputs(from_node, amount+fee)
outputs = make_change(from_node, total_in, amount, fee)
outputs[to_node.getnewaddress()] = float(amount)
rawtx = from_node.createrawtransaction(inputs, outputs)
signresult = from_node.signrawtransaction(rawtx)
txid = from_node.sendrawtransaction(signresult["hex"], True)
return (txid, signresult["hex"], fee)
def assert_equal(thing1, thing2):
if thing1 != thing2:
raise AssertionError("%s != %s"%(str(thing1),str(thing2)))
def assert_greater_than(thing1, thing2):
if thing1 <= thing2:
raise AssertionError("%s <= %s"%(str(thing1),str(thing2)))
def assert_raises(exc, fun, *args, **kwds):
try:
fun(*args, **kwds)
except exc:
pass
except Exception as e:
raise AssertionError("Unexpected exception raised: "+type(e).__name__)
else:
raise AssertionError("No exception raised")
|
the-stack_0_18173 | #!/home/hexnor/Current/SurfFree/vsurfree/bin/python
#
# The Python Imaging Library
# $Id$
#
# this demo script creates four windows containing an image and a slider.
# drag the slider to modify the image.
#
import sys
if sys.version_info[0] > 2:
import tkinter
else:
import Tkinter as tkinter
from PIL import Image, ImageTk, ImageEnhance
#
# enhancer widget
class Enhance(tkinter.Frame):
def __init__(self, master, image, name, enhancer, lo, hi):
tkinter.Frame.__init__(self, master)
# set up the image
self.tkim = ImageTk.PhotoImage(image.mode, image.size)
self.enhancer = enhancer(image)
self.update("1.0") # normalize
# image window
tkinter.Label(self, image=self.tkim).pack()
# scale
s = tkinter.Scale(self, label=name, orient=tkinter.HORIZONTAL,
from_=lo, to=hi, resolution=0.01,
command=self.update)
s.set(self.value)
s.pack()
def update(self, value):
self.value = float(value)
self.tkim.paste(self.enhancer.enhance(self.value))
#
# main
if len(sys.argv) != 2:
print("Usage: enhancer file")
sys.exit(1)
root = tkinter.Tk()
im = Image.open(sys.argv[1])
im.thumbnail((200, 200))
Enhance(root, im, "Color", ImageEnhance.Color, 0.0, 4.0).pack()
Enhance(tkinter.Toplevel(), im, "Sharpness", ImageEnhance.Sharpness, -2.0, 2.0).pack()
Enhance(tkinter.Toplevel(), im, "Brightness", ImageEnhance.Brightness, -1.0, 3.0).pack()
Enhance(tkinter.Toplevel(), im, "Contrast", ImageEnhance.Contrast, -1.0, 3.0).pack()
root.mainloop()
|
the-stack_0_18174 | import matplotlib.pyplot as plt
import math
import numpy as np
import random as r
'''
Measurement Uncertainty with probability density function (Gaussian distribution)
'''
class Robot:
def __init__(self, pos):
self.pos = pos
self.pole_dist = 0
class Particle(Robot):
def __init__(self, pos):
Robot.__init__(self, pos)
self.weight = 0
self.measurement_sigma = 0.5
def probability_density_function(self, mu, x):
### STUDENT CODE START
weight = np.exp((-1/2)*((x - mu)/self.measurement_sigma)**2)/(self.measurement_sigma * np.sqrt(2 * np.pi))
return weight
### STUDENT CODE END
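    # Added note (not part of the original exercise): the weight above is the Gaussian PDF
    #   f(x) = exp(-0.5 * ((x - mu) / sigma) ** 2) / (sigma * sqrt(2 * pi))
    # With measurement_sigma = 0.5 it peaks at x == mu with a value of about
    # 1 / (0.5 * sqrt(2 * pi)) ≈ 0.798.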
def update_weight(self, robot_dist):
### STUDENT CODE START
self.weight = self.probability_density_function(robot_dist, self.pole_dist)
        ### STUDENT CODE END
# Plot Weights for a range of robot measurements.
particle = Particle(0.0)
x = np.arange(-5, 5, 0.01)
y = np.zeros(len(x))
for i in range(len(x)):
particle.update_weight(x[i])
y[i] = particle.probability_density_function(0, x[i])
plt.plot(x, y, '-r')
plt.grid(True)
plt.show()
# Integrate left side to calculate probability.
sum_probability = 0
for i in range(int(len(y) / 2)):
sum_probability += y[i]
print("If Probability is close to 0.5, then PDF works.")
print(round(sum_probability * 0.01, 2))
print()
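# Added note: the loop above is a left Riemann sum with dx = 0.01 over x in [-5, 0);
# the Gaussian is symmetric about mu = 0, so the printed value should come out close to 0.5
# when probability_density_function is implemented correctly.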
# Update Particle Weight based on robot measurement.
robot_dist = 3.0
particle.pole_dist = 2.8
particle.update_weight(robot_dist)
print("Particle Weight: " + str(round(particle.weight, 2)))
plt.plot(x, y, '-r')
plt.plot([-5, 5], [particle.weight, particle.weight], '-b')
plt.grid(True)
plt.show()
# %%
|
the-stack_0_18176 | from django.contrib.admin.decorators import register
from django.contrib.admin.filters import (
AllValuesFieldListFilter,
BooleanFieldListFilter,
ChoicesFieldListFilter,
DateFieldListFilter,
EmptyFieldListFilter,
FieldListFilter,
ListFilter,
RelatedFieldListFilter,
RelatedOnlyFieldListFilter,
SimpleListFilter,
)
from django.contrib.admin.options import (
HORIZONTAL,
VERTICAL,
ModelAdmin,
StackedInline,
TabularInline,
)
from django.contrib.admin.sites import AdminSite, site
from django.utils.module_loading import autodiscover_modules
__all__ = [
"register",
"ModelAdmin",
"HORIZONTAL",
"VERTICAL",
"StackedInline",
"TabularInline",
"AdminSite",
"site",
"ListFilter",
"SimpleListFilter",
"FieldListFilter",
"BooleanFieldListFilter",
"RelatedFieldListFilter",
"ChoicesFieldListFilter",
"DateFieldListFilter",
"AllValuesFieldListFilter",
"EmptyFieldListFilter",
"RelatedOnlyFieldListFilter",
"autodiscover",
]
def autodiscover():
autodiscover_modules("admin", register_to=site)
default_app_config = "django.contrib.admin.apps.AdminConfig"
|
the-stack_0_18177 | from torchvision import datasets
from torchvision.transforms import ToTensor
from Dpex import dataloader
import time
# init ray environment
training_data = datasets.FashionMNIST(
root="data",
train=True,
download=True,
transform=ToTensor()
)
test_data = datasets.FashionMNIST(
root="data",
train=False,
download=True,
transform=ToTensor()
)
device = "cpu"
# then we recreate dataloader
train_loader = dataloader.DpexDataLoader(training_data, distribute_mode=True, num_workers=10, batch_size=100, shuffle=True)
test_loader = dataloader.DpexDataLoader(test_data, distribute_mode=True, num_workers=1, batch_size=100, shuffle=False)
del train_loader
del test_loader |
the-stack_0_18178 | import json
import time
import traceback
import uuid
from pipelineblocksdk.api import BBSDK as sdk
from pipelineblocksdk.api.Singleton import Singleton
from pipelineblocksdk.construct.constants.MetricConfig import MetricConfig
from pipelineblocksdk.data.spark.SparkConfCustom import SparkConfCustom
import gc
import os
import pyspark
from pyspark.sql.functions import udf
from pyspark.ml.feature import VectorAssembler
from pyspark.ml.classification import DecisionTreeClassifier
from pyspark.ml.evaluation import MulticlassClassificationEvaluator
from pyspark.ml.evaluation import BinaryClassificationEvaluator
from pyspark.sql import functions as F
from pyspark.sql.types import StructType, StructField, StringType, IntegerType, DoubleType, FloatType
import pyspark
import threading
import logging
from shutil import rmtree
class SparkJoin(Singleton):
logger = None
df_tmp_fil = None
dtnct = None
metrics_handler = None
metrics = dict()
sqlDF = None
localtime = time.localtime(time.time())
# Possible values= COMPLETED, PARTIALLY_COMPLETED, FAILED
block_status = "COMPLETED"
spark_schema = None
left_df = None
right_df = None
resultant_join_df = None
field_list = None
list_of_struct_fields = None
# Possible values= COMPLETED, PARTIALLY_COMPLETED, FAILED
block_status = "COMPLETED"
total_records = 0
blockErrorInfo = {}
blockProcessInfo = {}
data_frame = None
class ReadRecords(threading.Thread):
def __init__(self, spark, topic, idx, schema, input_dict=None, block_params=None, optional_arg=None,
field_list=None, logger=None):
threading.Thread.__init__(self)
self.spark = spark
self.topic = topic
self.idx = str(idx)
self.schema = schema
self.input_dict = input_dict
self.block_params = block_params
self.optional_arg = optional_arg
self.field_list = field_list
if logger is None:
self.logger = logging
else:
self.logger = logger
def run(self):
try:
optional_arg = self.optional_arg
self.logger.info('Started reading topic ' + self.idx)
channels = self.optional_arg["channels"]
read_msgs_channel = {
"channelId": channels[self.idx],
"registerId": ""
}
converted_list = []
while True:
self.logger.info('Reading....')
read_msgs_res = optional_arg['api_instance'].read_messages_from_topic_using_post(read_msgs_channel)
msgs = read_msgs_res.result
self.logger.info('messages len' + str(len(msgs)))
if (len(msgs) == 0):
self.logger.info('Zero Messages')
topic = {"topicName": self.topic}
res = optional_arg['api_instance'].get_producer_status_using_post(topic)
self.logger.info('Zero messages: ' + json.dumps(res.result))
if not res.result['value']:
break;
for msg in msgs:
msg = json.loads(msg)
for index, i in enumerate(msg):
if self.field_list[self.idx][index] == 'int':
msg[index] = int(float(msg[index]))
elif self.field_list[self.idx][index] == 'float':
msg[index] = float(msg[index])
converted_list.append(msg)
if len(converted_list) > 100000:
self.logger.info(
'Writing to file: ' + '/bigbrain/' + self.idx + ' len: ' + str(len(converted_list)))
df = self.spark.createDataFrame(converted_list, self.schema)
df.write.mode('append').parquet('/bigbrain/' + self.idx)
converted_list.clear()
if len(converted_list) > 0:
                    self.logger.info('Writing to file: /bigbrain/' + self.idx + ' len: ' + str(len(converted_list)))
df = self.spark.createDataFrame(converted_list, self.schema)
df.write.mode('append').parquet('/bigbrain/' + self.idx)
converted_list.clear()
                self.logger.info('Done with topic ' + self.idx)
except Exception as e:
self.logger.error(e)
@staticmethod
def get_df_schema(df):
try:
json_schema = df.schema.json()
json_schema = json.loads(json_schema)
b2s_dict = {}
for i, val in enumerate(json_schema['fields']):
if val['type'] == 'integer':
b2s_dict[val['name'].upper()] = {'order': i + 1, 'active': True, 'type': 'IntegerType()'}
if val['type'] == 'string':
b2s_dict[val['name'].upper()] = {'order': i + 1, 'active': True, 'type': 'StringType()'}
if val['type'] == 'float' or val['type'] == 'double':
b2s_dict[val['name'].upper()] = {'order': i + 1, 'active': True, 'type': 'FloatType()'}
return b2s_dict
except Exception as e:
raise e
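    # Illustrative result shape (added note; the column names are assumptions):
    #   {'AGE':  {'order': 1, 'active': True, 'type': 'IntegerType()'},
    #    'NAME': {'order': 2, 'active': True, 'type': 'StringType()'}}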
def init_log_and_metric_handlers(self, block_params=None):
self.logger = sdk.block_log_handler(block_params)
self.metrics_handler = sdk.metrics_api()
self.metrics["appId"] = MetricConfig.GRAPH_APP_ID
self.metrics["keys"] = block_params
return
def init(self, input_dict=None, block_params=None, program_arguments=None):
try:
self.init_log_and_metric_handlers(block_params)
self.logger.info('Join Py Spark Block')
self.logger.info('Input params:')
self.logger.info(json.dumps(input_dict, sort_keys=True, indent=2))
sdk.resource_monitor_handler(block_params).post_cpu_mem_usage()
return
except Exception as e:
self.logger.error(traceback.format_exc())
self.block_status = "FAILED"
raise e
def processMessagesForUi(self, process_expression):
info = []
error = []
for index in self.blockProcessInfo:
expression = process_expression[index]["expression"]
if self.blockProcessInfo[index]["noOfLines"] > 0:
info.append("Created column '" + self.blockProcessInfo[index][
"column_name"] + "' with expression [" + expression + "] applied for " + str(
self.blockProcessInfo[index]["noOfLines"]) + " records")
if self.blockErrorInfo[index]["noOfErrorLines"] > 0:
error.append(self.blockErrorInfo[index]["missingColumns"])
error.append("Creation of column '" + self.blockProcessInfo[index][
"column_name"] + "' with expression [" + expression + "] failed on " + str(
self.blockErrorInfo[index]["noOfErrorLines"]) + " records")
return info, error
def stream_block2(self, input_dict=None, block_params=None, optional_arg=None):
try:
self.data_frame = optional_arg['df_path']
schema = optional_arg['schema']
topic = optional_arg['topic_id']
batch_size = input_dict['Config']['batchSize']
records_wrote = []
records_wrote.append(0)
batch_no = 1
self.logger.info(f"Started Writing Data Frame to Queue")
repartition_col = input_dict['Config']['repartition_col']
self.logger.info(f'finding distinct values of {repartition_col}')
exec("self.dtnct = self.data_frame.select('" + repartition_col + "').distinct().collect()")
# rdd_df = data_frame.rdd.zipWithIndex()
# df_final = rdd_df.toDF()
self.logger.info(f"found distinct keys")
intial_val = 0
for dstnct_vals in self.dtnct:
val_fil = dstnct_vals[repartition_col]
                self.df_tmp_fil = self.data_frame.filter(self.data_frame[repartition_col] == val_fil)
self.logger.info(f"writing {batch_no}")
batch_no += 1
temp_messages = []
for row in self.df_tmp_fil.collect():
row = list(row)
row = [str(x) for x in row]
temp_messages.append(json.dumps(row))
publish_req = {
"messageList": temp_messages,
"registerId": "",
"topicName": topic
}
optional_arg["api_instance"].publish_messages_to_topic_using_post(publish_req)
records_wrote[-1] += len(temp_messages)
info, error = self.processMessagesForUi(records_wrote)
ui_info = {"info": info, "error": error}
topic_name = {
"name": topic,
"metaData": {
"schema": json.dumps(schema),
"readerInfo": json.dumps({
"noOfRecordsRead": records_wrote[-1]
}),
"ui_info": json.dumps(ui_info),
"total_messages": records_wrote[-1]
}
}
optional_arg["api_instance"].update_topic_meta_using_post(topic_name)
self.logger.info(f"Written {records_wrote[-1]} records")
gc.collect()
self.logger.info("hbbbbubuuuuuuuuuu")
# for row in df_rdd.collect():
# row = list(row)
# row = [str(x) for x in row]
# temp_messages.append(json.dumps(row))
#
# if len(temp_messages) % batch_size == 0:
# batch_no += 1
#
# publish_req = {
# "messageList": temp_messages,
# "registerId": "",
# "topicName": topic
# }
#
# optional_arg["api_instance"].publish_messages_to_topic_using_post(publish_req)
# records_wrote[-1] += len(temp_messages)
#
# info, error = self.processMessagesForUi(records_wrote)
# ui_info = {"info": info, "error": error}
#
# topic_name = {
# "name": topic,
# "metaData": {
# "schema": json.dumps(schema),
# "readerInfo": json.dumps({
# "noOfRecordsRead": records_wrote[-1]
# }),
#
# "ui_info": json.dumps(ui_info),
# "total_messages": records_wrote[-1]
# }
# }
#
# optional_arg["api_instance"].update_topic_meta_using_post(topic_name)
# self.logger.info(f"Written {records_wrote[-1]} records")
# temp_messages = []
# if (batch_no == 1) or temp_messages:
# publish_req = {
# "messageList": temp_messages,
# "registerId": "",
# "topicName": topic
# }
# optional_arg["api_instance"].publish_messages_to_topic_using_post(publish_req)
# records_wrote[-1] += len(temp_messages)
#
# info, error = self.processMessagesForUi(records_wrote)
# ui_info = {"info": info, "error": error}
#
# topic_name = {
# "name": topic,
# "metaData": {
# "schema": json.dumps(schema),
# "readerInfo": json.dumps({
# "noOfRecordsRead": records_wrote[-1]
# }),
# "ui_info": json.dumps(ui_info),
# "total_messages": records_wrote[-1]
# }
# }
#
# optional_arg["api_instance"].update_topic_meta_using_post(topic_name)
# self.logger.info(f"Written {records_wrote[-1]} records")
release_request = {
"topicName": topic
}
self.logger.debug(f"Releasing Producer For Topic: {topic}")
optional_arg["api_instance"].release_producer_using_post(release_request)
self.logger.debug(f"Producer Released")
self.block_status = "COMPLETED"
self.logger.info("Stream Complete")
except Exception as e:
self.logger.error(traceback.format_exc())
self.block_status = "FAILED"
raise e
def run(self, input_dict=None, block_params=None, program_arguments=None):
try:
t1 = time.time()
output_dict = dict()
configs = input_dict["Config"]
# test_args = {'spark.app.name': 'spark_app_test', 'spark.shuffle.service.enabled': 'true', 'spark.dynamicAllocation.minExecutors': '1', 'spark.dynamicAllocation.enabled': 'true'}
queue_dict = {}
queue_dict['left_df'] = input_dict['leftData']['queueTopicName']
queue_dict['right_df'] = input_dict['rightData']['queueTopicName']
kafka_handler = sdk.kafka_handler(None)
kafka_api_instance = kafka_handler.get_api_instance()
channels = {}
for key, topic in queue_dict.items():
consumer_pool = {
"count": 1,
"groupId": str(uuid.uuid4()),
"registerId": "",
"topicsListToSubscribe": [
topic
]
}
try:
consumer_pool_res = kafka_api_instance.create_consumer_list_using_post(consumer_pool)
channels[key] = consumer_pool_res.result
except Exception as e:
self.logger.error("Error Trying To Create a Consumer Of Topic:" + str(topic))
self.block_status = "FAILED"
raise e
optional_param = {}
optional_param['queue_dict'] = queue_dict
optional_param["api_instance"] = kafka_api_instance
optional_param["channels"] = channels
self.spark = SparkConfCustom().get_spark_session()
self.spark.sparkContext.setLogLevel('ERROR')
self.spark_schema = {}
self.field_list = {}
print('waiting')
# time.sleep(200)
arrary_of_threads = []
for key, topic in queue_dict.items():
req = {"topicName": topic}
try:
schema = kafka_api_instance.get_topic_meta_using_post(req)
schema = json.loads(json.loads(schema.result)["schema"])
optional_param['schema'] = schema
self.logger.debug("Schema Received")
except Exception as e:
self.logger.error("Error Fetching Schema")
self.logger.error(str(e))
self.logger.error(traceback.format_exc())
self.block_status = "FAILED"
raise e
col_names = schema.keys()
parsed_schema_dict = {}
for name in col_names:
values = schema.get(name)
parsed_schema_dict[name] = values['type']
self.logger.info("schemaaaa hereeee")
self.logger.info(schema)
self.logger.info(parsed_schema_dict)
self.list_of_struct_fields = []
self.field_list[key] = []
for name in parsed_schema_dict.keys():
if parsed_schema_dict[name] == 'FloatType()':
self.field_list[key].append(('float'))
self.list_of_struct_fields.append(StructField(name, FloatType(), True))
elif parsed_schema_dict[name] == 'IntegerType()':
self.field_list[key].append('int')
self.list_of_struct_fields.append(StructField(name, IntegerType(), True))
elif parsed_schema_dict[name] == 'DoubleType()':
self.field_list[key].append('float')
self.list_of_struct_fields.append(StructField(name, DoubleType(), True))
else:
self.field_list[key].append('string')
self.list_of_struct_fields.append(StructField(name, StringType(), True))
self.spark_schema[key] = StructType(self.list_of_struct_fields)
fpath = '/bigbrain/' + str(key)
if os.path.exists(fpath):
rmtree(fpath)
os.makedirs(fpath, exist_ok=True)
t = self.ReadRecords(self.spark, topic, key, self.spark_schema[key], input_dict, block_params,
optional_param, self.field_list)
t.start()
arrary_of_threads.append(t)
for t in arrary_of_threads:
t.join()
print('Both topics read done')
# self.stream_block(input_dict=input_dict, block_params=block_params, optional_arg=optional_param)
self.left_df = self.spark.read.parquet('/bigbrain/left_df')
print(self.left_df.count())
self.right_df = self.spark.read.parquet('/bigbrain/right_df')
print(self.right_df.count())
exec("self.resultant_join_df" + "=" + "self.left_df.join(self.right_df,self.left_df['" + configs[
'unique_key_left'] + "']== self.right_df['" + configs['unique_key_right'] + "'] ,how='" + configs[
'join_type'] + "')")
print(self.left_df.rdd.getNumPartitions())
print(self.right_df.rdd.getNumPartitions())
new_column_name_list = self.resultant_join_df.columns
renamed_cols = {}
for col in new_column_name_list:
count = new_column_name_list.count(col)
if count > 1:
idx = new_column_name_list.index(col)
new_column_name_list[idx] = 'bbl_' + col
print(self.resultant_join_df.columns)
self.resultant_join_df = self.resultant_join_df.toDF(*new_column_name_list)
print(self.resultant_join_df.columns)
self.resultant_join_df.write.csv('/tmp/test')
print(self.resultant_join_df.rdd.getNumPartitions())
self.logger.info("*****************************")
self.logger.info("Join Completed")
# create topic for the result to be stored
kafka_handler = sdk.kafka_handler(None)
api_instance = kafka_handler.get_api_instance()
# preprocess and detect schema, add meta
block_start_time = time.time()
schema_new = self.get_df_schema(self.resultant_join_df)
operationalParams = {}
operationalParams["api_instance"] = api_instance
operationalParams["block_start_time"] = block_start_time
operationalParams["dataframe_name"] = "resultant_join_df"
operationalParams["data_frame"] = self.resultant_join_df
operationalParams["schema"] = schema_new
try:
resultant_topic = str(uuid.uuid4())
topic = {
"name": resultant_topic,
"identifiers": block_params,
"displayName": 'resultantQueueTopic'
}
topic_res = api_instance.create_topic_using_post(topic)
topic_name = {
"name": resultant_topic,
"metaData": {
"schema": json.dumps(schema_new)
}
}
api_instance.update_topic_meta_using_post(topic_name)
self.logger.info(f"Schema Added For Topic {resultant_topic}")
except Exception as e:
self.logger.error("Error creating resultant topic " + str(resultant_topic))
raise e
operationalParams["topic_id"] = resultant_topic
self.stream_block2(input_dict=input_dict, optional_arg=operationalParams, block_params=block_params)
self.logger.info("Output:")
self.logger.info(json.dumps(output_dict, indent=2))
output_dict["queueTopicName"] = resultant_topic
output_dict['readerInfo'] = None
output_dict['readerInfoError'] = None
output_dict["infoKeys"] = None
self.logger.info("Output:")
self.logger.info(json.dumps(output_dict, indent=2))
return output_dict
except Exception as e:
self.logger.error(traceback.format_exc())
self.block_status = "FAILED"
raise e
bb_object = SparkJoin()
|
the-stack_0_18179 | from flask import render_template, request, redirect, url_for, abort
from flask.helpers import flash
from . import main
from .forms import PostForm, CommentForm, UpdateProfile
from ..models import Post, Comment, User, Upvote, Downvote
from flask_login import login_required, current_user
from .. import db, photos
# import markdown2
@main.route('/')
def index():
Fashion= Post.query.filter_by(category='Fashion').all()
Sports = Post.query.filter_by(category='Sports').all()
Business = Post.query.filter_by(category='Business').all()
Education = Post.query.filter_by(category='Education').all()
Humour = Post.query.filter_by(category='Humour').all()
posts = Post.query.order_by(Post.added_date.desc()).all()
return render_template('index.html', Business=Business, Fashion=Fashion, Sports=Sports,Education=Education,Humour=Humour, posts=posts)
@main.route('/posts')
@login_required
def posts():
posts = Post.query.all()
likes = Upvote.query.all()
user = current_user
return render_template('pitch.html', posts=posts, likes=likes, user=user)
@main.route("/user/<uname>")
@login_required
def profile(uname):
user = User.query.filter_by(username=uname).first()
if user is None:
abort(404)
return render_template("profile/profile.html", user=user)
@main.route("/user/<uname>/update", methods=["GET", "POST"])
def update_profile(uname):
user = User.query.filter_by(username=uname).first()
if user is None:
abort(404)
form = UpdateProfile()
if form.validate_on_submit():
user.bio = form.bio.data
db.session.add(user)
db.session.commit()
return redirect(url_for(".profile", uname=user.username))
return render_template("profile/update.html", form=form)
@main.route('/user/<uname>/update/pic',methods= ['POST'])
@login_required
def update_pic(uname):
user = User.query.filter_by(username = uname).first()
if 'photo' in request.files:
filename = photos.save(request.files['photo'])
path = f'photos/{filename}'
user.profile_pic_path = path
db.session.commit()
return redirect(url_for('main.profile',uname=uname))
# @main.route('/user/<uname>/update',methods = ['GET','POST'])
# def update_profile(uname):
# user = User.query.filter_by(username = uname).first()
# if user is None:
# abort(404)
# form = UpdateProfile()
# if form.validate_on_submit():
# user.bio = form.bio.data
# db.session.add(user)
# db.session.commit()
# return redirect(url_for('.profile',uname=user.username))
# return render_template('profile/update.html',form =form)
@main.route('/new_post', methods=['GET', 'POST'])
@login_required
def new_post():
form = PostForm()
if form.validate_on_submit():
title = form.title.data
post = form.post.data
category = form.category.data
user_id = current_user._get_current_object().id
# post_obj = Post(post=post, title=title, category=category, user_id=user_id)
new_post=Post(title=title,post=post,category=category)
new_post.save()
db.session.add(new_post)
db.session.commit()
# post_obj.save()
flash('Your pitch has been created successfully!')
return redirect(url_for('main.index',uname=current_user.username))
return render_template('new_pitch.html', form=form ,title='Pitch Perfect')
@main.route('/comment/<int:post_id>', methods=['GET', 'POST'])
@login_required
def comment(post_id):
form = CommentForm()
post = Post.query.get(post_id)
user = User.query.all()
comments = Comment.query.filter_by(post_id=post_id).all()
if form.validate_on_submit():
comment = form.comment.data
post_id = post_id
user_id = current_user._get_current_object().id
new_comment = Comment(
comment=comment,
post_id=post_id,
user_id=user_id
)
new_comment.save_comment()
new_comments = [new_comment]
print(new_comments)
flash('Your comment has been created successfully!')
return redirect(url_for('.comment', post_id=post_id))
return render_template('comment.html', form=form, post=post, comments=comments, user=user)
@main.route('/user')
@login_required
def user():
username = current_user.username
user = User.query.filter_by(username=username).first()
if user is None:
return ('not found')
return render_template('profile.html', user=user)
# @main.route('/user/<name>/update_profile', methods=['POST', 'GET'])
# @login_required
# def updateprofile(name):
# form = UpdateProfile()
# user = User.query.filter_by(username=name).first()
# if user is None:
# error = 'The user does not exist'
# if form.validate_on_submit():
# user.bio = form.bio.data
# user.save()
# return redirect(url_for('.profile', name=name))
# return render_template('profile/update_profile.html', form=form)
@main.route('/like/<int:id>', methods=['POST', 'GET'])
@login_required
def upvote(id):
post = Post.query.get(id)
if post is None:
abort(404)
upvote= Upvote.query.filter_by(user_id=current_user.id, post_id=id).first()
if upvote is not None:
db.session.delete(upvote)
db.session.commit()
return redirect(url_for('.index'))
new_like = Upvote(
user_id=current_user.id,
post_id=id
)
db.session.add(new_like)
db.session.commit()
return redirect(url_for('main.index'))
@main.route('/dislike/<int:id>', methods=['POST', 'GET'])
@login_required
def downvote(id):
post = Post.query.get(id)
if post is None:
abort(404)
downvote= Downvote.query.filter_by(user_id=current_user.id, post_id=id).first()
if downvote is not None:
db.session.delete(downvote)
db.session.commit()
return redirect(url_for('.index'))
new_like = Downvote(
user_id=current_user.id,
post_id=id
)
db.session.add(new_like)
db.session.commit()
return redirect(url_for('main.index'))
# @main.route('/dislike/<int:id>', methods=['GET', 'POST'])
# @login_required
# def downvote(id):
# post = Post.query.get(id)
# nv = Downvote(post=post, downvote=1)
# nv.save()
# return redirect(url_for('main.posts')) |
the-stack_0_18180 | # Copyright (c) OpenMMLab. All rights reserved.
import platform
import pytest
import torch
from mmselfsup.models.algorithms import RelativeLoc
backbone = dict(
type='ResNet',
depth=18,
in_channels=3,
out_indices=[4], # 0: conv-1, x: stage-x
norm_cfg=dict(type='BN'))
neck = dict(
type='RelativeLocNeck',
in_channels=512,
out_channels=2,
with_avg_pool=True)
head = dict(type='ClsHead', with_avg_pool=False, in_channels=2, num_classes=8)
@pytest.mark.skipif(platform.system() == 'Windows', reason='Windows mem limit')
def test_relative_loc():
with pytest.raises(AssertionError):
alg = RelativeLoc(backbone=backbone, neck=None, head=head)
with pytest.raises(AssertionError):
alg = RelativeLoc(backbone=backbone, neck=neck, head=None)
alg = RelativeLoc(backbone=backbone, neck=neck, head=head)
with pytest.raises(AssertionError):
fake_input = torch.randn((2, 8, 6, 224, 224))
patch_labels = torch.LongTensor([0, 1, 2, 3, 4, 5, 6, 7])
alg.forward(fake_input, patch_labels)
# train
fake_input = torch.randn((2, 8, 6, 224, 224))
patch_labels = torch.LongTensor([[0, 1, 2, 3, 4, 5, 6, 7],
[0, 1, 2, 3, 4, 5, 6, 7]])
fake_out = alg.forward(fake_input, patch_labels)
assert fake_out['loss'].item() > 0
# test
fake_input = torch.randn((2, 8, 6, 224, 224))
patch_labels = torch.LongTensor([[0, 1, 2, 3, 4, 5, 6, 7],
[0, 1, 2, 3, 4, 5, 6, 7]])
fake_out = alg.forward(fake_input, patch_labels, mode='test')
assert 'head4' in fake_out
# extract
fake_input = torch.randn((2, 3, 224, 224))
fake_backbone_out = alg.forward(fake_input, mode='extract')
assert fake_backbone_out[0].size() == torch.Size([2, 512, 7, 7])
|
the-stack_0_18181 | """
Write BoM out to an XML file
filename = path to output file (must be a .xml)
groups = [list of ComponentGroup groups]
net = netlist object
headings = [list of headings to display in the BoM file]
prefs = BomPref object
"""
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from xml.etree import ElementTree
from xml.dom import minidom
def WriteXML(filename, groups, net, headings, prefs):
if not filename.endswith(".xml"):
return False
nGroups = len(groups)
nTotal = sum([g.getCount() for g in groups])
nFitted = sum([g.getCount() for g in groups if g.isFitted()])
nBuild = nFitted * prefs.boards
attrib = {}
attrib['Schematic_Source'] = net.getSource()
attrib['Schematic_Version'] = net.getVersion()
attrib['Schematic_Date'] = net.getSheetDate()
attrib['PCB_Variant'] = ', '.join(prefs.pcbConfig)
attrib['BOM_Date'] = net.getDate()
attrib['KiCad_Version'] = net.getTool()
attrib['Component_Groups'] = str(nGroups)
attrib['Component_Count'] = str(nTotal)
attrib['Fitted_Components'] = str(nFitted)
attrib['Number_of_PCBs'] = str(prefs.boards)
attrib['Total_Components'] = str(nBuild)
xml = ElementTree.Element('KiCad_BOM', attrib=attrib, encoding='utf-8')
for group in groups:
if prefs.ignoreDNF and not group.isFitted():
continue
row = group.getRow(headings)
attrib = {}
for i, h in enumerate(headings):
h = h.replace(' ', '_') # Replace spaces, xml no likey
h = h.replace('"', '')
h = h.replace("'", '')
attrib[h] = str(row[i])
ElementTree.SubElement(xml, "group", attrib=attrib)
with open(filename, "w", encoding="utf-8") as output:
out = ElementTree.tostring(xml, encoding="utf-8")
# There is probably a better way to write the data to file (without so many encoding/decoding steps),
# but toprettyxml() without specifying UTF-8 will chew up non-ASCII chars. Perhaps revisit if performance here
# is ever a concern
output.write(minidom.parseString(out).toprettyxml(indent="\t", encoding="utf-8").decode("utf-8"))
return True
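
# Illustrative usage sketch (not part of the original KiBoM code): WriteXML expects a
# netlist object, a list of ComponentGroup objects, a headings list, and a BomPref
# object. The minimal stand-in classes below are hypothetical stubs, shown only to
# make the expected interface concrete; the real classes live elsewhere in the project.
if __name__ == "__main__":
    class _FakeNet:
        def getSource(self): return "demo.sch"
        def getVersion(self): return "A"
        def getSheetDate(self): return "2020-01-01"
        def getDate(self): return "2020-01-02"
        def getTool(self): return "eeschema"

    class _FakeGroup:
        def getCount(self): return 2
        def isFitted(self): return True
        def getRow(self, headings): return ["R1 R2", "10k", "0603"]

    class _FakePrefs:
        boards = 1
        pcbConfig = ["default"]
        ignoreDNF = True

    # Writes demo_bom.xml in the current directory
    WriteXML("demo_bom.xml", [_FakeGroup()], _FakeNet(),
             ["References", "Value", "Footprint"], _FakePrefs())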
|
the-stack_0_18183 | # Mutability: where you change the value of a variable.
def sum(numeros):
total = 0
for numero in numeros:
total += numero
return total
print(sum([2, 4, 6, 8, 10]))
# Second example:
print('\nsecond example: ')
lista = ['ferrari']
lista.append('porche')
print(lista) |
the-stack_0_18184 | from importlib import import_module
from inspect import getmembers, isclass
_ALL_ANNOTATIONS = {}
_MODULES = [
"calibration",
"cluster",
"compose",
"covariance",
"cross_decomposition",
"decomposition",
"discriminant_analysis",
"dummy",
"ensemble",
"feature_extraction",
"feature_selection",
"gaussian_process",
"impute",
"isotonic",
"kernel_approximation",
"kernel_ridge",
"linear_model",
"manifold",
"mixture",
"model_selection",
"multiclass",
"multioutput",
"naive_bayes",
"neighbors",
"neural_network",
"pipeline",
"preprocessing",
"random_projection",
"semi_supervised",
"svm",
"tree",
]
for modules in _MODULES:
mod = import_module(f".{modules}", package="sk_typing")
for name, member in getmembers(mod, isclass):
_ALL_ANNOTATIONS[name] = member
__all__ = [
"get_metadata",
]
def get_metadata(estimator_name):
"""Get init annotations for estimator.
Parameters
----------
estimator_name : str
Name of estimator
Returns
-------
metadata: dict
"""
    try:
        annotations = _ALL_ANNOTATIONS[estimator_name]
    except KeyError:
        raise ValueError(f"Type annotations were not defined for {estimator_name}")
    return {
        "parameters": annotations.__init__.__annotations__,
        "attributes": getattr(annotations, "__annotations__", {}),
    }
|
the-stack_0_18185 | # Copyright 2013-2019 CERN for the benefit of the ATLAS collaboration.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors:
# - Martin Barisits <[email protected]>, 2015
# - Vincent Garonne <[email protected]>, 2017
# - Mario Lassnig <[email protected]>, 2019
''' create index on rules_hist_recent '''
from alembic import context
from alembic.op import create_index, drop_index
# Alembic revision identifiers
revision = '1a80adff031a'
down_revision = '3ad36e2268b0'
def upgrade():
'''
Upgrade the database to this revision
'''
if context.get_context().dialect.name in ['oracle', 'mysql', 'postgresql']:
create_index('RULES_HIST_RECENT_SC_NA_IDX', 'rules_hist_recent', ['scope', 'name'])
def downgrade():
'''
Downgrade the database to the previous revision
'''
if context.get_context().dialect.name in ['oracle', 'mysql', 'postgresql']:
drop_index('RULES_HIST_RECENT_SC_NA_IDX', 'rules_hist_recent')
|
the-stack_0_18187 | """
Purpose:
Kafka Consumer Helpers.
This library is used to aid in creating kafka consumers.
"""
# Python Library Imports
import logging
import simplejson as json
from confluent_kafka import Consumer, KafkaException, KafkaError
def get_kafka_consumer(
kafka_brokers,
consumer_group="default",
timeout=6000,
offset_start="latest",
get_stats=True
):
"""
Purpose:
Get a Kafka Consumer Object (not yet connected to a topic)
Args:
kafka_brokers (List of Strings): List of host:port combinations for kakfa brokers
consumer_group (String): Consumer group to consume as. default is "default"
timeout (String): Timeout in ms if no messages are found (during poll). Default
is 6000
offset_start (String): Where to start consuming with respect to the consumer
group/topic offset. Default is "latest", which ignores any messages in the
topic before the consumer begins consuming
get_stats (Bool): Whether or not to print statistics. Default is True
Return:
kafka_consumer (Kafka Consumer Obj): Kafka Consumer Object
"""
logging.info(f"Creating Consumer {consumer_group} for {','.join(kafka_brokers)}")
consumer_configuration = {
"bootstrap.servers": ",".join(kafka_brokers),
"group.id": consumer_group,
"session.timeout.ms": timeout,
"auto.offset.reset": offset_start,
}
if get_stats:
consumer_configuration["statistics.interval.ms"] = 100000
consumer_configuration["stats_cb"] = consumer_statistic_callback
consumer_logger = get_consumer_logger(consumer_group)
return Consumer(consumer_configuration, logger=consumer_logger)
def consume_topic(kafka_consumer, kafka_topics):
"""
Purpose:
Consume Kafka Topics
Args:
kafka_consumer (Kafka Consumer Obj): Kafka Consumer Object
kafka_topics (List of Strings): List of Kafka Topics to Consume.
Yields:
msg (Kafka Message Obj): Message Obj returned from the topic
"""
logging.info(f"Consuming Topics {', '.join(kafka_topics)}")
# Subscribe to topics
kafka_consumer.subscribe(kafka_topics, on_assign=consumer_assignment_callback)
# Read messages from Kafka, print to stdout
try:
while True:
msg = kafka_consumer.poll(timeout=1.0)
if msg is None:
continue
if msg.error():
if msg.error().code() == KafkaError._PARTITION_EOF:
# End of partition event
logging.info(
'Reached End of Offset: topic={0}, '
'partition={1}, offset={2}'.format(
msg.topic(), msg.partition(), msg.offset()
)
)
else:
raise KafkaException(msg.error())
else:
logging.info(
'Got Message from Topic: topic={0}, partition={1}, '
'offset={2}, key={3}, value={4}'.format(
msg.topic(), msg.partition(),
msg.offset(), msg.key(), msg.value()
)
)
print(
'Key {0} Returned {1}'.format(
msg.key(),
int.from_bytes(msg.value(), byteorder='big')
)
)
except KeyboardInterrupt:
logging.info('Consume Ended By User')
except KafkaException as err:
logging.error('KafkaException Raise: {0}'.format(err))
finally:
kafka_consumer.close()
###
# Consumer Management, Logging, Callbacks
###
def consumer_assignment_callback(consumer, partitions):
"""
"""
print('Assignment:', partitions)
def consumer_statistic_callback(stats_json_str):
"""
Purpose:
Parse CLI arguments for script
Args:
brokers (List of Strings): List of host:port combinations for kakfa topics
consumer_group (String): Something
timeout (String): Something
offset_start (String): Something
Return:
kafka_consumer (Kafka Consumer Obj): Kafka Consumer Object
"""
return json.loads(stats_json_str)
def get_consumer_logger(logger_name="consumer", log_level=logging.INFO):
"""
Purpose:
Parse CLI arguments for script
Args:
brokers (List of Strings): List of host:port combinations for kakfa topics
consumer_group (String): Something
timeout (String): Something
offset_start (String): Something
Return:
kafka_consumer (Kafka Consumer Obj): Kafka Consumer Object
"""
# Create logger for consumer (logs will be emitted when poll() is called)
logger = logging.getLogger(logger_name)
logger.setLevel(log_level)
handler = logging.StreamHandler()
handler.setFormatter(
logging.Formatter("%(asctime)-15s %(levelname)-8s %(message)s")
)
logger.addHandler(handler)
return logger
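
# Minimal usage sketch (illustrative only; the broker address, consumer group, and
# topic name below are placeholder assumptions). It wires get_kafka_consumer() and
# consume_topic() together the way the docstrings above describe.
if __name__ == "__main__":
    example_brokers = ["localhost:9092"]     # assumed broker address
    example_topics = ["example-topic"]       # assumed topic name
    consumer = get_kafka_consumer(
        example_brokers,
        consumer_group="example-group",
        offset_start="earliest",
        get_stats=False,
    )
    # Blocks and polls until interrupted or the producer reports completion
    consume_topic(consumer, example_topics)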
|
the-stack_0_18191 | """
**NOTE: This file should become obsolete. The list of classes will be
implemented as a sub-JSON from SMuFL.**
This module implements the :class:`NodeClass`, which
represents one possible :class:`Node` class, such as
a notehead or a time signature. Aside from defining the "vocabulary"
of available object classes for annotation, it also contains
some information about how objects of the given class should
be displayed in the MUSCIMarker annotation software (ordering
related object classes together in menus, implementing a sensible
color scheme, etc.). There is nothing interesting about this class,
we pulled it into the ``mung`` package because the object
grammar (i.e. which relationships are allowed and which are not)
depends on having NodeClass object as its "vocabulary",
and you will probably want to manipulate the data somehow based
on the objects' relationships (like reassembling notes from notation
primitives: notehead plus stem plus flags...), and the grammar
file is a reference for doing that.
NodeClass is a plain old data class, nothing interesting
about it. The only catch is that colors for rendering
in MUSCIMarker are kept as a ``#RRGGBB`` string in the XML
file, but represented in the ``NodeClass.color`` attribute
as a triplet of floats between 0 (``00``) and 255 (``ff``).
The ``___str__()`` method of the class will output the correct
XML representation.
**XML example**
This is what a single NodeClass element might look like::
<NodeClass>
<Id>1</Id>
<Name>notehead-empty</Name>
<GroupName>note-primitive/notehead-empty</GroupName>
<Color>#FF7566</Color>
</NodeClass>
See e.g. ``test/test_data/mff-muscima-classes-annot.xml``,
which is incidentally the real NodeClass list used
for annotating MUSCIMA++.
"""
import logging
class NodeClass(object):
"""Information about the annotation class. We're using it
mostly to get the color of rendered Node.
NodeClass is a Plain Old Data class, there is no other
functionality beyond simply existing and writing itself
out in the appropriate XML format.
"""
def __init__(self, class_id, name, group_name, color):
self.class_id = class_id
self.name = name
self.group_name = group_name
# Parse the string into a RGB spec.
r, g, b = hex2rgb(color)
logging.debug('NodeClass {0}: color {1}'.format(name, (r, g, b)))
self.color = (r, g, b)
def __str__(self):
lines = []
lines.append('<NodeClass>')
lines.append(' <Id>{0}</Id>'.format(self.class_id))
lines.append(' <Name>{0}</Name>'.format(self.name))
lines.append(' <GroupName>{0}</GroupName>'.format(self.group_name))
lines.append(' <Color>{0}</Color>'.format(rgb2hex(self.color)))
lines.append('</NodeClass>')
return '\n'.join(lines)
#######################################################################
# Utility functions for name/writer conversions
_hex_tr = {
'0': 0,
'1': 1, '2': 2, '3': 3, '4': 4, '5': 5, '6': 6, '7': 7,
'8': 8, '9': 9,
'a': 10, 'b': 11, 'c': 12, 'd': 13, 'e': 14, 'f': 15,
'A': 10, 'B': 11, 'C': 12, 'D': 13, 'E': 14, 'F': 15,
}
_hex_itr = {v: k for k, v in list(_hex_tr.items())}
def parse_hex(hstr):
"""Convert a hexadecimal number string to integer.
>>> parse_hex('33')
51
>>> parse_hex('abe8')
44008
"""
out = 0
for i, l in enumerate(reversed(hstr)):
out += (16 ** i) * _hex_tr[l]
return out
def hex2rgb(hstr):
"""Parse a hex-coded color like '#AA0202' into a floating-point representation.
>>> hex2rgb('#abe822')
(0.6705882352941176, 0.9098039215686274, 0.13333333333333333)
"""
if hstr.startswith('#'):
hstr = hstr[1:]
rs, gs, bs = hstr[:2], hstr[2:4], hstr[4:]
r, g, b = parse_hex(rs), parse_hex(gs), parse_hex(bs)
return r / 255.0, g / 255.0, b / 255.0
def rgb2hex(rgb):
"""Convert a floating-point representation of R, G, B values
between 0 and 1 (inclusive) to a hex string (strating with a
hashmark). Will use uppercase letters for 10 - 15.
>>> rgb = (0.6705882352941176, 0.9098039215686274, 0.13333333333333333)
>>> rgb2hex(rgb)
'#ABE822'
"""
rgb_int = [int(ch * 255) for ch in rgb]
return '#' + ''.join(['{:02X}'.format(ch) for ch in rgb_int])
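
# Small illustrative example (not part of the original module): construct a NodeClass
# from an XML-style hex color and print it back out. The values are made up and only
# demonstrate the color round-trip described in the module docstring.
if __name__ == "__main__":
    example_class = NodeClass(class_id=1,
                              name='notehead-empty',
                              group_name='note-primitive/notehead-empty',
                              color='#FF7566')
    print(example_class.color)   # RGB triplet of floats in [0, 1]
    print(example_class)         # XML representation with the #RRGGBB color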
|
the-stack_0_18192 | import datetime
from typing import List
from fastapi.encoders import jsonable_encoder
from sqlalchemy.orm import Session
from app.models.datasource import DataSource
from app.schemas.datasource import DataSourceItem, DataSourceCreate, DataSourceUpdate
from app.crud.base import CRUDBase
class CRUDDataSource(CRUDBase[DataSource, DataSourceCreate, DataSourceUpdate]):
def create_with_owner(
self, db_session: Session, *, obj_in: DataSourceCreate, created_by: int
) -> DataSourceItem:
obj_in_data = jsonable_encoder(obj_in)
db_obj = self.model(**obj_in_data,
created_at=datetime.datetime.now(),
updated_at=datetime.datetime.now(),
created_by=created_by)
db_session.add(db_obj)
db_session.commit()
db_session.refresh(db_obj)
return db_obj
def get_multi_by_owner(
self, db_session: Session, *, create_by: int, skip=0, limit=100
) -> List[DataSourceItem]:
return (
db_session.query(self.model)
.filter(DataSource.create_by == create_by)
.offset(skip)
.limit(limit)
.all()
)
data_source = CRUDDataSource(DataSource)
|
the-stack_0_18194 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import openstack_dashboard.dashboards.project.data_processing. \
utils.workflow_helpers as work_helpers
class Helpers(object):
def __init__(self, sahara_client):
self.sahara = sahara_client
self.plugins = self.sahara.plugins
def _get_node_processes(self, plugin):
processes = []
for proc_lst in plugin.node_processes.values():
processes += proc_lst
return [(proc_name, proc_name) for proc_name in processes]
def get_node_processes(self, plugin_name, hadoop_version):
plugin = self.plugins.get_version_details(plugin_name, hadoop_version)
return self._get_node_processes(plugin)
def _extract_parameters(self, configs, scope, applicable_target):
parameters = []
for config in configs:
if (config['scope'] == scope and
config['applicable_target'] == applicable_target):
parameters.append(work_helpers.Parameter(config))
return parameters
def get_cluster_general_configs(self, plugin_name, hadoop_version):
plugin = self.plugins.get_version_details(plugin_name, hadoop_version)
return self._extract_parameters(plugin.configs, 'cluster', "general")
def get_general_node_group_configs(self, plugin_name, hadoop_version):
plugin = self.plugins.get_version_details(plugin_name, hadoop_version)
return self._extract_parameters(plugin.configs, 'node', 'general')
def get_targeted_node_group_configs(self, plugin_name, hadoop_version):
plugin = self.plugins.get_version_details(plugin_name, hadoop_version)
parameters = {}
for service in plugin.node_processes.keys():
parameters[service] = self._extract_parameters(plugin.configs,
'node', service)
return parameters
def get_targeted_cluster_configs(self, plugin_name, hadoop_version):
plugin = self.plugins.get_version_details(plugin_name, hadoop_version)
parameters = {}
for service in plugin.node_processes.keys():
parameters[service] = self._extract_parameters(plugin.configs,
'cluster', service)
return parameters
|
the-stack_0_18195 | #!/usr/bin/env python3
from filterpy.kalman import KalmanFilter
import matplotlib.pyplot as plt
import numpy as np
import pdb
from sklearn.utils.linear_assignment_ import linear_assignment
import sys
import time
from transform_utils import convert_3dbox_to_8corner
from iou_utils import compute_iou_2d_bboxes
from covariance import Covariance
class KalmanBoxTracker(object):
"""
    This class represents the internal state of individual tracked objects observed as bbox.
"""
count = 0
def __init__(self, bbox3D, info, classname):
"""
Initialises a tracker using initial bounding box.
"""
#define constant velocity model
self.kf = KalmanFilter(dim_x=10, dim_z=7)
self.kf.F = np.array([[1,0,0,0,0,0,0,1,0,0], # state transition matrix
[0,1,0,0,0,0,0,0,1,0],
[0,0,1,0,0,0,0,0,0,1],
[0,0,0,1,0,0,0,0,0,0],
[0,0,0,0,1,0,0,0,0,0],
[0,0,0,0,0,1,0,0,0,0],
[0,0,0,0,0,0,1,0,0,0],
[0,0,0,0,0,0,0,1,0,0],
[0,0,0,0,0,0,0,0,1,0],
[0,0,0,0,0,0,0,0,0,1]])
self.kf.H = np.array([[1,0,0,0,0,0,0,0,0,0], # measurement function,
[0,1,0,0,0,0,0,0,0,0],
[0,0,1,0,0,0,0,0,0,0],
[0,0,0,1,0,0,0,0,0,0],
[0,0,0,0,1,0,0,0,0,0],
[0,0,0,0,0,1,0,0,0,0],
[0,0,0,0,0,0,1,0,0,0]])
# with angular velocity
# self.kf = KalmanFilter(dim_x=11, dim_z=7)
# self.kf.F = np.array([[1,0,0,0,0,0,0,1,0,0,0], # state transition matrix
# [0,1,0,0,0,0,0,0,1,0,0],
# [0,0,1,0,0,0,0,0,0,1,0],
# [0,0,0,1,0,0,0,0,0,0,1],
# [0,0,0,0,1,0,0,0,0,0,0],
# [0,0,0,0,0,1,0,0,0,0,0],
# [0,0,0,0,0,0,1,0,0,0,0],
# [0,0,0,0,0,0,0,1,0,0,0],
# [0,0,0,0,0,0,0,0,1,0,0],
# [0,0,0,0,0,0,0,0,0,1,0],
# [0,0,0,0,0,0,0,0,0,0,1]])
# self.kf.H = np.array([[1,0,0,0,0,0,0,0,0,0,0], # measurement function,
# [0,1,0,0,0,0,0,0,0,0,0],
# [0,0,1,0,0,0,0,0,0,0,0],
# [0,0,0,1,0,0,0,0,0,0,0],
# [0,0,0,0,1,0,0,0,0,0,0],
# [0,0,0,0,0,1,0,0,0,0,0],
# [0,0,0,0,0,0,1,0,0,0,0]])
use_cov = False
if use_cov:
covariance = Covariance(2, classname) # use kitti covariance
self.kf.P = covariance.P[classname]
self.kf.Q = covariance.Q[classname]
# self.kf.R = covariance.R
else:
self.kf.R[0:,0:] *= 10. # measurement uncertainty
self.kf.P[7:,7:] *= 1000. #state uncertainty, give high uncertainty to the unobservable initial velocities, covariance matrix
self.kf.P *= 10.
# self.kf.Q[-1,-1] *= 0.01 # process uncertainty
self.kf.Q[7:,7:] *= 0.01
self.kf.x[:7] = bbox3D.reshape((7, 1))
self.time_since_update = 0
self.id = KalmanBoxTracker.count
KalmanBoxTracker.count += 1
self.history = []
self.hits = 1 # number of total hits including the first detection
self.hit_streak = 1 # number of continuing hit considering the first detection
self.first_continuing_hit = 1
self.still_first = True
self.age = 0
self.info = info # other info
# print info
def update(self, bbox3D, info):
"""
Updates the state vector with observed bbox.
"""
self.time_since_update = 0
self.history = []
self.hits += 1
self.hit_streak += 1 # number of continuing hit
if self.still_first:
self.first_continuing_hit += 1 # number of continuing hit in the fist time
######################### orientation correction
if self.kf.x[3] >= np.pi: self.kf.x[3] -= np.pi * 2 # make the theta still in the range
if self.kf.x[3] < -np.pi: self.kf.x[3] += np.pi * 2
new_theta = bbox3D[3]
if new_theta >= np.pi: new_theta -= np.pi * 2 # make the theta still in the range
if new_theta < -np.pi: new_theta += np.pi * 2
bbox3D[3] = new_theta
predicted_theta = self.kf.x[3]
if abs(new_theta - predicted_theta) > np.pi / 2.0 and abs(new_theta - predicted_theta) < np.pi * 3 / 2.0: # if the angle of two theta is not acute angle
self.kf.x[3] += np.pi
if self.kf.x[3] > np.pi: self.kf.x[3] -= np.pi * 2 # make the theta still in the range
if self.kf.x[3] < -np.pi: self.kf.x[3] += np.pi * 2
# now the angle is acute: < 90 or > 270, convert the case of > 270 to < 90
if abs(new_theta - self.kf.x[3]) >= np.pi * 3 / 2.0:
if new_theta > 0: self.kf.x[3] += np.pi * 2
else: self.kf.x[3] -= np.pi * 2
#########################
self.kf.update(bbox3D)
if self.kf.x[3] >= np.pi: self.kf.x[3] -= np.pi * 2 # make the theta still in the range
if self.kf.x[3] < -np.pi: self.kf.x[3] += np.pi * 2
self.info = info
def predict(self):
"""
Advances the state vector and returns the predicted bounding box estimate.
"""
self.kf.predict()
if self.kf.x[3] >= np.pi: self.kf.x[3] -= np.pi * 2
if self.kf.x[3] < -np.pi: self.kf.x[3] += np.pi * 2
self.age += 1
if(self.time_since_update>0):
self.hit_streak = 0
self.still_first = False
self.time_since_update += 1
self.history.append(self.kf.x)
return self.history[-1]
def get_state(self):
"""
Returns the current bounding box estimate.
"""
return self.kf.x[:7].reshape((7, ))
def associate_detections_to_trackers(detections,trackers,iou_threshold=0.1):
# def associate_detections_to_trackers(detections,trackers,iou_threshold=0.01): # ablation study
# def associate_detections_to_trackers(detections,trackers,iou_threshold=0.25):
"""
Assigns detections to tracked object (both represented as bounding boxes)
detections: N x 8 x 3
trackers: M x 8 x 3
Returns 3 lists of matches, unmatched_detections and unmatched_trackers
"""
if(len(trackers)==0):
return np.empty((0,2),dtype=int), np.arange(len(detections)), np.empty((0,8,3),dtype=int)
iou_matrix = np.zeros((len(detections),len(trackers)),dtype=np.float32)
for d,det in enumerate(detections):
for t,trk in enumerate(trackers):
#print(f'On d={d}, t={t}')
#iou_matrix[d,t] = iou3d(det,trk)[1] # try 2d iou instead # det: 8 x 3, trk: 8 x 3
iou_matrix[d,t] = compute_iou_2d_bboxes(det, trk)
matched_indices = linear_assignment(-iou_matrix) # hungarian algorithm
unmatched_detections = []
for d,det in enumerate(detections):
if(d not in matched_indices[:,0]):
unmatched_detections.append(d)
unmatched_trackers = []
for t,trk in enumerate(trackers):
if(t not in matched_indices[:,1]):
unmatched_trackers.append(t)
#print(iou_matrix)
#filter out matched with low IOU
matches = []
for m in matched_indices:
if(iou_matrix[m[0],m[1]]<iou_threshold):
unmatched_detections.append(m[0])
unmatched_trackers.append(m[1])
else:
matches.append(m.reshape(1,2))
if(len(matches)==0):
matches = np.empty((0,2),dtype=int)
else:
matches = np.concatenate(matches,axis=0)
return matches, np.array(unmatched_detections), np.array(unmatched_trackers)
class AB3DMOT(object):
    def __init__(self, max_age=2, min_hits=3):  # max_age keeps a track alive for up to 2 frames without a matching detection, interpolating through the misses
# def __init__(self,max_age=3,min_hits=3): # ablation study
# def __init__(self,max_age=1,min_hits=3):
# def __init__(self,max_age=2,min_hits=1):
# def __init__(self,max_age=2,min_hits=5):
"""
"""
self.max_age = max_age
self.min_hits = min_hits
self.trackers = []
self.frame_count = 0
# self.reorder = [3, 4, 5, 6, 2, 1, 0]
# self.reorder_back = [6, 5, 4, 0, 1, 2, 3]
def update(self,dets_all, classname):
"""
Params:
dets_all: dict
dets - a numpy array of detections in the format [[x,y,z,theta,l,w,h],[x,y,z,theta,l,w,h],...]
info: a array of other info for each det
Requires: this method must be called once for each frame even with empty detections.
        Returns a similar array, where the last column is the object ID.
NOTE: The number of objects returned may differ from the number of detections provided.
"""
dets, info = dets_all['dets'], dets_all['info'] # dets: N x 7, float numpy array
# dets = dets[:, self.reorder]
self.frame_count += 1
trks = np.zeros((len(self.trackers),7)) # N x 7 , #get predicted locations from existing trackers.
to_del = []
ret = []
for t,trk in enumerate(trks):
pos = self.trackers[t].predict().reshape((-1, 1))
trk[:] = [pos[0], pos[1], pos[2], pos[3], pos[4], pos[5], pos[6]]
if(np.any(np.isnan(pos))):
to_del.append(t)
trks = np.ma.compress_rows(np.ma.masked_invalid(trks))
for t in reversed(to_del):
self.trackers.pop(t)
dets_8corner = [convert_3dbox_to_8corner(det_tmp) for det_tmp in dets]
if len(dets_8corner) > 0: dets_8corner = np.stack(dets_8corner, axis=0)
else: dets_8corner = []
trks_8corner = [convert_3dbox_to_8corner(trk_tmp) for trk_tmp in trks]
if len(trks_8corner) > 0: trks_8corner = np.stack(trks_8corner, axis=0)
matched, unmatched_dets, unmatched_trks = associate_detections_to_trackers(dets_8corner, trks_8corner)
#update matched trackers with assigned detections
for t,trk in enumerate(self.trackers):
if t not in unmatched_trks:
d = matched[np.where(matched[:,1]==t)[0],0] # a list of index
trk.update(dets[d,:][0], info[d, :][0])
#create and initialise new trackers for unmatched detections
for i in unmatched_dets: # a scalar of index
trk = KalmanBoxTracker(dets[i,:], info[i, :], classname)
self.trackers.append(trk)
i = len(self.trackers)
for trk in reversed(self.trackers):
d = trk.get_state() # bbox location
# d = d[self.reorder_back]
if((trk.time_since_update < self.max_age) and (trk.hits >= self.min_hits or self.frame_count <= self.min_hits)):
ret.append(np.concatenate((d, [trk.id+1], trk.info)).reshape(1,-1)) # +1 as MOT benchmark requires positive
i -= 1
#remove dead tracklet
if(trk.time_since_update >= self.max_age):
self.trackers.pop(i)
if(len(ret)>0):
return np.concatenate(ret) # x, y, z, theta, l, w, h, ID, other info, confidence
return np.empty((0,15))
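
# Hedged usage sketch (illustrative, not from the original repository): the tracker
# is fed one frame of detections in the [x, y, z, theta, l, w, h] format expected by
# update(); the detection values, the score in 'info', and the 'Car' class name are
# made up for demonstration.
if __name__ == "__main__":
    mot_tracker = AB3DMOT()
    example_dets = np.array([[1.0, 2.0, 0.5, 0.1, 4.0, 1.8, 1.6]])   # N x 7 detections
    example_info = np.array([[0.9]])                                  # e.g. detection scores
    tracked = mot_tracker.update({'dets': example_dets, 'info': example_info}, 'Car')
    print(tracked)   # rows of x, y, z, theta, l, w, h, ID, info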
|
the-stack_0_18197 | import pytest
import numpy as np
import tensorflow as tf
from decompose.distributions.cenNormal import CenNormal
from decompose.sklearn import DECOMPOSE
from decompose.data.lowRank import LowRank
tf.logging.set_verbosity(tf.logging.INFO)
@pytest.mark.system
@pytest.mark.slow
def test_sklearn(tmpdir):
"""Tests the sk-learn interface of the tensor factorisation estimator.
The test creates a `DECOMPOSE` object and applies its `fit_transform`
method to some low rank training data. The learned filter banks have
to reconstruct the data very well. Then unseen test data is transformed
into the learned basis. The test data has to be recoverd from the
transformed representation.
"""
# create temporary directory where the model and its checkpoints are stored
modelDirectory = str(tmpdir.mkdir("model"))
# create a synthetic low rank dataset
K, M_train, M_test = 3, [5000, 1000], [5000, 1000]
lrData = LowRank(rank=K, M_train=M_train, M_test=M_test)
# instantiate a model
priors, K, dtype = [CenNormal(), CenNormal()], K, np.float32
model = DECOMPOSE(modelDirectory, priors=priors, n_components=K,
dtype=dtype)
# train the model
U0 = model.fit_transform(lrData.training)
# check whether variance explained is between 0.95 and 1.
U1 = model.components_
assert(0.95 <= lrData.var_expl_training((U0, U1)) <= 1.)
# transform test data
transformModelDirectory = str(tmpdir.mkdir("transformModel"))
U0test = model.transform(transformModelDirectory=transformModelDirectory,
X=lrData.test)
assert(0.95 <= lrData.var_expl_test((U0test, U1)) <= 1.)
|
the-stack_0_18199 | from django.http import JsonResponse
from django.core.files.storage import default_storage
from django.views.decorators.csrf import csrf_exempt
from django.views.decorators.http import require_POST
from .forms import UploadFileForm
@require_POST
@csrf_exempt
def upload(request):
if request.method == 'POST':
form = UploadFileForm(request.POST, request.FILES)
if form.is_valid():
file = form.files['file']
name = default_storage.save(None, file)
return JsonResponse({
'uri': name,
'size': file.size,
})
else:
return JsonResponse({'errors': form.errors}, status=400)
|