content stringlengths 7-928k | avg_line_length float64 3.5-33.8k | max_line_length int64 6-139k | alphanum_fraction float64 0.08-0.96 | licenses sequence | repository_name stringlengths 7-104 | path stringlengths 4-230 | size int64 7-928k | lang stringclasses 1 value |
---|---|---|---|---|---|---|---|---|
#!/usr/bin/env python
# GenerateJSONOutput.py
#
# Command-line interface for turning output root files into JSON files for further processing.
#
# By: Larry Lee - Dec 2017
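#
# Example invocation (the file name is hypothetical; the --format/--interpretation
# values shown are just the script defaults):
#   GenerateJSONOutput.py -i MyAnalysis_fixSigXSecNominal_hypotest.root \
#       -f "hypo_SU_%f_%f_0_10" -p "m0:m12" -a '{"m0":"x","m12":"y"}'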
import argparse
import sys
import os
import ROOT
ROOT.gSystem.Load(f"{os.getenv('HISTFITTER')}/lib/libSusyFitter.so")
ROOT.gROOT.SetBatch()
parser = argparse.ArgumentParser()
parser.add_argument(
"--inputFiles",
"-i",
type=str,
nargs="+",
help="input ROOT files -- if you give me just the nominal, I'll try to find the theory variations and the upper limit automatically",
required=True,
)
parser.add_argument(
"--format",
"-f",
type=str,
help="format of object names",
default="hypo_SU_%f_%f_0_10",
)
parser.add_argument(
"--interpretation",
"-p",
type=str,
help="interpretation of object name",
default="m0:m12",
)
parser.add_argument(
"--addCoordinates",
"-a",
type=str,
help="add additional coordinates to json using a python dictionary {existing json value name and math : new value name}",
default='{"m0":"x","m12":"y"}',
)
parser.add_argument("--cut", "-c", type=str, help="cut string", default="1")
parser.add_argument(
"--noAddTabs",
"-n",
help="don't convert JSON to human readable file",
action="store_true",
default=False,
)
args = parser.parse_args()
# Print out the settings
for arg in vars(args):
user_input = getattr(args, arg)
print(f">>> ... Setting: {arg: >20} {str(user_input): >40}")
print("")
def main():
for filename in args.inputFiles:
processFile(filename)
if args.addCoordinates != "":
addCoordinates(filename, args.addCoordinates)
if "Nominal" in filename:
print(">>> Attempting to find theory variation files")
try:
newfilename = filename.replace("Nominal", "Up")
if newfilename == filename:
raise
processFile(newfilename)
if args.addCoordinates != "":
addCoordinates(newfilename, args.addCoordinates)
except:
print(
">>> WARNING: Can't find file: %s"
% filename.replace("Nominal", "Up")
)
try:
newfilename = filename.replace("Nominal", "Down")
if newfilename == filename:
raise
processFile(newfilename)
if args.addCoordinates != "":
addCoordinates(newfilename, args.addCoordinates)
except:
print(
">>> WARNING: Can't find file: %s"
% filename.replace("Nominal", "Down")
)
try:
newfilename = filename.replace(
"_fixSigXSecNominal_hypotest", "_upperlimit"
)
if newfilename == filename:
raise
processFile(newfilename)
if args.addCoordinates != "":
addCoordinates(newfilename, args.addCoordinates)
except:
print(
">>> WARNING: Can't find file: %s"
% filename.replace("_fixSigXSecNominal_hypotest", "_upperlimit")
)
try:
newfilename = filename.replace("_Nominal", "_upperlimit")
if newfilename == filename:
raise
processFile(newfilename)
if args.addCoordinates != "":
addCoordinates(newfilename, args.addCoordinates)
except:
print(
">>> WARNING: Can't find file: %s"
% filename.replace("_Nominal", "_upperlimit")
)
if not args.noAddTabs:
cleanUpJSON()
print(">>>")
print(">>> Done!")
print(">>>")
return
def processFile(file):
print("")
if os.path.isfile(file):
ROOT.CollectAndWriteHypoTestResults(
file, args.format, args.interpretation, args.cut
)
else:
print(">>> ERROR: File does not exist: %s" % file)
sys.exit(1)
print("")
return
def cleanUpJSON():
import json
import glob
for file in glob.glob("./*json"):
print(">>> Making file human readable: %s" % file)
data = json.load(open(file))
with open(file, "w") as f:
f.write(json.dumps(data, indent=4))
return
def addCoordinates(fileName, coordString):
import json
import re
coordDict = json.loads(coordString)
jsonFileName = fileName.split("/")[-1] # grab just the filename
jsonFileName = jsonFileName.replace(".root", "__1_harvest_list.json")
data = json.load(open(jsonFileName))
for i, hypo_test in enumerate(data): # an entry is one hypo test result
for key in coordDict: # each item of the result
# parse input arguments, thanks to Larry for regex suggestions
total = eval(re.sub(r"\b([a-zA-Z]+[0-9]*)\b", r'hypo_test["\g<1>"]', key))
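# e.g. (illustrative) a key of "m0" becomes 'hypo_test["m0"]', and a key with
# math such as "m0+m12" becomes 'hypo_test["m0"]+hypo_test["m12"]' before eval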
# assign new key to value
hypo_test[coordDict[key]] = total
with open(jsonFileName, "w") as f:
f.write(json.dumps(data))
if __name__ == "__main__":
main()
| 27.979058 | 137 | 0.554454 | ["BSD-2-Clause"] | HistFitter/HistFitter | scripts/GenerateJSONOutput.py | 5,344 | Python |
import csv
import logging
import random
import re
from collections import OrderedDict
from enum import IntEnum
from pathlib import Path
from typing import Optional, Tuple, List
import attr
from torch.utils.data import Dataset
from syntok import segmenter
from sklearn.model_selection import train_test_split
logger = logging.getLogger(__name__)
DATA_DIR = Path(__file__).parent / 'data'
PRONOUNS = {'she', 'her', 'hers', 'he', 'him', 'his'}
PRONOUNS_GENDER = {'she': 'F', 'her': 'F', 'hers': 'F', 'he': 'M', 'him': 'M', 'his': 'M'}
class GAPLabel(IntEnum):
A, B, NEITHER = 0, 1, 2
@attr.s(auto_attribs=True)
class GAPExample(object):
id: str
url: str
tokens: List[str]
pronoun_index: int
a_start: int
a_end: int # exclusive
b_start: int
b_end: int # exclusive
label: Optional[GAPLabel]
def load_train_val_examples(random_seed, train_size=0.9) -> Tuple[List[GAPExample], List[GAPExample]]:
examples = []
for tsv_file in ('gap-development.tsv', 'gap-validation.tsv', 'gap-test.tsv'):
examples.extend(_load_gap(DATA_DIR / tsv_file))
examples_gender = [PRONOUNS_GENDER[e.tokens[e.pronoun_index].lower()] for e in examples]
train_examples, val_examples = train_test_split(
examples, random_state=random_seed, train_size=train_size,
shuffle=True, stratify=examples_gender)
return train_examples, val_examples
def load_test_examples(tsv_path: Path = DATA_DIR / 'test_stage_2.tsv') -> List[GAPExample]:
examples = _load_gap(tsv_path)
return examples
def _load_gap(tsv_path: Path) -> List[GAPExample]:
examples: List[GAPExample] = []
with tsv_path.open() as f:
reader = csv.DictReader(f, delimiter='\t')
for row in reader:
examples.append(_create_example(row))
logger.info('Loaded %d examples from %s', len(examples), tsv_path)
return examples
def _create_example(row: OrderedDict):
label = None
a_coref, b_coref = map(lambda x: row.get(f'{x}-coref', '').upper(), 'AB')
if a_coref == 'TRUE' and b_coref == 'FALSE':
label = GAPLabel.A
elif b_coref == 'TRUE' and a_coref == 'FALSE':
label = GAPLabel.B
elif a_coref == 'FALSE' and b_coref == 'FALSE':
label = GAPLabel.NEITHER
tokens = _word_tokenizer(row['Text'])
pronoun_index = _char_to_token_offset(
row['Text'], row['Pronoun'], int(row['Pronoun-offset']), tokens)
assert tokens[pronoun_index].lower() in PRONOUNS
a_start = _char_to_token_offset(
row['Text'], row['A'], int(row['A-offset']), tokens)
a_end = a_start + len(_word_tokenizer(row['A'])) # exclusive
b_start = _char_to_token_offset(
row['Text'], row['B'], int(row['B-offset']), tokens)
b_end = b_start + len(_word_tokenizer(row['B'])) # exclusive
example = GAPExample(
id=row['ID'],
url=row['URL'],
tokens=tokens,
pronoun_index=pronoun_index,
a_start=a_start,
a_end=a_end,
b_start=b_start,
b_end=b_end,
label=label)
return example
def _word_tokenizer(text: str) -> List[str]:
tokens: List[str] = []
for paragraph in segmenter.analyze(text):
for sentence in paragraph:
for token in sentence:
# Split tokens on additional characters not handled by syntok
token_value = token.value
for c in ('/', r'\*', "'", r'\.', '--', ':'):
token_value = re.sub(rf'({c})', r' \1 ', token_value)
tokens.extend(token_value.split())
return tokens
def _char_to_token_offset(
text: str,
mention: str,
char_offset: int,
text_tokens: List[str]) -> int:
char_index = token_index = 0
while char_index < char_offset:
if text[char_index:].startswith(text_tokens[token_index]):
char_index += len(text_tokens[token_index])
token_index += 1
else:
char_index += 1 # whitespace
return token_index
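# Illustrative sketch (assumes the text tokenizes on whitespace only):
#   text = "Alice met Bob"; text_tokens = ["Alice", "met", "Bob"]
#   _char_to_token_offset(text, "Bob", 10, text_tokens)  # -> 2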
class GAPDataset(Dataset):
def __init__(self, examples: List[GAPExample], flip_prob: float = 0.0) -> None:
super().__init__()
self._examples = examples
assert 0.0 <= flip_prob <= 1.0
self._flip_prob = flip_prob
def __getitem__(self, index: int) -> GAPExample:
example = self._examples[index]
if (self._flip_prob == 1.0 or
(self._flip_prob > 0.0 and
random.random() <= self._flip_prob)):
example = self._flip_example(example)
return example
def __len__(self) -> int:
return len(self._examples)
def _flip_example(self, example: GAPExample) -> GAPExample:
new_label = example.label
if example.label == GAPLabel.A:
new_label = GAPLabel.B
elif example.label == GAPLabel.B:
new_label = GAPLabel.A
new_example = GAPExample(
id=example.id,
url=example.url,
tokens=example.tokens,
pronoun_index=example.pronoun_index,
a_start=example.b_start,
a_end=example.b_end,
b_start=example.a_start,
b_end=example.a_end,
label=new_label)
return new_example
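# Illustrative usage sketch ("examples" is a hypothetical list of GAPExample):
#   dataset = GAPDataset(examples, flip_prob=1.0)
#   dataset[0].a_start == examples[0].b_start  # True, the A/B spans are swapped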
| 30.74269 | 102 | 0.62393 | ["MIT"] | yasserglez/kaggle | gendered-pronoun-resolution/data.py | 5,257 | Python |
from operator import attrgetter
from go.base.utils import vumi_api_for_user
from go.config import configured_conversation_types
from go.vumitools.conversation.models import CONVERSATION_RUNNING
from django.core.mail import send_mail
from django.template.loader import render_to_string
from django.conf import settings
def get_uniques(contact_store, contact_keys=None,
plucker=attrgetter('msisdn')):
uniques = set()
contacts_manager = contact_store.contacts
contact_keys = contact_keys or contact_store.list_contacts()
for bunch in contacts_manager.load_all_bunches(contact_keys):
uniques.update([plucker(contact) for contact in bunch])
return uniques
def get_messages_count(conversations):
totals = {}
for conv in conversations:
totals.setdefault(conv.conversation_type, {})
totals[conv.conversation_type].setdefault('sent', 0)
totals[conv.conversation_type].setdefault('received', 0)
totals[conv.conversation_type]['sent'] += conv.count_sent_messages()
totals[conv.conversation_type]['received'] += conv.count_replies()
return totals
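# Illustrative return shape (the conversation type names are hypothetical):
#   get_messages_count(conversations)
#   # -> {'bulk_message': {'sent': 120, 'received': 45},
#   #     'survey': {'sent': 30, 'received': 12}}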
def send_user_account_summary(user):
user_api = vumi_api_for_user(user)
contact_store = user_api.contact_store
conv_store = user_api.conversation_store
contact_keys = contact_store.list_contacts()
uniques = get_uniques(contact_store, contact_keys=contact_keys,
plucker=attrgetter('msisdn'))
conversation_keys = conv_store.list_conversations()
all_conversations = []
bunches = conv_store.conversations.load_all_bunches(conversation_keys)
for bunch in bunches:
all_conversations.extend([user_api.wrap_conversation(conv)
for conv in bunch])
all_conversations.sort(key=(lambda conv: conv.created_at), reverse=True)
active_conversations = {}
known_types = configured_conversation_types()
for conv in all_conversations:
conv_list = active_conversations.setdefault(
known_types.get(conv.conversation_type, 'Unknown'), [])
if conv.get_status() == CONVERSATION_RUNNING:
conv_list.append(conv)
message_count = get_messages_count(all_conversations)
message_count_friendly = dict((known_types.get(conv_type), value)
for conv_type, value
in message_count.items())
total_messages_sent = sum(conv_type['sent'] for conv_type
in message_count.values())
total_messages_received = sum(conv_type['received'] for conv_type
in message_count.values())
total_message_count = total_messages_received + total_messages_sent
send_mail('Vumi Go Account Summary', render_to_string(
'account/account_summary_mail.txt', {
'all_conversations': all_conversations,
'user': user,
'unique_identifier': 'contact number',
'total_uniques': len(uniques),
'total_contacts': len(contact_keys),
'total_messages_received': total_messages_received,
'total_messages_sent': total_messages_sent,
'total_message_count': total_message_count,
'message_count': message_count,
'message_count_friendly': message_count_friendly,
'active_conversations': active_conversations,
}), settings.DEFAULT_FROM_EMAIL, [user.email],
fail_silently=False)
| 42.39759 | 76 | 0.688832 | ["BSD-3-Clause"] | lynnUg/vumi-go | go/account/utils.py | 3,519 | Python |
# Generated by Django 2.2.24 on 2021-12-13 07:54
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("taskflow3", "0017_auto_20211116_1526"),
]
operations = [
migrations.AlterModelOptions(
name="autoretrynodestrategy",
options={
"verbose_name": "节点自动重试策略 AutoRetryNodeStrategy",
"verbose_name_plural": "节点自动重试策略 AutoRetryNodeStrategy",
},
),
migrations.CreateModel(
name="TimeoutNodeConfig",
fields=[
("task_id", models.BigIntegerField(verbose_name="taskflow id")),
("root_pipeline_id", models.CharField(max_length=64, verbose_name="root pipeline id")),
(
"action",
models.CharField(
choices=[("forced_fail", "强制失败"), ("forced_fail_and_skip", "强制失败并跳过")],
max_length=32,
verbose_name="action",
),
),
(
"node_id",
models.CharField(max_length=64, primary_key=True, serialize=False, verbose_name="task node id"),
),
("timeout", models.IntegerField(verbose_name="node timeout time")),
],
options={
"verbose_name": "节点超时配置 TimeoutNodeConfig",
"verbose_name_plural": "节点超时配置 TimeoutNodeConfig",
"index_together": {("root_pipeline_id", "node_id")},
},
),
]
| 35.152174 | 116 | 0.512678 | ["Apache-2.0"] | brookylin/bk-sops | gcloud/taskflow3/migrations/0018_auto_20211213_1554.py | 1,695 | Python |
from django.contrib import admin
from models import Connection, Network
class ConnectionAdmin(admin.ModelAdmin):
list_display = ("id", 'device_1', 'device_2', 'network', 'concetion_type', )
list_filter = ['network', 'concetion_type']
search_fields = ['device_1__property_number', 'device_2__property_number']
admin.site.register(Connection, ConnectionAdmin)
admin.site.register(Network)
| 31 | 80 | 0.764268 | ["MIT", "Unlicense"] | ShangShungInstitute/django-manage-it | manage_it/network/admin.py | 403 | Python |
"""
Django settings for app project.
Generated by 'django-admin startproject' using Django 2.1.15.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'j*imh*c$v&r#enk)ja+^g#r%+=8)7z^!&&3e!e9=f+3-4zh&87'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'rest_framework.authtoken',
'core',
'users',
'series',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'app.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'app.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'HOST': os.environ.get('DB_HOST'),
'NAME': os.environ.get('DB_NAME'),
'USER': os.environ.get('DB_USER'),
'PASSWORD': os.environ.get('DB_PASS'),
}
}
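# The database credentials above are read from the environment; a minimal sketch
# of the variables expected (values are placeholders only):
#   export DB_HOST=localhost DB_NAME=app DB_USER=postgres DB_PASS=supersecret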
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
MEDIA_URL = '/media/'
# 127.0.0.1:8000/static will map to the static directories; similarly, /media will map.
# Tells Django where to look for and store media files.
MEDIA_ROOT = '/vol/web/media'
# Django's collectstatic command collects static files into this directory.
STATIC_ROOT = '/vol/web/static'
# Custom user model used for authentication.
AUTH_USER_MODEL = 'core.User'
| 26.079137 | 91 | 0.688828 | ["MIT"] | Akshay-ch-dj/RESTapi-app-series | app/app/settings.py | 3,625 | Python |
"""
Provide classes to perform the groupby aggregate operations.
These are not exposed to the user and provide implementations of the grouping
operations, primarily in cython. These classes (BaseGrouper and BinGrouper)
are contained *in* the SeriesGroupBy and DataFrameGroupBy objects.
"""
from __future__ import annotations
import collections
import functools
from typing import (
Callable,
Generic,
Hashable,
Iterator,
Sequence,
final,
overload,
)
import numpy as np
from pandas._libs import (
NaT,
lib,
)
import pandas._libs.groupby as libgroupby
import pandas._libs.reduction as libreduction
from pandas._typing import (
ArrayLike,
DtypeObj,
NDFrameT,
Shape,
npt,
)
from pandas.errors import AbstractMethodError
from pandas.util._decorators import cache_readonly
from pandas.core.dtypes.cast import (
maybe_cast_pointwise_result,
maybe_downcast_to_dtype,
)
from pandas.core.dtypes.common import (
ensure_float64,
ensure_int64,
ensure_platform_int,
is_1d_only_ea_obj,
is_bool_dtype,
is_categorical_dtype,
is_complex_dtype,
is_datetime64_any_dtype,
is_float_dtype,
is_integer_dtype,
is_numeric_dtype,
is_sparse,
is_timedelta64_dtype,
needs_i8_conversion,
)
from pandas.core.dtypes.dtypes import ExtensionDtype
from pandas.core.dtypes.missing import (
isna,
maybe_fill,
)
from pandas.core.arrays import (
DatetimeArray,
ExtensionArray,
PeriodArray,
TimedeltaArray,
)
from pandas.core.arrays.boolean import BooleanDtype
from pandas.core.arrays.floating import (
Float64Dtype,
FloatingDtype,
)
from pandas.core.arrays.integer import (
Int64Dtype,
_IntegerDtype,
)
from pandas.core.arrays.masked import (
BaseMaskedArray,
BaseMaskedDtype,
)
from pandas.core.arrays.string_ import StringDtype
from pandas.core.frame import DataFrame
from pandas.core.generic import NDFrame
from pandas.core.groupby import grouper
from pandas.core.indexes.api import (
CategoricalIndex,
Index,
MultiIndex,
ensure_index,
)
from pandas.core.series import Series
from pandas.core.sorting import (
compress_group_index,
decons_obs_group_ids,
get_flattened_list,
get_group_index,
get_group_index_sorter,
get_indexer_dict,
)
class WrappedCythonOp:
"""
Dispatch logic for functions defined in _libs.groupby
"""
# Functions for which we do _not_ attempt to cast the cython result
# back to the original dtype.
cast_blocklist = frozenset(["rank", "count", "size", "idxmin", "idxmax"])
def __init__(self, kind: str, how: str):
self.kind = kind
self.how = how
_CYTHON_FUNCTIONS = {
"aggregate": {
"add": "group_add",
"prod": "group_prod",
"min": "group_min",
"max": "group_max",
"mean": "group_mean",
"median": "group_median",
"var": "group_var",
"first": "group_nth",
"last": "group_last",
"ohlc": "group_ohlc",
},
"transform": {
"cumprod": "group_cumprod",
"cumsum": "group_cumsum",
"cummin": "group_cummin",
"cummax": "group_cummax",
"rank": "group_rank",
},
}
_MASKED_CYTHON_FUNCTIONS = {"cummin", "cummax", "min", "max"}
_cython_arity = {"ohlc": 4} # OHLC
# Note: we make this a classmethod and pass kind+how so that caching
# works at the class level and not the instance level
@classmethod
@functools.lru_cache(maxsize=None)
def _get_cython_function(
cls, kind: str, how: str, dtype: np.dtype, is_numeric: bool
):
dtype_str = dtype.name
ftype = cls._CYTHON_FUNCTIONS[kind][how]
# see if there is a fused-type version of function
# only valid for numeric
f = getattr(libgroupby, ftype)
if is_numeric:
return f
# error: Non-overlapping equality check (left operand type: "dtype[Any]", right
# operand type: "Literal['object']")
elif dtype == object: # type: ignore[comparison-overlap]
if "object" not in f.__signatures__:
# raise NotImplementedError here rather than TypeError later
raise NotImplementedError(
f"function is not implemented for this dtype: "
f"[how->{how},dtype->{dtype_str}]"
)
return f
def get_cython_func_and_vals(self, values: np.ndarray, is_numeric: bool):
"""
Find the appropriate cython function, casting if necessary.
Parameters
----------
values : np.ndarray
is_numeric : bool
Returns
-------
func : callable
values : np.ndarray
"""
how = self.how
kind = self.kind
if how in ["median", "cumprod"]:
# these two only have float64 implementations
if is_numeric:
values = ensure_float64(values)
else:
raise NotImplementedError(
f"function is not implemented for this dtype: "
f"[how->{how},dtype->{values.dtype.name}]"
)
func = getattr(libgroupby, f"group_{how}_float64")
return func, values
func = self._get_cython_function(kind, how, values.dtype, is_numeric)
if values.dtype.kind in ["i", "u"]:
if how in ["add", "var", "prod", "mean", "ohlc"]:
# result may still include NaN, so we have to cast
values = ensure_float64(values)
return func, values
def _disallow_invalid_ops(self, dtype: DtypeObj, is_numeric: bool = False):
"""
Check if we can do this operation with our cython functions.
Raises
------
NotImplementedError
This is either not a valid function for this dtype, or
valid but not implemented in cython.
"""
how = self.how
if is_numeric:
# never an invalid op for those dtypes, so return early as fastpath
return
if is_categorical_dtype(dtype):
# NotImplementedError for methods that can fall back to a
# non-cython implementation.
if how in ["add", "prod", "cumsum", "cumprod"]:
raise TypeError(f"{dtype} type does not support {how} operations")
raise NotImplementedError(f"{dtype} dtype not supported")
elif is_sparse(dtype):
# categoricals are only 1d, so we
# are not setup for dim transforming
raise NotImplementedError(f"{dtype} dtype not supported")
elif is_datetime64_any_dtype(dtype):
# we raise NotImplemented if this is an invalid operation
# entirely, e.g. adding datetimes
if how in ["add", "prod", "cumsum", "cumprod"]:
raise TypeError(f"datetime64 type does not support {how} operations")
elif is_timedelta64_dtype(dtype):
if how in ["prod", "cumprod"]:
raise TypeError(f"timedelta64 type does not support {how} operations")
def _get_output_shape(self, ngroups: int, values: np.ndarray) -> Shape:
how = self.how
kind = self.kind
arity = self._cython_arity.get(how, 1)
out_shape: Shape
if how == "ohlc":
out_shape = (ngroups, 4)
elif arity > 1:
raise NotImplementedError(
"arity of more than 1 is not supported for the 'how' argument"
)
elif kind == "transform":
out_shape = values.shape
else:
out_shape = (ngroups,) + values.shape[1:]
return out_shape
def get_out_dtype(self, dtype: np.dtype) -> np.dtype:
how = self.how
if how == "rank":
out_dtype = "float64"
else:
if is_numeric_dtype(dtype):
out_dtype = f"{dtype.kind}{dtype.itemsize}"
else:
out_dtype = "object"
return np.dtype(out_dtype)
@overload
def _get_result_dtype(self, dtype: np.dtype) -> np.dtype:
... # pragma: no cover
@overload
def _get_result_dtype(self, dtype: ExtensionDtype) -> ExtensionDtype:
... # pragma: no cover
def _get_result_dtype(self, dtype: DtypeObj) -> DtypeObj:
"""
Get the desired dtype of a result based on the
input dtype and how it was computed.
Parameters
----------
dtype : np.dtype or ExtensionDtype
Input dtype.
Returns
-------
np.dtype or ExtensionDtype
The desired dtype of the result.
"""
how = self.how
if how in ["add", "cumsum", "sum", "prod"]:
if dtype == np.dtype(bool):
return np.dtype(np.int64)
elif isinstance(dtype, (BooleanDtype, _IntegerDtype)):
return Int64Dtype()
elif how in ["mean", "median", "var"]:
if isinstance(dtype, (BooleanDtype, _IntegerDtype)):
return Float64Dtype()
elif is_float_dtype(dtype) or is_complex_dtype(dtype):
return dtype
elif is_numeric_dtype(dtype):
return np.dtype(np.float64)
return dtype
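# Illustrative calls (not exhaustive):
#   WrappedCythonOp(kind="aggregate", how="add")._get_result_dtype(np.dtype(bool))
#   # -> dtype('int64')
#   WrappedCythonOp(kind="aggregate", how="mean")._get_result_dtype(Int64Dtype())
#   # -> Float64Dtype()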
def uses_mask(self) -> bool:
return self.how in self._MASKED_CYTHON_FUNCTIONS
@final
def _ea_wrap_cython_operation(
self,
values: ExtensionArray,
min_count: int,
ngroups: int,
comp_ids: np.ndarray,
**kwargs,
) -> ArrayLike:
"""
If we have an ExtensionArray, unwrap, call _cython_operation, and
re-wrap if appropriate.
"""
# TODO: general case implementation overridable by EAs.
if isinstance(values, BaseMaskedArray) and self.uses_mask():
return self._masked_ea_wrap_cython_operation(
values,
min_count=min_count,
ngroups=ngroups,
comp_ids=comp_ids,
**kwargs,
)
if isinstance(values, (DatetimeArray, PeriodArray, TimedeltaArray)):
# All of the functions implemented here are ordinal, so we can
# operate on the tz-naive equivalents
npvalues = values._ndarray.view("M8[ns]")
elif isinstance(values.dtype, (BooleanDtype, _IntegerDtype)):
# IntegerArray or BooleanArray
npvalues = values.to_numpy("float64", na_value=np.nan)
elif isinstance(values.dtype, FloatingDtype):
# FloatingArray
npvalues = values.to_numpy(values.dtype.numpy_dtype, na_value=np.nan)
elif isinstance(values.dtype, StringDtype):
# StringArray
npvalues = values.to_numpy(object, na_value=np.nan)
else:
raise NotImplementedError(
f"function is not implemented for this dtype: {values.dtype}"
)
res_values = self._cython_op_ndim_compat(
npvalues,
min_count=min_count,
ngroups=ngroups,
comp_ids=comp_ids,
mask=None,
**kwargs,
)
if self.how in ["rank"]:
# i.e. how in WrappedCythonOp.cast_blocklist, since
# other cast_blocklist methods don't go through cython_operation
return res_values
return self._reconstruct_ea_result(values, res_values)
def _reconstruct_ea_result(self, values, res_values):
"""
Construct an ExtensionArray result from an ndarray result.
"""
# TODO: allow EAs to override this logic
if isinstance(
values.dtype, (BooleanDtype, _IntegerDtype, FloatingDtype, StringDtype)
):
dtype = self._get_result_dtype(values.dtype)
cls = dtype.construct_array_type()
return cls._from_sequence(res_values, dtype=dtype)
elif needs_i8_conversion(values.dtype):
i8values = res_values.view("i8")
return type(values)(i8values, dtype=values.dtype)
raise NotImplementedError
@final
def _masked_ea_wrap_cython_operation(
self,
values: BaseMaskedArray,
min_count: int,
ngroups: int,
comp_ids: np.ndarray,
**kwargs,
) -> BaseMaskedArray:
"""
Equivalent of `_ea_wrap_cython_operation`, but optimized for masked EA's
and cython algorithms which accept a mask.
"""
orig_values = values
# Copy to ensure input and result masks don't end up shared
mask = values._mask.copy()
result_mask = np.zeros(ngroups, dtype=bool)
arr = values._data
res_values = self._cython_op_ndim_compat(
arr,
min_count=min_count,
ngroups=ngroups,
comp_ids=comp_ids,
mask=mask,
result_mask=result_mask,
**kwargs,
)
dtype = self._get_result_dtype(orig_values.dtype)
assert isinstance(dtype, BaseMaskedDtype)
cls = dtype.construct_array_type()
if self.kind != "aggregate":
return cls(res_values.astype(dtype.type, copy=False), mask)
else:
return cls(res_values.astype(dtype.type, copy=False), result_mask)
@final
def _cython_op_ndim_compat(
self,
values: np.ndarray,
*,
min_count: int,
ngroups: int,
comp_ids: np.ndarray,
mask: np.ndarray | None = None,
result_mask: np.ndarray | None = None,
**kwargs,
) -> np.ndarray:
if values.ndim == 1:
# expand to 2d, dispatch, then squeeze if appropriate
values2d = values[None, :]
if mask is not None:
mask = mask[None, :]
if result_mask is not None:
result_mask = result_mask[None, :]
res = self._call_cython_op(
values2d,
min_count=min_count,
ngroups=ngroups,
comp_ids=comp_ids,
mask=mask,
result_mask=result_mask,
**kwargs,
)
if res.shape[0] == 1:
return res[0]
# otherwise we have OHLC
return res.T
return self._call_cython_op(
values,
min_count=min_count,
ngroups=ngroups,
comp_ids=comp_ids,
mask=mask,
result_mask=result_mask,
**kwargs,
)
@final
def _call_cython_op(
self,
values: np.ndarray, # np.ndarray[ndim=2]
*,
min_count: int,
ngroups: int,
comp_ids: np.ndarray,
mask: np.ndarray | None,
result_mask: np.ndarray | None,
**kwargs,
) -> np.ndarray: # np.ndarray[ndim=2]
orig_values = values
dtype = values.dtype
is_numeric = is_numeric_dtype(dtype)
is_datetimelike = needs_i8_conversion(dtype)
if is_datetimelike:
values = values.view("int64")
is_numeric = True
elif is_bool_dtype(dtype):
values = values.astype("int64")
elif is_integer_dtype(dtype):
# e.g. uint8 -> uint64, int16 -> int64
dtype_str = dtype.kind + "8"
values = values.astype(dtype_str, copy=False)
elif is_numeric:
if not is_complex_dtype(dtype):
values = ensure_float64(values)
values = values.T
if mask is not None:
mask = mask.T
if result_mask is not None:
result_mask = result_mask.T
out_shape = self._get_output_shape(ngroups, values)
func, values = self.get_cython_func_and_vals(values, is_numeric)
out_dtype = self.get_out_dtype(values.dtype)
result = maybe_fill(np.empty(out_shape, dtype=out_dtype))
if self.kind == "aggregate":
counts = np.zeros(ngroups, dtype=np.int64)
if self.how in ["min", "max", "mean"]:
func(
result,
counts,
values,
comp_ids,
min_count,
mask=mask,
result_mask=result_mask,
is_datetimelike=is_datetimelike,
)
elif self.how in ["add"]:
# We support datetimelike
func(
result,
counts,
values,
comp_ids,
min_count,
datetimelike=is_datetimelike,
)
else:
func(result, counts, values, comp_ids, min_count)
else:
# TODO: min_count
if self.uses_mask():
func(
result,
values,
comp_ids,
ngroups,
is_datetimelike,
mask=mask,
**kwargs,
)
else:
func(result, values, comp_ids, ngroups, is_datetimelike, **kwargs)
if self.kind == "aggregate":
# i.e. counts is defined. Locations where count<min_count
# need to have the result set to np.nan, which may require casting,
# see GH#40767
if is_integer_dtype(result.dtype) and not is_datetimelike:
cutoff = max(1, min_count)
empty_groups = counts < cutoff
if empty_groups.any():
# Note: this conversion could be lossy, see GH#40767
result = result.astype("float64")
result[empty_groups] = np.nan
result = result.T
if self.how not in self.cast_blocklist:
# e.g. if we are int64 and need to restore to datetime64/timedelta64
# "rank" is the only member of cast_blocklist we get here
res_dtype = self._get_result_dtype(orig_values.dtype)
op_result = maybe_downcast_to_dtype(result, res_dtype)
else:
op_result = result
# error: Incompatible return value type (got "Union[ExtensionArray, ndarray]",
# expected "ndarray")
return op_result # type: ignore[return-value]
@final
def cython_operation(
self,
*,
values: ArrayLike,
axis: int,
min_count: int = -1,
comp_ids: np.ndarray,
ngroups: int,
**kwargs,
) -> ArrayLike:
"""
Call our cython function, with appropriate pre- and post- processing.
"""
if values.ndim > 2:
raise NotImplementedError("number of dimensions is currently limited to 2")
elif values.ndim == 2:
assert axis == 1, axis
elif not is_1d_only_ea_obj(values):
# Note: it is *not* the case that axis is always 0 for 1-dim values,
# as we can have 1D ExtensionArrays that we need to treat as 2D
assert axis == 0
dtype = values.dtype
is_numeric = is_numeric_dtype(dtype)
# can we do this operation with our cython functions
# if not raise NotImplementedError
self._disallow_invalid_ops(dtype, is_numeric)
if not isinstance(values, np.ndarray):
# i.e. ExtensionArray
return self._ea_wrap_cython_operation(
values,
min_count=min_count,
ngroups=ngroups,
comp_ids=comp_ids,
**kwargs,
)
return self._cython_op_ndim_compat(
values,
min_count=min_count,
ngroups=ngroups,
comp_ids=comp_ids,
mask=None,
**kwargs,
)
class BaseGrouper:
"""
This is an internal Grouper class, which actually holds
the generated groups
Parameters
----------
axis : Index
groupings : Sequence[Grouping]
all the grouping instances to handle in this grouper
for example for grouper list to groupby, need to pass the list
sort : bool, default True
whether this grouper will give sorted result or not
group_keys : bool, default True
mutated : bool, default False
indexer : np.ndarray[np.intp], optional
the indexer created by Grouper
some groupers (TimeGrouper) will sort its axis and its
group_info is also sorted, so need the indexer to reorder
"""
axis: Index
def __init__(
self,
axis: Index,
groupings: Sequence[grouper.Grouping],
sort: bool = True,
group_keys: bool = True,
mutated: bool = False,
indexer: npt.NDArray[np.intp] | None = None,
dropna: bool = True,
):
assert isinstance(axis, Index), axis
self.axis = axis
self._groupings: list[grouper.Grouping] = list(groupings)
self._sort = sort
self.group_keys = group_keys
self.mutated = mutated
self.indexer = indexer
self.dropna = dropna
@property
def groupings(self) -> list[grouper.Grouping]:
return self._groupings
@property
def shape(self) -> Shape:
return tuple(ping.ngroups for ping in self.groupings)
def __iter__(self):
return iter(self.indices)
@property
def nkeys(self) -> int:
return len(self.groupings)
def get_iterator(
self, data: NDFrameT, axis: int = 0
) -> Iterator[tuple[Hashable, NDFrameT]]:
"""
Groupby iterator
Returns
-------
Generator yielding sequence of (name, subsetted object)
for each group
"""
splitter = self._get_splitter(data, axis=axis)
keys = self.group_keys_seq
for key, group in zip(keys, splitter):
yield key, group.__finalize__(data, method="groupby")
@final
def _get_splitter(self, data: NDFrame, axis: int = 0) -> DataSplitter:
"""
Returns
-------
Generator yielding subsetted objects
__finalize__ has not been called for the subsetted objects returned.
"""
ids, _, ngroups = self.group_info
return get_splitter(data, ids, ngroups, axis=axis)
def _get_grouper(self):
"""
We are a grouper as part of another's groupings.
We have a specific method of grouping, so cannot
convert to a Index for our grouper.
"""
return self.groupings[0].grouping_vector
@final
@cache_readonly
def group_keys_seq(self):
if len(self.groupings) == 1:
return self.levels[0]
else:
ids, _, ngroups = self.group_info
# provide "flattened" iterator for multi-group setting
return get_flattened_list(ids, ngroups, self.levels, self.codes)
@final
def apply(
self, f: Callable, data: DataFrame | Series, axis: int = 0
) -> tuple[list, bool]:
mutated = self.mutated
splitter = self._get_splitter(data, axis=axis)
group_keys = self.group_keys_seq
result_values = []
# This calls DataSplitter.__iter__
zipped = zip(group_keys, splitter)
for key, group in zipped:
object.__setattr__(group, "name", key)
# group might be modified
group_axes = group.axes
res = f(group)
if not mutated and not _is_indexed_like(res, group_axes, axis):
mutated = True
result_values.append(res)
# getattr pattern for __name__ is needed for functools.partial objects
if len(group_keys) == 0 and getattr(f, "__name__", None) not in [
"idxmin",
"idxmax",
"nanargmin",
"nanargmax",
]:
# If group_keys is empty, then no function calls have been made,
# so we will not have raised even if this is an invalid dtype.
# So do one dummy call here to raise appropriate TypeError.
f(data.iloc[:0])
return result_values, mutated
@cache_readonly
def indices(self) -> dict[Hashable, npt.NDArray[np.intp]]:
"""dict {group name -> group indices}"""
if len(self.groupings) == 1 and isinstance(self.result_index, CategoricalIndex):
# This shows unused categories in indices GH#38642
return self.groupings[0].indices
codes_list = [ping.codes for ping in self.groupings]
keys = [ping.group_index for ping in self.groupings]
return get_indexer_dict(codes_list, keys)
@final
@property
def codes(self) -> list[np.ndarray]:
return [ping.codes for ping in self.groupings]
@property
def levels(self) -> list[Index]:
return [ping.group_index for ping in self.groupings]
@property
def names(self) -> list[Hashable]:
return [ping.name for ping in self.groupings]
@final
def size(self) -> Series:
"""
Compute group sizes.
"""
ids, _, ngroups = self.group_info
if ngroups:
out = np.bincount(ids[ids != -1], minlength=ngroups)
else:
out = []
return Series(out, index=self.result_index, dtype="int64")
@cache_readonly
def groups(self) -> dict[Hashable, np.ndarray]:
"""dict {group name -> group labels}"""
if len(self.groupings) == 1:
return self.groupings[0].groups
else:
to_groupby = zip(*(ping.grouping_vector for ping in self.groupings))
index = Index(to_groupby)
return self.axis.groupby(index)
@final
@cache_readonly
def is_monotonic(self) -> bool:
# return if my group orderings are monotonic
return Index(self.group_info[0]).is_monotonic
@cache_readonly
def group_info(self) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp], int]:
comp_ids, obs_group_ids = self._get_compressed_codes()
ngroups = len(obs_group_ids)
comp_ids = ensure_platform_int(comp_ids)
return comp_ids, obs_group_ids, ngroups
@final
@cache_readonly
def codes_info(self) -> npt.NDArray[np.intp]:
# return the codes of items in original grouped axis
ids, _, _ = self.group_info
if self.indexer is not None:
sorter = np.lexsort((ids, self.indexer))
ids = ids[sorter]
ids = ensure_platform_int(ids)
# TODO: if numpy annotates np.lexsort, this ensure_platform_int
# may become unnecessary
return ids
@final
def _get_compressed_codes(self) -> tuple[np.ndarray, npt.NDArray[np.intp]]:
# The first returned ndarray may have any signed integer dtype
if len(self.groupings) > 1:
group_index = get_group_index(self.codes, self.shape, sort=True, xnull=True)
return compress_group_index(group_index, sort=self._sort)
ping = self.groupings[0]
return ping.codes, np.arange(len(ping.group_index), dtype=np.intp)
@final
@cache_readonly
def ngroups(self) -> int:
return len(self.result_index)
@property
def reconstructed_codes(self) -> list[np.ndarray]:
codes = self.codes
ids, obs_ids, _ = self.group_info
return decons_obs_group_ids(ids, obs_ids, self.shape, codes, xnull=True)
@final
@cache_readonly
def result_arraylike(self) -> ArrayLike:
"""
Analogous to result_index, but returning an ndarray/ExtensionArray
allowing us to retain ExtensionDtypes not supported by Index.
"""
# TODO(ExtensionIndex): once Index supports arbitrary EAs, this can
# be removed in favor of result_index
if len(self.groupings) == 1:
return self.groupings[0].group_arraylike
# result_index is MultiIndex
return self.result_index._values
@cache_readonly
def result_index(self) -> Index:
if len(self.groupings) == 1:
return self.groupings[0].result_index.rename(self.names[0])
codes = self.reconstructed_codes
levels = [ping.result_index for ping in self.groupings]
return MultiIndex(
levels=levels, codes=codes, verify_integrity=False, names=self.names
)
@final
def get_group_levels(self) -> list[ArrayLike]:
# Note: only called from _insert_inaxis_grouper_inplace, which
# is only called for BaseGrouper, never for BinGrouper
if len(self.groupings) == 1:
return [self.groupings[0].group_arraylike]
name_list = []
for ping, codes in zip(self.groupings, self.reconstructed_codes):
codes = ensure_platform_int(codes)
levels = ping.group_arraylike.take(codes)
name_list.append(levels)
return name_list
# ------------------------------------------------------------
# Aggregation functions
@final
def _cython_operation(
self,
kind: str,
values,
how: str,
axis: int,
min_count: int = -1,
**kwargs,
) -> ArrayLike:
"""
Returns the values of a cython operation.
"""
assert kind in ["transform", "aggregate"]
cy_op = WrappedCythonOp(kind=kind, how=how)
ids, _, _ = self.group_info
ngroups = self.ngroups
return cy_op.cython_operation(
values=values,
axis=axis,
min_count=min_count,
comp_ids=ids,
ngroups=ngroups,
**kwargs,
)
@final
def agg_series(
self, obj: Series, func: Callable, preserve_dtype: bool = False
) -> ArrayLike:
"""
Parameters
----------
obj : Series
func : function taking a Series and returning a scalar-like
preserve_dtype : bool
Whether the aggregation is known to be dtype-preserving.
Returns
-------
np.ndarray or ExtensionArray
"""
# test_groupby_empty_with_category gets here with self.ngroups == 0
# and len(obj) > 0
if len(obj) == 0:
# SeriesGrouper would raise if we were to call _aggregate_series_fast
result = self._aggregate_series_pure_python(obj, func)
elif not isinstance(obj._values, np.ndarray):
result = self._aggregate_series_pure_python(obj, func)
# we can preserve a little bit more aggressively with EA dtype
# because maybe_cast_pointwise_result will do a try/except
# with _from_sequence. NB we are assuming here that _from_sequence
# is sufficiently strict that it casts appropriately.
preserve_dtype = True
else:
result = self._aggregate_series_pure_python(obj, func)
npvalues = lib.maybe_convert_objects(result, try_float=False)
if preserve_dtype:
out = maybe_cast_pointwise_result(npvalues, obj.dtype, numeric_only=True)
else:
out = npvalues
return out
@final
def _aggregate_series_pure_python(
self, obj: Series, func: Callable
) -> npt.NDArray[np.object_]:
ids, _, ngroups = self.group_info
counts = np.zeros(ngroups, dtype=int)
result = np.empty(ngroups, dtype="O")
initialized = False
# equiv: splitter = self._get_splitter(obj, axis=0)
splitter = get_splitter(obj, ids, ngroups, axis=0)
for i, group in enumerate(splitter):
res = func(group)
res = libreduction.extract_result(res)
if not initialized:
# We only do this validation on the first iteration
libreduction.check_result_array(res, group.dtype)
initialized = True
counts[i] = group.shape[0]
result[i] = res
return result
class BinGrouper(BaseGrouper):
"""
This is an internal Grouper class
Parameters
----------
bins : the split index of binlabels to group the item of axis
binlabels : the label list
mutated : bool, default False
indexer : np.ndarray[np.intp]
Examples
--------
bins: [2, 4, 6, 8, 10]
binlabels: DatetimeIndex(['2005-01-01', '2005-01-03',
'2005-01-05', '2005-01-07', '2005-01-09'],
dtype='datetime64[ns]', freq='2D')
the group_info, which contains the label of each item in grouped
axis, the index of label in label list, group number, is
(array([0, 0, 1, 1, 2, 2, 3, 3, 4, 4]), array([0, 1, 2, 3, 4]), 5)
means that, the grouped axis has 10 items, can be grouped into 5
labels, the first and second items belong to the first label, the
third and fourth items belong to the second label, and so on
"""
bins: npt.NDArray[np.int64]
binlabels: Index
mutated: bool
def __init__(
self,
bins,
binlabels,
mutated: bool = False,
indexer=None,
):
self.bins = ensure_int64(bins)
self.binlabels = ensure_index(binlabels)
self.mutated = mutated
self.indexer = indexer
# These lengths must match, otherwise we could call agg_series
# with empty self.bins, which would raise in libreduction.
assert len(self.binlabels) == len(self.bins)
@cache_readonly
def groups(self):
"""dict {group name -> group labels}"""
# this is mainly for compat
# GH 3881
result = {
key: value
for key, value in zip(self.binlabels, self.bins)
if key is not NaT
}
return result
@property
def nkeys(self) -> int:
# still matches len(self.groupings), but we can hard-code
return 1
def _get_grouper(self):
"""
We are a grouper as part of another's groupings.
We have a specific method of grouping, so cannot
convert to a Index for our grouper.
"""
return self
def get_iterator(self, data: NDFrame, axis: int = 0):
"""
Groupby iterator
Returns
-------
Generator yielding sequence of (name, subsetted object)
for each group
"""
if axis == 0:
slicer = lambda start, edge: data.iloc[start:edge]
else:
slicer = lambda start, edge: data.iloc[:, start:edge]
length = len(data.axes[axis])
start = 0
for edge, label in zip(self.bins, self.binlabels):
if label is not NaT:
yield label, slicer(start, edge)
start = edge
if start < length:
yield self.binlabels[-1], slicer(start, None)
@cache_readonly
def indices(self):
indices = collections.defaultdict(list)
i = 0
for label, bin in zip(self.binlabels, self.bins):
if i < bin:
if label is not NaT:
indices[label] = list(range(i, bin))
i = bin
return indices
@cache_readonly
def group_info(self) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp], int]:
ngroups = self.ngroups
obs_group_ids = np.arange(ngroups, dtype=np.intp)
rep = np.diff(np.r_[0, self.bins])
rep = ensure_platform_int(rep)
if ngroups == len(self.bins):
comp_ids = np.repeat(np.arange(ngroups), rep)
else:
comp_ids = np.repeat(np.r_[-1, np.arange(ngroups)], rep)
return (
ensure_platform_int(comp_ids),
obs_group_ids,
ngroups,
)
@cache_readonly
def reconstructed_codes(self) -> list[np.ndarray]:
# get unique result indices, and prepend 0 as groupby starts from the first
return [np.r_[0, np.flatnonzero(self.bins[1:] != self.bins[:-1]) + 1]]
@cache_readonly
def result_index(self):
if len(self.binlabels) != 0 and isna(self.binlabels[0]):
return self.binlabels[1:]
return self.binlabels
@property
def levels(self) -> list[Index]:
return [self.binlabels]
@property
def names(self) -> list[Hashable]:
return [self.binlabels.name]
@property
def groupings(self) -> list[grouper.Grouping]:
lev = self.binlabels
ping = grouper.Grouping(lev, lev, in_axis=False, level=None)
return [ping]
def _aggregate_series_fast(self, obj: Series, func: Callable) -> np.ndarray:
# -> np.ndarray[object]
raise NotImplementedError(
"This should not be reached; use _aggregate_series_pure_python"
)
def _is_indexed_like(obj, axes, axis: int) -> bool:
if isinstance(obj, Series):
if len(axes) > 1:
return False
return obj.axes[axis].equals(axes[axis])
elif isinstance(obj, DataFrame):
return obj.axes[axis].equals(axes[axis])
return False
# ----------------------------------------------------------------------
# Splitting / application
class DataSplitter(Generic[NDFrameT]):
def __init__(
self,
data: NDFrameT,
labels: npt.NDArray[np.intp],
ngroups: int,
axis: int = 0,
):
self.data = data
self.labels = ensure_platform_int(labels) # _should_ already be np.intp
self.ngroups = ngroups
self.axis = axis
assert isinstance(axis, int), axis
@cache_readonly
def slabels(self) -> npt.NDArray[np.intp]:
# Sorted labels
return self.labels.take(self._sort_idx)
@cache_readonly
def _sort_idx(self) -> npt.NDArray[np.intp]:
# Counting sort indexer
return get_group_index_sorter(self.labels, self.ngroups)
def __iter__(self):
sdata = self.sorted_data
if self.ngroups == 0:
# we are inside a generator, rather than raise StopIteration
# we merely return to signal the end
return
starts, ends = lib.generate_slices(self.slabels, self.ngroups)
for start, end in zip(starts, ends):
yield self._chop(sdata, slice(start, end))
@cache_readonly
def sorted_data(self) -> NDFrameT:
return self.data.take(self._sort_idx, axis=self.axis)
def _chop(self, sdata, slice_obj: slice) -> NDFrame:
raise AbstractMethodError(self)
class SeriesSplitter(DataSplitter):
def _chop(self, sdata: Series, slice_obj: slice) -> Series:
# fastpath equivalent to `sdata.iloc[slice_obj]`
mgr = sdata._mgr.get_slice(slice_obj)
# __finalize__ not called here, must be applied by caller if applicable
# fastpath equivalent to:
# `return sdata._constructor(mgr, name=sdata.name, fastpath=True)`
obj = type(sdata)._from_mgr(mgr)
object.__setattr__(obj, "_flags", sdata._flags)
object.__setattr__(obj, "_name", sdata._name)
return obj
class FrameSplitter(DataSplitter):
def _chop(self, sdata: DataFrame, slice_obj: slice) -> DataFrame:
# Fastpath equivalent to:
# if self.axis == 0:
# return sdata.iloc[slice_obj]
# else:
# return sdata.iloc[:, slice_obj]
mgr = sdata._mgr.get_slice(slice_obj, axis=1 - self.axis)
# __finalize__ not called here, must be applied by caller if applicable
# fastpath equivalent to `return sdata._constructor(mgr)`
obj = type(sdata)._from_mgr(mgr)
object.__setattr__(obj, "_flags", sdata._flags)
return obj
def get_splitter(
data: NDFrame, labels: np.ndarray, ngroups: int, axis: int = 0
) -> DataSplitter:
if isinstance(data, Series):
klass: type[DataSplitter] = SeriesSplitter
else:
# i.e. DataFrame
klass = FrameSplitter
return klass(data, labels, ngroups, axis)
| 31.34114 | 88 | 0.584089 | ["BSD-3-Clause"] | CuteLemon/pandas | pandas/core/groupby/ops.py | 40,148 | Python |
#!/usr/bin/env python
import sys
from indicator import Indicator
def main():
stationId = sys.argv[1] if len(sys.argv) > 1 else Indicator.DEFAULT_STATION_ID
ind = Indicator(stationId)
print(ind.get_aqindex_url())
print(ind.get_all_stations_url())
print(ind.get_aqindex())
print(ind.get_data())
return 0
if __name__ == '__main__':
sys.exit(main())
| 22.411765 | 82 | 0.690289 | ["MIT"] | runningt/airquality_indicator | main.py | 381 | Python |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Basic arithmetic operators.
See the [python/math_ops](python/math_ops) guide.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.eager import context
from tensorflow.python.framework import common_shapes
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import graph_util
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_data_flow_ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops import gen_sparse_ops
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.python.ops.gen_math_ops import *
# pylint: enable=wildcard-import
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import compat
from tensorflow.python.util import deprecation
from tensorflow.python.util import dispatch
from tensorflow.python.util import nest
from tensorflow.python.util.tf_export import tf_export
# Aliases for some automatically-generated names.
linspace = gen_math_ops.lin_space
nextafter = gen_math_ops.next_after
arg_max = deprecation.deprecated(None, "Use `tf.math.argmax` instead")(arg_max) # pylint: disable=used-before-assignment
arg_min = deprecation.deprecated(None, "Use `tf.math.argmin` instead")(arg_min) # pylint: disable=used-before-assignment
tf_export(v1=["arg_max"])(arg_max)
tf_export(v1=["arg_min"])(arg_min)
# This is set by resource_variable_ops.py. It is included in this way since
# there is a circular dependency between math_ops and resource_variable_ops
_resource_variable_type = None
def _set_doc(doc):
def _decorator(func):
func.__doc__ = doc
return func
return _decorator
# pylint: disable=redefined-builtin
@tf_export(v1=["math.argmax", "argmax"])
@deprecation.deprecated_args(None, "Use the `axis` argument instead",
"dimension")
@_set_doc(
gen_math_ops.arg_max.__doc__.replace("dimensions", "axes").replace(
"dimension", "axis"))
def argmax(input,
axis=None,
name=None,
dimension=None,
output_type=dtypes.int64):
axis = deprecation.deprecated_argument_lookup(
"axis", axis, "dimension", dimension)
return argmax_v2(input, axis, output_type, name)
@tf_export("math.argmax", "argmax", v1=[])
def argmax_v2(input,
axis=None,
output_type=dtypes.int64,
name=None):
"""Returns the index with the largest value across axes of a tensor.
Note that in case of ties the identity of the return value is not guaranteed.
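For example (illustrative):
```python
x = tf.constant([[10., 20., 5.], [7., 30., 2.]])
tf.math.argmax(x, axis=1)  # [1, 1]
```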
Args:
input: A `Tensor`. Must be one of the following types: `float32`, `float64`,
`int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`,
`qint32`, `bfloat16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`.
axis: A `Tensor`. Must be one of the following types: `int32`, `int64`.
int32 or int64, must be in the range `[-rank(input), rank(input))`.
Describes which axis of the input Tensor to reduce across. For vectors,
use axis = 0.
output_type: An optional `tf.DType` from: `tf.int32, tf.int64`.
Defaults to `tf.int64`.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `output_type`.
"""
if axis is None:
axis = 0
return gen_math_ops.arg_max(input, axis, name=name, output_type=output_type)
@tf_export(v1=["math.argmin", "argmin"])
@deprecation.deprecated_args(None, "Use the `axis` argument instead",
"dimension")
@_set_doc(
gen_math_ops.arg_min.__doc__.replace("dimensions", "axes").replace(
"dimension", "axis"))
def argmin(input,
axis=None,
name=None,
dimension=None,
output_type=dtypes.int64):
axis = deprecation.deprecated_argument_lookup(
"axis", axis, "dimension", dimension)
return argmin_v2(input, axis, output_type, name)
@tf_export("math.argmin", "argmin", v1=[])
def argmin_v2(input,
axis=None,
output_type=dtypes.int64,
name=None):
"""Returns the index with the smallest value across axes of a tensor.
Note that in case of ties the identity of the return value is not guaranteed.
Args:
input: A `Tensor`. Must be one of the following types: `float32`, `float64`,
`int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`,
`qint32`, `bfloat16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`.
axis: A `Tensor`. Must be one of the following types: `int32`, `int64`.
int32 or int64, must be in the range `[-rank(input), rank(input))`.
Describes which axis of the input Tensor to reduce across. For vectors,
use axis = 0.
output_type: An optional `tf.DType` from: `tf.int32, tf.int64`.
Defaults to `tf.int64`.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `output_type`.
"""
if axis is None:
axis = 0
return gen_math_ops.arg_min(input, axis, name=name, output_type=output_type)
# pylint: enable=redefined-builtin
# pylint: disable=anomalous-backslash-in-string,protected-access
# pylint: disable=g-docstring-has-escape
@tf_export("math.abs", "abs")
@dispatch.add_dispatch_support
def abs(x, name=None): # pylint: disable=redefined-builtin
r"""Computes the absolute value of a tensor.
Given a tensor `x` of complex numbers, this operation returns a tensor of type
`float32` or `float64` that is the absolute value of each element in `x`. All
elements in `x` must be complex numbers of the form \\(a + bj\\). The
absolute value is computed as \\( \sqrt{a^2 + b^2}\\). For example:
```python
x = tf.constant([[-2.25 + 4.75j], [-3.25 + 5.75j]])
tf.abs(x) # [5.25594902, 6.60492229]
```
Args:
x: A `Tensor` or `SparseTensor` of type `float16`, `float32`, `float64`,
`int32`, `int64`, `complex64` or `complex128`.
name: A name for the operation (optional).
Returns:
A `Tensor` or `SparseTensor` the same size and type as `x` with absolute
values.
Note, for `complex64` or `complex128` input, the returned `Tensor` will be
of type `float32` or `float64`, respectively.
"""
with ops.name_scope(name, "Abs", [x]) as name:
x = ops.convert_to_tensor(x, name="x")
if x.dtype.is_complex:
return gen_math_ops.complex_abs(x, Tout=x.dtype.real_dtype, name=name)
return gen_math_ops._abs(x, name=name)
# pylint: enable=g-docstring-has-escape
# pylint: disable=redefined-builtin
def _bucketize(input, boundaries, name=None):
return gen_math_ops.bucketize(input=input, boundaries=boundaries, name=name)
# pylint: enable=redefined-builtin
class DivideDelegateWithName(object):
"""Use Python2/Python3 division delegation to implement divide for tensors."""
def __init__(self, x, name):
"""Construct DivideDelegateWithName.
Args:
x: Tensor to use as left operand in operator overloads
name: The name that is preferred for the op created.
"""
self.x = x
self.name = name
def __truediv__(self, y):
return _truediv_python3(self.x, y, self.name)
def __floordiv__(self, y):
return floordiv(self.x, y, self.name)
def __div__(self, y):
return _div_python2(self.x, y, self.name)
@tf_export("math.divide", "divide")
@dispatch.add_dispatch_support
def divide(x, y, name=None):
"""Computes Python style division of `x` by `y`."""
if name is not None:
# Cannot use the tensor's operator overload, because it has no way to track
# override names. Use a dummy class to track the runtime division behavior.
return DivideDelegateWithName(x, name) / y
else:
return x / y
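# For example (illustrative): tf.math.divide(tf.constant(7.0), tf.constant(2.0))
# evaluates to 3.5, matching Python 3 true division.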
@tf_export("math.multiply", "multiply")
@dispatch.add_dispatch_support
def multiply(x, y, name=None):
return gen_math_ops.mul(x, y, name)
multiply.__doc__ = gen_math_ops.mul.__doc__.replace("Multiply", "`tf.multiply`")
# TODO(aselle): put deprecation in after another round of global code changes
@deprecation.deprecated(
"2016-12-30",
"`tf.mul(x, y)` is deprecated, please use `tf.multiply(x, y)` or `x * y`")
def _mul(x, y, name=None):
return gen_math_ops.mul(x, y, name)
_mul.__doc__ = (
gen_math_ops.mul.__doc__ + ("" if _mul.__doc__ is None else _mul.__doc__))
@tf_export("math.subtract", "subtract")
@dispatch.add_dispatch_support
def subtract(x, y, name=None):
return gen_math_ops.sub(x, y, name)
subtract.__doc__ = gen_math_ops.sub.__doc__.replace("`Sub`", "`tf.subtract`")
# TODO(aselle): put deprecation in after another round of global code changes
@deprecation.deprecated(
"2016-12-30",
"`tf.sub(x, y)` is deprecated, please use `tf.subtract(x, y)` or `x - y`")
def _sub(x, y, name=None):
return gen_math_ops.sub(x, y, name)
_sub.__doc__ = (
gen_math_ops.sub.__doc__ + ("" if _sub.__doc__ is None else _sub.__doc__))
negative = gen_math_ops.neg
# pylint: disable=g-docstring-has-escape
@deprecation.deprecated(
"2016-12-30",
"`tf.neg(x)` is deprecated, please use `tf.negative(x)` or `-x`")
def _neg(x, name=None):
"""Computes numerical negative value element-wise.
I.e., \\(y = -x\\).
Args:
x: A `Tensor` or `SparseTensor`. Must be one of the following types: `half`,
`float32`, `float64`, `int32`, `int64`, `complex64`, `complex128`.
name: A name for the operation (optional).
Returns:
A `Tensor` or `SparseTensor`, respectively. Has the same type as `x`.
"""
return negative(x, name)
# pylint: enable=g-docstring-has-escape
@tf_export(v1=["math.scalar_mul", "scalar_mul"])
def scalar_mul(scalar, x, name=None):
"""Multiplies a scalar times a `Tensor` or `IndexedSlices` object.
Intended for use in gradient code which might deal with `IndexedSlices`
objects, which are easy to multiply by a scalar but more expensive to
multiply with arbitrary tensors.
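  For example (an illustrative sketch; the commented values show the computed
  result only, not the full tensor repr):
  ```python
  x = tf.constant([1.0, 2.0, 3.0])
  tf.math.scalar_mul(10.0, x)  # [10.0, 20.0, 30.0]
  ```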
Args:
scalar: A 0-D scalar `Tensor`. Must have known shape.
x: A `Tensor` or `IndexedSlices` to be scaled.
name: A name for the operation (optional).
Returns:
`scalar * x` of the same type (`Tensor` or `IndexedSlices`) as `x`.
Raises:
    ValueError: if `scalar` is not a 0-D scalar `Tensor`.
"""
scalar = ops.convert_to_tensor(
scalar, dtype=x.dtype.base_dtype, name="scalar")
shape = scalar.get_shape()
if shape.ndims == 0:
if isinstance(x, ops.IndexedSlices):
return ops.IndexedSlices(gen_math_ops.mul(scalar, x.values, name),
x.indices, x.dense_shape)
else:
return gen_math_ops.mul(scalar, x, name)
else:
raise ValueError("Only scalar multiply works, got shape %s" % shape)
@tf_export("math.scalar_mul", "scalar_mul", v1=[])
@_set_doc(scalar_mul.__doc__)
def scalar_mul_v2(scalar, x, name=None):
with ops.name_scope(name, "scalar_mul", [x]) as name:
return scalar_mul(scalar, x, name)
@tf_export("math.pow", "pow")
@dispatch.add_dispatch_support
def pow(x, y, name=None): # pylint: disable=redefined-builtin
r"""Computes the power of one value to another.
Given a tensor `x` and a tensor `y`, this operation computes \\(x^y\\) for
corresponding elements in `x` and `y`. For example:
```python
x = tf.constant([[2, 2], [3, 3]])
y = tf.constant([[8, 16], [2, 3]])
tf.pow(x, y) # [[256, 65536], [9, 27]]
```
Args:
x: A `Tensor` of type `float16`, `float32`, `float64`, `int32`, `int64`,
`complex64`, or `complex128`.
y: A `Tensor` of type `float16`, `float32`, `float64`, `int32`, `int64`,
`complex64`, or `complex128`.
name: A name for the operation (optional).
Returns:
A `Tensor`.
"""
with ops.name_scope(name, "Pow", [x]) as name:
return gen_math_ops._pow(x, y, name=name)
# pylint: disable=redefined-builtin,redefined-outer-name
@tf_export("dtypes.complex", "complex")
@dispatch.add_dispatch_support
def complex(real, imag, name=None):
r"""Converts two real numbers to a complex number.
Given a tensor `real` representing the real part of a complex number, and a
tensor `imag` representing the imaginary part of a complex number, this
operation returns complex numbers elementwise of the form \\(a + bj\\), where
*a* represents the `real` part and *b* represents the `imag` part.
The input tensors `real` and `imag` must have the same shape.
For example:
```python
real = tf.constant([2.25, 3.25])
imag = tf.constant([4.75, 5.75])
tf.complex(real, imag) # [[2.25 + 4.75j], [3.25 + 5.75j]]
```
Args:
real: A `Tensor`. Must be one of the following types: `float32`,
`float64`.
imag: A `Tensor`. Must have the same type as `real`.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `complex64` or `complex128`.
"""
real = ops.convert_to_tensor(real, name="real")
imag = ops.convert_to_tensor(imag, name="imag")
with ops.name_scope(name, "Complex", [real, imag]) as name:
input_types = (real.dtype, imag.dtype)
if input_types == (dtypes.float64, dtypes.float64):
Tout = dtypes.complex128
elif input_types == (dtypes.float32, dtypes.float32):
Tout = dtypes.complex64
else:
raise TypeError("real and imag have incorrect types: "
"{} {}".format(real.dtype.name, imag.dtype.name))
return gen_math_ops._complex(real, imag, Tout=Tout, name=name)
@tf_export("math.real", v1=["math.real", "real"])
@deprecation.deprecated_endpoints("real")
@dispatch.add_dispatch_support
def real(input, name=None):
r"""Returns the real part of a complex (or real) tensor.
Given a tensor `input`, this operation returns a tensor of type `float` that
is the real part of each element in `input` considered as a complex number.
For example:
```python
x = tf.constant([-2.25 + 4.75j, 3.25 + 5.75j])
tf.real(x) # [-2.25, 3.25]
```
If `input` is already real, it is returned unchanged.
Args:
input: A `Tensor`. Must have numeric type.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `float32` or `float64`.
"""
with ops.name_scope(name, "Real", [input]) as name:
if input.dtype.is_complex:
real_dtype = input.dtype.real_dtype
return gen_math_ops.real(input, Tout=real_dtype, name=name)
else:
return input
@tf_export("math.imag", v1=["math.imag", "imag"])
@deprecation.deprecated_endpoints("imag")
@dispatch.add_dispatch_support
def imag(input, name=None):
r"""Returns the imaginary part of a complex (or real) tensor.
Given a tensor `input`, this operation returns a tensor of type `float` that
is the imaginary part of each element in `input` considered as a complex
number. If `input` is real, a tensor of all zeros is returned.
For example:
```python
x = tf.constant([-2.25 + 4.75j, 3.25 + 5.75j])
tf.imag(x) # [4.75, 5.75]
```
Args:
input: A `Tensor`. Must be one of the following types: `float`, `double`,
`complex64`, `complex128`.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `float32` or `float64`.
"""
with ops.name_scope(name, "Imag", [input]) as name:
if input.dtype.is_complex:
return gen_math_ops.imag(input, Tout=input.dtype.real_dtype, name=name)
else:
return array_ops.zeros_like(input)
@tf_export("math.angle", v1=["math.angle", "angle"])
@deprecation.deprecated_endpoints("angle")
@dispatch.add_dispatch_support
def angle(input, name=None):
r"""Returns the element-wise argument of a complex (or real) tensor.
Given a tensor `input`, this operation returns a tensor of type `float` that
is the argument of each element in `input` considered as a complex number.
The elements in `input` are considered to be complex numbers of the form
\\(a + bj\\), where *a* is the real part and *b* is the imaginary part.
If `input` is real then *b* is zero by definition.
The argument returned by this function is of the form \\(atan2(b, a)\\).
If `input` is real, a tensor of all zeros is returned.
For example:
```
# tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j]
tf.angle(input) ==> [2.0132, 1.056]
```
Args:
input: A `Tensor`. Must be one of the following types: `float`, `double`,
`complex64`, `complex128`.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `float32` or `float64`.
"""
with ops.name_scope(name, "Angle", [input]) as name:
if input.dtype.is_complex:
return gen_math_ops.angle(input, Tout=input.dtype.real_dtype, name=name)
else:
return array_ops.zeros_like(input)
# pylint: enable=redefined-outer-name,redefined-builtin
@tf_export("math.round", "round")
@dispatch.add_dispatch_support
def round(x, name=None): # pylint: disable=redefined-builtin
"""Rounds the values of a tensor to the nearest integer, element-wise.
  Rounds half to even, also known as banker's rounding. If you want to round
  according to the current system rounding mode, use tf::cint.
For example:
```python
x = tf.constant([0.9, 2.5, 2.3, 1.5, -4.5])
tf.round(x) # [ 1.0, 2.0, 2.0, 2.0, -4.0 ]
```
Args:
x: A `Tensor` of type `float16`, `float32`, `float64`, `int32`, or `int64`.
name: A name for the operation (optional).
Returns:
A `Tensor` of same shape and type as `x`.
"""
x = ops.convert_to_tensor(x, name="x")
if x.dtype.is_integer:
return x
else:
return gen_math_ops.round(x, name=name)
@tf_export("dtypes.cast", "cast")
@dispatch.add_dispatch_support
def cast(x, dtype, name=None):
"""Casts a tensor to a new type.
The operation casts `x` (in case of `Tensor`) or `x.values`
(in case of `SparseTensor` or `IndexedSlices`) to `dtype`.
For example:
```python
x = tf.constant([1.8, 2.2], dtype=tf.float32)
tf.cast(x, tf.int32) # [1, 2], dtype=tf.int32
```
The operation supports data types (for `x` and `dtype`) of
`uint8`, `uint16`, `uint32`, `uint64`, `int8`, `int16`, `int32`, `int64`,
`float16`, `float32`, `float64`, `complex64`, `complex128`, `bfloat16`.
In case of casting from complex types (`complex64`, `complex128`) to real
types, only the real part of `x` is returned. In case of casting from real
types to complex types (`complex64`, `complex128`), the imaginary part of the
returned value is set to `0`. The handling of complex types here matches the
behavior of numpy.
Args:
x: A `Tensor` or `SparseTensor` or `IndexedSlices` of numeric type. It could
be `uint8`, `uint16`, `uint32`, `uint64`, `int8`, `int16`, `int32`,
`int64`, `float16`, `float32`, `float64`, `complex64`, `complex128`,
`bfloat16`.
dtype: The destination type. The list of supported dtypes is the same as
`x`.
name: A name for the operation (optional).
Returns:
A `Tensor` or `SparseTensor` or `IndexedSlices` with same shape as `x` and
same type as `dtype`.
Raises:
TypeError: If `x` cannot be cast to the `dtype`.
"""
base_type = dtypes.as_dtype(dtype).base_dtype
if isinstance(x,
(ops.Tensor, _resource_variable_type)) and base_type == x.dtype:
return x
with ops.name_scope(name, "Cast", [x]) as name:
if isinstance(x, sparse_tensor.SparseTensor):
values_cast = cast(x.values, base_type, name=name)
x = sparse_tensor.SparseTensor(x.indices, values_cast, x.dense_shape)
elif isinstance(x, ops.IndexedSlices):
values_cast = cast(x.values, base_type, name=name)
x = ops.IndexedSlices(values_cast, x.indices, x.dense_shape)
else:
# TODO(josh11b): If x is not already a Tensor, we could return
# ops.convert_to_tensor(x, dtype=dtype, ...) here, but that
# allows some conversions that cast() can't do, e.g. casting numbers to
# strings.
x = ops.convert_to_tensor(x, name="x")
if x.dtype.base_dtype != base_type:
x = gen_math_ops.cast(x, base_type, name=name)
if x.dtype.is_complex and base_type.is_floating:
logging.warn("Casting complex to real discards imaginary part.")
return x
@tf_export("dtypes.saturate_cast", "saturate_cast")
@dispatch.add_dispatch_support
def saturate_cast(value, dtype, name=None):
"""Performs a safe saturating cast of `value` to `dtype`.
  This function casts the input to `dtype` without applying any scaling. If
  there is a danger that values would overflow or underflow in the cast, this
  op applies the appropriate clamping before the cast.
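  For example (illustrative; values outside the representable range of the
  target dtype are clamped before the cast):
  ```python
  x = tf.constant([-1, 300], dtype=tf.int32)
  tf.saturate_cast(x, tf.uint8)  # [0, 255]
  ```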
Args:
value: A `Tensor`.
dtype: The desired output `DType`.
name: A name for the operation (optional).
Returns:
`value` safely cast to `dtype`.
"""
# When casting to a type with smaller representable range, clamp.
# Note that this covers casting to unsigned types as well.
with ops.name_scope(name, "saturate_cast", [value]) as name:
value = ops.convert_to_tensor(value, name="value")
dtype = dtypes.as_dtype(dtype).base_dtype
if value.dtype.min < dtype.min:
value = gen_math_ops.maximum(value,
ops.convert_to_tensor(
dtype.min, dtype=value.dtype,
name="min"))
if value.dtype.max > dtype.max:
value = gen_math_ops.minimum(value,
ops.convert_to_tensor(
dtype.max, dtype=value.dtype,
name="max"))
return cast(value, dtype, name=name)
@deprecation.deprecated(date=None, instructions="Use tf.cast instead.")
@tf_export(v1=["to_float"])
def to_float(x, name="ToFloat"):
"""Casts a tensor to type `float32`.
Args:
x: A `Tensor` or `SparseTensor` or `IndexedSlices`.
name: A name for the operation (optional).
Returns:
A `Tensor` or `SparseTensor` or `IndexedSlices` with same shape as `x` with
type `float32`.
Raises:
TypeError: If `x` cannot be cast to the `float32`.
"""
return cast(x, dtypes.float32, name=name)
@deprecation.deprecated(date=None, instructions="Use tf.cast instead.")
@tf_export(v1=["to_double"])
def to_double(x, name="ToDouble"):
"""Casts a tensor to type `float64`.
Args:
x: A `Tensor` or `SparseTensor` or `IndexedSlices`.
name: A name for the operation (optional).
Returns:
A `Tensor` or `SparseTensor` or `IndexedSlices` with same shape as `x` with
type `float64`.
Raises:
TypeError: If `x` cannot be cast to the `float64`.
"""
return cast(x, dtypes.float64, name=name)
@deprecation.deprecated(date=None, instructions="Use tf.cast instead.")
@tf_export(v1=["to_int32"])
def to_int32(x, name="ToInt32"):
"""Casts a tensor to type `int32`.
Args:
x: A `Tensor` or `SparseTensor` or `IndexedSlices`.
name: A name for the operation (optional).
Returns:
A `Tensor` or `SparseTensor` or `IndexedSlices` with same shape as `x` with
type `int32`.
Raises:
TypeError: If `x` cannot be cast to the `int32`.
"""
return cast(x, dtypes.int32, name=name)
@deprecation.deprecated(date=None, instructions="Use tf.cast instead.")
@tf_export(v1=["to_int64"])
def to_int64(x, name="ToInt64"):
"""Casts a tensor to type `int64`.
Args:
x: A `Tensor` or `SparseTensor` or `IndexedSlices`.
name: A name for the operation (optional).
Returns:
A `Tensor` or `SparseTensor` or `IndexedSlices` with same shape as `x` with
type `int64`.
Raises:
TypeError: If `x` cannot be cast to the `int64`.
"""
return cast(x, dtypes.int64, name=name)
@deprecation.deprecated(date=None, instructions="Use tf.cast instead.")
@tf_export(v1=["to_bfloat16"])
def to_bfloat16(x, name="ToBFloat16"):
"""Casts a tensor to type `bfloat16`.
Args:
x: A `Tensor` or `SparseTensor` or `IndexedSlices`.
name: A name for the operation (optional).
Returns:
A `Tensor` or `SparseTensor` or `IndexedSlices` with same shape as `x` with
type `bfloat16`.
Raises:
TypeError: If `x` cannot be cast to the `bfloat16`.
"""
return cast(x, dtypes.bfloat16, name=name)
@deprecation.deprecated(date=None, instructions="Use tf.cast instead.")
@tf_export(v1=["to_complex64"])
def to_complex64(x, name="ToComplex64"):
"""Casts a tensor to type `complex64`.
Args:
x: A `Tensor` or `SparseTensor` or `IndexedSlices`.
name: A name for the operation (optional).
Returns:
A `Tensor` or `SparseTensor` or `IndexedSlices` with same shape as `x` with
type `complex64`.
Raises:
TypeError: If `x` cannot be cast to the `complex64`.
"""
return cast(x, dtypes.complex64, name=name)
@deprecation.deprecated(date=None, instructions="Use tf.cast instead.")
@tf_export(v1=["to_complex128"])
def to_complex128(x, name="ToComplex128"):
"""Casts a tensor to type `complex128`.
Args:
x: A `Tensor` or `SparseTensor` or `IndexedSlices`.
name: A name for the operation (optional).
Returns:
A `Tensor` or `SparseTensor` or `IndexedSlices` with same shape as `x` with
type `complex128`.
Raises:
TypeError: If `x` cannot be cast to the `complex128`.
"""
return cast(x, dtypes.complex128, name=name)
ops.Tensor._override_operator("__neg__", gen_math_ops.neg)
ops.Tensor._override_operator("__abs__", abs)
# __invert__ corresponds to the ~ operator. Here we follow the numpy convention
# ~ marks an elementwise bit-wise inverse. This is only implemented for boolean
# tensors and will throw a TypeError if used on nonboolean arrays
ops.Tensor._override_operator("__invert__", gen_math_ops.logical_not)
def _OverrideBinaryOperatorHelper(func, op_name, clazz_object=ops.Tensor):
"""Register operators with different tensor and scalar versions.
If `clazz_object` is `SparseTensor`, assumes `func` takes `(sp_indices,
sp_values, sp_shape, dense)` and outputs `(new_sp_values)`.
Args:
func: the operator
op_name: name of the operator being overridden
clazz_object: class to override for. Either `Tensor` or `SparseTensor`.
"""
def binary_op_wrapper(x, y):
with ops.name_scope(None, op_name, [x, y]) as name:
if isinstance(x, ops.Tensor) and isinstance(y, ops.Tensor):
return func(x, y, name=name)
elif not isinstance(y, sparse_tensor.SparseTensor):
try:
y = ops.convert_to_tensor_v2(y, dtype_hint=x.dtype.base_dtype,
name="y")
except TypeError:
# If the RHS is not a tensor, it might be a tensor aware object
# that can implement the operator with knowledge of itself
# and the tensor.
if hasattr(type(y), "__r%s__" % op_name):
return NotImplemented
else:
raise
return func(x, y, name=name)
def binary_op_wrapper_sparse(sp_x, y):
with ops.name_scope(None, op_name, [sp_x, y]) as name:
y = ops.convert_to_tensor(y, dtype=sp_x.dtype.base_dtype, name="y")
return sparse_tensor.SparseTensor(sp_x.indices,
func(
sp_x.indices,
sp_x.values,
sp_x.dense_shape,
y,
name=name), sp_x.dense_shape)
def r_binary_op_wrapper(y, x):
with ops.name_scope(None, op_name, [x, y]) as name:
x = ops.convert_to_tensor(x, dtype=y.dtype.base_dtype, name="x")
return func(x, y, name=name)
# Propagate func.__doc__ to the wrappers
try:
doc = func.__doc__
except AttributeError:
doc = None
binary_op_wrapper.__doc__ = doc
r_binary_op_wrapper.__doc__ = doc
binary_op_wrapper_sparse.__doc__ = doc
if clazz_object is ops.Tensor:
clazz_object._override_operator("__%s__" % op_name, binary_op_wrapper)
del binary_op_wrapper
clazz_object._override_operator("__r%s__" % op_name, r_binary_op_wrapper)
del r_binary_op_wrapper
else:
clazz_object._override_operator("__%s__" % op_name,
binary_op_wrapper_sparse)
del binary_op_wrapper_sparse
# Conversion table for __truediv__. None entries mean no conversion required.
_TRUEDIV_TABLE = {
dtypes.uint8: dtypes.float32,
dtypes.int8: dtypes.float32,
dtypes.uint16: dtypes.float32,
dtypes.int16: dtypes.float32,
dtypes.int32: dtypes.float64,
dtypes.int64: dtypes.float64,
dtypes.bfloat16: None,
dtypes.float16: None,
dtypes.float32: None,
dtypes.float64: None,
dtypes.complex64: None,
dtypes.complex128: None,
}
# NOTE: the support of "sparse (true)div dense" is currently not baked into
# "tf.(true_)div()". Until such an API decision is made, the supported usage is
# to explicitly use the "/" operator to invoke either truediv or div.
def _sparse_dense_truediv(sp_indices, sp_values, sp_shape, y, name=None):
"""Internal helper function for 'sp_t / dense_t'."""
with ops.name_scope(name, "truediv",
[sp_indices, sp_values, sp_shape, y]) as name:
sp_values = ops.convert_to_tensor(sp_values, name="sp_values")
y = ops.convert_to_tensor(y, name="y")
x_dtype = sp_values.dtype.base_dtype
y_dtype = y.dtype.base_dtype
if x_dtype != y_dtype:
raise TypeError("x and y must have the same dtype, got %r != %r" %
(x_dtype, y_dtype))
try:
dtype = _TRUEDIV_TABLE[x_dtype]
except KeyError:
raise TypeError("Invalid dtype %r in __truediv__" % x_dtype)
if dtype is not None:
sp_values = cast(sp_values, dtype)
y = cast(y, dtype)
return gen_sparse_ops.sparse_dense_cwise_div(
sp_indices, sp_values, sp_shape, y, name=name)
def _truediv_python3(x, y, name=None):
with ops.name_scope(name, "truediv", [x, y]) as name:
x = ops.convert_to_tensor(x, name="x")
y = ops.convert_to_tensor(y, name="y")
x_dtype = x.dtype.base_dtype
y_dtype = y.dtype.base_dtype
if x_dtype != y_dtype:
raise TypeError("x and y must have the same dtype, got %r != %r" %
(x_dtype, y_dtype))
try:
dtype = _TRUEDIV_TABLE[x_dtype]
except KeyError:
raise TypeError("Invalid dtype %r in __truediv__" % x_dtype)
if dtype is not None:
x = cast(x, dtype)
y = cast(y, dtype)
return gen_math_ops.real_div(x, y, name=name)
def _div_python2(x, y, name=None):
"""Divide two values using Python 2 semantics. Used for Tensor.__div__.
Args:
x: `Tensor` numerator of real numeric type.
y: `Tensor` denominator of real numeric type.
name: A name for the operation (optional).
Returns:
`x / y` returns the quotient of x and y.
"""
with ops.name_scope(name, "div", [x, y]) as name:
x = ops.convert_to_tensor(x, name="x")
y = ops.convert_to_tensor(y, name="y", dtype=x.dtype.base_dtype)
x_dtype = x.dtype.base_dtype
y_dtype = y.dtype.base_dtype
if x_dtype != y_dtype:
raise TypeError("x and y must have the same dtype, got %r != %r" %
(x_dtype, y_dtype))
if x_dtype.is_floating or x_dtype.is_complex:
return gen_math_ops.real_div(x, y, name=name)
else:
return gen_math_ops.floor_div(x, y, name=name)
@tf_export("math.truediv", "truediv")
@dispatch.add_dispatch_support
def truediv(x, y, name=None):
"""Divides x / y elementwise (using Python 3 division operator semantics).
NOTE: Prefer using the Tensor operator or tf.divide which obey Python
division operator semantics.
This function forces Python 3 division operator semantics where all integer
arguments are cast to floating types first. This op is generated by normal
`x / y` division in Python 3 and in Python 2.7 with
`from __future__ import division`. If you want integer division that rounds
down, use `x // y` or `tf.math.floordiv`.
`x` and `y` must have the same numeric type. If the inputs are floating
point, the output will have the same type. If the inputs are integral, the
inputs are cast to `float32` for `int8` and `int16` and `float64` for `int32`
and `int64` (matching the behavior of Numpy).
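  For example (an illustrative sketch; integer inputs are cast to a floating
  dtype as described above, so the commented result is `float64` here):
  ```python
  x = tf.constant([1, 2, 3])  # int32
  y = tf.constant([2, 2, 2])  # int32
  tf.math.truediv(x, y)  # [0.5, 1.0, 1.5]
  ```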
Args:
x: `Tensor` numerator of numeric type.
y: `Tensor` denominator of numeric type.
name: A name for the operation (optional).
Returns:
`x / y` evaluated in floating point.
Raises:
TypeError: If `x` and `y` have different dtypes.
"""
return _truediv_python3(x, y, name)
@deprecation.deprecated(
date=None,
instructions="Deprecated in favor of operator or tf.math.divide.")
@tf_export(v1=["div"])
def div(x, y, name=None):
"""Divides x / y elementwise (using Python 2 division operator semantics).
NOTE: Prefer using the Tensor division operator or tf.divide which obey Python
division operator semantics.
  This function divides `x` by `y`, forcing Python 2.7 semantics. That is,
if one of `x` or `y` is a float, then the result will be a float.
Otherwise, the output will be an integer type. Flooring semantics are used
for integer division.
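  For example (an illustrative sketch; integer inputs use flooring division,
  while floating point inputs use true division):
  ```python
  tf.div(7, 2)      # 3
  tf.div(7.0, 2.0)  # 3.5
  ```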
Args:
x: `Tensor` numerator of real numeric type.
y: `Tensor` denominator of real numeric type.
name: A name for the operation (optional).
Returns:
`x / y` returns the quotient of x and y.
"""
return _div_python2(x, y, name)
@tf_export("div_no_nan")
@dispatch.add_dispatch_support
def div_no_nan(x, y, name=None):
"""Computes an unsafe divide which returns 0 if the y is zero.
Args:
x: A `Tensor`. Must be one of the following types: `float32`, `float64`.
y: A `Tensor` whose dtype is compatible with `x`.
name: A name for the operation (optional).
Returns:
The element-wise value of the x divided by y.
"""
with ops.name_scope(name, "div_no_nan", [x, y]) as name:
x = ops.convert_to_tensor(x, name="x")
y = ops.convert_to_tensor(y, name="y", dtype=x.dtype.base_dtype)
x_dtype = x.dtype.base_dtype
y_dtype = y.dtype.base_dtype
if x_dtype != y_dtype:
raise TypeError("x and y must have the same dtype, got %r != %r" %
(x_dtype, y_dtype))
return gen_math_ops.div_no_nan(x, y, name=name)
# TODO(aselle): This should be removed
mod = gen_math_ops.floor_mod
# TODO(aselle): Deprecate this once all internal functionality uses
# tf.truncatediv
@tf_export("math.floordiv", v1=["math.floordiv", "floordiv"])
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints("floordiv")
def floordiv(x, y, name=None):
"""Divides `x / y` elementwise, rounding toward the most negative integer.
The same as `tf.div(x,y)` for integers, but uses `tf.floor(tf.div(x,y))` for
floating point arguments so that the result is always an integer (though
possibly an integer represented as floating point). This op is generated by
`x // y` floor division in Python 3 and in Python 2.7 with
`from __future__ import division`.
`x` and `y` must have the same type, and the result will have the same type
as well.
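  For example (an illustrative sketch; note the rounding toward negative
  infinity for negative quotients):
  ```python
  tf.math.floordiv(7, 2)      # 3
  tf.math.floordiv(-7, 2)     # -4
  tf.math.floordiv(7.0, 2.0)  # 3.0
  ```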
Args:
x: `Tensor` numerator of real numeric type.
y: `Tensor` denominator of real numeric type.
name: A name for the operation (optional).
Returns:
`x / y` rounded down.
Raises:
TypeError: If the inputs are complex.
"""
with ops.name_scope(name, "floordiv", [x, y]) as name:
return gen_math_ops.floor_div(x, y, name=name)
realdiv = gen_math_ops.real_div
truncatediv = gen_math_ops.truncate_div
# TODO(aselle): Rename this to floordiv when we can.
floor_div = gen_math_ops.floor_div
truncatemod = gen_math_ops.truncate_mod
floormod = gen_math_ops.floor_mod
def _mul_dispatch(x, y, name=None):
"""Dispatches cwise mul for "Dense*Dense" and "Dense*Sparse"."""
is_tensor_y = isinstance(y, ops.Tensor)
if is_tensor_y:
return gen_math_ops.mul(x, y, name=name)
else:
assert isinstance(y, sparse_tensor.SparseTensor) # Case: Dense * Sparse.
new_vals = gen_sparse_ops.sparse_dense_cwise_mul(y.indices, y.values,
y.dense_shape, x, name)
return sparse_tensor.SparseTensor(y.indices, new_vals, y.dense_shape)
# NOTE(aselle): When integer division is added for sparse_dense_cwise,
# div, truediv, and floordiv should be delegated appropriately for
# Python semantics, analogous to dense cwise tensor operations.
_OverrideBinaryOperatorHelper(gen_sparse_ops.sparse_dense_cwise_div, "div",
sparse_tensor.SparseTensor)
_OverrideBinaryOperatorHelper(_sparse_dense_truediv, "truediv",
sparse_tensor.SparseTensor)
_OverrideBinaryOperatorHelper(gen_sparse_ops.sparse_dense_cwise_mul, "mul",
sparse_tensor.SparseTensor)
_OverrideBinaryOperatorHelper(gen_math_ops.add, "add")
_OverrideBinaryOperatorHelper(gen_math_ops.sub, "sub")
_OverrideBinaryOperatorHelper(_mul_dispatch, "mul")
_OverrideBinaryOperatorHelper(_div_python2, "div")
_OverrideBinaryOperatorHelper(_truediv_python3, "truediv")
_OverrideBinaryOperatorHelper(floordiv, "floordiv")
_OverrideBinaryOperatorHelper(gen_math_ops.floor_mod, "mod")
_OverrideBinaryOperatorHelper(pow, "pow")
@tf_export("math.logical_xor", v1=["math.logical_xor", "logical_xor"])
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints("logical_xor")
def logical_xor(x, y, name="LogicalXor"):
"""x ^ y = (x | y) & ~(x & y)."""
# TODO(alemi) Make this a cwise op if people end up relying on it.
return gen_math_ops.logical_and(
gen_math_ops.logical_or(x, y),
gen_math_ops.logical_not(gen_math_ops.logical_and(x, y)),
name=name)
_OverrideBinaryOperatorHelper(gen_math_ops.logical_and, "and")
_OverrideBinaryOperatorHelper(gen_math_ops.logical_or, "or")
_OverrideBinaryOperatorHelper(logical_xor, "xor")
ops.Tensor._override_operator("__lt__", gen_math_ops.less)
ops.Tensor._override_operator("__le__", gen_math_ops.less_equal)
ops.Tensor._override_operator("__gt__", gen_math_ops.greater)
ops.Tensor._override_operator("__ge__", gen_math_ops.greater_equal)
@tf_export("range")
def range(start, limit=None, delta=1, dtype=None, name="range"): # pylint: disable=redefined-builtin
"""Creates a sequence of numbers.
Creates a sequence of numbers that begins at `start` and extends by
increments of `delta` up to but not including `limit`.
The dtype of the resulting tensor is inferred from the inputs unless
it is provided explicitly.
Like the Python builtin `range`, `start` defaults to 0, so that
`range(n) = range(0, n)`.
For example:
```python
start = 3
limit = 18
delta = 3
tf.range(start, limit, delta) # [3, 6, 9, 12, 15]
start = 3
limit = 1
delta = -0.5
tf.range(start, limit, delta) # [3, 2.5, 2, 1.5]
limit = 5
tf.range(limit) # [0, 1, 2, 3, 4]
```
Args:
start: A 0-D `Tensor` (scalar). Acts as first entry in the range if
`limit` is not None; otherwise, acts as range limit and first entry
defaults to 0.
limit: A 0-D `Tensor` (scalar). Upper limit of sequence,
exclusive. If None, defaults to the value of `start` while the first
entry of the range defaults to 0.
delta: A 0-D `Tensor` (scalar). Number that increments
`start`. Defaults to 1.
dtype: The type of the elements of the resulting tensor.
name: A name for the operation. Defaults to "range".
Returns:
    A 1-D `Tensor` of type `dtype`.
@compatibility(numpy)
Equivalent to np.arange
@end_compatibility
"""
if limit is None:
start, limit = 0, start
with ops.name_scope(name, "Range", [start, limit, delta]) as name:
start = ops.convert_to_tensor(start, dtype=dtype, name="start")
limit = ops.convert_to_tensor(limit, dtype=dtype, name="limit")
delta = ops.convert_to_tensor(delta, dtype=dtype, name="delta")
# infer dtype if not explicitly provided
if dtype is None:
dtype_hierarchy = [
dtypes.int32, dtypes.int64, dtypes.float32, dtypes.float64
]
assert all(arg.dtype in dtype_hierarchy for arg in [start, limit, delta])
inferred_dtype = max(
[arg.dtype for arg in [start, limit, delta]],
key=dtype_hierarchy.index)
start = cast(start, inferred_dtype)
limit = cast(limit, inferred_dtype)
delta = cast(delta, inferred_dtype)
return gen_math_ops._range(start, limit, delta, name=name)
# Reduction operations
def _ReductionDims(x, axis, reduction_indices=None): # pylint: disable=invalid-name
"""Returns range(0, rank(x)) if reduction_indices is None."""
# TODO(aselle): Remove this after deprecation
if reduction_indices is not None:
if axis is not None:
raise ValueError("Can't specify both axis' and 'reduction_indices'.")
axis = reduction_indices
if axis is not None:
return axis
else:
# Fast path: avoid creating Rank and Range ops if ndims is known.
rank = common_shapes.rank(x)
if rank is not None:
return constant_op.constant(np.arange(rank), dtype=dtypes.int32)
if (isinstance(x, sparse_tensor.SparseTensor) and
x.dense_shape.shape.is_fully_defined()):
rank = x.dense_shape.shape.dims[0].value # sparse.dense_shape is 1-D.
return constant_op.constant(np.arange(rank), dtype=dtypes.int32)
# Otherwise, we rely on Range and Rank to do the right thing at run-time.
return range(0, array_ops.rank(x))
def _may_reduce_to_scalar(keepdims, axis, output):
"""Set a reduction's output shape to be a scalar if we are certain."""
if not common_shapes.has_fully_defined_shape(output) and (not keepdims) and (
axis is None):
output.set_shape(())
return output
@tf_export(v1=["math.reduce_sum", "reduce_sum"])
@deprecation.deprecated_args(
None, "keep_dims is deprecated, use keepdims instead", "keep_dims")
def reduce_sum_v1(input_tensor,
axis=None,
keepdims=None,
name=None,
reduction_indices=None,
keep_dims=None):
"""Computes the sum of elements across dimensions of a tensor.
Reduces `input_tensor` along the dimensions given in `axis`.
Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
entry in `axis`. If `keepdims` is true, the reduced dimensions
are retained with length 1.
If `axis` is None, all dimensions are reduced, and a
tensor with a single element is returned.
For example:
```python
x = tf.constant([[1, 1, 1], [1, 1, 1]])
tf.reduce_sum(x) # 6
tf.reduce_sum(x, 0) # [2, 2, 2]
tf.reduce_sum(x, 1) # [3, 3]
tf.reduce_sum(x, 1, keepdims=True) # [[3], [3]]
tf.reduce_sum(x, [0, 1]) # 6
```
Args:
input_tensor: The tensor to reduce. Should have numeric type.
axis: The dimensions to reduce. If `None` (the default),
reduces all dimensions. Must be in the range
`[-rank(input_tensor), rank(input_tensor))`.
keepdims: If true, retains reduced dimensions with length 1.
name: A name for the operation (optional).
reduction_indices: The old (deprecated) name for axis.
keep_dims: Deprecated alias for `keepdims`.
Returns:
The reduced tensor, of the same dtype as the input_tensor.
@compatibility(numpy)
  Equivalent to np.sum, apart from the fact that numpy upcasts uint8 and int32
  to int64 while tensorflow returns the same dtype as the input.
@end_compatibility
"""
axis = deprecation.deprecated_argument_lookup(
"axis", axis, "reduction_indices", reduction_indices)
keepdims = deprecation.deprecated_argument_lookup("keepdims", keepdims,
"keep_dims", keep_dims)
return reduce_sum(input_tensor, axis, keepdims, name)
@tf_export("math.reduce_sum", "reduce_sum", v1=[])
@dispatch.add_dispatch_support
def reduce_sum(input_tensor, axis=None, keepdims=False, name=None):
"""Computes the sum of elements across dimensions of a tensor.
Reduces `input_tensor` along the dimensions given in `axis`.
Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
entry in `axis`. If `keepdims` is true, the reduced dimensions
are retained with length 1.
If `axis` is None, all dimensions are reduced, and a
tensor with a single element is returned.
For example:
```python
x = tf.constant([[1, 1, 1], [1, 1, 1]])
tf.reduce_sum(x) # 6
tf.reduce_sum(x, 0) # [2, 2, 2]
tf.reduce_sum(x, 1) # [3, 3]
tf.reduce_sum(x, 1, keepdims=True) # [[3], [3]]
tf.reduce_sum(x, [0, 1]) # 6
```
Args:
input_tensor: The tensor to reduce. Should have numeric type.
axis: The dimensions to reduce. If `None` (the default), reduces all
dimensions. Must be in the range `[-rank(input_tensor),
rank(input_tensor))`.
keepdims: If true, retains reduced dimensions with length 1.
name: A name for the operation (optional).
Returns:
The reduced tensor, of the same dtype as the input_tensor.
@compatibility(numpy)
  Equivalent to np.sum, apart from the fact that numpy upcasts uint8 and int32
  to int64 while tensorflow returns the same dtype as the input.
@end_compatibility
"""
keepdims = False if keepdims is None else keepdims
return _may_reduce_to_scalar(
keepdims, axis,
gen_math_ops._sum(
input_tensor, _ReductionDims(input_tensor, axis), keepdims,
name=name))
@tf_export(v1=["math.count_nonzero", "count_nonzero"])
@deprecation.deprecated_args(
None, "keep_dims is deprecated, use keepdims instead", "keep_dims")
@deprecation.deprecated_args(
None, "reduction_indices is deprecated, use axis instead", "axis")
def count_nonzero(input_tensor,
axis=None,
keepdims=None,
dtype=dtypes.int64,
name=None,
reduction_indices=None,
keep_dims=None):
"""Computes number of nonzero elements across dimensions of a tensor.
Reduces `input_tensor` along the dimensions given in `axis`.
Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
entry in `axis`. If `keepdims` is true, the reduced dimensions
are retained with length 1.
If `axis` has no entries, all dimensions are reduced, and a
tensor with a single element is returned.
**NOTE** Floating point comparison to zero is done by exact floating point
equality check. Small values are **not** rounded to zero for purposes of
the nonzero check.
For example:
```python
x = tf.constant([[0, 1, 0], [1, 1, 0]])
tf.count_nonzero(x) # 3
tf.count_nonzero(x, 0) # [1, 2, 0]
tf.count_nonzero(x, 1) # [1, 2]
tf.count_nonzero(x, 1, keepdims=True) # [[1], [2]]
tf.count_nonzero(x, [0, 1]) # 3
```
  **NOTE** Strings are compared against the zero-length empty string `""`. Any
  string with a size greater than zero is considered nonzero.
For example:
```python
x = tf.constant(["", "a", " ", "b", ""])
tf.count_nonzero(x) # 3, with "a", " ", and "b" as nonzero strings.
```
Args:
input_tensor: The tensor to reduce. Should be of numeric type, `bool`,
or `string`.
axis: The dimensions to reduce. If `None` (the default),
reduces all dimensions. Must be in the range
`[-rank(input_tensor), rank(input_tensor))`.
keepdims: If true, retains reduced dimensions with length 1.
dtype: The output dtype; defaults to `tf.int64`.
name: A name for the operation (optional).
reduction_indices: The old (deprecated) name for axis.
keep_dims: Deprecated alias for `keepdims`.
Returns:
The reduced tensor (number of nonzero values).
"""
keepdims = deprecation.deprecated_argument_lookup("keepdims", keepdims,
"keep_dims", keep_dims)
axis = deprecation.deprecated_argument_lookup(
"axis", axis,
"reduction_indices", reduction_indices
)
return count_nonzero_v2(input_tensor, axis, keepdims, dtype, name)
@tf_export("math.count_nonzero", v1=[])
def count_nonzero_v2(input, # pylint: disable=redefined-builtin
axis=None,
keepdims=None,
dtype=dtypes.int64,
name=None):
"""Computes number of nonzero elements across dimensions of a tensor.
Reduces `input` along the dimensions given in `axis`.
Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
entry in `axis`. If `keepdims` is true, the reduced dimensions
are retained with length 1.
If `axis` has no entries, all dimensions are reduced, and a
tensor with a single element is returned.
**NOTE** Floating point comparison to zero is done by exact floating point
equality check. Small values are **not** rounded to zero for purposes of
the nonzero check.
For example:
```python
x = tf.constant([[0, 1, 0], [1, 1, 0]])
tf.count_nonzero(x) # 3
tf.count_nonzero(x, 0) # [1, 2, 0]
tf.count_nonzero(x, 1) # [1, 2]
tf.count_nonzero(x, 1, keepdims=True) # [[1], [2]]
tf.count_nonzero(x, [0, 1]) # 3
```
  **NOTE** Strings are compared against the zero-length empty string `""`. Any
  string with a size greater than zero is considered nonzero.
For example:
```python
x = tf.constant(["", "a", " ", "b", ""])
tf.count_nonzero(x) # 3, with "a", " ", and "b" as nonzero strings.
```
Args:
input: The tensor to reduce. Should be of numeric type, `bool`,
or `string`.
axis: The dimensions to reduce. If `None` (the default),
reduces all dimensions. Must be in the range
`[-rank(input), rank(input))`.
keepdims: If true, retains reduced dimensions with length 1.
dtype: The output dtype; defaults to `tf.int64`.
name: A name for the operation (optional).
Returns:
The reduced tensor (number of nonzero values).
"""
if keepdims is None:
keepdims = False
with ops.name_scope(name, "count_nonzero", [input]):
input = ops.convert_to_tensor(input, name="input")
# A scalar of 'zero' is enough as `not_equal` will broadcast.
zero = array_ops.zeros([], dtype=input.dtype)
return cast(
reduce_sum(
# int64 reduction happens on GPU
cast(gen_math_ops.not_equal(input, zero), dtypes.int64),
axis=axis,
keepdims=keepdims),
dtype=dtype)
@tf_export(v1=["math.reduce_mean", "reduce_mean"])
def reduce_mean_v1(input_tensor,
axis=None,
keepdims=None,
name=None,
reduction_indices=None,
keep_dims=None):
"""Computes the mean of elements across dimensions of a tensor.
Reduces `input_tensor` along the dimensions given in `axis`.
Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
entry in `axis`. If `keepdims` is true, the reduced dimensions
are retained with length 1.
If `axis` is None, all dimensions are reduced, and a
tensor with a single element is returned.
For example:
```python
x = tf.constant([[1., 1.], [2., 2.]])
tf.reduce_mean(x) # 1.5
tf.reduce_mean(x, 0) # [1.5, 1.5]
tf.reduce_mean(x, 1) # [1., 2.]
```
Args:
input_tensor: The tensor to reduce. Should have numeric type.
axis: The dimensions to reduce. If `None` (the default),
reduces all dimensions. Must be in the range
`[-rank(input_tensor), rank(input_tensor))`.
keepdims: If true, retains reduced dimensions with length 1.
name: A name for the operation (optional).
reduction_indices: The old (deprecated) name for axis.
keep_dims: Deprecated alias for `keepdims`.
Returns:
The reduced tensor.
@compatibility(numpy)
Equivalent to np.mean
Please note that `np.mean` has a `dtype` parameter that could be used to
specify the output type. By default this is `dtype=float64`. On the other
hand, `tf.reduce_mean` has an aggressive type inference from `input_tensor`,
for example:
```python
x = tf.constant([1, 0, 1, 0])
tf.reduce_mean(x) # 0
y = tf.constant([1., 0., 1., 0.])
tf.reduce_mean(y) # 0.5
```
@end_compatibility
"""
axis = deprecation.deprecated_argument_lookup(
"axis", axis, "reduction_indices", reduction_indices)
keepdims = deprecation.deprecated_argument_lookup("keepdims", keepdims,
"keep_dims", keep_dims)
return reduce_mean(input_tensor, axis, keepdims, name)
@tf_export("math.reduce_mean", "reduce_mean", v1=[])
@dispatch.add_dispatch_support
def reduce_mean(input_tensor, axis=None, keepdims=False, name=None):
"""Computes the mean of elements across dimensions of a tensor.
Reduces `input_tensor` along the dimensions given in `axis`.
Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
entry in `axis`. If `keepdims` is true, the reduced dimensions
are retained with length 1.
If `axis` is None, all dimensions are reduced, and a
tensor with a single element is returned.
For example:
```python
x = tf.constant([[1., 1.], [2., 2.]])
tf.reduce_mean(x) # 1.5
tf.reduce_mean(x, 0) # [1.5, 1.5]
tf.reduce_mean(x, 1) # [1., 2.]
```
Args:
input_tensor: The tensor to reduce. Should have numeric type.
axis: The dimensions to reduce. If `None` (the default), reduces all
dimensions. Must be in the range `[-rank(input_tensor),
rank(input_tensor))`.
keepdims: If true, retains reduced dimensions with length 1.
name: A name for the operation (optional).
Returns:
The reduced tensor.
@compatibility(numpy)
Equivalent to np.mean
Please note that `np.mean` has a `dtype` parameter that could be used to
specify the output type. By default this is `dtype=float64`. On the other
hand, `tf.reduce_mean` has an aggressive type inference from `input_tensor`,
for example:
```python
x = tf.constant([1, 0, 1, 0])
tf.reduce_mean(x) # 0
y = tf.constant([1., 0., 1., 0.])
tf.reduce_mean(y) # 0.5
```
@end_compatibility
"""
keepdims = False if keepdims is None else keepdims
return _may_reduce_to_scalar(
keepdims, axis,
gen_math_ops.mean(
input_tensor, _ReductionDims(input_tensor, axis), keepdims,
name=name))
@tf_export("math.reduce_variance")
def reduce_variance(input_tensor, axis=None, keepdims=False, name=None):
"""Computes the variance of elements across dimensions of a tensor.
Reduces `input_tensor` along the dimensions given in `axis`.
Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
entry in `axis`. If `keepdims` is true, the reduced dimensions
are retained with length 1.
If `axis` is None, all dimensions are reduced, and a
tensor with a single element is returned.
For example:
```python
x = tf.constant([[1., 2.], [3., 4.]])
tf.reduce_variance(x) # 1.25
tf.reduce_variance(x, 0) # [1., 1.]
tf.reduce_variance(x, 1) # [0.25, 0.25]
```
Args:
input_tensor: The tensor to reduce. Should have numeric type.
axis: The dimensions to reduce. If `None` (the default), reduces all
dimensions. Must be in the range `[-rank(input_tensor),
rank(input_tensor))`.
keepdims: If true, retains reduced dimensions with length 1.
name: A name scope for the associated operations (optional).
Returns:
The reduced tensor, of the same dtype as the input_tensor.
@compatibility(numpy)
Equivalent to np.var
Please note that `np.var` has a `dtype` parameter that could be used to
specify the output type. By default this is `dtype=float64`. On the other
  hand, `tf.reduce_variance` has an aggressive type inference from
  `input_tensor`.
@end_compatibility
"""
name = name if name else "reduce_variance"
with ops.name_scope(name):
means = reduce_mean(input_tensor, axis=axis, keepdims=True)
squared_deviations = gen_math_ops.square(input_tensor - means)
return reduce_mean(squared_deviations, axis=axis, keepdims=keepdims)
@tf_export("math.reduce_std")
def reduce_std(input_tensor, axis=None, keepdims=False, name=None):
"""Computes the standard deviation of elements across dimensions of a tensor.
Reduces `input_tensor` along the dimensions given in `axis`.
Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
entry in `axis`. If `keepdims` is true, the reduced dimensions
are retained with length 1.
If `axis` is None, all dimensions are reduced, and a
tensor with a single element is returned.
For example:
```python
x = tf.constant([[1., 2.], [3., 4.]])
tf.reduce_std(x) # 1.1180339887498949
tf.reduce_std(x, 0) # [1., 1.]
tf.reduce_std(x, 1) # [0.5, 0.5]
```
Args:
input_tensor: The tensor to reduce. Should have numeric type.
axis: The dimensions to reduce. If `None` (the default), reduces all
dimensions. Must be in the range `[-rank(input_tensor),
rank(input_tensor))`.
keepdims: If true, retains reduced dimensions with length 1.
name: A name scope for the associated operations (optional).
Returns:
The reduced tensor, of the same dtype as the input_tensor.
@compatibility(numpy)
Equivalent to np.std
Please note that `np.std` has a `dtype` parameter that could be used to
specify the output type. By default this is `dtype=float64`. On the other
  hand, `tf.reduce_std` has an aggressive type inference from `input_tensor`.
@end_compatibility
"""
name = name if name else "reduce_std"
with ops.name_scope(name):
variance = reduce_variance(input_tensor, axis=axis, keepdims=keepdims)
return gen_math_ops.sqrt(variance)
@tf_export("math.reduce_prod", "reduce_prod", v1=[])
@dispatch.add_dispatch_support
def reduce_prod(input_tensor, axis=None, keepdims=False, name=None):
"""Computes the product of elements across dimensions of a tensor.
Reduces `input_tensor` along the dimensions given in `axis`.
Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
entry in `axis`. If `keepdims` is true, the reduced dimensions
are retained with length 1.
If `axis` is None, all dimensions are reduced, and a
tensor with a single element is returned.
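  For example (an illustrative sketch; commented values show the computed
  results only):
  ```python
  x = tf.constant([[1., 2.], [3., 4.]])
  tf.reduce_prod(x)  # 24.0
  tf.reduce_prod(x, 0)  # [3., 8.]
  tf.reduce_prod(x, 1)  # [2., 12.]
  ```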
Args:
input_tensor: The tensor to reduce. Should have numeric type.
axis: The dimensions to reduce. If `None` (the default),
reduces all dimensions. Must be in the range
`[-rank(input_tensor), rank(input_tensor))`.
keepdims: If true, retains reduced dimensions with length 1.
name: A name for the operation (optional).
Returns:
The reduced tensor.
@compatibility(numpy)
Equivalent to np.prod
@end_compatibility
"""
keepdims = False if keepdims is None else keepdims
return _may_reduce_to_scalar(
keepdims, axis,
gen_math_ops.prod(
input_tensor, _ReductionDims(input_tensor, axis), keepdims,
name=name))
@tf_export(v1=["math.reduce_prod", "reduce_prod"])
@deprecation.deprecated_args(
None, "keep_dims is deprecated, use keepdims instead", "keep_dims")
def reduce_prod_v1(input_tensor,
axis=None,
keepdims=None,
name=None,
reduction_indices=None,
keep_dims=None):
"""Computes the product of elements across dimensions of a tensor.
Reduces `input_tensor` along the dimensions given in `axis`.
Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
entry in `axis`. If `keepdims` is true, the reduced dimensions
are retained with length 1.
If `axis` is None, all dimensions are reduced, and a
tensor with a single element is returned.
Args:
input_tensor: The tensor to reduce. Should have numeric type.
axis: The dimensions to reduce. If `None` (the default), reduces all
dimensions. Must be in the range `[-rank(input_tensor),
rank(input_tensor))`.
keepdims: If true, retains reduced dimensions with length 1.
name: A name for the operation (optional).
reduction_indices: The old (deprecated) name for axis.
keep_dims: Deprecated alias for `keepdims`.
Returns:
The reduced tensor.
@compatibility(numpy)
Equivalent to np.prod
@end_compatibility
"""
axis = deprecation.deprecated_argument_lookup(
"axis", axis, "reduction_indices", reduction_indices)
keepdims = deprecation.deprecated_argument_lookup("keepdims", keepdims,
"keep_dims", keep_dims)
return reduce_prod(input_tensor, axis, keepdims, name)
@tf_export(v1=["math.reduce_min", "reduce_min"])
@deprecation.deprecated_args(
None, "keep_dims is deprecated, use keepdims instead", "keep_dims")
def reduce_min_v1(input_tensor,
axis=None,
keepdims=None,
name=None,
reduction_indices=None,
keep_dims=None):
"""Computes the minimum of elements across dimensions of a tensor.
Reduces `input_tensor` along the dimensions given in `axis`.
Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
entry in `axis`. If `keepdims` is true, the reduced dimensions
are retained with length 1.
If `axis` is None, all dimensions are reduced, and a
tensor with a single element is returned.
Args:
input_tensor: The tensor to reduce. Should have real numeric type.
axis: The dimensions to reduce. If `None` (the default), reduces all
dimensions. Must be in the range `[-rank(input_tensor),
rank(input_tensor))`.
keepdims: If true, retains reduced dimensions with length 1.
name: A name for the operation (optional).
reduction_indices: The old (deprecated) name for axis.
keep_dims: Deprecated alias for `keepdims`.
Returns:
The reduced tensor.
@compatibility(numpy)
Equivalent to np.min
@end_compatibility
"""
axis = deprecation.deprecated_argument_lookup(
"axis", axis, "reduction_indices", reduction_indices)
keepdims = deprecation.deprecated_argument_lookup("keepdims", keepdims,
"keep_dims", keep_dims)
return reduce_min(input_tensor, axis, keepdims, name)
@tf_export("math.reduce_min", "reduce_min", v1=[])
@dispatch.add_dispatch_support
def reduce_min(input_tensor, axis=None, keepdims=False, name=None):
"""Computes the minimum of elements across dimensions of a tensor.
Reduces `input_tensor` along the dimensions given in `axis`.
Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
entry in `axis`. If `keepdims` is true, the reduced dimensions
are retained with length 1.
If `axis` is None, all dimensions are reduced, and a
tensor with a single element is returned.
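  For example (an illustrative sketch; commented values show the computed
  results only):
  ```python
  x = tf.constant([[1., 2.], [3., 4.]])
  tf.reduce_min(x)  # 1.0
  tf.reduce_min(x, 0)  # [1., 2.]
  tf.reduce_min(x, 1)  # [1., 3.]
  ```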
Args:
input_tensor: The tensor to reduce. Should have real numeric type.
axis: The dimensions to reduce. If `None` (the default), reduces all
dimensions. Must be in the range `[-rank(input_tensor),
rank(input_tensor))`.
keepdims: If true, retains reduced dimensions with length 1.
name: A name for the operation (optional).
Returns:
The reduced tensor.
@compatibility(numpy)
Equivalent to np.min
@end_compatibility
"""
keepdims = False if keepdims is None else keepdims
return _may_reduce_to_scalar(
keepdims, axis,
gen_math_ops._min(
input_tensor, _ReductionDims(input_tensor, axis), keepdims,
name=name))
@tf_export(v1=["math.reduce_max", "reduce_max"])
@deprecation.deprecated_args(
None, "keep_dims is deprecated, use keepdims instead", "keep_dims")
def reduce_max_v1(input_tensor,
axis=None,
keepdims=None,
name=None,
reduction_indices=None,
keep_dims=None):
"""Computes the maximum of elements across dimensions of a tensor.
Reduces `input_tensor` along the dimensions given in `axis`.
Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
entry in `axis`. If `keepdims` is true, the reduced dimensions
are retained with length 1.
If `axis` is None, all dimensions are reduced, and a
tensor with a single element is returned.
Args:
input_tensor: The tensor to reduce. Should have real numeric type.
axis: The dimensions to reduce. If `None` (the default),
reduces all dimensions. Must be in the range
`[-rank(input_tensor), rank(input_tensor))`.
keepdims: If true, retains reduced dimensions with length 1.
name: A name for the operation (optional).
reduction_indices: The old (deprecated) name for axis.
keep_dims: Deprecated alias for `keepdims`.
Returns:
The reduced tensor.
@compatibility(numpy)
Equivalent to np.max
@end_compatibility
"""
axis = deprecation.deprecated_argument_lookup(
"axis", axis, "reduction_indices", reduction_indices)
keepdims = deprecation.deprecated_argument_lookup("keepdims", keepdims,
"keep_dims", keep_dims)
return reduce_max(input_tensor, axis, keepdims, name)
@tf_export("math.reduce_max", "reduce_max", v1=[])
@dispatch.add_dispatch_support
def reduce_max(input_tensor, axis=None, keepdims=False, name=None):
"""Computes the maximum of elements across dimensions of a tensor.
Reduces `input_tensor` along the dimensions given in `axis`.
Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
entry in `axis`. If `keepdims` is true, the reduced dimensions
are retained with length 1.
If `axis` is None, all dimensions are reduced, and a
tensor with a single element is returned.
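  For example (an illustrative sketch; commented values show the computed
  results only):
  ```python
  x = tf.constant([[1., 2.], [3., 4.]])
  tf.reduce_max(x)  # 4.0
  tf.reduce_max(x, 0)  # [3., 4.]
  tf.reduce_max(x, 1)  # [2., 4.]
  ```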
Args:
input_tensor: The tensor to reduce. Should have real numeric type.
axis: The dimensions to reduce. If `None` (the default), reduces all
dimensions. Must be in the range `[-rank(input_tensor),
rank(input_tensor))`.
keepdims: If true, retains reduced dimensions with length 1.
name: A name for the operation (optional).
Returns:
The reduced tensor.
@compatibility(numpy)
Equivalent to np.max
@end_compatibility
"""
keepdims = False if keepdims is None else keepdims
return _may_reduce_to_scalar(
keepdims, axis,
gen_math_ops._max(
input_tensor, _ReductionDims(input_tensor, axis), keepdims,
name=name))
@tf_export(v1=["math.reduce_all", "reduce_all"])
@deprecation.deprecated_args(
None, "keep_dims is deprecated, use keepdims instead", "keep_dims")
def reduce_all_v1(input_tensor,
axis=None,
keepdims=None,
name=None,
reduction_indices=None,
keep_dims=None):
"""Computes the "logical and" of elements across dimensions of a tensor.
Reduces `input_tensor` along the dimensions given in `axis`.
Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
entry in `axis`. If `keepdims` is true, the reduced dimensions
are retained with length 1.
If `axis` is None, all dimensions are reduced, and a
tensor with a single element is returned.
For example:
```python
x = tf.constant([[True, True], [False, False]])
tf.reduce_all(x) # False
tf.reduce_all(x, 0) # [False, False]
tf.reduce_all(x, 1) # [True, False]
```
Args:
input_tensor: The boolean tensor to reduce.
axis: The dimensions to reduce. If `None` (the default), reduces all
dimensions. Must be in the range `[-rank(input_tensor),
rank(input_tensor))`.
keepdims: If true, retains reduced dimensions with length 1.
name: A name for the operation (optional).
reduction_indices: The old (deprecated) name for axis.
keep_dims: Deprecated alias for `keepdims`.
Returns:
The reduced tensor.
@compatibility(numpy)
Equivalent to np.all
@end_compatibility
"""
axis = deprecation.deprecated_argument_lookup(
"axis", axis, "reduction_indices", reduction_indices)
keepdims = deprecation.deprecated_argument_lookup("keepdims", keepdims,
"keep_dims", keep_dims)
return reduce_all(input_tensor, axis, keepdims, name)
@tf_export("reduce_all", "math.reduce_all", v1=[])
@dispatch.add_dispatch_support
def reduce_all(input_tensor, axis=None, keepdims=False, name=None):
"""Computes the "logical and" of elements across dimensions of a tensor.
Reduces `input_tensor` along the dimensions given in `axis`.
Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
entry in `axis`. If `keepdims` is true, the reduced dimensions
are retained with length 1.
If `axis` is None, all dimensions are reduced, and a
tensor with a single element is returned.
For example:
```python
x = tf.constant([[True, True], [False, False]])
tf.reduce_all(x) # False
tf.reduce_all(x, 0) # [False, False]
tf.reduce_all(x, 1) # [True, False]
```
Args:
input_tensor: The boolean tensor to reduce.
axis: The dimensions to reduce. If `None` (the default), reduces all
dimensions. Must be in the range `[-rank(input_tensor),
rank(input_tensor))`.
keepdims: If true, retains reduced dimensions with length 1.
name: A name for the operation (optional).
Returns:
The reduced tensor.
@compatibility(numpy)
Equivalent to np.all
@end_compatibility
"""
keepdims = False if keepdims is None else keepdims
return _may_reduce_to_scalar(
keepdims, axis,
gen_math_ops._all(
input_tensor, _ReductionDims(input_tensor, axis), keepdims,
name=name))
@tf_export(v1=["math.reduce_any", "reduce_any"])
@deprecation.deprecated_args(
None, "keep_dims is deprecated, use keepdims instead", "keep_dims")
def reduce_any_v1(input_tensor,
axis=None,
keepdims=None,
name=None,
reduction_indices=None,
keep_dims=None):
"""Computes the "logical or" of elements across dimensions of a tensor.
Reduces `input_tensor` along the dimensions given in `axis`.
Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
entry in `axis`. If `keepdims` is true, the reduced dimensions
are retained with length 1.
If `axis` is None, all dimensions are reduced, and a
tensor with a single element is returned.
For example:
```python
x = tf.constant([[True, True], [False, False]])
tf.reduce_any(x) # True
tf.reduce_any(x, 0) # [True, True]
tf.reduce_any(x, 1) # [True, False]
```
Args:
input_tensor: The boolean tensor to reduce.
axis: The dimensions to reduce. If `None` (the default), reduces all
dimensions. Must be in the range `[-rank(input_tensor),
rank(input_tensor))`.
keepdims: If true, retains reduced dimensions with length 1.
name: A name for the operation (optional).
reduction_indices: The old (deprecated) name for axis.
keep_dims: Deprecated alias for `keepdims`.
Returns:
The reduced tensor.
@compatibility(numpy)
Equivalent to np.any
@end_compatibility
"""
axis = deprecation.deprecated_argument_lookup(
"axis", axis, "reduction_indices", reduction_indices)
keepdims = deprecation.deprecated_argument_lookup("keepdims", keepdims,
"keep_dims", keep_dims)
return reduce_any(input_tensor, axis, keepdims, name)
@tf_export("math.reduce_any", "reduce_any", v1=[])
@dispatch.add_dispatch_support
def reduce_any(input_tensor, axis=None, keepdims=False, name=None):
"""Computes the "logical or" of elements across dimensions of a tensor.
Reduces `input_tensor` along the dimensions given in `axis`.
Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
entry in `axis`. If `keepdims` is true, the reduced dimensions
are retained with length 1.
If `axis` is None, all dimensions are reduced, and a
tensor with a single element is returned.
For example:
```python
x = tf.constant([[True, True], [False, False]])
tf.reduce_any(x) # True
tf.reduce_any(x, 0) # [True, True]
tf.reduce_any(x, 1) # [True, False]
```
Args:
input_tensor: The boolean tensor to reduce.
axis: The dimensions to reduce. If `None` (the default), reduces all
dimensions. Must be in the range `[-rank(input_tensor),
rank(input_tensor))`.
keepdims: If true, retains reduced dimensions with length 1.
name: A name for the operation (optional).
Returns:
The reduced tensor.
@compatibility(numpy)
Equivalent to np.any
@end_compatibility
"""
keepdims = False if keepdims is None else keepdims
return _may_reduce_to_scalar(
keepdims, axis,
gen_math_ops._any(
input_tensor, _ReductionDims(input_tensor, axis), keepdims,
name=name))
@tf_export(v1=["math.reduce_logsumexp", "reduce_logsumexp"])
@deprecation.deprecated_args(
None, "keep_dims is deprecated, use keepdims instead", "keep_dims")
def reduce_logsumexp_v1(input_tensor,
axis=None,
keepdims=None,
name=None,
reduction_indices=None,
keep_dims=None):
"""Computes log(sum(exp(elements across dimensions of a tensor))).
Reduces `input_tensor` along the dimensions given in `axis`.
Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
entry in `axis`. If `keepdims` is true, the reduced dimensions
are retained with length 1.
If `axis` has no entries, all dimensions are reduced, and a
tensor with a single element is returned.
This function is more numerically stable than log(sum(exp(input))). It avoids
overflows caused by taking the exp of large inputs and underflows caused by
taking the log of small inputs.
For example:
```python
x = tf.constant([[0., 0., 0.], [0., 0., 0.]])
tf.reduce_logsumexp(x) # log(6)
tf.reduce_logsumexp(x, 0) # [log(2), log(2), log(2)]
tf.reduce_logsumexp(x, 1) # [log(3), log(3)]
tf.reduce_logsumexp(x, 1, keepdims=True) # [[log(3)], [log(3)]]
tf.reduce_logsumexp(x, [0, 1]) # log(6)
```
Args:
input_tensor: The tensor to reduce. Should have numeric type.
axis: The dimensions to reduce. If `None` (the default), reduces all
dimensions. Must be in the range `[-rank(input_tensor),
rank(input_tensor))`.
keepdims: If true, retains reduced dimensions with length 1.
name: A name for the operation (optional).
reduction_indices: The old (deprecated) name for axis.
keep_dims: Deprecated alias for `keepdims`.
Returns:
The reduced tensor.
"""
axis = deprecation.deprecated_argument_lookup(
"axis", axis, "reduction_indices", reduction_indices)
keepdims = deprecation.deprecated_argument_lookup("keepdims", keepdims,
"keep_dims", keep_dims)
return reduce_logsumexp(input_tensor, axis, keepdims, name)
@tf_export("math.reduce_logsumexp", "reduce_logsumexp", v1=[])
def reduce_logsumexp(input_tensor, axis=None, keepdims=False, name=None):
"""Computes log(sum(exp(elements across dimensions of a tensor))).
Reduces `input_tensor` along the dimensions given in `axis`.
Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
entry in `axis`. If `keepdims` is true, the reduced dimensions
are retained with length 1.
If `axis` has no entries, all dimensions are reduced, and a
tensor with a single element is returned.
This function is more numerically stable than log(sum(exp(input))). It avoids
overflows caused by taking the exp of large inputs and underflows caused by
taking the log of small inputs.
For example:
```python
x = tf.constant([[0., 0., 0.], [0., 0., 0.]])
tf.reduce_logsumexp(x) # log(6)
tf.reduce_logsumexp(x, 0) # [log(2), log(2), log(2)]
tf.reduce_logsumexp(x, 1) # [log(3), log(3)]
tf.reduce_logsumexp(x, 1, keepdims=True) # [[log(3)], [log(3)]]
tf.reduce_logsumexp(x, [0, 1]) # log(6)
```
Args:
input_tensor: The tensor to reduce. Should have numeric type.
axis: The dimensions to reduce. If `None` (the default), reduces all
dimensions. Must be in the range `[-rank(input_tensor),
rank(input_tensor))`.
keepdims: If true, retains reduced dimensions with length 1.
name: A name for the operation (optional).
Returns:
The reduced tensor.
"""
keepdims = False if keepdims is None else keepdims
input_tensor = ops.convert_to_tensor(input_tensor)
with ops.name_scope(name, "ReduceLogSumExp", [input_tensor]) as name:
raw_max = reduce_max(
input_tensor,
axis=axis,
keepdims=True)
my_max = array_ops.stop_gradient(
array_ops.where(
gen_math_ops.is_finite(raw_max), raw_max,
array_ops.zeros_like(raw_max)))
result = gen_math_ops.log(
reduce_sum(
gen_math_ops.exp(gen_math_ops.sub(input_tensor, my_max)),
axis,
keepdims=keepdims))
if not keepdims:
my_max = array_ops.reshape(my_max, array_ops.shape(result))
result = gen_math_ops.add(result, my_max)
return _may_reduce_to_scalar(keepdims, axis, result)
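# Editorial sketch (not part of the original module; the helper name below is
# illustrative only): the max-shift performed above is what makes
# reduce_logsumexp numerically stable. A NumPy-only illustration:
def _logsumexp_stability_sketch():
  import numpy as np
  x = np.array([1000.0, 1000.0])
  naive = np.log(np.sum(np.exp(x)))                   # overflows to inf
  shift = np.max(x)
  stable = shift + np.log(np.sum(np.exp(x - shift)))  # ~1000.6931
  return naive, stable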
@tf_export("linalg.trace", v1=["linalg.trace", "trace"])
@deprecation.deprecated_endpoints("trace")
def trace(x, name=None):
"""Compute the trace of a tensor `x`.
`trace(x)` returns the sum along the main diagonal of each inner-most matrix
in x. If x is of rank `k` with shape `[I, J, K, ..., L, M, N]`, then output
is a tensor of rank `k-2` with dimensions `[I, J, K, ..., L]` where
  `output[i, j, k, ..., l] = trace(x[i, j, k, ..., l, :, :])`
For example:
```python
x = tf.constant([[1, 2], [3, 4]])
tf.linalg.trace(x) # 5
x = tf.constant([[1, 2, 3],
[4, 5, 6],
[7, 8, 9]])
tf.linalg.trace(x) # 15
x = tf.constant([[[1, 2, 3],
[4, 5, 6],
[7, 8, 9]],
[[-1, -2, -3],
[-4, -5, -6],
[-7, -8, -9]]])
tf.linalg.trace(x) # [15, -15]
```
Args:
x: tensor.
name: A name for the operation (optional).
Returns:
The trace of input tensor.
"""
with ops.name_scope(name, "Trace", [x]) as name:
x = ops.convert_to_tensor(x, name="x")
return reduce_sum(array_ops.matrix_diag_part(x), [-1], name=name)
@tf_export("linalg.matmul", "matmul")
def matmul(a,
b,
transpose_a=False,
transpose_b=False,
adjoint_a=False,
adjoint_b=False,
a_is_sparse=False,
b_is_sparse=False,
name=None):
"""Multiplies matrix `a` by matrix `b`, producing `a` * `b`.
The inputs must, following any transpositions, be tensors of rank >= 2
where the inner 2 dimensions specify valid matrix multiplication arguments,
and any further outer dimensions match.
Both matrices must be of the same type. The supported types are:
`float16`, `float32`, `float64`, `int32`, `complex64`, `complex128`.
Either matrix can be transposed or adjointed (conjugated and transposed) on
the fly by setting one of the corresponding flag to `True`. These are `False`
by default.
If one or both of the matrices contain a lot of zeros, a more efficient
multiplication algorithm can be used by setting the corresponding
`a_is_sparse` or `b_is_sparse` flag to `True`. These are `False` by default.
This optimization is only available for plain matrices (rank-2 tensors) with
datatypes `bfloat16` or `float32`.
For example:
```python
# 2-D tensor `a`
# [[1, 2, 3],
# [4, 5, 6]]
a = tf.constant([1, 2, 3, 4, 5, 6], shape=[2, 3])
# 2-D tensor `b`
# [[ 7, 8],
# [ 9, 10],
# [11, 12]]
b = tf.constant([7, 8, 9, 10, 11, 12], shape=[3, 2])
# `a` * `b`
# [[ 58, 64],
# [139, 154]]
c = tf.matmul(a, b)
# 3-D tensor `a`
# [[[ 1, 2, 3],
# [ 4, 5, 6]],
# [[ 7, 8, 9],
# [10, 11, 12]]]
a = tf.constant(np.arange(1, 13, dtype=np.int32),
shape=[2, 2, 3])
# 3-D tensor `b`
# [[[13, 14],
# [15, 16],
# [17, 18]],
# [[19, 20],
# [21, 22],
# [23, 24]]]
b = tf.constant(np.arange(13, 25, dtype=np.int32),
shape=[2, 3, 2])
# `a` * `b`
# [[[ 94, 100],
# [229, 244]],
# [[508, 532],
# [697, 730]]]
c = tf.matmul(a, b)
# Since python >= 3.5 the @ operator is supported (see PEP 465).
# In TensorFlow, it simply calls the `tf.matmul()` function, so the
# following lines are equivalent:
d = a @ b @ [[10.], [11.]]
d = tf.matmul(tf.matmul(a, b), [[10.], [11.]])
```
Args:
a: `Tensor` of type `float16`, `float32`, `float64`, `int32`, `complex64`,
`complex128` and rank > 1.
b: `Tensor` with same type and rank as `a`.
transpose_a: If `True`, `a` is transposed before multiplication.
transpose_b: If `True`, `b` is transposed before multiplication.
adjoint_a: If `True`, `a` is conjugated and transposed before
multiplication.
adjoint_b: If `True`, `b` is conjugated and transposed before
multiplication.
a_is_sparse: If `True`, `a` is treated as a sparse matrix.
b_is_sparse: If `True`, `b` is treated as a sparse matrix.
name: Name for the operation (optional).
Returns:
A `Tensor` of the same type as `a` and `b` where each inner-most matrix is
the product of the corresponding matrices in `a` and `b`, e.g. if all
transpose or adjoint attributes are `False`:
`output`[..., i, j] = sum_k (`a`[..., i, k] * `b`[..., k, j]),
for all indices i, j.
Note: This is matrix product, not element-wise product.
Raises:
ValueError: If transpose_a and adjoint_a, or transpose_b and adjoint_b
are both set to True.
"""
with ops.name_scope(name, "MatMul", [a, b]) as name:
if transpose_a and adjoint_a:
raise ValueError("Only one of transpose_a and adjoint_a can be True.")
if transpose_b and adjoint_b:
raise ValueError("Only one of transpose_b and adjoint_b can be True.")
if context.executing_eagerly():
if not isinstance(a, (ops.EagerTensor, _resource_variable_type)):
a = ops.convert_to_tensor(a, name="a")
if not isinstance(b, (ops.EagerTensor, _resource_variable_type)):
b = ops.convert_to_tensor(b, name="b")
else:
a = ops.convert_to_tensor(a, name="a")
b = ops.convert_to_tensor(b, name="b")
# TODO(apassos) remove _shape_tuple here when it is not needed.
a_shape = a._shape_tuple() # pylint: disable=protected-access
b_shape = b._shape_tuple() # pylint: disable=protected-access
if (not a_is_sparse and
not b_is_sparse) and ((a_shape is None or len(a_shape) > 2) and
(b_shape is None or len(b_shape) > 2)):
# BatchMatmul does not support transpose, so we conjugate the matrix and
# use adjoint instead. Conj() is a noop for real matrices.
if transpose_a:
a = conj(a)
adjoint_a = True
if transpose_b:
b = conj(b)
adjoint_b = True
return gen_math_ops.batch_mat_mul(
a, b, adj_x=adjoint_a, adj_y=adjoint_b, name=name)
# Neither matmul nor sparse_matmul support adjoint, so we conjugate
# the matrix and use transpose instead. Conj() is a noop for real
# matrices.
if adjoint_a:
a = conj(a)
transpose_a = True
if adjoint_b:
b = conj(b)
transpose_b = True
use_sparse_matmul = False
if a_is_sparse or b_is_sparse:
sparse_matmul_types = [dtypes.bfloat16, dtypes.float32]
use_sparse_matmul = (
a.dtype in sparse_matmul_types and b.dtype in sparse_matmul_types)
if ((a.dtype == dtypes.bfloat16 or b.dtype == dtypes.bfloat16) and
a.dtype != b.dtype):
# matmul currently doesn't handle mixed-precision inputs.
use_sparse_matmul = True
if use_sparse_matmul:
ret = sparse_matmul(
a,
b,
transpose_a=transpose_a,
transpose_b=transpose_b,
a_is_sparse=a_is_sparse,
b_is_sparse=b_is_sparse,
name=name)
# sparse_matmul always returns float32, even with
# bfloat16 inputs. This prevents us from configuring bfloat16 training.
# casting to bfloat16 also matches non-sparse matmul behavior better.
if a.dtype == dtypes.bfloat16 and b.dtype == dtypes.bfloat16:
ret = cast(ret, dtypes.bfloat16)
return ret
else:
return gen_math_ops.mat_mul(
a, b, transpose_a=transpose_a, transpose_b=transpose_b, name=name)
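# Editorial sketch (not part of the original module; `_matmul_adjoint_sketch`
# is a hypothetical name): the adjoint flags above are equivalent to an
# explicit conjugate followed by a transpose, which is what the rank-2 branch
# relies on.
def _matmul_adjoint_sketch():
  a = constant_op.constant([[1 + 2j, 3 - 1j], [0 + 1j, 2 + 0j]],
                           dtype=dtypes.complex64)
  b = constant_op.constant([[1 + 0j, 0 + 1j], [2 - 1j, 1 + 1j]],
                           dtype=dtypes.complex64)
  via_flag = matmul(a, b, adjoint_a=True)
  via_conj_transpose = matmul(array_ops.transpose(conj(a)), b)
  return via_flag, via_conj_transpose  # expected to be numerically identical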
@tf_export("linalg.matvec")
def matvec(a,
b,
transpose_a=False,
adjoint_a=False,
a_is_sparse=False,
b_is_sparse=False,
name=None):
"""Multiplies matrix `a` by vector `b`, producing `a` * `b`.
The matrix `a` must, following any transpositions, be a tensor of rank >= 2,
and we must have `shape(b) = shape(a)[:-2] + [shape(a)[-1]]`.
Both `a` and `b` must be of the same type. The supported types are:
`float16`, `float32`, `float64`, `int32`, `complex64`, `complex128`.
Matrix `a` can be transposed or adjointed (conjugated and transposed) on
the fly by setting one of the corresponding flag to `True`. These are `False`
by default.
If one or both of the inputs contain a lot of zeros, a more efficient
multiplication algorithm can be used by setting the corresponding
`a_is_sparse` or `b_is_sparse` flag to `True`. These are `False` by default.
This optimization is only available for plain matrices/vectors (rank-2/1
tensors) with datatypes `bfloat16` or `float32`.
For example:
```python
# 2-D tensor `a`
# [[1, 2, 3],
# [4, 5, 6]]
a = tf.constant([1, 2, 3, 4, 5, 6], shape=[2, 3])
# 1-D tensor `b`
# [7, 9, 11]
b = tf.constant([7, 9, 11], shape=[3])
# `a` * `b`
  # [ 58, 139]
  c = tf.linalg.matvec(a, b)
# 3-D tensor `a`
# [[[ 1, 2, 3],
# [ 4, 5, 6]],
# [[ 7, 8, 9],
# [10, 11, 12]]]
a = tf.constant(np.arange(1, 13, dtype=np.int32),
shape=[2, 2, 3])
# 2-D tensor `b`
# [[13, 14, 15],
# [16, 17, 18]]
b = tf.constant(np.arange(13, 19, dtype=np.int32),
shape=[2, 3])
# `a` * `b`
# [[ 86, 212],
# [410, 563]]
  c = tf.linalg.matvec(a, b)
```
Args:
a: `Tensor` of type `float16`, `float32`, `float64`, `int32`, `complex64`,
`complex128` and rank > 1.
b: `Tensor` with same type and rank = `rank(a) - 1`.
transpose_a: If `True`, `a` is transposed before multiplication.
adjoint_a: If `True`, `a` is conjugated and transposed before
multiplication.
a_is_sparse: If `True`, `a` is treated as a sparse matrix.
b_is_sparse: If `True`, `b` is treated as a sparse matrix.
name: Name for the operation (optional).
Returns:
A `Tensor` of the same type as `a` and `b` where each inner-most vector is
the product of the corresponding matrices in `a` and vectors in `b`, e.g. if
all transpose or adjoint attributes are `False`:
`output`[..., i] = sum_k (`a`[..., i, k] * `b`[..., k]), for all indices i.
Note: This is matrix-vector product, not element-wise product.
Raises:
ValueError: If transpose_a and adjoint_a are both set to True.
"""
with ops.name_scope(name, "MatVec", [a, b]) as name:
output = matmul(
a,
array_ops.expand_dims(b, axis=-1),
transpose_a=transpose_a,
adjoint_a=adjoint_a,
a_is_sparse=a_is_sparse,
b_is_sparse=b_is_sparse)
return array_ops.squeeze(output, axis=-1)
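# Editorial sketch (hypothetical helper, not in the original source): matvec is
# matmul with the vector promoted to a trailing column and squeezed back, as
# implemented above. Re-checking the 2-D example from the docstring:
def _matvec_example_sketch():
  a = constant_op.constant([1, 2, 3, 4, 5, 6], shape=[2, 3])
  b = constant_op.constant([7, 9, 11], shape=[3])
  return matvec(a, b)  # expected [58, 139]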
_OverrideBinaryOperatorHelper(matmul, "matmul")
sparse_matmul = deprecation.deprecated(None, "Use `tf.linalg.matmul` instead")(
gen_math_ops.sparse_mat_mul)
tf_export(v1=["sparse_matmul"])(sparse_matmul)
@ops.RegisterStatistics("MatMul", "flops")
def _calc_mat_mul_flops(graph, node):
"""Calculates the compute resources needed for MatMul."""
transpose_a = node.attr["transpose_a"].b
a_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0])
a_shape.assert_is_fully_defined()
if transpose_a:
k = int(a_shape[0])
else:
k = int(a_shape[1])
output_shape = graph_util.tensor_shape_from_node_def_name(graph, node.name)
output_shape.assert_is_fully_defined()
output_count = np.prod(output_shape.as_list())
return ops.OpStats("flops", (k * output_count * 2))
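# Editorial sketch (hypothetical helper, not in the original source): the
# statistic above amounts to one multiply and one add per inner-product term,
# i.e. 2 * k * m * n for an [m, k] x [k, n] product.
def _mat_mul_flops_sketch(m, k, n):
  return 2 * k * m * n  # e.g. (2, 3, 4) -> 48 flops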
def _as_indexed_slices(x, optimize=True):
"""Convert 'x' to IndexedSlices.
Convert a dense Tensor to a block-sparse IndexedSlices.
Args:
x: Either a Tensor object, or an IndexedSlices object.
optimize: if true, attempt to optimize the conversion of 'x'.
Returns:
An IndexedSlices object.
Raises:
TypeError: If 'x' is not a Tensor or an IndexedSlices object.
"""
# TODO(touts): op_scope
if not isinstance(x, (ops.Tensor, ops.IndexedSlices)):
raise TypeError("Not a Tensor or IndexedSlices: %s" % type(x))
if isinstance(x, ops.IndexedSlices):
return x
x_shape = array_ops.shape_internal(x, optimize=optimize)
return ops.IndexedSlices(x, range(0, x_shape[0]), x_shape)
def _as_indexed_slices_list(inputs, optimize=True):
"""Convert all elements of 'inputs' to IndexedSlices.
Additionally, homogenize the types of all the indices to
either int32 or int64.
Args:
inputs: List containing either Tensor or IndexedSlices objects.
optimize: if true, attempt to optimize the conversion of each input.
Returns:
A list of IndexedSlices objects.
Raises:
TypeError: If 'inputs' is not a list or a tuple.
"""
if not isinstance(inputs, (list, tuple)):
raise TypeError("Expected a list or tuple, not a %s" % type(inputs))
outputs = [_as_indexed_slices(i, optimize=optimize) for i in inputs]
with_int32_index = [
o.indices for o in outputs if o.indices.dtype == dtypes.int32
]
if not with_int32_index or len(with_int32_index) == len(outputs):
return outputs
casted_outputs = []
for o in outputs:
if o.indices.dtype == dtypes.int32:
casted_outputs.append(
ops.IndexedSlices(o.values, cast(o.indices, dtypes.int64),
o.dense_shape))
else:
casted_outputs.append(o)
return casted_outputs
@tf_export("math.add_n", "add_n")
@dispatch.add_dispatch_support
def add_n(inputs, name=None):
"""Adds all input tensors element-wise.
Converts `IndexedSlices` objects into dense tensors prior to adding.
Args:
inputs: A list of `Tensor` or `IndexedSlices` objects, each with same shape
and type.
name: A name for the operation (optional).
Returns:
A `Tensor` of same shape and type as the elements of `inputs`.
Raises:
ValueError: If `inputs` don't all have same shape and dtype or the shape
cannot be inferred.
"""
if not inputs or not isinstance(inputs, (list, tuple)):
raise ValueError("inputs must be a list of at least one "
"Tensor/IndexedSlices with the same dtype and shape")
inputs = ops.convert_n_to_tensor_or_indexed_slices(inputs)
if not all(isinstance(x, (ops.Tensor, ops.IndexedSlices)) for x in inputs):
raise ValueError("inputs must be a list of at least one "
"Tensor/IndexedSlices with the same dtype and shape")
if len(inputs) == 1:
if isinstance(inputs[0], ops.IndexedSlices):
values = ops.convert_to_tensor(inputs[0])
else:
values = inputs[0]
if name:
return array_ops.identity(values, name=name)
return values
return gen_math_ops.add_n(inputs, name=name)
@tf_export("math.accumulate_n", v1=["math.accumulate_n", "accumulate_n"])
@deprecation.deprecated_endpoints("accumulate_n")
def accumulate_n(inputs, shape=None, tensor_dtype=None, name=None):
"""Returns the element-wise sum of a list of tensors.
Optionally, pass `shape` and `tensor_dtype` for shape and type checking,
otherwise, these are inferred.
`tf.math.accumulate_n` performs the same operation as `tf.add_n`, but does not
wait for all of its inputs to be ready before beginning to sum. This can
save memory if inputs are ready at different times, since minimum temporary
  storage is proportional to the output size rather than the inputs' size.
  `accumulate_n` is differentiable (but was not prior to TensorFlow 1.7).
For example:
```python
a = tf.constant([[1, 2], [3, 4]])
b = tf.constant([[5, 0], [0, 6]])
tf.math.accumulate_n([a, b, a]) # [[7, 4], [6, 14]]
# Explicitly pass shape and type
tf.math.accumulate_n([a, b, a], shape=[2, 2], tensor_dtype=tf.int32)
# [[7, 4],
# [6, 14]]
```
Args:
inputs: A list of `Tensor` objects, each with same shape and type.
shape: Shape of elements of `inputs`.
tensor_dtype: The type of `inputs`.
name: A name for the operation (optional).
Returns:
A `Tensor` of same shape and type as the elements of `inputs`.
Raises:
ValueError: If `inputs` don't all have same shape and dtype or the shape
cannot be inferred.
"""
def _input_error():
return ValueError("inputs must be a list of at least one Tensor with the "
"same dtype and shape")
if not inputs or not isinstance(inputs, (list, tuple)):
raise _input_error()
inputs = ops.convert_n_to_tensor_or_indexed_slices(inputs)
if not all(isinstance(x, ops.Tensor) for x in inputs):
raise _input_error()
if not all(x.dtype == inputs[0].dtype for x in inputs):
raise _input_error()
if shape is not None:
shape = tensor_shape.as_shape(shape)
else:
shape = tensor_shape.unknown_shape()
for input_tensor in inputs:
if isinstance(input_tensor, ops.Tensor):
shape = shape.merge_with(input_tensor.get_shape())
# tensor_dtype is for safety only; operator's output type computed in C++
if tensor_dtype is not None and tensor_dtype != inputs[0].dtype:
raise TypeError("tensor_dtype is {}, but input is of type {}".format(
tensor_dtype, inputs[0].dtype))
if len(inputs) == 1 and name is None:
return inputs[0]
elif len(inputs) == 1 and name is not None:
return array_ops.identity(inputs[0], name=name)
elif context.executing_eagerly():
# TemporaryVariable not currently supported in eager mode; fall back
# onto AddN for now.
# TODO(frreiss) remove this once the lifetime of eager variables gets
# addressed
return add_n(inputs, name=name)
else:
return gen_math_ops.accumulate_nv2(inputs, name=name, shape=shape) # pylint: disable=protected-access
@ops.RegisterGradient("AccumulateNV2")
def _accumulate_n_grad(op, grad):
"""Same as gradient for AddN. Copies the gradient to all inputs."""
# Not broadcasting.
return [grad] * len(op.inputs)
@tf_export("math.sigmoid", "nn.sigmoid", "sigmoid")
def sigmoid(x, name=None):
"""Computes sigmoid of `x` element-wise.
Specifically, `y = 1 / (1 + exp(-x))`.
Args:
x: A Tensor with type `float16`, `float32`, `float64`, `complex64`,
or `complex128`.
name: A name for the operation (optional).
Returns:
A Tensor with the same type as `x`.
@compatibility(scipy)
Equivalent to scipy.special.expit
@end_compatibility
"""
with ops.name_scope(name, "Sigmoid", [x]) as name:
x = ops.convert_to_tensor(x, name="x")
return gen_math_ops.sigmoid(x, name=name)
@tf_export("math.log_sigmoid", v1=["math.log_sigmoid", "log_sigmoid"])
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints("log_sigmoid")
def log_sigmoid(x, name=None):
"""Computes log sigmoid of `x` element-wise.
Specifically, `y = log(1 / (1 + exp(-x)))`. For numerical stability,
we use `y = -tf.nn.softplus(-x)`.
Args:
x: A Tensor with type `float32` or `float64`.
name: A name for the operation (optional).
Returns:
A Tensor with the same type as `x`.
"""
with ops.name_scope(name, "LogSigmoid", [x]) as name:
x = ops.convert_to_tensor(x, name="x")
return gen_math_ops.neg(gen_nn_ops.softplus(-x), name=name)
@tf_export("math.bincount", v1=[])
def bincount(arr,
weights=None,
minlength=None,
maxlength=None,
dtype=dtypes.int32,
name=None):
"""Counts the number of occurrences of each value in an integer array.
If `minlength` and `maxlength` are not given, returns a vector with length
`tf.reduce_max(arr) + 1` if `arr` is non-empty, and length 0 otherwise.
If `weights` are non-None, then index `i` of the output stores the sum of the
value in `weights` at each index where the corresponding value in `arr` is
`i`.
Args:
arr: An int32 tensor of non-negative values.
weights: If non-None, must be the same shape as arr. For each value in
`arr`, the bin will be incremented by the corresponding weight instead of
1.
minlength: If given, ensures the output has length at least `minlength`,
padding with zeros at the end if necessary.
maxlength: If given, skips values in `arr` that are equal or greater than
`maxlength`, ensuring that the output has length at most `maxlength`.
dtype: If `weights` is None, determines the type of the output bins.
name: A name scope for the associated operations (optional).
Returns:
A vector with the same dtype as `weights` or the given `dtype`. The bin
values.
"""
name = "bincount" if name is None else name
with ops.name_scope(name):
arr = ops.convert_to_tensor(arr, name="arr", dtype=dtypes.int32)
array_is_nonempty = reduce_prod(array_ops.shape(arr)) > 0
output_size = cast(array_is_nonempty, dtypes.int32) * (reduce_max(arr) + 1)
if minlength is not None:
minlength = ops.convert_to_tensor(
minlength, name="minlength", dtype=dtypes.int32)
output_size = gen_math_ops.maximum(minlength, output_size)
if maxlength is not None:
maxlength = ops.convert_to_tensor(
maxlength, name="maxlength", dtype=dtypes.int32)
output_size = gen_math_ops.minimum(maxlength, output_size)
if weights is not None:
weights = ops.convert_to_tensor(weights, name="weights")
return gen_math_ops.unsorted_segment_sum(weights, arr, output_size)
weights = constant_op.constant([], dtype)
return gen_math_ops.bincount(arr, output_size, weights)
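# Editorial sketch (hypothetical helper, not in the original source): a
# weighted bincount call matching the behaviour documented above.
def _bincount_sketch():
  values = constant_op.constant([1, 1, 2, 3, 2, 4, 4, 5])
  weights = constant_op.constant([1., 5., 0., 1., 0.5, 2., 2., 5.])
  return bincount(values, weights=weights)  # -> [0., 6., 0.5, 1., 4., 5.]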
@tf_export(v1=["math.bincount", "bincount"])
@deprecation.deprecated_endpoints("bincount")
def bincount_v1(arr,
weights=None,
minlength=None,
maxlength=None,
dtype=dtypes.int32):
"""Counts the number of occurrences of each value in an integer array.
If `minlength` and `maxlength` are not given, returns a vector with length
`tf.reduce_max(arr) + 1` if `arr` is non-empty, and length 0 otherwise.
If `weights` are non-None, then index `i` of the output stores the sum of the
value in `weights` at each index where the corresponding value in `arr` is
`i`.
Args:
arr: An int32 tensor of non-negative values.
weights: If non-None, must be the same shape as arr. For each value in
`arr`, the bin will be incremented by the corresponding weight instead of
1.
minlength: If given, ensures the output has length at least `minlength`,
padding with zeros at the end if necessary.
maxlength: If given, skips values in `arr` that are equal or greater than
`maxlength`, ensuring that the output has length at most `maxlength`.
dtype: If `weights` is None, determines the type of the output bins.
Returns:
A vector with the same dtype as `weights` or the given `dtype`. The bin
values.
"""
return bincount(arr, weights, minlength, maxlength, dtype)
@tf_export("math.cumsum", "cumsum")
def cumsum(x, axis=0, exclusive=False, reverse=False, name=None):
"""Compute the cumulative sum of the tensor `x` along `axis`.
By default, this op performs an inclusive cumsum, which means that the first
element of the input is identical to the first element of the output:
```python
tf.cumsum([a, b, c]) # [a, a + b, a + b + c]
```
By setting the `exclusive` kwarg to `True`, an exclusive cumsum is performed
instead:
```python
tf.cumsum([a, b, c], exclusive=True) # [0, a, a + b]
```
By setting the `reverse` kwarg to `True`, the cumsum is performed in the
opposite direction:
```python
tf.cumsum([a, b, c], reverse=True) # [a + b + c, b + c, c]
```
This is more efficient than using separate `tf.reverse` ops.
The `reverse` and `exclusive` kwargs can also be combined:
```python
tf.cumsum([a, b, c], exclusive=True, reverse=True) # [b + c, c, 0]
```
Args:
x: A `Tensor`. Must be one of the following types: `float32`, `float64`,
`int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`,
`complex128`, `qint8`, `quint8`, `qint32`, `half`.
axis: A `Tensor` of type `int32` (default: 0). Must be in the range
`[-rank(x), rank(x))`.
exclusive: If `True`, perform exclusive cumsum.
reverse: A `bool` (default: False).
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `x`.
"""
with ops.name_scope(name, "Cumsum", [x]) as name:
x = ops.convert_to_tensor(x, name="x")
return gen_math_ops.cumsum(
x, axis, exclusive=exclusive, reverse=reverse, name=name)
@tf_export("math.cumprod", v1=["math.cumprod", "cumprod"])
@deprecation.deprecated_endpoints("cumprod")
def cumprod(x, axis=0, exclusive=False, reverse=False, name=None):
"""Compute the cumulative product of the tensor `x` along `axis`.
By default, this op performs an inclusive cumprod, which means that the
first element of the input is identical to the first element of the output:
```python
tf.math.cumprod([a, b, c]) # [a, a * b, a * b * c]
```
By setting the `exclusive` kwarg to `True`, an exclusive cumprod is
performed
instead:
```python
tf.math.cumprod([a, b, c], exclusive=True) # [1, a, a * b]
```
By setting the `reverse` kwarg to `True`, the cumprod is performed in the
opposite direction:
```python
tf.math.cumprod([a, b, c], reverse=True) # [a * b * c, b * c, c]
```
This is more efficient than using separate `tf.reverse` ops.
The `reverse` and `exclusive` kwargs can also be combined:
```python
tf.math.cumprod([a, b, c], exclusive=True, reverse=True) # [b * c, c, 1]
```
Args:
x: A `Tensor`. Must be one of the following types: `float32`, `float64`,
`int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`,
`complex128`, `qint8`, `quint8`, `qint32`, `half`.
axis: A `Tensor` of type `int32` (default: 0). Must be in the range
`[-rank(x), rank(x))`.
exclusive: If `True`, perform exclusive cumprod.
reverse: A `bool` (default: False).
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `x`.
"""
with ops.name_scope(name, "Cumprod", [x]) as name:
x = ops.convert_to_tensor(x, name="x")
return gen_math_ops.cumprod(
x, axis, exclusive=exclusive, reverse=reverse, name=name)
@tf_export("math.conj", v1=["math.conj", "conj"])
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints("conj")
def conj(x, name=None):
r"""Returns the complex conjugate of a complex number.
Given a tensor `input` of complex numbers, this operation returns a tensor of
complex numbers that are the complex conjugate of each element in `input`. The
complex numbers in `input` must be of the form \\(a + bj\\), where *a* is the
real part and *b* is the imaginary part.
The complex conjugate returned by this operation is of the form \\(a - bj\\).
For example:
# tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j]
tf.math.conj(input) ==> [-2.25 - 4.75j, 3.25 - 5.75j]
If `x` is real, it is returned unchanged.
Args:
x: `Tensor` to conjugate. Must have numeric or variant type.
name: A name for the operation (optional).
Returns:
A `Tensor` that is the conjugate of `x` (with the same type).
Raises:
TypeError: If `x` is not a numeric tensor.
"""
if isinstance(x, ops.Tensor):
dt = x.dtype
if dt.is_floating or dt.is_integer:
return x
with ops.name_scope(name, "Conj", [x]) as name:
x = ops.convert_to_tensor(x, name="x")
if x.dtype.is_complex or x.dtype == dtypes.variant:
return gen_math_ops.conj(x, name=name)
elif x.dtype.is_floating or x.dtype.is_integer:
return x
else:
raise TypeError(
"Expected numeric or variant tensor, got dtype %r" % x.dtype)
def _BroadcastShape(op):
"""Common shape function for binary operators that broadcast their inputs."""
return [
common_shapes.broadcast_shape(op.inputs[0].get_shape(),
op.inputs[1].get_shape())
]
def reduced_shape(input_shape, axes):
"""Helper function for reduction ops.
Args:
input_shape: 1-D Tensor, the shape of the Tensor being reduced.
axes: 1-D Tensor, the reduction axes.
Returns:
A 1-D Tensor, the output shape as if keepdims were set to True.
"""
# Example:
# cast needed for SparseTensor reductions
if context.executing_eagerly():
input_shape = input_shape.numpy()
axes = axes.numpy()
input_shape[axes] = 1
return input_shape
input_shape = cast(input_shape, dtypes.int32) # [2, 3, 5, 7]
axes = cast(axes, dtypes.int32) # [1, 2]
input_rank = array_ops.size(input_shape) # 4
axes = (axes + input_rank) % input_rank
axes_shape = array_ops.shape(axes) # [2]
return gen_data_flow_ops.dynamic_stitch( # [2, 1, 1, 7]
[
range(input_rank), # [0, 1, 2, 3]
axes
], # [1, 2]
[
input_shape, # [2, 3, 5, 7]
array_ops.fill(axes_shape, 1)
]) # [1, 1]
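# Editorial sketch (hypothetical helper, not in the original source): a
# NumPy-only analogue of reduced_shape for the commented example above,
# assuming non-negative axes.
def _reduced_shape_sketch():
  import numpy as np
  input_shape = np.array([2, 3, 5, 7])
  axes = np.array([1, 2])
  out = input_shape.copy()
  out[axes] = 1
  return out  # [2, 1, 1, 7]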
def _unsorted_segment_N(data, segment_ids, num_segments):
""" Helper function for unsorted_segment_mean/_sqrtN. Computes the number
of segment entries with 0-entries set to 1 to allow division by N.
"""
# bincount doesn't support negative indices so we use unsorted_segment_sum
segment_ids_shape = array_ops.shape_internal(segment_ids)
ones_tensor = array_ops.ones(segment_ids_shape, dtype=data.dtype)
N = gen_math_ops.unsorted_segment_sum(ones_tensor, segment_ids, num_segments)
# add dimensions for all non-reduced axes
ndims_output = data.shape.ndims - segment_ids.shape.ndims
broadcast_shape = [num_segments] + [1] * ndims_output
N = array_ops.reshape(N, broadcast_shape)
return gen_math_ops.maximum(N, 1)
@tf_export(
"math.unsorted_segment_mean",
v1=["math.unsorted_segment_mean", "unsorted_segment_mean"])
@deprecation.deprecated_endpoints("unsorted_segment_mean")
@dispatch.add_dispatch_support
def unsorted_segment_mean(data, segment_ids, num_segments, name=None):
r"""Computes the mean along segments of a tensor.
Read [the section on
segmentation](https://tensorflow.org/api_guides/python/math_ops#segmentation)
for an explanation of segments.
This operator is similar to the unsorted segment sum operator found
[here](../../../api_docs/python/math_ops.md#UnsortedSegmentSum).
Instead of computing the sum over segments, it computes the mean of all
entries belonging to a segment such that:
\\(output_i = 1/N_i \sum_{j...} data[j...]\\) where the sum is over tuples
  `j...` such that `segment_ids[j...] == i` with \\(N_i\\) being the number of
  occurrences of id \\(i\\).
If there is no entry for a given segment ID `i`, it outputs 0.
If the given segment ID `i` is negative, the value is dropped and will not
be added to the sum of the segment.
Args:
data: A `Tensor` with floating point or complex dtype.
segment_ids: An integer tensor whose shape is a prefix of `data.shape`.
num_segments: An integer scalar `Tensor`. The number of distinct
segment IDs.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has same shape as data, except for the first `segment_ids.rank`
dimensions, which are replaced with a single dimension which has size
`num_segments`.
"""
with ops.name_scope(name, "UnsortedSegmentMean"):
data = ops.convert_to_tensor(data)
segment_ids = ops.convert_to_tensor(segment_ids)
N = _unsorted_segment_N(data, segment_ids, num_segments)
summed = gen_math_ops.unsorted_segment_sum(data, segment_ids, num_segments)
return summed / N
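# Editorial sketch (hypothetical helper, not in the original source): a tiny
# call matching the mean formula in the docstring above.
def _unsorted_segment_mean_sketch():
  data = constant_op.constant([1., 2., 3., 4.])
  segment_ids = constant_op.constant([0, 0, 1, 1])
  return unsorted_segment_mean(data, segment_ids, num_segments=2)  # [1.5, 3.5]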
@tf_export(
"math.unsorted_segment_sqrt_n",
v1=["math.unsorted_segment_sqrt_n", "unsorted_segment_sqrt_n"])
@deprecation.deprecated_endpoints("unsorted_segment_sqrt_n")
@dispatch.add_dispatch_support
def unsorted_segment_sqrt_n(data, segment_ids, num_segments, name=None):
r"""Computes the sum along segments of a tensor divided by the sqrt(N).
Read [the section on
segmentation](https://tensorflow.org/api_guides/python/math_ops#segmentation)
for an explanation of segments.
This operator is similar to the unsorted segment sum operator found
[here](../../../api_docs/python/math_ops.md#UnsortedSegmentSum).
Additionally to computing the sum over segments, it divides the results by
sqrt(N).
\\(output_i = 1/sqrt(N_i) \sum_{j...} data[j...]\\) where the sum is over
  tuples `j...` such that `segment_ids[j...] == i` with \\(N_i\\) being the
  number of occurrences of id \\(i\\).
If there is no entry for a given segment ID `i`, it outputs 0.
Note that this op only supports floating point and complex dtypes,
due to tf.sqrt only supporting these types.
If the given segment ID `i` is negative, the value is dropped and will not
be added to the sum of the segment.
Args:
data: A `Tensor` with floating point or complex dtype.
segment_ids: An integer tensor whose shape is a prefix of `data.shape`.
num_segments: An integer scalar `Tensor`. The number of distinct
segment IDs.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has same shape as data, except for the first `segment_ids.rank`
dimensions, which are replaced with a single dimension which has size
`num_segments`.
"""
with ops.name_scope(name, "UnsortedSegmentSqrtN"):
data = ops.convert_to_tensor(data)
segment_ids = ops.convert_to_tensor(segment_ids)
N = _unsorted_segment_N(data, segment_ids, num_segments)
summed = gen_math_ops.unsorted_segment_sum(data, segment_ids, num_segments)
return summed / gen_math_ops.sqrt(N)
@tf_export(v1=["sparse.segment_sum", "sparse_segment_sum"])
@deprecation.deprecated_endpoints("sparse_segment_sum")
def sparse_segment_sum(data, indices, segment_ids, name=None,
num_segments=None):
r"""Computes the sum along sparse segments of a tensor.
Read [the section on
segmentation](https://tensorflow.org/api_guides/python/math_ops#Segmentation)
for an explanation of segments.
Like `SegmentSum`, but `segment_ids` can have rank less than `data`'s first
dimension, selecting a subset of dimension 0, specified by `indices`.
`segment_ids` is allowed to have missing ids, in which case the output will
be zeros at those indices. In those cases `num_segments` is used to determine
the size of the output.
For example:
```python
c = tf.constant([[1,2,3,4], [-1,-2,-3,-4], [5,6,7,8]])
# Select two rows, one segment.
tf.sparse.segment_sum(c, tf.constant([0, 1]), tf.constant([0, 0]))
# => [[0 0 0 0]]
# Select two rows, two segment.
tf.sparse.segment_sum(c, tf.constant([0, 1]), tf.constant([0, 1]))
# => [[ 1 2 3 4]
# [-1 -2 -3 -4]]
# With missing segment ids.
tf.sparse.segment_sum(c, tf.constant([0, 1]), tf.constant([0, 2]),
num_segments=4)
# => [[ 1 2 3 4]
# [ 0 0 0 0]
# [-1 -2 -3 -4]
# [ 0 0 0 0]]
# Select all rows, two segments.
tf.sparse.segment_sum(c, tf.constant([0, 1, 2]), tf.constant([0, 0, 1]))
# => [[0 0 0 0]
# [5 6 7 8]]
# Which is equivalent to:
tf.segment_sum(c, tf.constant([0, 0, 1]))
```
Args:
data: A `Tensor` with data that will be assembled in the output.
indices: A 1-D `Tensor` with indices into `data`. Has same rank as
`segment_ids`.
segment_ids: A 1-D `Tensor` with indices into the output `Tensor`.
Values should be sorted and can be repeated.
name: A name for the operation (optional).
num_segments: An optional int32 scalar. Indicates the size of the output
`Tensor`.
Returns:
    A `tensor` of the same shape as `data`, except for dimension 0, which has
    size `k`, the number of segments specified via `num_segments` or inferred
    from the last element in `segment_ids`.
"""
if num_segments is not None:
return gen_math_ops.sparse_segment_sum_with_num_segments(
data=data,
indices=indices,
segment_ids=segment_ids,
num_segments=num_segments,
name=name)
else:
return gen_math_ops.sparse_segment_sum(
data=data, indices=indices, segment_ids=segment_ids, name=name)
@tf_export("sparse.segment_sum", v1=[])
def sparse_segment_sum_v2(data,
indices,
segment_ids,
num_segments=None,
name=None):
  # v2 endpoint: identical to the v1 `sparse_segment_sum` above, with
  # `num_segments` accepted positionally before `name`.
  return sparse_segment_sum(
      data, indices, segment_ids, name=name, num_segments=num_segments)
@tf_export(v1=["sparse.segment_mean", "sparse_segment_mean"])
@deprecation.deprecated_endpoints("sparse_segment_mean")
def sparse_segment_mean(data,
indices,
segment_ids,
name=None,
num_segments=None):
r"""Computes the mean along sparse segments of a tensor.
Read [the section on
segmentation](https://tensorflow.org/api_guides/python/math_ops#Segmentation)
for an explanation of segments.
Like `SegmentMean`, but `segment_ids` can have rank less than `data`'s first
dimension, selecting a subset of dimension 0, specified by `indices`.
`segment_ids` is allowed to have missing ids, in which case the output will
be zeros at those indices. In those cases `num_segments` is used to determine
the size of the output.
Args:
data: A `Tensor` with data that will be assembled in the output.
indices: A 1-D `Tensor` with indices into `data`. Has same rank as
`segment_ids`.
segment_ids: A 1-D `Tensor` with indices into the output `Tensor`.
Values should be sorted and can be repeated.
name: A name for the operation (optional).
num_segments: An optional int32 scalar. Indicates the size of the output
`Tensor`.
Returns:
    A `tensor` of the same shape as `data`, except for dimension 0, which has
    size `k`, the number of segments specified via `num_segments` or inferred
    from the last element in `segment_ids`.
"""
if num_segments is not None:
return gen_math_ops.sparse_segment_mean_with_num_segments(
data=data,
indices=indices,
segment_ids=segment_ids,
num_segments=num_segments,
name=name)
else:
return gen_math_ops.sparse_segment_mean(
data=data, indices=indices, segment_ids=segment_ids, name=name)
@tf_export("sparse.segment_mean", v1=[])
def sparse_segment_mean_v2(data,
indices,
segment_ids,
num_segments=None,
name=None):
r"""Computes the mean along sparse segments of a tensor.
Read [the section on
segmentation](https://tensorflow.org/api_guides/python/math_ops#Segmentation)
for an explanation of segments.
Like `SegmentMean`, but `segment_ids` can have rank less than `data`'s first
dimension, selecting a subset of dimension 0, specified by `indices`.
`segment_ids` is allowed to have missing ids, in which case the output will
be zeros at those indices. In those cases `num_segments` is used to determine
the size of the output.
Args:
data: A `Tensor` with data that will be assembled in the output.
indices: A 1-D `Tensor` with indices into `data`. Has same rank as
`segment_ids`.
segment_ids: A 1-D `Tensor` with indices into the output `Tensor`. Values
should be sorted and can be repeated.
num_segments: An optional int32 scalar. Indicates the size of the output
`Tensor`.
name: A name for the operation (optional).
Returns:
    A `tensor` of the same shape as `data`, except for dimension 0, which has
    size `k`, the number of segments specified via `num_segments` or inferred
    from the last element in `segment_ids`.
"""
return sparse_segment_mean(
data, indices, segment_ids, name=name, num_segments=num_segments)
@tf_export(v1=["sparse.segment_sqrt_n", "sparse_segment_sqrt_n"])
@deprecation.deprecated_endpoints("sparse_segment_sqrt_n")
def sparse_segment_sqrt_n(data,
indices,
segment_ids,
name=None,
num_segments=None):
r"""Computes the sum along sparse segments of a tensor divided by the sqrt(N).
`N` is the size of the segment being reduced.
Args:
data: A `Tensor` with data that will be assembled in the output.
indices: A 1-D `Tensor` with indices into `data`. Has same rank as
`segment_ids`.
segment_ids: A 1-D `Tensor` with indices into the output `Tensor`.
Values should be sorted and can be repeated.
name: A name for the operation (optional).
num_segments: An optional int32 scalar. Indicates the size of the output
`Tensor`.
Returns:
    A `tensor` of the same shape as `data`, except for dimension 0, which has
    size `k`, the number of segments specified via `num_segments` or inferred
    from the last element in `segment_ids`.
"""
if num_segments is not None:
return gen_math_ops.sparse_segment_sqrt_n_with_num_segments(
data=data,
indices=indices,
segment_ids=segment_ids,
num_segments=num_segments,
name=name)
else:
return gen_math_ops.sparse_segment_sqrt_n(
data=data, indices=indices, segment_ids=segment_ids, name=name)
@tf_export("sparse.segment_sqrt_n", v1=[])
def sparse_segment_sqrt_n_v2(data,
indices,
segment_ids,
num_segments=None,
name=None):
r"""Computes the sum along sparse segments of a tensor divided by the sqrt(N).
`N` is the size of the segment being reduced.
Args:
data: A `Tensor` with data that will be assembled in the output.
indices: A 1-D `Tensor` with indices into `data`. Has same rank as
`segment_ids`.
segment_ids: A 1-D `Tensor` with indices into the output `Tensor`. Values
should be sorted and can be repeated.
num_segments: An optional int32 scalar. Indicates the size of the output
`Tensor`.
name: A name for the operation (optional).
Returns:
    A `tensor` of the same shape as `data`, except for dimension 0, which has
    size `k`, the number of segments specified via `num_segments` or inferred
    from the last element in `segment_ids`.
"""
return sparse_segment_sqrt_n(
data, indices, segment_ids, name=name, num_segments=num_segments)
@tf_export("tensordot", "linalg.tensordot")
def tensordot(a, b, axes, name=None):
r"""Tensor contraction of a and b along specified axes.
Tensordot (also known as tensor contraction) sums the product of elements
from `a` and `b` over the indices specified by `a_axes` and `b_axes`.
The lists `a_axes` and `b_axes` specify those pairs of axes along which to
contract the tensors. The axis `a_axes[i]` of `a` must have the same dimension
as axis `b_axes[i]` of `b` for all `i` in `range(0, len(a_axes))`. The lists
`a_axes` and `b_axes` must have identical length and consist of unique
integers that specify valid axes for each of the tensors.
This operation corresponds to `numpy.tensordot(a, b, axes)`.
Example 1: When `a` and `b` are matrices (order 2), the case `axes = 1`
is equivalent to matrix multiplication.
Example 2: When `a` and `b` are matrices (order 2), the case
`axes = [[1], [0]]` is equivalent to matrix multiplication.
Example 3: Suppose that \\(a_{ijk}\\) and \\(b_{lmn}\\) represent two
tensors of order 3. Then, `contract(a, b, [[0], [2]])` is the order 4 tensor
\\(c_{jklm}\\) whose entry
corresponding to the indices \\((j,k,l,m)\\) is given by:
\\( c_{jklm} = \sum_i a_{ijk} b_{lmi} \\).
In general, `order(c) = order(a) + order(b) - 2*len(axes[0])`.
Args:
a: `Tensor` of type `float32` or `float64`.
b: `Tensor` with the same type as `a`.
axes: Either a scalar `N`, or a list or an `int32` `Tensor` of shape [2, k].
If axes is a scalar, sum over the last N axes of a and the first N axes of
b in order. If axes is a list or `Tensor` the first and second row contain
the set of unique integers specifying axes along which the contraction is
computed, for `a` and `b`, respectively. The number of axes for `a` and
`b` must be equal.
name: A name for the operation (optional).
Returns:
A `Tensor` with the same type as `a`.
Raises:
ValueError: If the shapes of `a`, `b`, and `axes` are incompatible.
IndexError: If the values in axes exceed the rank of the corresponding
tensor.
"""
def _tensordot_reshape(a, axes, flipped=False):
"""Helper method to perform transpose and reshape for contraction op.
This method is helpful in reducing `math_ops.tensordot` to `math_ops.matmul`
using `array_ops.transpose` and `array_ops.reshape`. The method takes a
tensor and performs the correct transpose and reshape operation for a given
set of indices. It returns the reshaped tensor as well as a list of indices
necessary to reshape the tensor again after matrix multiplication.
Args:
a: `Tensor`.
axes: List or `int32` `Tensor` of unique indices specifying valid axes of
`a`.
flipped: An optional `bool`. Defaults to `False`. If `True`, the method
assumes that `a` is the second argument in the contraction operation.
Returns:
A tuple `(reshaped_a, free_dims, free_dims_static)` where `reshaped_a` is
the tensor `a` reshaped to allow contraction via `matmul`, `free_dims` is
either a list of integers or an `int32` `Tensor`, depending on whether
the shape of a is fully specified, and free_dims_static is either a list
of integers and None values, or None, representing the inferred
static shape of the free dimensions
"""
if a.get_shape().is_fully_defined() and isinstance(axes, (list, tuple)):
shape_a = a.get_shape().as_list()
axes = [i if i >= 0 else i + len(shape_a) for i in axes]
free = [i for i in xrange(len(shape_a)) if i not in axes]
free_dims = [shape_a[i] for i in free]
prod_free = int(np.prod([shape_a[i] for i in free]))
prod_axes = int(np.prod([shape_a[i] for i in axes]))
perm = list(axes) + free if flipped else free + list(axes)
new_shape = [prod_axes, prod_free] if flipped else [prod_free, prod_axes]
reshaped_a = array_ops.reshape(array_ops.transpose(a, perm), new_shape)
return reshaped_a, free_dims, free_dims
else:
if a.get_shape().ndims is not None and isinstance(axes, (list, tuple)):
shape_a = a.get_shape().as_list()
axes = [i if i >= 0 else i + len(shape_a) for i in axes]
free = [i for i in xrange(len(shape_a)) if i not in axes]
axes_dims = [shape_a[i] for i in axes]
free_dims = [shape_a[i] for i in free]
free_dims_static = free_dims
axes = ops.convert_to_tensor(axes, dtype=dtypes.int32, name="axes")
free = ops.convert_to_tensor(free, dtype=dtypes.int32, name="free")
shape_a = array_ops.shape(a)
else:
free_dims_static = None
shape_a = array_ops.shape(a)
rank_a = array_ops.rank(a)
axes = ops.convert_to_tensor(axes, dtype=dtypes.int32, name="axes")
axes = array_ops.where(axes >= 0, axes, axes + rank_a)
free, _ = array_ops.setdiff1d(range(rank_a), axes)
free_dims = array_ops.gather(shape_a, free)
axes_dims = array_ops.gather(shape_a, axes)
prod_free_dims = reduce_prod(free_dims)
prod_axes_dims = reduce_prod(axes_dims)
if flipped:
perm = array_ops.concat([axes, free], 0)
new_shape = array_ops.stack([prod_axes_dims, prod_free_dims])
else:
perm = array_ops.concat([free, axes], 0)
new_shape = array_ops.stack([prod_free_dims, prod_axes_dims])
reshaped_a = array_ops.reshape(array_ops.transpose(a, perm), new_shape)
return reshaped_a, free_dims, free_dims_static
def _tensordot_axes(a, axes):
"""Generates two sets of contraction axes for the two tensor arguments."""
a_shape = a.get_shape()
if isinstance(axes, compat.integral_types):
if axes < 0:
raise ValueError("'axes' must be at least 0.")
if a_shape.ndims is not None:
if axes > a_shape.ndims:
raise ValueError("'axes' must not be larger than the number of "
"dimensions of tensor %s." % a)
return (list(xrange(a_shape.ndims - axes, a_shape.ndims)),
list(xrange(axes)))
else:
rank = array_ops.rank(a)
return (range(rank - axes, rank, dtype=dtypes.int32),
range(axes, dtype=dtypes.int32))
elif isinstance(axes, (list, tuple)):
if len(axes) != 2:
raise ValueError("'axes' must be an integer or have length 2.")
a_axes = axes[0]
b_axes = axes[1]
if isinstance(a_axes, compat.integral_types) and \
isinstance(b_axes, compat.integral_types):
a_axes = [a_axes]
b_axes = [b_axes]
if len(a_axes) != len(b_axes):
raise ValueError(
"Different number of contraction axes 'a' and 'b', %s != %s." %
(len(a_axes), len(b_axes)))
return a_axes, b_axes
else:
axes = ops.convert_to_tensor(axes, name="axes", dtype=dtypes.int32)
return axes[0], axes[1]
with ops.name_scope(name, "Tensordot", [a, b, axes]) as name:
a = ops.convert_to_tensor(a, name="a")
b = ops.convert_to_tensor(b, name="b")
a_axes, b_axes = _tensordot_axes(a, axes)
a_reshape, a_free_dims, a_free_dims_static = _tensordot_reshape(a, a_axes)
b_reshape, b_free_dims, b_free_dims_static = _tensordot_reshape(
b, b_axes, True)
ab_matmul = matmul(a_reshape, b_reshape)
if isinstance(a_free_dims, list) and isinstance(b_free_dims, list):
return array_ops.reshape(ab_matmul, a_free_dims + b_free_dims, name=name)
else:
a_free_dims = ops.convert_to_tensor(a_free_dims, dtype=dtypes.int32)
b_free_dims = ops.convert_to_tensor(b_free_dims, dtype=dtypes.int32)
product = array_ops.reshape(
ab_matmul, array_ops.concat([a_free_dims, b_free_dims], 0), name=name)
if a_free_dims_static is not None and b_free_dims_static is not None:
product.set_shape(a_free_dims_static + b_free_dims_static)
return product
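# Editorial sketch (hypothetical helper, not in the original source): with a
# scalar `axes=1` over two matrices, tensordot reduces to ordinary matrix
# multiplication, mirroring Example 1 in the docstring and numpy.tensordot.
def _tensordot_sketch():
  import numpy as np
  a = np.arange(6.).reshape(2, 3)
  b = np.arange(12.).reshape(3, 4)
  expected = np.tensordot(a, b, axes=1)  # same as a @ b
  result = tensordot(constant_op.constant(a), constant_op.constant(b), axes=1)
  return expected, result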
@tf_export("math.polyval")
def polyval(coeffs, x, name=None):
r"""Computes the elementwise value of a polynomial.
  If `x` is a tensor and `coeffs` is a list of n + 1 tensors, this function
  returns the value of the n-th order polynomial
  p(x) = coeffs[n] + coeffs[n-1] * x + ... + coeffs[0] * x**n
  evaluated using Horner's method, i.e.
  p(x) = coeffs[n] + x * (coeffs[n-1] + ... + x * (coeffs[1] +
  x * coeffs[0]))
Args:
coeffs: A list of `Tensor` representing the coefficients of the polynomial.
x: A `Tensor` representing the variable of the polynomial.
name: A name for the operation (optional).
Returns:
    A `tensor` with the shape of the expression p(x), with the usual
    broadcasting rules for element-wise addition and multiplication applied.
@compatibility(numpy)
Equivalent to numpy.polyval.
@end_compatibility
"""
with ops.name_scope(name, "polyval", nest.flatten(coeffs) + [x]) as name:
x = ops.convert_to_tensor(x, name="x")
if len(coeffs) < 1:
return array_ops.zeros_like(x, name=name)
coeffs = [
ops.convert_to_tensor(coeff, name=("coeff_%d" % index))
for index, coeff in enumerate(coeffs)
]
p = coeffs[0]
for c in coeffs[1:]:
p = c + p * x
return p
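# Editorial sketch (hypothetical helper, not in the original source): the
# Horner loop above, checked against numpy.polyval for a concrete cubic.
def _polyval_sketch():
  import numpy as np
  coefficients = [2., -1., 0., 3.]  # 2*x**3 - 1*x**2 + 0*x + 3
  xs = [0.5, 1.0, 2.0]
  expected = np.polyval(coefficients, np.array(xs))
  result = polyval([constant_op.constant(c) for c in coefficients],
                   constant_op.constant(xs))
  return expected, result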
| 35.536813 | 121 | 0.669589 | ["Apache-2.0"] | minminsun/tensorflow | tensorflow/python/ops/math_ops.py | 129,354 | Python |
"""
Median and Mean for Cauchy distribution
---------------------------------------
This plot shows graphically that mean-based statistics are not robust for
the Cauchy distribution. Median-based statistics should be used instead.
"""
# Author: Jake VanderPlas
# License: BSD
# The figure produced by this code is published in the textbook
# "Statistics, Data Mining, and Machine Learning in Astronomy" (2013)
# For more information, see http://astroML.github.com
# To report a bug or issue, use the following forum:
# https://groups.google.com/forum/#!forum/astroml-general
import numpy as np
from matplotlib import pyplot as plt
from scipy import optimize
from scipy.stats import cauchy, norm
#----------------------------------------------------------------------
# This function adjusts matplotlib settings for a uniform feel in the textbook.
# Note that with usetex=True, fonts are rendered with LaTeX. This may
# result in an error if LaTeX is not installed on your system. In that case,
# you can set usetex to False.
from astroML.plotting import setup_text_plots
setup_text_plots(fontsize=8, usetex=True)
def robust_mean_mixture(x):
"""Compute the mean via a mixture of two Gaussians
One Gaussian accounts for outliers, and one Gaussian accounts for
the true distribution. This cannot be computed analytically, so
it uses scipy's function optimization
"""
if len(x) == 1:
return x
x = x.ravel()
mu_bg = np.mean(x)
sig_bg = 3 * np.std(x)
likelihood = lambda v: -np.sum(np.log(norm.pdf(x, v[0], v[1])
+ norm.pdf(x, mu_bg, sig_bg)))
v0 = np.array([0, 30])
v_best = optimize.fmin(likelihood, v0, disp=False)
return v_best[0]
def robust_mean_iterated(x, sigma_cut=3):
"""Compute the robust mean iteratively
After computing the mean, points further than 3 sigma from the mean
    are removed and the procedure is repeated until convergence.
"""
flag = np.ones(x.shape, dtype=bool)
n_to_keep = x.size
while True:
xf = x[flag]
mu = xf.mean()
sig = xf.std()
if len(xf) == 1:
break
x_sig = abs((x - mu) / sig)
too_far = (x_sig > sigma_cut)
flag[too_far] = False
n_flag = flag.sum()
if n_flag == n_to_keep:
break
else:
n_to_keep = n_flag
return mu
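# Editorial sketch (not part of the original script; values are illustrative):
# a quick sanity check of the two robust estimators on a small contaminated
# sample; both should land near zero despite the outliers.
def _robust_mean_sketch():
    rng = np.random.RandomState(0)
    sample = np.concatenate([rng.normal(0, 1, 50), [50., -60.]])
    return robust_mean_mixture(sample), robust_mean_iterated(sample)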
#------------------------------------------------------------
# Create the distribution and compute means and medians
np.random.seed(6)
mu = 0
gamma = 2
xi = cauchy(mu, gamma).rvs(100)
Nrange = np.arange(1, len(xi) + 1)
mean = [np.mean(xi[:N]) for N in Nrange]
median = [np.median(xi[:N]) for N in Nrange]
mean_mixture = [robust_mean_mixture(xi[:N]) for N in Nrange]
mean_iter = [robust_mean_iterated(xi[:N]) for N in Nrange]
#------------------------------------------------------------
# Plot the results as a function of number of points
fig = plt.figure(figsize=(5, 3.75))
fig.subplots_adjust(hspace=0.05)
# first plot the mean
ax = fig.add_subplot(211)
ax.plot(Nrange, mean, '-.b', label='mean')
ax.plot(Nrange, median, '-k', label='median')
ax.plot(Nrange, mean_mixture, ':r', label='robust mean (mixture)')
ax.plot(Nrange, mean_iter, '--g', label='robust mean (sigma-clip)')
ax.plot(Nrange, 0 * Nrange, '-', c='gray', lw=0.5)
ax.set_xlim(0, 100)
ax.set_ylim(-7, 7)
ax.legend(loc=4, ncol=2, frameon=False)
ax.set_ylabel('Value')
ax.xaxis.set_major_formatter(plt.NullFormatter())
# now plot the median
ax = fig.add_subplot(212)
ax.scatter(Nrange, xi, lw=0, s=10, c='k')
ax.plot(Nrange, 0 * Nrange, '-', c='gray')
ax.set_xlim(0, 100)
ax.set_ylim(-75, 75)
ax.set_xlabel('Sample Size')
ax.set_ylabel('Value')
plt.show()
| 29.785714 | 79 | 0.626965 | ["BSD-2-Clause"] | larsmans/astroML | book_figures/chapter3/fig_cauchy_median_mean.py | 3,753 | Python |
"""
Utilities for fast persistence of big data, with optional compression.
"""
# Author: Gael Varoquaux <gael dot varoquaux at normalesup dot org>
# Copyright (c) 2009 Gael Varoquaux
# License: BSD Style, 3 clauses.
import pickle
import traceback
import os
import zlib
import warnings
from io import BytesIO
from ._compat import _basestring, PY3_OR_LATER
if PY3_OR_LATER:
Unpickler = pickle._Unpickler
Pickler = pickle._Pickler
def asbytes(s):
if isinstance(s, bytes):
return s
return s.encode('latin1')
else:
Unpickler = pickle.Unpickler
Pickler = pickle.Pickler
asbytes = str
def hex_str(an_int):
"""Converts an int to an hexadecimal string
"""
return '{0:#x}'.format(an_int)
_MEGA = 2 ** 20
# Compressed pickle header format: _ZFILE_PREFIX followed by _MAX_LEN
# bytes which contains the length of the zlib compressed data as an
# hexadecimal string. For example: 'ZF0x139 '
_ZFILE_PREFIX = asbytes('ZF')
_MAX_LEN = len(hex_str(2 ** 64))
###############################################################################
# Compressed file with Zlib
def _read_magic(file_handle):
""" Utility to check the magic signature of a file identifying it as a
Zfile
"""
magic = file_handle.read(len(_ZFILE_PREFIX))
# Pickling needs file-handles at the beginning of the file
file_handle.seek(0)
return magic
def read_zfile(file_handle):
"""Read the z-file and return the content as a string
Z-files are raw data compressed with zlib used internally by joblib
for persistence. Backward compatibility is not guaranteed. Do not
use for external purposes.
"""
file_handle.seek(0)
assert _read_magic(file_handle) == _ZFILE_PREFIX, \
"File does not have the right magic"
header_length = len(_ZFILE_PREFIX) + _MAX_LEN
length = file_handle.read(header_length)
length = length[len(_ZFILE_PREFIX):]
length = int(length, 16)
# With python2 and joblib version <= 0.8.4 compressed pickle header is one
# character wider so we need to ignore an additional space if present.
# Note: the first byte of the zlib data is guaranteed not to be a
# space according to
# https://tools.ietf.org/html/rfc6713#section-2.1
next_byte = file_handle.read(1)
if next_byte != b' ':
# The zlib compressed data has started and we need to go back
# one byte
file_handle.seek(header_length)
# We use the known length of the data to tell Zlib the size of the
# buffer to allocate.
data = zlib.decompress(file_handle.read(), 15, length)
assert len(data) == length, (
"Incorrect data length while decompressing %s."
"The file could be corrupted." % file_handle)
return data
def write_zfile(file_handle, data, compress=1):
"""Write the data in the given file as a Z-file.
Z-files are raw data compressed with zlib used internally by joblib
    for persistence. Backward compatibility is not guaranteed. Do not
use for external purposes.
"""
file_handle.write(_ZFILE_PREFIX)
length = hex_str(len(data))
# Store the length of the data
file_handle.write(asbytes(length.ljust(_MAX_LEN)))
file_handle.write(zlib.compress(asbytes(data), compress))
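# Illustrative addition (not part of the original joblib source): a minimal
# sketch of the Z-file round trip on an in-memory buffer, using only the
# helpers defined above. The payload and the helper's name are made up for
# the example.
def _zfile_roundtrip_example():
    buffer = BytesIO()
    payload = b'some raw bytes to persist'
    write_zfile(buffer, payload, compress=3)
    # read_zfile rewinds the handle itself, checks the magic prefix and
    # decompresses the zlib payload back to the original bytes.
    assert read_zfile(buffer) == payload
    return payload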
###############################################################################
# Utility objects for persistence.
class NDArrayWrapper(object):
""" An object to be persisted instead of numpy arrays.
    The only thing this object does is to carry the filename in which
the array has been persisted, and the array subclass.
"""
def __init__(self, filename, subclass, allow_mmap=True):
"Store the useful information for later"
self.filename = filename
self.subclass = subclass
self.allow_mmap = allow_mmap
def read(self, unpickler):
"Reconstruct the array"
filename = os.path.join(unpickler._dirname, self.filename)
# Load the array from the disk
np_ver = [int(x) for x in unpickler.np.__version__.split('.', 2)[:2]]
# use getattr instead of self.allow_mmap to ensure backward compat
# with NDArrayWrapper instances pickled with joblib < 0.9.0
allow_mmap = getattr(self, 'allow_mmap', True)
memmap_kwargs = ({} if not allow_mmap
else {'mmap_mode': unpickler.mmap_mode})
array = unpickler.np.load(filename, **memmap_kwargs)
# Reconstruct subclasses. This does not work with old
# versions of numpy
if (hasattr(array, '__array_prepare__')
and not self.subclass in (unpickler.np.ndarray,
unpickler.np.memmap)):
# We need to reconstruct another subclass
new_array = unpickler.np.core.multiarray._reconstruct(
self.subclass, (0,), 'b')
new_array.__array_prepare__(array)
array = new_array
return array
#def __reduce__(self):
# return None
class ZNDArrayWrapper(NDArrayWrapper):
"""An object to be persisted instead of numpy arrays.
This object store the Zfile filename in which
the data array has been persisted, and the meta information to
retrieve it.
    The reason we store the raw buffer data of the array together with
    its meta information, rather than an array representation routine
    (tostring), is that it lets us fully exploit the strided model to
    avoid memory copies (a and a.T store equally fast). In addition,
    saving the heavy data separately avoids creating large temporary
    buffers when unpickling data with large arrays.
"""
def __init__(self, filename, init_args, state):
"Store the useful information for later"
self.filename = filename
self.state = state
self.init_args = init_args
def read(self, unpickler):
"Reconstruct the array from the meta-information and the z-file"
        # Here we are simply reproducing the unpickling mechanism for numpy
# arrays
filename = os.path.join(unpickler._dirname, self.filename)
array = unpickler.np.core.multiarray._reconstruct(*self.init_args)
with open(filename, 'rb') as f:
data = read_zfile(f)
state = self.state + (data,)
array.__setstate__(state)
return array
###############################################################################
# Pickler classes
class NumpyPickler(Pickler):
"""A pickler to persist of big data efficiently.
The main features of this object are:
* persistence of numpy arrays in separate .npy files, for which
I/O is fast.
        * optional compression using Zlib, with special care taken to avoid
temporaries.
"""
dispatch = Pickler.dispatch.copy()
def __init__(self, filename, compress=0, cache_size=10, protocol=None):
self._filename = filename
self._filenames = [filename, ]
self.cache_size = cache_size
self.compress = compress
if not self.compress:
self.file = open(filename, 'wb')
else:
self.file = BytesIO()
# Count the number of npy files that we have created:
self._npy_counter = 1
# By default we want a pickle protocol that only changes with
# the major python version and not the minor one
if protocol is None:
protocol = (pickle.DEFAULT_PROTOCOL if PY3_OR_LATER
else pickle.HIGHEST_PROTOCOL)
Pickler.__init__(self, self.file,
protocol=protocol)
# delayed import of numpy, to avoid tight coupling
try:
import numpy as np
except ImportError:
np = None
self.np = np
def _write_array(self, array, filename):
if not self.compress:
self.np.save(filename, array)
allow_mmap = not array.dtype.hasobject
container = NDArrayWrapper(os.path.basename(filename),
type(array),
allow_mmap=allow_mmap)
else:
filename += '.z'
# Efficient compressed storage:
# The meta data is stored in the container, and the core
# numerics in a z-file
_, init_args, state = array.__reduce__()
# the last entry of 'state' is the data itself
with open(filename, 'wb') as zfile:
write_zfile(zfile, state[-1], compress=self.compress)
state = state[:-1]
container = ZNDArrayWrapper(os.path.basename(filename),
init_args, state)
return container, filename
def save(self, obj):
""" Subclass the save method, to save ndarray subclasses in npy
files, rather than pickling them. Of course, this is a
total abuse of the Pickler class.
"""
if (self.np is not None and type(obj) in
(self.np.ndarray, self.np.matrix, self.np.memmap)):
size = obj.size * obj.itemsize
if self.compress and size < self.cache_size * _MEGA:
# When compressing, as we are not writing directly to the
# disk, it is more efficient to use standard pickling
if type(obj) is self.np.memmap:
# Pickling doesn't work with memmaped arrays
obj = self.np.asarray(obj)
return Pickler.save(self, obj)
if not obj.dtype.hasobject:
try:
filename = '%s_%02i.npy' % (self._filename,
self._npy_counter)
# This converts the array in a container
obj, filename = self._write_array(obj, filename)
self._filenames.append(filename)
self._npy_counter += 1
except Exception:
# XXX: We should have a logging mechanism
print('Failed to save %s to .npy file:\n%s' % (
type(obj),
traceback.format_exc()))
return Pickler.save(self, obj)
def close(self):
if self.compress:
with open(self._filename, 'wb') as zfile:
write_zfile(zfile, self.file.getvalue(), self.compress)
class NumpyUnpickler(Unpickler):
"""A subclass of the Unpickler to unpickle our numpy pickles.
"""
dispatch = Unpickler.dispatch.copy()
def __init__(self, filename, file_handle, mmap_mode=None):
self._filename = os.path.basename(filename)
self._dirname = os.path.dirname(filename)
self.mmap_mode = mmap_mode
self.file_handle = self._open_pickle(file_handle)
Unpickler.__init__(self, self.file_handle)
try:
import numpy as np
except ImportError:
np = None
self.np = np
def _open_pickle(self, file_handle):
return file_handle
def load_build(self):
""" This method is called to set the state of a newly created
object.
We capture it to replace our place-holder objects,
NDArrayWrapper, by the array we are interested in. We
replace them directly in the stack of pickler.
"""
Unpickler.load_build(self)
if isinstance(self.stack[-1], NDArrayWrapper):
if self.np is None:
raise ImportError('Trying to unpickle an ndarray, '
"but numpy didn't import correctly")
nd_array_wrapper = self.stack.pop()
array = nd_array_wrapper.read(self)
self.stack.append(array)
# Be careful to register our new method.
if PY3_OR_LATER:
dispatch[pickle.BUILD[0]] = load_build
else:
dispatch[pickle.BUILD] = load_build
class ZipNumpyUnpickler(NumpyUnpickler):
"""A subclass of our Unpickler to unpickle on the fly from
compressed storage."""
def __init__(self, filename, file_handle):
NumpyUnpickler.__init__(self, filename,
file_handle,
mmap_mode=None)
def _open_pickle(self, file_handle):
return BytesIO(read_zfile(file_handle))
###############################################################################
# Utility functions
def dump(value, filename, compress=0, cache_size=100, protocol=None):
"""Fast persistence of an arbitrary Python object into one or multiple
files, with dedicated storage for numpy arrays.
Parameters
-----------
value: any Python object
The object to store to disk
filename: string
The name of the file in which it is to be stored
    compress: integer from 0 to 9, optional
Optional compression level for the data. 0 is no compression.
Higher means more compression, but also slower read and
write times. Using a value of 3 is often a good compromise.
See the notes for more details.
cache_size: positive number, optional
Fixes the order of magnitude (in megabytes) of the cache used
for in-memory compression. Note that this is just an order of
magnitude estimate and that for big arrays, the code will go
over this value at dump and at load time.
protocol: positive int
Pickle protocol, see pickle.dump documentation for more details.
Returns
-------
filenames: list of strings
The list of file names in which the data is stored. If
compress is false, each array is stored in a different file.
See Also
--------
joblib.load : corresponding loader
Notes
-----
Memmapping on load cannot be used for compressed files. Thus
using compression can significantly slow down loading. In
    addition, compressed files take extra memory during
dump and load.
"""
if compress is True:
# By default, if compress is enabled, we want to be using 3 by
# default
compress = 3
if not isinstance(filename, _basestring):
# People keep inverting arguments, and the resulting error is
# incomprehensible
raise ValueError(
'Second argument should be a filename, %s (type %s) was given'
% (filename, type(filename))
)
try:
pickler = NumpyPickler(filename, compress=compress,
cache_size=cache_size, protocol=protocol)
pickler.dump(value)
pickler.close()
finally:
if 'pickler' in locals() and hasattr(pickler, 'file'):
pickler.file.flush()
pickler.file.close()
return pickler._filenames
def load(filename, mmap_mode=None):
"""Reconstruct a Python object from a file persisted with joblib.dump.
Parameters
-----------
filename: string
The name of the file from which to load the object
mmap_mode: {None, 'r+', 'r', 'w+', 'c'}, optional
If not None, the arrays are memory-mapped from the disk. This
mode has no effect for compressed files. Note that in this
        case the reconstructed object might no longer match exactly
the originally pickled object.
Returns
-------
result: any Python object
The object stored in the file.
See Also
--------
joblib.dump : function to save an object
Notes
-----
This function can load numpy array files saved separately during the
dump. If the mmap_mode argument is given, it is passed to np.load and
arrays are loaded as memmaps. As a consequence, the reconstructed
object might not match the original pickled object. Note that if the
file was saved with compression, the arrays cannot be memmaped.
"""
with open(filename, 'rb') as file_handle:
# We are careful to open the file handle early and keep it open to
# avoid race-conditions on renames. That said, if data are stored in
# companion files, moving the directory will create a race when
# joblib tries to access the companion files.
if _read_magic(file_handle) == _ZFILE_PREFIX:
if mmap_mode is not None:
warnings.warn('file "%(filename)s" appears to be a zip, '
'ignoring mmap_mode "%(mmap_mode)s" flag passed'
% locals(), Warning, stacklevel=2)
unpickler = ZipNumpyUnpickler(filename, file_handle=file_handle)
else:
unpickler = NumpyUnpickler(filename, file_handle=file_handle,
mmap_mode=mmap_mode)
try:
obj = unpickler.load()
except UnicodeDecodeError as exc:
# More user-friendly error message
if PY3_OR_LATER:
new_exc = ValueError(
'You may be trying to read with '
'python 3 a joblib pickle generated with python 2. '
'This feature is not supported by joblib.')
new_exc.__cause__ = exc
raise new_exc
finally:
if hasattr(unpickler, 'file_handle'):
unpickler.file_handle.close()
return obj
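# Illustrative addition (not part of the original joblib source): a hedged
# sketch of the dump/load round trip for a dict holding a numpy array. The
# helper name and the target directory are assumptions made for the example.
def _dump_load_example(target_dir='/tmp'):
    import numpy as np
    obj = {'name': 'demo', 'weights': np.arange(10)}
    filename = os.path.join(target_dir, 'demo.pkl')
    created_files = dump(obj, filename, compress=3)
    restored = load(filename)
    assert restored['name'] == 'demo'
    assert np.array_equal(restored['weights'], obj['weights'])
    return created_files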
| 36.826638 | 79 | 0.611516 | [
"MIT"
] | Con-Mi/lambda-packs | Sklearn_scipy_numpy/source/sklearn/externals/joblib/numpy_pickle.py | 17,419 | Python |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
try:
from collections.abc import Iterable
except ImportError:
from collections import Iterable
import contextlib
import itertools
import logging
import re
import warnings
from typing import Optional, Tuple
import numpy as np
import torch
from fairseq.file_io import PathManager
from fairseq import utils
import os
logger = logging.getLogger(__name__)
def infer_language_pair(path):
"""Infer language pair from filename: <split>.<lang1>-<lang2>.(...).idx"""
src, dst = None, None
for filename in PathManager.ls(path):
parts = filename.split(".")
if len(parts) >= 3 and len(parts[1].split("-")) == 2:
return parts[1].split("-")
return src, dst
def collate_tokens(
values,
pad_idx,
eos_idx=None,
left_pad=False,
move_eos_to_beginning=False,
pad_to_length=None,
pad_to_multiple=1,
pad_to_bsz=None,
):
"""Convert a list of 1d tensors into a padded 2d tensor."""
size = max(v.size(0) for v in values)
size = size if pad_to_length is None else max(size, pad_to_length)
if pad_to_multiple != 1 and size % pad_to_multiple != 0:
size = int(((size - 0.1) // pad_to_multiple + 1) * pad_to_multiple)
batch_size = len(values) if pad_to_bsz is None else max(len(values), pad_to_bsz)
res = values[0].new(batch_size, size).fill_(pad_idx)
def copy_tensor(src, dst):
assert dst.numel() == src.numel()
if move_eos_to_beginning:
if eos_idx is None:
# if no eos_idx is specified, then use the last token in src
dst[0] = src[-1]
else:
dst[0] = eos_idx
dst[1:] = src[:-1]
else:
dst.copy_(src)
for i, v in enumerate(values):
copy_tensor(v, res[i][size - len(v) :] if left_pad else res[i][: len(v)])
return res
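# Illustrative addition (not part of the original fairseq module): a minimal
# sketch of collate_tokens on toy tensors, assuming pad_idx=1 and eos_idx=2 as
# in the default fairseq dictionary layout.
def _collate_tokens_example():
    values = [torch.LongTensor([4, 5, 2]), torch.LongTensor([6, 2])]
    batch = collate_tokens(values, pad_idx=1, eos_idx=2, left_pad=False)
    # batch is tensor([[4, 5, 2],
    #                  [6, 2, 1]]) -- the shorter sequence is right-padded.
    return batch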
def load_indexed_dataset(
path, dictionary=None, dataset_impl=None, combine=False, default="cached"
):
"""A helper function for loading indexed datasets.
Args:
path (str): path to indexed dataset (e.g., 'data-bin/train')
dictionary (~fairseq.data.Dictionary): data dictionary
dataset_impl (str, optional): which dataset implementation to use. If
not provided, it will be inferred automatically. For legacy indexed
data we use the 'cached' implementation by default.
combine (bool, optional): automatically load and combine multiple
datasets. For example, if *path* is 'data-bin/train', then we will
combine 'data-bin/train', 'data-bin/train1', ... and return a
single ConcatDataset instance.
"""
import fairseq.data.indexed_dataset as indexed_dataset
from fairseq.data.concat_dataset import ConcatDataset
datasets = []
for k in itertools.count():
path_k = path + (str(k) if k > 0 else "")
try:
path_k = indexed_dataset.get_indexed_dataset_to_local(path_k)
except Exception as e:
if "StorageException: [404] Path not found" in str(e):
logger.warning(f"path_k: {e} not found")
else:
raise e
dataset_impl_k = dataset_impl
if dataset_impl_k is None:
dataset_impl_k = indexed_dataset.infer_dataset_impl(path_k)
dataset = indexed_dataset.make_dataset(
path_k,
impl=dataset_impl_k or default,
fix_lua_indexing=True,
dictionary=dictionary,
)
if dataset is None:
break
logger.info("loaded {:,} examples from: {}".format(len(dataset), path_k))
datasets.append(dataset)
if not combine:
break
if len(datasets) == 0:
return None
elif len(datasets) == 1:
return datasets[0]
else:
return ConcatDataset(datasets)
@contextlib.contextmanager
def numpy_seed(seed, *addl_seeds):
"""Context manager which seeds the NumPy PRNG with the specified seed and
restores the state afterward"""
if seed is None:
yield
return
if len(addl_seeds) > 0:
seed = int(hash((seed, *addl_seeds)) % 1e6)
state = np.random.get_state()
np.random.seed(seed)
try:
yield
finally:
np.random.set_state(state)
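# Illustrative addition (not part of the original fairseq module): numpy_seed
# makes draws inside the block reproducible without disturbing the global
# NumPy random state afterwards.
def _numpy_seed_example():
    with numpy_seed(1234):
        first = np.random.randint(0, 100, size=3)
    with numpy_seed(1234):
        second = np.random.randint(0, 100, size=3)
    assert np.array_equal(first, second)
    return first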
def collect_filtered(function, iterable, filtered):
"""
Similar to :func:`filter` but collects filtered elements in ``filtered``.
Args:
function (callable): function that returns ``False`` for elements that
should be filtered
iterable (iterable): iterable to filter
filtered (list): list to store filtered elements
"""
for el in iterable:
if function(el):
yield el
else:
filtered.append(el)
def _filter_by_size_dynamic(indices, size_fn, max_positions, raise_exception=False):
def compare_leq(a, b):
return a <= b if not isinstance(a, tuple) else max(a) <= b
def check_size(idx):
if isinstance(max_positions, float) or isinstance(max_positions, int):
return size_fn(idx) <= max_positions
elif isinstance(max_positions, dict):
idx_size = size_fn(idx)
assert isinstance(idx_size, dict)
intersect_keys = set(max_positions.keys()) & set(idx_size.keys())
return all(
all(
a is None or b is None or a <= b
for a, b in zip(idx_size[key], max_positions[key])
)
for key in intersect_keys
)
else:
# For MultiCorpusSampledDataset, will generalize it later
if not isinstance(size_fn(idx), Iterable):
return all(size_fn(idx) <= b for b in max_positions)
return all(
a is None or b is None or a <= b
for a, b in zip(size_fn(idx), max_positions)
)
ignored = []
itr = collect_filtered(check_size, indices, ignored)
indices = np.fromiter(itr, dtype=np.int64, count=-1)
return indices, ignored
def filter_by_size(indices, dataset, max_positions, raise_exception=False):
"""
[deprecated] Filter indices based on their size.
Use `FairseqDataset::filter_indices_by_size` instead.
Args:
indices (List[int]): ordered list of dataset indices
dataset (FairseqDataset): fairseq dataset instance
max_positions (tuple): filter elements larger than this size.
Comparisons are done component-wise.
raise_exception (bool, optional): if ``True``, raise an exception if
any elements are filtered (default: False).
"""
warnings.warn(
"data_utils.filter_by_size is deprecated. "
"Use `FairseqDataset::filter_indices_by_size` instead.",
stacklevel=2,
)
if isinstance(max_positions, float) or isinstance(max_positions, int):
if hasattr(dataset, "sizes") and isinstance(dataset.sizes, np.ndarray):
ignored = indices[dataset.sizes[indices] > max_positions].tolist()
indices = indices[dataset.sizes[indices] <= max_positions]
elif (
hasattr(dataset, "sizes")
and isinstance(dataset.sizes, list)
and len(dataset.sizes) == 1
):
ignored = indices[dataset.sizes[0][indices] > max_positions].tolist()
indices = indices[dataset.sizes[0][indices] <= max_positions]
else:
indices, ignored = _filter_by_size_dynamic(
indices, dataset.size, max_positions
)
else:
indices, ignored = _filter_by_size_dynamic(indices, dataset.size, max_positions)
if len(ignored) > 0 and raise_exception:
raise Exception(
(
"Size of sample #{} is invalid (={}) since max_positions={}, "
"skip this example with --skip-invalid-size-inputs-valid-test"
).format(ignored[0], dataset.size(ignored[0]), max_positions)
)
if len(ignored) > 0:
logger.warning(
(
"{} samples have invalid sizes and will be skipped, "
"max_positions={}, first few sample ids={}"
).format(len(ignored), max_positions, ignored[:10])
)
return indices
def filter_paired_dataset_indices_by_size(src_sizes, tgt_sizes, indices, max_sizes):
"""Filter a list of sample indices. Remove those that are longer
than specified in max_sizes.
Args:
indices (np.array): original array of sample indices
max_sizes (int or list[int] or tuple[int]): max sample size,
can be defined separately for src and tgt (then list or tuple)
Returns:
np.array: filtered sample array
list: list of removed indices
"""
if max_sizes is None:
return indices, []
if type(max_sizes) in (int, float):
max_src_size, max_tgt_size = max_sizes, max_sizes
else:
max_src_size, max_tgt_size = max_sizes
if tgt_sizes is None:
ignored = indices[src_sizes[indices] > max_src_size]
else:
ignored = indices[
(src_sizes[indices] > max_src_size) | (tgt_sizes[indices] > max_tgt_size)
]
if len(ignored) > 0:
if tgt_sizes is None:
indices = indices[src_sizes[indices] <= max_src_size]
else:
indices = indices[
(src_sizes[indices] <= max_src_size)
& (tgt_sizes[indices] <= max_tgt_size)
]
return indices, ignored.tolist()
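# Illustrative addition (not part of the original fairseq module): with a
# single max size, an index is dropped when either side exceeds the limit.
def _filter_paired_example():
    src_sizes = np.array([5, 12, 7])
    tgt_sizes = np.array([6, 4, 30])
    indices = np.arange(3)
    kept, ignored = filter_paired_dataset_indices_by_size(
        src_sizes, tgt_sizes, indices, max_sizes=10
    )
    # index 1 is dropped for its source length, index 2 for its target length
    assert list(kept) == [0] and ignored == [1, 2]
    return kept, ignored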
def batch_by_size(
indices,
num_tokens_fn,
num_tokens_vec=None,
max_tokens=None,
max_sentences=None,
required_batch_size_multiple=1,
fixed_shapes=None,
):
"""
Yield mini-batches of indices bucketed by size. Batches may contain
sequences of different lengths.
Args:
indices (List[int]): ordered list of dataset indices
num_tokens_fn (callable): function that returns the number of tokens at
a given index
num_tokens_vec (List[int], optional): precomputed vector of the number
of tokens for each index in indices (to enable faster batch generation)
max_tokens (int, optional): max number of tokens in each batch
(default: None).
max_sentences (int, optional): max number of sentences in each
batch (default: None).
required_batch_size_multiple (int, optional): require batch size to
be less than N or a multiple of N (default: 1).
fixed_shapes (List[Tuple[int, int]], optional): if given, batches will
only be created with the given shapes. *max_sentences* and
*required_batch_size_multiple* will be ignored (default: None).
"""
try:
from fairseq.data.data_utils_fast import (
batch_by_size_fn,
batch_by_size_vec,
batch_fixed_shapes_fast,
)
except ImportError:
raise ImportError(
"Please build Cython components with: "
"`python setup.py build_ext --inplace`"
)
except ValueError:
raise ValueError(
"Please build (or rebuild) Cython components with `python setup.py build_ext --inplace`."
)
# added int() to avoid TypeError: an integer is required
max_tokens = int(max_tokens) if max_tokens is not None else -1
max_sentences = max_sentences if max_sentences is not None else -1
bsz_mult = required_batch_size_multiple
if not isinstance(indices, np.ndarray):
indices = np.fromiter(indices, dtype=np.int64, count=-1)
if num_tokens_vec is not None and not isinstance(num_tokens_vec, np.ndarray):
num_tokens_vec = np.fromiter(num_tokens_vec, dtype=np.int64, count=-1)
if fixed_shapes is None:
if num_tokens_vec is None:
return batch_by_size_fn(
indices,
num_tokens_fn,
max_tokens,
max_sentences,
bsz_mult,
)
else:
return batch_by_size_vec(
indices,
num_tokens_vec,
max_tokens,
max_sentences,
bsz_mult,
)
else:
fixed_shapes = np.array(fixed_shapes, dtype=np.int64)
sort_order = np.lexsort(
[
fixed_shapes[:, 1].argsort(), # length
fixed_shapes[:, 0].argsort(), # bsz
]
)
fixed_shapes_sorted = fixed_shapes[sort_order]
return batch_fixed_shapes_fast(indices, num_tokens_fn, fixed_shapes_sorted)
def post_process(sentence: str, symbol: str):
if symbol == "sentencepiece":
sentence = sentence.replace(" ", "").replace("\u2581", " ").strip()
elif symbol == "wordpiece":
sentence = sentence.replace(" ", "").replace("_", " ").strip()
elif symbol == "letter":
sentence = sentence.replace(" ", "").replace("|", " ").strip()
elif symbol == "silence":
import re
sentence = sentence.replace("<SIL>", "")
sentence = re.sub(" +", " ", sentence).strip()
elif symbol == "_EOW":
sentence = sentence.replace(" ", "").replace("_EOW", " ").strip()
elif symbol in {"subword_nmt", "@@ ", "@@"}:
if symbol == "subword_nmt":
symbol = "@@ "
sentence = (sentence + " ").replace(symbol, "").rstrip()
elif symbol == "none":
pass
elif symbol is not None:
raise NotImplementedError(f"Unknown post_process option: {symbol}")
return sentence
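# Illustrative addition (not part of the original fairseq module): post_process
# undoes common subword markers; the sample strings below are made up.
def _post_process_example():
    assert post_process("\u2581hello \u2581world", "sentencepiece") == "hello world"
    assert post_process("hel@@ lo world", "subword_nmt") == "hello world"
    assert post_process("h e l l o | w o r l d |", "letter") == "hello world"
    return True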
def compute_mask_indices(
shape: Tuple[int, int],
padding_mask: Optional[torch.Tensor],
mask_prob: float,
mask_length: int,
mask_type: str = "static",
mask_other: float = 0.0,
min_masks: int = 0,
no_overlap: bool = False,
min_space: int = 0,
) -> np.ndarray:
"""
Computes random mask spans for a given shape
Args:
        shape: the shape for which to compute masks.
should be of size 2 where first element is batch size and 2nd is timesteps
padding_mask: optional padding mask of the same size as shape, which will prevent masking padded elements
mask_prob: probability for each token to be chosen as start of the span to be masked. this will be multiplied by
number of timesteps divided by length of mask span to mask approximately this percentage of all elements.
however due to overlaps, the actual number will be smaller (unless no_overlap is True)
mask_type: how to compute mask lengths
static = fixed size
uniform = sample from uniform distribution [mask_other, mask_length*2]
normal = sample from normal distribution with mean mask_length and stdev mask_other. mask is min 1 element
            poisson = sample from poisson distribution with lambda = mask length
min_masks: minimum number of masked spans
        no_overlap: if true, will use an alternative recursive algorithm that prevents spans from overlapping
min_space: only used if no_overlap is True, this is how many elements to keep unmasked between spans
"""
bsz, all_sz = shape
mask = np.full((bsz, all_sz), False)
all_num_mask = int(
# add a random number for probabilistic rounding
mask_prob * all_sz / float(mask_length)
+ np.random.rand()
)
all_num_mask = max(min_masks, all_num_mask)
mask_idcs = []
for i in range(bsz):
if padding_mask is not None:
sz = all_sz - padding_mask[i].long().sum().item()
num_mask = int(
# add a random number for probabilistic rounding
mask_prob * sz / float(mask_length)
+ np.random.rand()
)
num_mask = max(min_masks, num_mask)
else:
sz = all_sz
num_mask = all_num_mask
if mask_type == "static":
lengths = np.full(num_mask, mask_length)
elif mask_type == "uniform":
lengths = np.random.randint(mask_other, mask_length * 2 + 1, size=num_mask)
elif mask_type == "normal":
lengths = np.random.normal(mask_length, mask_other, size=num_mask)
lengths = [max(1, int(round(x))) for x in lengths]
elif mask_type == "poisson":
lengths = np.random.poisson(mask_length, size=num_mask)
lengths = [int(round(x)) for x in lengths]
else:
raise Exception("unknown mask selection " + mask_type)
if sum(lengths) == 0:
lengths[0] = min(mask_length, sz - 1)
if no_overlap:
mask_idc = []
def arrange(s, e, length, keep_length):
span_start = np.random.randint(s, e - length)
mask_idc.extend(span_start + i for i in range(length))
new_parts = []
if span_start - s - min_space >= keep_length:
new_parts.append((s, span_start - min_space + 1))
if e - span_start - keep_length - min_space > keep_length:
new_parts.append((span_start + length + min_space, e))
return new_parts
parts = [(0, sz)]
min_length = min(lengths)
for length in sorted(lengths, reverse=True):
lens = np.fromiter(
(e - s if e - s >= length + min_space else 0 for s, e in parts),
np.int,
)
l_sum = np.sum(lens)
if l_sum == 0:
break
probs = lens / np.sum(lens)
c = np.random.choice(len(parts), p=probs)
s, e = parts.pop(c)
parts.extend(arrange(s, e, length, min_length))
mask_idc = np.asarray(mask_idc)
else:
min_len = min(lengths)
if sz - min_len <= num_mask:
min_len = sz - num_mask - 1
mask_idc = np.random.choice(sz - min_len, num_mask, replace=False)
mask_idc = np.asarray(
[
mask_idc[j] + offset
for j in range(len(mask_idc))
for offset in range(lengths[j])
]
)
mask_idcs.append(np.unique(mask_idc[mask_idc < sz]))
min_len = min([len(m) for m in mask_idcs])
for i, mask_idc in enumerate(mask_idcs):
if len(mask_idc) > min_len:
mask_idc = np.random.choice(mask_idc, min_len, replace=False)
mask[i, mask_idc] = True
return mask
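# Illustrative addition (not part of the original fairseq module): a hedged
# sketch of compute_mask_indices for a batch of 2 sequences of 10 timesteps,
# masking roughly half the positions with fixed-length spans of 2.
def _compute_mask_indices_example():
    mask = compute_mask_indices(
        shape=(2, 10),
        padding_mask=None,
        mask_prob=0.5,
        mask_length=2,
        mask_type="static",
    )
    # mask is a (2, 10) boolean numpy array; every row ends up with the same
    # number of True entries because spans are trimmed to the batch minimum.
    return mask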
def get_mem_usage():
try:
import psutil
mb = 1024 * 1024
return f"used={psutil.virtual_memory().used / mb}Mb; avail={psutil.virtual_memory().available / mb}Mb"
except ImportError:
return "N/A"
# lens: torch.LongTensor
# returns: torch.BoolTensor
def lengths_to_padding_mask(lens):
bsz, max_lens = lens.size(0), torch.max(lens).item()
mask = torch.arange(max_lens).to(lens.device).view(1, max_lens)
mask = mask.expand(bsz, -1) >= lens.view(bsz, 1).expand(-1, max_lens)
return mask
# lens: torch.LongTensor
# returns: torch.BoolTensor
def lengths_to_mask(lens):
return ~lengths_to_padding_mask(lens)
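# Illustrative addition (not part of the original fairseq module): the padding
# mask marks positions beyond each sequence length with True.
def _lengths_to_padding_mask_example():
    lens = torch.LongTensor([3, 1])
    padding = lengths_to_padding_mask(lens)
    # padding is tensor([[False, False, False],
    #                    [False,  True,  True]])
    assert torch.equal(lengths_to_mask(lens), ~padding)
    return padding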
def get_buckets(sizes, num_buckets):
buckets = np.unique(
np.percentile(
sizes,
np.linspace(0, 100, num_buckets + 1),
interpolation="lower",
)[1:]
)
return buckets
def get_bucketed_sizes(orig_sizes, buckets):
sizes = np.copy(orig_sizes)
assert np.min(sizes) >= 0
start_val = -1
for end_val in buckets:
mask = (sizes > start_val) & (sizes <= end_val)
sizes[mask] = end_val
start_val = end_val
return sizes
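# Illustrative addition (not part of the original fairseq module): sizes are
# quantized into a few bucket boundaries and then rounded up to the boundary
# of the bucket they fall into.
def _bucketing_example():
    sizes = np.array([3, 7, 8, 12, 20, 21])
    buckets = get_buckets(sizes, num_buckets=2)       # array([ 8, 21])
    bucketed = get_bucketed_sizes(sizes, buckets)     # array([ 8,  8,  8, 21, 21, 21])
    assert np.all(bucketed >= sizes)
    return buckets, bucketed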
def _find_extra_valid_paths(dataset_path: str) -> set:
paths = utils.split_paths(dataset_path)
all_valid_paths = set()
for sub_dir in paths:
contents = PathManager.ls(sub_dir)
valid_paths = [c for c in contents if re.match("valid*[0-9].*", c) is not None]
all_valid_paths |= {os.path.basename(p) for p in valid_paths}
# Remove .bin, .idx etc
roots = {os.path.splitext(p)[0] for p in all_valid_paths}
return roots
def raise_if_valid_subsets_unintentionally_ignored(train_cfg) -> None:
"""Raises if there are paths matching 'valid*[0-9].*' which are not combined or ignored."""
if (
train_cfg.dataset.ignore_unused_valid_subsets
or train_cfg.dataset.combine_valid_subsets
or train_cfg.dataset.disable_validation
or not hasattr(train_cfg.task, "data")
):
return
other_paths = _find_extra_valid_paths(train_cfg.task.data)
specified_subsets = train_cfg.dataset.valid_subset.split(",")
ignored_paths = [p for p in other_paths if p not in specified_subsets]
if ignored_paths:
advice = "Set --combine-val to combine them or --ignore-unused-valid-subsets to ignore them."
msg = f"Valid paths {ignored_paths} will be ignored. {advice}"
raise ValueError(msg)
| 35.769748 | 120 | 0.608279 | [
"MIT"
] | 1130310223/fairseq | fairseq/data/data_utils.py | 21,283 | Python |
from flask import Flask
from flask_bootstrap import Bootstrap
from config import config_options
bootstrap = Bootstrap()
def create_app(config_name):
app = Flask(__name__)
    # create app configurations
app.config.from_object(config_options[config_name])
# initialize flask extensions
bootstrap.init_app(app)
# Registering the blueprint
from .main import main as main_blueprint
app.register_blueprint(main_blueprint)
# setting config
from .requests import configure_request
configure_request(app)
return app
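# Illustrative addition (not part of the original package): a hedged sketch of
# how the factory is typically driven from a separate entry point such as
# run.py; 'development' is an assumed key of config_options.
def _run_development_server():
    app = create_app('development')
    app.run(debug=True)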
| 20.740741 | 55 | 0.755357 | [
"MIT"
] | AliKheirAbdi/NewsApp | app/__init__.py | 560 | Python |
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from pathlib import Path
import pandas as pd
from watchdog.events import FileSystemEventHandler
from watchdog.observers import Observer
from .processing import process_daily
class FileWatcher(FileSystemEventHandler):
def __init__(self, path, sheets):
super().__init__()
self._path = Path(path)
self._sheets = sheets
self._observer = Observer()
self._observer.schedule(self, str(self._path.parent))
def _update_sheets(self):
self._sheets.clear()
sheets = pd.read_excel(self._path, None)
for key, val in sheets.items():
if key[:2] == '20':
sheets[key] = process_daily(val)
self._sheets.update(sheets)
def on_created(self, event):
if Path(event.src_path) == self._path:
self._update_sheets()
def run(self):
self._update_sheets()
self._observer.start()
print('running...')
def stop(self):
self._observer.stop()
self._observer.join()
print('stopped')
| 29.547619 | 69 | 0.643836 | [
"MPL-2.0",
"MPL-2.0-no-copyleft-exception"
] | jhrmnn/pybudget | src/budget/watcher.py | 1,241 | Python |
import numpy as np
np.random.seed(0)
import pandas as pd
df1 = pd.DataFrame(
{
"A": ["A0", "A1", "A2", "A3"],
"B": ["B0", "B1", "B2", "B3"],
},
index=[0, 1, 2, 3]
)
df2 = pd.DataFrame(
{
"A": ["A4", "A5", "A6", "A7"],
"B": ["B4", "B5", "B6", "B7"],
},
index=[0, 1, 2, 5]
)
df12_append = df1.append(df2)
print(df12_append)
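# Note added for clarity: DataFrame.append was deprecated in pandas 1.4 and
# removed in pandas 2.0; pd.concat([df1, df2]) produces the same stacked frame.
df12_concat_equivalent = pd.concat([df1, df2])
print(df12_concat_equivalent)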
df_concat = pd.concat([df1, df2], join="inner", ignore_index=True)
print("concatenated:", df_concat)
left = pd.DataFrame(
{
"key": ["K0", "K1", "K2"],
"A": ["A0", "A1", "A2"]
}
)
right = pd.DataFrame(
{
"key": ["K0", "K1", "K2"],
"B": ["B0", "B1", "B2"]
}
)
print(left)
print(right)
print(pd.merge(left, right, on="key"))
left = pd.DataFrame(
{
"A": ["A0", "A1", "A2"],
},
index=["K0", "K1", "K2"]
)
right = pd.DataFrame(
{
"B": ["B0", "B1", "B2"],
},
index=["K0", "K1", "K3"]
)
print(left)
print(right)
print(left.join(right, how="outer"))
| 16.655738 | 66 | 0.451772 | [
"MIT"
] | tkaya94/UdemyDataScience | 3_Pandas/ConcatAppendJoin.py | 1,016 | Python |
import os
from utils import yaml_stream
from sqlalchemy import Table
def importyaml(connection, metadata, source_path):
activityIDs = {
"copying": 5,
"manufacturing": 1,
"research_material": 4,
"research_time": 3,
"invention": 8,
"reaction": 11
}
industryBlueprints = Table('industryBlueprints', metadata)
industryActivity = Table('industryActivity', metadata)
industryActivityMaterials = Table('industryActivityMaterials', metadata)
industryActivityProducts = Table('industryActivityProducts', metadata)
industryActivitySkills = Table('industryActivitySkills', metadata)
industryActivityProbabilities = Table('industryActivityProbabilities', metadata)
print("Importing Blueprints")
trans = connection.begin()
with open(
os.path.join(source_path, 'fsd', 'blueprints.yaml'), 'r'
) as yamlstream:
for blueprint in yaml_stream.read_by_any(yamlstream):
for blueprint_id, blueprint_details in blueprint.items():
connection.execute(
industryBlueprints.insert(),
typeID=blueprint_id,
maxProductionLimit=blueprint_details["maxProductionLimit"]
)
for activity in blueprint_details['activities']:
connection.execute(
industryActivity.insert(),
typeID=blueprint_id,
activityID=activityIDs[activity],
time=blueprint_details['activities'][activity]['time']
)
if 'materials' in blueprint_details['activities'][activity]:
for material in blueprint_details['activities'][activity]['materials']:
connection.execute(
industryActivityMaterials.insert(),
typeID=blueprint_id,
activityID=activityIDs[activity],
materialTypeID=material['typeID'],
quantity=material['quantity']
)
if 'products' in blueprint_details['activities'][activity]:
for product in blueprint_details['activities'][activity]['products']:
connection.execute(
industryActivityProducts.insert(),
typeID=blueprint_id,
activityID=activityIDs[activity],
productTypeID=product['typeID'],
quantity=product['quantity']
)
if 'probability' in product:
connection.execute(
industryActivityProbabilities.insert(),
typeID=blueprint_id,
activityID=activityIDs[activity],
productTypeID=product['typeID'],
probability=product['probability']
)
try:
if 'skills' in blueprint_details['activities'][activity]:
for skill in blueprint_details['activities'][activity]['skills']:
connection.execute(
industryActivitySkills.insert(),
typeID=blueprint_id,
activityID=activityIDs[activity],
skillID=skill['typeID'],
level=skill['level']
)
except:
print('{} has a bad skill'.format(blueprint_id))
trans.commit()
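# Illustrative addition (not part of the original yamlloader module): a hedged
# sketch of driving importyaml on its own. The SQLite URL and the SDE dump
# path are made-up placeholders, and the industry* tables are assumed to have
# been created already (the real loader prepares the engine and metadata
# elsewhere).
def _import_blueprints_example(sde_path='sde'):
    from sqlalchemy import create_engine, MetaData
    engine = create_engine('sqlite:///sde.sqlite')
    metadata = MetaData()
    metadata.reflect(bind=engine)  # pick up the pre-created industry* tables
    with engine.connect() as connection:
        importyaml(connection, metadata, sde_path)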
| 46.635294 | 95 | 0.494198 | [
"MIT"
] | Caffe1neAdd1ct/yamlloader | tableloader/tableFunctions/blueprints.py | 3,964 | Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from runner.koan import *
class AboutTuples(Koan):
def test_creating_a_tuple(self):
count_of_three = (1, 2, 5)
self.assertEqual(5, count_of_three[2])
def test_tuples_are_immutable_so_item_assignment_is_not_possible(self):
count_of_three = (1, 2, 5)
try:
count_of_three[2] = "three"
except TypeError as ex:
msg = ex.args[0]
# Note, assertRegexpMatches() uses regular expression pattern matching,
# so you don't have to copy the whole message.
self.assertRegexpMatches(msg,"'tuple' object does not support item assignment")
def test_tuples_are_immutable_so_appending_is_not_possible(self):
count_of_three = (1, 2, 5)
with self.assertRaises(AttributeError): count_of_three.append("boom")
# Tuples are less flexible than lists, but faster.
def test_tuples_can_only_be_changed_through_replacement(self):
count_of_three = (1, 2, 5)
list_count = list(count_of_three)
list_count.append("boom")
count_of_three = tuple(list_count)
self.assertEqual((1,2,5,"boom"), count_of_three)
def test_tuples_of_one_look_peculiar(self):
self.assertEqual(int, (1).__class__)
self.assertEqual(tuple, (1,).__class__)
self.assertEqual(tuple, ("Hello comma!", ).__class__)
def test_tuple_constructor_can_be_surprising(self):
self.assertEqual(('S', 'u', 'r', 'p', 'r', 'i', 's', 'e', '!'), tuple("Surprise!"))
def test_creating_empty_tuples(self):
self.assertEqual(tuple(), ())
self.assertEqual((), tuple()) #Sometimes less confusing
def test_tuples_can_be_embedded(self):
lat = (37, 14, 6, 'N')
lon = (115, 48, 40, 'W')
place = ('Area 51', lat, lon)
self.assertEqual(('Area 51', (37, 14, 6, 'N'), (115, 48, 40, 'W')), place)
def test_tuples_are_good_for_representing_records(self):
locations = [
("Illuminati HQ", (38, 52, 15.56, 'N'), (77, 3, 21.46, 'W')),
("Stargate B", (41, 10, 43.92, 'N'), (1, 49, 34.29, 'W')),
]
locations.append( ("Cthulu", (26, 40, 1, 'N'), (70, 45, 7, 'W')) )
self.assertEqual("Cthulu", locations[2][0])
self.assertEqual(15.56, locations[0][1][2])
| 34.014493 | 91 | 0.611419 | [
"MIT"
] | Oulanos/Python_Koans | python3/koans/about_tuples.py | 2,347 | Python |
# Generated by Django 2.2.1 on 2019-05-17 22:55
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('wells', '0088_fieldsprovided'),
('wells', '0088_remove_well_ems_id'),
]
operations = [
]
| 17.666667 | 47 | 0.637736 | [
"Apache-2.0"
] | bcgov/gwells | app/backend/wells/migrations/0089_merge_20190517_2255.py | 265 | Python |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
from django.contrib.contenttypes import generic
class Migration(SchemaMigration):
def forwards(self, orm):
# renaming ExperimentACL to 'ObjectACL'
db.rename_table('tardis_portal_experimentacl',
'tardis_portal_objectacl')
# changing experiment to content_types
db.add_column('tardis_portal_objectacl',
'content_type',
self.gf('django.db.models.fields.related.ForeignKey')(
to=orm['contenttypes.ContentType'],
default=1))
db.add_column('tardis_portal_objectacl',
'object_id',
self.gf(
'django.db.models.fields.PositiveIntegerField')(
default=1))
gfk = generic.GenericForeignKey('content_type', 'object_id')
gfk.contribute_to_class(orm.ObjectACL, "content_object")
def backwards(self, orm):
# renaming ExperimentACL to 'ObjectACL'
pass
#raise RuntimeError('Cannot reverse this migration.')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'tardis_portal.author_experiment': {
'Meta': {'ordering': "['order']", 'unique_together': "(('experiment', 'author'),)", 'object_name': 'Author_Experiment'},
'author': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'experiment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tardis_portal.Experiment']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'order': ('django.db.models.fields.PositiveIntegerField', [], {}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '2000', 'blank': 'True'})
},
'tardis_portal.datafileparameter': {
'Meta': {'ordering': "['name']", 'object_name': 'DatafileParameter'},
'datetime_value': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tardis_portal.ParameterName']"}),
'numerical_value': ('django.db.models.fields.FloatField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'parameterset': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tardis_portal.DatafileParameterSet']"}),
'string_value': ('django.db.models.fields.TextField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'})
},
'tardis_portal.datafileparameterset': {
'Meta': {'ordering': "['id']", 'object_name': 'DatafileParameterSet'},
'dataset_file': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tardis_portal.Dataset_File']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'schema': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tardis_portal.Schema']"})
},
'tardis_portal.dataset': {
'Meta': {'ordering': "['-id']", 'object_name': 'Dataset'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'experiments': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'datasets'", 'symmetrical': 'False', 'to': "orm['tardis_portal.Experiment']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'immutable': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'tardis_portal.dataset_file': {
'Meta': {'ordering': "['filename']", 'object_name': 'Dataset_File'},
'created_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'dataset': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tardis_portal.Dataset']"}),
'filename': ('django.db.models.fields.CharField', [], {'max_length': '400'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'md5sum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'mimetype': ('django.db.models.fields.CharField', [], {'max_length': '80', 'blank': 'True'}),
'modification_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'sha512sum': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'size': ('django.db.models.fields.CharField', [], {'max_length': '400', 'blank': 'True'})
},
'tardis_portal.datasetparameter': {
'Meta': {'ordering': "['name']", 'object_name': 'DatasetParameter'},
'datetime_value': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tardis_portal.ParameterName']"}),
'numerical_value': ('django.db.models.fields.FloatField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'parameterset': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tardis_portal.DatasetParameterSet']"}),
'string_value': ('django.db.models.fields.TextField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'})
},
'tardis_portal.datasetparameterset': {
'Meta': {'ordering': "['id']", 'object_name': 'DatasetParameterSet'},
'dataset': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tardis_portal.Dataset']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'schema': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tardis_portal.Schema']"})
},
'tardis_portal.experiment': {
'Meta': {'object_name': 'Experiment'},
'approved': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'created_time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'end_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'handle': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'institution_name': ('django.db.models.fields.CharField', [], {'default': "'Monash University'", 'max_length': '400'}),
'license': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tardis_portal.License']", 'null': 'True', 'blank': 'True'}),
'locked': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'public_access': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '1'}),
'start_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '400'}),
'update_time': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
},
'tardis_portal.experimentparameter': {
'Meta': {'ordering': "['name']", 'object_name': 'ExperimentParameter'},
'datetime_value': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tardis_portal.ParameterName']"}),
'numerical_value': ('django.db.models.fields.FloatField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'parameterset': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tardis_portal.ExperimentParameterSet']"}),
'string_value': ('django.db.models.fields.TextField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'})
},
'tardis_portal.experimentparameterset': {
'Meta': {'ordering': "['id']", 'object_name': 'ExperimentParameterSet'},
'experiment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tardis_portal.Experiment']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'schema': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tardis_portal.Schema']"})
},
'tardis_portal.freetextsearchfield': {
'Meta': {'object_name': 'FreeTextSearchField'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'parameter_name': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tardis_portal.ParameterName']"})
},
'tardis_portal.groupadmin': {
'Meta': {'object_name': 'GroupAdmin'},
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'tardis_portal.license': {
'Meta': {'object_name': 'License'},
'allows_distribution': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image_url': ('django.db.models.fields.URLField', [], {'max_length': '2000', 'blank': 'True'}),
'internal_description': ('django.db.models.fields.TextField', [], {}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '400'}),
'url': ('django.db.models.fields.URLField', [], {'unique': 'True', 'max_length': '2000'})
},
'tardis_portal.location': {
'Meta': {'object_name': 'Location'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_available': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '10'}),
'priority': ('django.db.models.fields.IntegerField', [], {}),
'transfer_provider': ('django.db.models.fields.CharField', [], {'default': "'local'", 'max_length': '10'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'url': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '400'})
},
'tardis_portal.objectacl': {
'Meta': {'ordering': "['content_type', 'object_id']", 'object_name': 'ObjectACL'},
'aclOwnershipType': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'canDelete': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'canRead': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'canWrite': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'effectiveDate': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'entityId': ('django.db.models.fields.CharField', [], {'max_length': '320'}),
'experiment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tardis_portal.Experiment']"}),
'expiryDate': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'isOwner': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'pluginId': ('django.db.models.fields.CharField', [], {'max_length': '30'})
},
'tardis_portal.parametername': {
'Meta': {'ordering': "('order', 'name')", 'unique_together': "(('schema', 'name'),)", 'object_name': 'ParameterName'},
'choices': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'}),
'comparison_type': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'data_type': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
'full_name': ('django.db.models.fields.CharField', [], {'max_length': '60'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'immutable': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_searchable': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '60'}),
'order': ('django.db.models.fields.PositiveIntegerField', [], {'default': '9999', 'null': 'True', 'blank': 'True'}),
'schema': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tardis_portal.Schema']"}),
'units': ('django.db.models.fields.CharField', [], {'max_length': '60', 'blank': 'True'})
},
'tardis_portal.providerparameter': {
'Meta': {'unique_together': "(('location', 'name'),)", 'object_name': 'ProviderParameter'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tardis_portal.Location']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '80', 'blank': 'True'})
},
'tardis_portal.replica': {
'Meta': {'unique_together': "(('datafile', 'location'),)", 'object_name': 'Replica'},
'datafile': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tardis_portal.Dataset_File']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tardis_portal.Location']"}),
'protocol': ('django.db.models.fields.CharField', [], {'max_length': '10', 'blank': 'True'}),
'stay_remote': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'url': ('django.db.models.fields.CharField', [], {'max_length': '400'}),
'verified': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'tardis_portal.schema': {
'Meta': {'object_name': 'Schema'},
'hidden': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'immutable': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'namespace': ('django.db.models.fields.URLField', [], {'unique': 'True', 'max_length': '255'}),
'subtype': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True', 'blank': 'True'}),
'type': ('django.db.models.fields.IntegerField', [], {'default': '1'})
},
'tardis_portal.token': {
'Meta': {'object_name': 'Token'},
'experiment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tardis_portal.Experiment']"}),
'expiry_date': ('django.db.models.fields.DateField', [], {'default': 'datetime.datetime(2013, 7, 18, 0, 0)'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'token': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'tardis_portal.userauthentication': {
'Meta': {'object_name': 'UserAuthentication'},
'authenticationMethod': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'userProfile': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tardis_portal.UserProfile']"}),
'username': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'tardis_portal.userprofile': {
'Meta': {'object_name': 'UserProfile'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'isDjangoAccount': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'unique': 'True'})
}
}
complete_apps = ['tardis_portal']
| 76.363636 | 182 | 0.562762 | [
"Apache-2.0"
] | nrmay/mytardis | tardis/tardis_portal/south_migrations/0022_object_acl_1.py | 21,000 | Python |
from features.models import Attempt, Feature
from jury.models import JudgeRequestAssigment, JudgeRequest
from teams.models import Team
for team in Team.objects.all():
for feature in Feature.objects.all():
request = JudgeRequest.objects.filter(team=team, feature=feature).last()
total, num = 0, 0
if request is not None:
for assignee in request.assignees.all():
if assignee.score is not None:
total += assignee.score
num += 1
if num > 0:
attempt = Attempt.objects.get(team=team, feature=feature)
if total/num != attempt.score:
print("Inconsistent score for {}, feature {}: total_score: {}, "
"number of judges: {}, "
"attempt score: {}".format(team, feature, total, num, attempt.score))
else:
if Attempt.objects.filter(team=team, feature=feature).exists():
print("Out of date attempt for {}, feature {}".format(team, feature))
| 45.695652 | 91 | 0.584206 | [
"MIT"
] | altostratous/gatuino-scoreboard | scripts/check_attempts.py | 1,051 | Python |
import discord
from discord.ext import commands
import asyncio
import wolframalpha
from aiohttp import ClientSession
from html2text import html2text
from random import choice, randint
from re import sub
#setup wolframalpha API
client = wolframalpha.Client(open('WA_KEY').readline().rstrip())
class Api(commands.Cog):
"""Get random cute pics"""
def __init__(self, bot):
self.bot = bot
self.colours = [0x1abc9c, 0x11806a, 0x2ecc71, 0x1f8b4c, 0x3498db, 0x206694, 0x9b59b6, 0x71368a, 0xe91e63, 0xad1457, 0xf1c40f, 0xc27c0e, 0xa84300, 0xe74c3c, 0x992d22, 0x95a5a6, 0x607d8b, 0x979c9f, 0x546e7a]
@commands.command(name='ask',
description="replies to a query with the short text answer of the wolfram alpha API",
brief="wolfram alpha API")
async def ask(self, ctx, *, query):
res = client.query(query)
if res['@success'] == 'false':
strRes = "Couldn't find an answer"
else:
strRes = next(res.results).text
embed = discord.Embed(
title=query,
description=strRes,
color=self.bot.embed_color)
await ctx.send(embed=embed)
@commands.command(name='dog',
description="send random dog picture",
brief="send dog pic",
aliases=['auau'])
async def dog(self, ctx):
while True:
result, error = await get_json('https://random.dog/woof.json')
if error:
await ctx.send(error)
return
if result['url'].endswith('.mp4'):
pass
else:
embed = discord.Embed(color=choice(self.colours))
embed.set_image(url=result['url'])
await ctx.send(embed=embed)
return
@commands.command(name='cat',
description="send random cat picture",
brief="send cat pic",
aliases=['antiauau', 'miau'])
async def cat(self, ctx):
result, error = await get_json('http://aws.random.cat/meow')
if error:
await ctx.send(error)
return
embed = discord.Embed(color=choice(self.colours))
embed.set_image(url=result['file'])
await ctx.send(embed=embed)
@commands.command(name='xkcd',
brief="send xkcd comic")
async def xkcd(self, ctx, args = None):
"""
send xkcd comic
*xkcd -> sends newest comic
*xkcd random -> sends random comic
*xkcd [number] -> sends a specific comic
"""
url = None
if not args:
url = 'http://xkcd.com/info.0.json'
elif args.isdigit():
url = f'http://xkcd.com/{int(args)}/info.0.json'
elif args.lower() == 'random':
result, error = await get_json('http://xkcd.com/info.0.json')
if error:
await ctx.send(error)
return
number = randint(0, result['num'])
url = f'http://xkcd.com/{number}/info.0.json'
result, error = await get_json(url)
if error:
await ctx.send(error)
return
embed = discord.Embed(color=choice(self.colours))
embed.set_image(url=result['img'])
await ctx.send(embed=embed)
@commands.command(name='lmgtfy',
description="give link for let me google that for you",
brief="let me google that for you")
async def lmgtfy(self, ctx, *query):
await ctx.send(f"http://lmgtfy.com/?q={'+'.join(query)}")
@commands.command(name='lmddgtfy',
description="give link for let me duck duck go that for you",
brief="let me duck duck go that for you")
async def lmddgtfy(self, ctx, *query):
await ctx.send(f"http://lmddgtfy.net/?q={'%20'.join(query)}")
@commands.command(name='urban',
description="Get a urban defenition of a query",
brief="search urban")
async def urban(self, ctx, * query : str):
url = f"http://api.urbandictionary.com/v0/define?term={'+'.join(query)}"
result, error = await get_json(url)
if error:
await ctx.send(error)
return
if result["list"]:
top_def = result['list'][0]
embed = discord.Embed(
title=f"Definition of {top_def['word']}",
url=top_def['permalink'],
description=top_def['definition'],
color=self.bot.embed_color)
embed.set_thumbnail(
url = "http://campbelllawobserver.com/wp-content/uploads/2014/03/Urban-Dictionary-e1372286057646.png")
embed.add_field(
name="Example",
value=top_def['example'],
inline=False)
embed.add_field(
name=":thumbsup:",
value=top_def['thumbs_up'],
inline=True)
embed.add_field(
name=":thumbsdown:",
value=top_def['thumbs_down'],
inline=True)
            embed.set_footer(text=f"Submitted by {top_def['author']}")
await ctx.send(embed =embed)
else:
await ctx.send("Your query gave no results.")
@commands.command(name='hoogle',
brief="search hoogle")
    async def hoogle(self, ctx, * query : str):
        """Searches Hoogle and returns the first result
        Click title to see the full search"""
url = f"https://hoogle.haskell.org?mode=json&hoogle={'+'.join(query)}&start=1&count=1"
result, error = await get_json(url)
if error:
await ctx.send(error)
return
embed = discord.Embed(
title=f"Definition of {' '.join(query)}",
url=f"https://hoogle.haskell.org/?hoogle={'+'.join(query)}",
color=self.bot.embed_color)
embed.set_thumbnail(
url = "https://upload.wikimedia.org/wikipedia/commons/thumb/c/c3/Lambda-letter-lowercase-symbol-Garamond.svg/1200px-Lambda-letter-lowercase-symbol-Garamond.svg.png")
if not result:
embed.add_field(
name = "No results found",
value="*undefined*",
inline=False)
else:
for l in result:
val = "*Module:* " + l["module"]["name"] + "\n"
val+= sub(r'\n{2,}', '\n\n', sub(r"\n +", "\n" , html2text(l["docs"])))
embed.add_field(
name= html2text(l["item"]),
value= val,
inline=False)
embed.set_footer(text="first option in Hoogle (Click title for more)")
await ctx.send(embed=embed)
async def get_json(url):
try:
async with ClientSession() as session:
async with session.get(url) as response:
result = await response.json()
return result, None
except:
return None, "Something unexpected went wrong."
def setup(bot):
bot.add_cog(Api(bot))
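# A minimal loading sketch, assuming a discord.py 1.x style bot using the '*'
# prefix referenced in the help texts above; the 'DISCORD_TOKEN' file name is
# an assumption for the example, not part of this cog.
def _example_run_bot():
    example_bot = commands.Bot(command_prefix='*')
    example_bot.load_extension('extensions.api')
    example_bot.run(open('DISCORD_TOKEN').readline().rstrip())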
| 37.523316 | 213 | 0.541011 | [
"MIT"
] | JoseFilipeFerreira/JBB.py | extensions/api.py | 7,242 | Python |
"""
Copyright 2020 Qiniu Cloud (qiniu.com)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import sys
from .spl_packet_utils import *
from .spl_base_command import SplBaseCommand
class SplStreamingBatchCommand(SplBaseCommand):
def process_data(self, argv=None, input_stream=sys.stdin.buffer, output_stream=sys.__stdout__.buffer):
while True:
execute_meta = self.process_protocol_execute(input_stream)
resp = self.streaming_handle(self.lines)
send_packet(output_stream, execute_meta, resp)
self.lines = []
if self.is_finish:
break
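# A minimal subclass sketch (illustrative only): the exact record format and the
# value streaming_handle is expected to return are defined by SplBaseCommand and
# send_packet, so the pass-through below is an assumption, not an SDK guarantee.
class EchoStreamingBatchCommand(SplStreamingBatchCommand):
    def streaming_handle(self, lines):
        # Return the buffered records unchanged.
        return lines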
| 37.827586 | 106 | 0.741112 | [
"Apache-2.0"
] | Billseeyao/pandora-python-sdk.v2 | pdr_python_sdk/pdr_python_sdk/spl/spl_streaming_batch_command.py | 1,097 | Python |
import argparse
import configparser
import json
import os
import sys
import time
import requests
sys.path.append(os.path.join(os.path.dirname(__file__), ".."))
from util import logger
class ConversationScraper:
"""Scraper that retrieves, process and stores all messages belonging to a specific Facebook conversation"""
REQUEST_WAIT = 10
ERROR_WAIT = 30
CONVERSATION_ENDMARK = "end_of_history"
def __init__(self, convID, cookie, fb_dtsg, outDir):
self._directory = os.path.join(outDir,str(convID))
self._convID = convID
self._cookie = cookie
self._fb_dtsg = fb_dtsg
"""
POST Request full form data
(<ids_type> is "thread_fbids" for group conversations, "user_ids" otherwise)
"messages[<ids_type>][][offset]": "",
"messages[<ids_type>][][timestamp]": "",
"messages[<ids_type>][][]": "",
"client": "",
"__user": "",
"__a": "",
"__dyn": "",
"__req": "",
"fb_dtsg": "",
"ttstamp": "",
"__rev": ""
"""
def generateRequestData(self, offset, timestamp, chunkSize, isGroupConversation=False):
"""Generate the data for the POST request.
:return: the generated data
"""
ids_type = "thread_fbids" if isGroupConversation else "user_ids"
dataForm = {"messages[{}][{}][offset]".format(ids_type, self._convID) : str(offset),
"messages[{}][{}][timestamp]".format(ids_type, self._convID): timestamp,
"messages[{}][{}][limit]".format(ids_type, self._convID): str(chunkSize),
"client": "web_messenger",
"__a": "",
"__dyn": "",
"__req": "",
"fb_dtsg": self._fb_dtsg}
return dataForm
"""
POST Request all header:
"Host": "www.facebook.com",
"Origin": "https://www.facebook.com",
"Referer": "https://www.facebook.com",
"accept-encoding": "gzip,deflate",
"accept-language": "en-US,en;q=0.8",
"cookie": "",
"pragma": "no-cache",
"user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/37.0.2062.122 Safari/537.36",
"content-type": "application/x-www-form-urlencoded",
"accept": "*/*",
"cache-control": "no-cache"
"""
    def executeRequest(self, requestData):
        """Executes the POST request and retrieves the corresponding response content.
Request headers are generated here
:return: the response content
"""
headers = {"Host": "www.facebook.com",
"Origin":"https://www.facebook.com",
"Referer":"https://www.facebook.com",
"accept-encoding": "gzip,deflate",
"accept-language": "en-US,en;q=0.8",
"cookie": self._cookie,
"pragma": "no-cache",
"user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/37.0.2062.122 Safari/537.36",
"content-type": "application/x-www-form-urlencoded",
"accept": "*/*",
"cache-control": "no-cache"}
url = "https://www.facebook.com/ajax/mercury/thread_info.php"
start = time.time()
response = requests.post(url, data=requestData, headers=headers)
end = time.time()
logger.info("Retrieved in {0:.2f}s".format(end-start))
#Remove additional leading characters
msgsData = response.text[9:]
return msgsData
def writeMessages(self, messages):
with open(os.path.join(self._directory,"conversation.json"), 'w') as conv:
conv.write(json.dumps(messages))
command = "python -mjson.tool " + os.path.join(self._directory, "conversation.json") + " > " + os.path.join(self._directory, "conversation.pretty.json")
os.system(command)
def scrapeConversation(self, merge, offset, timestampOffset, chunkSize, limit, isGroupConversation):
"""Retrieves conversation messages and stores them in a JSON file
If merge is specified, the new messages will be merged with the previous version of the conversation, if present
"""
if merge:
if not os.path.exists(os.path.join(self._directory,"conversation.json")):
logger.error("Conversation not present. Merge operation not possible")
return
with open(os.path.join(self._directory,"conversation.json")) as conv:
convMessages = json.load(conv)
numMergedMsgs = 0
if not os.path.exists(self._directory):
os.makedirs(self._directory)
logger.info("Starting scraping of conversation {}".format(self._convID))
messages = []
msgsData = ""
timestamp = "" if timestampOffset == 0 else str(timestampOffset)
while self.CONVERSATION_ENDMARK not in msgsData:
requestChunkSize = chunkSize if limit <= 0 else min(chunkSize, limit-len(messages))
reqData = self.generateRequestData(offset, timestamp, requestChunkSize, isGroupConversation)
logger.info("Retrieving messages {}-{}".format(offset, requestChunkSize+offset))
msgsData = self.executeRequest(reqData)
jsonData = json.loads(msgsData)
if jsonData and ('payload' in jsonData) and jsonData['payload']:
if ('actions' in jsonData['payload']) and jsonData['payload']['actions']:
actions = jsonData['payload']['actions']
                    #case when the last message already present in the conversation
                    #is newer than the first one of the currently retrieved chunk
if merge and convMessages[-1]["timestamp"] > actions[0]["timestamp"]:
for i, action in enumerate(actions):
if convMessages[-1]["timestamp"] == actions[i]["timestamp"]:
numMergedMsgs = len(actions[i+1:-1]) + len(messages)
messages = convMessages + actions[i+1:-1] + messages
break
break
                    #We retrieve one message twice: as the first one of the previous chunk
                    #and as the last one of the new one. Remove the duplicate here,
                    #but only once at least one chunk has already been retrieved
if len(messages) == 0:
messages = actions
else:
messages = actions[:-1] + messages
#update timestamp
timestamp = str(actions[0]["timestamp"])
else:
if 'errorSummary' in jsonData:
logger.error("Response error: " + jsonData['errorSummary'])
else:
logger.error("Response error. No messages found")
logger.error(msgsData)
return
else:
logger.error("Response error. Empty data or payload")
logger.error(msgsData)
logger.info("Retrying in {} seconds".format(self.ERROR_WAIT))
time.sleep(self.ERROR_WAIT)
continue
offset += chunkSize
            if limit != 0 and len(messages) >= limit:
break
time.sleep(self.REQUEST_WAIT)
if merge:
logger.info("Successfully merged {} new messages".format(numMergedMsgs))
logger.info("Conversation total message count = {}".format(len(messages)))
else:
logger.info("Conversation scraped successfully. {} messages retrieved".format(len(messages)))
self.writeMessages(messages)
def main(args=None):
parser = argparse.ArgumentParser(description='Conversation Scraper')
parser.add_argument('--id', metavar='conversationID', dest='convID', required=True)
parser.add_argument('--size', metavar='chunkSize', type=int, dest='chunkSize', default=2000,
help="number of messages to retrieve for each request")
#TODO not working, the timestamp seems the only relevant parameter
parser.add_argument('--off', metavar='offset', type=int, dest='offset', default=0,
help="messages number scraping offset")
#TODO to test, ??better single var
parser.add_argument('--date', metavar='offset', type=int, dest='timestampOffset', default=0,
help="messages timestamp scraping offset, has precedence over messages number offset")
parser.add_argument('--limit', type=int, dest='limit', default=0,
help="number of messages to be retrieved")
#Tells the program to try to merge the new messages with the previously scraped conversation
#avoid the need to scrape it all from the beginning
parser.add_argument('-m', dest='merge', action='store_true',
help="merge the new messages with previously scraped conversation")
parser.add_argument('-g', dest='isGroupConversation', action='store_true',
help="specify if you want to scrape a group conversation")
parser.set_defaults(merge=False)
baseFolderPath = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir, os.pardir))
parser.add_argument('--out', metavar='outputDir', dest='outDir',
default=os.path.join(baseFolderPath, 'Messages'))
parser.add_argument('--conf', metavar='configFilepath', dest='configFilepath',
default=os.path.join(baseFolderPath, 'config.ini'))
args = parser.parse_args()
convID = args.convID
chunkSize = args.chunkSize
timestampOffset = args.timestampOffset
offset = args.offset
limit = args.limit
merge = args.merge
isGroupConversation = args.isGroupConversation
outDir = args.outDir
configFilepath = args.configFilepath
DATA_SECTION = "User Data"
config = configparser.ConfigParser(interpolation=None)
config.read(configFilepath)
cookie = config.get(DATA_SECTION, "Cookie")
fb_dtsg = config.get(DATA_SECTION, "Fb_dtsg")
scraper = ConversationScraper(convID, cookie, fb_dtsg, outDir)
scraper.scrapeConversation(merge, offset, timestampOffset, chunkSize, limit, isGroupConversation)
if __name__ == "__main__":
main(sys.argv[1:]) | 44.210084 | 161 | 0.599316 | [
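# A minimal programmatic sketch: the conversation id, cookie and fb_dtsg values
# below are placeholders (real ones come from a logged-in session / config.ini).
def _example_scrape():
    scraper = ConversationScraper("1234567890", "<cookie>", "<fb_dtsg>", "Messages")
    scraper.scrapeConversation(merge=False, offset=0, timestampOffset=0,
                               chunkSize=2000, limit=0, isGroupConversation=False)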
"Apache-2.0"
] | 5agado/conversation-analyzer | src/util/conversationScraper.py | 10,522 | Python |
# 033
# Ask the user to enter two numbers. Use whole number division to divide
# the first number by the second and also work out the remainder and
# display the answer in a user-friendly way
# (e.g. if they enter 7 and 2 display “7 divided by 2 is 3
# with 1 remaining”).
from typing import List
def check_num_list(prompt: str, max_length: int = 0,
                   min_length: int = 0) -> List[float]:
    """Function to check that the user's input is numeric, splitting the input
    on spaces, checking that the correct number of values was entered,
    and returning them as a list of floats"""
while True:
try:
num = input(prompt)
num = num.split(' ')
if min_length:
assert len(num) >= min_length, f'Please enter at least' \
f' {min_length} numbers'
if max_length:
assert len(num) <= max_length, f'Please enter no more ' \
f'than {max_length} ' \
f'numbers'
for index, value in enumerate(num):
num[index] = float(value)
return num
except Exception as e:
print(e)
def integer_devision_remainder(nums: List[float]) -> str:
return f'{nums[0]} / {nums[1]} = {nums[0] // nums[1]} with' \
f' a remainder of {nums[0] % nums[1]}'
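# A quick worked check of the formatter, mirroring the "7 and 2" example from
# the task description (floats, because check_num_list converts to float):
def _example_seven_divided_by_two() -> str:
    # Returns '7.0 / 2.0 = 3.0 with a remainder of 1.0'
    return integer_devision_remainder([7.0, 2.0])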
if __name__ == '__main__':
    print('This program will show you the floor division and remainder '
          'of two numbers')
request = 'Enter two numbers:'
print(integer_devision_remainder(check_num_list(request, 2, 2)))
| 33.959184 | 73 | 0.5625 | [
"Apache-2.0"
] | DGrifferty/Python | 150-Challenges/Challenges 27 - 34/Challenge 33.py | 1,668 | Python |
from django.urls import path
from .views import (
PostListView,
PostDetailView,
PostCreateView,
PostUpdateView,
PostDeleteView,
UserPostListView
)
from . import views
urlpatterns = [
path('', PostListView.as_view(), name = 'blog-home'),
path('user/<str:username>', UserPostListView.as_view(), name = 'user-posts'),
path('post/<int:pk>/', PostDetailView.as_view(), name = 'post-detail'),
path('post/new/', PostCreateView.as_view(), name = 'post-create'),
path('post/<int:pk>/update/', PostUpdateView.as_view(), name = 'post-update'),
path('post/<int:pk>/delete/', PostDeleteView.as_view(), name = 'post-delete'),
path('about/', views.about, name = 'blog-about'),
]
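# A minimal reverse-lookup sketch, assuming this URLconf is included at the
# project root without a namespace (so 'post-detail' resolves to '/post/<pk>/'):
def _example_post_detail_url(pk: int) -> str:
    from django.urls import reverse
    return reverse('post-detail', kwargs={'pk': pk})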
| 34.142857 | 82 | 0.659693 | [
"MIT"
] | kgomathi2910/Blog-App | blog/urls.py | 717 | Python |
# Code made for Sergio Andrés Díaz Ariza
# 23 March 2021
# License MIT
# Transport Phenomena: Python Program-Assessment 3.3
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
sns.set()
class Wire_Param:
def Power_from_volt(self, V, R_Km, Dis_insul, Diameter_mm):
As = (np.pi * (Diameter_mm / 1000) ** 2) / 4
R = R_Km * Dis_insul * As / 1000
W = (V ** 2) / R # Power generated = Egen
return W # [w/m^3]
def Param_m(self, h, Kcopper, Diameter_mm):
m = np.sqrt((4 * h * 0.2) / (Kcopper * (Diameter_mm / 1000)))
return m
class Coeff_vector_B:
def Eq_1(self, h, Tsurr, Egen, K, L):
b1 = h * Tsurr - (Egen * (-L / 2)) + (h * (Egen * ((-L / 2) ** 2)) / 2 * K)
return b1
def Eq_2(self, Tsurr):
b2 = Tsurr
return b2
def Eq_3(self):
b3 = 0
return b3
def Eq_4(self):
b4 = 0
return b4
class T_profile:
def T_z_to_L_2(self, Egen, z1, Kcopper, C1, C2):
T = ((-Egen * ((z1) ** 2)) / 2 * Kcopper) + C1 * (z1) + C2
return T
def T_z_from_L_2_to_L(self, Tsurr, C3, C4, Param_m, z2):
T = Tsurr + (C3 * np.exp((Param_m * z2))) + (C4 * np.exp((-Param_m * z2)))
return T
Wire = Wire_Param()
Vect_B = Coeff_vector_B()
Profile = T_profile()
Egen = Wire.Power_from_volt(30e-6, 5.32, 0.2, 2.32) # [W/m^3]
Param_m = Wire.Param_m(15, 386, 2.32)  # Value of L --> must be positive for the calculation
b1 = Vect_B.Eq_1(15, 25 + 273.15, Egen, 386, -0.2)
b2 = Vect_B.Eq_2(25 + 273.15)
b3 = Vect_B.Eq_3()
b4 = Vect_B.Eq_4()
B = np.array([b1, b2, b3, b4])
A = np.array([[-386 + (15 * (-0.2)), 15, 0, 0], [0, 1, -1, -1], [1, 0, -Param_m, Param_m],
[0, 0, (np.exp(Param_m * 0.2)) * ((-386 * Param_m) - 15),
(np.exp(-Param_m * 0.2)) * ((386 * Param_m) - 15)]])
# A_inv = np.linalg.inv(A) ---> We can find the solution directly
C = np.linalg.solve(A, B)
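# Quick sanity check on the solve step (a minimal sketch): the constants in C
# must satisfy the assembled 4x4 system, so the residual of A @ C - B is ~0.
assert np.allclose(A @ C, B)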
# solve for Temperature Profile
T_L_2 = Profile.T_z_to_L_2(Egen, np.arange(-0.2, 0.0, 1e-6), 386, C[0], C[1])
T_L = Profile.T_z_from_L_2_to_L(25 + 273.15, C[2], C[3], Param_m, np.arange(0.0, 0.2, 1e-6))
print(T_L_2)
print(T_L)
plt.figure(1)
plt.plot(np.arange(0, 0.2, 1e-6), T_L_2, label='$0<L<0.2$')
plt.plot(np.arange(0.2, 0.4, 1e-6), T_L, label='$0.2<L<0.4$')
plt.title("Temperature Profile in a Wire\n with Insulated part", fontsize=16)
plt.ylabel("Temperature $[K]$", fontsize=14)
plt.xlabel("Long of wire $[m]$ ", fontsize=14)
plt.legend()
plt.show()
| 27.911111 | 92 | 0.573248 | [
"MIT"
] | Daz-Riza-Seriog/Transport_Phenomena | Heat_Transfer/3.3-Wire_Isolation_Base__Fin.py | 2,514 | Python |
# --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
"""Factory method for easily getting imdbs by name."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
__sets = {}
from ..datasets.pascal_voc import pascal_voc
from ..datasets.coco import coco
from ..datasets.imagenet import imagenet
from ..datasets.vg import vg
import numpy as np
# Set up voc_<year>_<split>
for year in ['2007', '2012']:
for split in ['train', 'val', 'trainval', 'test']:
name = 'voc_{}_{}'.format(year, split)
__sets[name] = (lambda split=split, year=year: pascal_voc(split, year))
# Set up coco_2014_<split>
for year in ['2014']:
for split in ['train', 'val', 'minival', 'valminusminival', 'trainval']:
name = 'coco_{}_{}'.format(year, split)
__sets[name] = (lambda split=split, year=year: coco(split, year))
# Set up coco_2014_cap_<split>
for year in ['2014']:
for split in ['train', 'val', 'capval', 'valminuscapval', 'trainval']:
name = 'coco_{}_{}'.format(year, split)
__sets[name] = (lambda split=split, year=year: coco(split, year))
# Set up coco_2015_<split>
for year in ['2015']:
for split in ['test', 'test-dev']:
name = 'coco_{}_{}'.format(year, split)
__sets[name] = (lambda split=split, year=year: coco(split, year))
# Set up vg_<split>
# for version in ['1600-400-20']:
# for split in ['minitrain', 'train', 'minival', 'val', 'test']:
# name = 'vg_{}_{}'.format(version,split)
# __sets[name] = (lambda split=split,
# version=version: vg(version, split))
for version in ['150-50-20', '150-50-50', '500-150-80',
'750-250-150', '1750-700-450', '1600-400-20']:
for split in ['minitrain', 'smalltrain', 'train',
'minival', 'smallval', 'val', 'test']:
name = 'vg_{}_{}'.format(version, split)
__sets[name] = (lambda split=split, version=version: vg(version, split))
# set up image net.
for split in ['train', 'val', 'val1', 'val2', 'test']:
name = 'imagenet_{}'.format(split)
devkit_path = 'data/imagenet/ILSVRC/devkit'
data_path = 'data/imagenet/ILSVRC'
__sets[name] = (lambda split=split, devkit_path=devkit_path,
data_path=data_path: imagenet(split, devkit_path, data_path))
def get_imdb(name):
"""Get an imdb (image database) by name."""
if name not in __sets:
raise KeyError('Unknown dataset: {}'.format(name))
return __sets[name]()
def list_imdbs():
"""List all registered imdbs."""
return list(__sets.keys())
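# A minimal lookup sketch: 'voc_2007_trainval' follows the voc_<year>_<split>
# pattern registered above (actually building it requires the VOC data on disk).
def _example_get_voc():
  return get_imdb('voc_2007_trainval')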
| 35.272727 | 79 | 0.625552 | [
"BSD-2-Clause"
] | wangvation/torch-mobilenet | lib/datasets/factory.py | 2,716 | Python |
import platform
from re import I
import sys
from pprint import pp, pprint
if platform.system() == "Windows":
filename = ".\\packages\\react-app\\src\\App.jsx"
else:
filename = "./packages/react-app/src/App.jsx"
networkStruct = {
"localhost": "localhost",
"mainnet": "mainnet",
"kovan": "kovan",
"rinkeby": "rinkeby",
"ropsten": "ropsten",
"goerli": "goerli",
"xdai": "xdai",
"matic": "matic",
"mumbai": "mumbai",
"localArbitrum": "localArbitrum",
"localArbitrumL1": "localArbitrumL1",
"rinkebyArbitrum": "rinkebyArbitrum",
"arbitrum": "arbitrum",
"localOptimismL1": "localOptimismL1",
"localOptimism": "localOptimism",
"kovanOptimism": "kovanOptimism",
"optimism": "optimism",
}
def changeNetwork(network):
with open(filename,'+r', encoding='utf-8') as f:
t = f.read()
t = t.replace('const targetNetwork = NETWORKS.ropsten;', 'const targetNetwork = NETWORKS.{};'.format(network))
f.seek(0, 0)
f.write(t)
f.truncate()
    print("Switching network to: {}".format(network))
if __name__ == "__main__":
    arg = sys.argv[1]
    try:
        network = networkStruct[arg]
    except KeyError:
        print("Please enter a valid network name.")
        print("Valid options:")
        pprint(networkStruct)
        sys.exit(1)
    changeNetwork(network)
| 24.641509 | 118 | 0.606432 | [
"MIT"
] | WeLightProject/Tai-Shang-NFT-Wallet | changeReactAppNetwork.py | 1,344 | Python |
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Script to upload the mappings of Freebase to Wikidata.
Can be easily adapted to upload other String identifiers as well
This bot needs the dump from
https://developers.google.com/freebase/data#freebase-wikidata-mappings
The script takes a single parameter:
-filename: the filename to read the freebase-wikidata mappings from;
default: fb2w.nt.gz
"""
#
# (C) Denny Vrandecic, 2013
# (C) Pywikibot team, 2013-2018
#
# Distributed under the terms of the MIT license.
#
from __future__ import absolute_import, division, unicode_literals
import gzip
import os
import sys
import pywikibot
class FreebaseMapperRobot(object):
"""Freebase Mapping bot."""
def __init__(self, filename):
"""Initializer."""
self.repo = pywikibot.Site('wikidata', 'wikidata').data_repository()
self.filename = filename
if not os.path.exists(self.filename):
pywikibot.output('Cannot find %s. Try providing the absolute path.'
% self.filename)
sys.exit(1)
def run(self):
"""Run the bot."""
# Set up some items we will use a lot.
self.claim = pywikibot.Claim(self.repo, 'P646') # freebase mapping
# And sources!
self.statedin = pywikibot.Claim(self.repo, 'P248') # stated in
# Freebase data dump
freebasedumpitem = pywikibot.ItemPage(self.repo, 'Q15241312')
self.statedin.setTarget(freebasedumpitem)
# date of publication
self.dateofpub = pywikibot.Claim(self.repo, 'P577')
oct28 = pywikibot.WbTime(site=self.repo, year=2013, month=10, day=28,
precision='day')
self.dateofpub.setTarget(oct28)
for line in gzip.open(self.filename):
self.processLine(line.strip())
def processLine(self, line):
"""Process a single line."""
if not line or line.startswith('#'):
return
mid, sameas, qid, dot = line.split()
if sameas != '<https://www.w3.org/2002/07/owl#sameAs>':
return
if dot != '.':
return
if not mid.startswith('<https://rdf.freebase.com/ns/m'):
return
        # keep only the id after '.../ns/m.' and drop the trailing '>'
        mid = '/m/' + mid[31:-1]
if not qid.startswith('<https://www.wikidata.org/entity/Q'):
return
        # keep only the digits after '.../entity/Q' so the 'Q' is not doubled
        qid = 'Q' + qid[34:-1]
data = pywikibot.ItemPage(self.repo, qid)
data.get()
if not data.labels:
label = ''
elif 'en' in data.labels:
label = data.labels['en']
else:
# Just pick up the first label
label = list(data.labels.values())[0]
pywikibot.output('Parsed: {} <--> {}'.format(qid, mid))
pywikibot.output('{} is {}'.format(data.getID(), label))
if data.claims and 'P646' in data.claims:
# We assume that there is only one claim.
# If there are multiple ones, our logs might be wrong
# but the constraint value reports will catch them
if mid != data.claims['P646'][0].getTarget():
pywikibot.output('Mismatch: expected {}, has {} instead'
.format(mid,
data.claims['P646'][0].getTarget()))
else:
pywikibot.output('Already has mid set, is consistent.')
else:
# No claim set, lets add it.
pywikibot.output('Going to add a new claim.')
self.claim.setTarget(mid)
data.addClaim(self.claim)
self.claim.addSources([self.statedin, self.dateofpub])
pywikibot.output('Claim added!')
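# Shape of one mapping line handled by processLine() above (a sketch; the
# mid / Q-id pair here is made up for illustration, not a real mapping):
EXAMPLE_MAPPING_LINE = (
    '<https://rdf.freebase.com/ns/m.0abcde> '
    '<https://www.w3.org/2002/07/owl#sameAs> '
    '<https://www.wikidata.org/entity/Q42> .'
)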
def main(*args):
"""
Process command line arguments and invoke bot.
If args is an empty list, sys.argv is used.
@param args: command line arguments
@type args: str
"""
filename = 'fb2w.nt.gz' # Default filename
for arg in pywikibot.handle_args(args):
if arg.startswith('-filename'):
filename = arg[11:]
bot = FreebaseMapperRobot(filename)
bot.run()
if __name__ == '__main__':
main()
| 32.920635 | 79 | 0.583173 | [
"MIT"
] | 5j9/pywikibot-core | scripts/freebasemappingupload.py | 4,148 | Python |
#!/usr/bin/python3.5
import yaml
import os.path
import logging
logger = logging.getLogger('rsync_backup')
config = None
cmd = '/usr/bin/rsync'
# load the configuration file in
def load_config(filepath):
global config
with open(filepath, "r") as file_descriptor:
        config = yaml.safe_load(file_descriptor)
    logger.debug("Config: %s", config)
# Returns a list of (noexec command, exec command) tuples, one per backup location
def get_commands():
    logger.debug("Config: %s", config)
locations = config['backup']['locations']
commands = []
# Test to see if the source and destination locations exist/are accessible
_test_locations()
# Compile the commands
commands = [(_get_cmd(location['src'],location['dest'], 'noexec'),_get_cmd(location['src'],location['dest'],'exec')) for location in locations]
_print_commands(commands)
return commands
def _get_cmd (source, destination, mode='noexec'):
params = config['backup']['commands'][mode]
noexec_cmd = [cmd]
noexec_cmd += params
noexec_cmd += (source, destination)
#noexec_cmd_str = " ".join(noexec_cmd)
return noexec_cmd
def _test_locations():
locations = config['backup']['locations']
# Test to see if the source and destination locations exist/are accessible
bad_locations = [(os.path.exists(location['src']), location['src']) for location in locations]
bad_locations += ([(os.path.exists(location['dest']), location['dest']) for location in locations])
logger.debug(bad_locations)
for location in bad_locations:
if not location[0]:
logger.debug('Error location ' + location[1] + " does not exist")
if len([location for location in bad_locations if not location[0]]) > 0:
        logger.debug("One or more locations are not accessible, exiting")
#exit()
def _print_commands(commands):
logger.debug("Commands:")
for command_pair in commands:
logger.debug("Noexec: %s", command_pair[0])
logger.debug("Exec: %s", command_pair[1])
def get_log_path():
if config == None: exit("You must load the config first")
return config['backup']['log']
def get_delete_limit():
if config == None: exit("You must load the config first")
return config['backup']['delete_limit']
def get_email_smtp():
if config == None: exit("You must load the config first")
return config['backup']['email']['smtp']
def get_email_smtp_port():
if config == None: exit("You must load the config first")
return config['backup']['email']['smtp_port']
def get_email_username():
if config == None: exit("You must load the config first")
return config['backup']['email']['username']
def get_email_password():
if config == None: exit("You must load the config first")
return config['backup']['email']['password']
def get_email_target():
if config == None: exit("You must load the config first")
return config['backup']['email']['target']
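# A minimal sketch of the YAML structure this module expects (keys inferred
# from the accessors above; every value below is a placeholder):
EXAMPLE_CONFIG_YAML = """
backup:
  log: /var/log/rsync_backup.log
  delete_limit: 100
  locations:
    - src: /home/user/docs/
      dest: /mnt/backup/docs/
  commands:
    noexec: ['-avn', '--delete']
    exec: ['-av', '--delete']
  email:
    smtp: smtp.example.com
    smtp_port: 587
    username: user
    password: secret
    target: admin@example.com
"""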
| 33.340659 | 148 | 0.658866 | [
"Apache-2.0"
] | krcooke/rsync_backup | bin/utils/config.py | 3,034 | Python |
"""
sphinx.domains.cpp
~~~~~~~~~~~~~~~~~~
The C++ language domain.
:copyright: Copyright 2007-2020 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from typing import (
Any, Callable, Dict, Generator, Iterator, List, Tuple, Type, TypeVar, Union
)
from docutils import nodes
from docutils.nodes import Element, Node, TextElement, system_message
from docutils.parsers.rst import directives
from sphinx import addnodes
from sphinx.addnodes import desc_signature, pending_xref
from sphinx.application import Sphinx
from sphinx.builders import Builder
from sphinx.directives import ObjectDescription
from sphinx.domains import Domain, ObjType
from sphinx.environment import BuildEnvironment
from sphinx.errors import NoUri
from sphinx.locale import _, __
from sphinx.roles import SphinxRole, XRefRole
from sphinx.transforms import SphinxTransform
from sphinx.transforms.post_transforms import ReferencesResolver
from sphinx.util import logging
from sphinx.util.cfamily import (
NoOldIdError, ASTBaseBase, ASTAttribute, verify_description_mode, StringifyTransform,
BaseParser, DefinitionError, UnsupportedMultiCharacterCharLiteral,
identifier_re, anon_identifier_re, integer_literal_re, octal_literal_re,
hex_literal_re, binary_literal_re, float_literal_re,
char_literal_re
)
from sphinx.util.docfields import Field, GroupedField
from sphinx.util.docutils import SphinxDirective
from sphinx.util.nodes import make_refnode
logger = logging.getLogger(__name__)
T = TypeVar('T')
"""
Important note on ids
----------------------------------------------------------------------------
Multiple id generation schemes are used due to backwards compatibility.
- v1: 1.2.3 <= version < 1.3
The style used before the rewrite.
It is not the actual old code, but a replication of the behaviour.
- v2: 1.3 <= version < now
Standardised mangling scheme from
https://itanium-cxx-abi.github.io/cxx-abi/abi.html#mangling
though not completely implemented.
All versions are generated and attached to elements. The newest is used for
the index. All of the versions should work as permalinks.
Signature Nodes and Tagnames
----------------------------------------------------------------------------
Each signature is in a desc_signature node, where all children are
desc_signature_line nodes. Each of these lines will have the attribute
'sphinx_line_type' set to one of the following (prioritized):
- 'declarator', if the line contains the name of the declared object.
- 'templateParams', if the line starts a template parameter list,
- 'templateParams', if the line has template parameters
Note: such lines might get a new tag in the future.
    - 'templateIntroduction', if the line is of the form 'conceptName{...}'
No other desc_signature nodes should exist (so far).
Grammar
----------------------------------------------------------------------------
See https://www.nongnu.org/hcb/ for the grammar,
and https://github.com/cplusplus/draft/blob/master/source/grammar.tex,
and https://github.com/cplusplus/concepts-ts
for the newest grammar.
common grammar things:
template-declaration ->
"template" "<" template-parameter-list ">" declaration
template-parameter-list ->
template-parameter
| template-parameter-list "," template-parameter
template-parameter ->
type-parameter
| parameter-declaration # i.e., same as a function argument
type-parameter ->
"class" "..."[opt] identifier[opt]
| "class" identifier[opt] "=" type-id
| "typename" "..."[opt] identifier[opt]
| "typename" identifier[opt] "=" type-id
| "template" "<" template-parameter-list ">"
"class" "..."[opt] identifier[opt]
| "template" "<" template-parameter-list ">"
"class" identifier[opt] "=" id-expression
# also, from C++17 we can have "typename" in template templates
templateDeclPrefix ->
"template" "<" template-parameter-list ">"
simple-declaration ->
attribute-specifier-seq[opt] decl-specifier-seq[opt]
init-declarator-list[opt] ;
# Drop the semi-colon. For now: drop the attributes (TODO).
# Use at most 1 init-declarator.
-> decl-specifier-seq init-declarator
-> decl-specifier-seq declarator initializer
decl-specifier ->
storage-class-specifier ->
( "static" (only for member_object and function_object)
| "extern" (only for member_object and function_object)
| "register"
)
thread_local[opt] (only for member_object)
(it can also appear before the others)
| type-specifier -> trailing-type-specifier
| function-specifier -> "inline" | "virtual" | "explicit" (only
for function_object)
| "friend" (only for function_object)
| "constexpr" (only for member_object and function_object)
trailing-type-specifier ->
simple-type-specifier
| elaborated-type-specifier
| typename-specifier
| cv-qualifier -> "const" | "volatile"
stricter grammar for decl-specifier-seq (with everything, each object
uses a subset):
visibility storage-class-specifier function-specifier "friend"
"constexpr" "volatile" "const" trailing-type-specifier
      # where trailing-type-specifier cannot be a cv-qualifier
      # Inside e.g., template parameters a strict subset is used
# (see type-specifier-seq)
trailing-type-specifier ->
        simple-type-specifier ->
            ::[opt] nested-name-specifier[opt] type-name
          | ::[opt] nested-name-specifier "template" simple-template-id
          | "char" | "bool" | etc.
| decltype-specifier
| elaborated-type-specifier ->
class-key attribute-specifier-seq[opt] ::[opt]
nested-name-specifier[opt] identifier
| class-key ::[opt] nested-name-specifier[opt] template[opt]
simple-template-id
| "enum" ::[opt] nested-name-specifier[opt] identifier
| typename-specifier ->
"typename" ::[opt] nested-name-specifier identifier
| "typename" ::[opt] nested-name-specifier template[opt]
simple-template-id
class-key -> "class" | "struct" | "union"
type-name ->* identifier | simple-template-id
# ignoring attributes and decltype, and then some left-factoring
trailing-type-specifier ->
rest-of-trailing
("class" | "struct" | "union" | "typename") rest-of-trailing
        built-in -> "char" | "bool" | etc.
decltype-specifier
rest-of-trailing -> (with some simplification)
"::"[opt] list-of-elements-separated-by-::
element ->
"template"[opt] identifier ("<" template-argument-list ">")[opt]
template-argument-list ->
template-argument "..."[opt]
| template-argument-list "," template-argument "..."[opt]
template-argument ->
constant-expression
| type-specifier-seq abstract-declarator
| id-expression
declarator ->
ptr-declarator
| noptr-declarator parameters-and-qualifiers trailing-return-type
          (TODO: for now we don't support trailing-return-type)
ptr-declarator ->
noptr-declarator
| ptr-operator ptr-declarator
noptr-declarator ->
declarator-id attribute-specifier-seq[opt] ->
"..."[opt] id-expression
| rest-of-trailing
| noptr-declarator parameters-and-qualifiers
| noptr-declarator "[" constant-expression[opt] "]"
attribute-specifier-seq[opt]
| "(" ptr-declarator ")"
ptr-operator ->
"*" attribute-specifier-seq[opt] cv-qualifier-seq[opt]
| "& attribute-specifier-seq[opt]
| "&&" attribute-specifier-seq[opt]
| "::"[opt] nested-name-specifier "*" attribute-specifier-seq[opt]
cv-qualifier-seq[opt]
# function_object must use a parameters-and-qualifiers, the others may
    # use it (e.g., function pointers)
parameters-and-qualifiers ->
"(" parameter-clause ")" attribute-specifier-seq[opt]
cv-qualifier-seq[opt] ref-qualifier[opt]
exception-specification[opt]
ref-qualifier -> "&" | "&&"
exception-specification ->
"noexcept" ("(" constant-expression ")")[opt]
"throw" ("(" type-id-list ")")[opt]
# TODO: we don't implement attributes
# member functions can have initializers, but we fold them into here
memberFunctionInit -> "=" "0"
# (note: only "0" is allowed as the value, according to the standard,
# right?)
enum-head ->
enum-key attribute-specifier-seq[opt] nested-name-specifier[opt]
identifier enum-base[opt]
enum-key -> "enum" | "enum struct" | "enum class"
enum-base ->
":" type
enumerator-definition ->
identifier
| identifier "=" constant-expression
We additionally add the possibility for specifying the visibility as the
first thing.
concept_object:
goal:
just a declaration of the name (for now)
grammar: only a single template parameter list, and the nested name
may not have any template argument lists
"template" "<" template-parameter-list ">"
nested-name-specifier
type_object:
goal:
either a single type (e.g., "MyClass:Something_T" or a typedef-like
thing (e.g. "Something Something_T" or "int I_arr[]"
grammar, single type: based on a type in a function parameter, but
without a name:
parameter-declaration
-> attribute-specifier-seq[opt] decl-specifier-seq
abstract-declarator[opt]
# Drop the attributes
-> decl-specifier-seq abstract-declarator[opt]
    grammar, typedef-like: no initializer
decl-specifier-seq declarator
Can start with a templateDeclPrefix.
member_object:
goal: as a type_object which must have a declarator, and optionally
    with an initializer
grammar:
decl-specifier-seq declarator initializer
Can start with a templateDeclPrefix.
function_object:
goal: a function declaration, TODO: what about templates? for now: skip
grammar: no initializer
decl-specifier-seq declarator
Can start with a templateDeclPrefix.
class_object:
goal: a class declaration, but with specification of a base class
grammar:
nested-name "final"[opt] (":" base-specifier-list)[opt]
base-specifier-list ->
base-specifier "..."[opt]
| base-specifier-list, base-specifier "..."[opt]
base-specifier ->
base-type-specifier
| "virtual" access-spe"cifier[opt] base-type-specifier
| access-specifier[opt] "virtual"[opt] base-type-specifier
Can start with a templateDeclPrefix.
enum_object:
goal: an unscoped enum or a scoped enum, optionally with the underlying
type specified
grammar:
("class" | "struct")[opt] visibility[opt] nested-name (":" type)[opt]
enumerator_object:
goal: an element in a scoped or unscoped enum. The name should be
injected according to the scopedness.
grammar:
nested-name ("=" constant-expression)
namespace_object:
goal: a directive to put all following declarations in a specific scope
grammar:
nested-name
"""
_string_re = re.compile(r"[LuU8]?('([^'\\]*(?:\\.[^'\\]*)*)'"
r'|"([^"\\]*(?:\\.[^"\\]*)*)")', re.S)
_visibility_re = re.compile(r'\b(public|private|protected)\b')
_operator_re = re.compile(r'''(?x)
\[\s*\]
| \(\s*\)
| \+\+ | --
| ->\*? | \,
| (<<|>>)=? | && | \|\|
| [!<>=/*%+|&^~-]=?
| (\b(and|and_eq|bitand|bitor|compl|not|not_eq|or|or_eq|xor|xor_eq)\b)
''')
_fold_operator_re = re.compile(r'''(?x)
->\* | \.\* | \,
| (<<|>>)=? | && | \|\|
| !=
| [<>=/*%+|&^~-]=?
''')
# see https://en.cppreference.com/w/cpp/keyword
_keywords = [
'alignas', 'alignof', 'and', 'and_eq', 'asm', 'auto', 'bitand', 'bitor',
'bool', 'break', 'case', 'catch', 'char', 'char16_t', 'char32_t', 'class',
'compl', 'concept', 'const', 'constexpr', 'const_cast', 'continue',
'decltype', 'default', 'delete', 'do', 'double', 'dynamic_cast', 'else',
'enum', 'explicit', 'export', 'extern', 'false', 'float', 'for', 'friend',
'goto', 'if', 'inline', 'int', 'long', 'mutable', 'namespace', 'new',
'noexcept', 'not', 'not_eq', 'nullptr', 'operator', 'or', 'or_eq',
'private', 'protected', 'public', 'register', 'reinterpret_cast',
'requires', 'return', 'short', 'signed', 'sizeof', 'static',
'static_assert', 'static_cast', 'struct', 'switch', 'template', 'this',
'thread_local', 'throw', 'true', 'try', 'typedef', 'typeid', 'typename',
'union', 'unsigned', 'using', 'virtual', 'void', 'volatile', 'wchar_t',
'while', 'xor', 'xor_eq'
]
_max_id = 4
_id_prefix = [None, '', '_CPPv2', '_CPPv3', '_CPPv4']
# Ids are used in lookup keys which are used across pickled files,
# so when _max_id changes, make sure to update the ENV_VERSION.
# ------------------------------------------------------------------------------
# Id v1 constants
# ------------------------------------------------------------------------------
_id_fundamental_v1 = {
'char': 'c',
'signed char': 'c',
'unsigned char': 'C',
'int': 'i',
'signed int': 'i',
'unsigned int': 'U',
'long': 'l',
'signed long': 'l',
'unsigned long': 'L',
'bool': 'b'
}
_id_shorthands_v1 = {
'std::string': 'ss',
'std::ostream': 'os',
'std::istream': 'is',
'std::iostream': 'ios',
'std::vector': 'v',
'std::map': 'm'
}
_id_operator_v1 = {
'new': 'new-operator',
'new[]': 'new-array-operator',
'delete': 'delete-operator',
'delete[]': 'delete-array-operator',
# the arguments will make the difference between unary and binary
# '+(unary)' : 'ps',
# '-(unary)' : 'ng',
# '&(unary)' : 'ad',
# '*(unary)' : 'de',
'~': 'inv-operator',
'+': 'add-operator',
'-': 'sub-operator',
'*': 'mul-operator',
'/': 'div-operator',
'%': 'mod-operator',
'&': 'and-operator',
'|': 'or-operator',
'^': 'xor-operator',
'=': 'assign-operator',
'+=': 'add-assign-operator',
'-=': 'sub-assign-operator',
'*=': 'mul-assign-operator',
'/=': 'div-assign-operator',
'%=': 'mod-assign-operator',
'&=': 'and-assign-operator',
'|=': 'or-assign-operator',
'^=': 'xor-assign-operator',
'<<': 'lshift-operator',
'>>': 'rshift-operator',
'<<=': 'lshift-assign-operator',
'>>=': 'rshift-assign-operator',
'==': 'eq-operator',
'!=': 'neq-operator',
'<': 'lt-operator',
'>': 'gt-operator',
'<=': 'lte-operator',
'>=': 'gte-operator',
'!': 'not-operator',
'&&': 'sand-operator',
'||': 'sor-operator',
'++': 'inc-operator',
'--': 'dec-operator',
',': 'comma-operator',
'->*': 'pointer-by-pointer-operator',
'->': 'pointer-operator',
'()': 'call-operator',
'[]': 'subscript-operator'
}
# ------------------------------------------------------------------------------
# Id v > 1 constants
# ------------------------------------------------------------------------------
_id_fundamental_v2 = {
# not all of these are actually parsed as fundamental types, TODO: do that
'void': 'v',
'bool': 'b',
'char': 'c',
'signed char': 'a',
'unsigned char': 'h',
'wchar_t': 'w',
'char32_t': 'Di',
'char16_t': 'Ds',
'short': 's',
'short int': 's',
'signed short': 's',
'signed short int': 's',
'unsigned short': 't',
'unsigned short int': 't',
'int': 'i',
'signed': 'i',
'signed int': 'i',
'unsigned': 'j',
'unsigned int': 'j',
'long': 'l',
'long int': 'l',
'signed long': 'l',
'signed long int': 'l',
'unsigned long': 'm',
'unsigned long int': 'm',
'long long': 'x',
'long long int': 'x',
'signed long long': 'x',
'signed long long int': 'x',
'unsigned long long': 'y',
'unsigned long long int': 'y',
'float': 'f',
'double': 'd',
'long double': 'e',
'auto': 'Da',
'decltype(auto)': 'Dc',
'std::nullptr_t': 'Dn'
}
_id_operator_v2 = {
'new': 'nw',
'new[]': 'na',
'delete': 'dl',
'delete[]': 'da',
# the arguments will make the difference between unary and binary
# in operator definitions
# '+(unary)' : 'ps',
# '-(unary)' : 'ng',
# '&(unary)' : 'ad',
# '*(unary)' : 'de',
'~': 'co', 'compl': 'co',
'+': 'pl',
'-': 'mi',
'*': 'ml',
'/': 'dv',
'%': 'rm',
'&': 'an', 'bitand': 'an',
'|': 'or', 'bitor': 'or',
'^': 'eo', 'xor': 'eo',
'=': 'aS',
'+=': 'pL',
'-=': 'mI',
'*=': 'mL',
'/=': 'dV',
'%=': 'rM',
'&=': 'aN', 'and_eq': 'aN',
'|=': 'oR', 'or_eq': 'oR',
'^=': 'eO', 'xor_eq': 'eO',
'<<': 'ls',
'>>': 'rs',
'<<=': 'lS',
'>>=': 'rS',
'==': 'eq',
'!=': 'ne', 'not_eq': 'ne',
'<': 'lt',
'>': 'gt',
'<=': 'le',
'>=': 'ge',
'!': 'nt', 'not': 'nt',
'&&': 'aa', 'and': 'aa',
'||': 'oo', 'or': 'oo',
'++': 'pp',
'--': 'mm',
',': 'cm',
'->*': 'pm',
'->': 'pt',
'()': 'cl',
'[]': 'ix',
'.*': 'ds' # this one is not overloadable, but we need it for expressions
}
_id_operator_unary_v2 = {
'++': 'pp_',
'--': 'mm_',
'*': 'de',
'&': 'ad',
'+': 'ps',
'-': 'ng',
'!': 'nt', 'not': 'nt',
'~': 'co', 'compl': 'co'
}
_id_char_from_prefix = {
None: 'c', 'u8': 'c',
'u': 'Ds', 'U': 'Di', 'L': 'w'
} # type: Dict[Any, str]
# these are ordered by preceedence
_expression_bin_ops = [
['||', 'or'],
['&&', 'and'],
['|', 'bitor'],
['^', 'xor'],
['&', 'bitand'],
['==', '!=', 'not_eq'],
['<=', '>=', '<', '>'],
['<<', '>>'],
['+', '-'],
['*', '/', '%'],
['.*', '->*']
]
_expression_unary_ops = ["++", "--", "*", "&", "+", "-", "!", "not", "~", "compl"]
_expression_assignment_ops = ["=", "*=", "/=", "%=", "+=", "-=",
">>=", "<<=", "&=", "and_eq", "^=", "|=", "xor_eq", "or_eq"]
_id_explicit_cast = {
'dynamic_cast': 'dc',
'static_cast': 'sc',
'const_cast': 'cc',
'reinterpret_cast': 'rc'
}
class _DuplicateSymbolError(Exception):
def __init__(self, symbol: "Symbol", declaration: "ASTDeclaration") -> None:
assert symbol
assert declaration
self.symbol = symbol
self.declaration = declaration
def __str__(self) -> str:
return "Internal C++ duplicate symbol error:\n%s" % self.symbol.dump(0)
class ASTBase(ASTBaseBase):
pass
# Names
################################################################################
class ASTIdentifier(ASTBase):
def __init__(self, identifier: str) -> None:
assert identifier is not None
assert len(identifier) != 0
self.identifier = identifier
def is_anon(self) -> bool:
return self.identifier[0] == '@'
def get_id(self, version: int) -> str:
if self.is_anon() and version < 3:
raise NoOldIdError()
if version == 1:
if self.identifier == 'size_t':
return 's'
else:
return self.identifier
if self.identifier == "std":
return 'St'
elif self.identifier[0] == "~":
# a destructor, just use an arbitrary version of dtors
return 'D0'
else:
if self.is_anon():
return 'Ut%d_%s' % (len(self.identifier) - 1, self.identifier[1:])
else:
return str(len(self.identifier)) + self.identifier
# and this is where we finally make a difference between __str__ and the display string
def __str__(self) -> str:
return self.identifier
def get_display_string(self) -> str:
return "[anonymous]" if self.is_anon() else self.identifier
def describe_signature(self, signode: TextElement, mode: str, env: "BuildEnvironment",
prefix: str, templateArgs: str, symbol: "Symbol") -> None:
verify_description_mode(mode)
if mode == 'markType':
targetText = prefix + self.identifier + templateArgs
pnode = addnodes.pending_xref('', refdomain='cpp',
reftype='identifier',
reftarget=targetText, modname=None,
classname=None)
key = symbol.get_lookup_key()
pnode['cpp:parent_key'] = key
if self.is_anon():
pnode += nodes.strong(text="[anonymous]")
else:
pnode += nodes.Text(self.identifier)
signode += pnode
elif mode == 'lastIsName':
if self.is_anon():
signode += nodes.strong(text="[anonymous]")
else:
signode += addnodes.desc_name(self.identifier, self.identifier)
elif mode == 'noneIsName':
if self.is_anon():
signode += nodes.strong(text="[anonymous]")
else:
signode += nodes.Text(self.identifier)
else:
raise Exception('Unknown description mode: %s' % mode)
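# A small sanity sketch of the name mangling implemented by ASTIdentifier.get_id
# above (the expected strings follow directly from the code):
def _example_identifier_ids() -> None:
    assert ASTIdentifier('std').get_id(2) == 'St'
    assert ASTIdentifier('size_t').get_id(2) == '6size_t'
    assert ASTIdentifier('size_t').get_id(1) == 's'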
class ASTNestedNameElement(ASTBase):
def __init__(self, identOrOp: Union[ASTIdentifier, "ASTOperator"],
templateArgs: "ASTTemplateArgs") -> None:
self.identOrOp = identOrOp
self.templateArgs = templateArgs
def is_operator(self) -> bool:
return False
def get_id(self, version: int) -> str:
res = self.identOrOp.get_id(version)
if self.templateArgs:
res += self.templateArgs.get_id(version)
return res
def _stringify(self, transform: StringifyTransform) -> str:
res = transform(self.identOrOp)
if self.templateArgs:
res += transform(self.templateArgs)
return res
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", prefix: str, symbol: "Symbol") -> None:
tArgs = str(self.templateArgs) if self.templateArgs is not None else ''
self.identOrOp.describe_signature(signode, mode, env, prefix, tArgs, symbol)
if self.templateArgs is not None:
self.templateArgs.describe_signature(signode, mode, env, symbol)
class ASTNestedName(ASTBase):
def __init__(self, names: List[ASTNestedNameElement],
templates: List[bool], rooted: bool) -> None:
assert len(names) > 0
self.names = names
self.templates = templates
assert len(self.names) == len(self.templates)
self.rooted = rooted
@property
def name(self) -> "ASTNestedName":
return self
def num_templates(self) -> int:
count = 0
for n in self.names:
if n.is_operator():
continue
if n.templateArgs:
count += 1
return count
def get_id(self, version: int, modifiers: str = '') -> str:
if version == 1:
tt = str(self)
if tt in _id_shorthands_v1:
return _id_shorthands_v1[tt]
else:
return '::'.join(n.get_id(version) for n in self.names)
res = []
if len(self.names) > 1 or len(modifiers) > 0:
res.append('N')
res.append(modifiers)
for n in self.names:
res.append(n.get_id(version))
if len(self.names) > 1 or len(modifiers) > 0:
res.append('E')
return ''.join(res)
def _stringify(self, transform: StringifyTransform) -> str:
res = []
if self.rooted:
res.append('')
for i in range(len(self.names)):
n = self.names[i]
t = self.templates[i]
if t:
res.append("template " + transform(n))
else:
res.append(transform(n))
return '::'.join(res)
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
verify_description_mode(mode)
# just print the name part, with template args, not template params
if mode == 'noneIsName':
signode += nodes.Text(str(self))
elif mode == 'param':
name = str(self)
signode += nodes.emphasis(name, name)
elif mode == 'markType' or mode == 'lastIsName' or mode == 'markName':
# Each element should be a pending xref targeting the complete
# prefix. however, only the identifier part should be a link, such
# that template args can be a link as well.
# For 'lastIsName' we should also prepend template parameter lists.
templateParams = [] # type: List[Any]
if mode == 'lastIsName':
assert symbol is not None
if symbol.declaration.templatePrefix is not None:
templateParams = symbol.declaration.templatePrefix.templates
iTemplateParams = 0
templateParamsPrefix = ''
prefix = ''
first = True
names = self.names[:-1] if mode == 'lastIsName' else self.names
# If lastIsName, then wrap all of the prefix in a desc_addname,
# else append directly to signode.
# NOTE: Breathe relies on the prefix being in the desc_addname node,
# so it can remove it in inner declarations.
dest = signode
if mode == 'lastIsName':
dest = addnodes.desc_addname()
for i in range(len(names)):
nne = names[i]
template = self.templates[i]
if not first:
dest += nodes.Text('::')
prefix += '::'
if template:
dest += nodes.Text("template ")
first = False
txt_nne = str(nne)
if txt_nne != '':
if nne.templateArgs and iTemplateParams < len(templateParams):
templateParamsPrefix += str(templateParams[iTemplateParams])
iTemplateParams += 1
nne.describe_signature(dest, 'markType',
env, templateParamsPrefix + prefix, symbol)
prefix += txt_nne
if mode == 'lastIsName':
if len(self.names) > 1:
dest += addnodes.desc_addname('::', '::')
signode += dest
if self.templates[-1]:
signode += nodes.Text("template ")
self.names[-1].describe_signature(signode, mode, env, '', symbol)
else:
raise Exception('Unknown description mode: %s' % mode)
################################################################################
# Expressions
################################################################################
class ASTExpression(ASTBase):
def get_id(self, version: int) -> str:
raise NotImplementedError(repr(self))
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
raise NotImplementedError(repr(self))
# Primary expressions
################################################################################
class ASTLiteral(ASTExpression):
pass
class ASTPointerLiteral(ASTLiteral):
def _stringify(self, transform: StringifyTransform) -> str:
return 'nullptr'
def get_id(self, version: int) -> str:
return 'LDnE'
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
signode.append(nodes.Text('nullptr'))
class ASTBooleanLiteral(ASTLiteral):
def __init__(self, value: bool) -> None:
self.value = value
def _stringify(self, transform: StringifyTransform) -> str:
if self.value:
return 'true'
else:
return 'false'
def get_id(self, version: int) -> str:
if self.value:
return 'L1E'
else:
return 'L0E'
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
signode.append(nodes.Text(str(self)))
class ASTNumberLiteral(ASTLiteral):
def __init__(self, data: str) -> None:
self.data = data
def _stringify(self, transform: StringifyTransform) -> str:
return self.data
def get_id(self, version: int) -> str:
return "L%sE" % self.data
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
txt = str(self)
signode.append(nodes.Text(txt, txt))
class ASTStringLiteral(ASTLiteral):
def __init__(self, data: str) -> None:
self.data = data
def _stringify(self, transform: StringifyTransform) -> str:
return self.data
def get_id(self, version: int) -> str:
# note: the length is not really correct with escaping
return "LA%d_KcE" % (len(self.data) - 2)
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
txt = str(self)
signode.append(nodes.Text(txt, txt))
class ASTCharLiteral(ASTLiteral):
def __init__(self, prefix: str, data: str) -> None:
self.prefix = prefix # may be None when no prefix
self.data = data
assert prefix in _id_char_from_prefix
self.type = _id_char_from_prefix[prefix]
decoded = data.encode().decode('unicode-escape')
if len(decoded) == 1:
self.value = ord(decoded)
else:
raise UnsupportedMultiCharacterCharLiteral(decoded)
def _stringify(self, transform: StringifyTransform) -> str:
if self.prefix is None:
return "'" + self.data + "'"
else:
return self.prefix + "'" + self.data + "'"
def get_id(self, version: int) -> str:
return self.type + str(self.value)
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
txt = str(self)
signode.append(nodes.Text(txt, txt))
class ASTThisLiteral(ASTExpression):
def _stringify(self, transform: StringifyTransform) -> str:
return "this"
def get_id(self, version: int) -> str:
return "fpT"
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
signode.append(nodes.Text("this"))
class ASTFoldExpr(ASTExpression):
def __init__(self, leftExpr: ASTExpression,
op: str, rightExpr: ASTExpression) -> None:
assert leftExpr is not None or rightExpr is not None
self.leftExpr = leftExpr
self.op = op
self.rightExpr = rightExpr
def _stringify(self, transform: StringifyTransform) -> str:
res = ['(']
if self.leftExpr:
res.append(transform(self.leftExpr))
res.append(' ')
res.append(transform(self.op))
res.append(' ')
res.append('...')
if self.rightExpr:
res.append(' ')
res.append(transform(self.op))
res.append(' ')
res.append(transform(self.rightExpr))
res.append(')')
return ''.join(res)
def get_id(self, version: int) -> str:
assert version >= 3
if version == 3:
return str(self)
# https://github.com/itanium-cxx-abi/cxx-abi/pull/67
res = []
if self.leftExpr is None: # (... op expr)
res.append('fl')
elif self.rightExpr is None: # (expr op ...)
res.append('fr')
else: # (expr op ... op expr)
# we don't check where the parameter pack is,
# we just always call this a binary left fold
res.append('fL')
res.append(_id_operator_v2[self.op])
if self.leftExpr:
res.append(self.leftExpr.get_id(version))
if self.rightExpr:
res.append(self.rightExpr.get_id(version))
return ''.join(res)
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
signode.append(nodes.Text('('))
if self.leftExpr:
self.leftExpr.describe_signature(signode, mode, env, symbol)
signode.append(nodes.Text(' '))
signode.append(nodes.Text(self.op))
signode.append(nodes.Text(' '))
signode.append(nodes.Text('...'))
if self.rightExpr:
signode.append(nodes.Text(' '))
signode.append(nodes.Text(self.op))
signode.append(nodes.Text(' '))
self.rightExpr.describe_signature(signode, mode, env, symbol)
signode.append(nodes.Text(')'))
class ASTParenExpr(ASTExpression):
def __init__(self, expr: ASTExpression):
self.expr = expr
def _stringify(self, transform: StringifyTransform) -> str:
return '(' + transform(self.expr) + ')'
def get_id(self, version: int) -> str:
return self.expr.get_id(version)
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
signode.append(nodes.Text('(', '('))
self.expr.describe_signature(signode, mode, env, symbol)
signode.append(nodes.Text(')', ')'))
class ASTIdExpression(ASTExpression):
def __init__(self, name: ASTNestedName):
        # note: this wraps a nested name so it can be used as an expression
self.name = name
def _stringify(self, transform: StringifyTransform) -> str:
return transform(self.name)
def get_id(self, version: int) -> str:
return self.name.get_id(version)
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
self.name.describe_signature(signode, mode, env, symbol)
# Postfix expressions
################################################################################
class ASTPostfixOp(ASTBase):
def get_id(self, idPrefix: str, version: int) -> str:
raise NotImplementedError(repr(self))
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
raise NotImplementedError(repr(self))
class ASTPostfixArray(ASTPostfixOp):
def __init__(self, expr: ASTExpression):
self.expr = expr
def _stringify(self, transform: StringifyTransform) -> str:
return '[' + transform(self.expr) + ']'
def get_id(self, idPrefix: str, version: int) -> str:
return 'ix' + idPrefix + self.expr.get_id(version)
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
signode.append(nodes.Text('['))
self.expr.describe_signature(signode, mode, env, symbol)
signode.append(nodes.Text(']'))
class ASTPostfixMember(ASTPostfixOp):
def __init__(self, name: ASTNestedName):
self.name = name
def _stringify(self, transform: StringifyTransform) -> str:
return '.' + transform(self.name)
def get_id(self, idPrefix: str, version: int) -> str:
return 'dt' + idPrefix + self.name.get_id(version)
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
signode.append(nodes.Text('.'))
self.name.describe_signature(signode, 'noneIsName', env, symbol)
class ASTPostfixMemberOfPointer(ASTPostfixOp):
def __init__(self, name: ASTNestedName):
self.name = name
def _stringify(self, transform: StringifyTransform) -> str:
return '->' + transform(self.name)
def get_id(self, idPrefix: str, version: int) -> str:
return 'pt' + idPrefix + self.name.get_id(version)
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
signode.append(nodes.Text('->'))
self.name.describe_signature(signode, 'noneIsName', env, symbol)
class ASTPostfixInc(ASTPostfixOp):
def _stringify(self, transform: StringifyTransform) -> str:
return '++'
def get_id(self, idPrefix: str, version: int) -> str:
return 'pp' + idPrefix
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
signode.append(nodes.Text('++'))
class ASTPostfixDec(ASTPostfixOp):
def _stringify(self, transform: StringifyTransform) -> str:
return '--'
def get_id(self, idPrefix: str, version: int) -> str:
return 'mm' + idPrefix
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
signode.append(nodes.Text('--'))
class ASTPostfixCallExpr(ASTPostfixOp):
def __init__(self, lst: Union["ASTParenExprList", "ASTBracedInitList"]) -> None:
self.lst = lst
def _stringify(self, transform: StringifyTransform) -> str:
return transform(self.lst)
def get_id(self, idPrefix: str, version: int) -> str:
res = ['cl', idPrefix]
for e in self.lst.exprs:
res.append(e.get_id(version))
res.append('E')
return ''.join(res)
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
self.lst.describe_signature(signode, mode, env, symbol)
class ASTPostfixExpr(ASTExpression):
def __init__(self, prefix: "ASTType", postFixes: List[ASTPostfixOp]):
self.prefix = prefix
self.postFixes = postFixes
def _stringify(self, transform: StringifyTransform) -> str:
res = [transform(self.prefix)]
for p in self.postFixes:
res.append(transform(p))
return ''.join(res)
def get_id(self, version: int) -> str:
id = self.prefix.get_id(version)
for p in self.postFixes:
id = p.get_id(id, version)
return id
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
self.prefix.describe_signature(signode, mode, env, symbol)
for p in self.postFixes:
p.describe_signature(signode, mode, env, symbol)
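# --- Illustrative sketch, not part of the original module --------------------
# ASTPostfixExpr threads the id built so far through each postfix operation:
# every ASTPostfixOp.get_id(idPrefix, version) wraps the id of everything to
# its left.  The helper mirrors that loop by hand for "arr[i]++", using
# ASTFallbackExpr (defined further below) as a stand-in primary expression.
def _example_postfix_id_chaining() -> str:
    prefix = ASTFallbackExpr('arr').get_id(4)                          # 'arr'
    prefix = ASTPostfixArray(ASTFallbackExpr('i')).get_id(prefix, 4)   # 'ixarri'
    return ASTPostfixInc().get_id(prefix, 4)                           # 'ppixarri'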
class ASTExplicitCast(ASTExpression):
def __init__(self, cast: str, typ: "ASTType", expr: ASTExpression):
assert cast in _id_explicit_cast
self.cast = cast
self.typ = typ
self.expr = expr
def _stringify(self, transform: StringifyTransform) -> str:
res = [self.cast]
res.append('<')
res.append(transform(self.typ))
res.append('>(')
res.append(transform(self.expr))
res.append(')')
return ''.join(res)
def get_id(self, version: int) -> str:
return (_id_explicit_cast[self.cast] +
self.typ.get_id(version) +
self.expr.get_id(version))
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
signode.append(nodes.Text(self.cast))
signode.append(nodes.Text('<'))
self.typ.describe_signature(signode, mode, env, symbol)
signode.append(nodes.Text('>'))
signode.append(nodes.Text('('))
self.expr.describe_signature(signode, mode, env, symbol)
signode.append(nodes.Text(')'))
class ASTTypeId(ASTExpression):
def __init__(self, typeOrExpr: Union["ASTType", ASTExpression], isType: bool):
self.typeOrExpr = typeOrExpr
self.isType = isType
def _stringify(self, transform: StringifyTransform) -> str:
return 'typeid(' + transform(self.typeOrExpr) + ')'
def get_id(self, version: int) -> str:
prefix = 'ti' if self.isType else 'te'
return prefix + self.typeOrExpr.get_id(version)
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
signode.append(nodes.Text('typeid'))
signode.append(nodes.Text('('))
self.typeOrExpr.describe_signature(signode, mode, env, symbol)
signode.append(nodes.Text(')'))
# Unary expressions
################################################################################
class ASTUnaryOpExpr(ASTExpression):
def __init__(self, op: str, expr: ASTExpression):
self.op = op
self.expr = expr
def _stringify(self, transform: StringifyTransform) -> str:
if self.op[0] in 'cn':
return transform(self.op) + " " + transform(self.expr)
else:
return transform(self.op) + transform(self.expr)
def get_id(self, version: int) -> str:
return _id_operator_unary_v2[self.op] + self.expr.get_id(version)
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
signode.append(nodes.Text(self.op))
if self.op[0] in 'cn':
signode.append(nodes.Text(' '))
self.expr.describe_signature(signode, mode, env, symbol)
class ASTSizeofParamPack(ASTExpression):
def __init__(self, identifier: ASTIdentifier):
self.identifier = identifier
def _stringify(self, transform: StringifyTransform) -> str:
return "sizeof...(" + transform(self.identifier) + ")"
def get_id(self, version: int) -> str:
return 'sZ' + self.identifier.get_id(version)
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
signode.append(nodes.Text('sizeof...('))
self.identifier.describe_signature(signode, mode, env,
symbol=symbol, prefix="", templateArgs="")
signode.append(nodes.Text(')'))
class ASTSizeofType(ASTExpression):
def __init__(self, typ: "ASTType"):
self.typ = typ
def _stringify(self, transform: StringifyTransform) -> str:
return "sizeof(" + transform(self.typ) + ")"
def get_id(self, version: int) -> str:
return 'st' + self.typ.get_id(version)
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
signode.append(nodes.Text('sizeof('))
self.typ.describe_signature(signode, mode, env, symbol)
signode.append(nodes.Text(')'))
class ASTSizeofExpr(ASTExpression):
def __init__(self, expr: ASTExpression):
self.expr = expr
def _stringify(self, transform: StringifyTransform) -> str:
return "sizeof " + transform(self.expr)
def get_id(self, version: int) -> str:
return 'sz' + self.expr.get_id(version)
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
signode.append(nodes.Text('sizeof '))
self.expr.describe_signature(signode, mode, env, symbol)
class ASTAlignofExpr(ASTExpression):
def __init__(self, typ: "ASTType"):
self.typ = typ
def _stringify(self, transform: StringifyTransform) -> str:
return "alignof(" + transform(self.typ) + ")"
def get_id(self, version: int) -> str:
return 'at' + self.typ.get_id(version)
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
signode.append(nodes.Text('alignof('))
self.typ.describe_signature(signode, mode, env, symbol)
signode.append(nodes.Text(')'))
class ASTNoexceptExpr(ASTExpression):
def __init__(self, expr: ASTExpression):
self.expr = expr
def _stringify(self, transform: StringifyTransform) -> str:
return "noexcept(" + transform(self.expr) + ")"
def get_id(self, version: int) -> str:
return 'nx' + self.expr.get_id(version)
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
signode.append(nodes.Text('noexcept('))
self.expr.describe_signature(signode, mode, env, symbol)
signode.append(nodes.Text(')'))
class ASTNewExpr(ASTExpression):
def __init__(self, rooted: bool, isNewTypeId: bool, typ: "ASTType",
initList: Union["ASTParenExprList", "ASTBracedInitList"]) -> None:
self.rooted = rooted
self.isNewTypeId = isNewTypeId
self.typ = typ
self.initList = initList
def _stringify(self, transform: StringifyTransform) -> str:
res = []
if self.rooted:
res.append('::')
res.append('new ')
# TODO: placement
if self.isNewTypeId:
res.append(transform(self.typ))
else:
assert False
if self.initList is not None:
res.append(transform(self.initList))
return ''.join(res)
def get_id(self, version: int) -> str:
# the array part will be in the type mangling, so na is not used
res = ['nw']
# TODO: placement
res.append('_')
res.append(self.typ.get_id(version))
if self.initList is not None:
res.append(self.initList.get_id(version))
else:
res.append('E')
return ''.join(res)
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
if self.rooted:
signode.append(nodes.Text('::'))
signode.append(nodes.Text('new '))
# TODO: placement
if self.isNewTypeId:
self.typ.describe_signature(signode, mode, env, symbol)
else:
assert False
if self.initList is not None:
self.initList.describe_signature(signode, mode, env, symbol)
class ASTDeleteExpr(ASTExpression):
def __init__(self, rooted: bool, array: bool, expr: ASTExpression):
self.rooted = rooted
self.array = array
self.expr = expr
def _stringify(self, transform: StringifyTransform) -> str:
res = []
if self.rooted:
res.append('::')
res.append('delete ')
if self.array:
res.append('[] ')
res.append(transform(self.expr))
return ''.join(res)
def get_id(self, version: int) -> str:
if self.array:
id = "da"
else:
id = "dl"
return id + self.expr.get_id(version)
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
if self.rooted:
signode.append(nodes.Text('::'))
signode.append(nodes.Text('delete '))
if self.array:
signode.append(nodes.Text('[] '))
self.expr.describe_signature(signode, mode, env, symbol)
# Other expressions
################################################################################
class ASTCastExpr(ASTExpression):
def __init__(self, typ: "ASTType", expr: ASTExpression):
self.typ = typ
self.expr = expr
def _stringify(self, transform: StringifyTransform) -> str:
res = ['(']
res.append(transform(self.typ))
res.append(')')
res.append(transform(self.expr))
return ''.join(res)
def get_id(self, version: int) -> str:
return 'cv' + self.typ.get_id(version) + self.expr.get_id(version)
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
signode.append(nodes.Text('('))
self.typ.describe_signature(signode, mode, env, symbol)
signode.append(nodes.Text(')'))
self.expr.describe_signature(signode, mode, env, symbol)
class ASTBinOpExpr(ASTExpression):
def __init__(self, exprs: List[ASTExpression], ops: List[str]):
assert len(exprs) > 0
assert len(exprs) == len(ops) + 1
self.exprs = exprs
self.ops = ops
def _stringify(self, transform: StringifyTransform) -> str:
res = []
res.append(transform(self.exprs[0]))
for i in range(1, len(self.exprs)):
res.append(' ')
res.append(self.ops[i - 1])
res.append(' ')
res.append(transform(self.exprs[i]))
return ''.join(res)
def get_id(self, version: int) -> str:
assert version >= 2
res = []
for i in range(len(self.ops)):
res.append(_id_operator_v2[self.ops[i]])
res.append(self.exprs[i].get_id(version))
res.append(self.exprs[-1].get_id(version))
return ''.join(res)
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
self.exprs[0].describe_signature(signode, mode, env, symbol)
for i in range(1, len(self.exprs)):
signode.append(nodes.Text(' '))
signode.append(nodes.Text(self.ops[i - 1]))
signode.append(nodes.Text(' '))
self.exprs[i].describe_signature(signode, mode, env, symbol)
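# --- Illustrative sketch, not part of the original module --------------------
# ASTBinOpExpr stores n operands and n-1 operators; _stringify interleaves
# them with spaces, and get_id emits each operator id followed by the operand
# ids.  ASTFallbackExpr (defined further below) stands in for parsed operands.
def _example_binop_stringify() -> str:
    expr = ASTBinOpExpr([ASTFallbackExpr('a'), ASTFallbackExpr('b'),
                         ASTFallbackExpr('c')], ['+', '*'])
    return str(expr)  # 'a + b * c'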
class ASTBracedInitList(ASTBase):
def __init__(self, exprs: List[Union[ASTExpression, "ASTBracedInitList"]],
trailingComma: bool) -> None:
self.exprs = exprs
self.trailingComma = trailingComma
def get_id(self, version: int) -> str:
return "il%sE" % ''.join(e.get_id(version) for e in self.exprs)
def _stringify(self, transform: StringifyTransform) -> str:
exprs = [transform(e) for e in self.exprs]
trailingComma = ',' if self.trailingComma else ''
return '{%s%s}' % (', '.join(exprs), trailingComma)
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
verify_description_mode(mode)
signode.append(nodes.Text('{'))
first = True
for e in self.exprs:
if not first:
signode.append(nodes.Text(', '))
else:
first = False
e.describe_signature(signode, mode, env, symbol)
if self.trailingComma:
signode.append(nodes.Text(','))
signode.append(nodes.Text('}'))
class ASTAssignmentExpr(ASTExpression):
def __init__(self, exprs: List[Union[ASTExpression, ASTBracedInitList]], ops: List[str]):
assert len(exprs) > 0
assert len(exprs) == len(ops) + 1
self.exprs = exprs
self.ops = ops
def _stringify(self, transform: StringifyTransform) -> str:
res = []
res.append(transform(self.exprs[0]))
for i in range(1, len(self.exprs)):
res.append(' ')
res.append(self.ops[i - 1])
res.append(' ')
res.append(transform(self.exprs[i]))
return ''.join(res)
def get_id(self, version: int) -> str:
res = []
for i in range(len(self.ops)):
res.append(_id_operator_v2[self.ops[i]])
res.append(self.exprs[i].get_id(version))
res.append(self.exprs[-1].get_id(version))
return ''.join(res)
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
self.exprs[0].describe_signature(signode, mode, env, symbol)
for i in range(1, len(self.exprs)):
signode.append(nodes.Text(' '))
signode.append(nodes.Text(self.ops[i - 1]))
signode.append(nodes.Text(' '))
self.exprs[i].describe_signature(signode, mode, env, symbol)
class ASTCommaExpr(ASTExpression):
def __init__(self, exprs: List[ASTExpression]):
assert len(exprs) > 0
self.exprs = exprs
def _stringify(self, transform: StringifyTransform) -> str:
return ', '.join(transform(e) for e in self.exprs)
def get_id(self, version: int) -> str:
id_ = _id_operator_v2[',']
res = []
for i in range(len(self.exprs) - 1):
res.append(id_)
res.append(self.exprs[i].get_id(version))
res.append(self.exprs[-1].get_id(version))
return ''.join(res)
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
self.exprs[0].describe_signature(signode, mode, env, symbol)
for i in range(1, len(self.exprs)):
signode.append(nodes.Text(', '))
self.exprs[i].describe_signature(signode, mode, env, symbol)
class ASTFallbackExpr(ASTExpression):
def __init__(self, expr: str):
self.expr = expr
def _stringify(self, transform: StringifyTransform) -> str:
return self.expr
def get_id(self, version: int) -> str:
return str(self.expr)
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
signode += nodes.Text(self.expr)
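# --- Illustrative sketch, not part of the original module --------------------
# ASTFallbackExpr is the escape hatch used when full expression parsing fails:
# it echoes its raw text from both _stringify and get_id.  Wrapping it in one
# of the unary nodes above shows how composite ids are assembled around it.
def _example_fallback_composition() -> str:
    sz = ASTSizeofExpr(ASTFallbackExpr('x'))
    assert str(sz) == 'sizeof x'
    return sz.get_id(4)  # 'sz' + 'x'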
################################################################################
# Types
################################################################################
# Things for ASTNestedName
################################################################################
class ASTOperator(ASTBase):
def is_anon(self) -> bool:
return False
def is_operator(self) -> bool:
return True
def get_id(self, version: int) -> str:
raise NotImplementedError()
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", prefix: str, templateArgs: str,
symbol: "Symbol") -> None:
verify_description_mode(mode)
identifier = str(self)
if mode == 'lastIsName':
signode += addnodes.desc_name(identifier, identifier)
else:
signode += addnodes.desc_addname(identifier, identifier)
class ASTOperatorBuildIn(ASTOperator):
def __init__(self, op: str) -> None:
self.op = op
def get_id(self, version: int) -> str:
if version == 1:
ids = _id_operator_v1
if self.op not in ids:
raise NoOldIdError()
else:
ids = _id_operator_v2
if self.op not in ids:
                raise Exception('Internal error: Built-in operator "%s" cannot '
                                'be mapped to an id.' % self.op)
return ids[self.op]
def _stringify(self, transform: StringifyTransform) -> str:
if self.op in ('new', 'new[]', 'delete', 'delete[]') or self.op[0] in "abcnox":
return 'operator ' + self.op
else:
return 'operator' + self.op
class ASTOperatorLiteral(ASTOperator):
def __init__(self, identifier: ASTIdentifier) -> None:
self.identifier = identifier
def get_id(self, version: int) -> str:
if version == 1:
raise NoOldIdError()
else:
return 'li' + self.identifier.get_id(version)
def _stringify(self, transform: StringifyTransform) -> str:
return 'operator""' + transform(self.identifier)
class ASTOperatorType(ASTOperator):
def __init__(self, type: "ASTType") -> None:
self.type = type
def get_id(self, version: int) -> str:
if version == 1:
return 'castto-%s-operator' % self.type.get_id(version)
else:
return 'cv' + self.type.get_id(version)
def _stringify(self, transform: StringifyTransform) -> str:
return ''.join(['operator ', transform(self.type)])
def get_name_no_template(self) -> str:
return str(self)
class ASTTemplateArgConstant(ASTBase):
def __init__(self, value: ASTExpression) -> None:
self.value = value
def _stringify(self, transform: StringifyTransform) -> str:
return transform(self.value)
def get_id(self, version: int) -> str:
if version == 1:
return str(self).replace(' ', '-')
if version == 2:
return 'X' + str(self) + 'E'
return 'X' + self.value.get_id(version) + 'E'
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
verify_description_mode(mode)
self.value.describe_signature(signode, mode, env, symbol)
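# --- Illustrative sketch, not part of the original module --------------------
# A constant template argument is mangled as 'X<expr-id>E' (version 2 uses the
# textual form of the expression instead of its id).  ASTFallbackExpr, defined
# earlier, stands in for the parsed constant expression.
def _example_template_arg_constant_id() -> None:
    arg = ASTTemplateArgConstant(ASTFallbackExpr('3'))
    assert arg.get_id(2) == 'X3E'  # textual form
    assert arg.get_id(4) == 'X3E'  # expression id; identical for a fallback expr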
class ASTTemplateArgs(ASTBase):
def __init__(self, args: List[Union["ASTType", ASTTemplateArgConstant]],
packExpansion: bool) -> None:
assert args is not None
self.args = args
self.packExpansion = packExpansion
def get_id(self, version: int) -> str:
if version == 1:
res = []
res.append(':')
res.append('.'.join(a.get_id(version) for a in self.args))
res.append(':')
return ''.join(res)
res = []
res.append('I')
if len(self.args) > 0:
for a in self.args[:-1]:
res.append(a.get_id(version))
if self.packExpansion:
res.append('J')
res.append(self.args[-1].get_id(version))
if self.packExpansion:
res.append('E')
res.append('E')
return ''.join(res)
def _stringify(self, transform: StringifyTransform) -> str:
res = ', '.join(transform(a) for a in self.args)
if self.packExpansion:
res += '...'
return '<' + res + '>'
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
verify_description_mode(mode)
signode += nodes.Text('<')
first = True
for a in self.args:
if not first:
signode += nodes.Text(', ')
first = False
a.describe_signature(signode, 'markType', env, symbol=symbol)
if self.packExpansion:
signode += nodes.Text('...')
signode += nodes.Text('>')
# Main part of declarations
################################################################################
class ASTTrailingTypeSpec(ASTBase):
def get_id(self, version: int) -> str:
raise NotImplementedError(repr(self))
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
raise NotImplementedError(repr(self))
class ASTTrailingTypeSpecFundamental(ASTTrailingTypeSpec):
def __init__(self, name: str) -> None:
self.name = name
def _stringify(self, transform: StringifyTransform) -> str:
return self.name
def get_id(self, version: int) -> str:
if version == 1:
res = []
for a in self.name.split(' '):
if a in _id_fundamental_v1:
res.append(_id_fundamental_v1[a])
else:
res.append(a)
return '-'.join(res)
if self.name not in _id_fundamental_v2:
            raise Exception(
                'Semi-internal error: Fundamental type "%s" cannot be mapped '
                'to an id. Is it a true fundamental type? If not, the '
                'parser should have rejected it.' % self.name)
return _id_fundamental_v2[self.name]
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
signode += nodes.Text(str(self.name))
class ASTTrailingTypeSpecDecltypeAuto(ASTTrailingTypeSpec):
def _stringify(self, transform: StringifyTransform) -> str:
return 'decltype(auto)'
def get_id(self, version: int) -> str:
if version == 1:
raise NoOldIdError()
return 'Dc'
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
signode.append(nodes.Text(str(self)))
class ASTTrailingTypeSpecDecltype(ASTTrailingTypeSpec):
def __init__(self, expr: ASTExpression):
self.expr = expr
def _stringify(self, transform: StringifyTransform) -> str:
return 'decltype(' + transform(self.expr) + ')'
def get_id(self, version: int) -> str:
if version == 1:
raise NoOldIdError()
return 'DT' + self.expr.get_id(version) + "E"
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
signode.append(nodes.Text('decltype('))
self.expr.describe_signature(signode, mode, env, symbol)
signode.append(nodes.Text(')'))
class ASTTrailingTypeSpecName(ASTTrailingTypeSpec):
def __init__(self, prefix: str, nestedName: ASTNestedName) -> None:
self.prefix = prefix
self.nestedName = nestedName
@property
def name(self) -> ASTNestedName:
return self.nestedName
def get_id(self, version: int) -> str:
return self.nestedName.get_id(version)
def _stringify(self, transform: StringifyTransform) -> str:
res = []
if self.prefix:
res.append(self.prefix)
res.append(' ')
res.append(transform(self.nestedName))
return ''.join(res)
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
if self.prefix:
signode += addnodes.desc_annotation(self.prefix, self.prefix)
signode += nodes.Text(' ')
self.nestedName.describe_signature(signode, mode, env, symbol=symbol)
class ASTFunctionParameter(ASTBase):
def __init__(self, arg: Union["ASTTypeWithInit",
"ASTTemplateParamConstrainedTypeWithInit"],
ellipsis: bool = False) -> None:
self.arg = arg
self.ellipsis = ellipsis
def get_id(self, version: int, objectType: str = None, symbol: "Symbol" = None) -> str:
# this is not part of the normal name mangling in C++
if symbol:
# the anchor will be our parent
return symbol.parent.declaration.get_id(version, prefixed=None)
# else, do the usual
if self.ellipsis:
return 'z'
else:
return self.arg.get_id(version)
def _stringify(self, transform: StringifyTransform) -> str:
if self.ellipsis:
return '...'
else:
return transform(self.arg)
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
verify_description_mode(mode)
if self.ellipsis:
signode += nodes.Text('...')
else:
self.arg.describe_signature(signode, mode, env, symbol=symbol)
class ASTParametersQualifiers(ASTBase):
def __init__(self, args: List[ASTFunctionParameter],
volatile: bool, const: bool, refQual: str,
exceptionSpec: str, override: bool, final: bool, initializer: str) -> None:
self.args = args
self.volatile = volatile
self.const = const
self.refQual = refQual
self.exceptionSpec = exceptionSpec
self.override = override
self.final = final
self.initializer = initializer
@property
def function_params(self) -> List[ASTFunctionParameter]:
return self.args
def get_modifiers_id(self, version: int) -> str:
res = []
if self.volatile:
res.append('V')
if self.const:
if version == 1:
res.append('C')
else:
res.append('K')
if self.refQual == '&&':
res.append('O')
elif self.refQual == '&':
res.append('R')
return ''.join(res)
def get_param_id(self, version: int) -> str:
if version == 1:
if len(self.args) == 0:
return ''
else:
return '__' + '.'.join(a.get_id(version) for a in self.args)
if len(self.args) == 0:
return 'v'
else:
return ''.join(a.get_id(version) for a in self.args)
def _stringify(self, transform: StringifyTransform) -> str:
res = []
res.append('(')
first = True
for a in self.args:
if not first:
res.append(', ')
first = False
res.append(str(a))
res.append(')')
if self.volatile:
res.append(' volatile')
if self.const:
res.append(' const')
if self.refQual:
res.append(' ')
res.append(self.refQual)
if self.exceptionSpec:
res.append(' ')
res.append(str(self.exceptionSpec))
if self.final:
res.append(' final')
if self.override:
res.append(' override')
if self.initializer:
res.append(' = ')
res.append(self.initializer)
return ''.join(res)
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
verify_description_mode(mode)
paramlist = addnodes.desc_parameterlist()
for arg in self.args:
param = addnodes.desc_parameter('', '', noemph=True)
if mode == 'lastIsName': # i.e., outer-function params
arg.describe_signature(param, 'param', env, symbol=symbol)
else:
arg.describe_signature(param, 'markType', env, symbol=symbol)
paramlist += param
signode += paramlist
def _add_anno(signode: TextElement, text: str) -> None:
signode += nodes.Text(' ')
signode += addnodes.desc_annotation(text, text)
def _add_text(signode: TextElement, text: str) -> None:
signode += nodes.Text(' ' + text)
if self.volatile:
_add_anno(signode, 'volatile')
if self.const:
_add_anno(signode, 'const')
if self.refQual:
_add_text(signode, self.refQual)
if self.exceptionSpec:
_add_anno(signode, str(self.exceptionSpec))
if self.final:
_add_anno(signode, 'final')
if self.override:
_add_anno(signode, 'override')
if self.initializer:
_add_text(signode, '= ' + str(self.initializer))
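# --- Illustrative sketch, not part of the original module --------------------
# The trailing qualifiers of a function contribute to its id independently of
# the parameter list: get_modifiers_id encodes the cv- and ref-qualifiers,
# while get_param_id encodes the parameters ('v' for an empty list in
# version 2).  An empty parameter list keeps the example self-contained.
def _example_function_qualifier_ids() -> None:
    pq = ASTParametersQualifiers([], volatile=False, const=True, refQual='&&',
                                 exceptionSpec=None, override=False,
                                 final=False, initializer=None)
    assert str(pq) == '() const &&'
    assert pq.get_modifiers_id(2) == 'KO'
    assert pq.get_param_id(2) == 'v'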
class ASTDeclSpecsSimple(ASTBase):
def __init__(self, storage: str, threadLocal: bool, inline: bool, virtual: bool,
explicit: bool, constexpr: bool, volatile: bool, const: bool,
friend: bool, attrs: List[ASTAttribute]) -> None:
self.storage = storage
self.threadLocal = threadLocal
self.inline = inline
self.virtual = virtual
self.explicit = explicit
self.constexpr = constexpr
self.volatile = volatile
self.const = const
self.friend = friend
self.attrs = attrs
def mergeWith(self, other: "ASTDeclSpecsSimple") -> "ASTDeclSpecsSimple":
if not other:
return self
return ASTDeclSpecsSimple(self.storage or other.storage,
self.threadLocal or other.threadLocal,
self.inline or other.inline,
self.virtual or other.virtual,
self.explicit or other.explicit,
self.constexpr or other.constexpr,
self.volatile or other.volatile,
self.const or other.const,
self.friend or other.friend,
self.attrs + other.attrs)
def _stringify(self, transform: StringifyTransform) -> str:
res = [] # type: List[str]
res.extend(transform(attr) for attr in self.attrs)
if self.storage:
res.append(self.storage)
if self.threadLocal:
res.append('thread_local')
if self.inline:
res.append('inline')
if self.friend:
res.append('friend')
if self.virtual:
res.append('virtual')
if self.explicit:
res.append('explicit')
if self.constexpr:
res.append('constexpr')
if self.volatile:
res.append('volatile')
if self.const:
res.append('const')
return ' '.join(res)
def describe_signature(self, signode: TextElement) -> None:
addSpace = False
for attr in self.attrs:
if addSpace:
signode += nodes.Text(' ')
addSpace = True
attr.describe_signature(signode)
def _add(signode: TextElement, text: str) -> bool:
if addSpace:
signode += nodes.Text(' ')
signode += addnodes.desc_annotation(text, text)
return True
if self.storage:
addSpace = _add(signode, self.storage)
if self.threadLocal:
addSpace = _add(signode, 'thread_local')
if self.inline:
addSpace = _add(signode, 'inline')
if self.friend:
addSpace = _add(signode, 'friend')
if self.virtual:
addSpace = _add(signode, 'virtual')
if self.explicit:
addSpace = _add(signode, 'explicit')
if self.constexpr:
addSpace = _add(signode, 'constexpr')
if self.volatile:
addSpace = _add(signode, 'volatile')
if self.const:
addSpace = _add(signode, 'const')
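# --- Illustrative sketch, not part of the original module --------------------
# Declaration specifiers may appear on either side of the type; mergeWith
# combines the left and right groups into the single set used for id
# generation, OR-ing the boolean flags and concatenating the attributes.
def _example_merge_decl_specs() -> str:
    left = ASTDeclSpecsSimple(storage='static', threadLocal=False, inline=True,
                              virtual=False, explicit=False, constexpr=False,
                              volatile=False, const=False, friend=False, attrs=[])
    right = ASTDeclSpecsSimple(storage=None, threadLocal=False, inline=False,
                               virtual=False, explicit=False, constexpr=True,
                               volatile=False, const=True, friend=False, attrs=[])
    return str(left.mergeWith(right))  # 'static inline constexpr const'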
class ASTDeclSpecs(ASTBase):
def __init__(self, outer: str,
leftSpecs: ASTDeclSpecsSimple, rightSpecs: ASTDeclSpecsSimple,
trailing: ASTTrailingTypeSpec) -> None:
# leftSpecs and rightSpecs are used for output
# allSpecs are used for id generation
self.outer = outer
self.leftSpecs = leftSpecs
self.rightSpecs = rightSpecs
self.allSpecs = self.leftSpecs.mergeWith(self.rightSpecs)
self.trailingTypeSpec = trailing
def get_id(self, version: int) -> str:
if version == 1:
res = []
res.append(self.trailingTypeSpec.get_id(version))
if self.allSpecs.volatile:
res.append('V')
if self.allSpecs.const:
res.append('C')
return ''.join(res)
res = []
if self.allSpecs.volatile:
res.append('V')
if self.allSpecs.const:
res.append('K')
if self.trailingTypeSpec is not None:
res.append(self.trailingTypeSpec.get_id(version))
return ''.join(res)
def _stringify(self, transform: StringifyTransform) -> str:
res = [] # type: List[str]
l = transform(self.leftSpecs)
if len(l) > 0:
res.append(l)
if self.trailingTypeSpec:
if len(res) > 0:
res.append(" ")
res.append(transform(self.trailingTypeSpec))
r = str(self.rightSpecs)
if len(r) > 0:
if len(res) > 0:
res.append(" ")
res.append(r)
return "".join(res)
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
verify_description_mode(mode)
numChildren = len(signode)
self.leftSpecs.describe_signature(signode)
addSpace = len(signode) != numChildren
if self.trailingTypeSpec:
if addSpace:
signode += nodes.Text(' ')
numChildren = len(signode)
self.trailingTypeSpec.describe_signature(signode, mode, env,
symbol=symbol)
addSpace = len(signode) != numChildren
if len(str(self.rightSpecs)) > 0:
if addSpace:
signode += nodes.Text(' ')
self.rightSpecs.describe_signature(signode)
# Declarator
################################################################################
class ASTArray(ASTBase):
def __init__(self, size: ASTExpression):
self.size = size
def _stringify(self, transform: StringifyTransform) -> str:
if self.size:
return '[' + transform(self.size) + ']'
else:
return '[]'
def get_id(self, version: int) -> str:
if version == 1:
return 'A'
if version == 2:
if self.size:
return 'A' + str(self.size) + '_'
else:
return 'A_'
if self.size:
return 'A' + self.size.get_id(version) + '_'
else:
return 'A_'
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
verify_description_mode(mode)
signode.append(nodes.Text("["))
if self.size:
self.size.describe_signature(signode, mode, env, symbol)
signode.append(nodes.Text("]"))
class ASTDeclarator(ASTBase):
@property
def name(self) -> ASTNestedName:
raise NotImplementedError(repr(self))
@property
def isPack(self) -> bool:
raise NotImplementedError(repr(self))
@property
def function_params(self) -> List[ASTFunctionParameter]:
raise NotImplementedError(repr(self))
def require_space_after_declSpecs(self) -> bool:
raise NotImplementedError(repr(self))
def get_modifiers_id(self, version: int) -> str:
raise NotImplementedError(repr(self))
def get_param_id(self, version: int) -> str:
raise NotImplementedError(repr(self))
def get_ptr_suffix_id(self, version: int) -> str:
raise NotImplementedError(repr(self))
def get_type_id(self, version: int, returnTypeId: str) -> str:
raise NotImplementedError(repr(self))
def is_function_type(self) -> bool:
raise NotImplementedError(repr(self))
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
raise NotImplementedError(repr(self))
class ASTDeclaratorNameParamQual(ASTDeclarator):
def __init__(self, declId: ASTNestedName,
arrayOps: List[ASTArray],
paramQual: ASTParametersQualifiers) -> None:
self.declId = declId
self.arrayOps = arrayOps
self.paramQual = paramQual
@property
def name(self) -> ASTNestedName:
return self.declId
@property
def isPack(self) -> bool:
return False
@property
def function_params(self) -> List[ASTFunctionParameter]:
return self.paramQual.function_params
# only the modifiers for a function, e.g.,
def get_modifiers_id(self, version: int) -> str:
# cv-qualifiers
if self.paramQual:
return self.paramQual.get_modifiers_id(version)
raise Exception("This should only be called on a function: %s" % self)
def get_param_id(self, version: int) -> str: # only the parameters (if any)
if self.paramQual:
return self.paramQual.get_param_id(version)
else:
return ''
def get_ptr_suffix_id(self, version: int) -> str: # only the array specifiers
return ''.join(a.get_id(version) for a in self.arrayOps)
def get_type_id(self, version: int, returnTypeId: str) -> str:
assert version >= 2
res = []
        # TODO: can we actually have both array ops and paramQual?
res.append(self.get_ptr_suffix_id(version))
if self.paramQual:
res.append(self.get_modifiers_id(version))
res.append('F')
res.append(returnTypeId)
res.append(self.get_param_id(version))
res.append('E')
else:
res.append(returnTypeId)
return ''.join(res)
# ------------------------------------------------------------------------
def require_space_after_declSpecs(self) -> bool:
return self.declId is not None
def is_function_type(self) -> bool:
return self.paramQual is not None
def _stringify(self, transform: StringifyTransform) -> str:
res = []
if self.declId:
res.append(transform(self.declId))
for op in self.arrayOps:
res.append(transform(op))
if self.paramQual:
res.append(transform(self.paramQual))
return ''.join(res)
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
verify_description_mode(mode)
if self.declId:
self.declId.describe_signature(signode, mode, env, symbol)
for op in self.arrayOps:
op.describe_signature(signode, mode, env, symbol)
if self.paramQual:
self.paramQual.describe_signature(signode, mode, env, symbol)
class ASTDeclaratorNameBitField(ASTDeclarator):
def __init__(self, declId: ASTNestedName, size: ASTExpression):
self.declId = declId
self.size = size
@property
def name(self) -> ASTNestedName:
return self.declId
def get_param_id(self, version: int) -> str: # only the parameters (if any)
return ''
def get_ptr_suffix_id(self, version: int) -> str: # only the array specifiers
return ''
# ------------------------------------------------------------------------
def require_space_after_declSpecs(self) -> bool:
return self.declId is not None
def is_function_type(self) -> bool:
return False
def _stringify(self, transform: StringifyTransform) -> str:
res = []
if self.declId:
res.append(transform(self.declId))
res.append(" : ")
res.append(transform(self.size))
return ''.join(res)
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
verify_description_mode(mode)
if self.declId:
self.declId.describe_signature(signode, mode, env, symbol)
signode.append(nodes.Text(' : ', ' : '))
self.size.describe_signature(signode, mode, env, symbol)
class ASTDeclaratorPtr(ASTDeclarator):
def __init__(self, next: ASTDeclarator, volatile: bool, const: bool,
attrs: List[ASTAttribute]) -> None:
assert next
self.next = next
self.volatile = volatile
self.const = const
self.attrs = attrs
@property
def name(self) -> ASTNestedName:
return self.next.name
@property
def function_params(self) -> List[ASTFunctionParameter]:
return self.next.function_params
def require_space_after_declSpecs(self) -> bool:
return self.next.require_space_after_declSpecs()
def _stringify(self, transform: StringifyTransform) -> str:
res = ['*']
for a in self.attrs:
res.append(transform(a))
if len(self.attrs) > 0 and (self.volatile or self.const):
res.append(' ')
if self.volatile:
res.append('volatile')
if self.const:
if self.volatile:
res.append(' ')
res.append('const')
if self.const or self.volatile or len(self.attrs) > 0:
if self.next.require_space_after_declSpecs():
res.append(' ')
res.append(transform(self.next))
return ''.join(res)
def get_modifiers_id(self, version: int) -> str:
return self.next.get_modifiers_id(version)
def get_param_id(self, version: int) -> str:
return self.next.get_param_id(version)
def get_ptr_suffix_id(self, version: int) -> str:
if version == 1:
res = ['P']
if self.volatile:
res.append('V')
if self.const:
res.append('C')
res.append(self.next.get_ptr_suffix_id(version))
return ''.join(res)
res = [self.next.get_ptr_suffix_id(version)]
res.append('P')
if self.volatile:
res.append('V')
if self.const:
res.append('C')
return ''.join(res)
def get_type_id(self, version: int, returnTypeId: str) -> str:
        # ReturnType *next, so we are part of the return type of 'next'
res = ['P']
if self.volatile:
res.append('V')
if self.const:
res.append('C')
res.append(returnTypeId)
return self.next.get_type_id(version, returnTypeId=''.join(res))
def is_function_type(self) -> bool:
return self.next.is_function_type()
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
verify_description_mode(mode)
signode += nodes.Text("*")
for a in self.attrs:
a.describe_signature(signode)
if len(self.attrs) > 0 and (self.volatile or self.const):
signode += nodes.Text(' ')
def _add_anno(signode: TextElement, text: str) -> None:
signode += addnodes.desc_annotation(text, text)
if self.volatile:
_add_anno(signode, 'volatile')
if self.const:
if self.volatile:
signode += nodes.Text(' ')
_add_anno(signode, 'const')
if self.const or self.volatile or len(self.attrs) > 0:
if self.next.require_space_after_declSpecs():
signode += nodes.Text(' ')
self.next.describe_signature(signode, mode, env, symbol)
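# --- Illustrative sketch, not part of the original module --------------------
# Pointer declarators compose their id suffixes in opposite orders in the two
# id versions: version 1 prepends 'P' before recursing into the inner
# declarator, version 2 appends it afterwards.  A bare ASTDeclaratorNameParamQual
# with no name serves as the innermost declarator to keep the example small.
def _example_ptr_suffix_ordering() -> None:
    inner = ASTDeclaratorNameParamQual(declId=None, arrayOps=[], paramQual=None)
    decl = ASTDeclaratorPtr(ASTDeclaratorPtr(inner, volatile=False, const=True,
                                             attrs=[]),
                            volatile=True, const=False, attrs=[])
    assert decl.get_ptr_suffix_id(1) == 'PVPC'  # outermost pointer first
    assert decl.get_ptr_suffix_id(2) == 'PCPV'  # innermost pointer first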
class ASTDeclaratorRef(ASTDeclarator):
def __init__(self, next: ASTDeclarator, attrs: List[ASTAttribute]) -> None:
assert next
self.next = next
self.attrs = attrs
@property
def name(self) -> ASTNestedName:
return self.next.name
@property
def isPack(self) -> bool:
return True
@property
def function_params(self) -> List[ASTFunctionParameter]:
return self.next.function_params
def require_space_after_declSpecs(self) -> bool:
return self.next.require_space_after_declSpecs()
def _stringify(self, transform: StringifyTransform) -> str:
res = ['&']
for a in self.attrs:
res.append(transform(a))
if len(self.attrs) > 0 and self.next.require_space_after_declSpecs():
res.append(' ')
res.append(transform(self.next))
return ''.join(res)
def get_modifiers_id(self, version: int) -> str:
return self.next.get_modifiers_id(version)
def get_param_id(self, version: int) -> str: # only the parameters (if any)
return self.next.get_param_id(version)
def get_ptr_suffix_id(self, version: int) -> str:
if version == 1:
return 'R' + self.next.get_ptr_suffix_id(version)
else:
return self.next.get_ptr_suffix_id(version) + 'R'
def get_type_id(self, version: int, returnTypeId: str) -> str:
assert version >= 2
        # ReturnType &next, so we are part of the return type of 'next'
return self.next.get_type_id(version, returnTypeId='R' + returnTypeId)
def is_function_type(self) -> bool:
return self.next.is_function_type()
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
verify_description_mode(mode)
signode += nodes.Text("&")
for a in self.attrs:
a.describe_signature(signode)
if len(self.attrs) > 0 and self.next.require_space_after_declSpecs():
signode += nodes.Text(' ')
self.next.describe_signature(signode, mode, env, symbol)
class ASTDeclaratorParamPack(ASTDeclarator):
def __init__(self, next: ASTDeclarator) -> None:
assert next
self.next = next
@property
def name(self) -> ASTNestedName:
return self.next.name
@property
def function_params(self) -> List[ASTFunctionParameter]:
return self.next.function_params
def require_space_after_declSpecs(self) -> bool:
return False
def _stringify(self, transform: StringifyTransform) -> str:
res = transform(self.next)
if self.next.name:
res = ' ' + res
return '...' + res
def get_modifiers_id(self, version: int) -> str:
return self.next.get_modifiers_id(version)
def get_param_id(self, version: int) -> str: # only the parameters (if any)
return self.next.get_param_id(version)
def get_ptr_suffix_id(self, version: int) -> str:
if version == 1:
return 'Dp' + self.next.get_ptr_suffix_id(version)
else:
return self.next.get_ptr_suffix_id(version) + 'Dp'
def get_type_id(self, version: int, returnTypeId: str) -> str:
assert version >= 2
        # ReturnType... next, so we are part of the return type of 'next'
return self.next.get_type_id(version, returnTypeId='Dp' + returnTypeId)
def is_function_type(self) -> bool:
return self.next.is_function_type()
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
verify_description_mode(mode)
signode += nodes.Text("...")
if self.next.name:
signode += nodes.Text(' ')
self.next.describe_signature(signode, mode, env, symbol)
class ASTDeclaratorMemPtr(ASTDeclarator):
def __init__(self, className: ASTNestedName,
const: bool, volatile: bool, next: ASTDeclarator) -> None:
assert className
assert next
self.className = className
self.const = const
self.volatile = volatile
self.next = next
@property
def name(self) -> ASTNestedName:
return self.next.name
@property
def function_params(self) -> List[ASTFunctionParameter]:
return self.next.function_params
def require_space_after_declSpecs(self) -> bool:
return True
def _stringify(self, transform: StringifyTransform) -> str:
res = []
res.append(transform(self.className))
res.append('::*')
if self.volatile:
res.append('volatile')
if self.const:
if self.volatile:
res.append(' ')
res.append('const')
if self.next.require_space_after_declSpecs():
res.append(' ')
res.append(transform(self.next))
return ''.join(res)
def get_modifiers_id(self, version: int) -> str:
if version == 1:
raise NoOldIdError()
else:
return self.next.get_modifiers_id(version)
def get_param_id(self, version: int) -> str: # only the parameters (if any)
if version == 1:
raise NoOldIdError()
else:
return self.next.get_param_id(version)
def get_ptr_suffix_id(self, version: int) -> str:
if version == 1:
raise NoOldIdError()
else:
raise NotImplementedError()
return self.next.get_ptr_suffix_id(version) + 'Dp'
def get_type_id(self, version: int, returnTypeId: str) -> str:
assert version >= 2
# ReturnType name::* next, so we are part of the return type of next
nextReturnTypeId = ''
if self.volatile:
nextReturnTypeId += 'V'
if self.const:
nextReturnTypeId += 'K'
nextReturnTypeId += 'M'
nextReturnTypeId += self.className.get_id(version)
nextReturnTypeId += returnTypeId
return self.next.get_type_id(version, nextReturnTypeId)
def is_function_type(self) -> bool:
return self.next.is_function_type()
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
verify_description_mode(mode)
self.className.describe_signature(signode, mode, env, symbol)
signode += nodes.Text('::*')
def _add_anno(signode: TextElement, text: str) -> None:
signode += addnodes.desc_annotation(text, text)
if self.volatile:
_add_anno(signode, 'volatile')
if self.const:
if self.volatile:
signode += nodes.Text(' ')
_add_anno(signode, 'const')
if self.next.require_space_after_declSpecs():
signode += nodes.Text(' ')
self.next.describe_signature(signode, mode, env, symbol)
class ASTDeclaratorParen(ASTDeclarator):
def __init__(self, inner: ASTDeclarator, next: ASTDeclarator) -> None:
assert inner
assert next
self.inner = inner
self.next = next
# TODO: we assume the name, params, and qualifiers are in inner
@property
def name(self) -> ASTNestedName:
return self.inner.name
@property
def function_params(self) -> List[ASTFunctionParameter]:
return self.inner.function_params
def require_space_after_declSpecs(self) -> bool:
return True
def _stringify(self, transform: StringifyTransform) -> str:
res = ['(']
res.append(transform(self.inner))
res.append(')')
res.append(transform(self.next))
return ''.join(res)
def get_modifiers_id(self, version: int) -> str:
return self.inner.get_modifiers_id(version)
def get_param_id(self, version: int) -> str: # only the parameters (if any)
return self.inner.get_param_id(version)
def get_ptr_suffix_id(self, version: int) -> str:
if version == 1:
raise NoOldIdError() # TODO: was this implemented before?
return self.next.get_ptr_suffix_id(version) + \
self.inner.get_ptr_suffix_id(version)
else:
return self.inner.get_ptr_suffix_id(version) + \
self.next.get_ptr_suffix_id(version)
def get_type_id(self, version: int, returnTypeId: str) -> str:
assert version >= 2
# ReturnType (inner)next, so 'inner' returns everything outside
nextId = self.next.get_type_id(version, returnTypeId)
return self.inner.get_type_id(version, returnTypeId=nextId)
def is_function_type(self) -> bool:
return self.inner.is_function_type()
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
verify_description_mode(mode)
signode += nodes.Text('(')
self.inner.describe_signature(signode, mode, env, symbol)
signode += nodes.Text(')')
self.next.describe_signature(signode, "noneIsName", env, symbol)
# Type and initializer stuff
##############################################################################################
class ASTPackExpansionExpr(ASTExpression):
def __init__(self, expr: Union[ASTExpression, ASTBracedInitList]):
self.expr = expr
def _stringify(self, transform: StringifyTransform) -> str:
return transform(self.expr) + '...'
def get_id(self, version: int) -> str:
id = self.expr.get_id(version)
return 'sp' + id
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
self.expr.describe_signature(signode, mode, env, symbol)
signode += nodes.Text('...')
class ASTParenExprList(ASTBase):
def __init__(self, exprs: List[Union[ASTExpression, ASTBracedInitList]]) -> None:
self.exprs = exprs
def get_id(self, version: int) -> str:
return "pi%sE" % ''.join(e.get_id(version) for e in self.exprs)
def _stringify(self, transform: StringifyTransform) -> str:
exprs = [transform(e) for e in self.exprs]
return '(%s)' % ', '.join(exprs)
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
verify_description_mode(mode)
signode.append(nodes.Text('('))
first = True
for e in self.exprs:
if not first:
signode.append(nodes.Text(', '))
else:
first = False
e.describe_signature(signode, mode, env, symbol)
signode.append(nodes.Text(')'))
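# --- Illustrative sketch, not part of the original module --------------------
# A parenthesised expression list mangles as 'pi<expr-ids>E', and a call
# through ASTPostfixCallExpr wraps the callee id as 'cl<callee><args>E'.
# ASTFallbackExpr again stands in for fully parsed argument expressions, and
# the callee id 'f' is just a placeholder string.
def _example_call_expr_ids() -> None:
    args = ASTParenExprList([ASTFallbackExpr('1'), ASTFallbackExpr('2')])
    assert str(args) == '(1, 2)'
    assert args.get_id(4) == 'pi12E'
    call = ASTPostfixCallExpr(args)
    assert call.get_id('f', 4) == 'clf12E'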
class ASTInitializer(ASTBase):
def __init__(self, value: Union[ASTExpression, ASTBracedInitList],
hasAssign: bool = True) -> None:
self.value = value
self.hasAssign = hasAssign
def _stringify(self, transform: StringifyTransform) -> str:
val = transform(self.value)
if self.hasAssign:
return ' = ' + val
else:
return val
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
verify_description_mode(mode)
if self.hasAssign:
signode.append(nodes.Text(' = '))
self.value.describe_signature(signode, 'markType', env, symbol)
class ASTType(ASTBase):
def __init__(self, declSpecs: ASTDeclSpecs, decl: ASTDeclarator) -> None:
assert declSpecs
assert decl
self.declSpecs = declSpecs
self.decl = decl
@property
def name(self) -> ASTNestedName:
return self.decl.name
@property
def isPack(self) -> bool:
return self.decl.isPack
@property
def function_params(self) -> List[ASTFunctionParameter]:
return self.decl.function_params
def get_id(self, version: int, objectType: str = None,
symbol: "Symbol" = None) -> str:
if version == 1:
res = []
if objectType: # needs the name
if objectType == 'function': # also modifiers
res.append(symbol.get_full_nested_name().get_id(version))
res.append(self.decl.get_param_id(version))
res.append(self.decl.get_modifiers_id(version))
if (self.declSpecs.leftSpecs.constexpr or
(self.declSpecs.rightSpecs and
self.declSpecs.rightSpecs.constexpr)):
res.append('CE')
elif objectType == 'type': # just the name
res.append(symbol.get_full_nested_name().get_id(version))
else:
print(objectType)
assert False
else: # only type encoding
if self.decl.is_function_type():
raise NoOldIdError()
res.append(self.declSpecs.get_id(version))
res.append(self.decl.get_ptr_suffix_id(version))
res.append(self.decl.get_param_id(version))
return ''.join(res)
# other versions
res = []
if objectType: # needs the name
if objectType == 'function': # also modifiers
modifiers = self.decl.get_modifiers_id(version)
res.append(symbol.get_full_nested_name().get_id(version, modifiers))
if version >= 4:
# with templates we need to mangle the return type in as well
templ = symbol.declaration.templatePrefix
if templ is not None:
typeId = self.decl.get_ptr_suffix_id(version)
returnTypeId = self.declSpecs.get_id(version)
res.append(typeId)
res.append(returnTypeId)
res.append(self.decl.get_param_id(version))
elif objectType == 'type': # just the name
res.append(symbol.get_full_nested_name().get_id(version))
else:
print(objectType)
assert False
else: # only type encoding
            # the 'returnType' of a non-function type is simply the last
            # type, i.e., for 'int*' it is 'int'
returnTypeId = self.declSpecs.get_id(version)
typeId = self.decl.get_type_id(version, returnTypeId)
res.append(typeId)
return ''.join(res)
def _stringify(self, transform: StringifyTransform) -> str:
res = []
declSpecs = transform(self.declSpecs)
res.append(declSpecs)
if self.decl.require_space_after_declSpecs() and len(declSpecs) > 0:
res.append(' ')
res.append(transform(self.decl))
return ''.join(res)
def get_type_declaration_prefix(self) -> str:
if self.declSpecs.trailingTypeSpec:
return 'typedef'
else:
return 'type'
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
verify_description_mode(mode)
self.declSpecs.describe_signature(signode, 'markType', env, symbol)
if (self.decl.require_space_after_declSpecs() and
len(str(self.declSpecs)) > 0):
signode += nodes.Text(' ')
# for parameters that don't really declare new names we get 'markType',
# this should not be propagated, but be 'noneIsName'.
if mode == 'markType':
mode = 'noneIsName'
self.decl.describe_signature(signode, mode, env, symbol)
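# --- Illustrative sketch, not part of the original module --------------------
# End-to-end example of the type pipeline above for "const int*": the decl
# specifiers provide the "return type" id and the declarator chain wraps it.
# The final 'PKi' assumes that _id_fundamental_v2 (defined earlier) maps 'int'
# to 'i'; everything else follows directly from the classes above.
def _example_const_int_ptr_type() -> None:
    specs = ASTDeclSpecs(
        'type',
        ASTDeclSpecsSimple(storage=None, threadLocal=False, inline=False,
                           virtual=False, explicit=False, constexpr=False,
                           volatile=False, const=True, friend=False, attrs=[]),
        ASTDeclSpecsSimple(storage=None, threadLocal=False, inline=False,
                           virtual=False, explicit=False, constexpr=False,
                           volatile=False, const=False, friend=False, attrs=[]),
        ASTTrailingTypeSpecFundamental('int'))
    decl = ASTDeclaratorPtr(
        ASTDeclaratorNameParamQual(declId=None, arrayOps=[], paramQual=None),
        volatile=False, const=False, attrs=[])
    typ = ASTType(specs, decl)
    assert str(typ) == 'const int*'
    assert typ.get_id(2) == 'PKi'  # pointer to const int, under the assumption above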
class ASTTemplateParamConstrainedTypeWithInit(ASTBase):
def __init__(self, type: ASTType, init: ASTType) -> None:
assert type
self.type = type
self.init = init
@property
def name(self) -> ASTNestedName:
return self.type.name
@property
def isPack(self) -> bool:
return self.type.isPack
def get_id(self, version: int, objectType: str = None, symbol: "Symbol" = None) -> str:
# this is not part of the normal name mangling in C++
assert version >= 2
if symbol:
# the anchor will be our parent
return symbol.parent.declaration.get_id(version, prefixed=False)
else:
return self.type.get_id(version)
def _stringify(self, transform: StringifyTransform) -> str:
res = transform(self.type)
if self.init:
res += " = "
res += transform(self.init)
return res
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
self.type.describe_signature(signode, mode, env, symbol)
if self.init:
signode += nodes.Text(" = ")
self.init.describe_signature(signode, mode, env, symbol)
class ASTTypeWithInit(ASTBase):
def __init__(self, type: ASTType, init: ASTInitializer) -> None:
self.type = type
self.init = init
@property
def name(self) -> ASTNestedName:
return self.type.name
@property
def isPack(self) -> bool:
return self.type.isPack
def get_id(self, version: int, objectType: str = None,
symbol: "Symbol" = None) -> str:
if objectType != 'member':
return self.type.get_id(version, objectType)
if version == 1:
return (symbol.get_full_nested_name().get_id(version) + '__' +
self.type.get_id(version))
return symbol.get_full_nested_name().get_id(version)
def _stringify(self, transform: StringifyTransform) -> str:
res = []
res.append(transform(self.type))
if self.init:
res.append(transform(self.init))
return ''.join(res)
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
verify_description_mode(mode)
self.type.describe_signature(signode, mode, env, symbol)
if self.init:
self.init.describe_signature(signode, mode, env, symbol)
class ASTTypeUsing(ASTBase):
def __init__(self, name: ASTNestedName, type: ASTType) -> None:
self.name = name
self.type = type
def get_id(self, version: int, objectType: str = None,
symbol: "Symbol" = None) -> str:
if version == 1:
raise NoOldIdError()
return symbol.get_full_nested_name().get_id(version)
def _stringify(self, transform: StringifyTransform) -> str:
res = []
res.append(transform(self.name))
if self.type:
res.append(' = ')
res.append(transform(self.type))
return ''.join(res)
def get_type_declaration_prefix(self) -> str:
return 'using'
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
verify_description_mode(mode)
self.name.describe_signature(signode, mode, env, symbol=symbol)
if self.type:
signode += nodes.Text(' = ')
self.type.describe_signature(signode, 'markType', env, symbol=symbol)
# Other declarations
##############################################################################################
class ASTConcept(ASTBase):
def __init__(self, nestedName: ASTNestedName, initializer: ASTInitializer) -> None:
self.nestedName = nestedName
self.initializer = initializer
@property
def name(self) -> ASTNestedName:
return self.nestedName
def get_id(self, version: int, objectType: str = None,
symbol: "Symbol" = None) -> str:
if version == 1:
raise NoOldIdError()
return symbol.get_full_nested_name().get_id(version)
def _stringify(self, transform: StringifyTransform) -> str:
res = transform(self.nestedName)
if self.initializer:
res += transform(self.initializer)
return res
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
self.nestedName.describe_signature(signode, mode, env, symbol)
if self.initializer:
self.initializer.describe_signature(signode, mode, env, symbol)
class ASTBaseClass(ASTBase):
def __init__(self, name: ASTNestedName, visibility: str,
virtual: bool, pack: bool) -> None:
self.name = name
self.visibility = visibility
self.virtual = virtual
self.pack = pack
def _stringify(self, transform: StringifyTransform) -> str:
res = []
if self.visibility is not None:
res.append(self.visibility)
res.append(' ')
if self.virtual:
res.append('virtual ')
res.append(transform(self.name))
if self.pack:
res.append('...')
return ''.join(res)
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
verify_description_mode(mode)
if self.visibility is not None:
signode += addnodes.desc_annotation(self.visibility,
self.visibility)
signode += nodes.Text(' ')
if self.virtual:
signode += addnodes.desc_annotation('virtual', 'virtual')
signode += nodes.Text(' ')
self.name.describe_signature(signode, 'markType', env, symbol=symbol)
if self.pack:
signode += nodes.Text('...')
class ASTClass(ASTBase):
def __init__(self, name: ASTNestedName, final: bool, bases: List[ASTBaseClass]) -> None:
self.name = name
self.final = final
self.bases = bases
def get_id(self, version: int, objectType: str, symbol: "Symbol") -> str:
return symbol.get_full_nested_name().get_id(version)
def _stringify(self, transform: StringifyTransform) -> str:
res = []
res.append(transform(self.name))
if self.final:
res.append(' final')
if len(self.bases) > 0:
res.append(' : ')
first = True
for b in self.bases:
if not first:
res.append(', ')
first = False
res.append(transform(b))
return ''.join(res)
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
verify_description_mode(mode)
self.name.describe_signature(signode, mode, env, symbol=symbol)
if self.final:
signode += nodes.Text(' ')
signode += addnodes.desc_annotation('final', 'final')
if len(self.bases) > 0:
signode += nodes.Text(' : ')
for b in self.bases:
b.describe_signature(signode, mode, env, symbol=symbol)
signode += nodes.Text(', ')
signode.pop()
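# Union declaration; only the name is stored here, the 'union ' prefix is
# added by ASTDeclaration.describe_signature.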
class ASTUnion(ASTBase):
def __init__(self, name: ASTNestedName) -> None:
self.name = name
def get_id(self, version: int, objectType: str, symbol: "Symbol") -> str:
if version == 1:
raise NoOldIdError()
return symbol.get_full_nested_name().get_id(version)
def _stringify(self, transform: StringifyTransform) -> str:
return transform(self.name)
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
verify_description_mode(mode)
self.name.describe_signature(signode, mode, env, symbol=symbol)
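# Enum declaration: name, the scoped keyword ('class'/'struct') if any, and an
# optional underlying type (': type').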
class ASTEnum(ASTBase):
def __init__(self, name: ASTNestedName, scoped: str,
underlyingType: ASTType) -> None:
self.name = name
self.scoped = scoped
self.underlyingType = underlyingType
def get_id(self, version: int, objectType: str, symbol: "Symbol") -> str:
if version == 1:
raise NoOldIdError()
return symbol.get_full_nested_name().get_id(version)
def _stringify(self, transform: StringifyTransform) -> str:
res = []
if self.scoped:
res.append(self.scoped)
res.append(' ')
res.append(transform(self.name))
if self.underlyingType:
res.append(' : ')
res.append(transform(self.underlyingType))
return ''.join(res)
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
verify_description_mode(mode)
# self.scoped has been done by the CPPEnumObject
self.name.describe_signature(signode, mode, env, symbol=symbol)
if self.underlyingType:
signode += nodes.Text(' : ')
self.underlyingType.describe_signature(signode, 'noneIsName',
env, symbol=symbol)
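# A single enumerator inside an enum, with an optional initializer.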
class ASTEnumerator(ASTBase):
def __init__(self, name: ASTNestedName, init: ASTInitializer) -> None:
self.name = name
self.init = init
def get_id(self, version: int, objectType: str, symbol: "Symbol") -> str:
if version == 1:
raise NoOldIdError()
return symbol.get_full_nested_name().get_id(version)
def _stringify(self, transform: StringifyTransform) -> str:
res = []
res.append(transform(self.name))
if self.init:
res.append(transform(self.init))
return ''.join(res)
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
verify_description_mode(mode)
self.name.describe_signature(signode, mode, env, symbol)
if self.init:
self.init.describe_signature(signode, 'markType', env, symbol)
################################################################################
# Templates
################################################################################
# Parameters
################################################################################
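# Base class for the different kinds of template parameters below.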
class ASTTemplateParam(ASTBase):
def get_identifier(self) -> ASTIdentifier:
raise NotImplementedError(repr(self))
def get_id(self, version: int) -> str:
raise NotImplementedError(repr(self))
def describe_signature(self, parentNode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
raise NotImplementedError(repr(self))
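# Shared data of type and template-template parameters: the introducing key
# ('typename', 'class', ...), an optional identifier, the parameter pack flag,
# and an optional default argument.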
class ASTTemplateKeyParamPackIdDefault(ASTTemplateParam):
def __init__(self, key: str, identifier: ASTIdentifier,
parameterPack: bool, default: ASTType) -> None:
assert key
if parameterPack:
assert default is None
self.key = key
self.identifier = identifier
self.parameterPack = parameterPack
self.default = default
def get_identifier(self) -> ASTIdentifier:
return self.identifier
def get_id(self, version: int) -> str:
assert version >= 2
# this is not part of the normal name mangling in C++
res = []
if self.parameterPack:
res.append('Dp')
else:
res.append('0') # we need to put something
return ''.join(res)
def _stringify(self, transform: StringifyTransform) -> str:
res = [self.key]
if self.parameterPack:
if self.identifier:
res.append(' ')
res.append('...')
if self.identifier:
if not self.parameterPack:
res.append(' ')
res.append(transform(self.identifier))
if self.default:
res.append(' = ')
res.append(transform(self.default))
return ''.join(res)
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
signode += nodes.Text(self.key)
if self.parameterPack:
if self.identifier:
signode += nodes.Text(' ')
signode += nodes.Text('...')
if self.identifier:
if not self.parameterPack:
signode += nodes.Text(' ')
self.identifier.describe_signature(signode, mode, env, '', '', symbol)
if self.default:
signode += nodes.Text(' = ')
self.default.describe_signature(signode, 'markType', env, symbol)
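# Type template parameter, e.g. 'typename T = int'; delegates to the
# key/identifier/pack/default data above.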
class ASTTemplateParamType(ASTTemplateParam):
def __init__(self, data: ASTTemplateKeyParamPackIdDefault) -> None:
assert data
self.data = data
@property
def name(self) -> ASTNestedName:
id = self.get_identifier()
return ASTNestedName([ASTNestedNameElement(id, None)], [False], rooted=False)
@property
def isPack(self) -> bool:
return self.data.parameterPack
def get_identifier(self) -> ASTIdentifier:
return self.data.get_identifier()
def get_id(self, version: int, objectType: str = None, symbol: "Symbol" = None) -> str:
# this is not part of the normal name mangling in C++
assert version >= 2
if symbol:
# the anchor will be our parent
return symbol.parent.declaration.get_id(version, prefixed=False)
else:
return self.data.get_id(version)
def _stringify(self, transform: StringifyTransform) -> str:
return transform(self.data)
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
self.data.describe_signature(signode, mode, env, symbol)
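# Template template parameter, e.g. 'template<typename> class C': a nested
# template parameter list followed by the usual key/identifier/default data.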
class ASTTemplateParamTemplateType(ASTTemplateParam):
def __init__(self, nestedParams: "ASTTemplateParams",
data: ASTTemplateKeyParamPackIdDefault) -> None:
assert nestedParams
assert data
self.nestedParams = nestedParams
self.data = data
@property
def name(self) -> ASTNestedName:
id = self.get_identifier()
return ASTNestedName([ASTNestedNameElement(id, None)], [False], rooted=False)
@property
def isPack(self) -> bool:
return self.data.parameterPack
def get_identifier(self) -> ASTIdentifier:
return self.data.get_identifier()
def get_id(self, version: int, objectType: str = None, symbol: "Symbol" = None) -> str:
assert version >= 2
# this is not part of the normal name mangling in C++
if symbol:
# the anchor will be our parent
return symbol.parent.declaration.get_id(version, prefixed=None)
else:
return self.nestedParams.get_id(version) + self.data.get_id(version)
def _stringify(self, transform: StringifyTransform) -> str:
return transform(self.nestedParams) + transform(self.data)
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
self.nestedParams.describe_signature(signode, 'noneIsName', env, symbol)
signode += nodes.Text(' ')
self.data.describe_signature(signode, mode, env, symbol)
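# Non-type template parameter, e.g. 'int N = 0', possibly constrained.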
class ASTTemplateParamNonType(ASTTemplateParam):
def __init__(self,
param: Union[ASTTypeWithInit,
ASTTemplateParamConstrainedTypeWithInit]) -> None:
assert param
self.param = param
@property
def name(self) -> ASTNestedName:
id = self.get_identifier()
return ASTNestedName([ASTNestedNameElement(id, None)], [False], rooted=False)
@property
def isPack(self) -> bool:
return self.param.isPack
def get_identifier(self) -> ASTIdentifier:
name = self.param.name
if name:
assert len(name.names) == 1
assert name.names[0].identOrOp
assert not name.names[0].templateArgs
res = name.names[0].identOrOp
assert isinstance(res, ASTIdentifier)
return res
else:
return None
def get_id(self, version: int, objectType: str = None, symbol: "Symbol" = None) -> str:
assert version >= 2
# this is not part of the normal name mangling in C++
if symbol:
# the anchor will be our parent
return symbol.parent.declaration.get_id(version, prefixed=None)
else:
return '_' + self.param.get_id(version)
def _stringify(self, transform: StringifyTransform) -> str:
return transform(self.param)
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
self.param.describe_signature(signode, mode, env, symbol)
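# A full 'template<...>' parameter list; can also be rendered as one or more
# introducer lines of a directive signature.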
class ASTTemplateParams(ASTBase):
def __init__(self, params: List[ASTTemplateParam]) -> None:
assert params is not None
self.params = params
def get_id(self, version: int) -> str:
assert version >= 2
res = []
res.append("I")
for param in self.params:
res.append(param.get_id(version))
res.append("E")
return ''.join(res)
def _stringify(self, transform: StringifyTransform) -> str:
res = []
res.append("template<")
res.append(", ".join(transform(a) for a in self.params))
res.append("> ")
return ''.join(res)
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
signode += nodes.Text("template<")
first = True
for param in self.params:
if not first:
signode += nodes.Text(", ")
first = False
param.describe_signature(signode, mode, env, symbol)
signode += nodes.Text(">")
def describe_signature_as_introducer(
self, parentNode: desc_signature, mode: str, env: "BuildEnvironment",
symbol: "Symbol", lineSpec: bool) -> None:
def makeLine(parentNode: desc_signature) -> addnodes.desc_signature_line:
signode = addnodes.desc_signature_line()
parentNode += signode
signode.sphinx_line_type = 'templateParams'
return signode
lineNode = makeLine(parentNode)
lineNode += nodes.Text("template<")
first = True
for param in self.params:
if not first:
lineNode += nodes.Text(", ")
first = False
if lineSpec:
lineNode = makeLine(parentNode)
param.describe_signature(lineNode, mode, env, symbol)
if lineSpec and not first:
lineNode = makeLine(parentNode)
lineNode += nodes.Text(">")
# Template introducers
################################################################################
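# A single parameter of a template introduction, optionally a pack ('...P').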
class ASTTemplateIntroductionParameter(ASTBase):
def __init__(self, identifier: ASTIdentifier, parameterPack: bool) -> None:
self.identifier = identifier
self.parameterPack = parameterPack
@property
def name(self) -> ASTNestedName:
id = self.get_identifier()
return ASTNestedName([ASTNestedNameElement(id, None)], [False], rooted=False)
@property
def isPack(self) -> bool:
return self.parameterPack
def get_identifier(self) -> ASTIdentifier:
return self.identifier
def get_id(self, version: int, objectType: str = None, symbol: "Symbol" = None) -> str:
assert version >= 2
# this is not part of the normal name mangling in C++
if symbol:
# the anchor will be our parent
return symbol.parent.declaration.get_id(version, prefixed=None)
else:
if self.parameterPack:
return 'Dp'
else:
return '0' # we need to put something
def get_id_as_arg(self, version: int) -> str:
assert version >= 2
# used for the implicit requires clause
res = self.identifier.get_id(version)
if self.parameterPack:
return 'sp' + res
else:
return res
def _stringify(self, transform: StringifyTransform) -> str:
res = []
if self.parameterPack:
res.append('...')
res.append(transform(self.identifier))
return ''.join(res)
def describe_signature(self, signode: TextElement, mode: str,
env: "BuildEnvironment", symbol: "Symbol") -> None:
if self.parameterPack:
signode += nodes.Text('...')
self.identifier.describe_signature(signode, mode, env, '', '', symbol)
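# Concepts-style template introduction, e.g. 'Concept{P1, P2}', which replaces
# an explicit template parameter list.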
class ASTTemplateIntroduction(ASTBase):
def __init__(self, concept: ASTNestedName,
params: List[ASTTemplateIntroductionParameter]) -> None:
assert len(params) > 0
self.concept = concept
self.params = params
def get_id(self, version: int) -> str:
assert version >= 2
# first do the same as a normal template parameter list
res = []
res.append("I")
for param in self.params:
res.append(param.get_id(version))
res.append("E")
# let's use X expr E, which is otherwise for constant template args
res.append("X")
res.append(self.concept.get_id(version))
res.append("I")
for param in self.params:
res.append(param.get_id_as_arg(version))
res.append("E")
res.append("E")
return ''.join(res)
def _stringify(self, transform: StringifyTransform) -> str:
res = []
res.append(transform(self.concept))
res.append('{')
res.append(', '.join(transform(param) for param in self.params))
res.append('} ')
return ''.join(res)
def describe_signature_as_introducer(
self, parentNode: desc_signature, mode: str,
env: "BuildEnvironment", symbol: "Symbol", lineSpec: bool) -> None:
# Note: 'lineSpec' has no effect on template introductions.
signode = addnodes.desc_signature_line()
parentNode += signode
signode.sphinx_line_type = 'templateIntroduction'
self.concept.describe_signature(signode, 'markType', env, symbol)
signode += nodes.Text('{')
first = True
for param in self.params:
if not first:
signode += nodes.Text(', ')
first = False
param.describe_signature(signode, mode, env, symbol)
signode += nodes.Text('}')
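# The sequence of template parameter lists and/or template introductions that
# prefixes a declaration; nested templates contribute one entry each.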
class ASTTemplateDeclarationPrefix(ASTBase):
def __init__(self,
templates: List[Union[ASTTemplateParams,
ASTTemplateIntroduction]]) -> None:
# templates is None means it's an explicit instantiation of a variable
self.templates = templates
def get_id(self, version: int) -> str:
assert version >= 2
# this is not part of a normal name mangling system
res = []
for t in self.templates:
res.append(t.get_id(version))
return ''.join(res)
def _stringify(self, transform: StringifyTransform) -> str:
res = []
for t in self.templates:
res.append(transform(t))
return ''.join(res)
def describe_signature(self, signode: desc_signature, mode: str,
env: "BuildEnvironment", symbol: "Symbol", lineSpec: bool) -> None:
verify_description_mode(mode)
for t in self.templates:
t.describe_signature_as_introducer(signode, 'lastIsName', env, symbol, lineSpec)
################################################################################
################################################################################
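# Top-level AST node of a parsed declaration: the object type ('class',
# 'function', 'enum', ...), the directive type, visibility, an optional
# template prefix, and the inner declaration node.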
class ASTDeclaration(ASTBase):
def __init__(self, objectType: str, directiveType: str, visibility: str,
templatePrefix: ASTTemplateDeclarationPrefix, declaration: Any) -> None:
self.objectType = objectType
self.directiveType = directiveType
self.visibility = visibility
self.templatePrefix = templatePrefix
self.declaration = declaration
self.symbol = None # type: Symbol
# set by CPPObject._add_enumerator_to_parent
self.enumeratorScopedSymbol = None # type: Symbol
def clone(self) -> "ASTDeclaration":
if self.templatePrefix:
templatePrefixClone = self.templatePrefix.clone()
else:
templatePrefixClone = None
return ASTDeclaration(self.objectType, self.directiveType,
self.visibility, templatePrefixClone,
self.declaration.clone())
@property
def name(self) -> ASTNestedName:
return self.declaration.name
@property
def function_params(self) -> List[ASTFunctionParameter]:
if self.objectType != 'function':
return None
return self.declaration.function_params
def get_id(self, version: int, prefixed: bool = True) -> str:
if version == 1:
if self.templatePrefix:
raise NoOldIdError()
if self.objectType == 'enumerator' and self.enumeratorScopedSymbol:
return self.enumeratorScopedSymbol.declaration.get_id(version)
return self.declaration.get_id(version, self.objectType, self.symbol)
# version >= 2
if self.objectType == 'enumerator' and self.enumeratorScopedSymbol:
return self.enumeratorScopedSymbol.declaration.get_id(version, prefixed)
if prefixed:
res = [_id_prefix[version]]
else:
res = []
if self.templatePrefix:
res.append(self.templatePrefix.get_id(version))
res.append(self.declaration.get_id(version, self.objectType, self.symbol))
return ''.join(res)
def get_newest_id(self) -> str:
return self.get_id(_max_id, True)
def _stringify(self, transform: StringifyTransform) -> str:
res = []
if self.visibility and self.visibility != "public":
res.append(self.visibility)
res.append(' ')
if self.templatePrefix:
res.append(transform(self.templatePrefix))
res.append(transform(self.declaration))
return ''.join(res)
def describe_signature(self, signode: desc_signature, mode: str,
env: "BuildEnvironment", options: Dict) -> None:
verify_description_mode(mode)
assert self.symbol
# The caller of the domain added a desc_signature node.
# Always enable multiline:
signode['is_multiline'] = True
# Put each line in a desc_signature_line node.
mainDeclNode = addnodes.desc_signature_line()
mainDeclNode.sphinx_line_type = 'declarator'
mainDeclNode['add_permalink'] = not self.symbol.isRedeclaration
if self.templatePrefix:
self.templatePrefix.describe_signature(signode, mode, env,
symbol=self.symbol,
lineSpec=options.get('tparam-line-spec'))
signode += mainDeclNode
if self.visibility and self.visibility != "public":
mainDeclNode += addnodes.desc_annotation(self.visibility + " ",
self.visibility + " ")
if self.objectType == 'type':
prefix = self.declaration.get_type_declaration_prefix()
prefix += ' '
mainDeclNode += addnodes.desc_annotation(prefix, prefix)
elif self.objectType == 'concept':
mainDeclNode += addnodes.desc_annotation('concept ', 'concept ')
elif self.objectType == 'member':
pass
elif self.objectType == 'function':
pass
elif self.objectType == 'class':
assert self.directiveType in ('class', 'struct')
prefix = self.directiveType + ' '
mainDeclNode += addnodes.desc_annotation(prefix, prefix)
elif self.objectType == 'union':
mainDeclNode += addnodes.desc_annotation('union ', 'union ')
elif self.objectType == 'enum':
if self.directiveType == 'enum':
prefix = 'enum '
elif self.directiveType == 'enum-class':
prefix = 'enum class '
elif self.directiveType == 'enum-struct':
prefix = 'enum struct '
else:
assert False # wrong directiveType used
mainDeclNode += addnodes.desc_annotation(prefix, prefix)
elif self.objectType == 'enumerator':
mainDeclNode += addnodes.desc_annotation('enumerator ', 'enumerator ')
else:
assert False
self.declaration.describe_signature(mainDeclNode, mode, env, self.symbol)
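# Argument of the namespace directives: a nested name with an optional
# template prefix.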
class ASTNamespace(ASTBase):
def __init__(self, nestedName: ASTNestedName,
templatePrefix: ASTTemplateDeclarationPrefix) -> None:
self.nestedName = nestedName
self.templatePrefix = templatePrefix
def _stringify(self, transform: StringifyTransform) -> str:
res = []
if self.templatePrefix:
res.append(transform(self.templatePrefix))
res.append(transform(self.nestedName))
return ''.join(res)
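# Result of Symbol._symbol_lookup: the matching symbols together with the
# parent scope and the components of the last name that was looked up.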
class SymbolLookupResult:
def __init__(self, symbols: Iterator["Symbol"], parentSymbol: "Symbol",
identOrOp: Union[ASTIdentifier, ASTOperator], templateParams: Any,
templateArgs: ASTTemplateArgs) -> None:
self.symbols = symbols
self.parentSymbol = parentSymbol
self.identOrOp = identOrOp
self.templateParams = templateParams
self.templateArgs = templateArgs
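# A resolved path through the symbol tree, used to re-find a symbol's scope
# after the environment has been pickled (see Symbol.get_lookup_key).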
class LookupKey:
def __init__(self, data: List[Tuple[ASTNestedNameElement,
Union[ASTTemplateParams,
ASTTemplateIntroduction],
str]]) -> None:
self.data = data
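# A node in the symbol tree that mirrors the C++ scope hierarchy. Each symbol
# stores its identifier (or operator), template parameters/arguments, and, if
# it has been documented, the declaration and the docname it came from.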
class Symbol:
debug_indent = 0
debug_indent_string = " "
debug_lookup = False
debug_show_tree = False
@staticmethod
def debug_print(*args: Any) -> None:
print(Symbol.debug_indent_string * Symbol.debug_indent, end="")
print(*args)
def _assert_invariants(self) -> None:
if not self.parent:
            # parent == None means global scope, so a declaration implies a parent
assert not self.identOrOp
assert not self.templateParams
assert not self.templateArgs
assert not self.declaration
assert not self.docname
else:
if self.declaration:
assert self.docname
def __setattr__(self, key: str, value: Any) -> None:
if key == "children":
assert False
else:
return super().__setattr__(key, value)
def __init__(self, parent: "Symbol", identOrOp: Union[ASTIdentifier, ASTOperator],
templateParams: Union[ASTTemplateParams, ASTTemplateIntroduction],
templateArgs: Any, declaration: ASTDeclaration, docname: str) -> None:
self.parent = parent
# declarations in a single directive are linked together
self.siblingAbove = None # type: Symbol
self.siblingBelow = None # type: Symbol
self.identOrOp = identOrOp
self.templateParams = templateParams # template<templateParams>
self.templateArgs = templateArgs # identifier<templateArgs>
self.declaration = declaration
self.docname = docname
self.isRedeclaration = False
self._assert_invariants()
# Remember to modify Symbol.remove if modifications to the parent change.
self._children = [] # type: List[Symbol]
self._anonChildren = [] # type: List[Symbol]
# note: _children includes _anonChildren
if self.parent:
self.parent._children.append(self)
if self.declaration:
self.declaration.symbol = self
# Do symbol addition after self._children has been initialised.
self._add_template_and_function_params()
def _fill_empty(self, declaration: ASTDeclaration, docname: str) -> None:
self._assert_invariants()
assert not self.declaration
assert not self.docname
assert declaration
assert docname
self.declaration = declaration
self.declaration.symbol = self
self.docname = docname
self._assert_invariants()
# and symbol addition should be done as well
self._add_template_and_function_params()
def _add_template_and_function_params(self) -> None:
if Symbol.debug_lookup:
Symbol.debug_indent += 1
Symbol.debug_print("_add_template_and_function_params:")
# Note: we may be called from _fill_empty, so the symbols we want
# to add may actually already be present (as empty symbols).
# add symbols for the template params
if self.templateParams:
for tp in self.templateParams.params:
if not tp.get_identifier():
continue
                # only add a declaration if we ourselves are from a declaration
if self.declaration:
decl = ASTDeclaration('templateParam', None, None, None, tp)
else:
decl = None
nne = ASTNestedNameElement(tp.get_identifier(), None)
nn = ASTNestedName([nne], [False], rooted=False)
self._add_symbols(nn, [], decl, self.docname)
# add symbols for function parameters, if any
if self.declaration is not None and self.declaration.function_params is not None:
for fp in self.declaration.function_params:
if fp.arg is None:
continue
nn = fp.arg.name
if nn is None:
continue
# (comparing to the template params: we have checked that we are a declaration)
decl = ASTDeclaration('functionParam', None, None, None, fp)
assert not nn.rooted
assert len(nn.names) == 1
self._add_symbols(nn, [], decl, self.docname)
if Symbol.debug_lookup:
Symbol.debug_indent -= 1
def remove(self) -> None:
if self.parent is None:
return
assert self in self.parent._children
self.parent._children.remove(self)
self.parent = None
def clear_doc(self, docname: str) -> None:
newChildren = [] # type: List[Symbol]
for sChild in self._children:
sChild.clear_doc(docname)
if sChild.declaration and sChild.docname == docname:
sChild.declaration = None
sChild.docname = None
if sChild.siblingAbove is not None:
sChild.siblingAbove.siblingBelow = sChild.siblingBelow
if sChild.siblingBelow is not None:
sChild.siblingBelow.siblingAbove = sChild.siblingAbove
sChild.siblingAbove = None
sChild.siblingBelow = None
newChildren.append(sChild)
self._children = newChildren
def get_all_symbols(self) -> Iterator[Any]:
yield self
for sChild in self._children:
for s in sChild.get_all_symbols():
yield s
@property
def children_recurse_anon(self) -> Generator["Symbol", None, None]:
for c in self._children:
yield c
if not c.identOrOp.is_anon():
continue
yield from c.children_recurse_anon
def get_lookup_key(self) -> "LookupKey":
# The pickle files for the environment and for each document are distinct.
        # The environment has all the symbols, but each document has xrefs that
# must know their scope. A lookup key is essentially a specification of
# how to find a specific symbol.
symbols = []
s = self
while s.parent:
symbols.append(s)
s = s.parent
symbols.reverse()
key = []
for s in symbols:
nne = ASTNestedNameElement(s.identOrOp, s.templateArgs)
if s.declaration is not None:
key.append((nne, s.templateParams, s.declaration.get_newest_id()))
else:
key.append((nne, s.templateParams, None))
return LookupKey(key)
def get_full_nested_name(self) -> ASTNestedName:
symbols = []
s = self
while s.parent:
symbols.append(s)
s = s.parent
symbols.reverse()
names = []
templates = []
for s in symbols:
names.append(ASTNestedNameElement(s.identOrOp, s.templateArgs))
templates.append(False)
return ASTNestedName(names, templates, rooted=False)
def _find_first_named_symbol(self, identOrOp: Union[ASTIdentifier, ASTOperator],
templateParams: Any, templateArgs: ASTTemplateArgs,
templateShorthand: bool, matchSelf: bool,
recurseInAnon: bool, correctPrimaryTemplateArgs: bool
) -> "Symbol":
if Symbol.debug_lookup:
Symbol.debug_print("_find_first_named_symbol ->")
res = self._find_named_symbols(identOrOp, templateParams, templateArgs,
templateShorthand, matchSelf, recurseInAnon,
correctPrimaryTemplateArgs,
searchInSiblings=False)
try:
return next(res)
except StopIteration:
return None
def _find_named_symbols(self, identOrOp: Union[ASTIdentifier, ASTOperator],
templateParams: Any, templateArgs: ASTTemplateArgs,
templateShorthand: bool, matchSelf: bool,
recurseInAnon: bool, correctPrimaryTemplateArgs: bool,
searchInSiblings: bool) -> Iterator["Symbol"]:
if Symbol.debug_lookup:
Symbol.debug_indent += 1
Symbol.debug_print("_find_named_symbols:")
Symbol.debug_indent += 1
Symbol.debug_print("self:")
print(self.to_string(Symbol.debug_indent + 1), end="")
Symbol.debug_print("identOrOp: ", identOrOp)
Symbol.debug_print("templateParams: ", templateParams)
Symbol.debug_print("templateArgs: ", templateArgs)
Symbol.debug_print("templateShorthand: ", templateShorthand)
Symbol.debug_print("matchSelf: ", matchSelf)
Symbol.debug_print("recurseInAnon: ", recurseInAnon)
Symbol.debug_print("correctPrimaryTemplateAargs:", correctPrimaryTemplateArgs)
Symbol.debug_print("searchInSiblings: ", searchInSiblings)
def isSpecialization() -> bool:
            # the names of the template parameters must be given exactly as args,
            # and params that are packs must appear in the args as the expanded name
if len(templateParams.params) != len(templateArgs.args):
return True
# having no template params and no arguments is also a specialization
if len(templateParams.params) == 0:
return True
for i in range(len(templateParams.params)):
param = templateParams.params[i]
arg = templateArgs.args[i]
# TODO: doing this by string manipulation is probably not the most efficient
paramName = str(param.name)
argTxt = str(arg)
isArgPackExpansion = argTxt.endswith('...')
if param.isPack != isArgPackExpansion:
return True
argName = argTxt[:-3] if isArgPackExpansion else argTxt
if paramName != argName:
return True
return False
if correctPrimaryTemplateArgs:
if templateParams is not None and templateArgs is not None:
# If both are given, but it's not a specialization, then do lookup as if
# there is no argument list.
# For example: template<typename T> int A<T>::var;
if not isSpecialization():
templateArgs = None
def matches(s: "Symbol") -> bool:
if s.identOrOp != identOrOp:
return False
if (s.templateParams is None) != (templateParams is None):
if templateParams is not None:
# we query with params, they must match params
return False
if not templateShorthand:
# we don't query with params, and we do care about them
return False
if templateParams:
# TODO: do better comparison
if str(s.templateParams) != str(templateParams):
return False
if (s.templateArgs is None) != (templateArgs is None):
return False
if s.templateArgs:
# TODO: do better comparison
if str(s.templateArgs) != str(templateArgs):
return False
return True
def candidates() -> Generator[Symbol, None, None]:
s = self
if Symbol.debug_lookup:
Symbol.debug_print("searching in self:")
print(s.to_string(Symbol.debug_indent + 1), end="")
while True:
if matchSelf:
yield s
if recurseInAnon:
yield from s.children_recurse_anon
else:
yield from s._children
if s.siblingAbove is None:
break
s = s.siblingAbove
if Symbol.debug_lookup:
Symbol.debug_print("searching in sibling:")
print(s.to_string(Symbol.debug_indent + 1), end="")
for s in candidates():
if Symbol.debug_lookup:
Symbol.debug_print("candidate:")
print(s.to_string(Symbol.debug_indent + 1), end="")
if matches(s):
if Symbol.debug_lookup:
Symbol.debug_indent += 1
Symbol.debug_print("matches")
Symbol.debug_indent -= 3
yield s
if Symbol.debug_lookup:
Symbol.debug_indent += 2
if Symbol.debug_lookup:
Symbol.debug_indent -= 2
def _symbol_lookup(self, nestedName: ASTNestedName, templateDecls: List[Any],
onMissingQualifiedSymbol: Callable[["Symbol", Union[ASTIdentifier, ASTOperator], Any, ASTTemplateArgs], "Symbol"], # NOQA
strictTemplateParamArgLists: bool, ancestorLookupType: str,
templateShorthand: bool, matchSelf: bool,
recurseInAnon: bool, correctPrimaryTemplateArgs: bool,
searchInSiblings: bool) -> SymbolLookupResult:
# ancestorLookupType: if not None, specifies the target type of the lookup
if Symbol.debug_lookup:
Symbol.debug_indent += 1
Symbol.debug_print("_symbol_lookup:")
Symbol.debug_indent += 1
Symbol.debug_print("self:")
print(self.to_string(Symbol.debug_indent + 1), end="")
Symbol.debug_print("nestedName: ", nestedName)
Symbol.debug_print("templateDecls: ", templateDecls)
Symbol.debug_print("strictTemplateParamArgLists:", strictTemplateParamArgLists)
Symbol.debug_print("ancestorLookupType:", ancestorLookupType)
Symbol.debug_print("templateShorthand: ", templateShorthand)
Symbol.debug_print("matchSelf: ", matchSelf)
Symbol.debug_print("recurseInAnon: ", recurseInAnon)
Symbol.debug_print("correctPrimaryTemplateArgs: ", correctPrimaryTemplateArgs)
Symbol.debug_print("searchInSiblings: ", searchInSiblings)
if strictTemplateParamArgLists:
# Each template argument list must have a template parameter list.
# But to declare a template there must be an additional template parameter list.
assert (nestedName.num_templates() == len(templateDecls) or
nestedName.num_templates() + 1 == len(templateDecls))
else:
assert len(templateDecls) <= nestedName.num_templates() + 1
names = nestedName.names
# find the right starting point for lookup
parentSymbol = self
if nestedName.rooted:
while parentSymbol.parent:
parentSymbol = parentSymbol.parent
if ancestorLookupType is not None:
# walk up until we find the first identifier
firstName = names[0]
if not firstName.is_operator():
while parentSymbol.parent:
if parentSymbol.find_identifier(firstName.identOrOp,
matchSelf=matchSelf,
recurseInAnon=recurseInAnon,
searchInSiblings=searchInSiblings):
                        # if we are in the scope of a constructor but want to
                        # reference the class, we need to walk one extra level up
if (len(names) == 1 and ancestorLookupType == 'class' and matchSelf and
parentSymbol.parent and
parentSymbol.parent.identOrOp == firstName.identOrOp):
pass
else:
break
parentSymbol = parentSymbol.parent
if Symbol.debug_lookup:
Symbol.debug_print("starting point:")
print(parentSymbol.to_string(Symbol.debug_indent + 1), end="")
# and now the actual lookup
iTemplateDecl = 0
for name in names[:-1]:
identOrOp = name.identOrOp
templateArgs = name.templateArgs
if strictTemplateParamArgLists:
# there must be a parameter list
if templateArgs:
assert iTemplateDecl < len(templateDecls)
templateParams = templateDecls[iTemplateDecl]
iTemplateDecl += 1
else:
templateParams = None
else:
# take the next template parameter list if there is one
# otherwise it's ok
if templateArgs and iTemplateDecl < len(templateDecls):
templateParams = templateDecls[iTemplateDecl]
iTemplateDecl += 1
else:
templateParams = None
symbol = parentSymbol._find_first_named_symbol(
identOrOp,
templateParams, templateArgs,
templateShorthand=templateShorthand,
matchSelf=matchSelf,
recurseInAnon=recurseInAnon,
correctPrimaryTemplateArgs=correctPrimaryTemplateArgs)
if symbol is None:
symbol = onMissingQualifiedSymbol(parentSymbol, identOrOp,
templateParams, templateArgs)
if symbol is None:
if Symbol.debug_lookup:
Symbol.debug_indent -= 2
return None
            # We have now matched part of a nested name, and need to match more,
            # so even if matchSelf was requested before, it must not apply
            # anymore (see also issue #2666).
matchSelf = False
parentSymbol = symbol
if Symbol.debug_lookup:
Symbol.debug_print("handle last name from:")
print(parentSymbol.to_string(Symbol.debug_indent + 1), end="")
# handle the last name
name = names[-1]
identOrOp = name.identOrOp
templateArgs = name.templateArgs
if iTemplateDecl < len(templateDecls):
assert iTemplateDecl + 1 == len(templateDecls)
templateParams = templateDecls[iTemplateDecl]
else:
assert iTemplateDecl == len(templateDecls)
templateParams = None
symbols = parentSymbol._find_named_symbols(
identOrOp, templateParams, templateArgs,
templateShorthand=templateShorthand, matchSelf=matchSelf,
recurseInAnon=recurseInAnon, correctPrimaryTemplateArgs=False,
searchInSiblings=searchInSiblings)
if Symbol.debug_lookup:
symbols = list(symbols) # type: ignore
Symbol.debug_indent -= 2
return SymbolLookupResult(symbols, parentSymbol,
identOrOp, templateParams, templateArgs)
def _add_symbols(self, nestedName: ASTNestedName, templateDecls: List[Any],
declaration: ASTDeclaration, docname: str) -> "Symbol":
# Used for adding a whole path of symbols, where the last may or may not
# be an actual declaration.
if Symbol.debug_lookup:
Symbol.debug_indent += 1
Symbol.debug_print("_add_symbols:")
Symbol.debug_indent += 1
Symbol.debug_print("tdecls:", templateDecls)
Symbol.debug_print("nn: ", nestedName)
Symbol.debug_print("decl: ", declaration)
Symbol.debug_print("doc: ", docname)
def onMissingQualifiedSymbol(parentSymbol: "Symbol",
identOrOp: Union[ASTIdentifier, ASTOperator],
templateParams: Any, templateArgs: ASTTemplateArgs
) -> "Symbol":
if Symbol.debug_lookup:
Symbol.debug_indent += 1
Symbol.debug_print("_add_symbols, onMissingQualifiedSymbol:")
Symbol.debug_indent += 1
Symbol.debug_print("templateParams:", templateParams)
Symbol.debug_print("identOrOp: ", identOrOp)
Symbol.debug_print("templateARgs: ", templateArgs)
Symbol.debug_indent -= 2
return Symbol(parent=parentSymbol, identOrOp=identOrOp,
templateParams=templateParams,
templateArgs=templateArgs, declaration=None,
docname=None)
lookupResult = self._symbol_lookup(nestedName, templateDecls,
onMissingQualifiedSymbol,
strictTemplateParamArgLists=True,
ancestorLookupType=None,
templateShorthand=False,
matchSelf=False,
recurseInAnon=False,
correctPrimaryTemplateArgs=True,
searchInSiblings=False)
assert lookupResult is not None # we create symbols all the way, so that can't happen
symbols = list(lookupResult.symbols)
if len(symbols) == 0:
if Symbol.debug_lookup:
Symbol.debug_print("_add_symbols, result, no symbol:")
Symbol.debug_indent += 1
Symbol.debug_print("templateParams:", lookupResult.templateParams)
Symbol.debug_print("identOrOp: ", lookupResult.identOrOp)
Symbol.debug_print("templateArgs: ", lookupResult.templateArgs)
Symbol.debug_print("declaration: ", declaration)
Symbol.debug_print("docname: ", docname)
Symbol.debug_indent -= 1
symbol = Symbol(parent=lookupResult.parentSymbol,
identOrOp=lookupResult.identOrOp,
templateParams=lookupResult.templateParams,
templateArgs=lookupResult.templateArgs,
declaration=declaration,
docname=docname)
if Symbol.debug_lookup:
Symbol.debug_indent -= 2
return symbol
if Symbol.debug_lookup:
Symbol.debug_print("_add_symbols, result, symbols:")
Symbol.debug_indent += 1
Symbol.debug_print("number symbols:", len(symbols))
Symbol.debug_indent -= 1
if not declaration:
if Symbol.debug_lookup:
Symbol.debug_print("no delcaration")
Symbol.debug_indent -= 2
# good, just a scope creation
# TODO: what if we have more than one symbol?
return symbols[0]
noDecl = []
withDecl = []
dupDecl = []
for s in symbols:
if s.declaration is None:
noDecl.append(s)
elif s.isRedeclaration:
dupDecl.append(s)
else:
withDecl.append(s)
if Symbol.debug_lookup:
Symbol.debug_print("#noDecl: ", len(noDecl))
Symbol.debug_print("#withDecl:", len(withDecl))
Symbol.debug_print("#dupDecl: ", len(dupDecl))
# With partial builds we may start with a large symbol tree stripped of declarations.
# Essentially any combination of noDecl, withDecl, and dupDecls seems possible.
# TODO: make partial builds fully work. What should happen when the primary symbol gets
        # deleted, and other duplicates exist? The full document should probably be rebuilt.
# First check if one of those with a declaration matches.
# If it's a function, we need to compare IDs,
# otherwise there should be only one symbol with a declaration.
def makeCandSymbol() -> "Symbol":
if Symbol.debug_lookup:
Symbol.debug_print("begin: creating candidate symbol")
symbol = Symbol(parent=lookupResult.parentSymbol,
identOrOp=lookupResult.identOrOp,
templateParams=lookupResult.templateParams,
templateArgs=lookupResult.templateArgs,
declaration=declaration,
docname=docname)
if Symbol.debug_lookup:
Symbol.debug_print("end: creating candidate symbol")
return symbol
if len(withDecl) == 0:
candSymbol = None
else:
candSymbol = makeCandSymbol()
def handleDuplicateDeclaration(symbol: "Symbol", candSymbol: "Symbol") -> None:
if Symbol.debug_lookup:
Symbol.debug_indent += 1
Symbol.debug_print("redeclaration")
Symbol.debug_indent -= 1
Symbol.debug_indent -= 2
# Redeclaration of the same symbol.
# Let the new one be there, but raise an error to the client
# so it can use the real symbol as subscope.
# This will probably result in a duplicate id warning.
candSymbol.isRedeclaration = True
raise _DuplicateSymbolError(symbol, declaration)
if declaration.objectType != "function":
assert len(withDecl) <= 1
handleDuplicateDeclaration(withDecl[0], candSymbol)
# (not reachable)
# a function, so compare IDs
candId = declaration.get_newest_id()
if Symbol.debug_lookup:
Symbol.debug_print("candId:", candId)
for symbol in withDecl:
oldId = symbol.declaration.get_newest_id()
if Symbol.debug_lookup:
Symbol.debug_print("oldId: ", oldId)
if candId == oldId:
handleDuplicateDeclaration(symbol, candSymbol)
# (not reachable)
# no candidate symbol found with matching ID
# if there is an empty symbol, fill that one
if len(noDecl) == 0:
if Symbol.debug_lookup:
Symbol.debug_print("no match, no empty, candSybmol is not None?:", candSymbol is not None) # NOQA
Symbol.debug_indent -= 2
if candSymbol is not None:
return candSymbol
else:
return makeCandSymbol()
else:
if Symbol.debug_lookup:
Symbol.debug_print("no match, but fill an empty declaration, candSybmol is not None?:", candSymbol is not None) # NOQA
Symbol.debug_indent -= 2
if candSymbol is not None:
candSymbol.remove()
# assert len(noDecl) == 1
# TODO: enable assertion when we at some point find out how to do cleanup
# for now, just take the first one, it should work fine ... right?
symbol = noDecl[0]
# If someone first opened the scope, and then later
            # declares it, e.g.,
# .. namespace:: Test
# .. namespace:: nullptr
# .. class:: Test
symbol._fill_empty(declaration, docname)
return symbol
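    # Merge the children of another symbol tree ('other') into this one,
    # matching existing symbols by their newest IDs and warning about
    # duplicate declarations.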
def merge_with(self, other: "Symbol", docnames: List[str],
env: "BuildEnvironment") -> None:
if Symbol.debug_lookup:
Symbol.debug_indent += 1
Symbol.debug_print("merge_with:")
assert other is not None
def unconditionalAdd(self, otherChild):
# TODO: hmm, should we prune by docnames?
self._children.append(otherChild)
otherChild.parent = self
otherChild._assert_invariants()
if Symbol.debug_lookup:
Symbol.debug_indent += 1
for otherChild in other._children:
if Symbol.debug_lookup:
Symbol.debug_print("otherChild:\n", otherChild.to_string(Symbol.debug_indent))
Symbol.debug_indent += 1
if otherChild.isRedeclaration:
unconditionalAdd(self, otherChild)
if Symbol.debug_lookup:
Symbol.debug_print("isRedeclaration")
Symbol.debug_indent -= 1
continue
            candidateIter = self._find_named_symbols(
identOrOp=otherChild.identOrOp,
templateParams=otherChild.templateParams,
templateArgs=otherChild.templateArgs,
templateShorthand=False, matchSelf=False,
recurseInAnon=False, correctPrimaryTemplateArgs=False,
searchInSiblings=False)
            candidates = list(candidateIter)
if Symbol.debug_lookup:
Symbol.debug_print("raw candidate symbols:", len(candidates))
symbols = [s for s in candidates if not s.isRedeclaration]
if Symbol.debug_lookup:
Symbol.debug_print("non-duplicate candidate symbols:", len(symbols))
if len(symbols) == 0:
unconditionalAdd(self, otherChild)
if Symbol.debug_lookup:
Symbol.debug_indent -= 1
continue
ourChild = None
if otherChild.declaration is None:
if Symbol.debug_lookup:
Symbol.debug_print("no declaration in other child")
ourChild = symbols[0]
else:
queryId = otherChild.declaration.get_newest_id()
if Symbol.debug_lookup:
Symbol.debug_print("queryId: ", queryId)
for symbol in symbols:
if symbol.declaration is None:
if Symbol.debug_lookup:
Symbol.debug_print("empty candidate")
# if in the end we have non matching, but have an empty one,
# then just continue with that
ourChild = symbol
continue
candId = symbol.declaration.get_newest_id()
if Symbol.debug_lookup:
Symbol.debug_print("candidate:", candId)
if candId == queryId:
ourChild = symbol
break
if Symbol.debug_lookup:
Symbol.debug_indent -= 1
if ourChild is None:
unconditionalAdd(self, otherChild)
continue
if otherChild.declaration and otherChild.docname in docnames:
if not ourChild.declaration:
ourChild._fill_empty(otherChild.declaration, otherChild.docname)
elif ourChild.docname != otherChild.docname:
name = str(ourChild.declaration)
msg = __("Duplicate declaration, also defined in '%s'.\n"
"Declaration is '%s'.")
msg = msg % (ourChild.docname, name)
logger.warning(msg, location=otherChild.docname)
else:
# Both have declarations, and in the same docname.
# This can apparently happen, it should be safe to
# just ignore it, right?
# Hmm, only on duplicate declarations, right?
msg = "Internal C++ domain error during symbol merging.\n"
msg += "ourChild:\n" + ourChild.to_string(1)
msg += "\notherChild:\n" + otherChild.to_string(1)
logger.warning(msg, location=otherChild.docname)
ourChild.merge_with(otherChild, docnames, env)
if Symbol.debug_lookup:
Symbol.debug_indent -= 2
def add_name(self, nestedName: ASTNestedName,
templatePrefix: ASTTemplateDeclarationPrefix = None) -> "Symbol":
if Symbol.debug_lookup:
Symbol.debug_indent += 1
Symbol.debug_print("add_name:")
if templatePrefix:
templateDecls = templatePrefix.templates
else:
templateDecls = []
res = self._add_symbols(nestedName, templateDecls,
declaration=None, docname=None)
if Symbol.debug_lookup:
Symbol.debug_indent -= 1
return res
def add_declaration(self, declaration: ASTDeclaration, docname: str) -> "Symbol":
if Symbol.debug_lookup:
Symbol.debug_indent += 1
Symbol.debug_print("add_declaration:")
assert declaration
assert docname
nestedName = declaration.name
if declaration.templatePrefix:
templateDecls = declaration.templatePrefix.templates
else:
templateDecls = []
res = self._add_symbols(nestedName, templateDecls, declaration, docname)
if Symbol.debug_lookup:
Symbol.debug_indent -= 1
return res
def find_identifier(self, identOrOp: Union[ASTIdentifier, ASTOperator],
matchSelf: bool, recurseInAnon: bool, searchInSiblings: bool
) -> "Symbol":
if Symbol.debug_lookup:
Symbol.debug_indent += 1
Symbol.debug_print("find_identifier:")
Symbol.debug_indent += 1
Symbol.debug_print("identOrOp: ", identOrOp)
Symbol.debug_print("matchSelf: ", matchSelf)
Symbol.debug_print("recurseInAnon: ", recurseInAnon)
Symbol.debug_print("searchInSiblings:", searchInSiblings)
print(self.to_string(Symbol.debug_indent + 1), end="")
Symbol.debug_indent -= 2
current = self
while current is not None:
if Symbol.debug_lookup:
Symbol.debug_indent += 2
Symbol.debug_print("trying:")
print(current.to_string(Symbol.debug_indent + 1), end="")
Symbol.debug_indent -= 2
if matchSelf and current.identOrOp == identOrOp:
return current
children = current.children_recurse_anon if recurseInAnon else current._children
for s in children:
if s.identOrOp == identOrOp:
return s
if not searchInSiblings:
break
current = current.siblingAbove
return None
def direct_lookup(self, key: "LookupKey") -> "Symbol":
if Symbol.debug_lookup:
Symbol.debug_indent += 1
Symbol.debug_print("direct_lookup:")
Symbol.debug_indent += 1
s = self
for name, templateParams, id_ in key.data:
if id_ is not None:
res = None
for cand in s._children:
if cand.declaration is None:
continue
if cand.declaration.get_newest_id() == id_:
res = cand
break
s = res
else:
identOrOp = name.identOrOp
templateArgs = name.templateArgs
s = s._find_first_named_symbol(identOrOp,
templateParams, templateArgs,
templateShorthand=False,
matchSelf=False,
recurseInAnon=False,
correctPrimaryTemplateArgs=False)
if Symbol.debug_lookup:
Symbol.debug_print("name: ", name)
Symbol.debug_print("templateParams:", templateParams)
Symbol.debug_print("id: ", id_)
if s is not None:
print(s.to_string(Symbol.debug_indent + 1), end="")
else:
Symbol.debug_print("not found")
if s is None:
if Symbol.debug_lookup:
Symbol.debug_indent -= 2
return None
if Symbol.debug_lookup:
Symbol.debug_indent -= 2
return s
def find_name(self, nestedName: ASTNestedName, templateDecls: List[Any],
typ: str, templateShorthand: bool, matchSelf: bool,
recurseInAnon: bool, searchInSiblings: bool) -> Tuple[List["Symbol"], str]:
        # templateShorthand: missing template parameter lists for templates are ok
# If the first component is None,
# then the second component _may_ be a string explaining why.
if Symbol.debug_lookup:
Symbol.debug_indent += 1
Symbol.debug_print("find_name:")
Symbol.debug_indent += 1
Symbol.debug_print("self:")
print(self.to_string(Symbol.debug_indent + 1), end="")
Symbol.debug_print("nestedName: ", nestedName)
Symbol.debug_print("templateDecls: ", templateDecls)
Symbol.debug_print("typ: ", typ)
Symbol.debug_print("templateShorthand:", templateShorthand)
Symbol.debug_print("matchSelf: ", matchSelf)
Symbol.debug_print("recurseInAnon: ", recurseInAnon)
Symbol.debug_print("searchInSiblings: ", searchInSiblings)
class QualifiedSymbolIsTemplateParam(Exception):
pass
def onMissingQualifiedSymbol(parentSymbol: "Symbol",
identOrOp: Union[ASTIdentifier, ASTOperator],
templateParams: Any,
templateArgs: ASTTemplateArgs) -> "Symbol":
# TODO: Maybe search without template args?
# Though, the correctPrimaryTemplateArgs does
# that for primary templates.
# Is there another case where it would be good?
if parentSymbol.declaration is not None:
if parentSymbol.declaration.objectType == 'templateParam':
raise QualifiedSymbolIsTemplateParam()
return None
try:
lookupResult = self._symbol_lookup(nestedName, templateDecls,
onMissingQualifiedSymbol,
strictTemplateParamArgLists=False,
ancestorLookupType=typ,
templateShorthand=templateShorthand,
matchSelf=matchSelf,
recurseInAnon=recurseInAnon,
correctPrimaryTemplateArgs=False,
searchInSiblings=searchInSiblings)
except QualifiedSymbolIsTemplateParam:
return None, "templateParamInQualified"
if lookupResult is None:
# if it was a part of the qualification that could not be found
if Symbol.debug_lookup:
Symbol.debug_indent -= 2
return None, None
res = list(lookupResult.symbols)
if len(res) != 0:
if Symbol.debug_lookup:
Symbol.debug_indent -= 2
return res, None
if lookupResult.parentSymbol.declaration is not None:
if lookupResult.parentSymbol.declaration.objectType == 'templateParam':
return None, "templateParamInQualified"
# try without template params and args
symbol = lookupResult.parentSymbol._find_first_named_symbol(
lookupResult.identOrOp, None, None,
templateShorthand=templateShorthand, matchSelf=matchSelf,
recurseInAnon=recurseInAnon, correctPrimaryTemplateArgs=False)
if Symbol.debug_lookup:
Symbol.debug_indent -= 2
if symbol is not None:
return [symbol], None
else:
return None, None
def find_declaration(self, declaration: ASTDeclaration, typ: str, templateShorthand: bool,
matchSelf: bool, recurseInAnon: bool) -> "Symbol":
        # templateShorthand: missing template parameter lists for templates are ok
if Symbol.debug_lookup:
Symbol.debug_indent += 1
Symbol.debug_print("find_declaration:")
nestedName = declaration.name
if declaration.templatePrefix:
templateDecls = declaration.templatePrefix.templates
else:
templateDecls = []
def onMissingQualifiedSymbol(parentSymbol: "Symbol",
identOrOp: Union[ASTIdentifier, ASTOperator],
templateParams: Any,
templateArgs: ASTTemplateArgs) -> "Symbol":
return None
lookupResult = self._symbol_lookup(nestedName, templateDecls,
onMissingQualifiedSymbol,
strictTemplateParamArgLists=False,
ancestorLookupType=typ,
templateShorthand=templateShorthand,
matchSelf=matchSelf,
recurseInAnon=recurseInAnon,
correctPrimaryTemplateArgs=False,
searchInSiblings=False)
if Symbol.debug_lookup:
Symbol.debug_indent -= 1
if lookupResult is None:
return None
symbols = list(lookupResult.symbols)
if len(symbols) == 0:
return None
querySymbol = Symbol(parent=lookupResult.parentSymbol,
identOrOp=lookupResult.identOrOp,
templateParams=lookupResult.templateParams,
templateArgs=lookupResult.templateArgs,
declaration=declaration,
docname='fakeDocnameForQuery')
queryId = declaration.get_newest_id()
for symbol in symbols:
if symbol.declaration is None:
continue
candId = symbol.declaration.get_newest_id()
if candId == queryId:
querySymbol.remove()
return symbol
querySymbol.remove()
return None
def to_string(self, indent: int) -> str:
res = [Symbol.debug_indent_string * indent]
if not self.parent:
res.append('::')
else:
if self.templateParams:
res.append(str(self.templateParams))
res.append('\n')
res.append(Symbol.debug_indent_string * indent)
if self.identOrOp:
res.append(str(self.identOrOp))
else:
res.append(str(self.declaration))
if self.templateArgs:
res.append(str(self.templateArgs))
if self.declaration:
res.append(": ")
if self.isRedeclaration:
res.append('!!duplicate!! ')
res.append(str(self.declaration))
if self.docname:
res.append('\t(')
res.append(self.docname)
res.append(')')
res.append('\n')
return ''.join(res)
def dump(self, indent: int) -> str:
res = [self.to_string(indent)]
for c in self._children:
res.append(c.dump(indent + 1))
return ''.join(res)
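# Parser for C++ declarations and expressions as they appear in directive
# signatures and cross-reference targets.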
class DefinitionParser(BaseParser):
# those without signedness and size modifiers
# see https://en.cppreference.com/w/cpp/language/types
_simple_fundemental_types = (
'void', 'bool', 'char', 'wchar_t', 'char16_t', 'char32_t', 'int',
'float', 'double', 'auto'
)
_prefix_keys = ('class', 'struct', 'enum', 'union', 'typename')
@property
def language(self) -> str:
return 'C++'
@property
def id_attributes(self):
return self.config.cpp_id_attributes
@property
def paren_attributes(self):
return self.config.cpp_paren_attributes
def _parse_string(self) -> str:
if self.current_char != '"':
return None
startPos = self.pos
self.pos += 1
escape = False
while True:
if self.eof:
self.fail("Unexpected end during inside string.")
elif self.current_char == '"' and not escape:
self.pos += 1
break
elif self.current_char == '\\':
escape = True
else:
escape = False
self.pos += 1
return self.definition[startPos:self.pos]
def _parse_literal(self) -> ASTLiteral:
# -> integer-literal
# | character-literal
# | floating-literal
# | string-literal
# | boolean-literal -> "false" | "true"
# | pointer-literal -> "nullptr"
# | user-defined-literal
self.skip_ws()
if self.skip_word('nullptr'):
return ASTPointerLiteral()
if self.skip_word('true'):
return ASTBooleanLiteral(True)
if self.skip_word('false'):
return ASTBooleanLiteral(False)
for regex in [float_literal_re, binary_literal_re, hex_literal_re,
integer_literal_re, octal_literal_re]:
pos = self.pos
if self.match(regex):
while self.current_char in 'uUlLfF':
self.pos += 1
return ASTNumberLiteral(self.definition[pos:self.pos])
string = self._parse_string()
if string is not None:
return ASTStringLiteral(string)
# character-literal
if self.match(char_literal_re):
prefix = self.last_match.group(1) # may be None when no prefix
data = self.last_match.group(2)
try:
return ASTCharLiteral(prefix, data)
except UnicodeDecodeError as e:
self.fail("Can not handle character literal. Internal error was: %s" % e)
except UnsupportedMultiCharacterCharLiteral:
self.fail("Can not handle character literal"
" resulting in multiple decoded characters.")
        # TODO: user-defined literal
return None
def _parse_fold_or_paren_expression(self) -> ASTExpression:
# "(" expression ")"
# fold-expression
# -> ( cast-expression fold-operator ... )
# | ( ... fold-operator cast-expression )
# | ( cast-expression fold-operator ... fold-operator cast-expression
if self.current_char != '(':
return None
self.pos += 1
self.skip_ws()
if self.skip_string_and_ws("..."):
# ( ... fold-operator cast-expression )
if not self.match(_fold_operator_re):
self.fail("Expected fold operator after '...' in fold expression.")
op = self.matched_text
rightExpr = self._parse_cast_expression()
if not self.skip_string(')'):
self.fail("Expected ')' in end of fold expression.")
return ASTFoldExpr(None, op, rightExpr)
# try first parsing a unary right fold, or a binary fold
pos = self.pos
try:
self.skip_ws()
leftExpr = self._parse_cast_expression()
self.skip_ws()
if not self.match(_fold_operator_re):
self.fail("Expected fold operator after left expression in fold expression.")
op = self.matched_text
self.skip_ws()
if not self.skip_string_and_ws('...'):
self.fail("Expected '...' after fold operator in fold expression.")
except DefinitionError as eFold:
self.pos = pos
# fall back to a paren expression
try:
res = self._parse_expression()
self.skip_ws()
if not self.skip_string(')'):
self.fail("Expected ')' in end of parenthesized expression.")
except DefinitionError as eExpr:
raise self._make_multi_error([
(eFold, "If fold expression"),
(eExpr, "If parenthesized expression")
], "Error in fold expression or parenthesized expression.")
return ASTParenExpr(res)
# now it definitely is a fold expression
if self.skip_string(')'):
return ASTFoldExpr(leftExpr, op, None)
if not self.match(_fold_operator_re):
self.fail("Expected fold operator or ')' after '...' in fold expression.")
if op != self.matched_text:
self.fail("Operators are different in binary fold: '%s' and '%s'."
% (op, self.matched_text))
rightExpr = self._parse_cast_expression()
self.skip_ws()
if not self.skip_string(')'):
self.fail("Expected ')' to end binary fold expression.")
return ASTFoldExpr(leftExpr, op, rightExpr)
def _parse_primary_expression(self) -> ASTExpression:
# literal
# "this"
# lambda-expression
# "(" expression ")"
# fold-expression
# id-expression -> we parse this with _parse_nested_name
self.skip_ws()
res = self._parse_literal() # type: ASTExpression
if res is not None:
return res
self.skip_ws()
if self.skip_word("this"):
return ASTThisLiteral()
# TODO: try lambda expression
res = self._parse_fold_or_paren_expression()
if res is not None:
return res
nn = self._parse_nested_name()
if nn is not None:
return ASTIdExpression(nn)
return None
def _parse_initializer_list(self, name: str, open: str, close: str
) -> Tuple[List[Union[ASTExpression,
ASTBracedInitList]],
bool]:
        # Parse open and close with the actual initializer-list in between
# -> initializer-clause '...'[opt]
# | initializer-list ',' initializer-clause '...'[opt]
self.skip_ws()
if not self.skip_string_and_ws(open):
return None, None
if self.skip_string(close):
return [], False
exprs = [] # type: List[Union[ASTExpression, ASTBracedInitList]]
trailingComma = False
while True:
self.skip_ws()
expr = self._parse_initializer_clause()
self.skip_ws()
if self.skip_string('...'):
exprs.append(ASTPackExpansionExpr(expr))
else:
exprs.append(expr)
self.skip_ws()
if self.skip_string(close):
break
if not self.skip_string_and_ws(','):
self.fail("Error in %s, expected ',' or '%s'." % (name, close))
if self.current_char == close and close == '}':
self.pos += 1
trailingComma = True
break
return exprs, trailingComma
def _parse_paren_expression_list(self) -> ASTParenExprList:
# -> '(' expression-list ')'
# though, we relax it to also allow empty parens
# as it's needed in some cases
#
# expression-list
# -> initializer-list
exprs, trailingComma = self._parse_initializer_list("parenthesized expression-list",
'(', ')')
if exprs is None:
return None
return ASTParenExprList(exprs)
def _parse_initializer_clause(self) -> Union[ASTExpression, ASTBracedInitList]:
bracedInitList = self._parse_braced_init_list()
if bracedInitList is not None:
return bracedInitList
return self._parse_assignment_expression(inTemplate=False)
def _parse_braced_init_list(self) -> ASTBracedInitList:
# -> '{' initializer-list ','[opt] '}'
# | '{' '}'
exprs, trailingComma = self._parse_initializer_list("braced-init-list", '{', '}')
if exprs is None:
return None
return ASTBracedInitList(exprs, trailingComma)
def _parse_expression_list_or_braced_init_list(
self
) -> Union[ASTParenExprList, ASTBracedInitList]:
paren = self._parse_paren_expression_list()
if paren is not None:
return paren
return self._parse_braced_init_list()
def _parse_postfix_expression(self) -> ASTPostfixExpr:
# -> primary
# | postfix "[" expression "]"
# | postfix "[" braced-init-list [opt] "]"
# | postfix "(" expression-list [opt] ")"
# | postfix "." "template" [opt] id-expression
# | postfix "->" "template" [opt] id-expression
# | postfix "." pseudo-destructor-name
# | postfix "->" pseudo-destructor-name
# | postfix "++"
# | postfix "--"
# | simple-type-specifier "(" expression-list [opt] ")"
# | simple-type-specifier braced-init-list
# | typename-specifier "(" expression-list [opt] ")"
# | typename-specifier braced-init-list
# | "dynamic_cast" "<" type-id ">" "(" expression ")"
# | "static_cast" "<" type-id ">" "(" expression ")"
# | "reinterpret_cast" "<" type-id ">" "(" expression ")"
# | "const_cast" "<" type-id ">" "(" expression ")"
# | "typeid" "(" expression ")"
# | "typeid" "(" type-id ")"
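# A few illustrative inputs for this method (not exhaustive):
#   static_cast<int>(x)  -> ASTExplicitCast prefix, no postfixes
#   typeid(std::string)  -> ASTTypeId prefix (isType=True)
#   v[i].size()          -> primary-expression prefix with array, member,
#                           and call postfixes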
prefixType = None
prefix = None # type: Any
self.skip_ws()
cast = None
for c in _id_explicit_cast:
if self.skip_word_and_ws(c):
cast = c
break
if cast is not None:
prefixType = "cast"
if not self.skip_string("<"):
self.fail("Expected '<' after '%s'." % cast)
typ = self._parse_type(False)
self.skip_ws()
if not self.skip_string_and_ws(">"):
self.fail("Expected '>' after type in '%s'." % cast)
if not self.skip_string("("):
self.fail("Expected '(' in '%s'." % cast)
def parser() -> ASTExpression:
return self._parse_expression()
expr = self._parse_expression_fallback([')'], parser)
self.skip_ws()
if not self.skip_string(")"):
self.fail("Expected ')' to end '%s'." % cast)
prefix = ASTExplicitCast(cast, typ, expr)
elif self.skip_word_and_ws("typeid"):
prefixType = "typeid"
if not self.skip_string_and_ws('('):
self.fail("Expected '(' after 'typeid'.")
pos = self.pos
try:
typ = self._parse_type(False)
prefix = ASTTypeId(typ, isType=True)
if not self.skip_string(')'):
self.fail("Expected ')' to end 'typeid' of type.")
except DefinitionError as eType:
self.pos = pos
try:
def parser() -> ASTExpression:
return self._parse_expression()
expr = self._parse_expression_fallback([')'], parser)
prefix = ASTTypeId(expr, isType=False)
if not self.skip_string(')'):
self.fail("Expected ')' to end 'typeid' of expression.")
except DefinitionError as eExpr:
self.pos = pos
header = "Error in 'typeid(...)'."
header += " Expected type or expression."
errors = []
errors.append((eType, "If type"))
errors.append((eExpr, "If expression"))
raise self._make_multi_error(errors, header)
else: # a primary expression or a type
pos = self.pos
try:
prefix = self._parse_primary_expression()
prefixType = 'expr'
except DefinitionError as eOuter:
self.pos = pos
try:
# we are potentially casting, so save parens for us
# TODO: hmm, would we need to try both with operatorCast and with None?
prefix = self._parse_type(False, 'operatorCast')
prefixType = 'typeOperatorCast'
# | simple-type-specifier "(" expression-list [opt] ")"
# | simple-type-specifier braced-init-list
# | typename-specifier "(" expression-list [opt] ")"
# | typename-specifier braced-init-list
self.skip_ws()
if self.current_char != '(' and self.current_char != '{':
self.fail("Expecting '(' or '{' after type in cast expression.")
except DefinitionError as eInner:
self.pos = pos
header = "Error in postfix expression,"
header += " expected primary expression or type."
errors = []
errors.append((eOuter, "If primary expression"))
errors.append((eInner, "If type"))
raise self._make_multi_error(errors, header)
# and now parse postfixes
postFixes = [] # type: List[ASTPostfixOp]
while True:
self.skip_ws()
if prefixType in ['expr', 'cast', 'typeid']:
if self.skip_string_and_ws('['):
expr = self._parse_expression()
self.skip_ws()
if not self.skip_string(']'):
self.fail("Expected ']' at end of postfix expression.")
postFixes.append(ASTPostfixArray(expr))
continue
if self.skip_string('.'):
if self.skip_string('*'):
# don't steal the dot
self.pos -= 2
elif self.skip_string('..'):
# don't steal the dot
self.pos -= 3
else:
name = self._parse_nested_name()
postFixes.append(ASTPostfixMember(name))
continue
if self.skip_string('->'):
if self.skip_string('*'):
# don't steal the arrow
self.pos -= 3
else:
name = self._parse_nested_name()
postFixes.append(ASTPostfixMemberOfPointer(name))
continue
if self.skip_string('++'):
postFixes.append(ASTPostfixInc())
continue
if self.skip_string('--'):
postFixes.append(ASTPostfixDec())
continue
lst = self._parse_expression_list_or_braced_init_list()
if lst is not None:
postFixes.append(ASTPostfixCallExpr(lst))
continue
break
return ASTPostfixExpr(prefix, postFixes)
def _parse_unary_expression(self) -> ASTExpression:
# -> postfix
# | "++" cast
# | "--" cast
# | unary-operator cast -> (* | & | + | - | ! | ~) cast
# The rest:
# | "sizeof" unary
# | "sizeof" "(" type-id ")"
# | "sizeof" "..." "(" identifier ")"
# | "alignof" "(" type-id ")"
# | noexcept-expression -> noexcept "(" expression ")"
# | new-expression
# | delete-expression
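# Illustrative inputs handled below:
#   !x, ~mask, -n         -> ASTUnaryOpExpr
#   sizeof...(Ts)         -> ASTSizeofParamPack
#   sizeof(std::size_t)   -> ASTSizeofType
#   alignof(T), noexcept(f())
#   new int(5), ::delete p, delete[] arr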
self.skip_ws()
for op in _expression_unary_ops:
# TODO: hmm, should we be able to backtrack here?
if op[0] in 'cn':
res = self.skip_word(op)
else:
res = self.skip_string(op)
if res:
expr = self._parse_cast_expression()
return ASTUnaryOpExpr(op, expr)
if self.skip_word_and_ws('sizeof'):
if self.skip_string_and_ws('...'):
if not self.skip_string_and_ws('('):
self.fail("Expecting '(' after 'sizeof...'.")
if not self.match(identifier_re):
self.fail("Expecting identifier for 'sizeof...'.")
ident = ASTIdentifier(self.matched_text)
self.skip_ws()
if not self.skip_string(")"):
self.fail("Expecting ')' to end 'sizeof...'.")
return ASTSizeofParamPack(ident)
if self.skip_string_and_ws('('):
typ = self._parse_type(named=False)
self.skip_ws()
if not self.skip_string(')'):
self.fail("Expecting ')' to end 'sizeof'.")
return ASTSizeofType(typ)
expr = self._parse_unary_expression()
return ASTSizeofExpr(expr)
if self.skip_word_and_ws('alignof'):
if not self.skip_string_and_ws('('):
self.fail("Expecting '(' after 'alignof'.")
typ = self._parse_type(named=False)
self.skip_ws()
if not self.skip_string(')'):
self.fail("Expecting ')' to end 'alignof'.")
return ASTAlignofExpr(typ)
if self.skip_word_and_ws('noexcept'):
if not self.skip_string_and_ws('('):
self.fail("Expecting '(' after 'noexcept'.")
expr = self._parse_expression()
self.skip_ws()
if not self.skip_string(')'):
self.fail("Expecting ')' to end 'noexcept'.")
return ASTNoexceptExpr(expr)
# new-expression
pos = self.pos
rooted = self.skip_string('::')
self.skip_ws()
if not self.skip_word_and_ws('new'):
self.pos = pos
else:
# new-placement[opt] new-type-id new-initializer[opt]
# new-placement[opt] ( type-id ) new-initializer[opt]
isNewTypeId = True
if self.skip_string_and_ws('('):
# either this is a new-placement or it's the second production
# without placement, and it's actually the ( type-id ) part
self.fail("Sorry, neither new-placement nor parenthesised type-id "
          "in new-expression is supported yet.")
# set isNewTypeId = False if it's (type-id)
if isNewTypeId:
declSpecs = self._parse_decl_specs(outer=None)
decl = self._parse_declarator(named=False, paramMode="new")
else:
self.fail("Sorry, parenthesised type-id in new expression not yet supported.")
lst = self._parse_expression_list_or_braced_init_list()
return ASTNewExpr(rooted, isNewTypeId, ASTType(declSpecs, decl), lst)
# delete-expression
pos = self.pos
rooted = self.skip_string('::')
self.skip_ws()
if not self.skip_word_and_ws('delete'):
self.pos = pos
else:
array = self.skip_string_and_ws('[')
if array and not self.skip_string_and_ws(']'):
self.fail("Expected ']' in array delete-expression.")
expr = self._parse_cast_expression()
return ASTDeleteExpr(rooted, array, expr)
return self._parse_postfix_expression()
def _parse_cast_expression(self) -> ASTExpression:
# -> unary | "(" type-id ")" cast
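# e.g. "(int)x" is first tried as a C-style cast; if the parenthesised part
# does not parse as a type-id, we backtrack and retry as a unary expression.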
pos = self.pos
self.skip_ws()
if self.skip_string('('):
try:
typ = self._parse_type(False)
if not self.skip_string(')'):
self.fail("Expected ')' in cast expression.")
expr = self._parse_cast_expression()
return ASTCastExpr(typ, expr)
except DefinitionError as exCast:
self.pos = pos
try:
return self._parse_unary_expression()
except DefinitionError as exUnary:
errs = []
errs.append((exCast, "If type cast expression"))
errs.append((exUnary, "If unary expression"))
raise self._make_multi_error(errs, "Error in cast expression.")
else:
return self._parse_unary_expression()
def _parse_logical_or_expression(self, inTemplate: bool) -> ASTExpression:
# logical-or = logical-and ||
# logical-and = inclusive-or &&
# inclusive-or = exclusive-or |
# exclusive-or = and ^
# and = equality &
# equality = relational ==, !=
# relational = shift <, >, <=, >=
# shift = additive <<, >>
# additive = multiplicative +, -
# multiplicative = pm *, /, %
# pm = cast .*, ->*
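# The nested helper below implements one recursive-descent level per row of
# the table above, so e.g. "a + b * c" groups as a + (b * c) and
# "a || b && c" groups as a || (b && c).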
def _parse_bin_op_expr(self: DefinitionParser,
opId: int, inTemplate: bool) -> ASTExpression:
if opId + 1 == len(_expression_bin_ops):
def parser(inTemplate: bool) -> ASTExpression:
return self._parse_cast_expression()
else:
def parser(inTemplate: bool) -> ASTExpression:
return _parse_bin_op_expr(self, opId + 1, inTemplate=inTemplate)
exprs = []
ops = []
exprs.append(parser(inTemplate=inTemplate))
while True:
self.skip_ws()
if inTemplate and self.current_char == '>':
break
pos = self.pos
oneMore = False
for op in _expression_bin_ops[opId]:
if op[0] in 'abcnox':
if not self.skip_word(op):
continue
else:
if not self.skip_string(op):
continue
if op == '&' and self.current_char == '&':
# don't split the && 'token'
self.pos -= 1
# and btw. && has lower precedence, so we are done
break
try:
expr = parser(inTemplate=inTemplate)
exprs.append(expr)
ops.append(op)
oneMore = True
break
except DefinitionError:
self.pos = pos
if not oneMore:
break
return ASTBinOpExpr(exprs, ops)
return _parse_bin_op_expr(self, 0, inTemplate=inTemplate)
def _parse_conditional_expression_tail(self, orExprHead: Any) -> None:
# -> "?" expression ":" assignment-expression
return None
def _parse_assignment_expression(self, inTemplate: bool) -> ASTExpression:
# -> conditional-expression
# | logical-or-expression assignment-operator initializer-clause
# | throw-expression
# TODO: parse throw-expression: "throw" assignment-expression [opt]
# if not a throw expression, then:
# -> conditional-expression ->
# logical-or-expression
# | logical-or-expression "?" expression ":" assignment-expression
# | logical-or-expression assignment-operator initializer-clause
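# e.g. "x = 5" yields exprs=[x, 5] and ops=['=']; "x = {1, 2}" is accepted too,
# because the right-hand side is parsed as an initializer-clause.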
exprs = [] # type: List[Union[ASTExpression, ASTBracedInitList]]
ops = []
orExpr = self._parse_logical_or_expression(inTemplate=inTemplate)
exprs.append(orExpr)
# TODO: handle ternary with _parse_conditional_expression_tail
while True:
oneMore = False
self.skip_ws()
for op in _expression_assignment_ops:
if op[0] in 'anox':
if not self.skip_word(op):
continue
else:
if not self.skip_string(op):
continue
expr = self._parse_initializer_clause()
exprs.append(expr)
ops.append(op)
oneMore = True
if not oneMore:
break
if len(ops) == 0:
return orExpr
else:
return ASTAssignmentExpr(exprs, ops)
def _parse_constant_expression(self, inTemplate: bool) -> ASTExpression:
# -> conditional-expression
orExpr = self._parse_logical_or_expression(inTemplate=inTemplate)
# TODO: use _parse_conditional_expression_tail
return orExpr
def _parse_expression(self) -> ASTExpression:
# -> assignment-expression
#   | expression "," assignment-expression
exprs = [self._parse_assignment_expression(inTemplate=False)]
while True:
self.skip_ws()
if not self.skip_string(','):
break
exprs.append(self._parse_assignment_expression(inTemplate=False))
if len(exprs) == 1:
return exprs[0]
else:
return ASTCommaExpr(exprs)
def _parse_expression_fallback(self, end: List[str],
parser: Callable[[], ASTExpression],
allow: bool = True) -> ASTExpression:
# Stupidly "parse" an expression.
# 'end' should be a list of characters which ends the expression.
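# Illustrative fallback: with end=[')'] and remaining input "some_flag(A|B))",
# the scanner below tracks bracket nesting, stops at the final unnested ')',
# and keeps "some_flag(A|B)" verbatim as an ASTFallbackExpr.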
# first try to use the provided parser
prevPos = self.pos
try:
return parser()
except DefinitionError as e:
# some places (e.g., template parameters) we really don't want to use fallback,
# and for testing we may want to globally disable it
if not allow or not self.allowFallbackExpressionParsing:
raise
self.warn("Parsing of expression failed. Using fallback parser."
" Error was:\n%s" % e)
self.pos = prevPos
# and then the fallback scanning
assert end is not None
self.skip_ws()
startPos = self.pos
if self.match(_string_re):
value = self.matched_text
else:
# TODO: add handling of more bracket-like things, and quote handling
brackets = {'(': ')', '{': '}', '[': ']', '<': '>'}
symbols = [] # type: List[str]
while not self.eof:
if (len(symbols) == 0 and self.current_char in end):
break
if self.current_char in brackets.keys():
symbols.append(brackets[self.current_char])
elif len(symbols) > 0 and self.current_char == symbols[-1]:
symbols.pop()
self.pos += 1
if len(end) > 0 and self.eof:
self.fail("Could not find end of expression starting at %d."
% startPos)
value = self.definition[startPos:self.pos].strip()
return ASTFallbackExpr(value.strip())
# ==========================================================================
def _parse_operator(self) -> ASTOperator:
self.skip_ws()
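# Recognised forms (illustrative; the 'operator' keyword itself has already
# been consumed by the caller): "+", "new[]", '""_km' (user-defined literal
# suffix), and a cast operator such as "bool".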
# adapted from the old code
# yay, a regular operator definition
if self.match(_operator_re):
return ASTOperatorBuildIn(self.matched_text)
# new/delete operator?
for op in 'new', 'delete':
if not self.skip_word(op):
continue
self.skip_ws()
if self.skip_string('['):
self.skip_ws()
if not self.skip_string(']'):
self.fail('Expected "]" after "operator ' + op + '["')
op += '[]'
return ASTOperatorBuildIn(op)
# user-defined literal?
if self.skip_string('""'):
self.skip_ws()
if not self.match(identifier_re):
self.fail("Expected user-defined literal suffix.")
identifier = ASTIdentifier(self.matched_text)
return ASTOperatorLiteral(identifier)
# oh well, looks like a cast operator definition.
# In that case, eat another type.
type = self._parse_type(named=False, outer="operatorCast")
return ASTOperatorType(type)
def _parse_template_argument_list(self) -> ASTTemplateArgs:
# template-argument-list: (but we include the < and > here)
# template-argument ...[opt]
# template-argument-list, template-argument ...[opt]
# template-argument:
# constant-expression
# type-id
# id-expression
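# Illustrative argument lists: "<>", "<int, std::vector<int>>",
# "<42, true>", and a trailing pack expansion such as "<Args...>".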
self.skip_ws()
if not self.skip_string_and_ws('<'):
return None
if self.skip_string('>'):
return ASTTemplateArgs([], False)
prevErrors = []
templateArgs = [] # type: List[Union[ASTType, ASTTemplateArgConstant]]
packExpansion = False
while 1:
pos = self.pos
parsedComma = False
parsedEnd = False
try:
type = self._parse_type(named=False)
self.skip_ws()
if self.skip_string_and_ws('...'):
packExpansion = True
parsedEnd = True
if not self.skip_string('>'):
self.fail('Expected ">" after "..." in template argument list.')
elif self.skip_string('>'):
parsedEnd = True
elif self.skip_string(','):
parsedComma = True
else:
self.fail('Expected "...>", ">" or "," in template argument list.')
templateArgs.append(type)
except DefinitionError as e:
prevErrors.append((e, "If type argument"))
self.pos = pos
try:
value = self._parse_constant_expression(inTemplate=True)
self.skip_ws()
if self.skip_string_and_ws('...'):
packExpansion = True
parsedEnd = True
if not self.skip_string('>'):
self.fail('Expected ">" after "..." in template argument list.')
elif self.skip_string('>'):
parsedEnd = True
elif self.skip_string(','):
parsedComma = True
else:
self.fail('Expected "...>", ">" or "," in template argument list.')
templateArgs.append(ASTTemplateArgConstant(value))
except DefinitionError as e:
self.pos = pos
prevErrors.append((e, "If non-type argument"))
header = "Error in parsing template argument list."
raise self._make_multi_error(prevErrors, header)
if parsedEnd:
assert not parsedComma
break
else:
assert not packExpansion
return ASTTemplateArgs(templateArgs, packExpansion)
def _parse_nested_name(self, memberPointer: bool = False) -> ASTNestedName:
names = [] # type: List[ASTNestedNameElement]
templates = [] # type: List[bool]
self.skip_ws()
rooted = False
if self.skip_string('::'):
rooted = True
while 1:
self.skip_ws()
if len(names) > 0:
template = self.skip_word_and_ws('template')
else:
template = False
templates.append(template)
identOrOp = None # type: Union[ASTIdentifier, ASTOperator]
if self.skip_word_and_ws('operator'):
identOrOp = self._parse_operator()
else:
if not self.match(identifier_re):
if memberPointer and len(names) > 0:
templates.pop()
break
self.fail("Expected identifier in nested name.")
identifier = self.matched_text
# make sure there isn't a keyword
if identifier in _keywords:
self.fail("Expected identifier in nested name, "
"got keyword: %s" % identifier)
identOrOp = ASTIdentifier(identifier)
# try greedily to get template arguments,
# but otherwise a < might be because we are in an expression
pos = self.pos
try:
templateArgs = self._parse_template_argument_list()
except DefinitionError as ex:
self.pos = pos
templateArgs = None
self.otherErrors.append(ex)
names.append(ASTNestedNameElement(identOrOp, templateArgs))
self.skip_ws()
if not self.skip_string('::'):
if memberPointer:
self.fail("Expected '::' in pointer to member (function).")
break
return ASTNestedName(names, templates, rooted)
# ==========================================================================
def _parse_trailing_type_spec(self) -> ASTTrailingTypeSpec:
# fundamental types
self.skip_ws()
for t in self._simple_fundemental_types:
if self.skip_word(t):
return ASTTrailingTypeSpecFundamental(t)
# TODO: this could/should be more strict
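# e.g. "unsigned long long int" or "signed char" are collected here and joined
# into one ASTTrailingTypeSpecFundamental; the combination is not validated
# against the real C++ rules (see the TODO above).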
elements = []
if self.skip_word_and_ws('signed'):
elements.append('signed')
elif self.skip_word_and_ws('unsigned'):
elements.append('unsigned')
while 1:
if self.skip_word_and_ws('short'):
elements.append('short')
elif self.skip_word_and_ws('long'):
elements.append('long')
else:
break
if self.skip_word_and_ws('char'):
elements.append('char')
elif self.skip_word_and_ws('int'):
elements.append('int')
elif self.skip_word_and_ws('double'):
elements.append('double')
if len(elements) > 0:
return ASTTrailingTypeSpecFundamental(' '.join(elements))
# decltype
self.skip_ws()
if self.skip_word_and_ws('decltype'):
if not self.skip_string_and_ws('('):
self.fail("Expected '(' after 'decltype'.")
if self.skip_word_and_ws('auto'):
if not self.skip_string(')'):
self.fail("Expected ')' after 'decltype(auto'.")
return ASTTrailingTypeSpecDecltypeAuto()
expr = self._parse_expression()
self.skip_ws()
if not self.skip_string(')'):
self.fail("Expected ')' after 'decltype(<expr>'.")
return ASTTrailingTypeSpecDecltype(expr)
# prefixed
prefix = None
self.skip_ws()
for k in self._prefix_keys:
if self.skip_word_and_ws(k):
prefix = k
break
nestedName = self._parse_nested_name()
return ASTTrailingTypeSpecName(prefix, nestedName)
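# _parse_parameters_and_qualifiers handles signatures such as (illustrative)
#   "(int a, double b = 1.0) const noexcept override = 0"
# returning the parameter list plus cv-/ref-qualifiers, the exception
# specifier, override/final, and an "= 0"/"= default"/"= delete" initializer.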
def _parse_parameters_and_qualifiers(self, paramMode: str) -> ASTParametersQualifiers:
if paramMode == 'new':
return None
self.skip_ws()
if not self.skip_string('('):
if paramMode == 'function':
self.fail('Expecting "(" in parameters-and-qualifiers.')
else:
return None
args = []
self.skip_ws()
if not self.skip_string(')'):
while 1:
self.skip_ws()
if self.skip_string('...'):
args.append(ASTFunctionParameter(None, True))
self.skip_ws()
if not self.skip_string(')'):
self.fail('Expected ")" after "..." in '
'parameters-and-qualifiers.')
break
# note: it seems that function arguments can always be named,
# even in function pointers and similar.
arg = self._parse_type_with_init(outer=None, named='single')
# TODO: parse default parameters # TODO: didn't we just do that?
args.append(ASTFunctionParameter(arg))
self.skip_ws()
if self.skip_string(','):
continue
elif self.skip_string(')'):
break
else:
self.fail(
'Expecting "," or ")" in parameters-and-qualifiers, '
'got "%s".' % self.current_char)
# TODO: why did we have this bail-out?
# does it hurt to parse the extra stuff?
# it's needed for pointer to member functions
if paramMode != 'function' and False:
return ASTParametersQualifiers(
args, None, None, None, None, None, None, None)
self.skip_ws()
const = self.skip_word_and_ws('const')
volatile = self.skip_word_and_ws('volatile')
if not const:  # they can be permuted
const = self.skip_word_and_ws('const')
refQual = None
if self.skip_string('&&'):
refQual = '&&'
if not refQual and self.skip_string('&'):
refQual = '&'
exceptionSpec = None
override = None
final = None
initializer = None
self.skip_ws()
if self.skip_string('noexcept'):
exceptionSpec = 'noexcept'
self.skip_ws()
if self.skip_string('('):
self.fail('Parameterised "noexcept" not yet implemented.')
self.skip_ws()
override = self.skip_word_and_ws('override')
final = self.skip_word_and_ws('final')
if not override:
override = self.skip_word_and_ws(
'override') # they can be permuted
self.skip_ws()
if self.skip_string('='):
self.skip_ws()
valid = ('0', 'delete', 'default')
for w in valid:
if self.skip_word_and_ws(w):
initializer = w
break
if not initializer:
self.fail(
'Expected "%s" in initializer-specifier.'
% '" or "'.join(valid))
return ASTParametersQualifiers(
args, volatile, const, refQual, exceptionSpec, override, final,
initializer)
def _parse_decl_specs_simple(self, outer: str, typed: bool) -> ASTDeclSpecsSimple:
"""Just parse the simple ones."""
storage = None
threadLocal = None
inline = None
virtual = None
explicit = None
constexpr = None
volatile = None
const = None
friend = None
attrs = []
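# e.g. "static constexpr const" or "friend inline" (for functions) are
# accepted here in any order; anything that is neither a known specifier nor
# an attribute ends the loop.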
while 1: # accept any permutation of a subset of some decl-specs
self.skip_ws()
if not storage:
if outer in ('member', 'function'):
if self.skip_word('static'):
storage = 'static'
continue
if self.skip_word('extern'):
storage = 'extern'
continue
if outer == 'member':
if self.skip_word('mutable'):
storage = 'mutable'
continue
if self.skip_word('register'):
storage = 'register'
continue
if not threadLocal and outer == 'member':
threadLocal = self.skip_word('thread_local')
if threadLocal:
continue
if outer == 'function':
# function-specifiers
if not inline:
inline = self.skip_word('inline')
if inline:
continue
if not friend:
friend = self.skip_word('friend')
if friend:
continue
if not virtual:
virtual = self.skip_word('virtual')
if virtual:
continue
if not explicit:
explicit = self.skip_word('explicit')
if explicit:
continue
if not constexpr and outer in ('member', 'function'):
constexpr = self.skip_word("constexpr")
if constexpr:
continue
if not volatile and typed:
volatile = self.skip_word('volatile')
if volatile:
continue
if not const and typed:
const = self.skip_word('const')
if const:
continue
attr = self._parse_attribute()
if attr:
attrs.append(attr)
continue
break
return ASTDeclSpecsSimple(storage, threadLocal, inline, virtual,
explicit, constexpr, volatile, const,
friend, attrs)
def _parse_decl_specs(self, outer: str, typed: bool = True) -> ASTDeclSpecs:
if outer:
if outer not in ('type', 'member', 'function', 'templateParam'):
raise Exception('Internal error, unknown outer "%s".' % outer)
"""
storage-class-specifier function-specifier "constexpr"
"volatile" "const" trailing-type-specifier
storage-class-specifier ->
"static" (only for member_object and function_object)
| "register"
function-specifier -> "inline" | "virtual" | "explicit" (only for
function_object)
"constexpr" (only for member_object and function_object)
"""
leftSpecs = self._parse_decl_specs_simple(outer, typed)
rightSpecs = None
if typed:
trailing = self._parse_trailing_type_spec()
rightSpecs = self._parse_decl_specs_simple(outer, typed)
else:
trailing = None
return ASTDeclSpecs(outer, leftSpecs, rightSpecs, trailing)
def _parse_declarator_name_suffix(
self, named: Union[bool, str], paramMode: str, typed: bool
) -> Union[ASTDeclaratorNameParamQual, ASTDeclaratorNameBitField]:
# now we should parse the name, and then suffixes
if named == 'maybe':
pos = self.pos
try:
declId = self._parse_nested_name()
except DefinitionError:
self.pos = pos
declId = None
elif named == 'single':
if self.match(identifier_re):
identifier = ASTIdentifier(self.matched_text)
nne = ASTNestedNameElement(identifier, None)
declId = ASTNestedName([nne], [False], rooted=False)
# if it's a member pointer, we may have '::', which should be an error
self.skip_ws()
if self.current_char == ':':
self.fail("Unexpected ':' after identifier.")
else:
declId = None
elif named:
declId = self._parse_nested_name()
else:
declId = None
arrayOps = []
while 1:
self.skip_ws()
if typed and self.skip_string('['):
self.skip_ws()
if self.skip_string(']'):
arrayOps.append(ASTArray(None))
continue
def parser() -> ASTExpression:
return self._parse_expression()
value = self._parse_expression_fallback([']'], parser)
if not self.skip_string(']'):
self.fail("Expected ']' at end of array operator.")
arrayOps.append(ASTArray(value))
continue
else:
break
paramQual = self._parse_parameters_and_qualifiers(paramMode)
if paramQual is None and len(arrayOps) == 0:
# perhaps a bit-field
if named and paramMode == 'type' and typed:
self.skip_ws()
if self.skip_string(':'):
size = self._parse_constant_expression(inTemplate=False)
return ASTDeclaratorNameBitField(declId=declId, size=size)
return ASTDeclaratorNameParamQual(declId=declId, arrayOps=arrayOps,
paramQual=paramQual)
def _parse_declarator(self, named: Union[bool, str], paramMode: str,
typed: bool = True
) -> ASTDeclarator:
# 'typed' here means 'parse return type stuff'
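# Declarator shapes handled below (illustrative):
#   "*const p"    -> ASTDeclaratorPtr
#   "&ref"        -> ASTDeclaratorRef
#   "... pack"    -> ASTDeclaratorParamPack
#   "C::* pm"     -> ASTDeclaratorMemPtr (pointer to member)
#   "(*fp)(int)"  -> ASTDeclaratorParen wrapping a pointer declarator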
if paramMode not in ('type', 'function', 'operatorCast', 'new'):
raise Exception(
"Internal error, unknown paramMode '%s'." % paramMode)
prevErrors = []
self.skip_ws()
if typed and self.skip_string('*'):
self.skip_ws()
volatile = False
const = False
attrs = []
while 1:
if not volatile:
volatile = self.skip_word_and_ws('volatile')
if volatile:
continue
if not const:
const = self.skip_word_and_ws('const')
if const:
continue
attr = self._parse_attribute()
if attr is not None:
attrs.append(attr)
continue
break
next = self._parse_declarator(named, paramMode, typed)
return ASTDeclaratorPtr(next=next, volatile=volatile, const=const, attrs=attrs)
# TODO: shouldn't we parse an R-value ref here first?
if typed and self.skip_string("&"):
attrs = []
while 1:
attr = self._parse_attribute()
if attr is None:
break
attrs.append(attr)
next = self._parse_declarator(named, paramMode, typed)
return ASTDeclaratorRef(next=next, attrs=attrs)
if typed and self.skip_string("..."):
next = self._parse_declarator(named, paramMode, False)
return ASTDeclaratorParamPack(next=next)
if typed and self.current_char == '(': # note: peeking, not skipping
if paramMode == "operatorCast":
# TODO: we should be able to parse cast operators which return
# function pointers. For now, just hax it and ignore.
return ASTDeclaratorNameParamQual(declId=None, arrayOps=[],
paramQual=None)
# maybe this is the beginning of params and quals, try that first;
# otherwise assume it's noptr-declarator -> '(' ptr-declarator ')'
pos = self.pos
try:
# assume this is params and quals
res = self._parse_declarator_name_suffix(named, paramMode,
typed)
return res
except DefinitionError as exParamQual:
prevErrors.append((exParamQual,
"If declarator-id with parameters-and-qualifiers"))
self.pos = pos
try:
assert self.current_char == '('
self.skip_string('(')
# TODO: hmm, if there is a name, it must be in inner, right?
# TODO: hmm, if there must be parameters, they must be
# inside, right?
inner = self._parse_declarator(named, paramMode, typed)
if not self.skip_string(')'):
self.fail("Expected ')' in \"( ptr-declarator )\"")
next = self._parse_declarator(named=False,
paramMode="type",
typed=typed)
return ASTDeclaratorParen(inner=inner, next=next)
except DefinitionError as exNoPtrParen:
self.pos = pos
prevErrors.append((exNoPtrParen, "If parenthesis in noptr-declarator"))
header = "Error in declarator"
raise self._make_multi_error(prevErrors, header)
if typed: # pointer to member
pos = self.pos
try:
name = self._parse_nested_name(memberPointer=True)
self.skip_ws()
if not self.skip_string('*'):
self.fail("Expected '*' in pointer to member declarator.")
self.skip_ws()
except DefinitionError as e:
self.pos = pos
prevErrors.append((e, "If pointer to member declarator"))
else:
volatile = False
const = False
while 1:
if not volatile:
volatile = self.skip_word_and_ws('volatile')
if volatile:
continue
if not const:
const = self.skip_word_and_ws('const')
if const:
continue
break
next = self._parse_declarator(named, paramMode, typed)
return ASTDeclaratorMemPtr(name, const, volatile, next=next)
pos = self.pos
try:
res = self._parse_declarator_name_suffix(named, paramMode, typed)
# this is a heuristic for error messages, for when there is a < after a
# nested name, but it was not a successful template argument list
if self.current_char == '<':
self.otherErrors.append(self._make_multi_error(prevErrors, ""))
return res
except DefinitionError as e:
self.pos = pos
prevErrors.append((e, "If declarator-id"))
header = "Error in declarator or parameters-and-qualifiers"
raise self._make_multi_error(prevErrors, header)
def _parse_initializer(self, outer: str = None, allowFallback: bool = True
) -> ASTInitializer:
# initializer # global vars
# -> brace-or-equal-initializer
# | '(' expression-list ')'
#
# brace-or-equal-initializer # member vars
# -> '=' initializer-clause
# | braced-init-list
#
# initializer-clause # function params, non-type template params (with '=' in front)
# -> assignment-expression
# | braced-init-list
#
# we don't distinguish between global and member vars, so disallow paren:
#
# -> braced-init-list # var only
# | '=' assignment-expression
# | '=' braced-init-list
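# Examples by context (illustrative):
#   member variable:    "{1, 2}" or "= 42"
#   template parameter: "= T{}"      (fallback stops at ',' or '>')
#   function parameter: "= nullptr"  (fallback stops at ',' or ')')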
self.skip_ws()
if outer == 'member':
bracedInit = self._parse_braced_init_list()
if bracedInit is not None:
return ASTInitializer(bracedInit, hasAssign=False)
if not self.skip_string('='):
return None
bracedInit = self._parse_braced_init_list()
if bracedInit is not None:
return ASTInitializer(bracedInit)
if outer == 'member':
fallbackEnd = [] # type: List[str]
elif outer == 'templateParam':
fallbackEnd = [',', '>']
elif outer is None: # function parameter
fallbackEnd = [',', ')']
else:
self.fail("Internal error, initializer for outer '%s' not "
"implemented." % outer)
inTemplate = outer == 'templateParam'
def parser() -> ASTExpression:
return self._parse_assignment_expression(inTemplate=inTemplate)
value = self._parse_expression_fallback(fallbackEnd, parser, allow=allowFallback)
return ASTInitializer(value)
def _parse_type(self, named: Union[bool, str], outer: str = None) -> ASTType:
"""
named=False|'maybe'|True: 'maybe' is e.g., for function objects which
don't need to name the arguments
outer == operatorCast: annoying case, we should not take the params
"""
if outer: # always named
if outer not in ('type', 'member', 'function',
'operatorCast', 'templateParam'):
raise Exception('Internal error, unknown outer "%s".' % outer)
if outer != 'operatorCast':
assert named
if outer in ('type', 'function'):
# We allow type objects to just be a name.
# Some functions don't have normal return types: constructors,
# destructors, cast operators
prevErrors = []
startPos = self.pos
# first try without the type
try:
declSpecs = self._parse_decl_specs(outer=outer, typed=False)
decl = self._parse_declarator(named=True, paramMode=outer,
typed=False)
self.assert_end()
except DefinitionError as exUntyped:
if outer == 'type':
desc = "If just a name"
elif outer == 'function':
desc = "If the function has no return type"
else:
assert False
prevErrors.append((exUntyped, desc))
self.pos = startPos
try:
declSpecs = self._parse_decl_specs(outer=outer)
decl = self._parse_declarator(named=True, paramMode=outer)
except DefinitionError as exTyped:
self.pos = startPos
if outer == 'type':
desc = "If typedef-like declaration"
elif outer == 'function':
desc = "If the function has a return type"
else:
assert False
prevErrors.append((exTyped, desc))
# Retain the else branch for easier debugging.
# TODO: it would be nice to save the previous stacktrace
# and output it here.
if True:
if outer == 'type':
header = "Type must be either just a name or a "
header += "typedef-like declaration."
elif outer == 'function':
header = "Error when parsing function declaration."
else:
assert False
raise self._make_multi_error(prevErrors, header)
else:
# For testing purposes.
# do it again to get the proper traceback (how do you
# reliably save a traceback when an exception is
# constructed?)
self.pos = startPos
typed = True
declSpecs = self._parse_decl_specs(outer=outer, typed=typed)
decl = self._parse_declarator(named=True, paramMode=outer,
typed=typed)
else:
paramMode = 'type'
if outer == 'member': # i.e., member
named = True
elif outer == 'operatorCast':
paramMode = 'operatorCast'
outer = None
elif outer == 'templateParam':
named = 'single'
declSpecs = self._parse_decl_specs(outer=outer)
decl = self._parse_declarator(named=named, paramMode=paramMode)
return ASTType(declSpecs, decl)
def _parse_type_with_init(
self, named: Union[bool, str],
outer: str) -> Union[ASTTypeWithInit, ASTTemplateParamConstrainedTypeWithInit]:
if outer:
assert outer in ('type', 'member', 'function', 'templateParam')
type = self._parse_type(outer=outer, named=named)
if outer != 'templateParam':
init = self._parse_initializer(outer=outer)
return ASTTypeWithInit(type, init)
# it could also be a constrained type parameter, e.g., C T = int&
pos = self.pos
eExpr = None
try:
init = self._parse_initializer(outer=outer, allowFallback=False)
# note: init may be None if there is no =
if init is None:
return ASTTypeWithInit(type, None)
# we parsed an expression, so we must have a , or a >,
# otherwise the expression didn't get everything
self.skip_ws()
if self.current_char != ',' and self.current_char != '>':
# pretend it didn't happen
self.pos = pos
init = None
else:
# we assume that it was indeed an expression
return ASTTypeWithInit(type, init)
except DefinitionError as e:
self.pos = pos
eExpr = e
if not self.skip_string("="):
return ASTTypeWithInit(type, None)
try:
typeInit = self._parse_type(named=False, outer=None)
return ASTTemplateParamConstrainedTypeWithInit(type, typeInit)
except DefinitionError as eType:
if eExpr is None:
raise eType
errs = []
errs.append((eExpr, "If default template argument is an expression"))
errs.append((eType, "If default template argument is a type"))
msg = "Error in non-type template parameter"
msg += " or constrained template parameter."
raise self._make_multi_error(errs, msg)
def _parse_type_using(self) -> ASTTypeUsing:
name = self._parse_nested_name()
self.skip_ws()
if not self.skip_string('='):
return ASTTypeUsing(name, None)
type = self._parse_type(False, None)
return ASTTypeUsing(name, type)
def _parse_concept(self) -> ASTConcept:
nestedName = self._parse_nested_name()
self.skip_ws()
initializer = self._parse_initializer('member')
return ASTConcept(nestedName, initializer)
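# _parse_class parses e.g. "MyClass final : public Base, protected virtual
# Mixin, MoreBases..." (illustrative); the class-key (class/struct) itself
# comes from the directive, not from this parser.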
def _parse_class(self) -> ASTClass:
name = self._parse_nested_name()
self.skip_ws()
final = self.skip_word_and_ws('final')
bases = []
self.skip_ws()
if self.skip_string(':'):
while 1:
self.skip_ws()
visibility = None
virtual = False
pack = False
if self.skip_word_and_ws('virtual'):
virtual = True
if self.match(_visibility_re):
visibility = self.matched_text
self.skip_ws()
if not virtual and self.skip_word_and_ws('virtual'):
virtual = True
baseName = self._parse_nested_name()
self.skip_ws()
pack = self.skip_string('...')
bases.append(ASTBaseClass(baseName, visibility, virtual, pack))
self.skip_ws()
if self.skip_string(','):
continue
else:
break
return ASTClass(name, final, bases)
def _parse_union(self) -> ASTUnion:
name = self._parse_nested_name()
return ASTUnion(name)
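# _parse_enum accepts e.g. "Color : std::uint8_t" (illustrative); whether the
# enum is scoped (enum-struct/enum-class) is filled in later by CPPEnumObject.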
def _parse_enum(self) -> ASTEnum:
scoped = None # is set by CPPEnumObject
self.skip_ws()
name = self._parse_nested_name()
self.skip_ws()
underlyingType = None
if self.skip_string(':'):
underlyingType = self._parse_type(named=False)
return ASTEnum(name, scoped, underlyingType)
def _parse_enumerator(self) -> ASTEnumerator:
name = self._parse_nested_name()
self.skip_ws()
init = None
if self.skip_string('='):
self.skip_ws()
def parser() -> ASTExpression:
return self._parse_constant_expression(inTemplate=False)
initVal = self._parse_expression_fallback([], parser)
init = ASTInitializer(initVal)
return ASTEnumerator(name, init)
# ==========================================================================
def _parse_template_parameter_list(self) -> ASTTemplateParams:
# only: '<' parameter-list '>'
# we assume that 'template' has just been parsed
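# Parameter lists handled below include (illustrative):
#   <typename T>                   -> ASTTemplateParamType
#   <class T = int>                -> type parameter with a default
#   <template <typename> class C>  -> ASTTemplateParamTemplateType
#   <int N>                        -> ASTTemplateParamNonType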
templateParams = [] # type: List[ASTTemplateParam]
self.skip_ws()
if not self.skip_string("<"):
self.fail("Expected '<' after 'template'")
prevErrors = []
while 1:
self.skip_ws()
if self.skip_word('template'):
# declare a template template parameter
nestedParams = self._parse_template_parameter_list()
else:
nestedParams = None
self.skip_ws()
key = None
if self.skip_word_and_ws('typename'):
key = 'typename'
elif self.skip_word_and_ws('class'):
key = 'class'
elif nestedParams:
self.fail("Expected 'typename' or 'class' after "
"template template parameter list.")
if key:
# declare a type or template type parameter
self.skip_ws()
parameterPack = self.skip_string('...')
self.skip_ws()
if self.match(identifier_re):
identifier = ASTIdentifier(self.matched_text)
else:
identifier = None
self.skip_ws()
if not parameterPack and self.skip_string('='):
default = self._parse_type(named=False, outer=None)
else:
default = None
data = ASTTemplateKeyParamPackIdDefault(key, identifier,
parameterPack, default)
if nestedParams:
# template type
templateParams.append(
ASTTemplateParamTemplateType(nestedParams, data))
else:
# type
templateParams.append(ASTTemplateParamType(data))
else:
# declare a non-type parameter, or constrained type parameter
pos = self.pos
try:
param = self._parse_type_with_init('maybe', 'templateParam')
templateParams.append(ASTTemplateParamNonType(param))
except DefinitionError as e:
msg = "If non-type template parameter or constrained template parameter"
prevErrors.append((e, msg))
self.pos = pos
self.skip_ws()
if self.skip_string('>'):
return ASTTemplateParams(templateParams)
elif self.skip_string(','):
prevErrors = []
continue
else:
header = "Error in template parameter list."
try:
self.fail('Expected "=", ",", or ">".')
except DefinitionError as e:
prevErrors.append((e, ""))
raise self._make_multi_error(prevErrors, header)
def _parse_template_introduction(self) -> ASTTemplateIntroduction:
pos = self.pos
try:
concept = self._parse_nested_name()
except Exception:
self.pos = pos
return None
self.skip_ws()
if not self.skip_string('{'):
self.pos = pos
return None
# for sure it must be a template introduction now
params = []
while 1:
self.skip_ws()
parameterPack = self.skip_string('...')
self.skip_ws()
if not self.match(identifier_re):
self.fail("Expected identifier in template introduction list.")
txt_identifier = self.matched_text
# make sure there isn't a keyword
if txt_identifier in _keywords:
self.fail("Expected identifier in template introduction list, "
"got keyword: %s" % txt_identifier)
identifier = ASTIdentifier(txt_identifier)
params.append(ASTTemplateIntroductionParameter(identifier, parameterPack))
self.skip_ws()
if self.skip_string('}'):
break
elif self.skip_string(','):
continue
else:
self.fail("Error in template introduction list. "
'Expected ",", or "}".')
return ASTTemplateIntroduction(concept, params)
def _parse_template_declaration_prefix(self, objectType: str
) -> ASTTemplateDeclarationPrefix:
templates = [] # type: List[Union[ASTTemplateParams, ASTTemplateIntroduction]]
while 1:
self.skip_ws()
# the saved position is only used to provide a better error message
params = None # type: Union[ASTTemplateParams, ASTTemplateIntroduction]
pos = self.pos
if self.skip_word("template"):
try:
params = self._parse_template_parameter_list()
except DefinitionError as e:
if objectType == 'member' and len(templates) == 0:
return ASTTemplateDeclarationPrefix(None)
else:
raise e
else:
params = self._parse_template_introduction()
if not params:
break
if objectType == 'concept' and len(templates) > 0:
self.pos = pos
self.fail("More than 1 template parameter list for concept.")
templates.append(params)
if len(templates) == 0 and objectType == 'concept':
self.fail('Missing template parameter list for concept.')
if len(templates) == 0:
return None
else:
return ASTTemplateDeclarationPrefix(templates)
def _check_template_consistency(self, nestedName: ASTNestedName,
templatePrefix: ASTTemplateDeclarationPrefix,
fullSpecShorthand: bool, isMember: bool = False
) -> ASTTemplateDeclarationPrefix:
numArgs = nestedName.num_templates()
isMemberInstantiation = False
if not templatePrefix:
numParams = 0
else:
if isMember and templatePrefix.templates is None:
numParams = 0
isMemberInstantiation = True
else:
numParams = len(templatePrefix.templates)
if numArgs + 1 < numParams:
self.fail("Too few template argument lists compared to parameter"
" lists. Argument lists: %d, Parameter lists: %d."
% (numArgs, numParams))
if numArgs > numParams:
numExtra = numArgs - numParams
if not fullSpecShorthand and not isMemberInstantiation:
msg = "Too many template argument lists compared to parameter" \
      " lists. Argument lists: %d, Parameter lists: %d," \
      " Extra empty parameter lists prepended: %d." \
% (numArgs, numParams, numExtra)
msg += " Declaration:\n\t"
if templatePrefix:
msg += "%s\n\t" % templatePrefix
msg += str(nestedName)
self.warn(msg)
newTemplates = [] # type: List[Union[ASTTemplateParams, ASTTemplateIntroduction]]
for i in range(numExtra):
newTemplates.append(ASTTemplateParams([]))
if templatePrefix and not isMemberInstantiation:
newTemplates.extend(templatePrefix.templates)
templatePrefix = ASTTemplateDeclarationPrefix(newTemplates)
return templatePrefix
def parse_declaration(self, objectType: str, directiveType: str) -> ASTDeclaration:
if objectType not in ('class', 'union', 'function', 'member', 'type',
'concept', 'enum', 'enumerator'):
raise Exception('Internal error, unknown objectType "%s".' % objectType)
if directiveType not in ('class', 'struct', 'union', 'function', 'member', 'var',
'type', 'concept',
'enum', 'enum-struct', 'enum-class', 'enumerator'):
raise Exception('Internal error, unknown directiveType "%s".' % directiveType)
visibility = None
templatePrefix = None
declaration = None # type: Any
self.skip_ws()
if self.match(_visibility_re):
visibility = self.matched_text
if objectType in ('type', 'concept', 'member', 'function', 'class'):
templatePrefix = self._parse_template_declaration_prefix(objectType)
if objectType == 'type':
prevErrors = []
pos = self.pos
try:
if not templatePrefix:
declaration = self._parse_type(named=True, outer='type')
except DefinitionError as e:
prevErrors.append((e, "If typedef-like declaration"))
self.pos = pos
pos = self.pos
try:
if not declaration:
declaration = self._parse_type_using()
except DefinitionError as e:
self.pos = pos
prevErrors.append((e, "If type alias or template alias"))
header = "Error in type declaration."
raise self._make_multi_error(prevErrors, header)
elif objectType == 'concept':
declaration = self._parse_concept()
elif objectType == 'member':
declaration = self._parse_type_with_init(named=True, outer='member')
elif objectType == 'function':
declaration = self._parse_type(named=True, outer='function')
elif objectType == 'class':
declaration = self._parse_class()
elif objectType == 'union':
declaration = self._parse_union()
elif objectType == 'enum':
declaration = self._parse_enum()
elif objectType == 'enumerator':
declaration = self._parse_enumerator()
else:
assert False
templatePrefix = self._check_template_consistency(declaration.name,
templatePrefix,
fullSpecShorthand=False,
isMember=objectType == 'member')
return ASTDeclaration(objectType, directiveType, visibility,
templatePrefix, declaration)
def parse_namespace_object(self) -> ASTNamespace:
templatePrefix = self._parse_template_declaration_prefix(objectType="namespace")
name = self._parse_nested_name()
templatePrefix = self._check_template_consistency(name, templatePrefix,
fullSpecShorthand=False)
res = ASTNamespace(name, templatePrefix)
res.objectType = 'namespace' # type: ignore
return res
def parse_xref_object(self) -> Tuple[Union[ASTNamespace, ASTDeclaration], bool]:
pos = self.pos
try:
templatePrefix = self._parse_template_declaration_prefix(objectType="xref")
name = self._parse_nested_name()
# if there are '()' left, just skip them
self.skip_ws()
self.skip_string('()')
self.assert_end()
templatePrefix = self._check_template_consistency(name, templatePrefix,
fullSpecShorthand=True)
res1 = ASTNamespace(name, templatePrefix)
res1.objectType = 'xref' # type: ignore
return res1, True
except DefinitionError as e1:
try:
self.pos = pos
res2 = self.parse_declaration('function', 'function')
# if there are '()' left, just skip them
self.skip_ws()
self.skip_string('()')
self.assert_end()
return res2, False
except DefinitionError as e2:
errs = []
errs.append((e1, "If shorthand ref"))
errs.append((e2, "If full function ref"))
msg = "Error in cross-reference."
raise self._make_multi_error(errs, msg)
def parse_expression(self) -> Union[ASTExpression, ASTType]:
pos = self.pos
try:
expr = self._parse_expression()
self.skip_ws()
self.assert_end()
return expr
except DefinitionError as exExpr:
self.pos = pos
try:
typ = self._parse_type(False)
self.skip_ws()
self.assert_end()
return typ
except DefinitionError as exType:
header = "Error when parsing (type) expression."
errs = []
errs.append((exExpr, "If expression"))
errs.append((exType, "If type"))
raise self._make_multi_error(errs, header)
def _make_phony_error_name() -> ASTNestedName:
nne = ASTNestedNameElement(ASTIdentifier("PhonyNameDueToError"), None)
return ASTNestedName([nne], [False], rooted=False)
class CPPObject(ObjectDescription):
"""Description of a C++ language object."""
doc_field_types = [
GroupedField('parameter', label=_('Parameters'),
names=('param', 'parameter', 'arg', 'argument'),
can_collapse=True),
GroupedField('template parameter', label=_('Template Parameters'),
names=('tparam', 'template parameter'),
can_collapse=True),
GroupedField('exceptions', label=_('Throws'), rolename='cpp:class',
names=('throws', 'throw', 'exception'),
can_collapse=True),
Field('returnvalue', label=_('Returns'), has_arg=False,
names=('returns', 'return')),
]
option_spec = dict(ObjectDescription.option_spec)
option_spec['tparam-line-spec'] = directives.flag
def _add_enumerator_to_parent(self, ast: ASTDeclaration) -> None:
assert ast.objectType == 'enumerator'
# find the parent, if it exists && is an enum
# && it's unscoped,
# then add the name to the parent scope
symbol = ast.symbol
assert symbol
assert symbol.identOrOp is not None
assert symbol.templateParams is None
assert symbol.templateArgs is None
parentSymbol = symbol.parent
assert parentSymbol
if parentSymbol.parent is None:
# TODO: we could warn, but it is somewhat equivalent to unscoped
# enums, without the enum
return # no parent
parentDecl = parentSymbol.declaration
if parentDecl is None:
# the parent is not explicitly declared
# TODO: we could warn, but it could be a style to just assume
# enumerator parents to be scoped
return
if parentDecl.objectType != 'enum':
# TODO: maybe issue a warning, enumerators in non-enums is weird,
# but it is somewhat equivalent to unscoped enums, without the enum
return
if parentDecl.directiveType != 'enum':
return
targetSymbol = parentSymbol.parent
s = targetSymbol.find_identifier(symbol.identOrOp, matchSelf=False, recurseInAnon=True,
searchInSiblings=False)
if s is not None:
# something is already declared with that name
return
declClone = symbol.declaration.clone()
declClone.enumeratorScopedSymbol = symbol
Symbol(parent=targetSymbol, identOrOp=symbol.identOrOp,
templateParams=None, templateArgs=None,
declaration=declClone,
docname=self.env.docname)
def add_target_and_index(self, ast: ASTDeclaration, sig: str,
signode: TextElement) -> None:
# general note: name must be lstrip(':')'ed, to remove "::"
ids = []
for i in range(1, _max_id + 1):
try:
id = ast.get_id(version=i)
ids.append(id)
except NoOldIdError:
assert i < _max_id
# let's keep the newest first
ids = list(reversed(ids))
newestId = ids[0]
assert newestId # shouldn't be None
if not re.compile(r'^[a-zA-Z0-9_]*$').match(newestId):
logger.warning('Index id generation for C++ object "%s" failed, please '
'report as bug (id=%s).', ast, newestId,
location=self.get_source_info())
name = ast.symbol.get_full_nested_name().get_display_string().lstrip(':')
# Add index entry, but not if it's a declaration inside a concept
isInConcept = False
s = ast.symbol.parent
while s is not None:
decl = s.declaration
s = s.parent
if decl is None:
continue
if decl.objectType == 'concept':
isInConcept = True
break
if not isInConcept:
strippedName = name
for prefix in self.env.config.cpp_index_common_prefix:
if name.startswith(prefix):
strippedName = strippedName[len(prefix):]
break
indexText = self.get_index_text(strippedName)
self.indexnode['entries'].append(('single', indexText, newestId, '', None))
if newestId not in self.state.document.ids:
# if the name is not unique, the first one will win
names = self.env.domaindata['cpp']['names']
if name not in names:
names[name] = ast.symbol.docname
# always add the newest id
assert newestId
signode['ids'].append(newestId)
# only add compatibility ids when there are no conflicts
for id in ids[1:]:
if not id: # is None when the element didn't exist in that version
continue
if id not in self.state.document.ids:
signode['ids'].append(id)
self.state.document.note_explicit_target(signode)
@property
def object_type(self) -> str:
raise NotImplementedError()
@property
def display_object_type(self) -> str:
return self.object_type
def get_index_text(self, name: str) -> str:
return _('%s (C++ %s)') % (name, self.display_object_type)
def parse_definition(self, parser: DefinitionParser) -> ASTDeclaration:
return parser.parse_declaration(self.object_type, self.objtype)
def describe_signature(self, signode: desc_signature,
ast: ASTDeclaration, options: Dict) -> None:
ast.describe_signature(signode, 'lastIsName', self.env, options)
def run(self) -> List[Node]:
env = self.state.document.settings.env # from ObjectDescription.run
if 'cpp:parent_symbol' not in env.temp_data:
root = env.domaindata['cpp']['root_symbol']
env.temp_data['cpp:parent_symbol'] = root
env.ref_context['cpp:parent_key'] = root.get_lookup_key()
# The lookup keys assume that no nested scopes exist inside overloaded functions.
# (see also #5191)
# Example:
# .. cpp:function:: void f(int)
# .. cpp:function:: void f(double)
#
# .. cpp:function:: void g()
#
# :cpp:any:`boom`
#
# So we disallow any signatures inside functions.
parentSymbol = env.temp_data['cpp:parent_symbol']
parentDecl = parentSymbol.declaration
if parentDecl is not None and parentDecl.objectType == 'function':
logger.warning("C++ declarations inside functions are not supported." +
" Parent function is " +
str(parentSymbol.get_full_nested_name()),
location=self.get_source_info())
name = _make_phony_error_name()
symbol = parentSymbol.add_name(name)
env.temp_data['cpp:last_symbol'] = symbol
return []
# When multiple declarations are made in the same directive
# they need to know about each other to provide symbol lookup for function parameters.
# We use last_symbol to store the latest added declaration in a directive.
env.temp_data['cpp:last_symbol'] = None
return super().run()
def handle_signature(self, sig: str, signode: desc_signature) -> ASTDeclaration:
parentSymbol = self.env.temp_data['cpp:parent_symbol']
parser = DefinitionParser(sig, location=signode, config=self.env.config)
try:
ast = self.parse_definition(parser)
parser.assert_end()
except DefinitionError as e:
logger.warning(e, location=signode)
# It is easier to assume some phony name than handling the error in
# the possibly inner declarations.
name = _make_phony_error_name()
symbol = parentSymbol.add_name(name)
self.env.temp_data['cpp:last_symbol'] = symbol
raise ValueError
try:
symbol = parentSymbol.add_declaration(ast, docname=self.env.docname)
# append the new declaration to the sibling list
assert symbol.siblingAbove is None
assert symbol.siblingBelow is None
symbol.siblingAbove = self.env.temp_data['cpp:last_symbol']
if symbol.siblingAbove is not None:
assert symbol.siblingAbove.siblingBelow is None
symbol.siblingAbove.siblingBelow = symbol
self.env.temp_data['cpp:last_symbol'] = symbol
except _DuplicateSymbolError as e:
# Assume we are actually in the old symbol,
# instead of the newly created duplicate.
self.env.temp_data['cpp:last_symbol'] = e.symbol
logger.warning("Duplicate declaration, %s", sig, location=signode)
if ast.objectType == 'enumerator':
self._add_enumerator_to_parent(ast)
# note: handle_signature may be called multiple time per directive,
# if it has multiple signatures, so don't mess with the original options.
options = dict(self.options)
options['tparam-line-spec'] = 'tparam-line-spec' in self.options
self.describe_signature(signode, ast, options)
return ast
def before_content(self) -> None:
lastSymbol = self.env.temp_data['cpp:last_symbol'] # type: Symbol
assert lastSymbol
self.oldParentSymbol = self.env.temp_data['cpp:parent_symbol']
self.oldParentKey = self.env.ref_context['cpp:parent_key'] # type: LookupKey
self.env.temp_data['cpp:parent_symbol'] = lastSymbol
self.env.ref_context['cpp:parent_key'] = lastSymbol.get_lookup_key()
def after_content(self) -> None:
self.env.temp_data['cpp:parent_symbol'] = self.oldParentSymbol
self.env.ref_context['cpp:parent_key'] = self.oldParentKey
class CPPTypeObject(CPPObject):
object_type = 'type'
class CPPConceptObject(CPPObject):
object_type = 'concept'
class CPPMemberObject(CPPObject):
object_type = 'member'
class CPPFunctionObject(CPPObject):
object_type = 'function'
class CPPClassObject(CPPObject):
object_type = 'class'
@property
def display_object_type(self) -> str:
# the distinction between class and struct is only cosmetic
assert self.objtype in ('class', 'struct')
return self.objtype
class CPPUnionObject(CPPObject):
object_type = 'union'
class CPPEnumObject(CPPObject):
object_type = 'enum'
class CPPEnumeratorObject(CPPObject):
object_type = 'enumerator'
class CPPNamespaceObject(SphinxDirective):
"""
This directive is just to tell Sphinx that we're documenting stuff in
namespace foo.
"""
has_content = False
required_arguments = 1
optional_arguments = 0
final_argument_whitespace = True
option_spec = {} # type: Dict
def run(self) -> List[Node]:
rootSymbol = self.env.domaindata['cpp']['root_symbol']
if self.arguments[0].strip() in ('NULL', '0', 'nullptr'):
symbol = rootSymbol
stack = [] # type: List[Symbol]
else:
parser = DefinitionParser(self.arguments[0],
location=self.get_source_info(),
config=self.config)
try:
ast = parser.parse_namespace_object()
parser.assert_end()
except DefinitionError as e:
logger.warning(e, location=self.get_source_info())
name = _make_phony_error_name()
ast = ASTNamespace(name, None)
symbol = rootSymbol.add_name(ast.nestedName, ast.templatePrefix)
stack = [symbol]
self.env.temp_data['cpp:parent_symbol'] = symbol
self.env.temp_data['cpp:namespace_stack'] = stack
self.env.ref_context['cpp:parent_key'] = symbol.get_lookup_key()
return []
class CPPNamespacePushObject(SphinxDirective):
has_content = False
required_arguments = 1
optional_arguments = 0
final_argument_whitespace = True
option_spec = {} # type: Dict
def run(self) -> List[Node]:
if self.arguments[0].strip() in ('NULL', '0', 'nullptr'):
return []
parser = DefinitionParser(self.arguments[0],
location=self.get_source_info(),
config=self.config)
try:
ast = parser.parse_namespace_object()
parser.assert_end()
except DefinitionError as e:
logger.warning(e, location=self.get_source_info())
name = _make_phony_error_name()
ast = ASTNamespace(name, None)
oldParent = self.env.temp_data.get('cpp:parent_symbol', None)
if not oldParent:
oldParent = self.env.domaindata['cpp']['root_symbol']
symbol = oldParent.add_name(ast.nestedName, ast.templatePrefix)
stack = self.env.temp_data.get('cpp:namespace_stack', [])
stack.append(symbol)
self.env.temp_data['cpp:parent_symbol'] = symbol
self.env.temp_data['cpp:namespace_stack'] = stack
self.env.ref_context['cpp:parent_key'] = symbol.get_lookup_key()
return []
class CPPNamespacePopObject(SphinxDirective):
has_content = False
required_arguments = 0
optional_arguments = 0
final_argument_whitespace = True
option_spec = {} # type: Dict
def run(self) -> List[Node]:
stack = self.env.temp_data.get('cpp:namespace_stack', None)
if not stack or len(stack) == 0:
logger.warning("C++ namespace pop on empty stack. Defaulting to global scope.",
location=self.get_source_info())
stack = []
else:
stack.pop()
if len(stack) > 0:
symbol = stack[-1]
else:
symbol = self.env.domaindata['cpp']['root_symbol']
self.env.temp_data['cpp:parent_symbol'] = symbol
self.env.temp_data['cpp:namespace_stack'] = stack
self.env.ref_context['cpp:parent_key'] = symbol.get_lookup_key()
return []
class AliasNode(nodes.Element):
def __init__(self, sig: str, env: "BuildEnvironment" = None,
parentKey: LookupKey = None) -> None:
super().__init__()
self.sig = sig
if env is not None:
if 'cpp:parent_symbol' not in env.temp_data:
root = env.domaindata['cpp']['root_symbol']
env.temp_data['cpp:parent_symbol'] = root
self.parentKey = env.temp_data['cpp:parent_symbol'].get_lookup_key()
else:
assert parentKey is not None
self.parentKey = parentKey
def copy(self: T) -> T:
return self.__class__(self.sig, env=None, parentKey=self.parentKey) # type: ignore
class AliasTransform(SphinxTransform):
default_priority = ReferencesResolver.default_priority - 1
def apply(self, **kwargs: Any) -> None:
for node in self.document.traverse(AliasNode):
sig = node.sig
parentKey = node.parentKey
try:
parser = DefinitionParser(sig, location=node,
config=self.env.config)
ast, isShorthand = parser.parse_xref_object()
parser.assert_end()
except DefinitionError as e:
logger.warning(e, location=node)
ast, isShorthand = None, None
if ast is None:
# could not be parsed, so stop here
signode = addnodes.desc_signature(sig, '')
signode.clear()
signode += addnodes.desc_name(sig, sig)
node.replace_self(signode)
continue
rootSymbol = self.env.domains['cpp'].data['root_symbol'] # type: Symbol
parentSymbol = rootSymbol.direct_lookup(parentKey) # type: Symbol
if not parentSymbol:
print("Target: ", sig)
print("ParentKey: ", parentKey)
print(rootSymbol.dump(1))
assert parentSymbol # should be there
symbols = [] # type: List[Symbol]
if isShorthand:
assert isinstance(ast, ASTNamespace)
ns = ast
name = ns.nestedName
if ns.templatePrefix:
templateDecls = ns.templatePrefix.templates
else:
templateDecls = []
symbols, failReason = parentSymbol.find_name(
nestedName=name,
templateDecls=templateDecls,
typ='any',
templateShorthand=True,
matchSelf=True, recurseInAnon=True,
searchInSiblings=False)
if symbols is None:
symbols = []
else:
assert isinstance(ast, ASTDeclaration)
decl = ast
name = decl.name
s = parentSymbol.find_declaration(decl, 'any',
templateShorthand=True,
matchSelf=True, recurseInAnon=True)
if s is not None:
symbols.append(s)
symbols = [s for s in symbols if s.declaration is not None]
if len(symbols) == 0:
signode = addnodes.desc_signature(sig, '')
node.append(signode)
signode.clear()
signode += addnodes.desc_name(sig, sig)
logger.warning("Could not find C++ declaration for alias '%s'." % ast,
location=node)
node.replace_self(signode)
else:
nodes = []
options = dict()
options['tparam-line-spec'] = False
for s in symbols:
signode = addnodes.desc_signature(sig, '')
nodes.append(signode)
s.declaration.describe_signature(signode, 'markName', self.env, options)
node.replace_self(nodes)
class CPPAliasObject(ObjectDescription):
option_spec = {} # type: Dict
def run(self) -> List[Node]:
"""
        On purpose this doesn't call the ObjectDescription version, but is based on it.
        Each alias signature may expand into multiple real signatures (an overload set),
        so the signature handling below is adapted from ObjectDescription.run rather
        than reused directly.
"""
if ':' in self.name:
self.domain, self.objtype = self.name.split(':', 1)
else:
self.domain, self.objtype = '', self.name
node = addnodes.desc()
node.document = self.state.document
node['domain'] = self.domain
# 'desctype' is a backwards compatible attribute
node['objtype'] = node['desctype'] = self.objtype
node['noindex'] = True
self.names = [] # type: List[str]
signatures = self.get_signatures()
for i, sig in enumerate(signatures):
node.append(AliasNode(sig, env=self.env))
contentnode = addnodes.desc_content()
node.append(contentnode)
self.before_content()
self.state.nested_parse(self.content, self.content_offset, contentnode)
self.env.temp_data['object'] = None
self.after_content()
return [node]
class CPPXRefRole(XRefRole):
def process_link(self, env: BuildEnvironment, refnode: Element, has_explicit_title: bool,
title: str, target: str) -> Tuple[str, str]:
refnode.attributes.update(env.ref_context)
if not has_explicit_title:
# major hax: replace anon names via simple string manipulation.
# Can this actually fail?
title = anon_identifier_re.sub("[anonymous]", str(title))
if refnode['reftype'] == 'any':
# Assume the removal part of fix_parens for :any: refs.
            # The addition part is done when the reference is resolved.
if not has_explicit_title and title.endswith('()'):
title = title[:-2]
if target.endswith('()'):
target = target[:-2]
# TODO: should this really be here?
if not has_explicit_title:
target = target.lstrip('~') # only has a meaning for the title
# if the first character is a tilde, don't display the module/class
# parts of the contents
if title[:1] == '~':
title = title[1:]
dcolon = title.rfind('::')
if dcolon != -1:
title = title[dcolon + 2:]
return title, target
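# Illustrative sketch (not part of the original module): a stand-alone rendition of the
# title trimming performed in CPPXRefRole.process_link above, e.g. an xref written as
# ``~MyNS::MyClass::foo()`` in a function role is displayed simply as ``foo``. This is a
# simplification: the real paren handling also depends on the role type and the
# add_function_parentheses config value.
def _example_trim_xref_title(title: str) -> str:
    if title.endswith('()'):
        title = title[:-2]
    if title[:1] == '~':
        title = title[1:]
        dcolon = title.rfind('::')
        if dcolon != -1:
            title = title[dcolon + 2:]
    return title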
class CPPExprRole(SphinxRole):
def __init__(self, asCode: bool) -> None:
super().__init__()
if asCode:
# render the expression as inline code
self.class_type = 'cpp-expr'
self.node_type = nodes.literal # type: Type[TextElement]
else:
# render the expression as inline text
self.class_type = 'cpp-texpr'
self.node_type = nodes.inline
def run(self) -> Tuple[List[Node], List[system_message]]:
text = self.text.replace('\n', ' ')
parser = DefinitionParser(text,
location=self.get_source_info(),
config=self.config)
# attempt to mimic XRefRole classes, except that...
classes = ['xref', 'cpp', self.class_type]
try:
ast = parser.parse_expression()
except DefinitionError as ex:
logger.warning('Unparseable C++ expression: %r\n%s', text, ex,
location=self.get_source_info())
# see below
return [self.node_type(text, text, classes=classes)], []
parentSymbol = self.env.temp_data.get('cpp:parent_symbol', None)
if parentSymbol is None:
parentSymbol = self.env.domaindata['cpp']['root_symbol']
# ...most if not all of these classes should really apply to the individual references,
# not the container node
signode = self.node_type(classes=classes)
ast.describe_signature(signode, 'markType', self.env, parentSymbol)
return [signode], []
class CPPDomain(Domain):
"""C++ language domain.
There are two 'object type' attributes being used::
- Each object created from directives gets an assigned .objtype from ObjectDescription.run.
This is simply the directive name.
- Each declaration (see the distinction in the directives dict below) has a nested .ast of
type ASTDeclaration. That object has .objectType which corresponds to the keys in the
object_types dict below. They are the core different types of declarations in C++ that
one can document.
"""
name = 'cpp'
label = 'C++'
object_types = {
'class': ObjType(_('class'), 'class', 'type', 'identifier'),
'union': ObjType(_('union'), 'union', 'type', 'identifier'),
'function': ObjType(_('function'), 'function', 'func', 'type', 'identifier'),
'member': ObjType(_('member'), 'member', 'var'),
'type': ObjType(_('type'), 'type', 'identifier'),
'concept': ObjType(_('concept'), 'concept', 'identifier'),
'enum': ObjType(_('enum'), 'enum', 'type', 'identifier'),
'enumerator': ObjType(_('enumerator'), 'enumerator')
}
directives = {
# declarations
'class': CPPClassObject,
'struct': CPPClassObject,
'union': CPPUnionObject,
'function': CPPFunctionObject,
'member': CPPMemberObject,
'var': CPPMemberObject,
'type': CPPTypeObject,
'concept': CPPConceptObject,
'enum': CPPEnumObject,
'enum-struct': CPPEnumObject,
'enum-class': CPPEnumObject,
'enumerator': CPPEnumeratorObject,
# scope control
'namespace': CPPNamespaceObject,
'namespace-push': CPPNamespacePushObject,
'namespace-pop': CPPNamespacePopObject,
# other
'alias': CPPAliasObject
}
roles = {
'any': CPPXRefRole(),
'class': CPPXRefRole(),
'struct': CPPXRefRole(),
'union': CPPXRefRole(),
'func': CPPXRefRole(fix_parens=True),
'member': CPPXRefRole(),
'var': CPPXRefRole(),
'type': CPPXRefRole(),
'concept': CPPXRefRole(),
'enum': CPPXRefRole(),
'enumerator': CPPXRefRole(),
'expr': CPPExprRole(asCode=True),
'texpr': CPPExprRole(asCode=False)
}
initial_data = {
'root_symbol': Symbol(None, None, None, None, None, None),
'names': {} # full name for indexing -> docname
}
def clear_doc(self, docname: str) -> None:
if Symbol.debug_show_tree:
print("clear_doc:", docname)
print("\tbefore:")
print(self.data['root_symbol'].dump(1))
print("\tbefore end")
rootSymbol = self.data['root_symbol']
rootSymbol.clear_doc(docname)
if Symbol.debug_show_tree:
print("\tafter:")
print(self.data['root_symbol'].dump(1))
print("\tafter end")
print("clear_doc end:", docname)
for name, nDocname in list(self.data['names'].items()):
if nDocname == docname:
del self.data['names'][name]
def process_doc(self, env: BuildEnvironment, docname: str,
document: nodes.document) -> None:
if Symbol.debug_show_tree:
print("process_doc:", docname)
print(self.data['root_symbol'].dump(0))
print("process_doc end:", docname)
def process_field_xref(self, pnode: pending_xref) -> None:
pnode.attributes.update(self.env.ref_context)
def merge_domaindata(self, docnames: List[str], otherdata: Dict) -> None:
if Symbol.debug_show_tree:
print("merge_domaindata:")
print("\tself:")
print(self.data['root_symbol'].dump(1))
print("\tself end")
print("\tother:")
print(otherdata['root_symbol'].dump(1))
print("\tother end")
self.data['root_symbol'].merge_with(otherdata['root_symbol'],
docnames, self.env)
ourNames = self.data['names']
for name, docname in otherdata['names'].items():
if docname in docnames:
if name in ourNames:
msg = __("Duplicate declaration, also defined in '%s'.\n"
"Name of declaration is '%s'.")
msg = msg % (ourNames[name], name)
logger.warning(msg, location=docname)
else:
ourNames[name] = docname
if Symbol.debug_show_tree:
print("\tresult:")
print(self.data['root_symbol'].dump(1))
print("\tresult end")
print("merge_domaindata end")
def _resolve_xref_inner(self, env: BuildEnvironment, fromdocname: str, builder: Builder,
typ: str, target: str, node: pending_xref,
contnode: Element) -> Tuple[Element, str]:
# add parens again for those that could be functions
if typ == 'any' or typ == 'func':
target += '()'
parser = DefinitionParser(target, location=node, config=env.config)
try:
ast, isShorthand = parser.parse_xref_object()
except DefinitionError as e:
# as arg to stop flake8 from complaining
def findWarning(e: Exception) -> Tuple[str, Exception]:
if typ != 'any' and typ != 'func':
return target, e
# hax on top of the paren hax to try to get correct errors
parser2 = DefinitionParser(target[:-2],
location=node,
config=env.config)
try:
parser2.parse_xref_object()
except DefinitionError as e2:
return target[:-2], e2
# strange, that we don't get the error now, use the original
return target, e
t, ex = findWarning(e)
logger.warning('Unparseable C++ cross-reference: %r\n%s', t, ex,
location=node)
return None, None
parentKey = node.get("cpp:parent_key", None) # type: LookupKey
rootSymbol = self.data['root_symbol']
if parentKey:
parentSymbol = rootSymbol.direct_lookup(parentKey) # type: Symbol
if not parentSymbol:
print("Target: ", target)
print("ParentKey: ", parentKey.data)
print(rootSymbol.dump(1))
assert parentSymbol # should be there
else:
parentSymbol = rootSymbol
if isShorthand:
assert isinstance(ast, ASTNamespace)
ns = ast
name = ns.nestedName
if ns.templatePrefix:
templateDecls = ns.templatePrefix.templates
else:
templateDecls = []
# let's be conservative with the sibling lookup for now
searchInSiblings = (not name.rooted) and len(name.names) == 1
symbols, failReason = parentSymbol.find_name(
name, templateDecls, typ,
templateShorthand=True,
matchSelf=True, recurseInAnon=True,
searchInSiblings=searchInSiblings)
if symbols is None:
if typ == 'identifier':
if failReason == 'templateParamInQualified':
# this is an xref we created as part of a signature,
# so don't warn for names nested in template parameters
raise NoUri(str(name), typ)
s = None
else:
# just refer to the arbitrarily first symbol
s = symbols[0]
else:
assert isinstance(ast, ASTDeclaration)
decl = ast
name = decl.name
s = parentSymbol.find_declaration(decl, typ,
templateShorthand=True,
matchSelf=True, recurseInAnon=True)
if s is None or s.declaration is None:
txtName = str(name)
if txtName.startswith('std::') or txtName == 'std':
raise NoUri(txtName, typ)
return None, None
if typ.startswith('cpp:'):
typ = typ[4:]
origTyp = typ
if typ == 'func':
typ = 'function'
if typ == 'struct':
typ = 'class'
declTyp = s.declaration.objectType
def checkType() -> bool:
if typ == 'any' or typ == 'identifier':
return True
if declTyp == 'templateParam':
# TODO: perhaps this should be strengthened one day
return True
if declTyp == 'functionParam':
if typ == 'var' or typ == 'member':
return True
objtypes = self.objtypes_for_role(typ)
if objtypes:
return declTyp in objtypes
print("Type is %s (originally: %s), declType is %s" % (typ, origTyp, declTyp))
assert False
if not checkType():
logger.warning("cpp:%s targets a %s (%s).",
origTyp, s.declaration.objectType,
s.get_full_nested_name(),
location=node)
declaration = s.declaration
if isShorthand:
fullNestedName = s.get_full_nested_name()
displayName = fullNestedName.get_display_string().lstrip(':')
else:
displayName = decl.get_display_string()
docname = s.docname
assert docname
# the non-identifier refs are cross-references, which should be processed:
# - fix parenthesis due to operator() and add_function_parentheses
if typ != "identifier":
title = contnode.pop(0).astext()
# If it's operator(), we need to add '()' if explicit function parens
# are requested. Then the Sphinx machinery will add another pair.
# Also, if it's an 'any' ref that resolves to a function, we need to add
# parens as well.
# However, if it's a non-shorthand function ref, for a function that
# takes no arguments, then we may need to add parens again as well.
addParen = 0
if not node.get('refexplicit', False) and declaration.objectType == 'function':
if isShorthand:
# this is just the normal haxing for 'any' roles
if env.config.add_function_parentheses and typ == 'any':
addParen += 1
# and now this stuff for operator()
if (env.config.add_function_parentheses and typ == 'function' and
title.endswith('operator()')):
addParen += 1
if ((typ == 'any' or typ == 'function') and
title.endswith('operator') and
displayName.endswith('operator()')):
addParen += 1
else:
# our job here is to essentially nullify add_function_parentheses
if env.config.add_function_parentheses:
if typ == 'any' and displayName.endswith('()'):
addParen += 1
elif typ == 'function':
if title.endswith('()') and not displayName.endswith('()'):
title = title[:-2]
else:
if displayName.endswith('()'):
addParen += 1
if addParen > 0:
title += '()' * addParen
# and reconstruct the title again
contnode += nodes.Text(title)
return make_refnode(builder, fromdocname, docname,
declaration.get_newest_id(), contnode, displayName
), declaration.objectType
def resolve_xref(self, env: BuildEnvironment, fromdocname: str, builder: Builder,
typ: str, target: str, node: pending_xref, contnode: Element
) -> Element:
return self._resolve_xref_inner(env, fromdocname, builder, typ,
target, node, contnode)[0]
def resolve_any_xref(self, env: BuildEnvironment, fromdocname: str, builder: Builder,
target: str, node: pending_xref, contnode: Element
) -> List[Tuple[str, Element]]:
with logging.suppress_logging():
retnode, objtype = self._resolve_xref_inner(env, fromdocname, builder,
'any', target, node, contnode)
if retnode:
if objtype == 'templateParam':
return [('cpp:templateParam', retnode)]
else:
return [('cpp:' + self.role_for_objtype(objtype), retnode)]
return []
def get_objects(self) -> Iterator[Tuple[str, str, str, str, str, int]]:
rootSymbol = self.data['root_symbol']
for symbol in rootSymbol.get_all_symbols():
if symbol.declaration is None:
continue
assert symbol.docname
fullNestedName = symbol.get_full_nested_name()
name = str(fullNestedName).lstrip(':')
dispname = fullNestedName.get_display_string().lstrip(':')
objectType = symbol.declaration.objectType
docname = symbol.docname
newestId = symbol.declaration.get_newest_id()
yield (name, dispname, objectType, docname, newestId, 1)
def get_full_qualified_name(self, node: Element) -> str:
target = node.get('reftarget', None)
if target is None:
return None
parentKey = node.get("cpp:parent_key", None) # type: LookupKey
if parentKey is None or len(parentKey.data) <= 0:
return None
rootSymbol = self.data['root_symbol']
parentSymbol = rootSymbol.direct_lookup(parentKey)
parentName = parentSymbol.get_full_nested_name()
return '::'.join([str(parentName), target])
def setup(app: Sphinx) -> Dict[str, Any]:
app.add_domain(CPPDomain)
app.add_config_value("cpp_index_common_prefix", [], 'env')
app.add_config_value("cpp_id_attributes", [], 'env')
app.add_config_value("cpp_paren_attributes", [], 'env')
app.add_post_transform(AliasTransform)
return {
'version': 'builtin',
'env_version': 2,
'parallel_read_safe': True,
'parallel_write_safe': True,
}
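# Illustrative sketch (not part of the original module): the config values registered in
# setup() above would typically be set in a project's conf.py. The attribute and prefix
# names below are hypothetical examples, not values required by Sphinx.
_EXAMPLE_CONF_PY_SNIPPET = """
cpp_index_common_prefix = ['mylib::']
cpp_id_attributes = ['MYLIB_EXPORT']
cpp_paren_attributes = ['MYLIB_DEPRECATED']
"""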
| 39.700055 | 145 | 0.552608 | [
"BSD-2-Clause"
] | begolu2/sphinx | sphinx/domains/cpp.py | 288,143 | Python |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
__all__ = ['WebAppSitePushSettingsSlot']
class WebAppSitePushSettingsSlot(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
dynamic_tags_json: Optional[pulumi.Input[str]] = None,
is_push_enabled: Optional[pulumi.Input[bool]] = None,
kind: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
slot: Optional[pulumi.Input[str]] = None,
tag_whitelist_json: Optional[pulumi.Input[str]] = None,
tags_requiring_auth: Optional[pulumi.Input[str]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
Push settings for the App.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] dynamic_tags_json: Gets or sets a JSON string containing a list of dynamic tags that will be evaluated from user claims in the push registration endpoint.
:param pulumi.Input[bool] is_push_enabled: Gets or sets a flag indicating whether the Push endpoint is enabled.
:param pulumi.Input[str] kind: Kind of resource.
:param pulumi.Input[str] name: Name of web app.
:param pulumi.Input[str] resource_group_name: Name of the resource group to which the resource belongs.
:param pulumi.Input[str] slot: Name of web app slot. If not specified then will default to production slot.
:param pulumi.Input[str] tag_whitelist_json: Gets or sets a JSON string containing a list of tags that are whitelisted for use by the push registration endpoint.
:param pulumi.Input[str] tags_requiring_auth: Gets or sets a JSON string containing a list of tags that require user authentication to be used in the push registration endpoint.
Tags can consist of alphanumeric characters and the following:
'_', '@', '#', '.', ':', '-'.
Validation should be performed at the PushRequestHandler.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['dynamic_tags_json'] = dynamic_tags_json
if is_push_enabled is None and not opts.urn:
raise TypeError("Missing required property 'is_push_enabled'")
__props__['is_push_enabled'] = is_push_enabled
__props__['kind'] = kind
if name is None and not opts.urn:
raise TypeError("Missing required property 'name'")
__props__['name'] = name
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
if slot is None and not opts.urn:
raise TypeError("Missing required property 'slot'")
__props__['slot'] = slot
__props__['tag_whitelist_json'] = tag_whitelist_json
__props__['tags_requiring_auth'] = tags_requiring_auth
__props__['system_data'] = None
__props__['type'] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:web/v20200901:WebAppSitePushSettingsSlot"), pulumi.Alias(type_="azure-native:web:WebAppSitePushSettingsSlot"), pulumi.Alias(type_="azure-nextgen:web:WebAppSitePushSettingsSlot"), pulumi.Alias(type_="azure-native:web/latest:WebAppSitePushSettingsSlot"), pulumi.Alias(type_="azure-nextgen:web/latest:WebAppSitePushSettingsSlot"), pulumi.Alias(type_="azure-native:web/v20160801:WebAppSitePushSettingsSlot"), pulumi.Alias(type_="azure-nextgen:web/v20160801:WebAppSitePushSettingsSlot"), pulumi.Alias(type_="azure-native:web/v20180201:WebAppSitePushSettingsSlot"), pulumi.Alias(type_="azure-nextgen:web/v20180201:WebAppSitePushSettingsSlot"), pulumi.Alias(type_="azure-native:web/v20181101:WebAppSitePushSettingsSlot"), pulumi.Alias(type_="azure-nextgen:web/v20181101:WebAppSitePushSettingsSlot"), pulumi.Alias(type_="azure-native:web/v20190801:WebAppSitePushSettingsSlot"), pulumi.Alias(type_="azure-nextgen:web/v20190801:WebAppSitePushSettingsSlot"), pulumi.Alias(type_="azure-native:web/v20200601:WebAppSitePushSettingsSlot"), pulumi.Alias(type_="azure-nextgen:web/v20200601:WebAppSitePushSettingsSlot"), pulumi.Alias(type_="azure-native:web/v20201001:WebAppSitePushSettingsSlot"), pulumi.Alias(type_="azure-nextgen:web/v20201001:WebAppSitePushSettingsSlot")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(WebAppSitePushSettingsSlot, __self__).__init__(
'azure-native:web/v20200901:WebAppSitePushSettingsSlot',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'WebAppSitePushSettingsSlot':
"""
Get an existing WebAppSitePushSettingsSlot resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["dynamic_tags_json"] = None
__props__["is_push_enabled"] = None
__props__["kind"] = None
__props__["name"] = None
__props__["system_data"] = None
__props__["tag_whitelist_json"] = None
__props__["tags_requiring_auth"] = None
__props__["type"] = None
return WebAppSitePushSettingsSlot(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="dynamicTagsJson")
def dynamic_tags_json(self) -> pulumi.Output[Optional[str]]:
"""
Gets or sets a JSON string containing a list of dynamic tags that will be evaluated from user claims in the push registration endpoint.
"""
return pulumi.get(self, "dynamic_tags_json")
@property
@pulumi.getter(name="isPushEnabled")
def is_push_enabled(self) -> pulumi.Output[bool]:
"""
Gets or sets a flag indicating whether the Push endpoint is enabled.
"""
return pulumi.get(self, "is_push_enabled")
@property
@pulumi.getter
def kind(self) -> pulumi.Output[Optional[str]]:
"""
Kind of resource.
"""
return pulumi.get(self, "kind")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Resource Name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="systemData")
def system_data(self) -> pulumi.Output['outputs.SystemDataResponse']:
"""
The system metadata relating to this resource.
"""
return pulumi.get(self, "system_data")
@property
@pulumi.getter(name="tagWhitelistJson")
def tag_whitelist_json(self) -> pulumi.Output[Optional[str]]:
"""
Gets or sets a JSON string containing a list of tags that are whitelisted for use by the push registration endpoint.
"""
return pulumi.get(self, "tag_whitelist_json")
@property
@pulumi.getter(name="tagsRequiringAuth")
def tags_requiring_auth(self) -> pulumi.Output[Optional[str]]:
"""
Gets or sets a JSON string containing a list of tags that require user authentication to be used in the push registration endpoint.
Tags can consist of alphanumeric characters and the following:
'_', '@', '#', '.', ':', '-'.
Validation should be performed at the PushRequestHandler.
"""
return pulumi.get(self, "tags_requiring_auth")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Resource type.
"""
return pulumi.get(self, "type")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
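# Illustrative usage sketch (not part of the generated SDK): how this resource might be
# declared inside a Pulumi program (it requires a running Pulumi engine to actually
# deploy). The resource group, app and slot names below are hypothetical placeholders;
# a real program would import the class from the published package rather than from
# this module directly.
def _example_usage() -> 'WebAppSitePushSettingsSlot':
    return WebAppSitePushSettingsSlot(
        "sitePushSettingsSlot",
        resource_group_name="example-rg",   # hypothetical resource group
        name="example-webapp",              # hypothetical web app name
        slot="staging",                     # hypothetical deployment slot
        is_push_enabled=True)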
| 51.042328 | 1,345 | 0.671504 | [
"Apache-2.0"
] | pulumi-bot/pulumi-azure-native | sdk/python/pulumi_azure_native/web/v20200901/web_app_site_push_settings_slot.py | 9,647 | Python |
"""
This software was developed by the University of Tennessee as part of the
Distributed Data Analysis of Neutron Scattering Experiments (DANSE)
project funded by the US National Science Foundation.
See the license text in license.txt
copyright 2008, 2009, University of Tennessee
"""
import wx
import sys
from sas.sasgui.guiframe.panel_base import PanelBase
from sas.sascalc.calculator.kiessig_calculator import KiessigThicknessCalculator
from calculator_widgets import OutputTextCtrl
from calculator_widgets import InputTextCtrl
from sas.sasgui.perspectives.calculator import calculator_widgets as widget
from sas.sasgui.guiframe.documentation_window import DocumentationWindow
_BOX_WIDTH = 77
#Kiessig calculator panel size
if sys.platform.count("win32") > 0:
PANEL_TOP = 0
PANEL_WIDTH = 500
PANEL_HEIGHT = 230
FONT_VARIANT = 0
else:
PANEL_TOP = 60
PANEL_WIDTH = 560
PANEL_HEIGHT = 230
FONT_VARIANT = 1
class KiessigThicknessCalculatorPanel(wx.Panel, PanelBase):
"""
Provides the Kiessig thickness calculator GUI.
"""
## Internal nickname for the window, used by the AUI manager
window_name = "Kiessig Thickness Calculator"
## Name to appear on the window title bar
window_caption = "Kiessig Thickness Calculator"
## Flag to tell the AUI manager to put this panel in the center pane
CENTER_PANE = True
def __init__(self, parent, *args, **kwds):
wx.Panel.__init__(self, parent, *args, **kwds)
PanelBase.__init__(self)
#Font size
self.SetWindowVariant(variant=FONT_VARIANT)
# Object that receive status event
self.parent = parent
self.kiessig = KiessigThicknessCalculator()
#layout attribute
self.hint_sizer = None
self._do_layout()
def _define_structure(self):
"""
        Define the main sizers used to build this panel.
"""
self.main_sizer = wx.BoxSizer(wx.VERTICAL)
self.box_source = wx.StaticBox(self, -1,
str("Kiessig Thickness Calculator"))
self.boxsizer_source = wx.StaticBoxSizer(self.box_source,
wx.VERTICAL)
self.dq_name_sizer = wx.BoxSizer(wx.HORIZONTAL)
self.thickness_size_sizer = wx.BoxSizer(wx.HORIZONTAL)
self.hint_sizer = wx.BoxSizer(wx.HORIZONTAL)
self.button_sizer = wx.BoxSizer(wx.HORIZONTAL)
def _layout_dq_name(self):
"""
Fill the sizer containing dq name
"""
# get the default dq
dq_value = str(self.kiessig.get_deltaq())
dq_unit_txt = wx.StaticText(self, -1, '[1/A]')
dq_name_txt = wx.StaticText(self, -1,
'Kiessig Fringe Width (Delta Q): ')
self.dq_name_tcl = InputTextCtrl(self, -1,
size=(_BOX_WIDTH,-1))
dq_hint = "Type the Kiessig Fringe Width (Delta Q)"
self.dq_name_tcl.SetValue(dq_value)
self.dq_name_tcl.SetToolTipString(dq_hint)
#control that triggers importing data
id = wx.NewId()
self.compute_button = wx.Button(self, id, "Compute")
hint_on_compute = "Compute the diameter/thickness in the real space."
self.compute_button.SetToolTipString(hint_on_compute)
self.Bind(wx.EVT_BUTTON, self.on_compute, id=id)
self.dq_name_sizer.AddMany([(dq_name_txt, 0, wx.LEFT, 15),
(self.dq_name_tcl, 0, wx.LEFT, 15),
(dq_unit_txt, 0, wx.LEFT, 10),
(self.compute_button, 0, wx.LEFT, 30)])
def _layout_thickness_size(self):
"""
Fill the sizer containing thickness information
"""
thick_unit = '['+self.kiessig.get_thickness_unit() +']'
thickness_size_txt = wx.StaticText(self, -1,
'Thickness (or Diameter): ')
self.thickness_size_tcl = OutputTextCtrl(self, -1,
size=(_BOX_WIDTH,-1))
thickness_size_hint = " Estimated Size in Real Space"
self.thickness_size_tcl.SetToolTipString(thickness_size_hint)
thickness_size_unit_txt = wx.StaticText(self, -1, thick_unit)
self.thickness_size_sizer.AddMany([(thickness_size_txt, 0, wx.LEFT, 15),
(self.thickness_size_tcl, 0, wx.LEFT, 15),
(thickness_size_unit_txt, 0, wx.LEFT, 10)])
def _layout_hint(self):
"""
Fill the sizer containing hint
"""
hint_msg = "This tool is to approximately estimate "
hint_msg += "the thickness of a layer"
hint_msg += " or the diameter of particles\n "
hint_msg += "from the Kiessig fringe width in SAS/NR data."
hint_msg += ""
self.hint_txt = wx.StaticText(self, -1, hint_msg)
self.hint_sizer.AddMany([(self.hint_txt, 0, wx.LEFT, 15)])
def _layout_button(self):
"""
Do the layout for the button widgets
"""
id = wx.NewId()
self.bt_help = wx.Button(self, id, 'HELP')
self.bt_help.Bind(wx.EVT_BUTTON, self.on_help)
self.bt_help.SetToolTipString("Help using the Kiessig fringe calculator.")
self.bt_close = wx.Button(self, wx.ID_CANCEL, 'Close')
self.bt_close.Bind(wx.EVT_BUTTON, self.on_close)
self.bt_close.SetToolTipString("Close this window.")
self.button_sizer.AddMany([(self.bt_help, 0, wx.LEFT, 260),
(self.bt_close, 0, wx.LEFT, 20)])
def _do_layout(self):
"""
Draw window content
"""
self._define_structure()
self._layout_dq_name()
self._layout_thickness_size()
self._layout_hint()
self._layout_button()
self.boxsizer_source.AddMany([(self.dq_name_sizer, 0,
wx.EXPAND|wx.TOP|wx.BOTTOM, 5),
(self.thickness_size_sizer, 0,
wx.EXPAND|wx.TOP|wx.BOTTOM, 5),
(self.hint_sizer, 0,
wx.EXPAND|wx.TOP|wx.BOTTOM, 5)])
self.main_sizer.AddMany([(self.boxsizer_source, 0, wx.ALL, 10),
(self.button_sizer, 0,
wx.EXPAND|wx.TOP|wx.BOTTOM, 5)])
self.SetSizer(self.main_sizer)
self.SetAutoLayout(True)
def on_help(self, event):
"""
Bring up the Kiessig fringe calculator Documentation whenever
the HELP button is clicked.
Calls DocumentationWindow with the path of the location within the
        documentation tree (after /doc/ ....). Note that when using old
        versions of Wx (before 2.9), and thus not the release version of
        the installers, the help comes up at the top level of the file because
        webbrowser does not pass anything past the # to the browser when it is
        running "file:///...."
        :param event: Triggers on clicking the help button
"""
_TreeLocation = "user/sasgui/perspectives/calculator/"
_TreeLocation += "kiessig_calculator_help.html"
_doc_viewer = DocumentationWindow(self, -1, _TreeLocation, "",
"Density/Volume Calculator Help")
def on_close(self, event):
"""
close the window containing this panel
"""
self.parent.Close()
if event is not None:
event.Skip()
def on_compute(self, event):
"""
Execute the computation of thickness
"""
# skip for another event
if event is not None:
event.Skip()
dq = self.dq_name_tcl.GetValue()
self.kiessig.set_deltaq(dq)
# calculate the thickness
output = self.kiessig.compute_thickness()
thickness = self.format_number(output)
# set tcl
self.thickness_size_tcl.SetValue(str(thickness))
def format_number(self, value=None):
"""
        Return the value formatted as a standardized, human-readable string
"""
try:
value = float(value)
except:
output = None
return output
output = "%-7.4g" % value
return output.lstrip().rstrip()
def _onparamEnter(self, event = None):
"""
On Text_enter_callback, perform compute
"""
self.on_compute(event)
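# Illustrative sketch (not part of the original panel): the computation that on_compute
# delegates to KiessigThicknessCalculator, assuming the standard Kiessig fringe relation
# t = 2*pi / delta_q (delta_q in 1/A, t in A). The delta_q value below is a hypothetical
# example input.
def _kiessig_thickness_example(delta_q=0.05):
    import math
    return 2.0 * math.pi / delta_q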
class KiessigWindow(widget.CHILD_FRAME):
def __init__(self, parent=None, manager=None,
title="Kiessig Thickness Calculator",
size=(PANEL_WIDTH,PANEL_HEIGHT), *args, **kwds):
kwds['title'] = title
kwds['size'] = size
widget.CHILD_FRAME.__init__(self, parent, *args, **kwds)
self.parent = parent
self.manager = manager
self.panel = KiessigThicknessCalculatorPanel(parent=self)
self.Bind(wx.EVT_CLOSE, self.on_close)
self.SetPosition((wx.LEFT, PANEL_TOP))
self.Show(True)
def on_close(self, event):
"""
Close event
"""
if self.manager is not None:
self.manager.kiessig_frame = None
self.Destroy()
if __name__ == "__main__":
app = wx.PySimpleApp()
widget.CHILD_FRAME = wx.Frame
frame = KiessigWindow()
frame.Show(True)
app.MainLoop()
| 38.269076 | 86 | 0.59597 | [
"BSD-3-Clause"
] | andyfaff/sasview | src/sas/sasgui/perspectives/calculator/kiessig_calculator_panel.py | 9,529 | Python |
import unittest
from typing import Optional
from pathlib import Path
from time import strftime, gmtime
from github import Github
import json
import yaml
from .. import get_repo_meta, clone_and_archive
from ..get_github import dump_list
from pathlib import Path
class TestGetGithub(unittest.TestCase):
"""
Test coverage for the get_github module
"""
_current_dir = Path(__file__).resolve().parent
#BORROWED from https://github.com/NOAA-OWP/DMOD/blob/master/python/lib/scheduler/dmod/test/it_redisManager.py
@classmethod
def find_project_root_directory(cls, current_directory: Optional[Path]) -> Optional[Path]:
"""
Given a directory (with ``None`` implying the current directory) assumed to be at or under this project's root,
find the project root directory.
        This implementation attempts to find a directory having both a ``.git/`` child directory and a ``config.yaml`` file.
Parameters
----------
current_directory
Returns
-------
Optional[Path]
The project root directory, or ``None`` if it fails to find it.
"""
if not current_directory:
current_directory = TestGetGithub._current_dir
abs_root = Path(current_directory.absolute().root)
while current_directory.absolute() != abs_root:
if not current_directory.is_dir():
current_directory = current_directory.parent
continue
git_sub_dir = current_directory.joinpath('.git')
child_env_file = current_directory.joinpath('config.yaml')
if git_sub_dir.exists() and git_sub_dir.is_dir() and child_env_file.exists() and child_env_file.is_file():
return current_directory
current_directory = current_directory.parent
return None
@classmethod
def load_token(cls):
"""
        Read an API token from the project's config.yaml; if none is found, use '' for no auth
"""
token = ''
root_dir = cls.find_project_root_directory(None)
if not root_dir:
return token
config_file = root_dir/'config.yaml'
if config_file.exists():
with open(config_file) as file:
config = yaml.load(file, Loader=yaml.FullLoader)
try:
token = config['token']
except:
print("Unable to load api-token from project root directory config.yaml")
return token
def setUp(self):
self.token = self.load_token()
if self.token:
#Token auth github, higher rate limit
self.github = Github(self.token)
else:
            #Anonymous github, severely rate limited API
self.github = Github()
self.org_string = 'NOAA-OWP'
self.repo_string = 'owp-open-source-project-template'
self.repo_w_wiki = 'DMOD'
self.org = self.github.get_organization(self.org_string)
self.wiki_repo = self.org.get_repo(self.repo_w_wiki)
self.repo = self.org.get_repo(self.repo_string)
self.time = strftime("%Y-%m-%d_%H:%M:%S", gmtime())
def tearDown(self):
for p in Path(TestGetGithub._current_dir).glob("*.json"):
if p.is_file():
p.unlink()
for p in Path(TestGetGithub._current_dir).glob("*.tar.gz"):
if p.is_file():
p.unlink()
def test_get_repo_meta(self):
"""
        Test the get_repo_meta function to ensure all metadata is properly captured
"""
meta = get_repo_meta(self.repo, self.time, TestGetGithub._current_dir)
self.assertIsNotNone(meta)
        self.assertEqual(len(meta), 6)
#defer name substitution
pattern = "{repo}_{name}_{time}.json".format(repo=self.repo_string, name="{name}", time=self.time)
self.assertEqual(meta[0].name, pattern.format(name='comments'))
self.assertEqual(meta[1].name, pattern.format(name='issues'))
self.assertEqual(meta[2].name, pattern.format(name='issue_comments'))
self.assertEqual(meta[3].name, pattern.format(name='pulls'))
self.assertEqual(meta[4].name, pattern.format(name='pulls_comments'))
self.assertEqual(meta[5].name, pattern.format(name='pulls_review_comments'))
self.assertTrue((TestGetGithub._current_dir/pattern.format(name='comments')).exists())
self.assertTrue((TestGetGithub._current_dir/pattern.format(name='issues')).exists())
self.assertTrue((TestGetGithub._current_dir/pattern.format(name='issue_comments')).exists())
self.assertTrue((TestGetGithub._current_dir/pattern.format(name='pulls')).exists())
self.assertTrue((TestGetGithub._current_dir/pattern.format(name='pulls_comments')).exists())
self.assertTrue((TestGetGithub._current_dir/pattern.format(name='pulls_review_comments')).exists())
def test_clone_and_archive(self):
"""
Test the clone functionality
"""
#Sorta hackily testing the archive_repo logic here...FIXME later
#FIXME has_wiki is broke!!!
self.assertFalse(self.repo.has_wiki)
clone_url = self.repo.clone_url
archive_name = clone_and_archive(self.repo_string, clone_url, self.time, TestGetGithub._current_dir, [])
name = '{repo}_github_archive_{time}.tar.gz'.format(repo=self.repo_string, time=self.time)
self.assertEqual(archive_name.name, name)
self.assertTrue((TestGetGithub._current_dir/name).exists())
#TODO test existence of repo in archive
def test_clone_and_archive_1(self):
"""
Test cloning a repo with a wiki
"""
#Sorta hackily testing the archive_repo logic here...FIXME later
self.assertTrue(self.wiki_repo.has_wiki)
wiki_url = self.wiki_repo.clone_url[:-3]+'wiki.git'
#finally get the repo code itself
clone_url = self.wiki_repo.clone_url
archive_name = clone_and_archive(self.repo_w_wiki, clone_url, self.time, TestGetGithub._current_dir, [], wiki_url)
name = '{repo}_github_archive_{time}.tar.gz'.format(repo=self.repo_w_wiki, time=self.time)
self.assertEqual(archive_name.name, name)
self.assertTrue((TestGetGithub._current_dir/name).exists())
        #TODO test existence of wiki in archive
@unittest.skip("Incomplete mock implementation for dumped item")
def test_dump_list(self):
pass
#Quick json format test for list objects
outfile = dump_list("test_repo", self.time, TestGetGithub._current_dir, "test_key", [ '{json=dict, test=values}', 'json=dict2, test=values2}'])
with open(outfile) as fp:
data = json.load(fp)
            self.assertEqual(data['json'], 'dict')
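# Illustrative note (not part of the original tests): load_token expects a config.yaml at
# the project root shaped roughly like the sketch below. The token value is a hypothetical
# placeholder, not a real credential.
_EXAMPLE_CONFIG_YAML = """\
token: ghp_exampletoken1234567890   # hypothetical GitHub API token
"""
def _example_load_token_from_string():
    # Mirrors the parsing done in TestGetGithub.load_token, but on the in-memory sketch.
    config = yaml.load(_EXAMPLE_CONFIG_YAML, Loader=yaml.FullLoader)
    return config['token']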
| 44.253247 | 151 | 0.650624 | [
"CC0-1.0"
] | hellkite500/github_archive | github_archive/test/test_get_github.py | 6,815 | Python |
# Copyright 2020 Google Research. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""AutoAugment.
[1] Barret, et al. Learning Data Augmentation Strategies for Object Detection.
Arxiv: https://arxiv.org/abs/1906.11172
"""
import inspect
import math
from absl import logging
import tensorflow.compat.v1 as tf
from tensorflow_addons import image as image_ops
import hparams_config
# This signifies the max integer that the controller RNN could predict for the
# augmentation scheme.
_MAX_LEVEL = 10.
# Represents an invalid bounding box that is used for checking for padding
# lists of bounding box coordinates for a few augmentation operations
_INVALID_BOX = [[-1.0, -1.0, -1.0, -1.0]]
def policy_v0():
"""Autoaugment policy that was used in AutoAugment Detection Paper."""
# Each tuple is an augmentation operation of the form
# (operation, probability, magnitude). Each element in policy is a
# sub-policy that will be applied sequentially on the image.
policy = [
[('TranslateX_BBox', 0.6, 4), ('Equalize', 0.8, 10)],
[('TranslateY_Only_BBoxes', 0.2, 2), ('Cutout', 0.8, 8)],
[('Sharpness', 0.0, 8), ('ShearX_BBox', 0.4, 0)],
[('ShearY_BBox', 1.0, 2), ('TranslateY_Only_BBoxes', 0.6, 6)],
[('Rotate_BBox', 0.6, 10), ('Color', 1.0, 6)],
]
return policy
def policy_v1():
"""Autoaugment policy that was used in AutoAugment Detection Paper."""
# Each tuple is an augmentation operation of the form
# (operation, probability, magnitude). Each element in policy is a
# sub-policy that will be applied sequentially on the image.
policy = [
[('TranslateX_BBox', 0.6, 4), ('Equalize', 0.8, 10)],
[('TranslateY_Only_BBoxes', 0.2, 2), ('Cutout', 0.8, 8)],
[('Sharpness', 0.0, 8), ('ShearX_BBox', 0.4, 0)],
[('ShearY_BBox', 1.0, 2), ('TranslateY_Only_BBoxes', 0.6, 6)],
[('Rotate_BBox', 0.6, 10), ('Color', 1.0, 6)],
[('Color', 0.0, 0), ('ShearX_Only_BBoxes', 0.8, 4)],
[('ShearY_Only_BBoxes', 0.8, 2), ('Flip_Only_BBoxes', 0.0, 10)],
[('Equalize', 0.6, 10), ('TranslateX_BBox', 0.2, 2)],
[('Color', 1.0, 10), ('TranslateY_Only_BBoxes', 0.4, 6)],
[('Rotate_BBox', 0.8, 10), ('Contrast', 0.0, 10)],
[('Cutout', 0.2, 2), ('Brightness', 0.8, 10)],
[('Color', 1.0, 6), ('Equalize', 1.0, 2)],
[('Cutout_Only_BBoxes', 0.4, 6), ('TranslateY_Only_BBoxes', 0.8, 2)],
[('Color', 0.2, 8), ('Rotate_BBox', 0.8, 10)],
[('Sharpness', 0.4, 4), ('TranslateY_Only_BBoxes', 0.0, 4)],
[('Sharpness', 1.0, 4), ('SolarizeAdd', 0.4, 4)],
[('Rotate_BBox', 1.0, 8), ('Sharpness', 0.2, 8)],
[('ShearY_BBox', 0.6, 10), ('Equalize_Only_BBoxes', 0.6, 8)],
[('ShearX_BBox', 0.2, 6), ('TranslateY_Only_BBoxes', 0.2, 10)],
[('SolarizeAdd', 0.6, 8), ('Brightness', 0.8, 10)],
]
return policy
def policy_vtest():
"""Autoaugment test policy for debugging."""
# Each tuple is an augmentation operation of the form
# (operation, probability, magnitude). Each element in policy is a
# sub-policy that will be applied sequentially on the image.
policy = [
[('TranslateX_BBox', 1.0, 4), ('Equalize', 1.0, 10)],
]
return policy
def policy_v2():
"""Additional policy that performs well on object detection."""
# Each tuple is an augmentation operation of the form
# (operation, probability, magnitude). Each element in policy is a
# sub-policy that will be applied sequentially on the image.
policy = [
[('Color', 0.0, 6), ('Cutout', 0.6, 8), ('Sharpness', 0.4, 8)],
[('Rotate_BBox', 0.4, 8), ('Sharpness', 0.4, 2),
('Rotate_BBox', 0.8, 10)],
[('TranslateY_BBox', 1.0, 8), ('AutoContrast', 0.8, 2)],
[('AutoContrast', 0.4, 6), ('ShearX_BBox', 0.8, 8),
('Brightness', 0.0, 10)],
[('SolarizeAdd', 0.2, 6), ('Contrast', 0.0, 10),
('AutoContrast', 0.6, 0)],
[('Cutout', 0.2, 0), ('Solarize', 0.8, 8), ('Color', 1.0, 4)],
[('TranslateY_BBox', 0.0, 4), ('Equalize', 0.6, 8),
('Solarize', 0.0, 10)],
[('TranslateY_BBox', 0.2, 2), ('ShearY_BBox', 0.8, 8),
('Rotate_BBox', 0.8, 8)],
[('Cutout', 0.8, 8), ('Brightness', 0.8, 8), ('Cutout', 0.2, 2)],
[('Color', 0.8, 4), ('TranslateY_BBox', 1.0, 6), ('Rotate_BBox', 0.6, 6)],
[('Rotate_BBox', 0.6, 10), ('BBox_Cutout', 1.0, 4), ('Cutout', 0.2, 8)],
[('Rotate_BBox', 0.0, 0), ('Equalize', 0.6, 6), ('ShearY_BBox', 0.6, 8)],
[('Brightness', 0.8, 8), ('AutoContrast', 0.4, 2),
('Brightness', 0.2, 2)],
[('TranslateY_BBox', 0.4, 8), ('Solarize', 0.4, 6),
('SolarizeAdd', 0.2, 10)],
[('Contrast', 1.0, 10), ('SolarizeAdd', 0.2, 8), ('Equalize', 0.2, 4)],
]
return policy
def policy_v3():
""""Additional policy that performs well on object detection."""
# Each tuple is an augmentation operation of the form
# (operation, probability, magnitude). Each element in policy is a
# sub-policy that will be applied sequentially on the image.
policy = [
[('Posterize', 0.8, 2), ('TranslateX_BBox', 1.0, 8)],
[('BBox_Cutout', 0.2, 10), ('Sharpness', 1.0, 8)],
[('Rotate_BBox', 0.6, 8), ('Rotate_BBox', 0.8, 10)],
[('Equalize', 0.8, 10), ('AutoContrast', 0.2, 10)],
[('SolarizeAdd', 0.2, 2), ('TranslateY_BBox', 0.2, 8)],
[('Sharpness', 0.0, 2), ('Color', 0.4, 8)],
[('Equalize', 1.0, 8), ('TranslateY_BBox', 1.0, 8)],
[('Posterize', 0.6, 2), ('Rotate_BBox', 0.0, 10)],
[('AutoContrast', 0.6, 0), ('Rotate_BBox', 1.0, 6)],
# [('Equalize', 0.0, 4), ('Cutout', 0.8, 10)],
[('Brightness', 1.0, 2), ('TranslateY_BBox', 1.0, 6)],
[('Contrast', 0.0, 2), ('ShearY_BBox', 0.8, 0)],
[('AutoContrast', 0.8, 10), ('Contrast', 0.2, 10)],
# [('Rotate_BBox', 1.0, 10), ('Cutout', 1.0, 10)],
[('SolarizeAdd', 0.8, 6), ('Equalize', 0.8, 8)],
]
return policy
def blend(image1, image2, factor):
"""Blend image1 and image2 using 'factor'.
Factor can be above 0.0. A value of 0.0 means only image1 is used.
A value of 1.0 means only image2 is used. A value between 0.0 and
1.0 means we linearly interpolate the pixel values between the two
images. A value greater than 1.0 "extrapolates" the difference
between the two pixel values, and we clip the results to values
between 0 and 255.
Args:
image1: An image Tensor of type uint8.
image2: An image Tensor of type uint8.
factor: A floating point value above 0.0.
Returns:
A blended image Tensor of type uint8.
"""
if factor == 0.0:
return tf.convert_to_tensor(image1)
if factor == 1.0:
return tf.convert_to_tensor(image2)
image1 = tf.to_float(image1)
image2 = tf.to_float(image2)
difference = image2 - image1
scaled = factor * difference
# Do addition in float.
temp = tf.to_float(image1) + scaled
# Interpolate
if factor > 0.0 and factor < 1.0:
# Interpolation means we always stay within 0 and 255.
return tf.cast(temp, tf.uint8)
# Extrapolate:
#
# We need to clip and then cast.
return tf.cast(tf.clip_by_value(temp, 0.0, 255.0), tf.uint8)
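# Illustrative sketch (not part of the original module): numeric intuition for blend().
# For pixels p1 and p2 the result is p1 + factor * (p2 - p1), clipped to [0, 255] when
# factor falls outside [0, 1]. Relies on the module-level `tf` (tf.compat.v1) import.
def _blend_example():
  image1 = tf.zeros([1, 1, 3], dtype=tf.uint8)             # all pixels 0
  image2 = tf.ones([1, 1, 3], dtype=tf.uint8) * 200        # all pixels 200
  return blend(image1, image2, 0.5)                        # expected: all pixels 100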
def cutout(image, pad_size, replace=0):
"""Apply cutout (https://arxiv.org/abs/1708.04552) to image.
This operation applies a (2*pad_size x 2*pad_size) mask of zeros to
a random location within `img`. The pixel values filled in will be of the
value `replace`. The located where the mask will be applied is randomly
chosen uniformly over the whole image.
Args:
image: An image Tensor of type uint8.
pad_size: Specifies how big the zero mask that will be generated is that
is applied to the image. The mask will be of size
(2*pad_size x 2*pad_size).
replace: What pixel value to fill in the image in the area that has
the cutout mask applied to it.
Returns:
An image Tensor that is of type uint8.
"""
image_height = tf.maximum(tf.shape(image)[0], 10)
image_width = tf.maximum(tf.shape(image)[1], 10)
# Sample the center location in the image where the zero mask will be applied.
cutout_center_height = tf.random_uniform(
shape=[], minval=0, maxval=image_height,
dtype=tf.int32)
cutout_center_width = tf.random_uniform(
shape=[], minval=0, maxval=image_width,
dtype=tf.int32)
lower_pad = tf.maximum(0, cutout_center_height - pad_size)
upper_pad = tf.maximum(0, image_height - cutout_center_height - pad_size)
left_pad = tf.maximum(0, cutout_center_width - pad_size)
right_pad = tf.maximum(0, image_width - cutout_center_width - pad_size)
cutout_shape = [image_height - (lower_pad + upper_pad),
image_width - (left_pad + right_pad)]
padding_dims = [[lower_pad, upper_pad], [left_pad, right_pad]]
mask = tf.pad(
tf.zeros(cutout_shape, dtype=image.dtype),
padding_dims, constant_values=1)
mask = tf.expand_dims(mask, -1)
mask = tf.tile(mask, [1, 1, 3])
image = tf.where(
tf.equal(mask, 0),
tf.ones_like(image, dtype=image.dtype) * replace,
image)
return image
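# Illustrative sketch (not part of the original module): applying cutout() to a plain
# white image. A patch of up to (2*pad_size x 2*pad_size) pixels around a randomly chosen
# centre is replaced with the grey value passed as `replace`.
def _cutout_example():
  image = tf.ones([32, 32, 3], dtype=tf.uint8) * 255
  return cutout(image, pad_size=4, replace=128)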
def solarize(image, threshold=128):
# For each pixel in the image, select the pixel
# if the value is less than the threshold.
  # Otherwise, invert it by computing 255 - pixel.
return tf.where(image < threshold, image, 255 - image)
def solarize_add(image, addition=0, threshold=128):
# For each pixel in the image less than threshold
# we add 'addition' amount to it and then clip the
# pixel value to be between 0 and 255. The value
# of 'addition' is between -128 and 128.
added_image = tf.cast(image, tf.int64) + addition
added_image = tf.cast(tf.clip_by_value(added_image, 0, 255), tf.uint8)
return tf.where(image < threshold, added_image, image)
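# Illustrative sketch (not part of the original module): solarize() keeps pixels below the
# threshold and inverts the rest (255 - pixel), whereas solarize_add() brightens only the
# pixels below the threshold.
def _solarize_example():
  image = tf.constant([[[100, 200, 250]]], dtype=tf.uint8)
  return solarize(image, threshold=128)   # expected pixel values: [100, 55, 5]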
def color(image, factor):
"""Equivalent of PIL Color."""
degenerate = tf.image.grayscale_to_rgb(tf.image.rgb_to_grayscale(image))
return blend(degenerate, image, factor)
def contrast(image, factor):
"""Equivalent of PIL Contrast."""
degenerate = tf.image.rgb_to_grayscale(image)
# Cast before calling tf.histogram.
degenerate = tf.cast(degenerate, tf.int32)
# Compute the grayscale histogram, then compute the mean pixel value,
# and create a constant image size of that value. Use that as the
# blending degenerate target of the original image.
mean = tf.reduce_mean(tf.cast(degenerate, tf.float32))
degenerate = tf.ones_like(degenerate, dtype=tf.float32) * mean
degenerate = tf.clip_by_value(degenerate, 0.0, 255.0)
degenerate = tf.image.grayscale_to_rgb(tf.cast(degenerate, tf.uint8))
return blend(degenerate, image, factor)
def brightness(image, factor):
"""Equivalent of PIL Brightness."""
degenerate = tf.zeros_like(image)
return blend(degenerate, image, factor)
def posterize(image, bits):
"""Equivalent of PIL Posterize."""
shift = 8 - bits
return tf.bitwise.left_shift(tf.bitwise.right_shift(image, shift), shift)
def rotate(image, degrees, replace):
"""Rotates the image by degrees either clockwise or counterclockwise.
Args:
image: An image Tensor of type uint8.
degrees: Float, a scalar angle in degrees to rotate all images by. If
degrees is positive the image will be rotated clockwise otherwise it will
be rotated counterclockwise.
replace: A one or three value 1D tensor to fill empty pixels caused by
the rotate operation.
Returns:
The rotated version of image.
"""
# Convert from degrees to radians.
degrees_to_radians = math.pi / 180.0
radians = degrees * degrees_to_radians
# In practice, we should randomize the rotation degrees by flipping
# it negatively half the time, but that's done on 'degrees' outside
# of the function.
image = image_ops.rotate(wrap(image), radians)
return unwrap(image, replace)
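# Illustrative sketch (not part of the original module): rotating an image by 30 degrees
# and filling the corners exposed by the rotation with grey. `wrap`/`unwrap` (used inside
# rotate()) are assumed to be defined elsewhere in this module, as in the original file.
def _rotate_example():
  image = tf.ones([32, 32, 3], dtype=tf.uint8) * 255
  return rotate(image, degrees=30.0, replace=[128] * 3)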
def random_shift_bbox(image, bbox, pixel_scaling, replace,
new_min_bbox_coords=None):
"""Move the bbox and the image content to a slightly new random location.
Args:
image: 3D uint8 Tensor.
bbox: 1D Tensor that has 4 elements (min_y, min_x, max_y, max_x)
of type float that represents the normalized coordinates between 0 and 1.
The potential values for the new min corner of the bbox will be between
[old_min - pixel_scaling * bbox_height/2,
       old_min + pixel_scaling * bbox_height/2].
pixel_scaling: A float between 0 and 1 that specifies the pixel range
that the new bbox location will be sampled from.
replace: A one or three value 1D tensor to fill empty pixels.
new_min_bbox_coords: If not None, then this is a tuple that specifies the
(min_y, min_x) coordinates of the new bbox. Normally this is randomly
specified, but this allows it to be manually set. The coordinates are
the absolute coordinates between 0 and image height/width and are int32.
Returns:
The new image that will have the shifted bbox location in it along with
the new bbox that contains the new coordinates.
"""
# Obtains image height and width and create helper clip functions.
image_height = tf.to_float(tf.maximum(tf.shape(image)[0], 10))
image_width = tf.to_float(tf.maximum(tf.shape(image)[1], 10))
def clip_y(val):
return tf.clip_by_value(val, 0, tf.to_int32(image_height) - 1)
def clip_x(val):
return tf.clip_by_value(val, 0, tf.to_int32(image_width) - 1)
# Convert bbox to pixel coordinates.
min_y = tf.to_int32(image_height * bbox[0])
min_x = tf.to_int32(image_width * bbox[1])
max_y = clip_y(tf.to_int32(image_height * bbox[2]))
max_x = clip_x(tf.to_int32(image_width * bbox[3]))
bbox_height, bbox_width = (max_y - min_y + 1, max_x - min_x + 1)
image_height = tf.to_int32(image_height)
image_width = tf.to_int32(image_width)
# Select the new min/max bbox ranges that are used for sampling the
# new min x/y coordinates of the shifted bbox.
minval_y = clip_y(
min_y - tf.to_int32(pixel_scaling * tf.to_float(bbox_height) / 2.0))
maxval_y = clip_y(
min_y + tf.to_int32(pixel_scaling * tf.to_float(bbox_height) / 2.0))
minval_x = clip_x(
min_x - tf.to_int32(pixel_scaling * tf.to_float(bbox_width) / 2.0))
maxval_x = clip_x(
min_x + tf.to_int32(pixel_scaling * tf.to_float(bbox_width) / 2.0))
# Sample and calculate the new unclipped min/max coordinates of the new bbox.
if new_min_bbox_coords is None:
unclipped_new_min_y = tf.random_uniform(
shape=[], minval=minval_y, maxval=maxval_y,
dtype=tf.int32)
unclipped_new_min_x = tf.random_uniform(
shape=[], minval=minval_x, maxval=maxval_x,
dtype=tf.int32)
else:
unclipped_new_min_y, unclipped_new_min_x = (
clip_y(new_min_bbox_coords[0]), clip_x(new_min_bbox_coords[1]))
unclipped_new_max_y = unclipped_new_min_y + bbox_height - 1
unclipped_new_max_x = unclipped_new_min_x + bbox_width - 1
# Determine if any of the new bbox was shifted outside the current image.
# This is used for determining if any of the original bbox content should be
# discarded.
new_min_y, new_min_x, new_max_y, new_max_x = (
clip_y(unclipped_new_min_y), clip_x(unclipped_new_min_x),
clip_y(unclipped_new_max_y), clip_x(unclipped_new_max_x))
shifted_min_y = (new_min_y - unclipped_new_min_y) + min_y
shifted_max_y = max_y - (unclipped_new_max_y - new_max_y)
shifted_min_x = (new_min_x - unclipped_new_min_x) + min_x
shifted_max_x = max_x - (unclipped_new_max_x - new_max_x)
# Create the new bbox tensor by converting pixel integer values to floats.
new_bbox = tf.stack([
tf.to_float(new_min_y) / tf.to_float(image_height),
tf.to_float(new_min_x) / tf.to_float(image_width),
tf.to_float(new_max_y) / tf.to_float(image_height),
tf.to_float(new_max_x) / tf.to_float(image_width)])
# Copy the contents in the bbox and fill the old bbox location
# with gray (128).
bbox_content = image[shifted_min_y:shifted_max_y + 1,
shifted_min_x:shifted_max_x + 1, :]
def mask_and_add_image(
min_y_, min_x_, max_y_, max_x_, mask, content_tensor, image_):
"""Applies mask to bbox region in image then adds content_tensor to it."""
mask = tf.pad(mask,
[[min_y_, (image_height - 1) - max_y_],
[min_x_, (image_width - 1) - max_x_],
[0, 0]], constant_values=1)
content_tensor = tf.pad(content_tensor,
[[min_y_, (image_height - 1) - max_y_],
[min_x_, (image_width - 1) - max_x_],
[0, 0]], constant_values=0)
return image_ * mask + content_tensor
# Zero out original bbox location.
mask = tf.zeros_like(image)[min_y:max_y+1, min_x:max_x+1, :]
grey_tensor = tf.zeros_like(mask) + replace[0]
image = mask_and_add_image(min_y, min_x, max_y, max_x, mask,
grey_tensor, image)
# Fill in bbox content to new bbox location.
mask = tf.zeros_like(bbox_content)
image = mask_and_add_image(new_min_y, new_min_x, new_max_y, new_max_x, mask,
bbox_content, image)
return image, new_bbox
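# Illustrative sketch (not part of the original module): shifting the content of a single
# normalized bbox. With pixel_scaling=1.0 the new min corner may move by up to
# +/- bbox_height/2 (or bbox_width/2); 0.5 below is an arbitrary example value.
def _random_shift_bbox_example():
  image = tf.ones([64, 64, 3], dtype=tf.uint8) * 255
  bbox = tf.constant([0.25, 0.25, 0.75, 0.75], dtype=tf.float32)
  return random_shift_bbox(image, bbox, pixel_scaling=0.5, replace=[128] * 3)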
def _clip_bbox(min_y, min_x, max_y, max_x):
"""Clip bounding box coordinates between 0 and 1.
Args:
min_y: Normalized bbox coordinate of type float between 0 and 1.
min_x: Normalized bbox coordinate of type float between 0 and 1.
max_y: Normalized bbox coordinate of type float between 0 and 1.
max_x: Normalized bbox coordinate of type float between 0 and 1.
Returns:
Clipped coordinate values between 0 and 1.
"""
min_y = tf.clip_by_value(min_y, 0.0, 1.0)
min_x = tf.clip_by_value(min_x, 0.0, 1.0)
max_y = tf.clip_by_value(max_y, 0.0, 1.0)
max_x = tf.clip_by_value(max_x, 0.0, 1.0)
return min_y, min_x, max_y, max_x
def _check_bbox_area(min_y, min_x, max_y, max_x, delta=0.05):
"""Adjusts bbox coordinates to make sure the area is > 0.
Args:
min_y: Normalized bbox coordinate of type float between 0 and 1.
min_x: Normalized bbox coordinate of type float between 0 and 1.
max_y: Normalized bbox coordinate of type float between 0 and 1.
max_x: Normalized bbox coordinate of type float between 0 and 1.
delta: Float, this is used to create a gap of size 2 * delta between
bbox min/max coordinates that are the same on the boundary.
This prevents the bbox from having an area of zero.
Returns:
Tuple of new bbox coordinates between 0 and 1 that will now have a
guaranteed area > 0.
"""
height = max_y - min_y
width = max_x - min_x
def _adjust_bbox_boundaries(min_coord, max_coord):
# Make sure max is never 0 and min is never 1.
max_coord = tf.maximum(max_coord, 0.0 + delta)
min_coord = tf.minimum(min_coord, 1.0 - delta)
return min_coord, max_coord
min_y, max_y = tf.cond(tf.equal(height, 0.0),
lambda: _adjust_bbox_boundaries(min_y, max_y),
lambda: (min_y, max_y))
min_x, max_x = tf.cond(tf.equal(width, 0.0),
lambda: _adjust_bbox_boundaries(min_x, max_x),
lambda: (min_x, max_x))
return min_y, min_x, max_y, max_x
def _scale_bbox_only_op_probability(prob):
"""Reduce the probability of the bbox-only operation.
Probability is reduced so that we do not distort the content of too many
  bounding boxes that are close to each other. The value of 3.0 was a
  hyperparameter chosen when designing the autoaugment algorithm that we found
  empirically to work well.
Args:
prob: Float that is the probability of applying the bbox-only operation.
Returns:
Reduced probability.
"""
return prob / 3.0
def _apply_bbox_augmentation(image, bbox, augmentation_func, *args):
"""Applies augmentation_func to the subsection of image indicated by bbox.
Args:
image: 3D uint8 Tensor.
bbox: 1D Tensor that has 4 elements (min_y, min_x, max_y, max_x)
of type float that represents the normalized coordinates between 0 and 1.
augmentation_func: Augmentation function that will be applied to the
subsection of image.
*args: Additional parameters that will be passed into augmentation_func
when it is called.
Returns:
A modified version of image, where the bbox location in the image will
      have `augmentation_func` applied to it.
"""
image_height = tf.to_float(tf.maximum(tf.shape(image)[0], 10))
image_width = tf.to_float(tf.maximum(tf.shape(image)[1], 10))
min_y = tf.to_int32(image_height * bbox[0])
min_x = tf.to_int32(image_width * bbox[1])
max_y = tf.to_int32(image_height * bbox[2])
max_x = tf.to_int32(image_width * bbox[3])
image_height = tf.to_int32(image_height)
image_width = tf.to_int32(image_width)
# Clip to be sure the max values do not fall out of range.
max_y = tf.minimum(max_y, image_height - 1)
max_x = tf.minimum(max_x, image_width - 1)
# Get the sub-tensor that is the image within the bounding box region.
bbox_content = image[min_y:max_y + 1, min_x:max_x + 1, :]
# Apply the augmentation function to the bbox portion of the image.
augmented_bbox_content = augmentation_func(bbox_content, *args)
# Pad the augmented_bbox_content and the mask to match the shape of original
# image.
augmented_bbox_content = tf.pad(augmented_bbox_content,
[[min_y, (image_height - 1) - max_y],
[min_x, (image_width - 1) - max_x],
[0, 0]])
# Create a mask that will be used to zero out a part of the original image.
mask_tensor = tf.zeros_like(bbox_content)
mask_tensor = tf.pad(mask_tensor,
[[min_y, (image_height - 1) - max_y],
[min_x, (image_width - 1) - max_x],
[0, 0]],
constant_values=1)
# Replace the old bbox content with the new augmented content.
image = image * mask_tensor + augmented_bbox_content
return image
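# Sketch of the composition above (not part of the original code): after padding,
# mask_tensor is 1 everywhere except inside the bbox region (where it is 0), while
# augmented_bbox_content is 0 everywhere outside the bbox region. The expression
# `image * mask_tensor + augmented_bbox_content` therefore keeps the original pixels
# outside the bbox and swaps in the augmented pixels inside it.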
def _concat_bbox(bbox, bboxes):
"""Helper function that concats bbox to bboxes along the first dimension."""
# Note if all elements in bboxes are -1 (_INVALID_BOX), then this means
# we discard bboxes and start the bboxes Tensor with the current bbox.
bboxes_sum_check = tf.reduce_sum(bboxes)
bbox = tf.expand_dims(bbox, 0)
  # This check will be true when bboxes is an _INVALID_BOX.
bboxes = tf.cond(tf.equal(bboxes_sum_check, -4.0),
lambda: bbox,
lambda: tf.concat([bboxes, bbox], 0))
return bboxes
def _apply_bbox_augmentation_wrapper(image, bbox, new_bboxes, prob,
augmentation_func, func_changes_bbox,
*args):
"""Applies _apply_bbox_augmentation with probability prob.
Args:
image: 3D uint8 Tensor.
bbox: 1D Tensor that has 4 elements (min_y, min_x, max_y, max_x)
of type float that represents the normalized coordinates between 0 and 1.
new_bboxes: 2D Tensor that is a list of the bboxes in the image after they
have been altered by aug_func. These will only be changed when
func_changes_bbox is set to true. Each bbox has 4 elements
(min_y, min_x, max_y, max_x) of type float that are the normalized
bbox coordinates between 0 and 1.
prob: Float that is the probability of applying _apply_bbox_augmentation.
augmentation_func: Augmentation function that will be applied to the
subsection of image.
func_changes_bbox: Boolean. Does augmentation_func return bbox in addition
to image.
*args: Additional parameters that will be passed into augmentation_func
when it is called.
Returns:
    A tuple. First element is a modified version of image, where the bbox
location in the image will have augmentation_func applied to it if it is
chosen to be called with probability `prob`. The second element is a
Tensor of Tensors of length 4 that will contain the altered bbox after
applying augmentation_func.
"""
should_apply_op = tf.cast(
tf.floor(tf.random_uniform([], dtype=tf.float32) + prob), tf.bool)
if func_changes_bbox:
augmented_image, bbox = tf.cond(
should_apply_op,
lambda: augmentation_func(image, bbox, *args),
lambda: (image, bbox))
else:
augmented_image = tf.cond(
should_apply_op,
lambda: _apply_bbox_augmentation(image, bbox, augmentation_func, *args),
lambda: image)
new_bboxes = _concat_bbox(bbox, new_bboxes)
return augmented_image, new_bboxes
def _apply_multi_bbox_augmentation(image, bboxes, prob, aug_func,
func_changes_bbox, *args):
"""Applies aug_func to the image for each bbox in bboxes.
Args:
image: 3D uint8 Tensor.
bboxes: 2D Tensor that is a list of the bboxes in the image. Each bbox
has 4 elements (min_y, min_x, max_y, max_x) of type float.
prob: Float that is the probability of applying aug_func to a specific
bounding box within the image.
aug_func: Augmentation function that will be applied to the
subsections of image indicated by the bbox values in bboxes.
func_changes_bbox: Boolean. Does augmentation_func return bbox in addition
to image.
*args: Additional parameters that will be passed into augmentation_func
when it is called.
Returns:
A modified version of image, where each bbox location in the image will
have augmentation_func applied to it if it is chosen to be called with
    probability prob independently across all bboxes. Also returns the final
    bboxes, which are unchanged if func_changes_bbox is set to false and are the
    newly altered ones if it is true.
"""
# Will keep track of the new altered bboxes after aug_func is repeatedly
# applied. The -1 values are a dummy value and this first Tensor will be
# removed upon appending the first real bbox.
new_bboxes = tf.constant(_INVALID_BOX)
# If the bboxes are empty, then just give it _INVALID_BOX. The result
# will be thrown away.
bboxes = tf.cond(tf.equal(tf.shape(bboxes)[0], 0),
lambda: tf.constant(_INVALID_BOX),
lambda: bboxes)
bboxes = tf.ensure_shape(bboxes, (None, 4))
# pylint:disable=g-long-lambda
# pylint:disable=line-too-long
wrapped_aug_func = lambda _image, bbox, _new_bboxes: _apply_bbox_augmentation_wrapper(
_image, bbox, _new_bboxes, prob, aug_func, func_changes_bbox, *args)
# pylint:enable=g-long-lambda
# pylint:enable=line-too-long
# Setup the while_loop.
num_bboxes = tf.shape(bboxes)[0] # We loop until we go over all bboxes.
idx = tf.constant(0) # Counter for the while loop.
  # Conditional function that decides when to end the loop once we go over all
  # bboxes. images_and_bboxes contains (_image, _new_bboxes).
cond = lambda _idx, _images_and_bboxes: tf.less(_idx, num_bboxes)
# Shuffle the bboxes so that the augmentation order is not deterministic if
# we are not changing the bboxes with aug_func.
if not func_changes_bbox:
loop_bboxes = tf.random.shuffle(bboxes)
else:
loop_bboxes = bboxes
# Main function of while_loop where we repeatedly apply augmentation on the
# bboxes in the image.
# pylint:disable=g-long-lambda
body = lambda _idx, _images_and_bboxes: [
_idx + 1, wrapped_aug_func(_images_and_bboxes[0],
loop_bboxes[_idx],
_images_and_bboxes[1])]
# pylint:enable=g-long-lambda
_, (image, new_bboxes) = tf.while_loop(
cond, body, [idx, (image, new_bboxes)],
shape_invariants=[idx.get_shape(),
(image.get_shape(), tf.TensorShape([None, 4]))])
  # Either return the altered bboxes or the original ones depending on whether
  # we altered them in any way.
if func_changes_bbox:
final_bboxes = new_bboxes
else:
final_bboxes = bboxes
return image, final_bboxes
def _apply_multi_bbox_augmentation_wrapper(image, bboxes, prob, aug_func,
func_changes_bbox, *args):
"""Checks to be sure num bboxes > 0 before calling inner function."""
num_bboxes = tf.shape(bboxes)[0]
image, bboxes = tf.cond(
tf.equal(num_bboxes, 0),
lambda: (image, bboxes),
# pylint:disable=g-long-lambda
lambda: _apply_multi_bbox_augmentation(
image, bboxes, prob, aug_func, func_changes_bbox, *args))
# pylint:enable=g-long-lambda
return image, bboxes
def rotate_only_bboxes(image, bboxes, prob, degrees, replace):
"""Apply rotate to each bbox in the image with probability prob."""
func_changes_bbox = False
prob = _scale_bbox_only_op_probability(prob)
return _apply_multi_bbox_augmentation_wrapper(
image, bboxes, prob, rotate, func_changes_bbox, degrees, replace)
def shear_x_only_bboxes(image, bboxes, prob, level, replace):
"""Apply shear_x to each bbox in the image with probability prob."""
func_changes_bbox = False
prob = _scale_bbox_only_op_probability(prob)
return _apply_multi_bbox_augmentation_wrapper(
image, bboxes, prob, shear_x, func_changes_bbox, level, replace)
def shear_y_only_bboxes(image, bboxes, prob, level, replace):
"""Apply shear_y to each bbox in the image with probability prob."""
func_changes_bbox = False
prob = _scale_bbox_only_op_probability(prob)
return _apply_multi_bbox_augmentation_wrapper(
image, bboxes, prob, shear_y, func_changes_bbox, level, replace)
def translate_x_only_bboxes(image, bboxes, prob, pixels, replace):
"""Apply translate_x to each bbox in the image with probability prob."""
func_changes_bbox = False
prob = _scale_bbox_only_op_probability(prob)
return _apply_multi_bbox_augmentation_wrapper(
image, bboxes, prob, translate_x, func_changes_bbox, pixels, replace)
def translate_y_only_bboxes(image, bboxes, prob, pixels, replace):
"""Apply translate_y to each bbox in the image with probability prob."""
func_changes_bbox = False
prob = _scale_bbox_only_op_probability(prob)
return _apply_multi_bbox_augmentation_wrapper(
image, bboxes, prob, translate_y, func_changes_bbox, pixels, replace)
def flip_only_bboxes(image, bboxes, prob):
"""Apply flip_lr to each bbox in the image with probability prob."""
func_changes_bbox = False
prob = _scale_bbox_only_op_probability(prob)
return _apply_multi_bbox_augmentation_wrapper(
image, bboxes, prob, tf.image.flip_left_right, func_changes_bbox)
def solarize_only_bboxes(image, bboxes, prob, threshold):
"""Apply solarize to each bbox in the image with probability prob."""
func_changes_bbox = False
prob = _scale_bbox_only_op_probability(prob)
return _apply_multi_bbox_augmentation_wrapper(
image, bboxes, prob, solarize, func_changes_bbox, threshold)
def equalize_only_bboxes(image, bboxes, prob):
"""Apply equalize to each bbox in the image with probability prob."""
func_changes_bbox = False
prob = _scale_bbox_only_op_probability(prob)
return _apply_multi_bbox_augmentation_wrapper(
image, bboxes, prob, equalize, func_changes_bbox)
def cutout_only_bboxes(image, bboxes, prob, pad_size, replace):
"""Apply cutout to each bbox in the image with probability prob."""
func_changes_bbox = False
prob = _scale_bbox_only_op_probability(prob)
return _apply_multi_bbox_augmentation_wrapper(
image, bboxes, prob, cutout, func_changes_bbox, pad_size, replace)
def _rotate_bbox(bbox, image_height, image_width, degrees):
"""Rotates the bbox coordinated by degrees.
Args:
bbox: 1D Tensor that has 4 elements (min_y, min_x, max_y, max_x)
of type float that represents the normalized coordinates between 0 and 1.
image_height: Int, height of the image.
    image_width: Int, width of the image.
degrees: Float, a scalar angle in degrees to rotate all images by. If
degrees is positive the image will be rotated clockwise otherwise it will
be rotated counterclockwise.
Returns:
A tensor of the same shape as bbox, but now with the rotated coordinates.
"""
image_height, image_width = (
tf.to_float(image_height), tf.to_float(image_width))
# Convert from degrees to radians.
degrees_to_radians = math.pi / 180.0
radians = degrees * degrees_to_radians
# Translate the bbox to the center of the image and turn the normalized 0-1
# coordinates to absolute pixel locations.
# Y coordinates are made negative as the y axis of images goes down with
# increasing pixel values, so we negate to make sure x axis and y axis points
# are in the traditionally positive direction.
min_y = -tf.to_int32(image_height * (bbox[0] - 0.5))
min_x = tf.to_int32(image_width * (bbox[1] - 0.5))
max_y = -tf.to_int32(image_height * (bbox[2] - 0.5))
max_x = tf.to_int32(image_width * (bbox[3] - 0.5))
coordinates = tf.stack(
[[min_y, min_x], [min_y, max_x], [max_y, min_x], [max_y, max_x]])
coordinates = tf.cast(coordinates, tf.float32)
  # Rotate the coordinates according to the rotation matrix: clockwise when
  # radians is positive, counterclockwise when it is negative.
rotation_matrix = tf.stack(
[[tf.cos(radians), tf.sin(radians)],
[-tf.sin(radians), tf.cos(radians)]])
new_coords = tf.cast(
tf.matmul(rotation_matrix, tf.transpose(coordinates)), tf.int32)
# Find min/max values and convert them back to normalized 0-1 floats.
min_y = -(tf.to_float(tf.reduce_max(new_coords[0, :])) / image_height - 0.5)
min_x = tf.to_float(tf.reduce_min(new_coords[1, :])) / image_width + 0.5
max_y = -(tf.to_float(tf.reduce_min(new_coords[0, :])) / image_height - 0.5)
max_x = tf.to_float(tf.reduce_max(new_coords[1, :])) / image_width + 0.5
  # Clip the bboxes to be sure they fall between [0, 1].
min_y, min_x, max_y, max_x = _clip_bbox(min_y, min_x, max_y, max_x)
min_y, min_x, max_y, max_x = _check_bbox_area(min_y, min_x, max_y, max_x)
return tf.stack([min_y, min_x, max_y, max_x])
def rotate_with_bboxes(image, bboxes, degrees, replace):
"""Equivalent of PIL Rotate that rotates the image and bbox.
Args:
image: 3D uint8 Tensor.
bboxes: 2D Tensor that is a list of the bboxes in the image. Each bbox
has 4 elements (min_y, min_x, max_y, max_x) of type float.
degrees: Float, a scalar angle in degrees to rotate all images by. If
degrees is positive the image will be rotated clockwise otherwise it will
be rotated counterclockwise.
replace: A one or three value 1D tensor to fill empty pixels.
Returns:
A tuple containing a 3D uint8 Tensor that will be the result of rotating
image by degrees. The second element of the tuple is bboxes, where now
the coordinates will be shifted to reflect the rotated image.
"""
# Rotate the image.
image = rotate(image, degrees, replace)
# Convert bbox coordinates to pixel values.
image_height = tf.shape(image)[0]
image_width = tf.shape(image)[1]
# pylint:disable=g-long-lambda
wrapped_rotate_bbox = lambda bbox: _rotate_bbox(
bbox, image_height, image_width, degrees)
# pylint:enable=g-long-lambda
bboxes = tf.map_fn(wrapped_rotate_bbox, bboxes)
return image, bboxes
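# Usage sketch (illustrative only; shapes and values are made up):
#
#   image = tf.zeros([224, 224, 3], dtype=tf.uint8)
#   bboxes = tf.constant([[0.1, 0.2, 0.5, 0.6]], dtype=tf.float32)
#   rotated_image, rotated_bboxes = rotate_with_bboxes(
#       image, bboxes, degrees=30.0, replace=[128, 128, 128])
#
# Each returned box is the axis-aligned bound of its rotated corners (computed via
# tf.map_fn over _rotate_bbox), so boxes generally grow after rotation.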
def translate_x(image, pixels, replace):
"""Equivalent of PIL Translate in X dimension."""
image = image_ops.translate(wrap(image), [-pixels, 0])
return unwrap(image, replace)
def translate_y(image, pixels, replace):
"""Equivalent of PIL Translate in Y dimension."""
image = image_ops.translate(wrap(image), [0, -pixels])
return unwrap(image, replace)
def _shift_bbox(bbox, image_height, image_width, pixels, shift_horizontal):
"""Shifts the bbox coordinates by pixels.
Args:
bbox: 1D Tensor that has 4 elements (min_y, min_x, max_y, max_x)
of type float that represents the normalized coordinates between 0 and 1.
image_height: Int, height of the image.
image_width: Int, width of the image.
pixels: An int. How many pixels to shift the bbox.
shift_horizontal: Boolean. If true then shift in X dimension else shift in
Y dimension.
Returns:
A tensor of the same shape as bbox, but now with the shifted coordinates.
"""
pixels = tf.to_int32(pixels)
# Convert bbox to integer pixel locations.
min_y = tf.to_int32(tf.to_float(image_height) * bbox[0])
min_x = tf.to_int32(tf.to_float(image_width) * bbox[1])
max_y = tf.to_int32(tf.to_float(image_height) * bbox[2])
max_x = tf.to_int32(tf.to_float(image_width) * bbox[3])
if shift_horizontal:
min_x = tf.maximum(0, min_x - pixels)
max_x = tf.minimum(image_width, max_x - pixels)
else:
min_y = tf.maximum(0, min_y - pixels)
max_y = tf.minimum(image_height, max_y - pixels)
# Convert bbox back to floats.
min_y = tf.to_float(min_y) / tf.to_float(image_height)
min_x = tf.to_float(min_x) / tf.to_float(image_width)
max_y = tf.to_float(max_y) / tf.to_float(image_height)
max_x = tf.to_float(max_x) / tf.to_float(image_width)
  # Clip the bboxes to be sure they fall between [0, 1].
min_y, min_x, max_y, max_x = _clip_bbox(min_y, min_x, max_y, max_x)
min_y, min_x, max_y, max_x = _check_bbox_area(min_y, min_x, max_y, max_x)
return tf.stack([min_y, min_x, max_y, max_x])
def translate_bbox(image, bboxes, pixels, replace, shift_horizontal):
"""Equivalent of PIL Translate in X/Y dimension that shifts image and bbox.
Args:
image: 3D uint8 Tensor.
bboxes: 2D Tensor that is a list of the bboxes in the image. Each bbox
has 4 elements (min_y, min_x, max_y, max_x) of type float with values
between [0, 1].
pixels: An int. How many pixels to shift the image and bboxes
replace: A one or three value 1D tensor to fill empty pixels.
shift_horizontal: Boolean. If true then shift in X dimension else shift in
Y dimension.
Returns:
A tuple containing a 3D uint8 Tensor that will be the result of translating
image by pixels. The second element of the tuple is bboxes, where now
the coordinates will be shifted to reflect the shifted image.
"""
if shift_horizontal:
image = translate_x(image, pixels, replace)
else:
image = translate_y(image, pixels, replace)
# Convert bbox coordinates to pixel values.
image_height = tf.shape(image)[0]
image_width = tf.shape(image)[1]
# pylint:disable=g-long-lambda
wrapped_shift_bbox = lambda bbox: _shift_bbox(
bbox, image_height, image_width, pixels, shift_horizontal)
# pylint:enable=g-long-lambda
bboxes = tf.map_fn(wrapped_shift_bbox, bboxes)
return image, bboxes
def shear_x(image, level, replace):
"""Equivalent of PIL Shearing in X dimension."""
# Shear parallel to x axis is a projective transform
# with a matrix form of:
# [1 level
# 0 1].
image = image_ops.transform(
wrap(image), [1., level, 0., 0., 1., 0., 0., 0.])
return unwrap(image, replace)
def shear_y(image, level, replace):
"""Equivalent of PIL Shearing in Y dimension."""
# Shear parallel to y axis is a projective transform
# with a matrix form of:
# [1 0
# level 1].
image = image_ops.transform(
wrap(image), [1., 0., 0., level, 1., 0., 0., 0.])
return unwrap(image, replace)
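# Note on the 8-element vectors above (an interpretation, not part of the original
# code): image_ops.transform takes a flattened projective matrix
# [a0, a1, a2, b0, b1, b2, c0, c1] that maps an output pixel (x, y) to the input
# location (a0*x + a1*y + a2, b0*x + b1*y + b2), with the last matrix entry fixed
# at 1. Shearing in x sets a1 = level (rows are sampled with a horizontal offset
# proportional to y); shearing in y sets b0 = level (columns are sampled with a
# vertical offset proportional to x).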
def _shear_bbox(bbox, image_height, image_width, level, shear_horizontal):
"""Shifts the bbox according to how the image was sheared.
Args:
bbox: 1D Tensor that has 4 elements (min_y, min_x, max_y, max_x)
of type float that represents the normalized coordinates between 0 and 1.
image_height: Int, height of the image.
    image_width: Int, width of the image.
level: Float. How much to shear the image.
shear_horizontal: If true then shear in X dimension else shear in
the Y dimension.
Returns:
A tensor of the same shape as bbox, but now with the shifted coordinates.
"""
image_height, image_width = (
tf.to_float(image_height), tf.to_float(image_width))
# Change bbox coordinates to be pixels.
min_y = tf.to_int32(image_height * bbox[0])
min_x = tf.to_int32(image_width * bbox[1])
max_y = tf.to_int32(image_height * bbox[2])
max_x = tf.to_int32(image_width * bbox[3])
coordinates = tf.stack(
[[min_y, min_x], [min_y, max_x], [max_y, min_x], [max_y, max_x]])
coordinates = tf.cast(coordinates, tf.float32)
# Shear the coordinates according to the translation matrix.
if shear_horizontal:
translation_matrix = tf.stack(
[[1, 0], [-level, 1]])
else:
translation_matrix = tf.stack(
[[1, -level], [0, 1]])
translation_matrix = tf.cast(translation_matrix, tf.float32)
new_coords = tf.cast(
tf.matmul(translation_matrix, tf.transpose(coordinates)), tf.int32)
# Find min/max values and convert them back to floats.
min_y = tf.to_float(tf.reduce_min(new_coords[0, :])) / image_height
min_x = tf.to_float(tf.reduce_min(new_coords[1, :])) / image_width
max_y = tf.to_float(tf.reduce_max(new_coords[0, :])) / image_height
max_x = tf.to_float(tf.reduce_max(new_coords[1, :])) / image_width
  # Clip the bboxes to be sure they fall between [0, 1].
min_y, min_x, max_y, max_x = _clip_bbox(min_y, min_x, max_y, max_x)
min_y, min_x, max_y, max_x = _check_bbox_area(min_y, min_x, max_y, max_x)
return tf.stack([min_y, min_x, max_y, max_x])
def shear_with_bboxes(image, bboxes, level, replace, shear_horizontal):
"""Applies Shear Transformation to the image and shifts the bboxes.
Args:
image: 3D uint8 Tensor.
bboxes: 2D Tensor that is a list of the bboxes in the image. Each bbox
has 4 elements (min_y, min_x, max_y, max_x) of type float with values
between [0, 1].
level: Float. How much to shear the image. This value will be between
-0.3 to 0.3.
replace: A one or three value 1D tensor to fill empty pixels.
shear_horizontal: Boolean. If true then shear in X dimension else shear in
the Y dimension.
Returns:
A tuple containing a 3D uint8 Tensor that will be the result of shearing
image by level. The second element of the tuple is bboxes, where now
the coordinates will be shifted to reflect the sheared image.
"""
if shear_horizontal:
image = shear_x(image, level, replace)
else:
image = shear_y(image, level, replace)
# Convert bbox coordinates to pixel values.
image_height = tf.shape(image)[0]
image_width = tf.shape(image)[1]
# pylint:disable=g-long-lambda
wrapped_shear_bbox = lambda bbox: _shear_bbox(
bbox, image_height, image_width, level, shear_horizontal)
# pylint:enable=g-long-lambda
bboxes = tf.map_fn(wrapped_shear_bbox, bboxes)
return image, bboxes
def autocontrast(image):
"""Implements Autocontrast function from PIL using TF ops.
Args:
image: A 3D uint8 tensor.
Returns:
The image after it has had autocontrast applied to it and will be of type
uint8.
"""
def scale_channel(image):
"""Scale the 2D image using the autocontrast rule."""
# A possibly cheaper version can be done using cumsum/unique_with_counts
    # over the histogram values, rather than iterating over the entire image
    # to compute mins and maxes.
lo = tf.to_float(tf.reduce_min(image))
hi = tf.to_float(tf.reduce_max(image))
# Scale the image, making the lowest value 0 and the highest value 255.
def scale_values(im):
scale = 255.0 / (hi - lo)
offset = -lo * scale
im = tf.to_float(im) * scale + offset
im = tf.clip_by_value(im, 0.0, 255.0)
return tf.cast(im, tf.uint8)
result = tf.cond(hi > lo, lambda: scale_values(image), lambda: image)
return result
# Assumes RGB for now. Scales each channel independently
# and then stacks the result.
s1 = scale_channel(image[:, :, 0])
s2 = scale_channel(image[:, :, 1])
s3 = scale_channel(image[:, :, 2])
image = tf.stack([s1, s2, s3], 2)
return image
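# Worked example for scale_channel above (illustrative only): if a channel spans
# lo = 64 and hi = 192, then scale = 255 / 128 and offset = -64 * scale = -127.5,
# so 64 maps to 0 and 192 maps to 255; the channel is stretched to the full uint8
# range with intermediate values scaled linearly.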
def sharpness(image, factor):
"""Implements Sharpness function from PIL using TF ops."""
orig_image = image
image = tf.cast(image, tf.float32)
# Make image 4D for conv operation.
image = tf.expand_dims(image, 0)
# SMOOTH PIL Kernel.
kernel = tf.constant(
[[1, 1, 1], [1, 5, 1], [1, 1, 1]], dtype=tf.float32,
shape=[3, 3, 1, 1]) / 13.
# Tile across channel dimension.
kernel = tf.tile(kernel, [1, 1, 3, 1])
strides = [1, 1, 1, 1]
with tf.device('/cpu:0'):
degenerate = tf.nn.depthwise_conv2d(
image, kernel, strides, padding='VALID', rate=[1, 1])
degenerate = tf.clip_by_value(degenerate, 0.0, 255.0)
degenerate = tf.squeeze(tf.cast(degenerate, tf.uint8), [0])
# For the borders of the resulting image, fill in the values of the
# original image.
mask = tf.ones_like(degenerate)
padded_mask = tf.pad(mask, [[1, 1], [1, 1], [0, 0]])
padded_degenerate = tf.pad(degenerate, [[1, 1], [1, 1], [0, 0]])
result = tf.where(tf.equal(padded_mask, 1), padded_degenerate, orig_image)
# Blend the final result.
return blend(result, orig_image, factor)
def equalize(image):
"""Implements Equalize function from PIL using TF ops."""
def scale_channel(im, c):
"""Scale the data in the channel to implement equalize."""
im = tf.cast(im[:, :, c], tf.int32)
# Compute the histogram of the image channel.
histo = tf.histogram_fixed_width(im, [0, 255], nbins=256)
# For the purposes of computing the step, filter out the nonzeros.
nonzero = tf.where(tf.not_equal(histo, 0))
nonzero_histo = tf.reshape(tf.gather(histo, nonzero), [-1])
step = (tf.reduce_sum(nonzero_histo) - nonzero_histo[-1]) // 255
def build_lut(histo, step):
# Compute the cumulative sum, shifting by step // 2
# and then normalization by step.
lut = (tf.cumsum(histo) + (step // 2)) // step
# Shift lut, prepending with 0.
lut = tf.concat([[0], lut[:-1]], 0)
# Clip the counts to be in range. This is done
# in the C code for image.point.
return tf.clip_by_value(lut, 0, 255)
# If step is zero, return the original image. Otherwise, build
# lut from the full histogram and step and then index from it.
result = tf.cond(tf.equal(step, 0),
lambda: im,
lambda: tf.gather(build_lut(histo, step), im))
return tf.cast(result, tf.uint8)
# Assumes RGB for now. Scales each channel independently
# and then stacks the result.
s1 = scale_channel(image, 0)
s2 = scale_channel(image, 1)
s3 = scale_channel(image, 2)
image = tf.stack([s1, s2, s3], 2)
return image
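# Worked example for the step computation above (illustrative only): for a tiny 2x2
# channel with values [0, 0, 128, 255], the nonzero histogram counts are [2, 1, 1],
# so step = (4 - 1) // 255 = 0 and the channel is returned unchanged; there are too
# few pixels to build a meaningful 256-entry lookup table.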
def wrap(image):
"""Returns 'image' with an extra channel set to all 1s."""
shape = tf.shape(image)
extended_channel = tf.ones([shape[0], shape[1], 1], image.dtype)
extended = tf.concat([image, extended_channel], 2)
return extended
def unwrap(image, replace):
"""Unwraps an image produced by wrap.
  Where there is a 0 in the last channel for a spatial position,
  the other three channels at that spatial position are filled in with
  the `replace` value. Operations like translate and shear on a wrapped
Tensor will leave 0s in empty locations. Some transformations look
at the intensity of values to do preprocessing, and we want these
empty pixels to assume the 'average' value, rather than pure black.
Args:
image: A 3D Image Tensor with 4 channels.
replace: A one or three value 1D tensor to fill empty pixels.
Returns:
image: A 3D image Tensor with 3 channels.
"""
image_shape = tf.shape(image)
# Flatten the spatial dimensions.
flattened_image = tf.reshape(image, [-1, image_shape[2]])
# Find all pixels where the last channel is zero.
alpha_channel = flattened_image[:, 3]
replace = tf.concat([replace, tf.ones([1], image.dtype)], 0)
# Where they are zero, fill them in with 'replace'.
flattened_image = tf.where(
tf.equal(alpha_channel, 0),
tf.ones_like(flattened_image, dtype=image.dtype) * replace,
flattened_image)
image = tf.reshape(flattened_image, image_shape)
image = tf.slice(image, [0, 0, 0], [image_shape[0], image_shape[1], 3])
return image
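# Usage sketch of the wrap/unwrap pair (illustrative only, mirroring translate_x
# above):
#
#   padded = wrap(image)                             # append an all-ones channel
#   shifted = image_ops.translate(padded, [10, 0])   # empty pixels get 0 everywhere
#   restored = unwrap(shifted, replace=[128, 128, 128])
#
# Pixels introduced by the translation have 0 in the extra channel, so unwrap can
# find them and fill them with `replace` instead of leaving them black.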
def _cutout_inside_bbox(image, bbox, pad_fraction):
"""Generates cutout mask and the mean pixel value of the bbox.
  First a location is randomly chosen within the bbox as the center where the
cutout mask will be applied. Note this can be towards the boundaries of the
image, so the full cutout mask may not be applied.
Args:
image: 3D uint8 Tensor.
bbox: 1D Tensor that has 4 elements (min_y, min_x, max_y, max_x)
of type float that represents the normalized coordinates between 0 and 1.
    pad_fraction: Float that specifies how large the cutout mask should be
in reference to the size of the original bbox. If pad_fraction is 0.25,
then the cutout mask will be of shape
(0.25 * bbox height, 0.25 * bbox width).
Returns:
    A tuple. First element is a tensor of the same shape as image where each
element is either a 1 or 0 that is used to determine where the image
will have cutout applied. The second element is the mean of the pixels
in the image where the bbox is located.
"""
image_height = tf.maximum(tf.shape(image)[0], 10)
image_width = tf.maximum(tf.shape(image)[1], 10)
# Transform from shape [1, 4] to [4].
bbox = tf.squeeze(bbox)
min_y = tf.to_int32(tf.to_float(image_height) * bbox[0])
min_x = tf.to_int32(tf.to_float(image_width) * bbox[1])
max_y = tf.to_int32(tf.to_float(image_height) * bbox[2])
max_x = tf.to_int32(tf.to_float(image_width) * bbox[3])
# Calculate the mean pixel values in the bounding box, which will be used
# to fill the cutout region.
mean = tf.reduce_mean(image[min_y:max_y + 1, min_x:max_x + 1],
reduction_indices=[0, 1])
  # Cutout mask will be size pad_size_height * 2 by pad_size_width * 2 if the
# region lies entirely within the bbox.
box_height = max_y - min_y + 1
box_width = max_x - min_x + 1
pad_size_height = tf.to_int32(pad_fraction * (box_height / 2))
pad_size_width = tf.to_int32(pad_fraction * (box_width / 2))
# Sample the center location in the image where the zero mask will be applied.
cutout_center_height = tf.random_uniform(
shape=[], minval=min_y, maxval=max_y+1,
dtype=tf.int32)
cutout_center_width = tf.random_uniform(
shape=[], minval=min_x, maxval=max_x+1,
dtype=tf.int32)
lower_pad = tf.maximum(
0, cutout_center_height - pad_size_height)
upper_pad = tf.maximum(
0, image_height - cutout_center_height - pad_size_height)
left_pad = tf.maximum(
0, cutout_center_width - pad_size_width)
right_pad = tf.maximum(
0, image_width - cutout_center_width - pad_size_width)
cutout_shape = [image_height - (lower_pad + upper_pad),
image_width - (left_pad + right_pad)]
padding_dims = [[lower_pad, upper_pad], [left_pad, right_pad]]
mask = tf.pad(
tf.zeros(cutout_shape, dtype=image.dtype),
padding_dims, constant_values=1)
mask = tf.expand_dims(mask, 2)
mask = tf.tile(mask, [1, 1, 3])
return mask, mean
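# Worked example (illustrative only): for a bbox that is 40 pixels tall and
# pad_fraction = 0.25, pad_size_height = int(0.25 * (40 / 2)) = 5, so when the
# sampled center lies well inside the image the zeroed region of the mask is
# 2 * pad_size_height = 10 pixels tall; it is clipped when the center lands near
# the image boundary.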
def bbox_cutout(image, bboxes, pad_fraction, replace_with_mean):
"""Applies cutout to the image according to bbox information.
  This is a cutout variant that uses bbox information to make more informed
decisions on where to place the cutout mask.
Args:
image: 3D uint8 Tensor.
bboxes: 2D Tensor that is a list of the bboxes in the image. Each bbox
has 4 elements (min_y, min_x, max_y, max_x) of type float with values
between [0, 1].
    pad_fraction: Float that specifies how large the cutout mask should be
in reference to the size of the original bbox. If pad_fraction is 0.25,
then the cutout mask will be of shape
(0.25 * bbox height, 0.25 * bbox width).
    replace_with_mean: Boolean that specifies what value should be filled in
where the cutout mask is applied. Since the incoming image will be of
uint8 and will not have had any mean normalization applied, by default
we set the value to be 128. If replace_with_mean is True then we find
the mean pixel values across the channel dimension and use those to fill
in where the cutout mask is applied.
Returns:
A tuple. First element is a tensor of the same shape as image that has
cutout applied to it. Second element is the bboxes that were passed in
that will be unchanged.
"""
def apply_bbox_cutout(image, bboxes, pad_fraction):
"""Applies cutout to a single bounding box within image."""
# Choose a single bounding box to apply cutout to.
random_index = tf.random_uniform(
shape=[], maxval=tf.shape(bboxes)[0], dtype=tf.int32)
# Select the corresponding bbox and apply cutout.
chosen_bbox = tf.gather(bboxes, random_index)
mask, mean = _cutout_inside_bbox(image, chosen_bbox, pad_fraction)
# When applying cutout we either set the pixel value to 128 or to the mean
# value inside the bbox.
replace = mean if replace_with_mean else 128
# Apply the cutout mask to the image. Where the mask is 0 we fill it with
# `replace`.
image = tf.where(
tf.equal(mask, 0),
tf.cast(tf.ones_like(image, dtype=image.dtype) * replace,
dtype=image.dtype),
image)
return image
# Check to see if there are boxes, if so then apply boxcutout.
image = tf.cond(tf.equal(tf.shape(bboxes)[0], 0), lambda: image,
lambda: apply_bbox_cutout(image, bboxes, pad_fraction))
return image, bboxes
NAME_TO_FUNC = {
'AutoContrast': autocontrast,
'Equalize': equalize,
'Posterize': posterize,
'Solarize': solarize,
'SolarizeAdd': solarize_add,
'Color': color,
'Contrast': contrast,
'Brightness': brightness,
'Sharpness': sharpness,
'Cutout': cutout,
'BBox_Cutout': bbox_cutout,
'Rotate_BBox': rotate_with_bboxes,
# pylint:disable=g-long-lambda
'TranslateX_BBox': lambda image, bboxes, pixels, replace: translate_bbox(
image, bboxes, pixels, replace, shift_horizontal=True),
'TranslateY_BBox': lambda image, bboxes, pixels, replace: translate_bbox(
image, bboxes, pixels, replace, shift_horizontal=False),
'ShearX_BBox': lambda image, bboxes, level, replace: shear_with_bboxes(
image, bboxes, level, replace, shear_horizontal=True),
'ShearY_BBox': lambda image, bboxes, level, replace: shear_with_bboxes(
image, bboxes, level, replace, shear_horizontal=False),
# pylint:enable=g-long-lambda
'Rotate_Only_BBoxes': rotate_only_bboxes,
'ShearX_Only_BBoxes': shear_x_only_bboxes,
'ShearY_Only_BBoxes': shear_y_only_bboxes,
'TranslateX_Only_BBoxes': translate_x_only_bboxes,
'TranslateY_Only_BBoxes': translate_y_only_bboxes,
'Flip_Only_BBoxes': flip_only_bboxes,
'Solarize_Only_BBoxes': solarize_only_bboxes,
'Equalize_Only_BBoxes': equalize_only_bboxes,
'Cutout_Only_BBoxes': cutout_only_bboxes,
}
def _randomly_negate_tensor(tensor):
"""With 50% prob turn the tensor negative."""
should_flip = tf.cast(tf.floor(tf.random_uniform([]) + 0.5), tf.bool)
final_tensor = tf.cond(should_flip, lambda: tensor, lambda: -tensor)
return final_tensor
def _rotate_level_to_arg(level):
level = (level/_MAX_LEVEL) * 30.
level = _randomly_negate_tensor(level)
return (level,)
def _shrink_level_to_arg(level):
"""Converts level to ratio by which we shrink the image content."""
if level == 0:
return (1.0,) # if level is zero, do not shrink the image
# Maximum shrinking ratio is 2.9.
level = 2. / (_MAX_LEVEL / level) + 0.9
return (level,)
def _enhance_level_to_arg(level):
return ((level/_MAX_LEVEL) * 1.8 + 0.1,)
def _shear_level_to_arg(level):
level = (level/_MAX_LEVEL) * 0.3
# Flip level to negative with 50% chance.
level = _randomly_negate_tensor(level)
return (level,)
def _translate_level_to_arg(level, translate_const):
level = (level/_MAX_LEVEL) * float(translate_const)
# Flip level to negative with 50% chance.
level = _randomly_negate_tensor(level)
return (level,)
def _bbox_cutout_level_to_arg(level, hparams):
cutout_pad_fraction = (level/_MAX_LEVEL) * hparams.cutout_max_pad_fraction
return (cutout_pad_fraction,
hparams.cutout_bbox_replace_with_mean)
def level_to_arg(hparams):
return {
'AutoContrast': lambda level: (),
'Equalize': lambda level: (),
'Posterize': lambda level: (int((level/_MAX_LEVEL) * 4),),
'Solarize': lambda level: (int((level/_MAX_LEVEL) * 256),),
'SolarizeAdd': lambda level: (int((level/_MAX_LEVEL) * 110),),
'Color': _enhance_level_to_arg,
'Contrast': _enhance_level_to_arg,
'Brightness': _enhance_level_to_arg,
'Sharpness': _enhance_level_to_arg,
'Cutout': lambda level: (int((level/_MAX_LEVEL) * hparams.cutout_const),),
# pylint:disable=g-long-lambda
'BBox_Cutout': lambda level: _bbox_cutout_level_to_arg(
level, hparams),
'TranslateX_BBox': lambda level: _translate_level_to_arg(
level, hparams.translate_const),
'TranslateY_BBox': lambda level: _translate_level_to_arg(
level, hparams.translate_const),
# pylint:enable=g-long-lambda
'ShearX_BBox': _shear_level_to_arg,
'ShearY_BBox': _shear_level_to_arg,
'Rotate_BBox': _rotate_level_to_arg,
'Rotate_Only_BBoxes': _rotate_level_to_arg,
'ShearX_Only_BBoxes': _shear_level_to_arg,
'ShearY_Only_BBoxes': _shear_level_to_arg,
# pylint:disable=g-long-lambda
'TranslateX_Only_BBoxes': lambda level: _translate_level_to_arg(
level, hparams.translate_bbox_const),
'TranslateY_Only_BBoxes': lambda level: _translate_level_to_arg(
level, hparams.translate_bbox_const),
# pylint:enable=g-long-lambda
'Flip_Only_BBoxes': lambda level: (),
'Solarize_Only_BBoxes': lambda level: (int((level/_MAX_LEVEL) * 256),),
'Equalize_Only_BBoxes': lambda level: (),
# pylint:disable=g-long-lambda
'Cutout_Only_BBoxes': lambda level: (
int((level/_MAX_LEVEL) * hparams.cutout_bbox_const),),
# pylint:enable=g-long-lambda
}
def bbox_wrapper(func):
"""Adds a bboxes function argument to func and returns unchanged bboxes."""
def wrapper(images, bboxes, *args, **kwargs):
return (func(images, *args, **kwargs), bboxes)
return wrapper
def _parse_policy_info(name, prob, level, replace_value, augmentation_hparams):
"""Return the function that corresponds to `name` and update `level` param."""
func = NAME_TO_FUNC[name]
args = level_to_arg(augmentation_hparams)[name](level)
# Check to see if prob is passed into function. This is used for operations
# where we alter bboxes independently.
# pytype:disable=wrong-arg-types
if 'prob' in inspect.getfullargspec(func)[0]:
args = tuple([prob] + list(args))
# pytype:enable=wrong-arg-types
# Add in replace arg if it is required for the function that is being called.
if 'replace' in inspect.getfullargspec(func)[0]:
# Make sure replace is the final argument
assert 'replace' == inspect.getfullargspec(func)[0][-1]
args = tuple(list(args) + [replace_value])
# Add bboxes as the second positional argument for the function if it does
# not already exist.
if 'bboxes' not in inspect.getfullargspec(func)[0]:
func = bbox_wrapper(func)
return (func, prob, args)
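# Worked example (illustrative only; assumes _MAX_LEVEL is 10 as defined earlier in
# this file and hparams.translate_const is 250 as configured further below):
#
#   func, prob, args = _parse_policy_info(
#       'TranslateX_BBox', 0.6, 4, [128, 128, 128], augmentation_hparams)
#
# level_to_arg turns level 4 into (4 / 10) * 250 = 100 pixels (randomly negated).
# 'prob' is not a parameter of the TranslateX_BBox lambda, so it is returned
# separately rather than prepended to args, and 'replace' is appended as the final
# positional argument, giving args == (pixels_tensor, [128, 128, 128]).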
def _apply_func_with_prob(func, image, args, prob, bboxes):
"""Apply `func` to image w/ `args` as input with probability `prob`."""
assert isinstance(args, tuple)
assert 'bboxes' == inspect.getfullargspec(func)[0][1]
# If prob is a function argument, then this randomness is being handled
# inside the function, so make sure it is always called.
if 'prob' in inspect.getfullargspec(func)[0]:
prob = 1.0
# Apply the function with probability `prob`.
should_apply_op = tf.cast(
tf.floor(tf.random_uniform([], dtype=tf.float32) + prob), tf.bool)
augmented_image, augmented_bboxes = tf.cond(
should_apply_op,
lambda: func(image, bboxes, *args),
lambda: (image, bboxes))
return augmented_image, augmented_bboxes
def select_and_apply_random_policy(policies, image, bboxes):
"""Select a random policy from `policies` and apply it to `image`."""
policy_to_select = tf.random_uniform([], maxval=len(policies), dtype=tf.int32)
# Note that using tf.case instead of tf.conds would result in significantly
# larger graphs and would even break export for some larger policies.
for (i, policy) in enumerate(policies):
image, bboxes = tf.cond(
tf.equal(i, policy_to_select),
lambda selected_policy=policy: selected_policy(image, bboxes),
lambda: (image, bboxes))
return (image, bboxes)
def build_and_apply_nas_policy(policies, image, bboxes,
augmentation_hparams):
"""Build a policy from the given policies passed in and apply to image.
Args:
policies: list of lists of tuples in the form `(func, prob, level)`, `func`
is a string name of the augmentation function, `prob` is the probability
of applying the `func` operation, `level` is the input argument for
`func`.
image: tf.Tensor that the resulting policy will be applied to.
bboxes: tf.Tensor of shape [N, 4] representing ground truth boxes that are
normalized between [0, 1].
augmentation_hparams: Hparams associated with the NAS learned policy.
Returns:
A version of image that now has data augmentation applied to it based on
    the `policies` passed into the function. Additionally, returns bboxes if
    a value for them is passed in that is not None.
"""
replace_value = [128, 128, 128]
# func is the string name of the augmentation function, prob is the
# probability of applying the operation and level is the parameter associated
# with the tf op.
# tf_policies are functions that take in an image and return an augmented
# image.
tf_policies = []
for policy in policies:
tf_policy = []
# Link string name to the correct python function and make sure the correct
# argument is passed into that function.
for policy_info in policy:
policy_info = list(policy_info) + [replace_value, augmentation_hparams]
tf_policy.append(_parse_policy_info(*policy_info))
    # Now build the tf policy that will apply the augmentation procedure
# on image.
def make_final_policy(tf_policy_):
def final_policy(image_, bboxes_):
for func, prob, args in tf_policy_:
image_, bboxes_ = _apply_func_with_prob(
func, image_, args, prob, bboxes_)
return image_, bboxes_
return final_policy
tf_policies.append(make_final_policy(tf_policy))
augmented_images, augmented_bboxes = select_and_apply_random_policy(
tf_policies, image, bboxes)
# If no bounding boxes were specified, then just return the images.
return (augmented_images, augmented_bboxes)
@tf.autograph.experimental.do_not_convert
def distort_image_with_autoaugment(image,
bboxes,
augmentation_name):
"""Applies the AutoAugment policy to `image` and `bboxes`.
Args:
image: `Tensor` of shape [height, width, 3] representing an image.
bboxes: `Tensor` of shape [N, 4] representing ground truth boxes that are
normalized between [0, 1].
augmentation_name: The name of the AutoAugment policy to use. The available
options are `v0`, `v1`, `v2`, `v3` and `test`. `v0` is the policy used for
all of the results in the paper and was found to achieve the best results
on the COCO dataset. `v1`, `v2` and `v3` are additional good policies
found on the COCO dataset that have slight variation in what operations
were used during the search procedure along with how many operations are
applied in parallel to a single image (2 vs 3).
Returns:
A tuple containing the augmented versions of `image` and `bboxes`.
"""
  logging.info('Using autoaugmentation policy: %s', augmentation_name)
available_policies = {'v0': policy_v0, 'v1': policy_v1, 'v2': policy_v2,
'v3': policy_v3, 'test': policy_vtest}
if augmentation_name not in available_policies:
raise ValueError('Invalid augmentation_name: {}'.format(augmentation_name))
policy = available_policies[augmentation_name]()
# Hparams that will be used for AutoAugment.
augmentation_hparams = hparams_config.Config(dict(
cutout_max_pad_fraction=0.75,
cutout_bbox_replace_with_mean=False,
cutout_const=100,
translate_const=250,
cutout_bbox_const=50,
translate_bbox_const=120))
return build_and_apply_nas_policy(policy, image, bboxes,
augmentation_hparams)
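# Usage sketch (illustrative only; values are made up):
#
#   image = tf.zeros([640, 640, 3], dtype=tf.uint8)
#   bboxes = tf.constant([[0.1, 0.1, 0.6, 0.5]], dtype=tf.float32)
#   aug_image, aug_bboxes = distort_image_with_autoaugment(image, bboxes, 'v0')
#
# The outputs keep the input shapes, and the boxes stay normalized to [0, 1]
# because every geometric op re-clips them through _clip_bbox.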
def distort_image_with_randaugment(image, bboxes, num_layers, magnitude):
"""Applies the RandAugment to `image` and `bboxes`."""
replace_value = [128, 128, 128]
tf.logging.info('Using RandAugment.')
augmentation_hparams = hparams_config.Config(
dict(
cutout_max_pad_fraction=0.75,
cutout_bbox_replace_with_mean=False,
cutout_const=100,
translate_const=250,
cutout_bbox_const=50,
translate_bbox_const=120))
available_ops = [
'Equalize', 'Solarize', 'Color', 'Cutout', 'SolarizeAdd',
'TranslateX_BBox', 'TranslateY_BBox', 'ShearX_BBox', 'ShearY_BBox',
'Rotate_BBox']
if bboxes is None:
bboxes = tf.constant(0.0)
for layer_num in range(num_layers):
op_to_select = tf.random_uniform(
[], maxval=len(available_ops), dtype=tf.int32)
random_magnitude = float(magnitude)
with tf.name_scope('randaug_layer_{}'.format(layer_num)):
for (i, op_name) in enumerate(available_ops):
prob = tf.random_uniform([], minval=0.2, maxval=0.8, dtype=tf.float32)
func, _, args = _parse_policy_info(op_name, prob, random_magnitude,
replace_value, augmentation_hparams)
image, bboxes = tf.cond(
tf.equal(i, op_to_select),
lambda fn=func, fn_args=args: fn(image, bboxes, *fn_args),
lambda: (image, bboxes))
return (image, bboxes)
| 39.967626 | 88 | 0.688327 | ["Apache-2.0"] | datawowio/automl | efficientdet/aug/autoaugment.py | 66,666 | Python
"""
mbed SDK
Copyright (c) 2011-2015 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Author: Przemyslaw Wirkus <[email protected]>
"""
from mbed_os_tools.test.host_tests_plugins.module_copy_silabs import (
HostTestPluginCopyMethod_Silabs,
load_plugin,
)
| 31.708333 | 72 | 0.788436 | ["Apache-2.0"] | ARMmbed/mbed-os-tools | packages/mbed-host-tests/mbed_host_tests/host_tests_plugins/module_copy_silabs.py | 761 | Python
""" Tests for the linecache module """
import linecache
import unittest
import os.path
from test import support
FILENAME = linecache.__file__
INVALID_NAME = '!@$)(!@#_1'
EMPTY = ''
TESTS = 'inspect_fodder inspect_fodder2 mapping_tests'
TESTS = TESTS.split()
TEST_PATH = os.path.dirname(__file__)
MODULES = "linecache abc".split()
MODULE_PATH = os.path.dirname(FILENAME)
SOURCE_1 = '''
" Docstring "
def function():
return result
'''
SOURCE_2 = '''
def f():
return 1 + 1
a = f()
'''
SOURCE_3 = '''
def f():
return 3''' # No ending newline
class LineCacheTests(unittest.TestCase):
def test_getline(self):
getline = linecache.getline
# Bad values for line number should return an empty string
self.assertEqual(getline(FILENAME, 2**15), EMPTY)
self.assertEqual(getline(FILENAME, -1), EMPTY)
        # Float values currently raise TypeError, should they?
self.assertRaises(TypeError, getline, FILENAME, 1.1)
# Bad filenames should return an empty string
self.assertEqual(getline(EMPTY, 1), EMPTY)
self.assertEqual(getline(INVALID_NAME, 1), EMPTY)
# Check whether lines correspond to those from file iteration
for entry in TESTS:
filename = os.path.join(TEST_PATH, entry) + '.py'
with open(filename) as file:
for index, line in enumerate(file):
self.assertEqual(line, getline(filename, index + 1))
# Check module loading
for entry in MODULES:
filename = os.path.join(MODULE_PATH, entry) + '.py'
with open(filename) as file:
for index, line in enumerate(file):
self.assertEqual(line, getline(filename, index + 1))
# Check that bogus data isn't returned (issue #1309567)
empty = linecache.getlines('a/b/c/__init__.py')
self.assertEqual(empty, [])
def test_no_ending_newline(self):
self.addCleanup(support.unlink, support.TESTFN)
with open(support.TESTFN, "w") as fp:
fp.write(SOURCE_3)
lines = linecache.getlines(support.TESTFN)
self.assertEqual(lines, ["\n", "def f():\n", " return 3\n"])
def test_clearcache(self):
cached = []
for entry in TESTS:
filename = os.path.join(TEST_PATH, entry) + '.py'
cached.append(filename)
linecache.getline(filename, 1)
# Are all files cached?
cached_empty = [fn for fn in cached if fn not in linecache.cache]
self.assertEqual(cached_empty, [])
# Can we clear the cache?
linecache.clearcache()
cached_empty = [fn for fn in cached if fn in linecache.cache]
self.assertEqual(cached_empty, [])
def test_checkcache(self):
getline = linecache.getline
# Create a source file and cache its contents
source_name = support.TESTFN + '.py'
self.addCleanup(support.unlink, source_name)
with open(source_name, 'w') as source:
source.write(SOURCE_1)
getline(source_name, 1)
# Keep a copy of the old contents
source_list = []
with open(source_name) as source:
for index, line in enumerate(source):
self.assertEqual(line, getline(source_name, index + 1))
source_list.append(line)
with open(source_name, 'w') as source:
source.write(SOURCE_2)
# Try to update a bogus cache entry
linecache.checkcache('dummy')
# Check that the cache matches the old contents
for index, line in enumerate(source_list):
self.assertEqual(line, getline(source_name, index + 1))
# Update the cache and check whether it matches the new source file
linecache.checkcache(source_name)
with open(source_name) as source:
for index, line in enumerate(source):
self.assertEqual(line, getline(source_name, index + 1))
source_list.append(line)
def test_memoryerror(self):
lines = linecache.getlines(FILENAME)
self.assertTrue(lines)
def raise_memoryerror(*args, **kwargs):
raise MemoryError
with support.swap_attr(linecache, 'updatecache', raise_memoryerror):
lines2 = linecache.getlines(FILENAME)
self.assertEqual(lines2, lines)
linecache.clearcache()
with support.swap_attr(linecache, 'updatecache', raise_memoryerror):
lines3 = linecache.getlines(FILENAME)
self.assertEqual(lines3, [])
self.assertEqual(linecache.getlines(FILENAME), lines)
if __name__ == "__main__":
unittest.main()
| 31.911565 | 76 | 0.626519 | ["Apache-2.0"] | AnandEmbold/ironpython3 | Src/StdLib/Lib/test/test_linecache.py | 4,691 | Python
import torch
import os
import os.path
import shutil
import numpy as np
import soundfile as sf
from pathlib import PurePath
from torch import nn
from torch.utils.data import DataLoader, random_split
from asteroid.data import TimitDataset
from asteroid.data.utils import CachedWavSet, RandomMixtureSet, FixedMixtureSet
from tqdm import tqdm
from torch import optim
from pytorch_lightning import Trainer, seed_everything, loggers as pl_loggers
from pytorch_lightning.callbacks.early_stopping import EarlyStopping
from pytorch_lightning.callbacks import ModelCheckpoint
from asteroid_filterbanks.transforms import mag
from asteroid.engine import System
from asteroid.losses import singlesrc_neg_sisdr
from egs.whamr.TasNet.model import TasNet
BATCH_SIZE = 8  # could be larger on the cluster; test whether a bigger batch size works
SAMPLE_RATE = 8000 # as agreed upon
CROP_LEN = 24000 # average track len in TIMIT
SEED = 42 # magic number :)
def sisdr_loss_wrapper(est_target, target):
return singlesrc_neg_sisdr(est_target.squeeze(1), target).mean()
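# Quick shape sanity check (a sketch, not part of the training run): the wrapper
# expects the estimate with a singleton source dimension and the reference without
# it, e.g.
#   est = torch.randn(4, 1, 24000)       # (batch, n_src=1, time)
#   ref = torch.randn(4, 24000)          # (batch, time)
#   loss = sisdr_loss_wrapper(est, ref)  # scalar negative SI-SDR; lower is better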
def train_val_split(ds, val_fraction=0.1, random_seed=SEED):
assert val_fraction > 0 and val_fraction < 0.5
len_train = int(len(ds) * (1 - val_fraction))
len_val = len(ds) - len_train
return random_split(ds, [len_train, len_val], generator=torch.Generator().manual_seed(random_seed))
DRONE_NOISE_DIR = '/jmain01/home/JAD007/txk02/aaa18-txk02/Datasets/noises-train-drones'
# fixed SNRs for validation set
TRAIN_SNRS = [-25, -20, -15, -10, -5]
TIMIT_DIR = PurePath('/jmain01/home/JAD007/txk02/aaa18-txk02/Datasets/TIMIT')
TIMIT_DIR_8kHZ = PurePath('/jmain01/home/JAD007/txk02/aaa18-txk02/Datasets/TIMIT_8kHZ')
# Reproducibility - fix all random seeds
seed_everything(SEED)
# Load noises, resample and save into the memory
noises = CachedWavSet(DRONE_NOISE_DIR, sample_rate=SAMPLE_RATE, precache=True)
# Load clean data and split it into train and val
timit = TimitDataset(TIMIT_DIR_8kHZ, subset='train', sample_rate=SAMPLE_RATE, with_path=False)
timit_train, timit_val = train_val_split(timit, val_fraction=0.1, random_seed=SEED)
# Training data mixes crops randomly on the fly with random SNR in range (effectively infinite training data)
# `repeat_factor=20` means that the dataset contains 20 copies of itself - it is the easiest way to make the epoch longer
timit_train = RandomMixtureSet(timit_train, noises, random_seed=SEED, snr_range=(-25, -5),
crop_length=CROP_LEN, repeat_factor=30)
# Validation data is fixed (for stability): mix every clean clip with all the noises in the folder
# Argument `mixtures_per_clean` regulates with how many different noise files each clean file will be mixed
timit_val = FixedMixtureSet(timit_val, noises, snrs=TRAIN_SNRS, random_seed=SEED,
mixtures_per_clean=5, crop_length=CROP_LEN)
NUM_WORKERS = 5
train_loader = DataLoader(timit_train, shuffle=True, batch_size=BATCH_SIZE,
num_workers=NUM_WORKERS, drop_last=True)
val_loader = DataLoader(timit_val, batch_size=BATCH_SIZE,
num_workers=NUM_WORKERS, drop_last=True)
# some initial hyperparameters; do they look sensible?
LR = 1e-3
REDUCE_LR_PATIENCE = 5
EARLY_STOP_PATIENCE = 20
MAX_EPOCHS = 20
# the model here should be constructed in the script according to the passed config (including the model type)
# most of the models accept `sample_rate` parameter for encoders, which is important (default is 16000, override)
model = TasNet(fb_conf={'n_filters': 512, 'kernel_size': 40, 'stride': 20},
mask_conf ={'n_layers': 4, 'n_units': 500, 'dropout': 0.3, "n_src": 1})
optimizer = optim.Adam(model.parameters(), lr=LR)
scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, patience=REDUCE_LR_PATIENCE)
early_stopping = EarlyStopping(monitor='val_loss', patience=EARLY_STOP_PATIENCE)
checkpoint = ModelCheckpoint(
filename='{epoch:02d}-{val_loss:.2f}',
monitor="val_loss",
mode="min",
save_top_k=5,
verbose=True
)
# Probably we also need to subclass `System`, in order to log the target metrics on the validation set (PESQ/STOI)
system = System(model, optimizer, sisdr_loss_wrapper, train_loader, val_loader, scheduler)
# log dir and model name are also part of the config, of course
LOG_DIR = 'logs'
logger = pl_loggers.TensorBoardLogger(LOG_DIR, name='TIMIT-drones-TasNet-random_test', version=1)
# choose the proper accelerator for JADE, probably `ddp` (also, `auto_select_gpus=True` might be useful)
trainer = Trainer(max_epochs=MAX_EPOCHS, gpus=-1,
logger=logger, callbacks=[early_stopping, checkpoint], deterministic=True, gradient_clip_val=5.0,)
trainer.fit(system)
#torch.save(model.serialize(), 'tasnet_model.pt')
| 44.574074 | 121 | 0.753635 | ["MIT"] | flyingleafe/asteroid | notebooks/train_tasnet.py | 4,814 | Python
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import logging
import datetime
import sqlalchemy as sa
from random import randint
from flask import Blueprint
from flask import current_app
from flask import redirect
from flask import url_for
from relengapi.blueprints.archiver import tables
from relengapi.blueprints.archiver.tasks import TASK_TIME_OUT
from relengapi.blueprints.archiver.tasks import create_and_upload_archive
from relengapi.blueprints.archiver.types import MozharnessArchiveTask
from relengapi.lib import api
from relengapi.lib import badpenny
from relengapi.lib.time import now
bp = Blueprint('archiver', __name__)
log = logging.getLogger(__name__)
GET_EXPIRES_IN = 300
PENDING_EXPIRES_IN = 60
FINISHED_STATES = ['SUCCESS', 'FAILURE', 'REVOKED']
def delete_tracker(tracker):
session = current_app.db.session('relengapi')
log.info("deleting tracker with id: {}".format(tracker.task_id))
session.delete(tracker)
session.commit()
def update_tracker_state(tracker, state):
session = current_app.db.session('relengapi')
log.info("updating tracker with id: {} to state: {}".format(tracker.id, state))
try:
tracker.state = state
session.commit()
except sa.exc.IntegrityError:
session.rollback()
@badpenny.periodic_task(seconds=TASK_TIME_OUT)
def cleanup_old_tasks(job_status):
"""delete any tracker task if it is older than the time a task can live for."""
session = current_app.db.session('relengapi')
expiry_cutoff = now() - datetime.timedelta(seconds=TASK_TIME_OUT)
table = tables.ArchiverTask
for tracker in session.query(table).order_by(table.created_at):
if tracker.created_at < expiry_cutoff:
delete_tracker(tracker)
else:
break
def renew_tracker_pending_expiry(tracker):
pending_expires_at = now() + datetime.timedelta(seconds=PENDING_EXPIRES_IN)
session = current_app.db.session('relengapi')
log.info("renewing tracker {} with pending expiry: {}".format(tracker.id, pending_expires_at))
tracker.pending_expires_at = pending_expires_at
session.commit()
@bp.route('/status/<task_id>')
@api.apimethod(MozharnessArchiveTask, unicode)
def task_status(task_id):
"""
Check and return the current state of the create_and_upload_archive celery task with task id
of <task_id>.
If the task is unknown, state will be PENDING. Once the task starts it will be updated to
STARTED and finally, if it completes, it will be either SUCCESS (no exceptions), or FAILURE.
See update_state() within create_and_upload_archive and
http://celery.readthedocs.org/en/latest/reference/celery.states.html for more details.
If state is SUCCESS, it is safe to check response['s3_urls'] for the archives submitted to s3
"""
task = create_and_upload_archive.AsyncResult(task_id)
task_tracker = tables.ArchiverTask.query.filter(tables.ArchiverTask.task_id == task_id).first()
log.info("checking status of task id {}: current state {}".format(task_id, task.state))
task_info = task.info or {}
response = {
'state': task.state,
}
if task.state != 'FAILURE':
response['status'] = task_info.get('status', 'no status available at this point.')
response['src_url'] = task_info.get('src_url', '')
response['s3_urls'] = task_info.get('s3_urls', {})
else:
# something went wrong
response['status'] = str(task.info) # this is the exception raised
response['src_url'] = ''
response['s3_urls'] = {}
# archiver does not create any custom states, so we can assume to have only the defaults:
# http://docs.celeryproject.org/en/latest/userguide/tasks.html#task-states
# therefore, delete our state_id tracker from the db if the celery state is in a final state:
# e.g. not RETRY, STARTED, or PENDING
if task_tracker:
if task.state in FINISHED_STATES:
delete_tracker(task_tracker)
elif task.state == "PENDING" and task_tracker.pending_expires_at < now():
log.info("Task {} has expired from pending too long. Re-creating task".format(task.id))
renew_tracker_pending_expiry(task_tracker) # let exceptions bubble up before moving on
create_and_upload_archive.apply_async(args=[task_tracker.src_url, task_tracker.s3_key],
task_id=task.id)
response['state'] = 'RETRY'
response['status'] = 'Task has expired from pending for too long. Re-creating task.'
elif task_tracker.state != task.state:
update_tracker_state(task_tracker, task.state)
return MozharnessArchiveTask(**response)
@bp.route('/hgmo/<path:repo>/<rev>')
@api.apimethod(None, unicode, unicode, unicode, unicode, unicode, status_code=302)
def get_hgmo_archive(repo, rev, subdir=None, suffix='tar.gz', preferred_region=None):
"""
An archiver for hg.mozilla.org related requests. Uses relengapi.blueprints.archiver.get_archive
:param repo: the repo location off of hg.mozilla.org/
:param rev: the rev associated with the repo
:param subdir: optional subdir path to only archive a portion of the repo
:param suffix: the archive extension type. defaulted to tar.gz
:param preferred_region: the preferred s3 region to use
"""
# allow for the short hash and full hash to be passed
rev = rev[0:12]
src_url = current_app.config['ARCHIVER_HGMO_URL_TEMPLATE'].format(
repo=repo, rev=rev, suffix=suffix, subdir=subdir or ''
)
# though slightly odd to append the archive suffix extension with a subdir, this:
# 1) allows us to have archives based on different subdir locations from the same repo and rev
# 2) is aligned with the hg.mozilla.org format
key = '{repo}-{rev}.{suffix}'.format(repo=repo, rev=rev, suffix=suffix)
if subdir:
key += '/{}'.format(subdir)
return get_archive(src_url, key, preferred_region)
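# Worked example (illustrative only; repo and rev are made up):
# get_hgmo_archive('mozilla-central', 'abcdef123456789a', subdir='testing/mozharness')
# truncates the rev to 'abcdef123456' and builds the s3 key
# 'mozilla-central-abcdef123456.tar.gz/testing/mozharness' before delegating to
# get_archive() below.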
def get_archive(src_url, key, preferred_region):
"""
A generic getter for retrieving an s3 location of an archive where the archive is based off a
src_url.
sub-dir: hg.mozilla.org supports archives of sub directories within a repository. This
flexibility allows for creating archives of only a portion of what would normally be an entire
repo archive.
logic flow:
    If there is already a key within s3, a redirect link is given for the
s3 location. If the key does not exist, download the archive from src url, upload it to s3
for each region supported and return all uploaded s3 url locations.
When the key does not exist, the remaining work will be assigned to a celery background task
with a url location returned immediately for obtaining task state updates.
"""
buckets = current_app.config['ARCHIVER_S3_BUCKETS']
random_region = buckets.keys()[randint(0, len(buckets.keys()) - 1)]
# use preferred region if available otherwise choose a valid one at random
region = preferred_region if preferred_region and preferred_region in buckets else random_region
bucket = buckets[region]
s3 = current_app.aws.connect_to('s3', region)
session = current_app.db.session('relengapi')
# first, see if the key exists
if not s3.get_bucket(bucket).get_key(key):
task_id = key.replace('/', '_') # keep things simple and avoid slashes in task url
# can't use unique support:
# api.pub.build.mozilla.org/docs/development/databases/#unique-row-support-get-or-create
# because we want to know when the row doesn't exist before creating it
tracker = tables.ArchiverTask.query.filter(tables.ArchiverTask.task_id == task_id).first()
if tracker and tracker.state in FINISHED_STATES:
log.info('Task tracker: {} exists but finished with state: '
'{}'.format(task_id, tracker.state))
# remove tracker and try celery task again
delete_tracker(tracker)
tracker = None
if not tracker:
log.info("Creating new celery task and task tracker for: {}".format(task_id))
task = create_and_upload_archive.apply_async(args=[src_url, key], task_id=task_id)
if task and task.id:
pending_expires_at = now() + datetime.timedelta(seconds=PENDING_EXPIRES_IN)
session.add(tables.ArchiverTask(task_id=task.id, s3_key=key, created_at=now(),
pending_expires_at=pending_expires_at,
src_url=src_url, state="PENDING"))
session.commit()
else:
return {}, 500
return {}, 202, {'Location': url_for('archiver.task_status', task_id=task.id)}
log.info("generating GET URL to {}, expires in {}s".format(key, GET_EXPIRES_IN))
# return 302 pointing to s3 url with archive
signed_url = s3.generate_url(
method='GET', expires_in=GET_EXPIRES_IN,
bucket=bucket, key=key
)
return redirect(signed_url)
| 44.5 | 100 | 0.693712 | [
"MPL-2.0",
"MPL-2.0-no-copyleft-exception"
] | lundjordan/build-relengapi | relengapi/blueprints/archiver/__init__.py | 9,256 | Python |
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Output command line arguments and json-encoded TF_CONFIGs.
Usage:
`make_tf_configs.py --workers="server1:1234" --ps="server3:2134,server4:2334"`
Outputs 1 line per job to stdout, first the workers, then the parameter servers.
Each line has the TF_CONFIG, then a tab, then the command line flags for that
job.
If there is a single worker, workers will have the `--sync` flag.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
# Dependency imports
import six
import tensorflow as tf
flags = tf.flags
FLAGS = flags.FLAGS
flags.DEFINE_string("workers", "", "Comma-separated list of worker addresses")
flags.DEFINE_string("ps", "", "Comma-separated list of ps addresses")
def main(_):
if not (FLAGS.workers and FLAGS.ps):
raise ValueError("Must provide --workers and --ps")
workers = FLAGS.workers.split(",")
ps = FLAGS.ps.split(",")
cluster = {"ps": ps, "worker": workers}
for task_type, jobs in six.iteritems(cluster):
for idx, job in enumerate(jobs):
if task_type == "worker":
cmd_line_flags = " ".join([
"--master=%s" % job,
"--ps_replicas=%d" % len(ps),
"--worker_replicas=%d" % len(workers),
"--worker_gpu=1",
"--worker_id=%d" % idx,
"--ps_gpu=1",
"--schedule=train",
"--sync" if len(workers) == 1 else "",
])
else:
cmd_line_flags = " ".join([
"--schedule=run_std_server",
])
tf_config = json.dumps({
"cluster": cluster,
"task": {
"type": task_type,
"index": idx
}
})
print(tf_config + "\t" + cmd_line_flags)
if __name__ == "__main__":
tf.app.run()
| 28.202381 | 80 | 0.643309 | [
"Apache-2.0"
] | anishsingh20/tensor2tensor | tensor2tensor/bin/make_tf_configs.py | 2,369 | Python |
# coding: utf-8
from __future__ import unicode_literals
import re
import base64
from .common import InfoExtractor
from ..compat import (
compat_urlparse,
compat_parse_qs,
)
from ..utils import (
clean_html,
ExtractorError,
int_or_none,
unsmuggle_url,
smuggle_url,
)
class KalturaIE(InfoExtractor):
_VALID_URL = r'''(?x)
(?:
kaltura:(?P<partner_id>\d+):(?P<id>[0-9a-z_]+)|
https?://
(:?(?:www|cdnapi(?:sec)?)\.)?kaltura\.com(?::\d+)?/
(?:
(?:
# flash player
index\.php/(?:kwidget|extwidget/preview)|
# html5 player
html5/html5lib/[^/]+/mwEmbedFrame\.php
)
)(?:/(?P<path>[^?]+))?(?:\?(?P<query>.*))?
)
'''
_SERVICE_URL = 'http://cdnapi.kaltura.com'
_SERVICE_BASE = '/api_v3/index.php'
# See https://github.com/kaltura/server/blob/master/plugins/content/caption/base/lib/model/enums/CaptionType.php
_CAPTION_TYPES = {
1: 'srt',
2: 'ttml',
3: 'vtt',
}
_TESTS = [
{
'url': 'kaltura:269692:1_1jc2y3e4',
'md5': '3adcbdb3dcc02d647539e53f284ba171',
'info_dict': {
'id': '1_1jc2y3e4',
'ext': 'mp4',
'title': 'Straight from the Heart',
'upload_date': '20131219',
'uploader_id': '[email protected]',
'description': 'The Allman Brothers Band, 12/16/1981',
'thumbnail': 're:^https?://.*/thumbnail/.*',
'timestamp': int,
},
},
{
'url': 'http://www.kaltura.com/index.php/kwidget/cache_st/1300318621/wid/_269692/uiconf_id/3873291/entry_id/1_1jc2y3e4',
'only_matching': True,
},
{
'url': 'https://cdnapisec.kaltura.com/index.php/kwidget/wid/_557781/uiconf_id/22845202/entry_id/1_plr1syf3',
'only_matching': True,
},
{
'url': 'https://cdnapisec.kaltura.com/html5/html5lib/v2.30.2/mwEmbedFrame.php/p/1337/uiconf_id/20540612/entry_id/1_sf5ovm7u?wid=_243342',
'only_matching': True,
},
{
# video with subtitles
'url': 'kaltura:111032:1_cw786r8q',
'only_matching': True,
},
{
# video with ttml subtitles (no fileExt)
'url': 'kaltura:1926081:0_l5ye1133',
'info_dict': {
'id': '0_l5ye1133',
'ext': 'mp4',
'title': 'What Can You Do With Python?',
'upload_date': '20160221',
'uploader_id': 'stork',
'thumbnail': 're:^https?://.*/thumbnail/.*',
'timestamp': int,
'subtitles': {
'en': [{
'ext': 'ttml',
}],
},
},
'skip': 'Gone. Maybe https://www.safaribooksonline.com/library/tutorials/introduction-to-python-anon/3469/',
'params': {
'skip_download': True,
},
},
{
'url': 'https://www.kaltura.com/index.php/extwidget/preview/partner_id/1770401/uiconf_id/37307382/entry_id/0_58u8kme7/embed/iframe?&flashvars[streamerType]=auto',
'only_matching': True,
},
{
'url': 'https://www.kaltura.com:443/index.php/extwidget/preview/partner_id/1770401/uiconf_id/37307382/entry_id/0_58u8kme7/embed/iframe?&flashvars[streamerType]=auto',
'only_matching': True,
}
]
@staticmethod
def _extract_url(webpage):
# Embed codes: https://knowledge.kaltura.com/embedding-kaltura-media-players-your-site
mobj = (
re.search(
r"""(?xs)
kWidget\.(?:thumb)?[Ee]mbed\(
\{.*?
(?P<q1>['"])wid(?P=q1)\s*:\s*
(?P<q2>['"])_?(?P<partner_id>(?:(?!(?P=q2)).)+)(?P=q2),.*?
(?P<q3>['"])entry_?[Ii]d(?P=q3)\s*:\s*
(?P<q4>['"])(?P<id>(?:(?!(?P=q4)).)+)(?P=q4)(?:,|\s*\})
""", webpage) or
re.search(
r'''(?xs)
(?P<q1>["'])
(?:https?:)?//cdnapi(?:sec)?\.kaltura\.com(?::\d+)?/(?:(?!(?P=q1)).)*\b(?:p|partner_id)/(?P<partner_id>\d+)(?:(?!(?P=q1)).)*
(?P=q1).*?
(?:
(?:
entry_?[Ii]d|
(?P<q2>["'])entry_?[Ii]d(?P=q2)
)\s*:\s*|
\[\s*(?P<q2_1>["'])entry_?[Ii]d(?P=q2_1)\s*\]\s*=\s*
)
(?P<q3>["'])(?P<id>(?:(?!(?P=q3)).)+)(?P=q3)
''', webpage) or
re.search(
r'''(?xs)
<(?:iframe[^>]+src|meta[^>]+\bcontent)=(?P<q1>["'])
(?:https?:)?//(?:(?:www|cdnapi(?:sec)?)\.)?kaltura\.com/(?:(?!(?P=q1)).)*\b(?:p|partner_id)/(?P<partner_id>\d+)
(?:(?!(?P=q1)).)*
[?&;]entry_id=(?P<id>(?:(?!(?P=q1))[^&])+)
(?:(?!(?P=q1)).)*
(?P=q1)
''', webpage)
)
if mobj:
embed_info = mobj.groupdict()
for k, v in embed_info.items():
embed_info[k] = v.strip()
url = 'kaltura:%(partner_id)s:%(id)s' % embed_info
escaped_pid = re.escape(embed_info['partner_id'])
service_url = re.search(
r'<script[^>]+src=["\']((?:https?:)?//.+?)/p/%s/sp/%s00/embedIframeJs' % (escaped_pid, escaped_pid),
webpage)
if service_url:
url = smuggle_url(url, {'service_url': service_url.group(1)})
return url
def _kaltura_api_call(self, video_id, actions, service_url=None, *args, **kwargs):
params = actions[0]
if len(actions) > 1:
for i, a in enumerate(actions[1:], start=1):
for k, v in a.items():
params['%d:%s' % (i, k)] = v
data = self._download_json(
(service_url or self._SERVICE_URL) + self._SERVICE_BASE,
video_id, query=params, *args, **kwargs)
status = data if len(actions) == 1 else data[0]
if status.get('objectType') == 'KalturaAPIException':
raise ExtractorError(
'%s said: %s' % (self.IE_NAME, status['message']))
return data
def _get_video_info(self, video_id, partner_id, service_url=None):
actions = [
{
'action': 'null',
'apiVersion': '3.1.5',
'clientTag': 'kdp:v3.8.5',
'format': 1, # JSON, 2 = XML, 3 = PHP
'service': 'multirequest',
},
{
'expiry': 86400,
'service': 'session',
'action': 'startWidgetSession',
'widgetId': '_%s' % partner_id,
},
{
'action': 'get',
'entryId': video_id,
'service': 'baseentry',
'ks': '{1:result:ks}',
'responseProfile:fields': 'createdAt,dataUrl,duration,name,plays,thumbnailUrl,userId',
'responseProfile:type': 1,
},
{
'action': 'getbyentryid',
'entryId': video_id,
'service': 'flavorAsset',
'ks': '{1:result:ks}',
},
{
'action': 'list',
'filter:entryIdEqual': video_id,
'service': 'caption_captionasset',
'ks': '{1:result:ks}',
},
]
return self._kaltura_api_call(
video_id, actions, service_url, note='Downloading video info JSON')
def _real_extract(self, url):
url, smuggled_data = unsmuggle_url(url, {})
mobj = re.match(self._VALID_URL, url)
partner_id, entry_id = mobj.group('partner_id', 'id')
ks = None
captions = None
if partner_id and entry_id:
_, info, flavor_assets, captions = self._get_video_info(entry_id, partner_id, smuggled_data.get('service_url'))
else:
path, query = mobj.group('path', 'query')
if not path and not query:
raise ExtractorError('Invalid URL', expected=True)
params = {}
if query:
params = compat_parse_qs(query)
if path:
splitted_path = path.split('/')
params.update(dict((zip(splitted_path[::2], [[v] for v in splitted_path[1::2]]))))
if 'wid' in params:
partner_id = params['wid'][0][1:]
elif 'p' in params:
partner_id = params['p'][0]
elif 'partner_id' in params:
partner_id = params['partner_id'][0]
else:
raise ExtractorError('Invalid URL', expected=True)
if 'entry_id' in params:
entry_id = params['entry_id'][0]
_, info, flavor_assets, captions = self._get_video_info(entry_id, partner_id)
elif 'uiconf_id' in params and 'flashvars[referenceId]' in params:
reference_id = params['flashvars[referenceId]'][0]
webpage = self._download_webpage(url, reference_id)
entry_data = self._parse_json(self._search_regex(
r'window\.kalturaIframePackageData\s*=\s*({.*});',
webpage, 'kalturaIframePackageData'),
reference_id)['entryResult']
info, flavor_assets = entry_data['meta'], entry_data['contextData']['flavorAssets']
entry_id = info['id']
# Unfortunately, data returned in kalturaIframePackageData lacks
# captions so we will try requesting the complete data using
            # the regular approach since we now know the entry_id
try:
_, info, flavor_assets, captions = self._get_video_info(
entry_id, partner_id)
except ExtractorError:
# Regular scenario failed but we already have everything
# extracted apart from captions and can process at least
# with this
pass
else:
raise ExtractorError('Invalid URL', expected=True)
ks = params.get('flashvars[ks]', [None])[0]
source_url = smuggled_data.get('source_url')
if source_url:
referrer = base64.b64encode(
'://'.join(compat_urlparse.urlparse(source_url)[:2])
.encode('utf-8')).decode('utf-8')
else:
referrer = None
def sign_url(unsigned_url):
if ks:
unsigned_url += '/ks/%s' % ks
if referrer:
unsigned_url += '?referrer=%s' % referrer
return unsigned_url
data_url = info['dataUrl']
if '/flvclipper/' in data_url:
data_url = re.sub(r'/flvclipper/.*', '/serveFlavor', data_url)
formats = []
for f in flavor_assets:
# Continue if asset is not ready
if f.get('status') != 2:
continue
# Original format that's not available (e.g. kaltura:1926081:0_c03e1b5g)
# skip for now.
if f.get('fileExt') == 'chun':
continue
# DRM-protected video, cannot be decrypted
if f.get('fileExt') == 'wvm':
continue
if not f.get('fileExt'):
# QT indicates QuickTime; some videos have broken fileExt
if f.get('containerFormat') == 'qt':
f['fileExt'] = 'mov'
else:
f['fileExt'] = 'mp4'
video_url = sign_url(
'%s/flavorId/%s' % (data_url, f['id']))
# audio-only has no videoCodecId (e.g. kaltura:1926081:0_c03e1b5g
# -f mp4-56)
vcodec = 'none' if 'videoCodecId' not in f and f.get(
'frameRate') == 0 else f.get('videoCodecId')
formats.append({
'format_id': '%(fileExt)s-%(bitrate)s' % f,
'ext': f.get('fileExt'),
'tbr': int_or_none(f['bitrate']),
'fps': int_or_none(f.get('frameRate')),
'filesize_approx': int_or_none(f.get('size'), invscale=1024),
'container': f.get('containerFormat'),
'vcodec': vcodec,
'height': int_or_none(f.get('height')),
'width': int_or_none(f.get('width')),
'url': video_url,
})
if '/playManifest/' in data_url:
m3u8_url = sign_url(data_url.replace(
'format/url', 'format/applehttp'))
formats.extend(self._extract_m3u8_formats(
m3u8_url, entry_id, 'mp4', 'm3u8_native',
m3u8_id='hls', fatal=False))
self._sort_formats(formats)
subtitles = {}
if captions:
for caption in captions.get('objects', []):
# Continue if caption is not ready
if caption.get('status') != 2:
continue
if not caption.get('id'):
continue
caption_format = int_or_none(caption.get('format'))
subtitles.setdefault(caption.get('languageCode') or caption.get('language'), []).append({
'url': '%s/api_v3/service/caption_captionasset/action/serve/captionAssetId/%s' % (self._SERVICE_URL, caption['id']),
'ext': caption.get('fileExt') or self._CAPTION_TYPES.get(caption_format) or 'ttml',
})
return {
'id': entry_id,
'title': info['name'],
'formats': formats,
'subtitles': subtitles,
'description': clean_html(info.get('description')),
'thumbnail': info.get('thumbnailUrl'),
'duration': info.get('duration'),
'timestamp': info.get('createdAt'),
'uploader_id': info.get('userId') if info.get('userId') != 'None' else None,
'view_count': info.get('plays'),
}
| 40.911111 | 178 | 0.47094 | [
"Unlicense"
] | inshadsajeev143/utube | youtube_dl/extractor/kaltura.py | 14,728 | Python |
from conans import ConanFile, CMake, tools
import os
class TestPackageConan(ConanFile):
settings = "os", "arch", "compiler", "build_type"
generators = "cmake", "cmake_find_package_multi"
def build_requirements(self):
if self.settings.os == "Macos" and self.settings.arch == "armv8":
# Workaround for CMake bug with error message:
# Attempting to use @rpath without CMAKE_SHARED_LIBRARY_RUNTIME_C_FLAG being
# set. This could be because you are using a Mac OS X version less than 10.5
# or because CMake's platform configuration is corrupt.
# FIXME: Remove once CMake on macOS/M1 CI runners is upgraded.
self.build_requires("cmake/3.22.3")
def build(self):
cmake = CMake(self)
cmake.configure()
cmake.build()
def test(self):
if not tools.cross_building(self):
bin_path = os.path.join("bin", "test_package")
self.run(bin_path, run_environment=True)
| 37.333333 | 88 | 0.645833 | [
"MIT"
] | AnotherFoxGuy/conan-center-index | recipes/librasterlite/all/test_package/conanfile.py | 1,008 | Python |
""" Tests for grph.py """
import os
import platform
import random
import re
import string
from subprocess import getstatusoutput
PRG = './grph.py'
RUN = f'python {PRG}' if platform.system() == 'Windows' else PRG
SAMPLE1 = './tests/inputs/1.fa'
SAMPLE2 = './tests/inputs/2.fa'
SAMPLE3 = './tests/inputs/3.fa'
# --------------------------------------------------
def test_exists() -> None:
""" Program exists """
assert os.path.isfile(PRG)
# --------------------------------------------------
def test_usage() -> None:
""" Usage """
rv, out = getstatusoutput(RUN)
assert rv > 0
assert out.lower().startswith('usage:')
# --------------------------------------------------
def test_bad_k() -> None:
""" Dies on bad k """
k = random.choice(range(-10, 1))
rv, out = getstatusoutput(f'{RUN} -k {k} {SAMPLE1}')
assert rv != 0
assert out.lower().startswith('usage:')
assert re.search(f'-k "{k}" must be > 0', out)
# --------------------------------------------------
def test_bad_file() -> None:
""" Dies on bad file """
bad = random_string()
rv, out = getstatusoutput('{} {}'.format(RUN, bad))
assert rv != 0
assert out.lower().startswith('usage:')
assert re.search(f"No such file or directory: '{bad}'", out)
# --------------------------------------------------
def run(in_file: str, k: int) -> None:
""" Run with args """
out_file = '.'.join([in_file, str(k), 'out'])
assert os.path.isfile(out_file)
expected = open(out_file).read().rstrip()
cmd = '{} -k {} {} | sort'.format(RUN, k, in_file)
rv, out = getstatusoutput(cmd)
assert rv == 0
assert out.rstrip() == expected
# --------------------------------------------------
def test_01():
""" Runs OK """
run(SAMPLE1, 3)
# --------------------------------------------------
def test_02() -> None:
""" Runs OK """
run(SAMPLE1, 4)
# --------------------------------------------------
def test_03() -> None:
""" Runs OK """
run(SAMPLE1, 5)
# --------------------------------------------------
def test_04() -> None:
""" Runs OK """
run(SAMPLE2, 3)
# --------------------------------------------------
def test_05() -> None:
""" Runs OK """
run(SAMPLE2, 4)
# --------------------------------------------------
def test_06() -> None:
""" Runs OK """
run(SAMPLE2, 5)
# --------------------------------------------------
def test_07() -> None:
""" Runs OK """
run(SAMPLE3, 3)
# --------------------------------------------------
def test_08() -> None:
""" Runs OK """
run(SAMPLE3, 4)
# --------------------------------------------------
def test_09() -> None:
""" Runs OK """
run(SAMPLE3, 5)
# --------------------------------------------------
def random_string() -> str:
"""Generate a random string"""
return ''.join(
random.sample(string.ascii_letters + string.digits,
k=random.randint(5, 10)))
| 21.561151 | 64 | 0.414414 | [
"MIT"
] | BioPeterson/biofx_python | 09_grph/tests/grph_test.py | 2,997 | Python |
import six
import pytest
from pymemcache.test.utils import MockMemcacheClient
@pytest.mark.unit()
def test_get_set():
client = MockMemcacheClient()
assert client.get(b"hello") is None
client.set(b"hello", 12)
assert client.get(b"hello") == 12
@pytest.mark.unit()
def test_get_set_unicode_key():
client = MockMemcacheClient()
assert client.get(u"hello") is None
client.set(b"hello", 12)
assert client.get(u"hello") == 12
@pytest.mark.unit()
def test_get_set_non_ascii_value():
client = MockMemcacheClient()
assert client.get(b"hello") is None
# This is the value of msgpack.packb('non_ascii')
non_ascii_str = b'\xa9non_ascii'
client.set(b"hello", non_ascii_str)
assert client.get(b"hello") == non_ascii_str
@pytest.mark.unit()
def test_get_many_set_many():
client = MockMemcacheClient()
client.set(b"h", 1)
result = client.get_many([b"h", b"e", b"l", b"o"])
assert result == {b"h": 1}
# Convert keys into bytes
d = dict((k.encode('ascii'), v)
for k, v in six.iteritems(dict(h=1, e=2, z=3)))
client.set_many(d)
assert client.get_many([b"h", b"e", b"z", b"o"]) == d
@pytest.mark.unit()
def test_get_many_set_many_non_ascii_values():
client = MockMemcacheClient()
# These are the values of calling msgpack.packb() on '1', '2', and '3'
non_ascii_1 = b'\xa11'
non_ascii_2 = b'\xa12'
non_ascii_3 = b'\xa13'
client.set(b"h", non_ascii_1)
result = client.get_many([b"h", b"e", b"l", b"o"])
assert result == {b"h": non_ascii_1}
# Convert keys into bytes
d = dict((k.encode('ascii'), v)
for k, v in six.iteritems(
dict(h=non_ascii_1, e=non_ascii_2, z=non_ascii_3)
))
client.set_many(d)
assert client.get_many([b"h", b"e", b"z", b"o"]) == d
@pytest.mark.unit()
def test_add():
client = MockMemcacheClient()
client.add(b"k", 2)
assert client.get(b"k") == 2
client.add(b"k", 25)
assert client.get(b"k") == 2
@pytest.mark.unit()
def test_delete():
client = MockMemcacheClient()
client.add(b"k", 2)
assert client.get(b"k") == 2
client.delete(b"k")
assert client.get(b"k") is None
@pytest.mark.unit()
def test_incr_decr():
client = MockMemcacheClient()
client.add(b"k", 2)
client.incr(b"k", 4)
assert client.get(b"k") == 6
client.decr(b"k", 2)
assert client.get(b"k") == 4
@pytest.mark.unit()
def test_prepend_append():
client = MockMemcacheClient()
client.set(b"k", '1')
client.append(b"k", 'a')
client.prepend(b"k", 'p')
assert client.get(b"k") == b'p1a'
| 22.844828 | 74 | 0.619623 | [
"Apache-2.0"
] | FerasAlazzeh/pymemcache | pymemcache/test/test_utils.py | 2,650 | Python |
# Copyright 2021 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Async pipeline for testing."""
from tfx.dsl.compiler import compiler
from tfx.dsl.component.experimental.annotations import InputArtifact
from tfx.dsl.component.experimental.annotations import OutputArtifact
from tfx.dsl.component.experimental.annotations import Parameter
from tfx.dsl.component.experimental.decorators import component
from tfx.orchestration import pipeline as pipeline_lib
from tfx.proto.orchestration import pipeline_pb2
from tfx.types import standard_artifacts
@component
def _example_gen(examples: OutputArtifact[standard_artifacts.Examples]):
del examples
# pytype: disable=wrong-arg-types
@component
def _transform(
examples: InputArtifact[standard_artifacts.Examples],
transform_graph: OutputArtifact[standard_artifacts.TransformGraph],
a_param: Parameter[int]):
del examples, transform_graph, a_param
# pytype: enable=wrong-arg-types
@component
def _trainer(examples: InputArtifact[standard_artifacts.Examples],
transform_graph: InputArtifact[standard_artifacts.TransformGraph],
model: OutputArtifact[standard_artifacts.Model]):
del examples, transform_graph, model
def create_pipeline() -> pipeline_pb2.Pipeline:
"""Creates an async pipeline for testing."""
# pylint: disable=no-value-for-parameter
example_gen = _example_gen().with_id('my_example_gen')
transform = _transform(
examples=example_gen.outputs['examples'],
a_param=10).with_id('my_transform')
trainer = _trainer(
examples=example_gen.outputs['examples'],
transform_graph=transform.outputs['transform_graph']).with_id(
'my_trainer')
# pylint: enable=no-value-for-parameter
pipeline = pipeline_lib.Pipeline(
pipeline_name='my_pipeline',
pipeline_root='/path/to/root',
components=[
example_gen,
transform,
trainer,
],
execution_mode=pipeline_lib.ExecutionMode.ASYNC)
dsl_compiler = compiler.Compiler()
compiled_pipeline: pipeline_pb2.Pipeline = dsl_compiler.compile(pipeline)
# Compiler does not support setting min_count yet, so we mutate the proto
# explicitly for testing.
trainer = compiled_pipeline.nodes[2].pipeline_node
assert trainer.node_info.id == 'my_trainer'
for value in trainer.inputs.inputs.values():
value.min_count = 1
return compiled_pipeline
| 35.337349 | 79 | 0.762359 | [
"Apache-2.0"
] | Avnish327030/tfx | tfx/orchestration/experimental/core/testing/test_async_pipeline.py | 2,933 | Python |
import os
import datetime
TOKEN = os.environ.get("TOKEN")
DB_PATH = os.environ.get("DB_PATH")
EXPIRATION_TIMEDELTA = datetime.timedelta(days=7)
| 18.25 | 49 | 0.767123 | [
"MIT"
] | QwertygidQ/ChessBot | chessbot/config.py | 146 | Python |
"""Reducing Functions in Python
These are functions that recombine an iterable recursively, ending up with a single return value
Also called accumulators, aggregators, or folding functions
Example: Finding the maximum value in an iterable
a0, a1, a2, ..., aN-1
max(a, b) -> maximum of a and b
result = a0
result = max(result, a1)
result = max(result, a2)
...
result = max(result, an-1)
# max value in a0, a1, a2, ..., an-1
the special case of sequences
(i.e. we can use indexes to access elements in the sequence)
Using a loop
"""
l = [5, 8, 6, 10, 9]
max_value = lambda a, b: a if a > b else b
def max_sequence(sequence):
    result = sequence[0]
    for e in sequence[1:]:
        result = max_value(result, e)
    return result
# Notice the sequence of steps for l = [5, 8, 6, 10, 9]:
#   result = 5
#   result = max(5, 8)  = 8
#   result = max(8, 6)  = 8
#   result = max(8, 10) = 10
#   result = max(10, 9) = 10
#   result -> 10
# To calculate the min, I just need to change max to min:
min_value = lambda a, b: a if a < b else b
def min_sequence(sequence):
    result = sequence[0]
    for e in sequence[1:]:
        result = min_value(result, e)
    return result
# I could just write a generic reducing function instead:
def _reduce(fn, sequence):
    result = sequence[0]
    for x in sequence[1:]:
        result = fn(result, x)
    return result
_reduce(lambda a, b: a if a > b else b, l)  # maximum -> 10
_reduce(lambda a, b: a if a < b else b, l)  # minimum -> 5
# Adding all the elements in a list:
add = lambda a, b: a + b
# result = 5
# result = add(5, 8)  = 13
# result = add(13, 6) = 19
# result = add(19, 10) = 29
# result = add(29, 9)  = 38
# result -> 38
_reduce(add, l)
""" The functools module
Python implements a reduce function that will handle any iterable, and it works similarly to what I just wrote.
"""
from functools import reduce
l = [5, 8, 6, 10, 9]
reduce(lambda a, b: a if a > b else b, l) # max -> 10
reduce(lambda a, b: a if a < b else b, l) # min -> 5
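# reduce also accepts an optional initializer as a third argument; the accumulation
# starts from it, so an empty iterable no longer raises a TypeError:
reduce(lambda a, b: a + b, l, 0)   # sum -> 38
reduce(lambda a, b: a + b, [], 0)  # -> 0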
| 25.543478 | 105 | 0.473759 | [
"Unlicense"
] | minefarmer/deep-Dive-1 | .history/my_classes/FirstClassFunctions/reducing_functions_20210707181157.py | 3,525 | Python |
def lcm(x, y):
if x > y:
greater = x
else:
greater = y
while(True):
if((greater % x == 0) and (greater % y == 0)):
lcm = greater
break
greater += 1
return lcm
num1 = int(input("Enter first number: "))
num2 = int(input("Enter second number: "))
print("The L.C.M. of", num1,"and", num2,"is", lcm(num1, num2))
| 14.185185 | 62 | 0.496084 | [
"MIT"
] | mariamaafreen/python-exploring-battle-1 | lcm.py | 383 | Python |
'''Write a program that reads an integer from the input and checks whether
the number has at least one digit with an adjacent digit equal to it.
If such a digit exists, print "sim"; if not, print "não". '''
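# Worked example: for 1223 the loop compares 3 with 2 (no match), then 2 with 2
# (match), so "sim" is printed and the loop stops.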
num = int(input('Digite um número inteiro: '))
alg1 = num % 10
while num > 0:
num = num // 10
alg2 = num % 10
if alg1 == alg2:
print('sim')
break
else:
if num > 0:
alg1 = alg2
else:
break
if num == 0:
print('não')
| 25.8 | 76 | 0.581395 | [
"MIT"
] | eduardodarocha/Introducao_Ciencia_da_Computacao_com_Python_Parte_1_Coursera | Exercicios/digitos_adjacentes.py | 524 | Python |
#from fake_useragent import UserAgent
from selenium import webdriver
from selenium.webdriver.common.by import By
import time
import os
# Subscription notifications; 0 disables them
dingyue = 1
# SMS (send address to phone); 0 disables it
duanxin = 1
# Set the page range to crawl
start_page = 1
end_page = 1
def main():
    # The current version only supports subscribing one phone number
_list = [
['18888888888', '张三']
]
_run(_list)
def _run(_list):
delay = 0
option = webdriver.ChromeOptions()
# option.add_argument('--disable-gpu')
option.add_argument('--incognito')
option.add_argument('blink-settings=imagesEnabled=false')
option.add_argument('--headless')
ua = 'Mozilla/5.0 (Windows; U; MSIE 9.0; Windows NT 9.0; en-US)'
option.add_argument('--user-agent={}'.format(ua))
driver = webdriver.Chrome(options=option)
driver.execute_cdp_cmd('Page.addScriptToEvaluateOnNewDocument', {
'source': 'Object.defineProperty(navigator, "webdriver", {get: () => undefined})'})
driver.maximize_window()
driver.get("https://www.baidu.com/")
citys = ["jn", "qingdao", "yantai", "dongying", "weihai", "zibo", "weifang", "liaocheng", "heze", "rizhao", "linyi",
"dezhou", "jining", "zaozhuang", "binzhou", "taian", "laiwu", "laiyang", "xintai", "hf", "liuan", "anqing",
"xuancheng", "ahsuzhou", "chaohu", "chizhou", "huainan", "chuzhou", "wuhu", "bengbu", "tongling", "fuyang",
"maanshan", "huangshan", "huaibei", "bozhou", "dangtu", "huoqiu", "sh", "hz", "ningbo", "shaoxing", "taizhou",
"jinhua", "wenzhou", "jiaxing", "lishui", "huzhou", "zhoushan", "quzhou", "nj", "suzhou", "wuxi", "xuzhou",
"changzhou", "nantong", "huaian", "changshu", "kunshan", "yx", "yangzhou", "jiangyin", "tz", "zhenjiang", "suqian",
"lyg", "taicang", "yancheng", "zhangjiagang", "liyang", "donghai", "baoying", "rugao", "yizheng", "haian", "haimen",
"gaoyou", "qidong", "rudong", "xinghua", "jingjiang", "taixing", "nc", "ganzhou", "shangrao", "jxfc", "jiujiang",
"jingan", "yichun", "xinyu", "ruijin", "jian", "pingxiang", "jingdezhen", "fuzhou", "yingtan", "ys", "quanzhou",
"fz", "xiamen", "zhangzhou", "putian", "longyan", "nanping", "sanming", "ningde", "gz", "sz", "foshan", "zhongshan",
"dongguan", "zhuhai", "shantou", "zhaoqing", "jiangmen", "huizhou", "jieyang", "meizhou", "heyuan", "qingyuan",
"zhanjiang", "yangjiang", "shaoguan", "maoming", "yunfu", "chaozhou", "shanwei", "puning", "sanya", "hn", "hk",
"guilin", "nn", "liuzhou", "beihai", "qinzhou", "laibin", "yulin", "fangchenggang", "guigang", "wuzhou", "chongzuo",
"hechi", "baise", "hezhou", "hengxian", "by", "wh", "yichang", "huangshi", "jingzhou", "lhk", "yicheng", "zaoyang",
"shiyan", "xianning", "xiaogan", "jingmen", "xiangyang", "huanggang", "ezhou", "suizhou", "xiantao", "tianmen",
"qianjiang", "enshi", "shennongjia", "zz", "luoyang", "jiaozuo", "xinyang", "nanyang", "shangqiu", "anyang",
"pingdingshan", "ruzhou", "xinxiang", "wugang", "zhumadian", "kaifeng", "xuchang", "luohe", "hebi", "sanmenxia",
"zhoukou", "puyang", "yuzhou", "yongcheng", "changge", "yanling", "cs", "xiangtan", "zhuzhou", "changde", "chenzhou",
"hengyang", "huaihua", "loudi", "yiyang", "yongzhou", "yueyang", "zjj", "shaoyang", "xiangxi", "changshada", "heb",
"hailin", "daqing", "zhaoyuan", "zhaozhou", "mudanjiang", "qiqihaer", "suihua", "jixi", "jiamusi", "heihe",
"hljyichun", "zhaodong", "anda", "shuangyashan", "hegang", "qitaihe", "sy", "dl", "anshan", "panjin", "yingkou",
"huludao", "wafangdian", "zhuanghe", "jinzhou", "dandong", "chaoyang", "benxi", "liaoyang", "fushun", "fuxin",
"tieling", "cc", "jl", "huadian", "yanbian", "siping", "baishan", "baicheng", "songyuan", "dehui", "tonghua", "nongan",
"liaoyuan", "jlys", "gongzhuling", "gy", "zunyi", "bijie", "qianxinan", "qiandongnan", "qiannan", "tongren", "anshun",
"lps", "cd", "mianyang", "leshan", "nanchong", "meishan", "yibin", "bazhong", "guangyuan", "luzhou", "guanghan",
"renshou", "deyang", "neijiang", "anyue", "suining", "guangan", "dazhou", "ziyang", "panzhihua", "zigong", "yaan",
"liangshan", "ganzi", "abazhou", "cq", "hechuan", "km", "dali", "qujing", "yuxi", "xishuangbanna", "lijiang",
"baoshan", "dehong", "zhaotong", "wenshan", "chuxiong", "honghe", "puer", "lincang", "diqing", "lasa", "rkz",
"changdu", "linzhi", "ty", "yuncheng", "huairen", "linfen", "datong", "jinzhong", "changzhi", "yangquan", "lvliang",
"shuozhou", "jc", "xinzhou", "hhht", "eerduosi", "baotou", "wlcb", "byne", "chifeng", "xam", "wuhai", "tl", "hlbe",
"xlglm", "alsm", "sjz", "tangshan", "baoding", "qinhuangdao", "handan", "zhangjiakou", "hengshui", "xingtai",
"langfang", "chengde", "sanhe", "yuxian", "cangzhou", "wenan", "yanjiao", "dachang", "zhuozhou", "guan", "xianghe",
"yongqing", "hbbz", "dingzhou", "gaobeidian", "changli", "xiongan", "bj", "tj", "wuqing", "wlmq", "bazhou", "yili",
"kashi", "akesu", "changji", "kzls", "hami", "klmy", "bedl", "ht", "shz", "tlf", "wjq", "qt", "kel", "alt", "tc", "lz",
"gannan", "tianshui", "qingyang", "dingxi", "pingliang", "zhangye", "jiuquan", "wuwei", "longnan", "yongdeng",
"baiyin", "yuzhong", "jinchang", "jiayuguan", "xn", "haidong", "guoluo", "haibei", "haixi", "yushu", "huangnan", "yc",
"guyuan", "wuzhong", "zhongwei", "shizuishan", "xa", "xianyang", "baoji", "weinan", "hanzhong", "ankang", "tongchuan",
"shangluo", "yl", "yanan"]
#citys = ['xm']
    # Iterate over the city list
for city in citys:
        # Open the listing page
for page in range(start_page, end_page+1):
href = "http://"+city + \
".jiwu.com/loupan/list-page"+str(page)+".html"
driver.execute_script(f'window.open("{href}", "_blank");')
driver.switch_to.window(driver.window_handles[0])
driver.close()
driver.switch_to.window(driver.window_handles[0])
time.sleep(3)
try:
driver.switch_to.alert.dismiss()
time.sleep(delay)
except:
pass
urls = driver.find_elements(
By.XPATH, "//div[@class='box new_house']/div[@class='img']/a")
houses = []
            # Collect the property links from the listing page into houses[]
for url in urls:
houses.append(url.get_attribute("href"))
            # Open and process the property pages one by one
for house in houses:
os.system('cls')
print(
'正在第{}/{}个城市{}订阅第{}/{}页,当页进度:{}/{}'.format(str(citys.index(city)+1), str(len(citys)), city, str(page), str(end_page), str(houses.index(house)+1), str(len(houses))))
try:
                    # Open the property page
driver.execute_script(f'window.open("{house}", "_blank");')
driver.switch_to.window(driver.window_handles[0])
driver.close()
driver.switch_to.window(driver.window_handles[0])
                    # Remove the blocking overlay element
try:
js = "document.getElementsByClassName('dialing-box')[0].remove();"
driver.execute_script(js)
except:
pass
                    # Perform the subscription actions
try:
for person in _list:
if(duanxin != 0):
                                ## Send the address to the phone ##
try:
driver.find_element(
By.LINK_TEXT, "[发地址到手机]").click()
except:
pass
                                # Enter the phone number
try:
driver.find_element(
By.CSS_SELECTOR, "[type='tel']").send_keys(person[0])
except:
pass
                                # Click the claim button
try:
driver.find_element(
By.CSS_SELECTOR, "[class='btn btn-green ok']").click()
except:
pass
                                # Close the popup window
try:
driver.find_element(
By.CSS_SELECTOR, "[class='close-pop']").click()
except:
pass
time.sleep(delay)
if(dingyue != 0):
                                ## Price-drop notification ##
try:
driver.find_element(
By.LINK_TEXT, "降价通知我").click()
except:
pass
                                # Enter the phone number
try:
driver.find_element(
By.CSS_SELECTOR, "[type='tel']").send_keys(person[0])
except:
pass
                                # Click submit
try:
driver.find_element(
By.CSS_SELECTOR, "[class='btn btn-green ok']").click()
except:
pass
                                # Close the popup window
try:
driver.find_element(
By.CSS_SELECTOR, "[class='close-pop']").click()
except:
pass
time.sleep(delay)
                                ## Group-bargaining sign-up ##
try:
driver.find_element(
By.LINK_TEXT, "立即报名").click()
except:
pass
                                # Enter the phone number
try:
driver.find_element(
By.CSS_SELECTOR, "[type='tel']").send_keys(person[0])
except:
pass
                                # Click submit
try:
driver.find_element(
By.CSS_SELECTOR, "[class='btn btn-green ok']").click()
except:
pass
                                # Close the popup window
try:
driver.find_element(
By.CSS_SELECTOR, "[class='close-pop']").click()
except:
pass
time.sleep(delay)
                                ## New-information notification ##
try:
driver.find_element(
By.LINK_TEXT, "新信息通知").click()
except:
pass
                                # Enter the phone number
try:
driver.find_element(
By.CSS_SELECTOR, "[type='tel']").send_keys(person[0])
except:
pass
                                # Click submit
try:
driver.find_element(
By.CSS_SELECTOR, "[class='btn btn-green ok']").click()
except:
pass
                                # Close the popup window
try:
driver.find_element(
By.CSS_SELECTOR, "[class='close-pop']").click()
except:
pass
time.sleep(delay)
                                ## Notify me when sales open ##
try:
driver.find_element(
By.LINK_TEXT, "[开盘通知我]").click()
except:
pass
                                # Enter the phone number
try:
driver.find_element(
By.CSS_SELECTOR, "[type='tel']").send_keys(person[0])
except:
pass
                                # Click the claim button
try:
driver.find_element(
By.CSS_SELECTOR, "[class='btn btn-green ok']").click()
except:
pass
                                # Close the popup window
try:
driver.find_element(
By.CSS_SELECTOR, "[class='close-pop']").click()
except:
pass
time.sleep(delay)
                                ## Notify me of new updates ##
try:
driver.find_element(
By.LINK_TEXT, "新动态通知我").click()
except:
pass
                                # Enter the phone number
try:
driver.find_element(
By.CSS_SELECTOR, "[type='tel']").send_keys(person[0])
except:
pass
                                # Click submit
try:
driver.find_element(
By.CSS_SELECTOR, "[class='btn btn-green ok']").click()
except:
pass
                                # Close the popup window
try:
driver.find_element(
By.CSS_SELECTOR, "[class='close-pop']").click()
except:
pass
except:
pass
except:
pass
if __name__ == "__main__":
main()
| 53.621993 | 186 | 0.373878 | [
"Apache-2.0"
] | 006hjy/buy_house | main/JW/main.py | 16,048 | Python |
#!/usr/bin/env python
from django.urls import reverse_lazy
from django.shortcuts import Http404
from django.utils.translation import ugettext as _
from vanilla import ListView, CreateView, DetailView, UpdateView, DeleteView, TemplateView
from .forms import ArticleForm, ArticleSearchForm
from .models import Article, Folder
from haystack.generic_views import SearchView
from haystack.query import SearchQuerySet
class ArticleList(ListView):
model = Article
paginate_by = 20
#class ArticleCreate(CreateView):
# model = Article
# form_class = ArticleForm
# success_url = reverse_lazy('bibloi:list')
class ArticleDetail(DetailView):
model = Article
def get_context_data(self, **kwargs):
context = super(ArticleDetail, self).get_context_data(**kwargs)
return context
#class ArticleUpdate(UpdateView):
# model = Article
# form_class = ArticleForm
# success_url = reverse_lazy('bibloi:list')
#class ArticleDelete(DeleteView):
# model = Article
# success_url = reverse_lazy('bibloi:list')
class ArticleSearch(SearchView):
template_name = 'search/search.html'
form_class = ArticleSearchForm
queryset = SearchQuerySet().order_by('-date')
paginate_by = 5
def get_context_data(self, **kwargs):
context = super(ArticleSearch, self).get_context_data(**kwargs)
return context
class FolderView(ListView):
model = Article
template_name = 'bibloi/folder_browse.html'
parent = None
def get_queryset(self):
path = self.kwargs.get('path', '')
folders = path.split('/')
for folder in folders:
try:
if not self.parent:
if folder:
self.parent = Folder.objects.get(name=folder)
else:
self.parent = self.parent.get_children().get(name=folder)
except Folder.DoesNotExist:
raise Http404(_('Folder does not exist'))
return self.model.objects.filter(folder=self.parent)
def get_context_data(self, **kwargs):
context = super(FolderView, self).get_context_data(**kwargs)
context['parent_folders'] = self.parent.parent_folders if self.parent else []
context['current_folder'] = self.parent
if self.parent:
context['folders'] = self.parent.get_children()
else:
context['folders'] = Folder.objects.filter(parent=self.parent)
return context
class TasksView(TemplateView):
template_name = 'tasks.html'
| 29.569767 | 90 | 0.672827 | [
"BSD-2-Clause"
] | joyinsky/pergamum | pergamum/bibloi/views.py | 2,543 | Python |
"""
Get absolute URLs for static assets.
"""
from urllib.parse import ParseResult, urlparse, urlunparse
from django import template
from django.templatetags.static import StaticNode
from django.urls import reverse
register = template.Library()
class AbstaticNode(StaticNode):
"""
{% abstatic %} is like Django's {% static %} tag,
but always returns an absolute URI.
"""
def url(self, context) -> str:
url_to_asset = super().url(context)
parsed_url = urlparse(url_to_asset)
assert parsed_url.path
if is_absolute_uri(parsed_url):
return url_to_asset
# Delegate to Django to provide its own schema and authority:
path_and_file = to_pf_url(parsed_url)
return context["request"].build_absolute_uri(path_and_file)
def is_absolute_uri(url: ParseResult) -> bool:
"""
Returns True if the parsed result is an "absolute URI".
We define an "absolute URI" as containing at mimimum a **scheme** and an
**host** (a.k.a., an authority).
It must contain SH according to the nomenclature defined in this proposal:
https://gist.github.com/andrewdotn/eebeaa60d48c3c0f6f9fc75f0ede8d03#proposal
Examples of absolute URIs:
[SH ] https://example.com
[SHP ] https://example.com/
[SHPF] https://example.com/foo/cat.gif
What are NOT absolute URIs:
[ F] cat.gif
[ P ] /
[ PF] /foo/cat.gif
[ HPF] //example.com/foo/cat.gif†
[S F] https:cat.gif (uncommon)
[S PF] https:/foo/cat.gif (uncommon)
    †: This is called a "network-path reference", and relies on inferring the scheme
based on an existing base URI. For our purposes, this is not "absolute" enough!
Source: https://tools.ietf.org/html/rfc3986#section-4.2
"""
# netloc == authority, i.e., [username[:password]@]example.com[:443]
if url.scheme and url.netloc:
return True
return False
def to_pf_url(url: ParseResult):
"""
Returns *P*ath and *F*ile as defined here:
https://gist.github.com/andrewdotn/eebeaa60d48c3c0f6f9fc75f0ede8d03#proposal
"""
return urlunparse(url._replace(scheme="", netloc=""))
@register.tag
def abstatic(parser, token):
"""
Given a relative path to a static asset, return the absolute path to the
asset.
Derived from: https://github.com/django/django/blob/635d53a86a36cde7866b9caefeb64d809e6bfcd9/django/templatetags/static.py#L143-L159
"""
return AbstaticNode.handle_token(parser, token)
| 29.988235 | 136 | 0.66889 | [
"Apache-2.0"
] | Madoshakalaka/morphodict | src/CreeDictionary/CreeDictionary/templatetags/url_extras.py | 2,553 | Python |
import pdf_to_json as p2j
import json
url = "file:data/multilingual/Latn.SUS/Serif_8/udhr_Latn.SUS_Serif_8.pdf"
lConverter = p2j.pdf_to_json.pdf_to_json_converter()
lConverter.mImageHashOnly = True
lDict = lConverter.convert(url)
print(json.dumps(lDict, indent=4, ensure_ascii=False, sort_keys=True))
| 30.3 | 73 | 0.811881 | [
"BSD-3-Clause"
] | antoinecarme/pdf_to_json_tests | data/multilingual/Latn.SUS/Serif_8/pdf_to_json_test_Latn.SUS_Serif_8.py | 303 | Python |
"""
hubspot engagements api
"""
from hubspot3.base import BaseClient
from hubspot3.utils import get_log
from typing import Dict, List
ENGAGEMENTS_API_VERSION = "1"
class EngagementsClient(BaseClient):
"""
The hubspot3 Engagements client uses the _make_request method to call the API
for data. It returns a python object translated from the json returned
"""
def __init__(self, *args, **kwargs) -> None:
super(EngagementsClient, self).__init__(*args, **kwargs)
self.log = get_log("hubspot3.engagements")
def _get_path(self, subpath: str) -> str:
"""get full subpath"""
return f"engagements/v{self.options.get('version') or ENGAGEMENTS_API_VERSION}/{subpath}"
def get(self, engagement_id, **options):
"""Get a HubSpot engagement."""
return self._call(f"engagements/{engagement_id}", method="GET", **options)
def get_associated(self, object_type, object_id, **options) -> List[Dict]:
"""
get all engagements associated with the given object
:param object_type: type of object to get associations on [CONTACT, COMPANY, DEAL]
:param object_id: ID of the object to get associations on
"""
finished = False
output = [] # type: List[Dict]
query_limit = 100 # Max value according to docs
offset = 0
while not finished:
batch = self._call(
f"engagements/associated/{object_type}/{object_id}/paged",
method="GET",
params={"limit": query_limit, "offset": offset},
**options,
)
output.extend(batch["results"])
finished = not batch["hasMore"]
offset = batch["offset"]
return output
def create(self, data=None, **options):
data = data or {}
return self._call("engagements", data=data, method="POST", **options)
def update(self, key, data=None, **options):
data = data or {}
return self._call(f"engagements/{key}", data=data, method="PUT", **options)
def patch(self, key, data=None, **options):
data = data or {}
return self._call(f"engagements/{key}", data=data, method="PATCH", **options)
def get_all(self, **options) -> List[Dict]:
"""get all engagements"""
finished = False
output = [] # type: List[Dict]
query_limit = 250 # Max value according to docs
offset = 0
while not finished:
batch = self._call(
"engagements/paged",
method="GET",
params={"limit": query_limit, "offset": offset},
**options,
)
output.extend(batch["results"])
finished = not batch["hasMore"]
offset = batch["offset"]
return output
def get_recently_modified(self, since, **options) -> List[Dict]:
"""get recently modified engagements"""
finished = False
output = [] # type: List[Dict]
query_limit = 100 # Max value according to docs
offset = 0
while not finished:
batch = self._call(
"engagements/recent/modified",
method="GET",
params={"limit": query_limit, "offset": offset, "since": since},
**options,
)
output.extend(batch["results"])
finished = not batch["hasMore"]
offset = batch["offset"]
return output
| 34.441176 | 97 | 0.575576 | [
"MIT"
] | benaduggan/hubspot3 | hubspot3/engagements.py | 3,513 | Python |
# coding: utf-8
"""
Copyright 2017 Square, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from pprint import pformat
from six import iteritems
import re
class V1ListItemsRequest(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, batch_token=None):
"""
V1ListItemsRequest - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'batch_token': 'str'
}
self.attribute_map = {
'batch_token': 'batch_token'
}
self._batch_token = batch_token
@property
def batch_token(self):
"""
Gets the batch_token of this V1ListItemsRequest.
A pagination cursor to retrieve the next set of results for your original query to the endpoint.
:return: The batch_token of this V1ListItemsRequest.
:rtype: str
"""
return self._batch_token
@batch_token.setter
def batch_token(self, batch_token):
"""
Sets the batch_token of this V1ListItemsRequest.
A pagination cursor to retrieve the next set of results for your original query to the endpoint.
:param batch_token: The batch_token of this V1ListItemsRequest.
:type: str
"""
self._batch_token = batch_token
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
| 29.198347 | 104 | 0.579394 | [
"Apache-2.0"
] | reduceus/connect-python-sdk | squareconnect/models/v1_list_items_request.py | 3,533 | Python |
print(2012)
print(6)
print(16)
| 7.75 | 11 | 0.709677 | [
"MIT"
] | jaredliw/mcc-2015 | 9-days-of-code/day-1/02 Order of Evaluation.py | 31 | Python |
def valid(ps, a, n):
ps = set(ps)
for i in xrange(1, n):
for j in xrange(1, n):
for k in xrange(i):
for l in xrange(j):
if a[i][j] != -1 or i*n+j in ps: continue
if a[k][l] != -1 or k*n+l in ps: continue
if a[i][l] != -1 or i*n+l in ps: continue
if a[k][j] != -1 or k*n+j in ps: continue
return False
return True
def dfs(idx, cur, n, a, b, r, c, ans):
if idx == n*n:
if not valid(cur, a, n): return
t = 0
for x in cur:
i, j = divmod(x, n)
if a[i][j] != -1: continue
t += b[i][j]
ans[0] = min(ans[0], t)
return
dfs(idx+1, cur, n, a, b, r, c, ans)
cur += idx,
dfs(idx+1, cur, n, a, b, r, c, ans)
cur.pop()
def solve(cid):
n = int(raw_input())
a, b = [], []
for _ in xrange(n):
a += map(int, raw_input().split()),
for _ in xrange(n):
b += map(int, raw_input().split()),
r = map(int, raw_input().split())
c = map(int, raw_input().split())
ans = [float('inf')]
dfs(0, [], n, a, b, r, c, ans)
print 'Case #{}: {}'.format(cid, ans[0])
for cid in xrange(1, int(raw_input())+1):
solve(cid)
| 29.272727 | 61 | 0.431677 | [
"MIT"
] | mingweihe/kickstart | 2021/roundA/D_Checksum.py | 1,288 | Python |
# coding: utf-8
"""
Swaggy Jenkins
Jenkins API clients generated from Swagger / Open API specification # noqa: E501
The version of the OpenAPI document: 1.1.2-pre.0
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import datetime
import openapi_client
from openapi_client.models.queue import Queue # noqa: E501
from openapi_client.rest import ApiException
class TestQueue(unittest.TestCase):
"""Queue unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def make_instance(self, include_optional):
"""Test Queue
include_option is a boolean, when False only required
params are included, when True both required and
optional params are included """
# model = openapi_client.models.queue.Queue() # noqa: E501
if include_optional :
return Queue(
_class = '',
items = [
openapi_client.models.queue_blocked_item.QueueBlockedItem(
_class = '',
actions = [
openapi_client.models.cause_action.CauseAction(
_class = '',
causes = [
openapi_client.models.cause_user_id_cause.CauseUserIdCause(
_class = '',
short_description = '',
user_id = '',
user_name = '', )
], )
],
blocked = True,
buildable = True,
id = 56,
in_queue_since = 56,
params = '',
stuck = True,
task = openapi_client.models.free_style_project.FreeStyleProject(
_class = '',
name = '',
url = '',
color = '',
description = '',
display_name = '',
display_name_or_null = '',
full_display_name = '',
full_name = '',
buildable = True,
builds = [
openapi_client.models.free_style_build.FreeStyleBuild(
_class = '',
number = 56,
url = '',
building = True,
description = '',
display_name = '',
duration = 56,
estimated_duration = 56,
executor = '',
full_display_name = '',
id = '',
keep_log = True,
queue_id = 56,
result = '',
timestamp = 56,
built_on = '',
change_set = openapi_client.models.empty_change_log_set.EmptyChangeLogSet(
_class = '',
kind = '', ), )
],
first_build = openapi_client.models.free_style_build.FreeStyleBuild(
_class = '',
number = 56,
url = '',
building = True,
description = '',
display_name = '',
duration = 56,
estimated_duration = 56,
executor = '',
full_display_name = '',
id = '',
keep_log = True,
queue_id = 56,
result = '',
timestamp = 56,
built_on = '', ),
health_report = [
openapi_client.models.free_style_projecthealth_report.FreeStyleProjecthealthReport(
description = '',
icon_class_name = '',
icon_url = '',
score = 56,
_class = '', )
],
in_queue = True,
keep_dependencies = True,
last_build = openapi_client.models.free_style_build.FreeStyleBuild(
_class = '',
number = 56,
url = '',
building = True,
description = '',
display_name = '',
duration = 56,
estimated_duration = 56,
executor = '',
full_display_name = '',
id = '',
keep_log = True,
queue_id = 56,
result = '',
timestamp = 56,
built_on = '', ),
last_completed_build = openapi_client.models.free_style_build.FreeStyleBuild(
_class = '',
number = 56,
url = '',
building = True,
description = '',
display_name = '',
duration = 56,
estimated_duration = 56,
executor = '',
full_display_name = '',
id = '',
keep_log = True,
queue_id = 56,
result = '',
timestamp = 56,
built_on = '', ),
last_failed_build = '',
last_stable_build = openapi_client.models.free_style_build.FreeStyleBuild(
_class = '',
number = 56,
url = '',
building = True,
description = '',
display_name = '',
duration = 56,
estimated_duration = 56,
executor = '',
full_display_name = '',
id = '',
keep_log = True,
queue_id = 56,
result = '',
timestamp = 56,
built_on = '', ),
last_successful_build = openapi_client.models.free_style_build.FreeStyleBuild(
_class = '',
number = 56,
url = '',
building = True,
description = '',
display_name = '',
duration = 56,
estimated_duration = 56,
executor = '',
full_display_name = '',
id = '',
keep_log = True,
queue_id = 56,
result = '',
timestamp = 56,
built_on = '', ),
last_unstable_build = '',
last_unsuccessful_build = '',
next_build_number = 56,
queue_item = '',
concurrent_build = True,
scm = openapi_client.models.null_scm.NullSCM(
_class = '', ), ),
url = '',
why = '',
buildable_start_milliseconds = 56, )
]
)
else :
return Queue(
)
def testQueue(self):
"""Test Queue"""
inst_req_only = self.make_instance(include_optional=False)
inst_req_and_optional = self.make_instance(include_optional=True)
if __name__ == '__main__':
unittest.main()
| 45.403756 | 115 | 0.314859 | [
"MIT"
] | cliffano/jenkins-api-clients-generator | clients/python-legacy/generated/test/test_queue.py | 9,671 | Python |
import factory
from core.models import Instance
class InstanceFactory(factory.DjangoModelFactory):
class Meta:
model = Instance
| 15.888889 | 50 | 0.755245 | [
"BSD-3-Clause"
] | profesormig/quimica3a | api/tests/factories/instance_factory.py | 143 | Python |
# Code generated by lark_sdk_gen. DO NOT EDIT.
import typing
from pylark.lark_request import Response
from pylark.api_service_hire_job_get import (
GetHireJobReq,
GetHireJobResp,
_gen_get_hire_job_req,
)
from pylark.api_service_hire_job_manager_get import (
GetHireJobManagerReq,
GetHireJobManagerResp,
_gen_get_hire_job_manager_req,
)
from pylark.api_service_hire_talent_get import (
GetHireTalentReq,
GetHireTalentResp,
_gen_get_hire_talent_req,
)
from pylark.api_service_hire_attachment_get import (
GetHireAttachmentReq,
GetHireAttachmentResp,
_gen_get_hire_attachment_req,
)
from pylark.api_service_hire_attachment_preview_get import (
GetHireAttachmentPreviewReq,
GetHireAttachmentPreviewResp,
_gen_get_hire_attachment_preview_req,
)
from pylark.api_service_hire_resume_sources_get import (
GetHireResumeSourceReq,
GetHireResumeSourceResp,
_gen_get_hire_resume_source_req,
)
from pylark.api_service_hire_note_create import (
CreateHireNoteReq,
CreateHireNoteResp,
_gen_create_hire_note_req,
)
from pylark.api_service_hire_note_update import (
UpdateHireNoteReq,
UpdateHireNoteResp,
_gen_update_hire_note_req,
)
from pylark.api_service_hire_note_get import (
GetHireNoteReq,
GetHireNoteResp,
_gen_get_hire_note_req,
)
from pylark.api_service_hire_note_list import (
GetHireNoteListReq,
GetHireNoteListResp,
_gen_get_hire_note_list_req,
)
from pylark.api_service_hire_referral_get_by_application import (
GetHireReferralByApplicationReq,
GetHireReferralByApplicationResp,
_gen_get_hire_referral_by_application_req,
)
from pylark.api_service_hire_job_process_list import (
GetHireJobProcessListReq,
GetHireJobProcessListResp,
_gen_get_hire_job_process_list_req,
)
from pylark.api_service_hire_application_create import (
CreateHireApplicationReq,
CreateHireApplicationResp,
_gen_create_hire_application_req,
)
from pylark.api_service_hire_application_terminate import (
TerminateHireApplicationReq,
TerminateHireApplicationResp,
_gen_terminate_hire_application_req,
)
from pylark.api_service_hire_application_get import (
GetHireApplicationReq,
GetHireApplicationResp,
_gen_get_hire_application_req,
)
from pylark.api_service_hire_application_list import (
GetHireApplicationListReq,
GetHireApplicationListResp,
_gen_get_hire_application_list_req,
)
from pylark.api_service_hire_application_interview_list import (
GetHireApplicationInterviewListReq,
GetHireApplicationInterviewListResp,
_gen_get_hire_application_interview_list_req,
)
from pylark.api_service_hire_offer_get_by_application import (
GetHireOfferByApplicationReq,
GetHireOfferByApplicationResp,
_gen_get_hire_offer_by_application_req,
)
from pylark.api_service_hire_offer_schema_get import (
GetHireOfferSchemaReq,
GetHireOfferSchemaResp,
_gen_get_hire_offer_schema_req,
)
from pylark.api_service_hire_transfer_onboard_by_application import (
MakeHireTransferOnboardByApplicationReq,
MakeHireTransferOnboardByApplicationResp,
_gen_make_hire_transfer_onboard_by_application_req,
)
from pylark.api_service_hire_employee_update import (
UpdateHireEmployeeReq,
UpdateHireEmployeeResp,
_gen_update_hire_employee_req,
)
from pylark.api_service_hire_employee_get_by_application import (
GetHireEmployeeByApplicationReq,
GetHireEmployeeByApplicationResp,
_gen_get_hire_employee_by_application_req,
)
from pylark.api_service_hire_employee_get import (
GetHireEmployeeReq,
GetHireEmployeeResp,
_gen_get_hire_employee_req,
)
if typing.TYPE_CHECKING:
from lark import Lark
class LarkHireService(object):
cli: "Lark"
def __init__(self, cli: "Lark"):
self.cli = cli
def get_hire_job(
self, request: GetHireJobReq, options: typing.List[str] = None
) -> typing.Tuple[GetHireJobResp, Response]:
return self.cli.raw_request(_gen_get_hire_job_req(request, options))
def get_hire_job_manager(
self, request: GetHireJobManagerReq, options: typing.List[str] = None
) -> typing.Tuple[GetHireJobManagerResp, Response]:
return self.cli.raw_request(_gen_get_hire_job_manager_req(request, options))
def get_hire_talent(
self, request: GetHireTalentReq, options: typing.List[str] = None
) -> typing.Tuple[GetHireTalentResp, Response]:
return self.cli.raw_request(_gen_get_hire_talent_req(request, options))
def get_hire_attachment(
self, request: GetHireAttachmentReq, options: typing.List[str] = None
) -> typing.Tuple[GetHireAttachmentResp, Response]:
return self.cli.raw_request(_gen_get_hire_attachment_req(request, options))
def get_hire_attachment_preview(
self, request: GetHireAttachmentPreviewReq, options: typing.List[str] = None
) -> typing.Tuple[GetHireAttachmentPreviewResp, Response]:
return self.cli.raw_request(
_gen_get_hire_attachment_preview_req(request, options)
)
def get_hire_resume_source(
self, request: GetHireResumeSourceReq, options: typing.List[str] = None
) -> typing.Tuple[GetHireResumeSourceResp, Response]:
return self.cli.raw_request(_gen_get_hire_resume_source_req(request, options))
def create_hire_note(
self, request: CreateHireNoteReq, options: typing.List[str] = None
) -> typing.Tuple[CreateHireNoteResp, Response]:
return self.cli.raw_request(_gen_create_hire_note_req(request, options))
def update_hire_note(
self, request: UpdateHireNoteReq, options: typing.List[str] = None
) -> typing.Tuple[UpdateHireNoteResp, Response]:
return self.cli.raw_request(_gen_update_hire_note_req(request, options))
def get_hire_note(
self, request: GetHireNoteReq, options: typing.List[str] = None
) -> typing.Tuple[GetHireNoteResp, Response]:
return self.cli.raw_request(_gen_get_hire_note_req(request, options))
def get_hire_note_list(
self, request: GetHireNoteListReq, options: typing.List[str] = None
) -> typing.Tuple[GetHireNoteListResp, Response]:
return self.cli.raw_request(_gen_get_hire_note_list_req(request, options))
def get_hire_referral_by_application(
self, request: GetHireReferralByApplicationReq, options: typing.List[str] = None
) -> typing.Tuple[GetHireReferralByApplicationResp, Response]:
return self.cli.raw_request(
_gen_get_hire_referral_by_application_req(request, options)
)
def get_hire_job_process_list(
self, request: GetHireJobProcessListReq, options: typing.List[str] = None
) -> typing.Tuple[GetHireJobProcessListResp, Response]:
return self.cli.raw_request(
_gen_get_hire_job_process_list_req(request, options)
)
def create_hire_application(
self, request: CreateHireApplicationReq, options: typing.List[str] = None
) -> typing.Tuple[CreateHireApplicationResp, Response]:
return self.cli.raw_request(_gen_create_hire_application_req(request, options))
def terminate_hire_application(
self, request: TerminateHireApplicationReq, options: typing.List[str] = None
) -> typing.Tuple[TerminateHireApplicationResp, Response]:
return self.cli.raw_request(
_gen_terminate_hire_application_req(request, options)
)
def get_hire_application(
self, request: GetHireApplicationReq, options: typing.List[str] = None
) -> typing.Tuple[GetHireApplicationResp, Response]:
return self.cli.raw_request(_gen_get_hire_application_req(request, options))
def get_hire_application_list(
self, request: GetHireApplicationListReq, options: typing.List[str] = None
) -> typing.Tuple[GetHireApplicationListResp, Response]:
return self.cli.raw_request(
_gen_get_hire_application_list_req(request, options)
)
def get_hire_application_interview_list(
self,
request: GetHireApplicationInterviewListReq,
options: typing.List[str] = None,
) -> typing.Tuple[GetHireApplicationInterviewListResp, Response]:
return self.cli.raw_request(
_gen_get_hire_application_interview_list_req(request, options)
)
def get_hire_offer_by_application(
self, request: GetHireOfferByApplicationReq, options: typing.List[str] = None
) -> typing.Tuple[GetHireOfferByApplicationResp, Response]:
return self.cli.raw_request(
_gen_get_hire_offer_by_application_req(request, options)
)
def get_hire_offer_schema(
self, request: GetHireOfferSchemaReq, options: typing.List[str] = None
) -> typing.Tuple[GetHireOfferSchemaResp, Response]:
return self.cli.raw_request(_gen_get_hire_offer_schema_req(request, options))
def make_hire_transfer_onboard_by_application(
self,
request: MakeHireTransferOnboardByApplicationReq,
options: typing.List[str] = None,
) -> typing.Tuple[MakeHireTransferOnboardByApplicationResp, Response]:
return self.cli.raw_request(
_gen_make_hire_transfer_onboard_by_application_req(request, options)
)
def update_hire_employee(
self, request: UpdateHireEmployeeReq, options: typing.List[str] = None
) -> typing.Tuple[UpdateHireEmployeeResp, Response]:
return self.cli.raw_request(_gen_update_hire_employee_req(request, options))
def get_hire_employee_by_application(
self, request: GetHireEmployeeByApplicationReq, options: typing.List[str] = None
) -> typing.Tuple[GetHireEmployeeByApplicationResp, Response]:
return self.cli.raw_request(
_gen_get_hire_employee_by_application_req(request, options)
)
def get_hire_employee(
self, request: GetHireEmployeeReq, options: typing.List[str] = None
) -> typing.Tuple[GetHireEmployeeResp, Response]:
return self.cli.raw_request(_gen_get_hire_employee_req(request, options))
| 37.431227 | 88 | 0.761247 | [
"Apache-2.0"
] | chyroc/pylark | pylark/api_service_hire.py | 10,069 | Python |
from setuptools import find_packages, setup
setup(
author="Zackary Troop",
name="wave-reader",
version="0.0.10",
url="https://github.com/ztroop/wave-reader-utils",
license="MIT",
description="Unofficial package for Airthings Wave communication.",
long_description=open("README.md").read(),
long_description_content_type="text/markdown",
packages=find_packages(exclude=["examples", "tests"]),
install_requires=[
"Authlib>=0.15.4",
"bleak>=0.10.0",
"httpx>=0.18.2",
],
    python_requires=">=3.7",
classifiers=[
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
],
)
| 31.428571 | 71 | 0.614773 | [
"MIT"
] | ztroop/wave-reader | setup.py | 880 | Python |
"""Run experiment.
This module is intended to be run as a script:
$ python src/experiment.py
"""
import os
import pandas as pd
from bert import BERT
from constants import LANGUAGES, MASK, MISSING
from filenames import CLOZE_DIR, EXPERIMENTS_DIR, FEATURES_DIR
from utils import refresh
ENGLISH_MODEL = "bert-base-cased"
MULTILINGUAL_MODEL = "bert-base-multilingual-cased"
def index_of_masked_word(sentence, bert):
"""Return index of the masked word in `sentence` using `bert`'s' tokenizer.
We use this function to calculate the linear distance between the target
and controller as BERT sees it.
Parameters
----------
sentence : str
Returns
-------
int
"""
tokens = bert.tokenize(sentence)
try:
return tokens.index(MASK)
except ValueError: # MASK not in sentence
return -1
def run(language, force_multilingual=False, fold_case=True, gpu=True):
"""Run the experiment for `language`.
Parameters
----------
language : str
force_multilingual : bool
Whether to use the multilingual model even on English
fold_case : bool
Whether to ignore caseing differences after making predictions
gpu : bool
Whether to run on GPU or not (useful for debugging)
Returns
-------
pd.DataFrame
"""
if (language == "English") and (not force_multilingual):
bert = BERT(ENGLISH_MODEL, gpu=gpu)
else:
bert = BERT(MULTILINGUAL_MODEL, gpu=gpu)
vocab = bert.vocab
if fold_case:
vocab = [word.lower() for word in vocab]
code = LANGUAGES[language]
cloze = pd.read_csv(os.path.join(CLOZE_DIR, f"{code}.csv"))
num_examples = len(cloze) * 2 # because we mask out both words
print(f"\n\nNumber of examples for {language}: {num_examples}")
    print_every = max(1, num_examples // 100)  # guard against modulo-by-zero on small datasets
features = pd.read_csv(
os.path.join(FEATURES_DIR, f"{code}.csv"), dtype={"person": str}
)
# remove any words that aren't in the vocab
features = features[features["word"].isin(vocab)]
# if we are masking out the controller, we know that the masked word is
# also a noun or a pronoun, so we can remove everything else from features
# features = features[features['pos'].isin(['NOUN', 'PRON'])]
cols = ["number", "gender", "case", "person"]
result = []
count, total = 0, 0
for _, example in cloze.iterrows():
for mask in ["masked", "other_masked"]:
try:
predictions = bert.predict(example[mask], fold_case)
except ValueError: # MASK not in sentence
continue
predictions = features.merge(
predictions, how="left", left_on="word", right_index=True
)
# only keep words of the same POS category as the masked word
predictions = predictions[predictions["pos"] == example["pos"]]
# A word is correct if all its features are identical with the features
# of the masked word.
predictions["correct"] = (predictions[cols] == example[cols]).all(axis=1)
# If a word form has multiple feature bundles and at least one of them
# is correct, then we count that word form as correct. The values of
# 'p' for the differently valued but identical word forms will be
# identical (because BERT predicts word forms). I want to include the
# 'p' in the resulting dataframe so I just take the first value.
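            # For illustration (hypothetical rows): if `features` lists the form "der"
            # with two different feature bundles and one of them matches the masked
            # word, the aggregation below collapses both rows into a single
            # ("der", correct=True, p=<first p>) entry; a form with no matching
            # bundle stays incorrect.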
predictions = predictions.groupby("word").agg(
{"correct": any, "p": "first"}
)
# we compute the average (unnormalized) probability of all the word
# forms BERT got correct and all it got incorrect.
mean = predictions.groupby("correct")["p"].mean()
try:
example["correct"] = mean[True]
except KeyError:
example["correct"] = 0.0
try:
example["incorrect"] = mean[False]
except KeyError:
example["incorrect"] = 0.0
# add in the linear distance between masked and other word
masked_index = index_of_masked_word(example["masked"], bert)
other_index = index_of_masked_word(example["other_masked"], bert)
example["distance"] = abs(masked_index - other_index)
result.append(example)
if example["correct"] > example["incorrect"]:
count += 1
total += 1
if total % print_every == 0:
percent_correct = round(100 * (count / total), 3)
percent_done = round(100 * (total / num_examples), 3)
print(f"{percent_correct}% correct with {percent_done}% done")
result = pd.DataFrame(result)
result["right"] = result["correct"] > result["incorrect"]
file_name = os.path.join(EXPERIMENTS_DIR, f"{code}.csv")
result.to_csv(file_name, index=False)
return result
if __name__ == "__main__":
# # # refresh(EXPERIMENTS_DIR) # don't uncomment me!
# # run experiments for languages with fewer cloze examples first
# ORDER = {
# language: len(pd.read_csv(os.path.join(CLOZE_DIR, f'{code}.csv')))
# for language, code in LANGUAGES.items()
# }
ORDER = {"Czech": 0, "German": 1}
for language in sorted(ORDER, key=ORDER.get):
try:
result = run(language)
proportion_correct = result["right"].value_counts(normalize=True)[True]
print(language, round(proportion_correct, 2))
except: # noqa
print(f"Error with {language}")
| 37.953333 | 85 | 0.610926 | [
"MIT"
] | geoffbacon/does-bert-agree | src/experiment.py | 5,693 | Python |
# Python Object Oriented Programming
import datetime
class Employee:
num_of_emps = 0 # Class Variable
raise_amount = 1.02 # Class Variable
def __init__(self, FirstName, LastName, salary):
self.FirstName = FirstName
self.LastName = LastName
self.salary = int(salary)
self.email = FirstName + "." + LastName + "@email.com"
Employee.num_of_emps += 1 # Class Variable
def FullName(self):
return "{} {}".format(self.FirstName, self.LastName)
def apply_raise(self):
self.salary = int(self.salary * self.raise_amount)
@classmethod
def set_raise_amount(cls, amount):
cls.raise_amount = amount
@classmethod
def from_string(cls, emp_str):
first, last, salary = emp_str.split("-")
return cls(first, last, salary)
@staticmethod
def is_workday(day):
if day.weekday() == 5 or day.weekday() == 6:
return False
else:
return True
def __repr__(self):
return "Employee('{}', '{}', '{}')".format(self.FirstName, self.LastName, self.salary)
def __str__(self):
return "{} - {}".format(self.FullName(), self.email)
def __add__(self, other):
return self.salary + other.salary
def __len__(self):
return len(self.FullName())
class Developer(Employee):
def __init__(self, FirstName, LastName, salary, prog_lang):
super().__init__(FirstName, LastName, salary)
self.prog_lang = prog_lang
@classmethod
def from_string(cls, dev_str):
first, last, salary, prog_lang = dev_str.split("-")
return cls(first, last, salary, prog_lang)
class Manager(Employee):
def __init__(self, FirstName, LastName, salary, employees = None):
super().__init__(FirstName, LastName, salary)
if employees is None:
self.employees = []
else:
self.employees = employees
def add_emp(self, emp):
if emp not in self.employees:
self.employees.append(emp)
def remove_emp(self, emp):
if emp in self.employees:
self.employees.remove(emp)
def print_emps(self):
for emp in self.employees:
print("\n --> {}".format(emp.FullName()))
emp_1 = Employee("Sudani", "Coder", 100500)
emp_2 = Employee("Root", "Admin", 100500)
print()
print(emp_1, end = "\n\n")
print(repr(emp_2), end = "\n\n")
print(str(emp_2), end = "\n\n")
print(emp_1 + emp_2, end = "\n\n")
print(len(emp_2), end = "\n\n")
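# Illustrative extra usage (assumed examples, not from the lesson above): the
# alternative constructors, the Manager helpers and the static method.
dev_1 = Developer.from_string("Linus-Dev-90000-Python")
mgr_1 = Manager("Guido", "Boss", 120000, [dev_1])
mgr_1.add_emp(emp_1)
mgr_1.print_emps()
print(Employee.is_workday(datetime.date(2023, 7, 10)), end = "\n\n")  # False on weekends, True otherwise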
| 27.597826 | 94 | 0.606932 | [
"MIT"
] | Sudani-Coder/python | Object Oriented Programming/5 - Special Methods/index.py | 2,539 | Python |
# -*- coding: utf-8 -*-
"""
Key classification
using multiclass Support Vector Machine (SVM)
reference:
Date: Jun 05, 2017
@author: Thuong Tran
@Library: scikit-learn
"""
import os, glob, random
import numpy as np
from pandas import DataFrame
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from sklearn.naive_bayes import MultinomialNB
from sklearn.svm import LinearSVC
from sklearn.metrics import confusion_matrix, precision_score
from sklearn.pipeline import Pipeline
from sklearn.externals import joblib
from sklearn.cross_validation import KFold
import time
import codecs
import matplotlib.pyplot as plt
import itertools
NEW_LINE = '\r\n'
TRAIN_SIZE = 0.8
def build_data_frame(data_dir):
# folders = [d for d in os.listdir(data_dir) if os.path.isdir(os.path.join(data_dir, d))]
dirs = next(os.walk(data_dir))[1] # [0]: path, [1]: folders list, [2] files list
class_names = []
total_amount = []
train_amount = []
test_amount = []
train_data = DataFrame({'value': [], 'class': []})
test_data = DataFrame({'value': [], 'class': []})
for d in dirs:
tmp_dir = os.path.join(data_dir, d)
rows = []
index = []
for f in glob.glob(os.path.join(tmp_dir, '*.txt')):
with open(f, encoding="latin1") as fc:
value = [line.replace('\n', '').replace('\r', '').replace('\t', '')
for line in fc.readlines()]
value = '. '.join(value)
rows.append({'value': value, 'class': d})
index.append(f)
tmp_df = DataFrame(rows, index=index)
size = int(len(tmp_df) * TRAIN_SIZE)
train_df, test_df = tmp_df.iloc[:size], tmp_df.iloc[size:]
train_data = train_data.append(train_df)
test_data = test_data.append(test_df)
class_names.append(d)
total_amount.append(len(os.listdir(tmp_dir)))
train_amount.append(len(train_df))
test_amount.append(len(test_df))
tmp_arr = np.array([total_amount, train_amount, test_amount])
print (DataFrame(tmp_arr, ['Total', 'Train', 'Test'], class_names))
train_data = train_data.reindex(np.random.permutation(train_data.index))
test_data = test_data.reindex(np.random.permutation(test_data.index))
return train_data, test_data, class_names
def plot_confusion_matrix(cm, classes,
normalize=False,
title='Confusion matrix',
cmap=plt.cm.Blues):
"""
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
"""
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print("Normalized confusion matrix")
else:
print('Confusion matrix, without normalization')
print(cm)
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, cm[i, j],
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
def main():
data_dir = '/Users/thuong/Documents/tmp_datasets/SI/TrainValue'
train_data_df, test_data_df, class_names = build_data_frame(data_dir)
pipeline = Pipeline([
('vectorizer', CountVectorizer()),
('tfidf_transformer', TfidfTransformer()),
('classifier', LinearSVC())])
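    # Rough sketch of what the pipeline above does, step by step:
    #   raw document text -> term counts (CountVectorizer)
    #   -> TF-IDF weighting (TfidfTransformer)
    #   -> linear SVM (LinearSVC), which handles the multiclass case one-vs-rest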
######### One-KFolds ##############################
train_data, test_data = train_data_df['value'].values, test_data_df['value'].values
train_target, test_target = train_data_df['class'].values, test_data_df['class'].values
pipeline.fit(train_data, train_target)
predictions = pipeline.predict(test_data)
cnf_matrix = confusion_matrix(test_target, predictions)
print('Confusion matrix with one-fold: ')
print(cnf_matrix)
print("Score with one-fold: %s" % precision_score(test_target, predictions, average = 'weighted'))
print("Score with one-fold: %s" % precision_score(test_target, predictions, average = None))
# ######### KFolds ##############################
# k_fold = KFold(n=len(data_frame), n_folds=6)
# scores = []
# confusion = np.array([[0, 0], [0, 0]])
# for train_indices, test_indices in k_fold:
# train_text = data_frame.iloc[train_indices]['text'].values
# train_label = data_frame.iloc[train_indices]['class'].values
# test_text = data_frame.iloc[test_indices]['text'].values
# test_label = data_frame.iloc[test_indices]['class'].values
# pipeline.fit(train_text, train_label)
# predictions = pipeline.predict(test_text)
# confusion += confusion_matrix(test_label, predictions)
# score = f1_score(test_label, predictions, pos_label = SPAM)
# scores.append(score)
# print('Confusion matrix with 6-fold: ')
# print(confusion)
# print('Score with 6-fold: %s' % (sum(scores)/len(scores)))
# Plot non-normalized confusion matrix
plt.figure()
plot_confusion_matrix(cnf_matrix, classes=class_names,
title='Confusion matrix, without normalization')
# Plot normalized confusion matrix
plt.figure()
plot_confusion_matrix(cnf_matrix, classes=class_names, normalize=True,
title='Normalized confusion matrix')
plt.show()
if __name__ == "__main__":
main() | 32.934132 | 100 | 0.668545 | [
"MIT"
] | xuanthuong/DOU-SI | TrainValue/multiclass_svm.py | 5,500 | Python |
import math
import collections
from gym_jsbsim import utils
class BoundedProperty(collections.namedtuple('BoundedProperty', ['name', 'description', 'min', 'max'])):
def get_legal_name(self):
return utils.AttributeFormatter.translate(self.name)
class Property(collections.namedtuple('Property', ['name', 'description'])):
def get_legal_name(self):
return utils.AttributeFormatter.translate(self.name)
# position and attitude
altitude_sl_ft = BoundedProperty('position/h-sl-ft', 'altitude above mean sea level [ft]', -1400, 85000)
pitch_rad = BoundedProperty('attitude/pitch-rad', 'pitch [rad]', -0.5 * math.pi, 0.5 * math.pi)
roll_rad = BoundedProperty('attitude/roll-rad', 'roll [rad]', -math.pi, math.pi)
heading_deg = BoundedProperty('attitude/psi-deg', 'heading [deg]', 0, 360)
sideslip_deg = BoundedProperty('aero/beta-deg', 'sideslip [deg]', -180, +180)
lat_geod_deg = BoundedProperty('position/lat-geod-deg', 'geocentric latitude [deg]', -90, 90)
lng_geoc_deg = BoundedProperty('position/long-gc-deg', 'geodesic longitude [deg]', -180, 180)
dist_travel_m = Property('position/distance-from-start-mag-mt', 'distance travelled from starting position [m]')
# velocities
u_fps = BoundedProperty('velocities/u-fps', 'body frame x-axis velocity [ft/s]', -2200, 2200)
v_fps = BoundedProperty('velocities/v-fps', 'body frame y-axis velocity [ft/s]', -2200, 2200)
w_fps = BoundedProperty('velocities/w-fps', 'body frame z-axis velocity [ft/s]', -2200, 2200)
v_north_fps = BoundedProperty('velocities/v-north-fps', 'velocity true north [ft/s]', float('-inf'), float('+inf'))
v_east_fps = BoundedProperty('velocities/v-east-fps', 'velocity east [ft/s]', float('-inf'), float('+inf'))
v_down_fps = BoundedProperty('velocities/v-down-fps', 'velocity downwards [ft/s]', float('-inf'), float('+inf'))
vc_fps = BoundedProperty("velocities/vc-fps", "airspeed in knots", 0, 4400)
p_radps = BoundedProperty('velocities/p-rad_sec', 'roll rate [rad/s]', -2 * math.pi, 2 * math.pi)
q_radps = BoundedProperty('velocities/q-rad_sec', 'pitch rate [rad/s]', -2 * math.pi, 2 * math.pi)
r_radps = BoundedProperty('velocities/r-rad_sec', 'yaw rate [rad/s]', -2 * math.pi, 2 * math.pi)
altitude_rate_fps = Property('velocities/h-dot-fps', 'Rate of altitude change [ft/s]')
# accelerations
accelerations_n_pilot_x_norm = Property('accelerations/n-pilot-x-norm', 'pilot body x-axis acceleration, normalised')
accelerations_n_pilot_y_norm = Property('accelerations/n-pilot-y-norm', 'pilot body y-axis acceleration, normalised')
accelerations_n_pilot_z_norm = Property('accelerations/n-pilot-z-norm', 'pilot body z-axis acceleration, normalised')
# controls state
aileron_left = BoundedProperty('fcs/left-aileron-pos-norm', 'left aileron position, normalised', -1, 1)
aileron_right = BoundedProperty('fcs/right-aileron-pos-norm', 'right aileron position, normalised', -1, 1)
elevator = BoundedProperty('fcs/elevator-pos-norm', 'elevator position, normalised', -1, 1)
rudder = BoundedProperty('fcs/rudder-pos-norm', 'rudder position, normalised', -1, 1)
throttle = BoundedProperty('fcs/throttle-pos-norm', 'throttle position, normalised', 0, 1)
gear = BoundedProperty('gear/gear-pos-norm', 'landing gear position, normalised', 0, 1)
# engines
engine_running = Property('propulsion/engine/set-running', 'engine running (0/1 bool)')
all_engine_running = Property('propulsion/set-running', 'set engine running (-1 for all engines)')
engine_thrust_lbs = Property('propulsion/engine/thrust-lbs', 'engine thrust [lb]')
# controls command
aileron_cmd = BoundedProperty('fcs/aileron-cmd-norm', 'aileron commanded position, normalised', -1., 1.)
elevator_cmd = BoundedProperty('fcs/elevator-cmd-norm', 'elevator commanded position, normalised', -1., 1.)
rudder_cmd = BoundedProperty('fcs/rudder-cmd-norm', 'rudder commanded position, normalised', -1., 1.)
throttle_cmd = BoundedProperty('fcs/throttle-cmd-norm', 'throttle commanded position, normalised', 0., 1.)
mixture_cmd = BoundedProperty('fcs/mixture-cmd-norm', 'engine mixture setting, normalised', 0., 1.)
gear_all_cmd = BoundedProperty('gear/gear-cmd-norm', 'all landing gear commanded position, normalised', 0, 1)
# simulation
sim_dt = Property('simulation/dt', 'JSBSim simulation timestep [s]')
sim_time_s = Property('simulation/sim-time-sec', 'Simulation time [s]')
# initial conditions
initial_altitude_ft = Property('ic/h-sl-ft', 'initial altitude MSL [ft]')
initial_terrain_altitude_ft = Property('ic/terrain-elevation-ft', 'initial terrain alt [ft]')
initial_longitude_geoc_deg = Property('ic/long-gc-deg', 'initial geocentric longitude [deg]')
initial_latitude_geod_deg = Property('ic/lat-geod-deg', 'initial geodesic latitude [deg]')
initial_u_fps = Property('ic/u-fps', 'body frame x-axis velocity; positive forward [ft/s]')
initial_v_fps = Property('ic/v-fps', 'body frame y-axis velocity; positive right [ft/s]')
initial_w_fps = Property('ic/w-fps', 'body frame z-axis velocity; positive down [ft/s]')
initial_p_radps = Property('ic/p-rad_sec', 'roll rate [rad/s]')
initial_q_radps = Property('ic/q-rad_sec', 'pitch rate [rad/s]')
initial_r_radps = Property('ic/r-rad_sec', 'yaw rate [rad/s]')
initial_roc_fpm = Property('ic/roc-fpm', 'initial rate of climb [ft/min]')
initial_heading_deg = Property('ic/psi-true-deg', 'initial (true) heading [deg]')
class Vector2(object):
def __init__(self, x: float, y: float):
self.x = x
self.y = y
def heading_deg(self):
""" Calculate heading in degrees of vector from origin """
heading_rad = math.atan2(self.x, self.y)
heading_deg_normalised = (math.degrees(heading_rad) + 360) % 360
return heading_deg_normalised
@staticmethod
def from_sim(sim: 'simulation.Simulation') -> 'Vector2':
return Vector2(sim[v_east_fps], sim[v_north_fps])
class GeodeticPosition(object):
def __init__(self, latitude_deg: float, longitude_deg: float):
self.lat = latitude_deg
self.lon = longitude_deg
def heading_deg_to(self, destination: 'GeodeticPosition') -> float:
""" Determines heading in degrees of course between self and destination """
difference_vector = destination - self
return difference_vector.heading_deg()
@staticmethod
def from_sim(sim: 'simulation.Simulation') -> 'GeodeticPosition':
""" Return a GeodeticPosition object with lat and lon from simulation """
lat_deg = sim[lat_geod_deg]
lon_deg = sim[lng_geoc_deg]
return GeodeticPosition(lat_deg, lon_deg)
def __sub__(self, other) -> Vector2:
""" Returns difference between two coords as (delta_lat, delta_long) """
return Vector2(self.lon - other.lon, self.lat - other.lat)
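# Illustrative sanity check (not part of the module itself; it assumes gym_jsbsim is
# importable so the top-level imports succeed). A displacement of one degree of
# longitude due east corresponds to a heading of 90 degrees.
if __name__ == '__main__':
    assert abs(Vector2(0.0, 1.0).heading_deg() - 0.0) < 1e-9  # pointing due north
    origin = GeodeticPosition(0.0, 0.0)
    east = GeodeticPosition(0.0, 1.0)  # same latitude, one degree further east
    assert abs(origin.heading_deg_to(east) - 90.0) < 1e-9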
| 56.008333 | 117 | 0.724892 | [
"MIT"
] | songhyonkim/gym-ai-pilot | gym_jsbsim/properties.py | 6,721 | Python |
import re
import os
import time
import tweepy
import neologdn
import emoji
TRAINFILE = "./data/train.tsv"
DEVFILE = "./data/dev.tsv"
TESTFILE = "./data/test.tsv"
dup = []
if os.path.exists(TRAINFILE):
with open(TRAINFILE) as f:
dup += f.readlines()
if os.path.exists(DEVFILE):
with open(DEVFILE) as f:
dup += f.readlines()
if os.path.exists(TESTFILE):
with open(TESTFILE) as f:
dup += f.readlines()
class Tweet:
def __init__(self, status):
self.in_reply_to_status_id = status.in_reply_to_status_id
self.text = status.text
self.created_at = status.created_at
self.screen_name = status.user.screen_name
self.username = status.user.name
self.user_id = status.user.id
def is_valid_tweet(status):
# is bot
if "bot" in status.user.screen_name:
return False
# include URL
if re.search(r"https?://", status.text):
return False
# is hashtag
if re.search(r"#(\w+)", status.text):
return False
# reply to multi user
tweet = re.sub(r"@([A-Za-z0-9_]+)", "<unk>", status.text)
if tweet.split().count("<unk>") > 1:
return False
# too long
if len(tweet.replace("<unk>", "")) > 20:
return False
return True
def normalize(text):
text = text.replace(" ", "")
text = text.replace("\n", "")
text = neologdn.normalize(text)
text = "".join(["" if c in emoji.UNICODE_EMOJI["en"].keys() else c for c in text])
tmp = re.sub(r"(\d)([,.])(\d+)", r"\1\3", text)
# text = re.sub(r"\d+", "0", tmp)
tmp = re.sub(r"[!-/:-@[-`{-~]", r" ", text)
text = re.sub(u"[■-♯]", " ", tmp)
text = text.strip()
return text
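# Illustrative behaviour of normalize() (approximate; the exact output depends on the
# installed neologdn/emoji versions): punctuation and emoji are stripped, e.g.
#   normalize("こんにちは!!😀") -> "こんにちは"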
def main():
query = input("Search Query: ")
max_tw = int(input("Tweet Count: "))
CK = os.getenv("TW_CK")
CS = os.getenv("TW_CS")
auth = tweepy.AppAuthHandler(CK, CS)
api = tweepy.API(auth)
got = 0
filtered = 0
saved = 0
lookup_ids = []
replies = {}
max_id = None
while saved <= max_tw:
try:
statuses = api.search_tweets(
q=query, lang="ja", count=100, max_id=max_id
)
max_id = statuses[-1].id
for status in statuses:
got += 1
# is not reply
if not status.in_reply_to_status_id:
continue
# filter
if not is_valid_tweet(status):
continue
# append lookup id
lookup_ids.append(status.in_reply_to_status_id)
replies[status.in_reply_to_status_id] = Tweet(status)
filtered += 1
print(f"\r{got} => {filtered} => {saved}", end="")
# collect 100 tweets
if len(lookup_ids) >= 100:
pstatuses = api.lookup_statuses(lookup_ids)
for pstatus in pstatuses:
if not is_valid_tweet(pstatus):
continue
reply = replies[pstatus.id]
# is same user
if pstatus.user.id == reply.user_id:
continue
intext = re.sub(r"@([A-Za-z0-9_]+)", "", pstatus.text)
intext = normalize(intext)
outtext = re.sub(r"@([A-Za-z0-9_]+)", "", reply.text)
outtext = normalize(outtext)
if not intext or not outtext:
continue
if f"{intext}\t{outtext}\n" in dup:
continue
if saved <= max_tw * .9:
path = TRAINFILE
elif saved <= max_tw * .95:
path = DEVFILE
else:
path = TESTFILE
with open(path, "a") as f:
f.write(f"{intext}\t{outtext}\n")
saved += 1
print(f"\r{got} => {filtered} => {saved}", end="")
if saved > max_tw:
exit()
lookup_ids = []
replies = {}
except Exception:
print()
limit_status = api.rate_limit_status(
)["resources"]["search"]["/search/tweets"]
while limit_status["reset"] >= int(time.time()):
print("\rLimited: " + (" " +
str(limit_status["reset"] - int(time.time())))[-3:] + "s", end="")
time.sleep(.5)
print()
if __name__ == "__main__":
main()
| 32.930556 | 88 | 0.471531 | [
"MIT"
] | laddge/gf-ai | collect_tweet.py | 4,746 | Python |
"""
WSGI config for kubeoperator project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'kubeoperator.settings')
application = get_wsgi_application()
| 23.588235 | 78 | 0.790524 | [
"Apache-2.0"
] | 2733284198/KubeOperator | core/apps/kubeoperator/wsgi.py | 401 | Python |
"""The tests for the MQTT switch platform."""
import json
from unittest.mock import ANY
from asynctest import patch
import pytest
from homeassistant.components import mqtt, switch
from homeassistant.components.mqtt.discovery import async_start
from homeassistant.const import (
ATTR_ASSUMED_STATE, STATE_OFF, STATE_ON, STATE_UNAVAILABLE)
import homeassistant.core as ha
from homeassistant.setup import async_setup_component
from tests.common import (
MockConfigEntry, async_fire_mqtt_message, async_mock_mqtt_component,
mock_coro, mock_registry)
from tests.components.switch import common
@pytest.fixture
def mock_publish(hass):
"""Initialize components."""
yield hass.loop.run_until_complete(async_mock_mqtt_component(hass))
async def test_controlling_state_via_topic(hass, mock_publish):
"""Test the controlling state via topic."""
assert await async_setup_component(hass, switch.DOMAIN, {
switch.DOMAIN: {
'platform': 'mqtt',
'name': 'test',
'state_topic': 'state-topic',
'command_topic': 'command-topic',
'payload_on': 1,
'payload_off': 0
}
})
state = hass.states.get('switch.test')
assert STATE_OFF == state.state
assert not state.attributes.get(ATTR_ASSUMED_STATE)
async_fire_mqtt_message(hass, 'state-topic', '1')
await hass.async_block_till_done()
state = hass.states.get('switch.test')
assert STATE_ON == state.state
async_fire_mqtt_message(hass, 'state-topic', '0')
await hass.async_block_till_done()
await hass.async_block_till_done()
state = hass.states.get('switch.test')
assert STATE_OFF == state.state
async def test_sending_mqtt_commands_and_optimistic(hass, mock_publish):
"""Test the sending MQTT commands in optimistic mode."""
fake_state = ha.State('switch.test', 'on')
with patch('homeassistant.helpers.restore_state.RestoreEntity'
'.async_get_last_state',
return_value=mock_coro(fake_state)):
assert await async_setup_component(hass, switch.DOMAIN, {
switch.DOMAIN: {
'platform': 'mqtt',
'name': 'test',
'command_topic': 'command-topic',
'payload_on': 'beer on',
'payload_off': 'beer off',
'qos': '2'
}
})
state = hass.states.get('switch.test')
assert STATE_ON == state.state
assert state.attributes.get(ATTR_ASSUMED_STATE)
common.turn_on(hass, 'switch.test')
await hass.async_block_till_done()
mock_publish.async_publish.assert_called_once_with(
'command-topic', 'beer on', 2, False)
mock_publish.async_publish.reset_mock()
state = hass.states.get('switch.test')
assert STATE_ON == state.state
common.turn_off(hass, 'switch.test')
await hass.async_block_till_done()
await hass.async_block_till_done()
mock_publish.async_publish.assert_called_once_with(
'command-topic', 'beer off', 2, False)
state = hass.states.get('switch.test')
assert STATE_OFF == state.state
async def test_controlling_state_via_topic_and_json_message(
hass, mock_publish):
"""Test the controlling state via topic and JSON message."""
assert await async_setup_component(hass, switch.DOMAIN, {
switch.DOMAIN: {
'platform': 'mqtt',
'name': 'test',
'state_topic': 'state-topic',
'command_topic': 'command-topic',
'payload_on': 'beer on',
'payload_off': 'beer off',
'value_template': '{{ value_json.val }}'
}
})
state = hass.states.get('switch.test')
assert STATE_OFF == state.state
async_fire_mqtt_message(hass, 'state-topic', '{"val":"beer on"}')
await hass.async_block_till_done()
await hass.async_block_till_done()
state = hass.states.get('switch.test')
assert STATE_ON == state.state
async_fire_mqtt_message(hass, 'state-topic', '{"val":"beer off"}')
await hass.async_block_till_done()
await hass.async_block_till_done()
state = hass.states.get('switch.test')
assert STATE_OFF == state.state
async def test_default_availability_payload(hass, mock_publish):
"""Test the availability payload."""
assert await async_setup_component(hass, switch.DOMAIN, {
switch.DOMAIN: {
'platform': 'mqtt',
'name': 'test',
'state_topic': 'state-topic',
'command_topic': 'command-topic',
'availability_topic': 'availability_topic',
'payload_on': 1,
'payload_off': 0
}
})
state = hass.states.get('switch.test')
assert STATE_UNAVAILABLE == state.state
async_fire_mqtt_message(hass, 'availability_topic', 'online')
await hass.async_block_till_done()
await hass.async_block_till_done()
state = hass.states.get('switch.test')
assert STATE_OFF == state.state
assert not state.attributes.get(ATTR_ASSUMED_STATE)
async_fire_mqtt_message(hass, 'availability_topic', 'offline')
await hass.async_block_till_done()
await hass.async_block_till_done()
state = hass.states.get('switch.test')
assert STATE_UNAVAILABLE == state.state
async_fire_mqtt_message(hass, 'state-topic', '1')
await hass.async_block_till_done()
await hass.async_block_till_done()
state = hass.states.get('switch.test')
assert STATE_UNAVAILABLE == state.state
async_fire_mqtt_message(hass, 'availability_topic', 'online')
await hass.async_block_till_done()
await hass.async_block_till_done()
state = hass.states.get('switch.test')
assert STATE_ON == state.state
async def test_custom_availability_payload(hass, mock_publish):
"""Test the availability payload."""
assert await async_setup_component(hass, switch.DOMAIN, {
switch.DOMAIN: {
'platform': 'mqtt',
'name': 'test',
'state_topic': 'state-topic',
'command_topic': 'command-topic',
'availability_topic': 'availability_topic',
'payload_on': 1,
'payload_off': 0,
'payload_available': 'good',
'payload_not_available': 'nogood'
}
})
state = hass.states.get('switch.test')
assert STATE_UNAVAILABLE == state.state
async_fire_mqtt_message(hass, 'availability_topic', 'good')
await hass.async_block_till_done()
state = hass.states.get('switch.test')
assert STATE_OFF == state.state
assert not state.attributes.get(ATTR_ASSUMED_STATE)
async_fire_mqtt_message(hass, 'availability_topic', 'nogood')
await hass.async_block_till_done()
await hass.async_block_till_done()
state = hass.states.get('switch.test')
assert STATE_UNAVAILABLE == state.state
async_fire_mqtt_message(hass, 'state-topic', '1')
await hass.async_block_till_done()
await hass.async_block_till_done()
state = hass.states.get('switch.test')
assert STATE_UNAVAILABLE == state.state
async_fire_mqtt_message(hass, 'availability_topic', 'good')
await hass.async_block_till_done()
await hass.async_block_till_done()
state = hass.states.get('switch.test')
assert STATE_ON == state.state
async def test_custom_state_payload(hass, mock_publish):
"""Test the state payload."""
assert await async_setup_component(hass, switch.DOMAIN, {
switch.DOMAIN: {
'platform': 'mqtt',
'name': 'test',
'state_topic': 'state-topic',
'command_topic': 'command-topic',
'payload_on': 1,
'payload_off': 0,
'state_on': "HIGH",
'state_off': "LOW",
}
})
state = hass.states.get('switch.test')
assert STATE_OFF == state.state
assert not state.attributes.get(ATTR_ASSUMED_STATE)
async_fire_mqtt_message(hass, 'state-topic', 'HIGH')
await hass.async_block_till_done()
await hass.async_block_till_done()
state = hass.states.get('switch.test')
assert STATE_ON == state.state
async_fire_mqtt_message(hass, 'state-topic', 'LOW')
await hass.async_block_till_done()
await hass.async_block_till_done()
state = hass.states.get('switch.test')
assert STATE_OFF == state.state
async def test_setting_attribute_via_mqtt_json_message(hass, mqtt_mock):
"""Test the setting of attribute via MQTT with JSON payload."""
assert await async_setup_component(hass, switch.DOMAIN, {
switch.DOMAIN: {
'platform': 'mqtt',
'name': 'test',
'command_topic': 'test-topic',
'json_attributes_topic': 'attr-topic'
}
})
async_fire_mqtt_message(hass, 'attr-topic', '{ "val": "100" }')
await hass.async_block_till_done()
state = hass.states.get('switch.test')
assert '100' == state.attributes.get('val')
async def test_update_with_json_attrs_not_dict(hass, mqtt_mock, caplog):
"""Test attributes get extracted from a JSON result."""
assert await async_setup_component(hass, switch.DOMAIN, {
switch.DOMAIN: {
'platform': 'mqtt',
'name': 'test',
'command_topic': 'test-topic',
'json_attributes_topic': 'attr-topic'
}
})
async_fire_mqtt_message(hass, 'attr-topic', '[ "list", "of", "things"]')
await hass.async_block_till_done()
state = hass.states.get('switch.test')
assert state.attributes.get('val') is None
assert 'JSON result was not a dictionary' in caplog.text
async def test_update_with_json_attrs_bad_JSON(hass, mqtt_mock, caplog):
"""Test attributes get extracted from a JSON result."""
assert await async_setup_component(hass, switch.DOMAIN, {
switch.DOMAIN: {
'platform': 'mqtt',
'name': 'test',
'command_topic': 'test-topic',
'json_attributes_topic': 'attr-topic'
}
})
async_fire_mqtt_message(hass, 'attr-topic', 'This is not JSON')
await hass.async_block_till_done()
state = hass.states.get('switch.test')
assert state.attributes.get('val') is None
assert 'Erroneous JSON: This is not JSON' in caplog.text
async def test_discovery_update_attr(hass, mqtt_mock, caplog):
"""Test update of discovered MQTTAttributes."""
entry = MockConfigEntry(domain=mqtt.DOMAIN)
await async_start(hass, 'homeassistant', {}, entry)
data1 = (
'{ "name": "Beer",'
' "command_topic": "test_topic",'
' "json_attributes_topic": "attr-topic1" }'
)
data2 = (
'{ "name": "Beer",'
' "command_topic": "test_topic",'
' "json_attributes_topic": "attr-topic2" }'
)
async_fire_mqtt_message(hass, 'homeassistant/switch/bla/config',
data1)
await hass.async_block_till_done()
async_fire_mqtt_message(hass, 'attr-topic1', '{ "val": "100" }')
await hass.async_block_till_done()
await hass.async_block_till_done()
state = hass.states.get('switch.beer')
assert '100' == state.attributes.get('val')
# Change json_attributes_topic
async_fire_mqtt_message(hass, 'homeassistant/switch/bla/config',
data2)
await hass.async_block_till_done()
await hass.async_block_till_done()
# Verify we are no longer subscribing to the old topic
async_fire_mqtt_message(hass, 'attr-topic1', '{ "val": "50" }')
await hass.async_block_till_done()
await hass.async_block_till_done()
state = hass.states.get('switch.beer')
assert '100' == state.attributes.get('val')
# Verify we are subscribing to the new topic
async_fire_mqtt_message(hass, 'attr-topic2', '{ "val": "75" }')
await hass.async_block_till_done()
await hass.async_block_till_done()
state = hass.states.get('switch.beer')
assert '75' == state.attributes.get('val')
async def test_unique_id(hass):
"""Test unique id option only creates one switch per unique_id."""
await async_mock_mqtt_component(hass)
assert await async_setup_component(hass, switch.DOMAIN, {
switch.DOMAIN: [{
'platform': 'mqtt',
'name': 'Test 1',
'state_topic': 'test-topic',
'command_topic': 'command-topic',
'unique_id': 'TOTALLY_UNIQUE'
}, {
'platform': 'mqtt',
'name': 'Test 2',
'state_topic': 'test-topic',
'command_topic': 'command-topic',
'unique_id': 'TOTALLY_UNIQUE'
}]
})
async_fire_mqtt_message(hass, 'test-topic', 'payload')
await hass.async_block_till_done()
await hass.async_block_till_done()
assert len(hass.states.async_entity_ids()) == 2
# all switches group is 1, unique id created is 1
async def test_discovery_removal_switch(hass, mqtt_mock, caplog):
"""Test removal of discovered switch."""
entry = MockConfigEntry(domain=mqtt.DOMAIN)
await async_start(hass, 'homeassistant', {}, entry)
data = (
'{ "name": "Beer",'
' "state_topic": "test_topic",'
' "command_topic": "test_topic" }'
)
async_fire_mqtt_message(hass, 'homeassistant/switch/bla/config',
data)
await hass.async_block_till_done()
await hass.async_block_till_done()
state = hass.states.get('switch.beer')
assert state is not None
assert state.name == 'Beer'
async_fire_mqtt_message(hass, 'homeassistant/switch/bla/config',
'')
await hass.async_block_till_done()
await hass.async_block_till_done()
state = hass.states.get('switch.beer')
assert state is None
async def test_discovery_update_switch(hass, mqtt_mock, caplog):
"""Test update of discovered switch."""
entry = MockConfigEntry(domain=mqtt.DOMAIN)
await async_start(hass, 'homeassistant', {}, entry)
data1 = (
'{ "name": "Beer",'
' "state_topic": "test_topic",'
' "command_topic": "test_topic" }'
)
data2 = (
'{ "name": "Milk",'
' "state_topic": "test_topic",'
' "command_topic": "test_topic" }'
)
async_fire_mqtt_message(hass, 'homeassistant/switch/bla/config',
data1)
await hass.async_block_till_done()
state = hass.states.get('switch.beer')
assert state is not None
assert state.name == 'Beer'
async_fire_mqtt_message(hass, 'homeassistant/switch/bla/config',
data2)
await hass.async_block_till_done()
await hass.async_block_till_done()
state = hass.states.get('switch.beer')
assert state is not None
assert state.name == 'Milk'
state = hass.states.get('switch.milk')
assert state is None
async def test_discovery_broken(hass, mqtt_mock, caplog):
"""Test handling of bad discovery message."""
entry = MockConfigEntry(domain=mqtt.DOMAIN)
await async_start(hass, 'homeassistant', {}, entry)
data1 = (
'{ "name": "Beer" }'
)
data2 = (
'{ "name": "Milk",'
' "state_topic": "test_topic",'
' "command_topic": "test_topic" }'
)
async_fire_mqtt_message(hass, 'homeassistant/switch/bla/config',
data1)
await hass.async_block_till_done()
state = hass.states.get('switch.beer')
assert state is None
async_fire_mqtt_message(hass, 'homeassistant/switch/bla/config',
data2)
await hass.async_block_till_done()
await hass.async_block_till_done()
state = hass.states.get('switch.milk')
assert state is not None
assert state.name == 'Milk'
state = hass.states.get('switch.beer')
assert state is None
async def test_entity_device_info_with_identifier(hass, mqtt_mock):
"""Test MQTT switch device registry integration."""
entry = MockConfigEntry(domain=mqtt.DOMAIN)
entry.add_to_hass(hass)
await async_start(hass, 'homeassistant', {}, entry)
registry = await hass.helpers.device_registry.async_get_registry()
data = json.dumps({
'platform': 'mqtt',
'name': 'Test 1',
'state_topic': 'test-topic',
'command_topic': 'test-command-topic',
'device': {
'identifiers': ['helloworld'],
'connections': [
["mac", "02:5b:26:a8:dc:12"],
],
'manufacturer': 'Whatever',
'name': 'Beer',
'model': 'Glass',
'sw_version': '0.1-beta',
},
'unique_id': 'veryunique'
})
async_fire_mqtt_message(hass, 'homeassistant/switch/bla/config',
data)
await hass.async_block_till_done()
await hass.async_block_till_done()
device = registry.async_get_device({('mqtt', 'helloworld')}, set())
assert device is not None
assert device.identifiers == {('mqtt', 'helloworld')}
assert device.connections == {('mac', "02:5b:26:a8:dc:12")}
assert device.manufacturer == 'Whatever'
assert device.name == 'Beer'
assert device.model == 'Glass'
assert device.sw_version == '0.1-beta'
async def test_entity_device_info_update(hass, mqtt_mock):
"""Test device registry update."""
entry = MockConfigEntry(domain=mqtt.DOMAIN)
entry.add_to_hass(hass)
await async_start(hass, 'homeassistant', {}, entry)
registry = await hass.helpers.device_registry.async_get_registry()
config = {
'platform': 'mqtt',
'name': 'Test 1',
'state_topic': 'test-topic',
'command_topic': 'test-command-topic',
'device': {
'identifiers': ['helloworld'],
'connections': [
["mac", "02:5b:26:a8:dc:12"],
],
'manufacturer': 'Whatever',
'name': 'Beer',
'model': 'Glass',
'sw_version': '0.1-beta',
},
'unique_id': 'veryunique'
}
data = json.dumps(config)
async_fire_mqtt_message(hass, 'homeassistant/switch/bla/config',
data)
await hass.async_block_till_done()
await hass.async_block_till_done()
device = registry.async_get_device({('mqtt', 'helloworld')}, set())
assert device is not None
assert device.name == 'Beer'
config['device']['name'] = 'Milk'
data = json.dumps(config)
async_fire_mqtt_message(hass, 'homeassistant/switch/bla/config',
data)
await hass.async_block_till_done()
await hass.async_block_till_done()
device = registry.async_get_device({('mqtt', 'helloworld')}, set())
assert device is not None
assert device.name == 'Milk'
async def test_entity_id_update(hass, mqtt_mock):
"""Test MQTT subscriptions are managed when entity_id is updated."""
registry = mock_registry(hass, {})
mock_mqtt = await async_mock_mqtt_component(hass)
assert await async_setup_component(hass, switch.DOMAIN, {
switch.DOMAIN: [{
'platform': 'mqtt',
'name': 'beer',
'state_topic': 'test-topic',
'command_topic': 'command-topic',
'availability_topic': 'avty-topic',
'unique_id': 'TOTALLY_UNIQUE'
}]
})
state = hass.states.get('switch.beer')
assert state is not None
assert mock_mqtt.async_subscribe.call_count == 2
mock_mqtt.async_subscribe.assert_any_call('test-topic', ANY, 0, 'utf-8')
mock_mqtt.async_subscribe.assert_any_call('avty-topic', ANY, 0, 'utf-8')
mock_mqtt.async_subscribe.reset_mock()
registry.async_update_entity('switch.beer', new_entity_id='switch.milk')
await hass.async_block_till_done()
await hass.async_block_till_done()
state = hass.states.get('switch.beer')
assert state is None
state = hass.states.get('switch.milk')
assert state is not None
assert mock_mqtt.async_subscribe.call_count == 2
mock_mqtt.async_subscribe.assert_any_call('test-topic', ANY, 0, 'utf-8')
mock_mqtt.async_subscribe.assert_any_call('avty-topic', ANY, 0, 'utf-8')
| 33.128713 | 76 | 0.639171 | [
"Apache-2.0"
] | BobbyBleacher/home-assistant | tests/components/mqtt/test_switch.py | 20,076 | Python |
import torch
from utils.distmat import compute_distmat
def init_feedback_indices(q, g, device=None):
return torch.zeros((q, g), dtype=torch.bool, device=device)
def init_feedback_indices_qg(q, g, positive=False, device=None):
indices = torch.zeros(q, q + g, dtype=torch.bool, device=device)
if positive:
indices[torch.arange(q), torch.arange(q)] = True
return indices
def greedy_feedback(distmat, q_pids, g_pids, positive_indices, negative_indices, inplace=True):
"""
Update positive_indices, negative_indices with one round of feedback. Provide feedback for top-ranked gallery.
Note that distmat is corrupted if inplace=True.
:param distmat: q x g Tensor (adjusted query to gallery)
:param q_pids: q
:param g_pids: g
:param positive_indices: q x g
:param negative_indices: q x g
:return:
(positive_indices, negative_indices, matches)
"""
q, g = tuple(distmat.shape)
if not inplace:
distmat = distmat.clone().detach()
        positive_indices = positive_indices.clone()
        negative_indices = negative_indices.clone()
distmat[positive_indices] = float("inf")
distmat[negative_indices] = float("inf")
indices = distmat.argmin(dim=1)
pmap = g_pids[indices] == q_pids
positive_q = torch.arange(0, q)[pmap]
negative_q = torch.arange(0, q)[pmap == False]
positive_g = indices[pmap]
negative_g = indices[pmap == False]
existing = positive_indices[positive_q, positive_g]
assert (not existing.any())
positive_indices[positive_q, positive_g] = True
existing = negative_indices[negative_q, negative_g]
assert (not existing.any())
negative_indices[negative_q, negative_g] = True
return positive_indices, negative_indices, pmap
def naive_round(qf, gf, q_pids, g_pids, positive_indices=None, negative_indices=None,
inplace=True, previous_distmat=None, device=None):
"""
qf: q x m
gf: g x m
q_pids: q
g_pids: g
positive_indices: q x g
negative_indices: q x g
previous_distmat: adjusted distmat (== compute_distmat(qf, gf) only at init)
"""
q, g = qf.shape[0], gf.shape[0]
assert (qf.shape[1] == gf.shape[1])
if positive_indices is None: positive_indices = init_feedback_indices(q, g, device=device)
if negative_indices is None: negative_indices = init_feedback_indices(q, g, device=device)
if previous_distmat is None:
distmat = compute_distmat(qf, gf)
else:
distmat = previous_distmat
res = greedy_feedback(distmat, q_pids, g_pids, positive_indices, negative_indices, inplace=inplace)
positive_indices, negative_indices, matches = res
distmat = compute_distmat(qf, gf)
distmat[positive_indices] = 0
distmat[negative_indices] = float("inf")
return distmat, positive_indices, negative_indices, matches
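# Minimal usage sketch (not part of the original module). It assumes that
# `utils.distmat.compute_distmat` accepts two 2-D float tensors; the tiny random
# features below are purely for illustration.
if __name__ == "__main__":
    torch.manual_seed(0)
    qf, gf = torch.randn(4, 16), torch.randn(12, 16)
    q_pids = torch.arange(4)
    g_pids = torch.randint(0, 4, (12,))
    distmat, pos_idx, neg_idx, matches = naive_round(qf, gf, q_pids, g_pids)
    print(distmat.shape, matches)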
| 32.988506 | 114 | 0.699652 | [
"MIT"
] | itsnamgyu/reid-metric | hitl/feedback.py | 2,870 | Python |
from django.contrib import admin
from .models import User, Departament
admin.site.register(User)
admin.site.register(Departament)
| 21.833333 | 37 | 0.824427 | [
"MIT"
] | Bounty1993/rest-crm | src/accounts/admin.py | 131 | Python |
#Licensed under MIT License
#charset = "utf-8"
#Language = "Python3"
#Bot Framework = "python-telegram-bot"
#This code is without a proxy; the actual code contains a proxy
#The proxy used should be of type SOCKS5
#Special thanks to cyberboySumanjay
#The bot will keep running until you press Ctrl+C in the terminal or command line.
#import the required files
import requests
import logging
from telegram import *
from telegram.ext import *
#enable logger (optional)
logging.basicConfig(
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO
)
logger = logging.getLogger(__name__)
TOKEN = "Bot Token Here"
#CommandHandler for message "Start"
def start(update: Update, context: CallbackContext) -> None:
update.message.reply_text(f"""*Hi {update.effective_chat.first_name},*
Welcome to the Torrent Searcher Bot. Here you will find all the torrents you search for :)
Type /help to know how to use the bot
Type /info to know about the developer""", parse_mode=ParseMode.MARKDOWN)
#CommandHandler for message "Help"
def help(update: Update, context: CallbackContext) -> None:
update.message.reply_text("""Send me the query you want to search and i will do the rest!
If any error occurs, feel free to pm me on https://t.me/SL_PUNSITH1""", parse_mode=ParseMode.MARKDOWN)
#CommandHandler to get torrents for the query
def find(update: Update, context: CallbackContext) -> None:
try:
update.message.reply_text("Searching results for 👉{}👈".format(update.message.text))
#1337x, torrent9 & eztv api
url = "https://src.abirxo.com/index.php?name={}&x1337=true&x1337pages=1".format(update.message.text)
results = requests.get(url).json()
print(results)
for item in results:
link = item.get('link')
name = item.get('name')
pic = item.get('picture')
update.message.reply_text(f"""*➲Name:* `{name}`
*➲Link:* `{link}`""", parse_mode=ParseMode.MARKDOWN)
update.message.reply_text("End of Results")
    except Exception:
        update.message.reply_text("""Search Completed""")
#CommandHandler for message "info"
def info(update: Update, context: CallbackContext) -> None:
#Never Mind :-)
update.message.reply_text("""Bot by @unkusr""", parse_mode=ParseMode.MARKDOWN)
#Add all handlers to the main function.
def main() -> None:
updater = Updater(TOKEN, use_context=True)
dispatcher = updater.dispatcher
dispatcher.add_handler(CommandHandler("start", start))
dispatcher.add_handler(CommandHandler("help", help))
dispatcher.add_handler(CommandHandler("info", info))
dispatcher.add_handler(MessageHandler(Filters.text & (~Filters.command), find))
    updater.start_polling() #start polling; if you use webhooks, replace this call with your webhook setup
updater.idle()
#Call the main function
if __name__ == '__main__':
main()
| 36.658228 | 118 | 0.708909 | [
"MIT"
] | Bot361/Torrent_Searcher_Bot | mirror.py | 2,906 | Python |
#!/bin/env python3
import sys
import numpy as np
import pandas as pd
from pandas_plink import read_plink
import argparse
if __name__=='__main__':
parser = argparse.ArgumentParser(description="Calculate chi-squared selection statistics based on principal components from Galinsky et al. 2016")
parser.add_argument("outfile",help="path to output file name")
parser.add_argument("plink",help="path to PLINK prefix")
parser.add_argument("eval",help="path to eigenvalue file")
parser.add_argument("proj",help="path to projections file")
parser.add_argument("-v","--verbose",help="verbose mode (default: TRUE)",action="store_false")
parser.add_argument("-m","--missing",help="missing mode (default: FALSE)",action="store_true")
parser.add_argument("-c","--chunk",help="chunk size (default: 64)",type=int,default=64)
args = parser.parse_args()
outfile = args.outfile
filename = args.plink
eigenvec_file = args.proj
eigenvals_file = args.eval
verbose = args.verbose
chunk_size = args.chunk
missing = args.missing
evecs = np.loadtxt(eigenvec_file,dtype=np.float64)
evals = np.loadtxt(eigenvals_file,dtype=np.float64,delimiter='\n')
evec_scalar = np.nansum(evecs,axis=0)[np.newaxis,:]
output=open(outfile,"wb")
(bim, _, G) = read_plink(filename)
snps = bim['snp']
del(bim)
ncols = evecs.shape[0]
for counter in range(int(np.ceil(G.shape[0]/chunk_size))):
if verbose:
print("Reading {}".format((counter+1)*chunk_size))
labels = snps[counter*chunk_size:(counter+1)*chunk_size]
genos = G[counter*chunk_size:(counter+1)*chunk_size,:].compute()
p = np.nanmean(genos,axis=1)/2
if missing:
genos = np.nan_to_num(genos)
scores = np.dot(genos,evecs)
scores = scores - 2*np.dot(p[:,np.newaxis],evec_scalar)
scores = scores / np.sqrt(2*p*(1-p))[:,np.newaxis]
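        # The rows of `scores` are now the normalized SNP loadings of Galinsky et al.
        # (2016): for SNP j and PC k, score_jk = sum_i (g_ij - 2*p_j) * u_ik / sqrt(2*p_j*(1-p_j)).
        # Squaring them and dividing by the matching eigenvalue (next line) gives a value
        # that is approximately chi-squared with 1 degree of freedom under the null of no
        # selection, so large values flag candidate SNPs.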
statistic = (1/evals) * (scores**2)
statistic = np.insert(statistic.astype(str),0,labels,axis=1)
np.savetxt(output,statistic,delimiter="\t",fmt="%s")
output.close()
exit(0)
| 28.571429 | 147 | 0.7155 | [
"MIT"
] | CreRecombinase/fastPPCA | misc/selection/galinsky.py | 2,000 | Python |
# -*- coding: utf-8 -*-
# This file is part of the Ingram Micro Cloud Blue Connect connect-cli.
# Copyright (c) 2021 Ingram Micro. All Rights Reserved.
import os
import sys
def unimport():
for m in ('connect.cli.plugins.play.commands', 'connect.cli.ccli'):
if m in sys.modules:
del sys.modules[m]
def test_play_commands(fs, mocker):
os.environ['CCLI_SCRIPTS'] = os.path.join(os.path.dirname(__file__), 'scripts')
unimport()
from connect.cli.ccli import main
mocker.patch('connect.cli.plugins.play.commands.PlayOptions.context_file', None)
mocker.patch('sys.argv', ['cmd', 'play', 'script1'])
main()
def test_play_commands_rel(fs, mocker):
os.environ['CCLI_SCRIPTS'] = 'tests/plugins/play/scripts'
unimport()
from connect.cli.ccli import main
mocker.patch('connect.cli.plugins.play.commands.PlayOptions.context_file', None)
mocker.patch('sys.argv', ['cmd', 'play', 'script1'])
main()
| 26.861111 | 84 | 0.679421 | [
"Apache-2.0"
] | cloudblue/product-sync | tests/plugins/play/test_play_commands.py | 967 | Python |
import json
from urllib import urlencode
from corehq.apps.registration.utils import create_30_day_trial
from dimagi.utils.couch import CriticalSection
from dimagi.utils.couch.resource_conflict import retry_resource
from django.contrib.auth.decorators import login_required
from django.core.urlresolvers import reverse
from django.http import Http404, HttpResponseRedirect, HttpResponse
from django.template.loader import render_to_string
from django.shortcuts import render
from django.contrib import messages
from dimagi.utils.name_to_url import name_to_url
from django.utils.translation import ugettext as _, ugettext_lazy
from corehq.apps.app_manager.views.apps import clear_app_cache
from corehq.apps.domain.decorators import require_superuser
from corehq.apps.domain.exceptions import NameUnavailableException
from corehq.elastic import es_query, parse_args_for_es, fill_mapping_with_facets
from corehq.apps.domain.models import Domain
from dimagi.utils.couch.database import apply_update
from corehq.apps.fixtures.models import FixtureDataType
SNAPSHOT_FACETS = ['project_type', 'license', 'author.exact', 'is_starter_app']
DEPLOYMENT_FACETS = ['deployment.region']
SNAPSHOT_MAPPING = [
("", True, [
{"facet": "project_type", "name": ugettext_lazy("Category"), "expanded": True},
{
"facet": "license",
"name": ugettext_lazy("License"),
"expanded": True,
"mapping": {
'cc': 'CC BY',
'cc-sa': 'CC BY-SA',
'cc-nd': 'CC BY-ND',
'cc-nc': 'CC BY-NC',
'cc-nc-sa': 'CC BY-NC-SA',
'cc-nc-nd': 'CC BY-NC-ND',
}
},
{"facet": "author.exact", "name": ugettext_lazy("Author"), "expanded": True},
]),
]
DEPLOYMENT_MAPPING = [
("", True, [
{"facet": "deployment.region", "name": "Region", "expanded": True},
]),
]
def rewrite_url(request, path):
return HttpResponseRedirect('/exchange%s?%s' % (path, request.META['QUERY_STRING']))
def inverse_dict(d):
return dict([(v, k) for k, v in d.iteritems()])
def can_view_app(req, dom):
if not dom or not dom.is_snapshot or not dom.published:
return False
if not dom.is_approved and (
not getattr(req, "couch_user", "") or not req.couch_user.is_domain_admin(dom.copied_from.name)
):
return False
return True
def project_info(request, domain, template="appstore/project_info.html"):
dom = Domain.get(domain)
if not can_view_app(request, dom):
raise Http404()
copies = dom.copies_of_parent()
images = set()
audio = set()
return render(request, template, {
"project": dom,
"applications": dom.full_applications(include_builds=False),
"fixtures": FixtureDataType.by_domain(dom.name),
"copies": copies,
"images": images,
"audio": audio,
"url_base": reverse('appstore'),
'display_import': True if getattr(request, "couch_user",
"") and request.couch_user.get_domains() else False
})
def deduplicate(hits):
unique_names = set()
unique_hits = []
for hit in hits:
if not hit['_source']['name'] in unique_names:
unique_hits.append(hit)
unique_names.add(hit['_source']['name'])
return unique_hits
def appstore(request, template="appstore/appstore_base.html"):
page_length = 10
include_unapproved = True if request.GET.get('is_approved', "") == "false" else False
if include_unapproved and not request.user.is_superuser:
raise Http404()
params, _ = parse_args_for_es(request)
page = params.pop('page', 1)
page = int(page[0] if isinstance(page, list) else page)
results = es_snapshot_query(params, SNAPSHOT_FACETS)
hits = results.get('hits', {}).get('hits', [])
hits = deduplicate(hits)
d_results = [Domain.wrap(res['_source']) for res in hits]
starter_apps = request.GET.get('is_starter_app', None)
sort_by = request.GET.get('sort_by', None)
if sort_by == 'newest':
pass
else:
d_results = Domain.hit_sort(d_results)
persistent_params = {}
if sort_by:
persistent_params["sort_by"] = sort_by
if include_unapproved:
persistent_params["is_approved"] = "false"
persistent_params = urlencode(persistent_params) # json.dumps(persistent_params)
more_pages = False if len(d_results) <= page * page_length else True
facet_map = fill_mapping_with_facets(SNAPSHOT_MAPPING, results, params)
vals = dict(
apps=d_results[(page - 1) * page_length:page * page_length],
page=page,
prev_page=(page - 1),
next_page=(page + 1),
more_pages=more_pages,
sort_by=sort_by,
show_starter_apps=starter_apps,
include_unapproved=include_unapproved,
facet_map=facet_map,
facets=results.get("facets", []),
query_str=request.META['QUERY_STRING'],
search_query=params.get('search', [""])[0],
persistent_params=persistent_params,
)
return render(request, template, vals)
def appstore_api(request):
params, facets = parse_args_for_es(request)
results = es_snapshot_query(params, facets)
return HttpResponse(json.dumps(results), content_type="application/json")
def es_snapshot_query(params, facets=None, terms=None, sort_by="snapshot_time"):
if terms is None:
terms = ['is_approved', 'sort_by', 'search']
if facets is None:
facets = []
q = {"sort": {sort_by: {"order": "desc"}},
"query": {"bool": {"must": [
{"match": {'doc_type': "Domain"}},
{"term": {"published": True}},
{"term": {"is_snapshot": True}}
]}},
"filter": {"and": [{"term": {"is_approved": params.get('is_approved', None) or True}}]}}
search_query = params.get('search', "")
if search_query:
q['query']['bool']['must'].append({
"match": {
"_all": {
"query": search_query,
"operator": "and"
}
}
})
return es_query(params, facets, terms, q)
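# Illustration (not used by the views above): a call such as
#     es_snapshot_query({'search': 'malaria'}, SNAPSHOT_FACETS)
# hands es_query() roughly this body:
#     {"sort": {"snapshot_time": {"order": "desc"}},
#      "query": {"bool": {"must": [
#          {"match": {"doc_type": "Domain"}},
#          {"term": {"published": True}},
#          {"term": {"is_snapshot": True}},
#          {"match": {"_all": {"query": "malaria", "operator": "and"}}}]}},
#      "filter": {"and": [{"term": {"is_approved": True}}]}}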
@require_superuser
def approve_app(request, domain):
domain = Domain.get(domain)
if request.GET.get('approve') == 'true':
domain.is_approved = True
domain.save()
elif request.GET.get('approve') == 'false':
domain.is_approved = False
domain.save()
return HttpResponseRedirect(request.META.get('HTTP_REFERER') or reverse('appstore'))
@login_required
@retry_resource(3)
def import_app(request, domain):
user = request.couch_user
if not user.is_eula_signed():
messages.error(request, 'You must agree to our eula to download an app')
return project_info(request, domain)
from_project = Domain.get(domain)
if request.method == 'POST' and from_project.is_snapshot:
if not from_project.published:
messages.error(request, "This project is not published and can't be downloaded")
return project_info(request, domain)
to_project_name = request.POST['project']
if not user.is_member_of(to_project_name):
messages.error(request, _("You don't belong to that project"))
return project_info(request, domain)
full_apps = from_project.full_applications(include_builds=False)
assert full_apps, 'Bad attempt to copy apps from a project without any!'
for app in full_apps:
new_doc = from_project.copy_component(app['doc_type'], app.get_id, to_project_name, user)
clear_app_cache(request, to_project_name)
from_project.downloads += 1
from_project.save()
messages.success(request, render_to_string("appstore/partials/view_wiki.html",
{"pre": _("Application successfully imported!")}),
extra_tags="html")
return HttpResponseRedirect(reverse('view_app', args=[to_project_name, new_doc.id]))
else:
return HttpResponseRedirect(reverse('project_info', args=[domain]))
@login_required
def copy_snapshot(request, domain):
user = request.couch_user
if not user.is_eula_signed():
messages.error(request, 'You must agree to our eula to download an app')
return project_info(request, domain)
dom = Domain.get(domain)
if request.method == "POST" and dom.is_snapshot:
assert dom.full_applications(include_builds=False), 'Bad attempt to copy project without any apps!'
from corehq.apps.registration.forms import DomainRegistrationForm
args = {
'domain_name': request.POST['new_project_name'],
'hr_name': request.POST['new_project_name'],
'eula_confirmed': True,
}
form = DomainRegistrationForm(args)
if request.POST.get('new_project_name', ""):
if not dom.published:
messages.error(request, _("This project is not published and can't be downloaded"))
return project_info(request, domain)
if not form.is_valid():
messages.error(request, form.errors)
return project_info(request, domain)
new_domain_name = name_to_url(form.cleaned_data['hr_name'], "project")
with CriticalSection(['copy_domain_snapshot_{}_to_{}'.format(dom.name, new_domain_name)]):
try:
new_domain = dom.save_copy(new_domain_name,
new_hr_name=form.cleaned_data['hr_name'],
user=user)
except NameUnavailableException:
messages.error(request, _("A project by that name already exists"))
return project_info(request, domain)
# sign new project up for trial
create_30_day_trial(new_domain)
def inc_downloads(d):
d.downloads += 1
apply_update(dom, inc_downloads)
messages.success(request, render_to_string("appstore/partials/view_wiki.html",
{"pre": _("Project copied successfully!")}),
extra_tags="html")
return HttpResponseRedirect(reverse('view_app',
args=[new_domain.name, new_domain.full_applications()[0].get_id]))
else:
messages.error(request, _("You must specify a name for the new project"))
return project_info(request, domain)
else:
return HttpResponseRedirect(reverse('project_info', args=[domain]))
def project_image(request, domain):
project = Domain.get(domain)
if project.image_path:
image = project.fetch_attachment(project.image_path)
return HttpResponse(image, content_type=project.image_type)
else:
raise Http404()
def project_documentation_file(request, domain):
project = Domain.get(domain)
if project.documentation_file_path:
documentation_file = project.fetch_attachment(project.documentation_file_path)
return HttpResponse(documentation_file, content_type=project.documentation_file_type)
else:
raise Http404()
@login_required
def deployment_info(request, domain, template="appstore/deployment_info.html"):
dom = Domain.get_by_name(domain)
if not dom or not dom.deployment.public:
raise Http404()
# get facets
results = es_deployments_query({}, DEPLOYMENT_FACETS)
facet_map = fill_mapping_with_facets(DEPLOYMENT_MAPPING, results, {})
return render(request, template, {
'domain': dom,
'search_url': reverse('deployments'),
'url_base': reverse('deployments'),
'facet_map': facet_map,
})
@login_required
def deployments(request, template="appstore/deployments.html"):
params, _ = parse_args_for_es(request)
params = dict([(DEPLOYMENT_MAPPING.get(p, p), params[p]) for p in params])
page = int(params.pop('page', 1))
results = es_deployments_query(params, DEPLOYMENT_FACETS)
d_results = [Domain.wrap(res['_source']) for res in results['hits']['hits']]
more_pages = False if len(d_results) <= page * 10 else True
facet_map = fill_mapping_with_facets(DEPLOYMENT_MAPPING, results, params)
include_unapproved = True if request.GET.get('is_approved', "") == "false" else False
vals = {'deployments': d_results[(page - 1) * 10:page * 10],
'page': page,
'prev_page': page - 1,
'next_page': (page + 1),
'more_pages': more_pages,
'include_unapproved': include_unapproved,
'facet_map': facet_map,
'query_str': request.META['QUERY_STRING'],
'search_url': reverse('deployments'),
'search_query': params.get('search', [""])[0]}
return render(request, template, vals)
def deployments_api(request):
params, facets = parse_args_for_es(request)
params = dict([(DEPLOYMENT_MAPPING.get(p, p), params[p]) for p in params])
results = es_deployments_query(params, facets)
return HttpResponse(json.dumps(results), content_type="application/json")
def es_deployments_query(params, facets=None, terms=None, sort_by="snapshot_time"):
if terms is None:
terms = ['is_approved', 'sort_by', 'search']
if facets is None:
facets = []
q = {"query": {"bool": {"must": [{"match": {'doc_type': "Domain"}},
{"term": {"deployment.public": True}}]}}}
search_query = params.get('search', "")
if search_query:
q['query']['bool']['must'].append({
"match": {
"_all": {
"query": search_query,
"operator": "and"
}
}
})
return es_query(params, facets, terms, q)
def media_files(request, domain, template="appstore/media_files.html"):
dom = Domain.get(domain)
if not can_view_app(request, dom):
raise Http404()
return render(request, template, {
"project": dom,
"url_base": reverse('appstore')
})
| 37.044041 | 114 | 0.628575 | [
"BSD-3-Clause"
] | johan--/commcare-hq | corehq/apps/appstore/views.py | 14,299 | Python |
#! /usr/bin/env python
# coding=utf-8
# Copyright (c) 2019 Uber Technologies, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import logging
import os
import sys
from functools import partial
from multiprocessing import Pool
from typing import Union
import numpy as np
import tensorflow as tf
from ludwig.constants import *
from ludwig.encoders.image_encoders import ENCODER_REGISTRY
from ludwig.features.base_feature import InputFeature
from ludwig.utils.data_utils import get_abs_path
from ludwig.utils.fs_utils import upload_h5
from ludwig.utils.image_utils import greyscale
from ludwig.utils.image_utils import num_channels_in_image
from ludwig.utils.image_utils import resize_image
from ludwig.utils.image_utils import get_image_from_path, read_image
from ludwig.utils.misc_utils import set_default_value
logger = logging.getLogger(__name__)
image_scaling_registry = {
'pixel_normalization': lambda x: x * 1.0 / 255,
'pixel_standardization': lambda x: tf.map_fn(
lambda f: tf.image.per_image_standardization(f), x)
}
class ImageFeatureMixin:
type = IMAGE
preprocessing_defaults = {
'missing_value_strategy': BACKFILL,
'in_memory': True,
'resize_method': 'interpolate',
'scaling': 'pixel_normalization',
'num_processes': 1,
'infer_image_dimensions': False,
'infer_image_max_height': 256,
'infer_image_max_width': 256,
'infer_image_sample_size': 100
}
preprocessing_schema = {
'missing_value_strategy': {'type': 'string', 'enum': MISSING_VALUE_STRATEGY_OPTIONS},
'in_memory': {'type': 'boolean'},
'resize_method': {'type': 'string', 'enum': RESIZE_METHODS},
'scaling': {'type': 'string', 'enum': list(image_scaling_registry.keys())},
'num_processes': {'type': 'integer', 'minimum': 0},
'height': {'type': 'integer', 'minimum': 0},
'width': {'type': 'integer', 'minimum': 0},
'num_channels': {'type': 'integer', 'minimum': 0},
'infer_image_dimensions': {'type': 'boolean'},
'infer_image_max_height': {'type': 'integer', 'minimum': 0},
'infer_image_max_width': {'type': 'integer', 'minimum': 0},
'infer_image_sample_size': {'type': 'integer', 'minimum': 0}
}
@staticmethod
def cast_column(column, backend):
return column
@staticmethod
def get_feature_meta(column, preprocessing_parameters, backend):
return {
PREPROCESSING: preprocessing_parameters
}
@staticmethod
def _read_image_and_resize(
img_entry: Union[str, 'numpy.array'],
img_width: int,
img_height: int,
should_resize: bool,
num_channels: int,
resize_method: str,
user_specified_num_channels: int
):
"""
:param img_entry Union[str, 'numpy.array']: if str file path to the
image else numpy.array of the image itself
:param img_width: expected width of the image
:param img_height: expected height of the image
:param should_resize: Should the image be resized?
:param resize_method: type of resizing method
:param num_channels: expected number of channels in the first image
:param user_specified_num_channels: did the user specify num channels?
:return: image object
Helper method to read and resize an image according to model defn.
If the user doesn't specify a number of channels, we use the first image
in the dataset as the source of truth. If any image in the dataset
doesn't have the same number of channels as the first image,
raise an exception.
If the user specifies a number of channels, we try to convert all the
images to the specifications by dropping channels/padding 0 channels
"""
img = read_image(img_entry)
img_num_channels = num_channels_in_image(img)
if img_num_channels == 1:
img = img.reshape((img.shape[0], img.shape[1], 1))
if should_resize:
img = resize_image(img, (img_height, img_width), resize_method)
if user_specified_num_channels is True:
# convert to greyscale if needed
if num_channels == 1 and (
img_num_channels == 3 or img_num_channels == 4):
img = greyscale(img)
img_num_channels = 1
# Number of channels is specified by the user
img_padded = np.zeros((img_height, img_width, num_channels),
dtype=np.uint8)
min_num_channels = min(num_channels, img_num_channels)
img_padded[:, :, :min_num_channels] = img[:, :, :min_num_channels]
img = img_padded
if img_num_channels != num_channels:
                logger.warning(
                    "Image has {0} channels, whereas {1} "
                    "channels are expected. Dropping/adding channels "
                    "with 0s as appropriate".format(
                        img_num_channels, num_channels))
else:
# If the image isn't like the first image, raise exception
if img_num_channels != num_channels:
raise ValueError(
'Image has {0} channels, unlike the first image, which '
'has {1} channels. Make sure all the images have the same '
'number of channels or use the num_channels property in '
'image preprocessing'.format(img_num_channels,
num_channels))
if img.shape[0] != img_height or img.shape[1] != img_width:
raise ValueError(
"Images are not of the same size. "
"Expected size is {0}, "
"current image size is {1}."
"Images are expected to be all of the same size "
"or explicit image width and height are expected "
"to be provided. "
"Additional information: "
"https://ludwig-ai.github.io/ludwig-docs/user_guide/#image-features-preprocessing"
.format([img_height, img_width, num_channels], img.shape)
)
return img
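    # Illustrative note (ours, not from the original docstring): with num_channels=3 in
    # the feature config and a (h, w, 2) image, the user-specified branch above pads a
    # zero-filled third channel, e.g.
    #     img_padded = np.zeros((h, w, 3), dtype=np.uint8)
    #     img_padded[:, :, :2] = img[:, :, :2]
    # while num_channels=1 with an RGB/RGBA input goes through greyscale() first.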
@staticmethod
def _finalize_preprocessing_parameters(
preprocessing_parameters: dict,
first_img_entry: Union[str, 'numpy.array'],
src_path: str,
input_feature_col: np.array
):
"""
Helper method to determine the height, width and number of channels for
preprocessing the image data. This is achieved by looking at the
parameters provided by the user. When there are some missing parameters,
we fall back on to the first image in the dataset. The assumption being
        that all the images in the data are expected to be of the same size with
the same number of channels
"""
first_image = read_image(first_img_entry)
first_img_height = first_image.shape[0]
first_img_width = first_image.shape[1]
first_img_num_channels = num_channels_in_image(first_image)
should_resize = False
if (HEIGHT in preprocessing_parameters or
WIDTH in preprocessing_parameters):
should_resize = True
try:
height = int(preprocessing_parameters[HEIGHT])
width = int(preprocessing_parameters[WIDTH])
except ValueError as e:
raise ValueError(
'Image height and width must be set and have '
'positive integer values: ' + str(e)
)
if height <= 0 or width <= 0:
raise ValueError(
'Image height and width must be positive integers'
)
else:
# User hasn't specified height and width.
# Default to first image, or infer from sample.
height, width = first_img_height, first_img_width
if preprocessing_parameters[INFER_IMAGE_DIMENSIONS]:
should_resize = True
sample_size = min(len(input_feature_col), preprocessing_parameters[INFER_IMAGE_SAMPLE_SIZE])
sample_images = [read_image(get_image_from_path(src_path, img))
for img in input_feature_col[:sample_size]]
if sample_images:
height_avg = min(
sum(x.shape[0] for x in sample_images) / len(sample_images),
preprocessing_parameters[INFER_IMAGE_MAX_HEIGHT])
width_avg = min(
sum(x.shape[1] for x in sample_images) / len(sample_images),
preprocessing_parameters[INFER_IMAGE_MAX_WIDTH])
height, width = round(height_avg), round(width_avg)
logger.debug("Inferring height: {0} and width: {1}".format(height, width))
else:
logger.warning("Sample set for inference is empty, default to height and width of first image")
if NUM_CHANNELS in preprocessing_parameters:
# User specified num_channels in the model/feature config
user_specified_num_channels = True
num_channels = preprocessing_parameters[NUM_CHANNELS]
else:
user_specified_num_channels = False
num_channels = first_img_num_channels
assert isinstance(num_channels, int), ValueError(
'Number of image channels needs to be an integer'
)
return (
should_resize,
width,
height,
num_channels,
user_specified_num_channels,
first_image
)
@staticmethod
def add_feature_data(
feature,
input_df,
proc_df,
metadata,
preprocessing_parameters,
backend,
skip_save_processed_input
):
in_memory = preprocessing_parameters['in_memory']
if PREPROCESSING in feature and 'in_memory' in feature[PREPROCESSING]:
in_memory = feature[PREPROCESSING]['in_memory']
num_processes = preprocessing_parameters['num_processes']
if PREPROCESSING in feature and 'num_processes' in feature[
PREPROCESSING]:
num_processes = feature[PREPROCESSING]['num_processes']
src_path = None
if SRC in metadata:
src_path = os.path.dirname(os.path.abspath(metadata.get(SRC)))
num_images = len(input_df[feature[COLUMN]])
if num_images == 0:
raise ValueError('There are no images in the dataset provided.')
first_img_entry = next(iter(input_df[feature[COLUMN]]))
logger.debug(
'Detected image feature type is {}'.format(type(first_img_entry))
)
if not isinstance(first_img_entry, str) \
and not isinstance(first_img_entry, np.ndarray):
raise ValueError(
'Invalid image feature data type. Detected type is {}, '
'expect either string for file path or numpy array.'
.format(type(first_img_entry))
)
first_img_entry = get_image_from_path(src_path, first_img_entry)
(
should_resize,
width,
height,
num_channels,
user_specified_num_channels,
first_image
) = ImageFeatureMixin._finalize_preprocessing_parameters(
preprocessing_parameters, first_img_entry, src_path, input_df[feature[COLUMN]]
)
metadata[feature[NAME]][PREPROCESSING]['height'] = height
metadata[feature[NAME]][PREPROCESSING]['width'] = width
metadata[feature[NAME]][PREPROCESSING][
'num_channels'] = num_channels
read_image_and_resize = partial(
ImageFeatureMixin._read_image_and_resize,
img_width=width,
img_height=height,
should_resize=should_resize,
num_channels=num_channels,
resize_method=preprocessing_parameters['resize_method'],
user_specified_num_channels=user_specified_num_channels
)
# check to see if the active backend can support lazy loading of
# image features from the hdf5 cache.
backend.check_lazy_load_supported(feature)
if in_memory or skip_save_processed_input:
# Number of processes to run in parallel for preprocessing
metadata[feature[NAME]][PREPROCESSING][
'num_processes'] = num_processes
metadata[feature[NAME]]['reshape'] = (height, width, num_channels)
# Split the dataset into pools only if we have an explicit request to use
# multiple processes. In case we have multiple input images use the
# standard code anyway.
if backend.supports_multiprocessing and (
num_processes > 1 or num_images > 1):
all_img_entries = [get_abs_path(src_path, img_entry)
if isinstance(img_entry, str) else img_entry
for img_entry in input_df[feature[COLUMN]]]
with Pool(num_processes) as pool:
logger.debug(
'Using {} processes for preprocessing images'.format(
num_processes
)
)
proc_df[feature[PROC_COLUMN]] = pool.map(
read_image_and_resize, all_img_entries
)
else:
# If we're not running multiple processes and we are only processing one
# image just use this faster shortcut, bypassing multiprocessing.Pool.map
logger.debug(
'No process pool initialized. Using internal process for preprocessing images'
)
# helper function for handling single image
def _get_processed_image(img_store):
if isinstance(img_store, str):
return read_image_and_resize(
get_abs_path(src_path, img_store)
)
else:
return read_image_and_resize(img_store)
proc_df[feature[PROC_COLUMN]] = backend.df_engine.map_objects(
input_df[feature[COLUMN]],
_get_processed_image
)
else:
all_img_entries = [get_abs_path(src_path, img_entry)
if isinstance(img_entry, str) else img_entry
for img_entry in input_df[feature[COLUMN]]]
data_fp = backend.cache.get_cache_path(
metadata.get(SRC), metadata.get(CHECKSUM), TRAINING
)
with upload_h5(data_fp) as h5_file:
# todo future add multiprocessing/multithreading
image_dataset = h5_file.create_dataset(
feature[PROC_COLUMN] + '_data',
(num_images, height, width, num_channels),
dtype=np.uint8
)
for i, img_entry in enumerate(all_img_entries):
image_dataset[i, :height, :width, :] = (
read_image_and_resize(img_entry)
)
h5_file.flush()
proc_df[feature[PROC_COLUMN]] = np.arange(num_images)
return proc_df
class ImageInputFeature(ImageFeatureMixin, InputFeature):
height = 0
width = 0
num_channels = 0
scaling = 'pixel_normalization'
encoder = 'stacked_cnn'
def __init__(self, feature, encoder_obj=None):
super().__init__(feature)
self.overwrite_defaults(feature)
if encoder_obj:
self.encoder_obj = encoder_obj
else:
self.encoder_obj = self.initialize_encoder(feature)
def call(self, inputs, training=None, mask=None):
assert isinstance(inputs, tf.Tensor)
assert inputs.dtype in [tf.uint8, tf.int64]
# casting and rescaling
inputs = tf.cast(inputs, tf.float32) / 255
inputs_encoded = self.encoder_obj(
inputs, training=training, mask=mask
)
return inputs_encoded
@classmethod
def get_input_dtype(cls):
return tf.uint8
def get_input_shape(self):
return self.height, self.width, self.num_channels
@staticmethod
def update_config_with_metadata(
input_feature,
feature_metadata,
*args,
**kwargs
):
for key in ['height', 'width', 'num_channels', 'scaling']:
input_feature[key] = feature_metadata[PREPROCESSING][key]
@staticmethod
def populate_defaults(input_feature):
set_default_value(input_feature, TIED, None)
set_default_value(input_feature, PREPROCESSING, {})
encoder_registry = ENCODER_REGISTRY
| 39.908072 | 115 | 0.600258 | [
"Apache-2.0"
] | Yard1/ludwig | ludwig/features/image_feature.py | 17,799 | Python |
alien_color = ['green', 'yellow', 'red']
alien = 'green'
if alien == 'green':
point = 5
elif alien == 'yellow':
point = 10
else:
point = 15
print('The player just earned ' + str(point) + ' points.')
| 15.357143 | 58 | 0.576744 | [
"BSD-3-Clause"
] | dantin/python-by-example | crash_course/ch05/exec/alien_color_3.py | 215 | Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Author: David McCue
import sqlite3, re, os
from bottle import route, run, debug, template, request, static_file, error, response
db_filename=os.path.dirname(os.path.realpath(__file__)) + '/db/kickstarter.db'
# Validate currency values
def valid_amount(amount):
amount = re.sub(r"\$","",amount)
amount_regex=re.compile('^[0-9]+(\.[0-9]{2})?$')
if amount_regex.match(amount) and float(amount) > 0.0:
return True
# Validate names
def valid_name(name):
name_regex=re.compile('^[A-Za-z0-9-_]{4,20}$')
if name_regex.match(name):
return True
# Calculate luhn checksum
# Credit: wikipedia
def luhn_checksum(card_number):
def digits_of(n):
return [int(d) for d in str(n)]
digits = digits_of(card_number)
odd_digits = digits[-1::-2]
even_digits = digits[-2::-2]
checksum = 0
checksum += sum(odd_digits)
for d in even_digits:
checksum += sum(digits_of(d*2))
return checksum % 10
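# Worked example (illustration only): for the common test number 4111111111111111 the
# non-doubled digits sum to 8 and the doubled digits contribute 2*7 + 8 = 22, giving a
# checksum total of 30; since 30 % 10 == 0, luhn_checksum("4111111111111111") returns 0
# and valid_creditcard("4111111111111111") returns True.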
# Validate credit card
def valid_creditcard(creditcard):
creditcard_regex=re.compile('^[0-9]{1,19}$')
if creditcard_regex.match(creditcard):
if luhn_checksum(creditcard) == 0:
return True
# retrieve project by name
@route('/project/<project>')
def project_list(project):
conn = sqlite3.connect(db_filename)
c = conn.cursor()
c.execute("SELECT id, name, target FROM 'project' WHERE name = '" + project + "';")
result = c.fetchall()
c.close()
if result:
c = conn.cursor()
c.execute("SELECT name, amount FROM 'transaction' WHERE projectid = " + str(result[0][0]) + ";")
backers = c.fetchall()
c.close()
return {'project': result[0], 'backers': backers}
else:
response.status = 400
return {'msg':'ERROR: Project name not found'}
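# Sketch (not wired to any route): the same lookup written with a parameterised query,
# letting sqlite3 handle quoting instead of building SQL by string concatenation as above.
def project_lookup_example(project_name):
    conn = sqlite3.connect(db_filename)
    c = conn.cursor()
    c.execute("SELECT id, name, target FROM 'project' WHERE name = ?;", (project_name,))
    result = c.fetchall()
    c.close()
    conn.close()
    return result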
# add project
@route('/project', method='POST')
def project_new():
project = request.json
print "DEBUG: " + str(project)
if not valid_name(project['name']):
response.status = 400
return {'msg':'ERROR: Project name validation error'}
if not valid_amount(project['target']):
response.status = 400
return {'msg':'ERROR: Project target validation error'}
conn = sqlite3.connect(db_filename)
c = conn.cursor()
c.execute("SELECT name FROM project WHERE name = '" + project['name'] + "';")
result = c.fetchall()
c.close()
if result:
response.status = 400
return {'msg':'ERROR: Project name already exists'}
conn = sqlite3.connect(db_filename)
conn.execute("INSERT INTO 'project' (name, target) VALUES ('" + project['name'] + "', '" + project['target'] + "');")
conn.commit()
conn.close()
# add transaction
@route('/back', method='POST')
def back_new():
back = request.json
# Validate inputs
if not valid_name(back['name']):
response.status = 412
return {'msg':'ERROR: Backer name validation error'}
if not valid_amount(back['amount']):
response.status = 412
return {'msg':'ERROR: Backer amount validation error'}
if not valid_creditcard(back['cc']):
response.status = 400
return {'msg':'ERROR: This card is invalid'}
# Check credit card is unique to this user name
conn = sqlite3.connect(db_filename)
c = conn.cursor()
c.execute("SELECT name FROM 'transaction' WHERE cc = '" + back['cc'] + "' AND name != '" + back['name'] + "';")
result = c.fetchall()
c.close()
if result:
response.status = 400
return {'msg':'ERROR: That card has already been added by another user!'}
# Get project id from name
c = conn.cursor()
c.execute("SELECT id FROM 'project' WHERE name = '" + back['projectname'] + "';")
result = c.fetchall()
c.close()
if not result:
response.status = 400
return {'msg':'ERROR: Unable to find project name'}
back['projectid'] = result[0][0]
conn.execute("INSERT INTO 'transaction' (projectid, name, cc, amount) VALUES (" + str(back['projectid']) + ", '" + back['name'] + "', " + str(back['cc']) + ", " + str(back['amount']) + ");")
conn.commit()
# retrieve backer by name
@route('/backer/<backer>')
def backer_list(backer):
conn = sqlite3.connect(db_filename)
c = conn.cursor()
c.execute("SELECT project.name, 'transaction'.name, 'transaction'.amount FROM 'transaction' JOIN project ON 'transaction'.projectid = project.id WHERE 'transaction'.name = '" + backer + "';")
result = c.fetchall()
c.close()
if result:
return {'backer': result}
else:
response.status = 400
return {'msg':'ERROR: Backer not found'}
@error(403)
def mistake403(code):
response.status = 403
return {'msg':'ERROR: There is a mistake in your URL'}
@error(404)
def mistake404(code):
response.status = 404
return {'msg':'ERROR: This page does not exist'}
#debug(True)
run(reloader=True, host='0.0.0.0')
| 27.410405 | 193 | 0.652467 | [
"Apache-2.0"
] | dmccue/ks-cli | server.py | 4,742 | Python |
from amaranth.sim import Simulator, Settle
from program_counter import ProgramCounter
dut = ProgramCounter()
def bench():
yield dut.countEnable.eq(0)
yield dut.writeAdd.eq(0)
yield dut.writeEnable.eq(0)
yield dut.dataIn.eq(0)
yield
yield dut.writeEnable.eq(1)
yield dut.dataIn.eq(1000)
yield
yield dut.writeEnable.eq(0)
yield dut.writeAdd.eq(0)
yield
assert((yield dut.dataOut) == 1000)
yield dut.writeEnable.eq(1)
yield dut.writeAdd.eq(1)
yield dut.dataIn.eq(-50)
yield
yield dut.writeEnable.eq(0)
yield dut.writeAdd.eq(0)
yield
assert((yield dut.dataOut) == 946)
for i in range(16):
yield dut.countEnable.eq(1)
yield
assert((yield dut.dataOut) == 946 + (i*4))
sim = Simulator(dut)
sim.add_clock(1e-6) # 1 MHz
sim.add_sync_process(bench)
with sim.write_vcd("program_counter.vcd"):
sim.run()
| 20.772727 | 50 | 0.657549 | [
"Apache-2.0"
] | racerxdl/riskow-python | program_counter_tb.py | 914 | Python |
from logs import logDecorator as lD
import jsonref, sqlite3
config = jsonref.load(open('../config/config.json'))
logBase = config['logging']['logBase'] + '.databaseIO.sqLiteIO'
@lD.log(logBase + '.getAllData')
def getAllData(logger, query, values=None, dbName=None):
'''query data from the database
Query the data over here. If there is a problem with the data, it is going
to return the value of None, and log the error. Your program needs to check
whether there was an error with the query by checking for a None return
    value. Note that the locations of the databases are assumed to be present
within the file ``../config/db.json``.
Parameters
----------
logger : {logging.logger}
logging element
query : {str}
        The query to be made to the database
values : {tuple or list-like}, optional
Additional values to be passed to the query (the default is None)
dbName : {str or None}, optional
The name of the database to use. If this is None, the function will
attempt to read the name from the ``defaultDB`` item within the
file ``../config/db.json``.
Returns
-------
list or None
A list of tuples containing the values is returned. In case
there is an error, the error will be logged, and a None will
        be returned
'''
vals = None
try:
db = jsonref.load(open('../config/db.json'))
# Check whether a dbName is available
if (dbName is None) and ('defaultDB' in db):
dbName = db['defaultDB']
# Check whether a dbName has been specified
if dbName is None:
logger.error('A database name has not been specified.')
return None
conn = sqlite3.connect(db[dbName]['connection'])
cur = conn.cursor()
except Exception as e:
logger.error('Unable to connect to the database')
logger.error(str(e))
return
try:
if values is None:
cur.execute(query)
else:
cur.execute(query, values)
# We assume that the data is small so we
# can download the entire thing here ...
# -------------------------------------------
vals = cur.fetchall()
except Exception as e:
        logger.error('Unable to obtain data from the database for:\n query: {}\n values: {}'.format(query, values))
logger.error(str(e))
try:
cur.close()
conn.close()
except Exception as e:
logger.error('Unable to disconnect to the database')
logger.error(str(e))
return
return vals
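# Hypothetical usage sketch: the table, column and database names are invented, and the
# call omits the logger argument on the assumption that the lD.log decorator injects it,
# as elsewhere in this template.
def _example_get_users(min_age):
    query = 'SELECT name, age FROM users WHERE age >= ?'
    rows = getAllData(query, values=(min_age,), dbName='reportDB')
    return [] if rows is None else rows   # None signals an error that was already logged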
@lD.log(logBase + '.getDataIterator')
def getDataIterator(logger, query, values=None, chunks=100, dbName=None):
'''Create an iterator from a largish query
This is a generator that returns values in chunks of chunksize ``chunks``.
Parameters
----------
logger : {logging.logger}
logging element
query : {str}
        The query to be made to the database
values : {tuple or list-like}, optional
Additional values to be passed to the query (the default
is None)
chunks : {number}, optional
This is the number of rows that the data is going to return at every call
        of __next__() on this iterator. (the default is 100)
dbName : {str or None}, optional
The name of the database to use. If this is None, the function will
attempt to read the name from the ``defaultDB`` item within the
file ``../config/db.json``.
Yields
------
list of tuples
A list of tuples from the query, with a maximum of ``chunks`` tuples returned
at one time.
'''
try:
db = jsonref.load(open('../config/db.json'))
# Check whether a dbName is available
if (dbName is None) and ('defaultDB' in db):
dbName = db['defaultDB']
# Check whether a dbName has been specified
if dbName is None:
logger.error('A database name has not been specified.')
return None
conn = sqlite3.connect(db[dbName]['connection'])
cur = conn.cursor()
except Exception as e:
logger.error('Unable to connect to the database')
logger.error(str(e))
return
try:
if values is None:
cur.execute(query)
else:
cur.execute(query, values)
while True:
vals = cur.fetchmany(chunks)
if len(vals) == 0:
break
yield vals
except Exception as e:
        logger.error('Unable to obtain data from the database for:\n query: {}\n values: {}'.format(query, values))
logger.error(str(e))
try:
conn.close()
except Exception as e:
logger.error('Unable to disconnect to the database')
logger.error(str(e))
return
return
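# Hypothetical usage sketch (names invented, logger again assumed to be injected by the
# decorator): stream a large result set in chunks of 500 rows instead of materialising
# everything in memory.
def _example_stream_measurements():
    query = 'SELECT id, value FROM measurements'
    for chunk in getDataIterator(query, chunks=500, dbName='reportDB'):
        for row in chunk:
            pass  # process one tuple at a time here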
@lD.log(logBase + '.getSingleDataIterator')
def getSingleDataIterator(logger, query, values=None, dbName=None):
'''Create an iterator from a largish query
This is a generator that returns values in chunks of chunksize 1.
Parameters
----------
logger : {logging.logger}
logging element
query : {str}
        The query to be made to the database
values : {tuple or list-like}, optional
Additional values to be passed to the query (the default
is None)
dbName : {str or None}, optional
The name of the database to use. If this is None, the function will
attempt to read the name from the ``defaultDB`` item within the
file ``../config/db.json``.
Yields
------
list of tuples
A list of tuples from the query, with a maximum of ``chunks`` tuples returned
at one time.
'''
try:
db = jsonref.load(open('../config/db.json'))
# Check whether a dbName is available
if (dbName is None) and ('defaultDB' in db):
dbName = db['defaultDB']
# Check whether a dbName has been specified
if dbName is None:
logger.error('A database name has not been specified.')
return None
conn = sqlite3.connect(db[dbName]['connection'])
cur = conn.cursor()
except Exception as e:
logger.error('Unable to connect to the database')
logger.error(str(e))
return
try:
if values is None:
cur.execute(query)
else:
cur.execute(query, values)
while True:
vals = cur.fetchone()
if vals is None:
break
yield vals
except Exception as e:
        logger.error('Unable to obtain data from the database for:\n query: {}\n values: {}'.format(query, values))
logger.error(str(e))
try:
conn.close()
except Exception as e:
logger.error('Unable to disconnect to the database')
logger.error(str(e))
return
return
@lD.log(logBase + '.commitData')
def commitData(logger, query, values=None, dbName=None):
'''query data from the database
Query the data over here. If there is a problem with
the data, it is going to return the value of ``None``, and
log the error. Your program needs to check whether
there was an error with the query by checking for a ``None``
return value
Parameters
----------
logger : {logging.logger}
logging element
query : {str}
        The query to be made to the database
values : {tuple or list-like}, optional
Additional values to be passed to the query (the default
is None)
dbName : {str or None}, optional
The name of the database to use. If this is None, the function will
attempt to read the name from the ``defaultDB`` item within the
file ``../config/db.json``.
Returns
-------
True or None
On successful completion, a ``True`` is returned. In case
there is an error, the error will be logged, and a ``None`` will
        be returned
'''
vals = True
try:
db = jsonref.load(open('../config/db.json'))
# Check whether a dbName is available
if (dbName is None) and ('defaultDB' in db):
dbName = db['defaultDB']
# Check whether a dbName has been specified
if dbName is None:
logger.error('A database name has not been specified.')
return None
conn = sqlite3.connect(db[dbName]['connection'])
cur = conn.cursor()
except Exception as e:
logger.error('Unable to connect to the database')
logger.error(str(e))
return None
try:
if values is None:
cur.execute(query)
else:
cur.execute(query, values)
except Exception as e:
        logger.error('Unable to obtain data from the database for:\n query: {}\n values: {}'.format(query, values))
logger.error(str(e))
vals = None
try:
conn.commit()
cur.close()
conn.close()
except Exception as e:
logger.error('Unable to disconnect to the database')
logger.error(str(e))
return
return vals
@lD.log(logBase + '.commitDataList')
def commitDataList(logger, query, values, dbName=None):
'''query data from the database
Query the data over here. If there is a problem with
the data, it is going to return the value of None, and
log the error. Your program needs to check whether
there was an error with the query by checking for a ``None``
return value
Parameters
----------
logger : {logging.logger}
logging element
query : {str}
        The query to be made to the database
values : {tuple or list-like}, optional
Additional values to be passed to the query (the default
is None)
dbName : {str or None}, optional
The name of the database to use. If this is None, the function will
attempt to read the name from the ``defaultDB`` item within the
file ``../config/db.json``.
Returns
-------
True or None
A successful completion of this function returns a ``True``.
In case there is an error, the error will be logged, and a ``None`` will
be returned
'''
val = True
try:
db = jsonref.load(open('../config/db.json'))
# Check whether a dbName is available
if (dbName is None) and ('defaultDB' in db):
dbName = db['defaultDB']
# Check whether a dbName has been specified
if dbName is None:
logger.error('A database name has not been specified.')
return None
conn = sqlite3.connect(db[dbName]['connection'])
cur = conn.cursor()
except Exception as e:
logger.error('Unable to connect to the database')
logger.error(str(e))
return None
try:
cur.executemany(query, values)
except Exception as e:
        logger.error('Unable to execute query for:\n query: {}\n values: {}'.format(query, values))
logger.error(str(e))
val = None
try:
conn.commit()
cur.close()
conn.close()
except Exception as e:
logger.error('Unable to disconnect to the database')
logger.error(str(e))
return None
return val
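# Hypothetical usage sketch (schema invented, logger assumed to be injected by the
# decorator): bulk-insert rows with a parameterised query so executemany handles quoting.
def _example_bulk_insert(rows):
    query = 'INSERT INTO measurements (id, value) VALUES (?, ?)'
    return commitDataList(query, rows, dbName='reportDB')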
| 30.727273 | 113 | 0.573373 | [
"MIT"
] | madelinelimm/newcookiectest | src/lib/databaseIO/sqLiteIO.py | 11,830 | Python |
#!/usr/bin/python
from numpy import array
from scipy.special import erf
from scipy.optimize import minimize
from math import sqrt  # only sqrt is used below; math.pi would be shadowed by the pi() model function
#import dicom
line_array = [] ## global
def read_line (file_name ):
with open( file_name ) as f:
for line in f:
line_array.append( [float( line.split()[0] ), float( line.split()[2] )] )
read_line("4mv_line.csv")
line_len_2 = int(len(line_array)*0.5) ## global
def pi(x, b): # 0 1 2 3 4 5 6
# b is np.array of these parameters: [sigma_1, sigma_2, w_1, x_sh, bkg, B, b]
s_1 = 0.5*b[5]/(abs(b[2])*b[0]+abs(1-abs(b[2]))*b[1])
s_2 = abs(b[2])*b[0]*erf( (b[6]-x-b[3])/(sqrt(2)*b[0]) )
s_3 = abs(b[2])*b[0]*erf( (-b[6]-x-b[3])/(sqrt(2)*b[0]) )
s_4 = abs(1-abs(b[2]))*b[1]*erf( (b[6]-x-b[3])/(sqrt(2)*b[1]) )
s_5 = abs(1-abs(b[2]))*b[1]*erf( (-b[6]-x-b[3])/(sqrt(2)*b[1]) )
return s_1*(s_2 - s_3 + s_4 - s_5) + b[4] # x in mm
def s(b):
n_points_checked = 190
halv = int( n_points_checked*0.5 )
temp = 0.0
for i in range( n_points_checked ):
x = (i-halv)*0.2481
a = pi(x, b) - line_array[ line_len_2 - halv +i ][1]
temp += a*a
return temp
# [sigma_1, sigma_2, w_1, x_sh, bkg, B, b ]
x0 = array([1.58, 0.58, 0.08, -0.03, 1047.0, 15031.0, 1.40]) # initial values for minimize
print ( x0 )
res = minimize(s, x0, method='nelder-mead', options={'xtol': 1e-2, 'disp': True, 'maxfev':1e5, 'maxiter':1e5} )
print (res.x)
print (res.fun * 1e-6)
# print out the whole line
for i in range(190):
x = (i-95)*0.2481 # x in milimiters
print(x,", ", line_array[line_len_2 - 95 + i][1],", ",pi(x,res.x) )
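# Reading of the model fitted above (our annotation): pi(x, b) describes a slit of
# half-width b[6] blurred by a two-component Gaussian kernel with widths sigma_1 = b[0],
# sigma_2 = b[1] and weight w1 = |b[2]|, shifted by x_sh = b[3], scaled by B = b[5] and
# sitting on a background bkg = b[4]:
#     f(x) = B / (2*(w1*s1 + (1-w1)*s2))
#            * [ w1*s1*(erf((b-x-x_sh)/(sqrt(2)*s1)) - erf((-b-x-x_sh)/(sqrt(2)*s1)))
#              + (1-w1)*s2*(erf((b-x-x_sh)/(sqrt(2)*s2)) - erf((-b-x-x_sh)/(sqrt(2)*s2))) ] + bkg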
| 30.45614 | 111 | 0.541475 | [
"MIT"
] | oustling/dicom_profile_fitting | minimize/retic_xmm_2gauss/2minimize_4mv.py | 1,736 | Python |
import pandas as pd
import numpy as np
import scipy as sp
from scipy.optimize import linprog, minimize, NonlinearConstraint
from .pdf_quantile_functions import pdf_quantile_builder
from .support import diffMatMetalog, pdfMetalog, quantileMetalog, newtons_method_metalog
import time
import warnings
def a_vector_OLS_and_LP(m_dict,
bounds,
boundedness,
term_limit,
term_lower_bound,
fit_method,
alpha,
diff_error = .001,
diff_step = 0.001):
""" Main workhorse function of pymetalog package.
Called during metalog.__init__ method call.
Args:
m_dict (:obj:`dict` with keys ['params', 'dataValues', 'Y']): Initialized output_dict variable from metalog class.
- m_dict['params']: (:obj:`dict` with keys ['bounds', 'boundedness', 'term_limit', 'term_lower_bound', 'step_len', 'fit_method']):
* 'bounds': metalog.bounds
* 'boundedness': metalog.boundedness
* 'term_limit': metalog.term_limit
* 'term_lower_bound': metalog.term_lower_bound
* 'step_len': metalog.step_len
* 'fit_method': metalog.fit_method
- m_dict['dataValues']: (:obj:`pandas.DataFrame` with columns ['x','probs','z'] of type numeric):
* 'x': metalog.x
* 'probs': metalog.probs
* 'z': column calculated in metalog.append_zvector method
- depends on metalog.boundedness attribute
- metalog.boundedness = 'u':
* 'z' = metalog.x
- metalog.boundedness = 'sl':
* 'z' = log( (metalog.x-lower_bound) )
- metalog.boundedness = 'su':
                    * 'z' = log( (upper_bound-metalog.x) )
- metalog.boundedness = 'b':
* 'z' = log( (metalog.x-lower_bound) / (upper_bound-metalog.x) )
- m_dict['Y']: (:obj:`pandas.DataFrame` with columns ['y1','y2','y3','y4', ... ,'yn'] of type numeric):
* 'y1': numpy.array of ones with length equal to len(x)
* 'y2': numpy.array of numeric values equal to the term attached to s in the logistic quantile function np.log(m_dict['dataValues']['probs'] / (1 - m_dict['dataValues']['probs']))
* 'y3': numpy.array of numeric values (m_dict['dataValues']['probs'] - 0.5) * m_dict['Y']['y2']
* 'y4': numpy.array of numeric values m_dict['Y']['y4'] = m_dict['dataValues']['probs'] - 0.5
* 'yn': numpy.array of numeric values:
- if n in 'yn' is odd,
m_dict['Y']['yn'] = m_dict['Y']['y4']**(int(i//2))
- if n in 'yn' is even,
zn = 'y' + str(n-1)
m_dict['Y'][yn] = m_dict['Y']['y2'] * m_dict['Y'][zn]
bounds (:obj:`list`): Upper and lower limits to filter the data with before calculating metalog quantiles/pdfs.
- should be set in conjunction with the `boundedness` parameter
boundedness (:obj:`str`): String that is used to specify the type of metalog to fit.
- must be in set ('u','sl','su','b')
- Default: 'u'
* Fits an unbounded metalog
- 'sl' fits a strictly lower bounded metalog
* len(bounds) must == 1
- 'su' fits a strictly upper bounded metalog
* len(bounds) must == 1
- 'b' fits a upper/lower bounded metalog
* len(bounds) must == 2
* bounds[1] must be > bounds[0]
term_limit (:obj:`int`): The upper limit of the range of metalog terms to use to fit the data.
- strictly > term_lower_bound
- in range [3,30]
term_lower_bound (:obj:`int`): The lower limit of the range of metalog terms to use to fit the data.
- strictly < term_limit
- in range [2,29]
fit_method (:obj:`str`): Fit method to use to fit metalog distribution.
- must be in set ('any','OLS','LP','MLE')
- Default: 'any'
                * first tries 'OLS' method then 'LP'
- 'OLS' only tries to fit by solving directly for a coefficients using ordinary least squares method
- 'LP' only tries to estimate fit using simplex linear program optimization routine
            - 'MLE' first tries 'OLS' method then falls back to a maximum likelihood estimation routine
alpha (:obj:`float`, optional): Regularization term to add to OLS fit
- strictly >= 0.
- should be set in conjunction with `penalty` parameter
- Default: 0. (no regularization, OLS)
        diff_error (:obj:`float`, optional): Value used in the scipy.optimize.linprog method call
to init the array of values representing the
upper-bound of each inequality constraint (row) in A_ub.
- #TODO: Insert maths
diff_step (:obj:`float`, optional): Value passed to `step_len` parameter in support.py diffMatMetalog method call
defines the bin width for the Reimann sum of the differences differentiation method
- diffMatMetalog differentiates the metalog pdf
* Differentiation reference: https://math.stackexchange.com/a/313135
Returns:
m_dict: (:obj:`dict` with keys ['params', 'dataValues', 'Y', 'A', 'M', 'Validation'])
- m_dict['A']: (:obj:`pandas.DataFrame` with columns ['a2','a3', ... ,'an'] of type numeric):
* a2, a3, ... , an are our a coefficients returned by the method specified in `fit_method`
- m_dict['M']: (:obj:`pandas.DataFrame` with columns 0:'pdf_1',1:'cdf_1',2:'pdf_2',3:'cdf_2',
...,((2*(term_limit-term_lower_bound))+1)-1:'pdf_n',
((2*(term_limit-term_lower_bound))+1):'cdf_n'
where n is the total number of metalog fits determined by (term_limit-term_lower_bound)+1
)
* pdf_1, pdf_2, ... , pdf_n are the metalog pdfs returned by pdf_quantile_builder.pdfMetalog method
* cdf_1, cdf_2, ... , cdf_n are the metalog quantiles returned by pdf_quantile_builder.quantileMetalog method
- m_dict['y']: (:obj: `numpy.ndarray` of type float):
* Array of bin widths for both the pdf_n and cdf_n
- m_dict['Validation']: (:obj:`pandas.DataFrame` with columns ['term', 'valid', 'method'] of type str):
* 'term': each metalog estimation given a number of terms
* 'valid': boolean flag indicating if the metalog estimation was valid or not
* 'method': a string indicating which method was used for the metalog estimation
"""
A = pd.DataFrame()
c_a_names = []
c_m_names = []
Mh = pd.DataFrame()
Validation = pd.DataFrame()
df_MH_temp_list = list()
df_A_temp_list = list()
df_Validation_temp_list = list()
# TODO: Large for-loop can probably be factored into smaller functions
for i in range(term_lower_bound,term_limit+1):
Y = m_dict['Y'].iloc[:,0:i]
eye = np.eye(Y.shape[1])
z = m_dict['dataValues']['z']
y = m_dict['dataValues']['probs']
step_len = m_dict['params']['step_len']
methodFit = 'OLS'
a_name = 'a'+str(i)
m_name = 'm'+str(i)
M_name = 'M'+str(i)
c_m_names = np.append(c_m_names, [m_name, M_name])
c_a_names = np.append(c_a_names, a_name)
if fit_method == 'any' or fit_method == 'MLE':
try:
temp = np.dot(np.dot(np.linalg.inv(np.dot(Y.T, Y) + alpha*eye), Y.T), z)
except:
# use LP solver if OLS breaks
temp = a_vector_LP(m_dict, term_limit=i, term_lower_bound=i, diff_error=diff_error, diff_step=diff_step)
methodFit = 'Linear Program'
if fit_method == 'OLS':
try:
temp = np.dot(np.dot(np.linalg.inv(np.dot(Y.T, Y) + alpha*eye), Y.T), z)
except:
raise RuntimeError("OLS was unable to solve infeasible or poorly formulated problem")
if fit_method == "LP":
temp = a_vector_LP(m_dict, term_limit=i, term_lower_bound=i, diff_error=diff_error, diff_step=diff_step)
methodFit = 'Linear Program'
if fit_method == 'MLE':
temp = a_vector_MLE(temp, y, i, m_dict, bounds, boundedness)
temp = np.append(temp, np.zeros(term_limit-i))
# build a y vector for smaller data sets
if len(z) < 100:
y2 = np.linspace(step_len, 1 - step_len, int((1 - step_len) / step_len))
tailstep = step_len / 10
y1 = np.linspace(tailstep, (min(y2) - tailstep), int((min(y2) - tailstep) / tailstep))
y3 = np.linspace((max(y2) + tailstep), (max(y2) + tailstep * 9), int((tailstep * 9) / tailstep))
y = np.hstack((y1, y2, y3))
# Get the dict and quantile values back for validation
temp_dict = pdf_quantile_builder(temp, y=y, term_limit=i, bounds=bounds, boundedness=boundedness)
        # If it is not a valid pdf and the fit method is not OLS-only, run the LP version
if (temp_dict['valid'] == 'no') and (fit_method != 'OLS'):
temp = a_vector_LP(m_dict, term_limit=i, term_lower_bound=i, diff_error=diff_error, diff_step=diff_step)
temp = np.append(temp, np.zeros(term_limit-i))
methodFit = 'Linear Program'
# Get the dict and quantile values back for validation
temp_dict = pdf_quantile_builder(temp, y=y, term_limit=i, bounds=bounds, boundedness=boundedness)
df_MH_temp_list.append(pd.DataFrame(temp_dict['m']))
df_MH_temp_list.append(pd.DataFrame(temp_dict['M']))
df_A_temp_list.append(pd.DataFrame(temp))
tempValidation = pd.DataFrame(data={'term': [i], 'valid': [temp_dict['valid']], 'method': [methodFit]})
df_Validation_temp_list.append(tempValidation)
Validation = pd.concat(df_Validation_temp_list, axis=0)
Mh = pd.concat(df_MH_temp_list, axis=1)
A = pd.concat(df_A_temp_list, axis=1)
A.columns = c_a_names
Mh.columns = c_m_names
m_dict['A'] = A
m_dict['M'] = Mh
m_dict['M']['y'] = temp_dict['y']
m_dict['Validation'] = Validation
A = np.column_stack((np.repeat(1.,len(A)), A))
Est = np.dot(m_dict['Y'], A)
ncols = A.shape[1]
Z = np.column_stack((np.array(m_dict['dataValues']['z']),np.repeat(m_dict['dataValues']['z'].values,ncols-1).reshape(len(m_dict['dataValues']['z']),ncols-1)))
m_dict['square_residual_error'] = ((Z-Est)**2).sum(axis=1)
return m_dict
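# Illustrative helper (not part of the pymetalog API): the 'OLS' branch above reduces to
# a ridge-regularised least-squares solve for the a coefficients, a = (Y'Y + alpha*I)^-1 Y'z.
def _ols_sketch(Y, z, alpha=0.0):
    eye = np.eye(Y.shape[1])
    return np.dot(np.dot(np.linalg.inv(np.dot(Y.T, Y) + alpha * eye), Y.T), z)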
def a_vector_LP(m_dict, term_limit, term_lower_bound, diff_error = .001, diff_step = 0.001):
"""TODO: write docstring
"""
cnames = np.array([])
for i in range(term_lower_bound, term_limit + 1):
Y = m_dict['Y'].iloc[:, 0:i]
z = m_dict['dataValues']['z']
        # Building the objective function using abs value LP formulation
Y_neg = -Y
new_Y = pd.DataFrame({'y1': Y.iloc[:, 0], 'y1_neg': Y_neg.iloc[:, 0]})
for c in range(1,len(Y.iloc[0,:])):
new_Y['y'+str(c+1)] = Y.iloc[:,c]
new_Y['y' + str(c+1)+'_neg'] = Y_neg.iloc[:, c]
a = np.array([''.join(['a', str(i)])])
cnames = np.append(cnames, a, axis=0)
# Building the constraint matrix
error_mat = np.array([])
for j in range(1,len(Y.iloc[:,0])+1):
front_zeros = np.zeros(2 * (j - 1))
ones = [1, -1]
trail_zeroes = np.zeros(2 * (len(Y.iloc[:, 1]) - j))
if j == 1:
error_vars = np.append(ones, trail_zeroes)
elif j != 1:
error_vars = np.append(front_zeros, ones)
error_vars = np.append(error_vars, trail_zeroes)
if error_mat.size == 0:
error_mat = np.append(error_mat, error_vars, axis=0)
else:
error_mat = np.vstack((error_mat, error_vars))
new = pd.concat((pd.DataFrame(data=error_mat), new_Y), axis=1)
diff_mat = diffMatMetalog(i, diff_step)
diff_zeros = []
for t in range(0,len(diff_mat.iloc[:, 0])):
zeros_temp = np.zeros(2 * len(Y.iloc[:, 0]))
if np.size(diff_zeros) == 0:
diff_zeros = zeros_temp
else:
diff_zeros = np.vstack((zeros_temp, diff_zeros))
diff_mat = np.concatenate((diff_zeros, diff_mat), axis=1)
# Combine the total constraint matrix
lp_mat = np.concatenate((new, diff_mat), axis=0)
# Objective function coeficients
c = np.append(np.ones(2 * len(Y.iloc[:, 1])), np.zeros(2*i))
# Constraint matrices
A_eq = lp_mat[:len(Y.iloc[:, 1]),:]
A_ub = -1*lp_mat[len(Y.iloc[:, 1]):,:]
b_eq = z
b_ub = -1*np.repeat(diff_error, len(diff_mat[:,0]))
# Solving the linear program w/ scipy (for now)
lp_sol = linprog(c, A_ub=A_ub, b_ub=b_ub, A_eq=A_eq, b_eq=b_eq, method='simplex', options={"maxiter":5000, "tol":1.0e-5,"disp": False})
# Consolidating solution back into the a vector
tempLP = lp_sol.x[(2 * len(Y.iloc[:, 1])):(len(lp_sol.x)+1)]
temp = []
for r in range(0,((len(tempLP) // 2))):
temp.append(tempLP[(r * 2)] - tempLP[(2 * r)+1])
return temp
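# Illustrative toy (not part of the pymetalog API): the absolute-value LP above minimises
# the L1 residual by splitting each error into non-negative parts e+ and e-, with
# Y a + e+ - e- = z. On a plain (n, k) array Y and length-n vector z that reads:
def _l1_fit_sketch(Y, z):
    n, k = Y.shape
    c = np.append(np.ones(2 * n), np.zeros(k))            # minimise sum(e+) + sum(e-)
    E = np.zeros((n, 2 * n))
    for j in range(n):
        E[j, 2 * j] = 1.0                                  # coefficient of e_j+
        E[j, 2 * j + 1] = -1.0                             # coefficient of e_j-
    A_eq = np.concatenate((E, Y), axis=1)                  # [E | Y] [e; a] = z
    bnds = [(0, None)] * (2 * n) + [(None, None)] * k      # a coefficients are free
    sol = linprog(c, A_eq=A_eq, b_eq=z, bounds=bnds)
    return sol.x[2 * n:]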
def a_vector_MLE(a, y, term, m_dict, bounds, boundedness):
"""TODO: write docstring
"""
ym = [newtons_method_metalog(a, xi, term, bounds, boundedness) for xi in m_dict['dataValues']['x']]
def MLE_quantile_constraints(x):
M = [quantileMetalog(x[:term], yi, term, bounds=bounds, boundedness=boundedness) for yi in x[term:]]
return m_dict['dataValues']['x'] - M
def MLE_objective_function(x, y, term, m_dict):
return -np.sum([np.log10(pdfMetalog(x[:term], yi, term, bounds, boundedness)) for yi in np.absolute(x[term:])])
m_dict[str('MLE' + str(term))] = {}
x0 = np.hstack((a[:term],ym))
m_dict[str('MLE' + str(term))]['oldobj'] = -MLE_objective_function(x0, y, term, m_dict)
bnd = ((None, None),)*len(a)+((0, 1),)*(len(x0)-len(a))
con = NonlinearConstraint(MLE_quantile_constraints, 0, 0)
mle = minimize(MLE_objective_function, x0, args=(y, term, m_dict), bounds=bnd, constraints=con)
m_dict[str('MLE' + str(term))]['newobj'] = -MLE_objective_function(mle.x, y, term, m_dict)
m_dict[str('MLE'+str(term))]['A'] = mle.x[:term]
m_dict[str('MLE'+str(term))]['Y'] = mle.x[term:]
m_dict[str('MLE' + str(term))]['oldA'] = a
m_dict[str('MLE' + str(term))]['oldY'] = y
out_temp = np.zeros_like(a)
for i in range(term):
out_temp[i] = mle.x[i]
return out_temp
| 44.533923 | 195 | 0.571902 | [
"MIT"
] | sives5/pymetalog | pymetalog/a_vector.py | 15,097 | Python |
import urllib.request
from datetime import datetime
import os
date = datetime.today().strftime('%Y%m%d')
cycle = 0
for hour in range(1, 19):
url = "http://nomads.ncep.noaa.gov/pub/data/nccf/com/hrrr/prod/hrrr.{date}/conus/hrrr.t{:02d}z.wrfsubhf{:02d}.grib2".format(cycle, hour, date=date)
print(url)
filename = url.split("/")[-1]
destination = "C:/Temp/hrrr/" + date + os.sep + "{:02d}".format(cycle)
if not os.path.isdir(os.path.split(destination)[0]):
os.mkdir(os.path.split(destination)[0])
if not os.path.isdir(destination):
os.mkdir(destination)
    # write the GRIB2 file; a context manager ensures the handle is closed after writing
    with open(destination + os.sep + filename, 'wb') as f:
        f.write(urllib.request.urlopen(url).read())
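# Example of the resulting request (date shown is illustrative): with date = "20240102",
# cycle = 0 and hour = 7 the format string above produces
#     http://nomads.ncep.noaa.gov/pub/data/nccf/com/hrrr/prod/hrrr.20240102/conus/hrrr.t00z.wrfsubhf07.grib2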
| 34.7 | 151 | 0.658501 | [
"MIT"
] | HydrologicEngineeringCenter/data-retrieval-scripts | retrieve_hrrr_subhourly.py | 694 | Python |
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Test cases for the pulse scheduler passes."""
from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit, schedule
from qiskit.pulse import (Schedule, DriveChannel, AcquireChannel, Acquire,
MeasureChannel, MemorySlot)
from qiskit.test.mock import FakeOpenPulse2Q, FakeOpenPulse3Q
from qiskit.test import QiskitTestCase
class TestBasicSchedule(QiskitTestCase):
"""Scheduling tests."""
def setUp(self):
self.backend = FakeOpenPulse2Q()
self.cmd_def = self.backend.defaults().build_cmd_def()
def test_alap_pass(self):
"""Test ALAP scheduling."""
q = QuantumRegister(2)
c = ClassicalRegister(2)
qc = QuantumCircuit(q, c)
qc.u2(3.14, 1.57, q[0])
qc.u2(0.5, 0.25, q[1])
qc.barrier(q[1])
qc.u2(0.5, 0.25, q[1])
qc.barrier(q[0], q[1])
qc.cx(q[0], q[1])
qc.measure(q, c)
sched = schedule(qc, self.backend)
# X pulse on q0 should end at the start of the CNOT
expected = Schedule(
(28, self.cmd_def.get('u2', [0], 3.14, 1.57)),
self.cmd_def.get('u2', [1], 0.5, 0.25),
(28, self.cmd_def.get('u2', [1], 0.5, 0.25)),
(56, self.cmd_def.get('cx', [0, 1])),
(78, self.cmd_def.get('measure', [0, 1])))
for actual, expected in zip(sched.instructions, expected.instructions):
self.assertEqual(actual[0], expected[0])
self.assertEqual(actual[1].command, expected[1].command)
self.assertEqual(actual[1].channels, expected[1].channels)
def test_alap_with_barriers(self):
"""Test that ALAP respects barriers on new qubits."""
q = QuantumRegister(2)
c = ClassicalRegister(2)
qc = QuantumCircuit(q, c)
qc.u2(0, 0, q[0])
qc.barrier(q[0], q[1])
qc.u2(0, 0, q[1])
sched = schedule(qc, self.backend, method='alap')
expected = Schedule(
self.cmd_def.get('u2', [0], 0, 0),
(28, self.cmd_def.get('u2', [1], 0, 0)))
for actual, expected in zip(sched.instructions, expected.instructions):
self.assertEqual(actual[0], expected[0])
self.assertEqual(actual[1].command, expected[1].command)
self.assertEqual(actual[1].channels, expected[1].channels)
def test_alap_aligns_end(self):
"""Test that ALAP always acts as though there is a final global barrier."""
q = QuantumRegister(2)
c = ClassicalRegister(2)
qc = QuantumCircuit(q, c)
qc.u3(0, 0, 0, q[0])
qc.u2(0, 0, q[1])
sched = schedule(qc, self.backend, method='alap')
expected_sched = Schedule(
self.cmd_def.get('u2', [1], 0, 0),
(26, self.cmd_def.get('u3', [0], 0, 0, 0)))
for actual, expected in zip(sched.instructions, expected_sched.instructions):
self.assertEqual(actual[0], expected[0])
self.assertEqual(actual[1].command, expected[1].command)
self.assertEqual(actual[1].channels, expected[1].channels)
self.assertEqual(sched.ch_duration(DriveChannel(0)),
expected_sched.ch_duration(DriveChannel(1)))
def test_asap_pass(self):
"""Test ASAP scheduling."""
q = QuantumRegister(2)
c = ClassicalRegister(2)
qc = QuantumCircuit(q, c)
qc.u2(3.14, 1.57, q[0])
qc.u2(0.5, 0.25, q[1])
qc.barrier(q[1])
qc.u2(0.5, 0.25, q[1])
qc.barrier(q[0], q[1])
qc.cx(q[0], q[1])
qc.measure(q, c)
sched = schedule(qc, self.backend, method="as_soon_as_possible")
# X pulse on q0 should start at t=0
expected = Schedule(
self.cmd_def.get('u2', [0], 3.14, 1.57),
self.cmd_def.get('u2', [1], 0.5, 0.25),
(28, self.cmd_def.get('u2', [1], 0.5, 0.25)),
(56, self.cmd_def.get('cx', [0, 1])),
(78, self.cmd_def.get('measure', [0, 1])))
self.assertEqual(sched.instructions, expected.instructions)
def test_alap_resource_respecting(self):
"""Test that the ALAP pass properly respects busy resources when backwards scheduling.
For instance, a CX on 0 and 1 followed by an X on only 1 must respect both qubits'
timeline."""
q = QuantumRegister(2)
c = ClassicalRegister(2)
qc = QuantumCircuit(q, c)
qc.cx(q[0], q[1])
qc.u2(0.5, 0.25, q[1])
sched = schedule(qc, self.backend, method="as_late_as_possible")
insts = sched.instructions
self.assertEqual(insts[0][0], 0)
self.assertEqual(insts[4][0], 22)
qc = QuantumCircuit(q, c)
qc.cx(q[0], q[1])
qc.u2(0.5, 0.25, q[1])
qc.measure(q, c)
sched = schedule(qc, self.backend, method="as_late_as_possible")
self.assertEqual(sched.instructions[-1][0], 50)
def test_cmd_def_schedules_unaltered(self):
"""Test that forward scheduling doesn't change relative timing with a command."""
q = QuantumRegister(2)
c = ClassicalRegister(2)
qc = QuantumCircuit(q, c)
qc.cx(q[0], q[1])
sched1 = schedule(qc, self.backend, method="as_soon_as_possible")
sched2 = schedule(qc, self.backend, method="as_late_as_possible")
self.assertEqual(sched1.instructions, sched2.instructions)
insts = sched1.instructions
self.assertEqual(insts[0][0], 0)
self.assertEqual(insts[1][0], 10)
self.assertEqual(insts[2][0], 20)
self.assertEqual(insts[3][0], 20)
def test_measure_combined(self):
"""
Test that a second measure on the same qubit generates another measure schedule.
Measures on different qubits are combined, but a repeated measure on the same qubit
adds another measure to the schedule.
"""
q = QuantumRegister(2)
c = ClassicalRegister(2)
qc = QuantumCircuit(q, c)
qc.u2(3.14, 1.57, q[0])
qc.cx(q[0], q[1])
qc.measure(q[0], c[0])
qc.measure(q[1], c[1])
qc.measure(q[1], c[1])
sched = schedule(qc, self.backend, method="as_soon_as_possible")
expected = Schedule(
self.cmd_def.get('u2', [0], 3.14, 1.57),
(28, self.cmd_def.get('cx', [0, 1])),
(50, self.cmd_def.get('measure', [0, 1])),
(60, self.cmd_def.get('measure', [0, 1]).filter(channels=[MeasureChannel(1)])),
(60, Acquire(duration=10)([AcquireChannel(0), AcquireChannel(1)],
[MemorySlot(0), MemorySlot(1)])))
self.assertEqual(sched.instructions, expected.instructions)
def test_3q_schedule(self):
"""Test a schedule that was recommended by David McKay :D """
backend = FakeOpenPulse3Q()
cmd_def = backend.defaults().build_cmd_def()
q = QuantumRegister(3)
c = ClassicalRegister(3)
qc = QuantumCircuit(q, c)
qc.cx(q[0], q[1])
qc.u2(0.778, 0.122, q[2])
qc.u3(3.14, 1.57, 0., q[0])
qc.u2(3.14, 1.57, q[1])
qc.cx(q[1], q[2])
qc.u2(0.778, 0.122, q[2])
sched = schedule(qc, backend)
expected = Schedule(
cmd_def.get('cx', [0, 1]),
(22, cmd_def.get('u2', [1], 3.14, 1.57)),
(46, cmd_def.get('u2', [2], 0.778, 0.122)),
(50, cmd_def.get('cx', [1, 2])),
(72, cmd_def.get('u2', [2], 0.778, 0.122)),
(74, cmd_def.get('u3', [0], 3.14, 1.57)))
self.assertEqual(sched.instructions, expected.instructions)
def test_schedule_multi(self):
"""Test scheduling multiple circuits at once."""
q = QuantumRegister(2)
c = ClassicalRegister(2)
qc0 = QuantumCircuit(q, c)
qc0.cx(q[0], q[1])
qc1 = QuantumCircuit(q, c)
qc1.cx(q[0], q[1])
schedules = schedule([qc0, qc1], self.backend)
expected_insts = schedule(qc0, self.backend).instructions
self.assertEqual(schedules[0].instructions, expected_insts)
def test_circuit_name_kept(self):
"""Test that the new schedule gets its name from the circuit."""
q = QuantumRegister(2)
c = ClassicalRegister(2)
qc = QuantumCircuit(q, c, name='CIRCNAME')
qc.cx(q[0], q[1])
sched = schedule(qc, self.backend, method="asap")
self.assertEqual(sched.name, qc.name)
sched = schedule(qc, self.backend, method="alap")
self.assertEqual(sched.name, qc.name)
def test_can_add_gates_into_free_space(self):
"""The scheduler does some time bookkeeping to know when qubits are free to be
scheduled. Make sure this works for qubits that are used in the future. This was
a bug, uncovered by this example:
q0 = - - - - |X|
q1 = |X| |u2| |X|
In ALAP scheduling, the next operation on qubit 0 would be added at t=0 rather
than immediately before the X gate.
"""
qr = QuantumRegister(2)
qc = QuantumCircuit(qr)
for i in range(2):
qc.u2(0, 0, [qr[i]])
qc.u1(3.14, [qr[i]])
qc.u2(0, 0, [qr[i]])
sched = schedule(qc, self.backend, method="alap")
expected = Schedule(
self.cmd_def.get('u2', [0], 0, 0),
self.cmd_def.get('u2', [1], 0, 0),
(28, self.cmd_def.get('u1', [0], 3.14)),
(28, self.cmd_def.get('u1', [1], 3.14)),
(28, self.cmd_def.get('u2', [0], 0, 0)),
(28, self.cmd_def.get('u2', [1], 0, 0)))
self.assertEqual(sched.instructions, expected.instructions)
def test_barriers_in_middle(self):
"""As a follow on to `test_can_add_gates_into_free_space`, similar issues
arose for barriers, specifically.
"""
qr = QuantumRegister(2)
qc = QuantumCircuit(qr)
for i in range(2):
qc.u2(0, 0, [qr[i]])
qc.barrier(qr[i])
qc.u1(3.14, [qr[i]])
qc.barrier(qr[i])
qc.u2(0, 0, [qr[i]])
sched = schedule(qc, self.backend, method="alap")
expected = Schedule(
self.cmd_def.get('u2', [0], 0, 0),
self.cmd_def.get('u2', [1], 0, 0),
(28, self.cmd_def.get('u1', [0], 3.14)),
(28, self.cmd_def.get('u1', [1], 3.14)),
(28, self.cmd_def.get('u2', [0], 0, 0)),
(28, self.cmd_def.get('u2', [1], 0, 0)))
self.assertEqual(sched.instructions, expected.instructions)
def test_only_needed_measures(self):
"""Test that `MeasureChannel`s are only added for measured qubits."""
q = QuantumRegister(2)
c = ClassicalRegister(2)
qc = QuantumCircuit(q, c)
qc.measure(q[1], c[1])
sched_all_channels = schedule(qc, self.backend, method="as_soon_as_possible").channels
deleted_channels = [MeasureChannel(0)]
self.assertNotIn(sched_all_channels, deleted_channels)
def test_user_mapping_for_memslots(self):
"""
Test that the new schedule only has required `MeasureChannel`s and that the
`MemorySlot`s are mapped according to the input circuit.
"""
q = QuantumRegister(2)
c = ClassicalRegister(2)
qc = QuantumCircuit(q, c)
qc.measure(q[0], c[1])
sched = schedule(qc, self.backend)
expected = Schedule(
self.cmd_def.get('measure', [0, 1]).filter(channels=[MeasureChannel(0)]),
Acquire(duration=10)([AcquireChannel(0), AcquireChannel(1)],
[MemorySlot(1), MemorySlot(0)]))
self.assertEqual(sched.instructions, expected.instructions)
def test_user_mapping_for_memslots_3Q(self):
"""Test measuring two of three qubits."""
backend = FakeOpenPulse3Q()
cmd_def = backend.defaults().build_cmd_def()
q = QuantumRegister(3)
c = ClassicalRegister(3)
qc = QuantumCircuit(q, c)
qc.measure(q[1], c[2])
qc.measure(q[2], c[0])
sched = schedule(qc, backend)
expected = Schedule(
cmd_def.get('measure', [0, 1, 2]).filter(
channels=[MeasureChannel(1), MeasureChannel(2)]),
Acquire(duration=10)([AcquireChannel(0), AcquireChannel(1), AcquireChannel(2)],
[MemorySlot(1), MemorySlot(2), MemorySlot(0)]))
self.assertEqual(sched.instructions, expected.instructions)
def test_multiple_measure_in_3Q(self):
"""Test multiple measure, user memslot mapping, 3Q."""
backend = FakeOpenPulse3Q()
cmd_def = backend.defaults().build_cmd_def()
q = QuantumRegister(3)
c = ClassicalRegister(5)
qc = QuantumCircuit(q, c)
qc.measure(q[0], c[2])
qc.measure(q[0], c[4])
sched = schedule(qc, backend)
expected = Schedule(
cmd_def.get('measure', [0, 1, 2]).filter(channels=[MeasureChannel(0)]),
Acquire(duration=10)([AcquireChannel(0), AcquireChannel(1), AcquireChannel(2)],
[MemorySlot(2), MemorySlot(0), MemorySlot(1)]),
(10, cmd_def.get('measure', [0, 1, 2]).filter(channels=[MeasureChannel(0)])),
(10, Acquire(duration=10)([AcquireChannel(0), AcquireChannel(1), AcquireChannel(2)],
[MemorySlot(4), MemorySlot(0), MemorySlot(1)])))
self.assertEqual(sched.instructions, expected.instructions)
| 42.404834 | 96 | 0.578014 | [
"MIT"
] | MattePalte/Bugs-Quantum-Computing-Platforms | artifacts/old_dataset_versions/minimal_commits/qiskit-terra/qiskit-terra#2704/after/test_basic_scheduler.py | 14,036 | Python |
"""
>>> sd = SliceDump()
>>> sd[1]
1
>>> sd[2:5]
slice(2, 5, None)
>>> sd[:2]
slice(None, 2, None)
>>> sd[7:]
slice(7, None, None)
>>> sd[:]
slice(None, None, None)
>>> sd[1:9:3]
slice(1, 9, 3)
>>> sd[1:9:3, 2:3]
(slice(1, 9, 3), slice(2, 3, None))
>>> s = sd[1:9:3]
>>> s.indices(20)
(1, 9, 3)
>>> s.indices(5)
(1, 5, 3)
>>> s.indices(1)
(1, 1, 3)
>>> s.indices(0)
(0, 0, 3)
"""
class SliceDump:
def __getitem__(self, pos):
return pos
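# Illustrative addition (not part of the original doctest); the helper name is ours.
# slice.indices(length) clamps start/stop to the sequence length and fills in defaults,
# which is why the doctest values above never exceed the length passed in.
def _demo_indices(length=5):
    """Return the concrete (start, stop, step) triple for sd[1:9:3] over `length` items."""
    return SliceDump()[1:9:3].indices(length)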
| 16.117647 | 39 | 0.403285 | [
"MIT"
] | 1098994933/fluent_python | attic/sequences/slice_dump.py | 548 | Python |
from .breakpoint import Breakpoint
from .pesr_test import PESRTest
from .sr_test import SRTest, SRTestRunner
from .pe_test import PETest, PETestRunner
from .pesr_test import PESRTest, PESRTestRunner
| 33.166667 | 47 | 0.844221 | [
"BSD-3-Clause"
] | Genometric/gatk-sv | src/svtk/svtk/pesr/__init__.py | 199 | Python |
"""
utility functions for breaking down a given block of text
into its component syntactic parts.
"""
import nltk
from nltk.tokenize import RegexpTokenizer
from . import syllables_en
TOKENIZER = RegexpTokenizer(r'(?u)\W+|\$[\d\.]+|\S+')
SPECIAL_CHARS = ['.', ',', '!', '?']
def get_char_count(words):
characters = 0
for word in words:
characters += len(word)
return characters
def get_words(text=''):
words = []
words = TOKENIZER.tokenize(text)
filtered_words = []
for word in words:
if word in SPECIAL_CHARS or word == " ":
pass
else:
new_word = word.replace(",","").replace(".","")
new_word = new_word.replace("!","").replace("?","")
filtered_words.append(new_word)
return filtered_words
def get_sentences(text=''):
tokenizer = nltk.data.load('tokenizers/punkt/english.pickle')
sentences = tokenizer.tokenize(text)
return sentences
def count_syllables(words):
syllableCount = 0
for word in words:
syllableCount += syllables_en.count(word)
return syllableCount
# This method could be improved: at the moment it only
# considers the number of syllables in a word, which
# often causes too many words to be flagged as complex.
def count_complex_words(text=''):
words = get_words(text)
sentences = get_sentences(text)
complex_words = 0
found = False
cur_word = []
for word in words:
cur_word.append(word)
if count_syllables(cur_word)>= 3:
#Checking proper nouns. If a word starts with a capital letter
#and is NOT at the beginning of a sentence we don't add it
#as a complex word.
if not(word[0].isupper()):
complex_words += 1
else:
for sentence in sentences:
if str(sentence).startswith(word):
found = True
break
if found:
complex_words += 1
found = False
cur_word.remove(word)
return complex_words
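# Hedged usage sketch (not part of the original module); the function name and sample
# text are ours, and get_sentences() assumes the NLTK 'punkt' model is installed, as the
# module already requires.
def _example_text_stats(text="This is a tiny example. It demonstrates the counters above."):
    """Combine the counters above into a single summary dictionary."""
    words = get_words(text)
    sentences = get_sentences(text)
    return {
        "chars": get_char_count(words),
        "words": len(words),
        "sentences": len(sentences),
        "syllables": count_syllables(words),
        "complex_words": count_complex_words(text),
    }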
| 28.447368 | 74 | 0.584181 | [
"MIT"
] | Open-Prose-Metrics/open_prose_metrics_app-core | opm/readability/utils.py | 2,162 | Python |
numeros = [[], []]
for c in range(0, 7):
n = int(input(f'Enter number {c+1}: '))
if n % 2 == 0:
numeros[0].append(n)
elif n % 2 == 1:
numeros[1].append(n)
numeros[0].sort()
numeros[1].sort()
print('='*30)
print(f'Even numbers entered: {numeros[0]}')
print(f'Odd numbers entered: {numeros[1]}')
| 25.846154 | 49 | 0.568452 | [
"MIT"
] | jotmar/PythonEx | Exercicios/Ex_085.py | 340 | Python |
class Logger(object):
def __init__(self, name):
self.name = name
def debug(self, msg):
print(':: {0} [debug] :: {1}'.format(self.name, msg))
def info(self, msg):
print(':: {0} [info] :: {1}'.format(self.name, msg))
def warn(self, msg):
print(':: {0} [warning] :: {1}'.format(self.name, msg))
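# Minimal usage sketch (added for illustration; the function and instance names are ours).
def _demo_logger():
    """Exercise each level of the Logger defined above."""
    log = Logger('demo')
    log.debug('starting up')
    log.info('connected')
    log.warn('low disk space')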
| 26.384615 | 63 | 0.521866 | [
"MIT"
] | ethanlindley/lego.py | util/logger.py | 343 | Python |
import requests
import json
from simplegist.mygist import Mygist
from simplegist.do import Do
from simplegist.comments import Comments
try:
from simplegist.config import USERNAME, API_TOKEN, BASE_URL, GIST_URL
except ImportError:
    # Fall back to the public GitHub endpoints; credentials must then be passed explicitly.
    USERNAME = API_TOKEN = None
    BASE_URL = 'https://api.github.com'
    GIST_URL = 'https://gist.github.com'
class Simplegist:
"""
Gist Base Class
This class is used to instantiate the wrapper and authenticate.
Authenticate by providing a Github username and API token, which are
then used for all future API requests.
"""
def __init__(self, **args):
# Save our username and api_token (If given) for later use.
if 'username' in args:
self.username = args['username']
else:
if not USERNAME:
raise Exception('Please provide your Github username.')
else:
self.username = USERNAME
if 'api_token' in args:
self.api_token = args['api_token']
else:
if not API_TOKEN:
raise Exception('Please provide your Github API Token.')
else:
self.api_token = API_TOKEN
# Set header information in every request.
self.header = { 'X-Github-Username': self.username,
'Content-Type': 'application/json',
'Authorization': 'token %s' %self.api_token
}
def profile(self):
return Mygist(self)
def search(self, user):
return Mygist(self,user=user)
def do(self):
return Do(self)
def comments(self):
return Comments(self)
def create(self, **args):
if 'description' in args:
self.description = args['description']
else:
self.description = ''
if 'name' in args:
self.gist_name = args['name']
else:
self.gist_name = ''
if 'public' in args:
self.public = args['public']
else:
self.public = 1
if 'content' in args:
self.content = args['content']
else:
raise Exception('Gist content can\'t be empty')
url = '/gists'
data = {"description": self.description,
"public": self.public,
"files": {
self.gist_name: {
"content": self.content
}
}
}
r = requests.post(
'%s%s' % (BASE_URL, url),
data=json.dumps(data),
headers=self.header
)
if (r.status_code == 201):
response = {
'Gist-Link': '%s/%s/%s' %(GIST_URL,self.username,r.json()['id']),
'Clone-Link': '%s/%s.git' %(GIST_URL,r.json()['id']),
'Embed-Script': '<script src="%s/%s/%s.js"</script>' %(GIST_URL,self.username,r.json()['id']),
'id': r.json()['id'],
'created_at': r.json()['created_at'],
}
return response
raise Exception('Gist not created: server response was [%s] %s' % (r.status_code, r.text))
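# Hedged usage sketch (not part of the upstream module): the credentials below are
# placeholders and the helper name is ours; the call simply mirrors the create()
# signature defined above.
def _example_create_gist():
    gh = Simplegist(username='YOUR_GITHUB_USERNAME', api_token='YOUR_API_TOKEN')
    return gh.create(name='hello.py',
                     description='created via simplegist',
                     public=1,
                     content="print('hello gist')")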
| 22.981308 | 97 | 0.646604 | [
"MIT"
] | acatiadroid/simplegist | simplegist/simplegist.py | 2,459 | Python |
from pathlib import Path
from urllib.parse import urljoin, urlparse, unquote
def _get_url_file_name(url):
path = urlparse(url).path
try:
path = unquote(path)
except (TypeError, ValueError):
pass
return Path(path).name
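# Illustrative only (not part of the original operation); the URL is a made-up example.
def _example_file_name():
    """Percent-encoded hrefs are decoded before the file name is taken."""
    return _get_url_file_name("https://dav.example.org/files/My%20Report.pdf")  # -> "My Report.pdf"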
def dav_index(context, data):
"""List files in a WebDAV directory."""
# This is made to work with ownCloud/nextCloud, but some rumor has
# it they are "standards compliant" and it should thus work for
# other DAV servers.
url = data.get("url")
context.log.info("Fetching WebDAV path: %s" % url)
result = context.http.request("PROPFIND", url)
for resp in result.xml.findall("./{DAV:}response"):
href = resp.findtext("./{DAV:}href")
if href is None:
continue
child_url = urljoin(url, href)
if child_url == url:
continue
child = dict(data)
child["url"] = child_url
child["foreign_id"] = child_url
child["file_name"] = _get_url_file_name(href)
rule = "file"
if resp.find(".//{DAV:}collection") is not None:
rule = "folder"
context.emit(data=child, rule=rule)
| 29.923077 | 70 | 0.611825 | [
"MIT"
] | Rosencrantz/memorious | memorious/operations/webdav.py | 1,167 | Python |
"""
Name : c9_01_optimize.py
Book : Python for Finance (2nd ed.)
Publisher: Packt Publishing Ltd.
Author : Yuxing Yan
Date : 6/6/2017
email : [email protected]
[email protected]
"""
from scipy.optimize import minimize
def myFunction(x):
return (3.2+5*x**2)
x0=100
res = minimize(myFunction,x0,method='nelder-mead',options={'xtol':1e-8,'disp': True})
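# Added note (not in the original script): 3.2 + 5*x**2 is minimised at x = 0 with value
# 3.2, so res.x should converge towards 0 from the starting point x0 = 100.
print("minimiser x =", res.x, "minimum value =", res.fun)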
| 22.333333 | 85 | 0.636816 | [
"MIT"
] | HiteshMah-Jan/Python-for-Finance-Second-Edition | Chapter09/c9_01_optimize.py | 402 | Python |
# coding=utf-8
# Copyright 2020 The Real-World RL Suite Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for evaluators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl.testing import absltest
from absl.testing import parameterized
import numpy as np
import realworldrl_suite.environments as rwrl
from realworldrl_suite.utils import evaluators
class RandomAgent(object):
def __init__(self, action_spec):
self.action_spec = action_spec
def action(self):
return np.random.uniform(
self.action_spec.minimum,
self.action_spec.maximum,
size=self.action_spec.shape)
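# Illustrative helper (not used by the tests below; the function name is ours). It runs
# one episode with a RandomAgent, mirroring the loop in _gen_stats; `env` is any
# environment returned by rwrl.load(...).
def _example_random_episode(env):
    agent = RandomAgent(env.action_spec())
    timestep = env.step(agent.action())
    while not timestep.last():
        timestep = env.step(agent.action())
    return timestep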
class EvaluatorsTest(parameterized.TestCase):
def _gen_stats(self, domain_name, task_name):
temp_dir = self.create_tempdir()
env = rwrl.load(
domain_name=domain_name,
task_name=task_name,
safety_spec={'enable': True},
log_output=os.path.join(temp_dir.full_path, 'test.pickle'),
environment_kwargs=dict(log_safety_vars=True, flat_observation=True))
random_policy = RandomAgent(env.action_spec()).action
for _ in range(3):
timestep = env.step(random_policy())
while not timestep.last():
timestep = env.step(random_policy())
env.write_logs()
return env.logs_path
@parameterized.named_parameters(*rwrl.ALL_TASKS)
def test_loading(self, domain_name, task_name):
temp_path = self._gen_stats(domain_name, task_name)
data_in = np.load(temp_path, allow_pickle=True)
evaluators.Evaluators(data_in)
def test_safety_evaluator(self):
# TODO(dulacarnold): Make this test general to all envs.
temp_path = self._gen_stats(
domain_name='cartpole', task_name='realworld_balance')
data_in = np.load(temp_path, allow_pickle=True)
ev = evaluators.Evaluators(data_in)
self.assertLen(ev.get_safety_evaluator(), 3)
def test_standard_evaluators(self):
# TODO(dulacarnold): Make this test general to all envs.
temp_path = self._gen_stats(
domain_name='cartpole', task_name='realworld_balance')
data_in = np.load(temp_path, allow_pickle=True)
ev = evaluators.Evaluators(data_in)
self.assertLen(ev.get_standard_evaluators(), 5)
@parameterized.named_parameters(*rwrl.ALL_TASKS)
def test_safety_plot(self, domain_name, task_name):
temp_path = self._gen_stats(domain_name, task_name)
data_in = np.load(temp_path, allow_pickle=True)
ev = evaluators.Evaluators(data_in)
ev.get_safety_plot()
if __name__ == '__main__':
absltest.main()
| 33.652174 | 77 | 0.74031 | [
"Apache-2.0"
] | Roryoung/realworldrl_suite | realworldrl_suite/utils/evaluators_test.py | 3,096 | Python |
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
import warnings
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, NewType, Optional, Tuple, Union
import torch
from torch.nn.utils.rnn import pad_sequence
from ..file_utils import PaddingStrategy
from ..modeling_utils import PreTrainedModel
from ..models.bert import BertTokenizer, BertTokenizerFast
from ..tokenization_utils_base import BatchEncoding, PreTrainedTokenizerBase
InputDataClass = NewType("InputDataClass", Any)
"""
A DataCollator is a function that takes a list of samples from a Dataset and collates them into a batch, as a dictionary
of Tensors.
"""
DataCollator = NewType("DataCollator", Callable[[List[InputDataClass]], Dict[str, torch.Tensor]])
def default_data_collator(features: List[InputDataClass]) -> Dict[str, torch.Tensor]:
"""
Very simple data collator that simply collates batches of dict-like objects and performs special handling for
potential keys named:
- ``label``: handles a single value (int or float) per object
- ``label_ids``: handles a list of values per object
Does not do any additional preprocessing: property names of the input object will be used as corresponding inputs
to the model. See glue and ner for examples of how it's useful.
"""
# In this function we'll make the assumption that all `features` in the batch
# have the same attributes.
# So we will look at the first element as a proxy for what attributes exist
# on the whole batch.
if not isinstance(features[0], (dict, BatchEncoding)):
features = [vars(f) for f in features]
first = features[0]
batch = {}
# Special handling for labels.
# Ensure that tensor is created with the correct type
# (it should be automatically the case, but let's make sure of it.)
if "label" in first and first["label"] is not None:
label = first["label"].item() if isinstance(first["label"], torch.Tensor) else first["label"]
dtype = torch.long if isinstance(label, int) else torch.float
batch["labels"] = torch.tensor([f["label"] for f in features], dtype=dtype)
elif "label_ids" in first and first["label_ids"] is not None:
if isinstance(first["label_ids"], torch.Tensor):
batch["labels"] = torch.stack([f["label_ids"] for f in features])
else:
dtype = torch.long if type(first["label_ids"][0]) is int else torch.float
batch["labels"] = torch.tensor([f["label_ids"] for f in features], dtype=dtype)
# Handling of all other possible keys.
# Again, we will use the first element to figure out which key/values are not None for this model.
for k, v in first.items():
if k not in ("label", "label_ids") and v is not None and not isinstance(v, str):
if isinstance(v, torch.Tensor):
batch[k] = torch.stack([f[k] for f in features])
else:
batch[k] = torch.tensor([f[k] for f in features])
return batch
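# Hedged usage sketch (not part of the library source): the helper name and the token
# ids below are made up; the collator stacks "input_ids" and renames "label" to "labels".
def _example_default_collate():
    features = [
        {"input_ids": torch.tensor([101, 2009, 102]), "label": 0},
        {"input_ids": torch.tensor([101, 2129, 102]), "label": 1},
    ]
    batch = default_data_collator(features)
    # batch["input_ids"] has shape (2, 3) and batch["labels"] equals tensor([0, 1]).
    return batch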
@dataclass
class DataCollatorWithPadding:
"""
Data collator that will dynamically pad the inputs received.
Args:
tokenizer (:class:`~transformers.PreTrainedTokenizer` or :class:`~transformers.PreTrainedTokenizerFast`):
The tokenizer used for encoding the data.
padding (:obj:`bool`, :obj:`str` or :class:`~transformers.file_utils.PaddingStrategy`, `optional`, defaults to :obj:`True`):
Select a strategy to pad the returned sequences (according to the model's padding side and padding index)
among:
* :obj:`True` or :obj:`'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
sequence if provided).
* :obj:`'max_length'`: Pad to a maximum length specified with the argument :obj:`max_length` or to the
maximum acceptable input length for the model if that argument is not provided.
* :obj:`False` or :obj:`'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of
different lengths).
max_length (:obj:`int`, `optional`):
Maximum length of the returned list and optionally padding length (see above).
pad_to_multiple_of (:obj:`int`, `optional`):
If set will pad the sequence to a multiple of the provided value.
This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >=
7.5 (Volta).
"""
tokenizer: PreTrainedTokenizerBase
padding: Union[bool, str, PaddingStrategy] = True
max_length: Optional[int] = None
pad_to_multiple_of: Optional[int] = None
def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]:
batch = self.tokenizer.pad(
features,
padding=self.padding,
max_length=self.max_length,
pad_to_multiple_of=self.pad_to_multiple_of,
return_tensors="pt",
)
if "label" in batch:
batch["labels"] = batch["label"]
del batch["label"]
if "label_ids" in batch:
batch["labels"] = batch["label_ids"]
del batch["label_ids"]
return batch
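# Hedged usage sketch (not part of the library source): the checkpoint name is an
# assumption; any PreTrainedTokenizer(Fast) with a pad token behaves the same way.
def _example_padding_collator():
    from transformers import AutoTokenizer
    tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
    collator = DataCollatorWithPadding(tokenizer=tokenizer, padding="longest")
    features = [tokenizer("a short sentence"), tokenizer("a noticeably longer example sentence")]
    return collator(features)  # both rows are padded to the longest length in the batch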
@dataclass
class DataCollatorForTokenClassification:
"""
Data collator that will dynamically pad the inputs received, as well as the labels.
Args:
tokenizer (:class:`~transformers.PreTrainedTokenizer` or :class:`~transformers.PreTrainedTokenizerFast`):
The tokenizer used for encoding the data.
padding (:obj:`bool`, :obj:`str` or :class:`~transformers.file_utils.PaddingStrategy`, `optional`, defaults to :obj:`True`):
Select a strategy to pad the returned sequences (according to the model's padding side and padding index)
among:
* :obj:`True` or :obj:`'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
sequence if provided).
* :obj:`'max_length'`: Pad to a maximum length specified with the argument :obj:`max_length` or to the
maximum acceptable input length for the model if that argument is not provided.
* :obj:`False` or :obj:`'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of
different lengths).
max_length (:obj:`int`, `optional`):
Maximum length of the returned list and optionally padding length (see above).
pad_to_multiple_of (:obj:`int`, `optional`):
If set will pad the sequence to a multiple of the provided value.
This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >=
7.5 (Volta).
label_pad_token_id (:obj:`int`, `optional`, defaults to -100):
The id to use when padding the labels (-100 will be automatically ignore by PyTorch loss functions).
"""
tokenizer: PreTrainedTokenizerBase
padding: Union[bool, str, PaddingStrategy] = True
max_length: Optional[int] = None
pad_to_multiple_of: Optional[int] = None
label_pad_token_id: int = -100
def __call__(self, features):
label_name = "label" if "label" in features[0].keys() else "labels"
labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
batch = self.tokenizer.pad(
features,
padding=self.padding,
max_length=self.max_length,
pad_to_multiple_of=self.pad_to_multiple_of,
# Conversion to tensors will fail if we have labels as they are not of the same length yet.
return_tensors="pt" if labels is None else None,
)
if labels is None:
return batch
sequence_length = torch.tensor(batch["input_ids"]).shape[1]
padding_side = self.tokenizer.padding_side
if padding_side == "right":
batch["labels"] = [label + [self.label_pad_token_id] * (sequence_length - len(label)) for label in labels]
else:
batch["labels"] = [[self.label_pad_token_id] * (sequence_length - len(label)) + label for label in labels]
batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}
return batch
def _collate_batch(examples, tokenizer, pad_to_multiple_of: Optional[int] = None):
"""Collate `examples` into a batch, using the information in `tokenizer` for padding if necessary."""
# Tensorize if necessary.
if isinstance(examples[0], (list, tuple)):
examples = [torch.tensor(e, dtype=torch.long) for e in examples]
# Check if padding is necessary.
length_of_first = examples[0].size(0)
are_tensors_same_length = all(x.size(0) == length_of_first for x in examples)
if are_tensors_same_length and (pad_to_multiple_of is None or length_of_first % pad_to_multiple_of == 0):
return torch.stack(examples, dim=0)
# If yes, check if we have a `pad_token`.
if tokenizer._pad_token is None:
raise ValueError(
"You are attempting to pad samples but the tokenizer you are using"
f" ({tokenizer.__class__.__name__}) does not have a pad token."
)
# Creating the full tensor and filling it with our data.
max_length = max(x.size(0) for x in examples)
if pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
result = examples[0].new_full([len(examples), max_length], tokenizer.pad_token_id)
for i, example in enumerate(examples):
if tokenizer.padding_side == "right":
result[i, : example.shape[0]] = example
else:
result[i, -example.shape[0] :] = example
return result
def tolist(x: Union[List[Any], torch.Tensor]):
return x.tolist() if isinstance(x, torch.Tensor) else x
@dataclass
class DataCollatorForSeq2Seq:
"""
Data collator that will dynamically pad the inputs received, as well as the labels.
Args:
tokenizer (:class:`~transformers.PreTrainedTokenizer` or :class:`~transformers.PreTrainedTokenizerFast`):
The tokenizer used for encoding the data.
model (:class:`~transformers.PreTrainedModel`):
The model that is being trained. If set and has the `prepare_decoder_input_ids_from_labels`, use it to
prepare the `decoder_input_ids`
This is useful when using `label_smoothing` to avoid calculating loss twice.
padding (:obj:`bool`, :obj:`str` or :class:`~transformers.file_utils.PaddingStrategy`, `optional`, defaults to :obj:`True`):
Select a strategy to pad the returned sequences (according to the model's padding side and padding index)
among:
* :obj:`True` or :obj:`'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
sequence is provided).
* :obj:`'max_length'`: Pad to a maximum length specified with the argument :obj:`max_length` or to the
maximum acceptable input length for the model if that argument is not provided.
* :obj:`False` or :obj:`'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of
different lengths).
max_length (:obj:`int`, `optional`):
Maximum length of the returned list and optionally padding length (see above).
pad_to_multiple_of (:obj:`int`, `optional`):
If set will pad the sequence to a multiple of the provided value.
This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >=
7.5 (Volta).
label_pad_token_id (:obj:`int`, `optional`, defaults to -100):
The id to use when padding the labels (-100 will be automatically ignored by PyTorch loss functions).
"""
tokenizer: PreTrainedTokenizerBase
model: Optional[PreTrainedModel] = None
padding: Union[bool, str, PaddingStrategy] = True
max_length: Optional[int] = None
pad_to_multiple_of: Optional[int] = None
label_pad_token_id: int = -100
def __call__(self, features):
labels = [feature["labels"] for feature in features] if "labels" in features[0].keys() else None
# We have to pad the labels before calling `tokenizer.pad` as this method won't pad them and needs them of the
# same length to return tensors.
if labels is not None:
max_label_length = max(len(l) for l in labels)
padding_side = self.tokenizer.padding_side
for feature in features:
remainder = [self.label_pad_token_id] * (max_label_length - len(feature["labels"]))
feature["labels"] = (
feature["labels"] + remainder if padding_side == "right" else remainder + feature["labels"]
)
features = self.tokenizer.pad(
features,
padding=self.padding,
max_length=self.max_length,
pad_to_multiple_of=self.pad_to_multiple_of,
return_tensors="pt",
)
# prepare decoder_input_ids
if self.model is not None and hasattr(self.model, "prepare_decoder_input_ids_from_labels"):
decoder_input_ids = self.model.prepare_decoder_input_ids_from_labels(labels=features["labels"])
features["decoder_input_ids"] = decoder_input_ids
return features
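# Hedged usage sketch (not part of the library source): the checkpoint name and token ids
# are assumptions. Labels are padded with -100 by the collator itself before
# tokenizer.pad handles the model inputs.
def _example_seq2seq_collator():
    from transformers import AutoTokenizer
    tokenizer = AutoTokenizer.from_pretrained("t5-small")
    collator = DataCollatorForSeq2Seq(tokenizer=tokenizer)
    features = [
        {"input_ids": [100, 19, 1], "labels": [3, 1]},
        {"input_ids": [100, 19, 625, 1], "labels": [3, 55, 1]},
    ]
    return collator(features)  # "labels" rows are padded to equal length with -100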
@dataclass
class DataCollatorForLanguageModeling:
"""
Data collator used for language modeling. Inputs are dynamically padded to the maximum length of a batch if they
are not all of the same length.
Args:
tokenizer (:class:`~transformers.PreTrainedTokenizer` or :class:`~transformers.PreTrainedTokenizerFast`):
The tokenizer used for encoding the data.
mlm (:obj:`bool`, `optional`, defaults to :obj:`True`):
Whether or not to use masked language modeling. If set to :obj:`False`, the labels are the same as the
inputs with the padding tokens ignored (by setting them to -100). Otherwise, the labels are -100 for
non-masked tokens and the value to predict for the masked token.
mlm_probability (:obj:`float`, `optional`, defaults to 0.15):
The probability with which to (randomly) mask tokens in the input, when :obj:`mlm` is set to :obj:`True`.
pad_to_multiple_of (:obj:`int`, `optional`):
If set will pad the sequence to a multiple of the provided value.
.. note::
For best performance, this data collator should be used with a dataset having items that are dictionaries or
BatchEncoding, with the :obj:`"special_tokens_mask"` key, as returned by a
:class:`~transformers.PreTrainedTokenizer` or a :class:`~transformers.PreTrainedTokenizerFast` with the
argument :obj:`return_special_tokens_mask=True`.
"""
tokenizer: PreTrainedTokenizerBase
mlm: bool = True
mlm_probability: float = 0.15
pad_to_multiple_of: Optional[int] = None
def __post_init__(self):
if self.mlm and self.tokenizer.mask_token is None:
raise ValueError(
"This tokenizer does not have a mask token which is necessary for masked language modeling. "
"You should pass `mlm=False` to train on causal language modeling instead."
)
def __call__(
self, examples: List[Union[List[int], torch.Tensor, Dict[str, torch.Tensor]]]
) -> Dict[str, torch.Tensor]:
# Handle dict or lists with proper padding and conversion to tensor.
if isinstance(examples[0], (dict, BatchEncoding)):
batch = self.tokenizer.pad(examples, return_tensors="pt", pad_to_multiple_of=self.pad_to_multiple_of)
else:
batch = {"input_ids": _collate_batch(examples, self.tokenizer, pad_to_multiple_of=self.pad_to_multiple_of)}
# If special token mask has been preprocessed, pop it from the dict.
special_tokens_mask = batch.pop("special_tokens_mask", None)
if self.mlm:
batch["input_ids"], batch["labels"] = self.mask_tokens(
batch["input_ids"], special_tokens_mask=special_tokens_mask
)
else:
labels = batch["input_ids"].clone()
if self.tokenizer.pad_token_id is not None:
labels[labels == self.tokenizer.pad_token_id] = -100
batch["labels"] = labels
return batch
def mask_tokens(
self, inputs: torch.Tensor, special_tokens_mask: Optional[torch.Tensor] = None
) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original.
"""
labels = inputs.clone()
# We sample a few tokens in each sequence for MLM training (with probability `self.mlm_probability`)
probability_matrix = torch.full(labels.shape, self.mlm_probability)
if special_tokens_mask is None:
special_tokens_mask = [
self.tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True) for val in labels.tolist()
]
special_tokens_mask = torch.tensor(special_tokens_mask, dtype=torch.bool)
else:
special_tokens_mask = special_tokens_mask.bool()
probability_matrix.masked_fill_(special_tokens_mask, value=0.0)
masked_indices = torch.bernoulli(probability_matrix).bool()
labels[~masked_indices] = -100 # We only compute loss on masked tokens
# 80% of the time, we replace masked input tokens with tokenizer.mask_token ([MASK])
indices_replaced = torch.bernoulli(torch.full(labels.shape, 0.8)).bool() & masked_indices
inputs[indices_replaced] = self.tokenizer.convert_tokens_to_ids(self.tokenizer.mask_token)
# 10% of the time, we replace masked input tokens with random word
indices_random = torch.bernoulli(torch.full(labels.shape, 0.5)).bool() & masked_indices & ~indices_replaced
random_words = torch.randint(len(self.tokenizer), labels.shape, dtype=torch.long)
inputs[indices_random] = random_words[indices_random]
# The rest of the time (10% of the time) we keep the masked input tokens unchanged
return inputs, labels
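# Hedged usage sketch (not part of the library source): the checkpoint name is an
# assumption. With mlm=True, roughly mlm_probability of the non-special tokens are
# selected; the returned "labels" keep the original ids there and -100 everywhere else.
def _example_mlm_collator():
    from transformers import AutoTokenizer
    tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
    collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm=True, mlm_probability=0.15)
    examples = [tokenizer("masked language modeling pads and masks dynamically")]
    return collator(examples)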
@dataclass
class DataCollatorForWholeWordMask(DataCollatorForLanguageModeling):
"""
Data collator used for language modeling that masks entire words.
- collates batches of tensors, honoring their tokenizer's pad_token
- preprocesses batches for masked language modeling
.. note::
This collator relies on details of the implementation of subword tokenization by
:class:`~transformers.BertTokenizer`, specifically that subword tokens are prefixed with `##`. For tokenizers
that do not adhere to this scheme, this collator will produce an output that is roughly equivalent to
:class:`.DataCollatorForLanguageModeling`.
"""
def __call__(
self, examples: List[Union[List[int], torch.Tensor, Dict[str, torch.Tensor]]]
) -> Dict[str, torch.Tensor]:
if isinstance(examples[0], (dict, BatchEncoding)):
input_ids = [e["input_ids"] for e in examples]
else:
input_ids = examples
examples = [{"input_ids": e} for e in examples]
batch_input = _collate_batch(input_ids, self.tokenizer)
mask_labels = []
for e in examples:
ref_tokens = []
for id in tolist(e["input_ids"]):
token = self.tokenizer._convert_id_to_token(id)
ref_tokens.append(token)
# For Chinese tokens, we need extra inf to mark sub-word, e.g [喜,欢]-> [喜,##欢]
if "chinese_ref" in e:
ref_pos = tolist(e["chinese_ref"])
len_seq = len(e["input_ids"])
for i in range(len_seq):
if i in ref_pos:
ref_tokens[i] = "##" + ref_tokens[i]
mask_labels.append(self._whole_word_mask(ref_tokens))
batch_mask = _collate_batch(mask_labels, self.tokenizer)
inputs, labels = self.mask_tokens(batch_input, batch_mask)
return {"input_ids": inputs, "labels": labels}
def _whole_word_mask(self, input_tokens: List[str], max_predictions=512):
"""
Get 0/1 labels for masked tokens with whole word mask proxy
"""
if not isinstance(self.tokenizer, (BertTokenizer, BertTokenizerFast)):
warnings.warn(
"DataCollatorForWholeWordMask is only suitable for BertTokenizer-like tokenizers."
"Please refer to the documentation for more information."
)
cand_indexes = []
for (i, token) in enumerate(input_tokens):
if token == "[CLS]" or token == "[SEP]":
continue
if len(cand_indexes) >= 1 and token.startswith("##"):
cand_indexes[-1].append(i)
else:
cand_indexes.append([i])
random.shuffle(cand_indexes)
num_to_predict = min(max_predictions, max(1, int(round(len(input_tokens) * self.mlm_probability))))
masked_lms = []
covered_indexes = set()
for index_set in cand_indexes:
if len(masked_lms) >= num_to_predict:
break
# If adding a whole-word mask would exceed the maximum number of
# predictions, then just skip this candidate.
if len(masked_lms) + len(index_set) > num_to_predict:
continue
is_any_index_covered = False
for index in index_set:
if index in covered_indexes:
is_any_index_covered = True
break
if is_any_index_covered:
continue
for index in index_set:
covered_indexes.add(index)
masked_lms.append(index)
assert len(covered_indexes) == len(masked_lms)
mask_labels = [1 if i in covered_indexes else 0 for i in range(len(input_tokens))]
return mask_labels
def mask_tokens(self, inputs: torch.Tensor, mask_labels: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original. Setting
'mask_labels' means we use whole word masking (wwm); indices are masked directly according to its reference.
"""
if self.tokenizer.mask_token is None:
raise ValueError(
"This tokenizer does not have a mask token which is necessary for masked language modeling. Remove the --mlm flag if you want to use this tokenizer."
)
labels = inputs.clone()
# We sample a few tokens in each sequence for masked-LM training (with probability args.mlm_probability defaults to 0.15 in Bert/RoBERTa)
probability_matrix = mask_labels
special_tokens_mask = [
self.tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True) for val in labels.tolist()
]
probability_matrix.masked_fill_(torch.tensor(special_tokens_mask, dtype=torch.bool), value=0.0)
if self.tokenizer._pad_token is not None:
padding_mask = labels.eq(self.tokenizer.pad_token_id)
probability_matrix.masked_fill_(padding_mask, value=0.0)
masked_indices = probability_matrix.bool()
labels[~masked_indices] = -100 # We only compute loss on masked tokens
# 80% of the time, we replace masked input tokens with tokenizer.mask_token ([MASK])
indices_replaced = torch.bernoulli(torch.full(labels.shape, 0.8)).bool() & masked_indices
inputs[indices_replaced] = self.tokenizer.convert_tokens_to_ids(self.tokenizer.mask_token)
# 10% of the time, we replace masked input tokens with random word
indices_random = torch.bernoulli(torch.full(labels.shape, 0.5)).bool() & masked_indices & ~indices_replaced
random_words = torch.randint(len(self.tokenizer), labels.shape, dtype=torch.long)
inputs[indices_random] = random_words[indices_random]
# The rest of the time (10% of the time) we keep the masked input tokens unchanged
return inputs, labels
@dataclass
class DataCollatorForSOP(DataCollatorForLanguageModeling):
"""
Data collator used for sentence order prediction task.
- collates batches of tensors, honoring their tokenizer's pad_token
- preprocesses batches for both masked language modeling and sentence order prediction
"""
def __init__(self, *args, **kwargs):
warnings.warn(
"DataCollatorForSOP is deprecated and will be removed in a future version, you can now use "
"DataCollatorForLanguageModeling instead.",
FutureWarning,
)
def __call__(self, examples: List[Dict[str, torch.Tensor]]) -> Dict[str, torch.Tensor]:
input_ids = [example["input_ids"] for example in examples]
input_ids = _collate_batch(input_ids, self.tokenizer)
input_ids, labels, attention_mask = self.mask_tokens(input_ids)
token_type_ids = [example["token_type_ids"] for example in examples]
# the size of segment_ids varies because of randomness, so pad zeros to the end as in the original implementation
token_type_ids = pad_sequence(token_type_ids, batch_first=True, padding_value=self.tokenizer.pad_token_id)
sop_label_list = [example["sentence_order_label"] for example in examples]
sentence_order_label = torch.stack(sop_label_list)
return {
"input_ids": input_ids,
"labels": labels,
"attention_mask": attention_mask,
"token_type_ids": token_type_ids,
"sentence_order_label": sentence_order_label,
}
def mask_tokens(self, inputs: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
"""
Prepare masked tokens inputs/labels/attention_mask for masked language modeling: 80% MASK, 10% random, 10%
original. N-gram not applied yet.
"""
if self.tokenizer.mask_token is None:
raise ValueError(
"This tokenizer does not have a mask token which is necessary for masked language modeling. Remove the --mlm flag if you want to use this tokenizer."
)
labels = inputs.clone()
# We sample a few tokens in each sequence for masked-LM training (with probability args.mlm_probability defaults to 0.15 in Bert/RoBERTa)
probability_matrix = torch.full(labels.shape, self.mlm_probability)
special_tokens_mask = [
self.tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True) for val in labels.tolist()
]
probability_matrix.masked_fill_(torch.tensor(special_tokens_mask, dtype=torch.bool), value=0.0)
if self.tokenizer._pad_token is not None:
padding_mask = labels.eq(self.tokenizer.pad_token_id)
probability_matrix.masked_fill_(padding_mask, value=0.0)
masked_indices = torch.bernoulli(probability_matrix).bool()
# probability be `1` (masked), however in albert model attention mask `0` means masked, revert the value
attention_mask = (~masked_indices).float()
if self.tokenizer._pad_token is not None:
attention_padding_mask = labels.eq(self.tokenizer.pad_token_id)
attention_mask.masked_fill_(attention_padding_mask, value=1.0)
labels[~masked_indices] = -100 # We only compute loss on masked tokens, -100 is default for CE compute
# 80% of the time, we replace masked input tokens with tokenizer.mask_token ([MASK])
indices_replaced = torch.bernoulli(torch.full(labels.shape, 0.8)).bool() & masked_indices
inputs[indices_replaced] = self.tokenizer.convert_tokens_to_ids(self.tokenizer.mask_token)
# 10% of the time, we replace masked input tokens with random word
indices_random = torch.bernoulli(torch.full(labels.shape, 0.5)).bool() & masked_indices & ~indices_replaced
random_words = torch.randint(len(self.tokenizer), labels.shape, dtype=torch.long)
inputs[indices_random] = random_words[indices_random]
# The rest of the time (10% of the time) we keep the masked input tokens unchanged
return inputs, labels, attention_mask
@dataclass
class DataCollatorForPermutationLanguageModeling:
"""
Data collator used for permutation language modeling.
- collates batches of tensors, honoring their tokenizer's pad_token
- preprocesses batches for permutation language modeling with procedures specific to XLNet
"""
tokenizer: PreTrainedTokenizerBase
plm_probability: float = 1 / 6
max_span_length: int = 5 # maximum length of a span of masked tokens
def __call__(
self, examples: List[Union[List[int], torch.Tensor, Dict[str, torch.Tensor]]]
) -> Dict[str, torch.Tensor]:
if isinstance(examples[0], (dict, BatchEncoding)):
examples = [e["input_ids"] for e in examples]
batch = _collate_batch(examples, self.tokenizer)
inputs, perm_mask, target_mapping, labels = self.mask_tokens(batch)
return {"input_ids": inputs, "perm_mask": perm_mask, "target_mapping": target_mapping, "labels": labels}
def mask_tokens(self, inputs: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
"""
The masked tokens to be predicted for a particular sequence are determined by the following algorithm:
0. Start from the beginning of the sequence by setting ``cur_len = 0`` (number of tokens processed so far).
1. Sample a ``span_length`` from the interval ``[1, max_span_length]`` (length of span of tokens to be
masked)
2. Reserve a context of length ``context_length = span_length / plm_probability`` to surround span to be
masked
3. Sample a starting point ``start_index`` from the interval ``[cur_len, cur_len + context_length -
span_length]`` and mask tokens ``start_index:start_index + span_length``
4. Set ``cur_len = cur_len + context_length``. If ``cur_len < max_len`` (i.e. there are tokens remaining in
the sequence to be processed), repeat from Step 1.
"""
if self.tokenizer.mask_token is None:
raise ValueError(
"This tokenizer does not have a mask token which is necessary for permutation language modeling. Please add a mask token if you want to use this tokenizer."
)
if inputs.size(1) % 2 != 0:
raise ValueError(
"This collator requires that sequence lengths be even to create a leakage-free perm_mask. Please see relevant comments in source code for details."
)
labels = inputs.clone()
# Creating the mask and target_mapping tensors
masked_indices = torch.full(labels.shape, 0, dtype=torch.bool)
target_mapping = torch.zeros((labels.size(0), labels.size(1), labels.size(1)), dtype=torch.float32)
for i in range(labels.size(0)):
# Start from the beginning of the sequence by setting `cur_len = 0` (number of tokens processed so far).
cur_len = 0
max_len = labels.size(1)
while cur_len < max_len:
# Sample a `span_length` from the interval `[1, max_span_length]` (length of span of tokens to be masked)
span_length = torch.randint(1, self.max_span_length + 1, (1,)).item()
# Reserve a context of length `context_length = span_length / plm_probability` to surround the span to be masked
context_length = int(span_length / self.plm_probability)
# Sample a starting point `start_index` from the interval `[cur_len, cur_len + context_length - span_length]` and mask tokens `start_index:start_index + span_length`
start_index = cur_len + torch.randint(context_length - span_length + 1, (1,)).item()
masked_indices[i, start_index : start_index + span_length] = 1
# Set `cur_len = cur_len + context_length`
cur_len += context_length
# Since we're replacing non-masked tokens with -100 in the labels tensor instead of skipping them altogether,
# the i-th predict corresponds to the i-th token.
target_mapping[i] = torch.eye(labels.size(1))
special_tokens_mask = torch.tensor(
[self.tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True) for val in labels.tolist()],
dtype=torch.bool,
)
masked_indices.masked_fill_(special_tokens_mask, value=0.0)
if self.tokenizer._pad_token is not None:
padding_mask = labels.eq(self.tokenizer.pad_token_id)
masked_indices.masked_fill_(padding_mask, value=0.0)
# Mask indicating non-functional tokens, where functional tokens are [SEP], [CLS], padding, etc.
non_func_mask = ~(padding_mask | special_tokens_mask)
inputs[masked_indices] = self.tokenizer.mask_token_id
labels[~masked_indices] = -100 # We only compute loss on masked tokens
perm_mask = torch.zeros((labels.size(0), labels.size(1), labels.size(1)), dtype=torch.float32)
for i in range(labels.size(0)):
# Generate permutation indices i.e. sample a random factorisation order for the sequence. This will
# determine which tokens a given token can attend to (encoded in `perm_mask`).
# Note: Length of token sequence being permuted has to be less than or equal to reused sequence length
# (see documentation for `mems`), otherwise information may leak through due to reuse. In this implementation,
# we assume that reused length is half of sequence length and permutation length is equal to reused length.
# This requires that the sequence length be even.
# Create a linear factorisation order
perm_index = torch.arange(labels.size(1))
# Split this into two halves, assuming that half the sequence is reused each time
perm_index = perm_index.reshape((-1, labels.size(1) // 2)).transpose(0, 1)
# Permute the two halves such that they do not cross over
perm_index = perm_index[torch.randperm(labels.size(1) // 2)]
# Flatten this out into the desired permuted factorisation order
perm_index = torch.flatten(perm_index.transpose(0, 1))
# Set the permutation indices of non-masked (non-functional) tokens to the
# smallest index (-1) so that:
# (1) They can be seen by all other positions
# (2) They cannot see masked positions, so there won't be information leak
perm_index.masked_fill_(~masked_indices[i] & non_func_mask[i], -1)
# The logic for whether the i-th token can attend on the j-th token based on the factorisation order:
# 0 (can attend): If perm_index[i] > perm_index[j] or j is neither masked nor a functional token
# 1 (cannot attend): If perm_index[i] <= perm_index[j] and j is either masked or a functional token
perm_mask[i] = (
perm_index.reshape((labels.size(1), 1)) <= perm_index.reshape((1, labels.size(1)))
) & masked_indices[i]
return inputs.long(), perm_mask, target_mapping, labels.long()
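# Hedged usage sketch (not part of the library source): the checkpoint name and token ids
# are assumptions; the ids are kept to an even length as required above. With the default
# plm_probability of 1/6, a sampled span_length of 5 reserves int(5 / (1/6)) = 30 context tokens.
def _example_plm_collator():
    from transformers import AutoTokenizer
    tokenizer = AutoTokenizer.from_pretrained("xlnet-base-cased")
    collator = DataCollatorForPermutationLanguageModeling(tokenizer=tokenizer)
    examples = [[35, 36, 37, 38, 39, 40, 41, 42]]  # made-up ids; the length must be even
    return collator(examples)  # keys: input_ids, perm_mask, target_mapping, labels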
| 50.336565 | 181 | 0.662383 | [
"Apache-2.0"
] | 21jun/transformers | src/transformers/data/data_collator.py | 36,353 | Python |
# -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
"""Tests for the `verdi group` command."""
from aiida import orm
from aiida.backends.testbase import AiidaTestCase
from aiida.common import exceptions
from aiida.cmdline.commands.cmd_group import (
group_list, group_create, group_delete, group_relabel, group_description, group_add_nodes, group_remove_nodes,
group_show, group_copy
)
class TestVerdiGroup(AiidaTestCase):
"""Tests for the `verdi group` command."""
@classmethod
def setUpClass(cls, *args, **kwargs):
super().setUpClass(*args, **kwargs)
for group in ['dummygroup1', 'dummygroup2', 'dummygroup3', 'dummygroup4']:
orm.Group(label=group).store()
def setUp(self):
"""Create runner object to run tests."""
from click.testing import CliRunner
self.cli_runner = CliRunner()
def test_help(self):
"""Tests help text for all group sub commands."""
options = ['--help']
# verdi group list
result = self.cli_runner.invoke(group_list, options)
self.assertIsNone(result.exception, result.output)
self.assertIn('Usage', result.output)
# verdi group create
result = self.cli_runner.invoke(group_create, options)
self.assertIsNone(result.exception, result.output)
self.assertIn('Usage', result.output)
# verdi group delete
result = self.cli_runner.invoke(group_delete, options)
self.assertIsNone(result.exception, result.output)
self.assertIn('Usage', result.output)
# verdi group relabel
result = self.cli_runner.invoke(group_relabel, options)
self.assertIsNone(result.exception, result.output)
self.assertIn('Usage', result.output)
# verdi group description
result = self.cli_runner.invoke(group_description, options)
self.assertIsNone(result.exception, result.output)
self.assertIn('Usage', result.output)
# verdi group addnodes
result = self.cli_runner.invoke(group_add_nodes, options)
self.assertIsNone(result.exception, result.output)
self.assertIn('Usage', result.output)
# verdi group removenodes
result = self.cli_runner.invoke(group_remove_nodes, options)
self.assertIsNone(result.exception, result.output)
self.assertIn('Usage', result.output)
# verdi group show
result = self.cli_runner.invoke(group_show, options)
self.assertIsNone(result.exception, result.output)
self.assertIn('Usage', result.output)
# verdi group copy
result = self.cli_runner.invoke(group_copy, options)
self.assertIsNone(result.exception, result.output)
self.assertIn('Usage', result.output)
def test_create(self):
"""Test `verdi group create` command."""
result = self.cli_runner.invoke(group_create, ['dummygroup5'])
self.assertClickResultNoException(result)
# check if newly added group in present in list
result = self.cli_runner.invoke(group_list)
self.assertClickResultNoException(result)
self.assertIn('dummygroup5', result.output)
def test_list(self):
"""Test `verdi group list` command."""
result = self.cli_runner.invoke(group_list)
self.assertClickResultNoException(result)
for grp in ['dummygroup1', 'dummygroup2']:
self.assertIn(grp, result.output)
def test_copy(self):
"""Test `verdi group copy` command."""
result = self.cli_runner.invoke(group_copy, ['dummygroup1', 'dummygroup2'])
self.assertClickResultNoException(result)
self.assertIn('Success', result.output)
def test_delete(self):
"""Test `verdi group delete` command."""
orm.Group(label='group_test_delete_01').store()
orm.Group(label='group_test_delete_02').store()
result = self.cli_runner.invoke(group_delete, ['--force', 'group_test_delete_01'])
self.assertClickResultNoException(result)
# Verify that removed group is not present in list
result = self.cli_runner.invoke(group_list)
self.assertClickResultNoException(result)
self.assertNotIn('group_test_delete_01', result.output)
node_01 = orm.CalculationNode().store()
node_02 = orm.CalculationNode().store()
# Add some nodes and then use `verdi group delete --clear` to delete a node even when it contains nodes
group = orm.load_group(label='group_test_delete_02')
group.add_nodes([node_01, node_02])
self.assertEqual(group.count(), 2)
# Calling delete on a group without the `--clear` option should raise
result = self.cli_runner.invoke(group_delete, ['--force', 'group_test_delete_02'])
self.assertIsNotNone(result.exception, result.output)
# With `--clear` option should delete group and nodes
result = self.cli_runner.invoke(group_delete, ['--force', '--clear', 'group_test_delete_02'])
self.assertClickResultNoException(result)
with self.assertRaises(exceptions.NotExistent):
group = orm.load_group(label='group_test_delete_02')
def test_show(self):
"""Test `verdi group show` command."""
result = self.cli_runner.invoke(group_show, ['dummygroup1'])
self.assertClickResultNoException(result)
for grpline in [
'Group label', 'dummygroup1', 'Group type_string', 'user', 'Group description', '<no description>'
]:
self.assertIn(grpline, result.output)
def test_description(self):
"""Test `verdi group description` command."""
description = 'It is a new description'
group = orm.load_group(label='dummygroup2')
self.assertNotEqual(group.description, description)
# Change the description of the group
result = self.cli_runner.invoke(group_description, [group.label, description])
self.assertClickResultNoException(result)
self.assertEqual(group.description, description)
# When no description argument is passed the command should just echo the current description
result = self.cli_runner.invoke(group_description, [group.label])
self.assertClickResultNoException(result)
self.assertIn(description, result.output)
def test_relabel(self):
"""Test `verdi group relabel` command."""
result = self.cli_runner.invoke(group_relabel, ['dummygroup4', 'relabeled_group'])
self.assertIsNone(result.exception, result.output)
# check if group list command shows changed group name
result = self.cli_runner.invoke(group_list)
self.assertClickResultNoException(result)
self.assertNotIn('dummygroup4', result.output)
self.assertIn('relabeled_group', result.output)
def test_add_remove_nodes(self):
"""Test `verdi group remove-nodes` command."""
node_01 = orm.CalculationNode().store()
node_02 = orm.CalculationNode().store()
node_03 = orm.CalculationNode().store()
result = self.cli_runner.invoke(group_add_nodes, ['--force', '--group=dummygroup1', node_01.uuid])
self.assertClickResultNoException(result)
# Check if node is added in group using group show command
result = self.cli_runner.invoke(group_show, ['dummygroup1'])
self.assertClickResultNoException(result)
self.assertIn('CalculationNode', result.output)
self.assertIn(str(node_01.pk), result.output)
# Remove same node
result = self.cli_runner.invoke(group_remove_nodes, ['--force', '--group=dummygroup1', node_01.uuid])
self.assertIsNone(result.exception, result.output)
        # Check that the node was removed from the group using the `verdi group show` command
result = self.cli_runner.invoke(group_show, ['-r', 'dummygroup1'])
self.assertClickResultNoException(result)
self.assertNotIn('CalculationNode', result.output)
self.assertNotIn(str(node_01.pk), result.output)
# Add all three nodes and then use `verdi group remove-nodes --clear` to remove them all
group = orm.load_group(label='dummygroup1')
group.add_nodes([node_01, node_02, node_03])
self.assertEqual(group.count(), 3)
result = self.cli_runner.invoke(group_remove_nodes, ['--force', '--clear', '--group=dummygroup1'])
self.assertClickResultNoException(result)
self.assertEqual(group.count(), 0)
def test_copy_existing_group(self):
"""Test user is prompted to continue if destination group exists and is not empty"""
source_label = 'source_copy_existing_group'
dest_label = 'dest_copy_existing_group'
# Create source group with nodes
calc_s1 = orm.CalculationNode().store()
calc_s2 = orm.CalculationNode().store()
nodes_source_group = {str(node.uuid) for node in [calc_s1, calc_s2]}
source_group = orm.Group(label=source_label).store()
source_group.add_nodes([calc_s1, calc_s2])
# Copy using `verdi group copy` - making sure all is successful
options = [source_label, dest_label]
result = self.cli_runner.invoke(group_copy, options)
self.assertClickResultNoException(result)
self.assertIn(
'Success: Nodes copied from group<{}> to group<{}>'.format(source_label, dest_label), result.output,
result.exception
)
# Check destination group exists with source group's nodes
dest_group = orm.load_group(label=dest_label)
self.assertEqual(dest_group.count(), 2)
nodes_dest_group = {str(node.uuid) for node in dest_group.nodes}
self.assertSetEqual(nodes_source_group, nodes_dest_group)
# Copy again, making sure an abort error is raised, since no user input can be made and default is abort
result = self.cli_runner.invoke(group_copy, options)
self.assertIsNotNone(result.exception, result.output)
self.assertIn(
'Warning: Destination group<{}> already exists and is not empty.'.format(dest_label), result.output,
result.exception
)
# Check destination group is unchanged
dest_group = orm.load_group(label=dest_label)
self.assertEqual(dest_group.count(), 2)
nodes_dest_group = {str(node.uuid) for node in dest_group.nodes}
self.assertSetEqual(nodes_source_group, nodes_dest_group)
| 43.711462 | 114 | 0.659011 | [
"BSD-2-Clause",
"MIT"
] | pranavmodx/aiida-core | tests/cmdline/commands/test_group.py | 11,059 | Python |
'''
This module provides little helper functions that interpret
the Handle Server's response codes and the HTTP status codes.
All helpers functions test one possible outcome and return
True or False.
Author: Merret Buurman (DKRZ), 2015-2016
'''
import json
from b2handle.compatibility_helper import decoded_response
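# Usage sketch (illustrative, not part of the library): each helper inspects a
# requests.Response-like object exposing `status_code`, `headers` and a JSON body, e.g.
#   resp = requests.get(handle_record_url)  # hypothetical call
#   if handle_not_found(resp):
#       ...  # treat the handle record as missing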
def is_redirect_from_http_to_https(response):
if response.status_code == 302:
oldurl = response.url
newurl = response.headers['location']
if oldurl.startswith('http://') and oldurl.replace('http', 'https') == newurl:
return True
return False
def is_temporary_redirect(response):
if response.status_code in [301, 302, 303, 307]:
return True
return False
def handle_success(response):
response_content = decoded_response(response)
if (response.status_code == 200 or response.status_code == 201) and json.loads(response_content)["responseCode"] == 1:
return True
return False
def does_handle_exist(response):
if handle_success(response):
return True
return False
def is_handle_empty(response):
response_content = decoded_response(response)
if response.status_code == 200 and json.loads(response_content)["responseCode"] == 200:
return True
return False
def was_handle_created(response):
response_content = decoded_response(response)
if response.status_code == 201 and json.loads(response_content)["responseCode"] == 1:
return True
return False
def handle_not_found(response):
response_content = decoded_response(response)
if response.status_code == 404 and json.loads(response_content)["responseCode"] == 100:
return True
return False
def not_authenticated(response):
response_content = decoded_response(response)
try:
if response.status_code == 401 or json.loads(response_content)["responseCode"] == 402:
# need to put 'OR' because the HS responseCode is not always received!
return True
except ValueError as e: # If there is no JSON response.
pass
return False
def values_not_found(response):
response_content = decoded_response(response)
if response.status_code == 400 and json.loads(response_content)["responseCode"] == 200:
return True
return False
def handle_already_exists(response):
response_content = decoded_response(response)
    if response.status_code == 409 and json.loads(response_content)["responseCode"] == 101:
return True
return False
| 32.987013 | 122 | 0.71378 | [
"Apache-2.0"
] | EUDAT-B2HANDLE/B2HANDLE | b2handle/hsresponses.py | 2,540 | Python |
"""
auth.blueprints
~~~~~~~~~~~~~~~
"""
from .simpleidp import blueprint as idpblueprint
| 14 | 48 | 0.571429 | [
"MPL-2.0",
"MPL-2.0-no-copyleft-exception"
] | Amsterdam/auth | auth/blueprints/__init__.py | 98 | Python |
# -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from pyarrow.compat import unittest, u # noqa
import pyarrow
import datetime
class TestConvertList(unittest.TestCase):
def test_boolean(self):
expected = [True, None, False, None]
arr = pyarrow.from_pylist(expected)
assert len(arr) == 4
assert arr.null_count == 2
assert arr.type == pyarrow.bool_()
assert arr.to_pylist() == expected
def test_empty_list(self):
arr = pyarrow.from_pylist([])
assert len(arr) == 0
assert arr.null_count == 0
assert arr.type == pyarrow.null()
assert arr.to_pylist() == []
def test_all_none(self):
arr = pyarrow.from_pylist([None, None])
assert len(arr) == 2
assert arr.null_count == 2
assert arr.type == pyarrow.null()
assert arr.to_pylist() == [None, None]
def test_integer(self):
expected = [1, None, 3, None]
arr = pyarrow.from_pylist(expected)
assert len(arr) == 4
assert arr.null_count == 2
assert arr.type == pyarrow.int64()
assert arr.to_pylist() == expected
def test_garbage_collection(self):
import gc
# Force the cyclic garbage collector to run
gc.collect()
bytes_before = pyarrow.total_allocated_bytes()
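        # The array created below is never referenced again, so after the next collection
        # the memory pool should be back at the baseline recorded above.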
pyarrow.from_pylist([1, None, 3, None])
gc.collect()
assert pyarrow.total_allocated_bytes() == bytes_before
def test_double(self):
data = [1.5, 1, None, 2.5, None, None]
arr = pyarrow.from_pylist(data)
assert len(arr) == 6
assert arr.null_count == 3
assert arr.type == pyarrow.double()
assert arr.to_pylist() == data
def test_unicode(self):
data = [u'foo', u'bar', None, u'mañana']
arr = pyarrow.from_pylist(data)
assert len(arr) == 4
assert arr.null_count == 1
assert arr.type == pyarrow.string()
assert arr.to_pylist() == data
def test_bytes(self):
u1 = b'ma\xc3\xb1ana'
data = [b'foo',
u1.decode('utf-8'), # unicode gets encoded,
None]
arr = pyarrow.from_pylist(data)
assert len(arr) == 3
assert arr.null_count == 1
assert arr.type == pyarrow.binary()
assert arr.to_pylist() == [b'foo', u1, None]
def test_date(self):
data = [datetime.date(2000, 1, 1), None, datetime.date(1970, 1, 1),
datetime.date(2040, 2, 26)]
arr = pyarrow.from_pylist(data)
assert len(arr) == 4
assert arr.type == pyarrow.date64()
assert arr.null_count == 1
assert arr[0].as_py() == datetime.date(2000, 1, 1)
assert arr[1].as_py() is None
assert arr[2].as_py() == datetime.date(1970, 1, 1)
assert arr[3].as_py() == datetime.date(2040, 2, 26)
def test_timestamp(self):
data = [
datetime.datetime(2007, 7, 13, 1, 23, 34, 123456),
None,
datetime.datetime(2006, 1, 13, 12, 34, 56, 432539),
datetime.datetime(2010, 8, 13, 5, 46, 57, 437699)
]
arr = pyarrow.from_pylist(data)
assert len(arr) == 4
assert arr.type == pyarrow.timestamp('us')
assert arr.null_count == 1
assert arr[0].as_py() == datetime.datetime(2007, 7, 13, 1,
23, 34, 123456)
assert arr[1].as_py() is None
assert arr[2].as_py() == datetime.datetime(2006, 1, 13, 12,
34, 56, 432539)
assert arr[3].as_py() == datetime.datetime(2010, 8, 13, 5,
46, 57, 437699)
def test_mixed_nesting_levels(self):
pyarrow.from_pylist([1, 2, None])
pyarrow.from_pylist([[1], [2], None])
pyarrow.from_pylist([[1], [2], [None]])
with self.assertRaises(pyarrow.ArrowException):
pyarrow.from_pylist([1, 2, [1]])
with self.assertRaises(pyarrow.ArrowException):
pyarrow.from_pylist([1, 2, []])
with self.assertRaises(pyarrow.ArrowException):
pyarrow.from_pylist([[1], [2], [None, [1]]])
def test_list_of_int(self):
data = [[1, 2, 3], [], None, [1, 2]]
arr = pyarrow.from_pylist(data)
assert len(arr) == 4
assert arr.null_count == 1
assert arr.type == pyarrow.list_(pyarrow.int64())
assert arr.to_pylist() == data
| 35.802721 | 75 | 0.584458 | [
"Apache-2.0"
] | julienledem/arrow | python/pyarrow/tests/test_convert_builtin.py | 5,264 | Python |
import sys
import argparse
import logging
from ml_crop.version import __version__
from ml_crop.utils_train import train
from ml_crop.utils_predict import predict
logger = logging.getLogger(__name__)
def parse_args(args):
desc = 'ml_crop (v%s)' % __version__
dhf = argparse.ArgumentDefaultsHelpFormatter
parser0 = argparse.ArgumentParser(description=desc)
pparser = argparse.ArgumentParser(add_help=False)
pparser.add_argument('--version', help='Print version and exit', action='version', version=__version__)
pparser.add_argument('--log', default=2, type=int,
help='0:all, 1:debug, 2:info, 3:warning, 4:error, 5:critical')
subparsers = parser0.add_subparsers(dest='command')
parser = subparsers.add_parser('train', parents=[pparser], help='train the model', formatter_class=dhf)
parser.add_argument('-model', '--model_id', help='classifer model to apply, e.g. randomforest', default='randomforest', type=str, required=True)
parser.add_argument('-tif', '--raster_file', help='path to the satellite image in raster', default='trans_nzoia.tif', type=str, required=True)
parser.add_argument('-crop', '--train_geo', help='path to the crop type vector in geojson or shp', default='training_combined.geojson', type=str, required=True)
parser.add_argument('-out', '--out_model', help='the train model output as pickle file', default="model.sav", type=str, required=True)
parser = subparsers.add_parser('predict', parents=[pparser], help='used a pretrained model for prediction', formatter_class=dhf)
parser.add_argument('-out', '--out_model', help='the train model output as pickle file', default="model.sav", type=str, required=True)
parser.add_argument('-ntif', '--pred_tiff', help='path to the satellite image in raster', default='trans_nzoia.tif', type=str, required=True)
parser.add_argument('-outimg', '--output_image', help='filename to save the output classification images', default='trans_nzoia.tif', type=str, required=True)
parsed_args = vars(parser0.parse_args(args))
return parsed_args
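# Example invocations (hypothetical file names; the flags match the parsers defined above):
#   python main.py train -model randomforest -tif trans_nzoia.tif -crop training_combined.geojson -out model.sav
#   python main.py predict -out model.sav -ntif trans_nzoia.tif -outimg classified.tif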
def main(cmd, **kwargs):
if cmd == 'train':
train(**kwargs)
elif cmd == 'predict':
predict(**kwargs)
def cli():
args = parse_args(sys.argv[1:])
logger.setLevel(args.pop('log') * 10)
main(args.pop('command'), **args)
if __name__ == "__main__":
cli()
| 44.314815 | 164 | 0.709987 | [
"MIT"
] | developmentseed/servir-training-notebooks | crop_type_mapping/ml_crop_cli/ml_crop/main.py | 2,393 | Python |
# Most of this code is stolen from myself, but it's an app about pirating so whatev
import os
import sys
import time
import datetime
import subprocess
import pycurl
import spotipy
import spotipy.util
from StringIO import StringIO
from collections import deque
# If this seems like overkill, that's because it is. I wrote this get_args code
# for general use; this function is not specific to this script.
def get_args():
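    """Parse sys.argv into a dict of flag -> value.
    Illustrative behaviour (examples added for clarity, not in the original):
    `--mode=playlist` stores the string 'playlist', space-separated values such as
    `--plid My List` are collected into a list, and grouped single-character flags
    like `-xyz` map each character to True.
    """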
def is_flag(check):
if check[:2] == '--':
return True
elif check[0] == '-':
return True
else:
return False
argdict = {}
for i, arg in enumerate(sys.argv):
if i == 0:
# this is just the name of the program
continue
check = sys.argv[i]
if not is_flag(check):
continue
elif check[:2] == '--':
# single multi-character flag
check = check[2:]
# it makes my life easier if they do this
if '=' in check:
check = check.split('=')
argdict[check[0]] = check[1]
continue
j = i+1
params = []
            while j < len(sys.argv) and not is_flag(sys.argv[j]):
                params.append(sys.argv[j])
                j += 1
if not params:
# if params is empty, just make the value True
params = True
argdict[check] = params
elif check[0] == '-':
# multiple single character flags
check = check[1:]
# it makes my life easier if they do this
if '=' in check:
check = check.split('=')
argdict[check[0]] = check[1]
continue
if len(check) == 1:
# if there is just one character, the arg can have params
j = i+1
params = []
                while j < len(sys.argv) and not is_flag(sys.argv[j]):
                    params.append(sys.argv[j])
                    j += 1
if not params:
# if params is empty, just make the value True
params = True
argdict[check] = params
else:
for c in check:
argdict[c] = True
return argdict
# Hacky AF but it works
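# Foobar stands in for subprocess.Popen: it only exposes a no-op wait(), so ytdl() can
# always return something that handle_downloading() may call .wait() on, even when no
# download was actually started.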
class Foobar:
def wait(self):
return
class Song:
def __init__(self, title, artist, pl):
self.title = title.replace("/", "&").replace(" -", ",").replace("-", "~")
self.artist = artist.replace("/", "&")
self.playlist = pl
self.id = None
class Pirate:
### CONSTRUCTOR ###
def __init__(self, settings):
self.downloadthreads = []
self.downloadqueue = deque()
self.alive = True
self.verbose = int(settings["verbose"])
self.downloadthreads = settings["download_threads"]
self.dl_folder = os.path.realpath(settings["dl_folder"])
self.log_folder = os.path.realpath(settings["log_folder"])
self.err_folder = os.path.realpath(settings["err_folder"])
self.username = settings["username"]
scope = "user-library-read"
client_id = settings["client_id"]
client_secret = settings["client_secret"]
redirect_url = "http://localhost:8888/callback"
# Request a token
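        # (prompt_for_user_token walks the user through Spotify's OAuth flow: it points
        # the user at an authorization URL, asks for the redirected URL to be pasted back,
        # and caches the resulting token for later runs.)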
token = spotipy.util.prompt_for_user_token(self.username, scope, client_id, client_secret, redirect_url)
self.sp = spotipy.Spotify(auth=token)
args = get_args()
if "plid" in args:
self.plid = args["plid"]
elif "p" in args:
self.plid = args["p"]
else:
self.plid = None
self.mode = "all playlists"
if "mode" in args:
self.mode = args["mode"]
elif "m" in args:
self.mode = args["m"]
self.mode = self.mode.lower()
if self.mode not in ["all playlists", "apl", "saved", "s", "everything", "e", "playlist", "pl"]:
self.log_error("Invalid mode: %s" % self.mode, 0)
self.terminate()
if self.mode == "playlist" and self.plid is None:
self.log_error("You need to supply a playlist to fetch", 0)
self.terminate()
### THESE FUNCTIONS HANDLE STARTUP ###
def init(self):
if not os.path.exists(self.dl_folder):
os.makedirs(self.dl_folder)
#initiate logs
if not os.path.exists(self.log_folder):
os.makedirs(self.log_folder)
if not os.path.exists(self.err_folder):
os.makedirs(self.err_folder)
self.now = lambda: str(datetime.datetime.now())
# Error log
fname = self.now().replace(':', '-') + ".log"
self.log_file = os.path.join(self.log_folder, fname)
self.err_file = os.path.join(self.err_folder, fname)
with open(self.log_file, 'w') as f:
f.write("Activity log for yt-streamer:\n---------------------\n")
with open(self.err_file, 'w') as f:
f.write("Error log for yt-streamer:\n---------------------\n")
def go(self):
try:
if self.mode in ["all playlists", "apl"]:
self.fetchPls()
elif self.mode in ["saved", "s"]:
self.fetchSaved()
elif self.mode in ["everything", "e"]:
self.fetchAll()
elif self.mode in ["playlist", "pl"]:
playlists = self.sp.user_playlists(self.username)
for playlist in playlists['items']:
if playlist['name'].lower() == self.plid.lower():
self.fetch(playlist)
else:
self.log("Not the one: %s" % playlist['name'], 20)
self.log("Looking for: %s" % self.plid, 20)
else:
self.log_error("Invalid mode: %s" % self.mode, 0)
self.terminate()
self.handle_downloading()
except Exception as e:
self.log_error(str(e), 10)
except:
self.log("Program terminated by user (Keyboard Interrupt)", 10)
self.terminate()
def addtrack(self, track, plname):
name = track['name']
artists = ""
if len(track['artists']) > 0:
for artist in track['artists']:
artists += artist['name'] + ", "
artists = artists[:-2]
else:
artists = "Unknown Artist"
# Add to download queue as a Song object
s = Song(name, artists, plname)
s.id = self.ytsearch(s)
self.downloadqueue.append(s)
self.log("Found song: %s by %s" % (s.title, s.artist), 10)
def fetchAll(self):
self.fetchSaved()
self.fetchPls()
def fetchSaved(self):
# Fetch saved music
results = self.sp.current_user_saved_tracks()
for item in results['items']:
track = item['track']
self.addtrack(track, "My Music")
def fetchPls(self):
self.log("Fetching all playlists of %s" % self.username, 10)
# Fetch all spotify playlists of a user
playlists = self.sp.user_playlists(self.username)
for playlist in playlists['items']:
if playlist['owner']['id'] == self.username:
self.fetch(playlist)
# pl is a spodipy playlist object
def fetch(self, playlist):
# Fetch all songs in a playlist
self.log("Fetching playlist: %s" % playlist['name'])
results = self.sp.user_playlist(self.username, playlist['id'], fields="tracks,next")
tracks = results['tracks']
while True:
for item in tracks['items']:
# For each item, get the artist, playlist, and song name
track = item['track']
self.addtrack(track, playlist["name"])
if tracks['next']:
tracks = self.sp.next(tracks)
else:
break
def handle_downloading(self):
# Fix so queue never mutates when this is happening
while self.alive:
if (len(self.downloadqueue)):
to_push = self.downloadqueue.popleft() # Get the downloading song
self.log("Starting download: %s" % to_push.title, 10)
# Start download, wait for it to finish
self.ytdl(to_push).wait()
self.log("Finished download: %s" % to_push.title, 10)
# Edit ogg header info
# comment[0]="ARTIST=me";
# comment[1]="TITLE=the sound of Vorbis";
else:
self.terminate()
def ytdl(self, song):
pl = song.playlist
name = song.title + " - " + song.artist
vid = song.id
# Need better error handling (exceptions)
# Fix it later (or never)
if vid is None:
return Foobar()
# TODO: replace dl with self.dl_folder
ndir = "./%s/%s" % (self.dl_folder, pl)
fname = "%s/%s.ogg" % (ndir, name)
if not os.path.exists(ndir):
os.makedirs(ndir)
# check if file exists
if not os.path.isfile(fname):
# download the audio of a video based on the youtube id
try:
                p = subprocess.Popen(["youtube-dl",
                    "--extract-audio",
                    "--audio-format", "vorbis",
                    "-o", ndir + "/" + name + ".%(ext)s",
                    "https://www.youtube.com/watch?v=" + vid],
                    stdout=None)
return p
except Exception as e:
return Foobar()
return Foobar()
def getbetween(self, search, left, right):
for line in search.splitlines():
if not left in line:
continue
out = line.split(left)[1].split(right)[0]
return out
def curl(self, url):
c = pycurl.Curl()
c.setopt(c.URL, url)
buf = StringIO()
c.setopt(c.WRITEDATA, buf)
c.perform()
c.close()
return buf.getvalue()
def ytsearch(self, song):
# Get query from song
query = song.title + " " + song.artist
# Get the first result from the query
try:
query = query.replace(" ", "+")
url = "https://www.youtube.com/results?search_query="
url += query
#url += '+-"music+video"+dirty'
#url += '+lyrics+dirty+radio+edit'
url += '+lyrics+dirty+'
# Search for video, try to avoid music videos since they suck
search = self.curl(url)
left = "watch?v="
right = "\""
video_id = self.getbetween(search, left, right)
return video_id
except Exception as e:
# This means no results (or no internet connection)
self.log_error("Failed to ytdl for some reason", 10)
return None
def terminate(self):
self.alive = False
### THESE FUNCTIONS HANDLE LOGGING ###
def report(self, msg):
sys.stdout.flush()
print msg
# Grab the lock
with open(self.log_file, 'a') as f:
f.write(self.now() + ':\t')
f.write(msg + '\n')
return msg
def log(self, msg, log_level=0):
# This function is too complicated to properly comment
if log_level <= self.verbose:
return self.report(msg)
else:
return msg # This is pythonic
def log_error(self, e, log_level=0):
# Grab the lock
if log_level <= self.verbose:
with open(self.err_file, 'a') as f:
f.write(self.now() + ':\t')
f.write(str(e) + ('\n'))
self.report("An exception has been raised: %s" % (e,))
return e
def read(filen):
config = dict()
try:
with open(filen) as cfg:
for line in cfg:
line = line.split('#')[0] # Comments, yay!
line = line.split('//')[0] # //Comments, yay!
parts = line.split(':')
if len(parts) == 2:
config[parts[0].strip()] = parts[1].strip()
else:
pass # This is pythonic
return config
except Exception as e:
print "Error opening settings file %s" % filen
return None
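# Expected pirate.conf layout (values below are hypothetical; one "key: value" per line,
# with '#' and '//' starting comments). Keys read elsewhere in this script:
#   username: my_spotify_user
#   client_id: <spotify client id>
#   client_secret: <spotify client secret>
#   dl_folder: dl
#   log_folder: logs
#   err_folder: errs
#   download_threads: 4
#   verbose: 10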
settings = read("./dat/pirate.conf")
app = Pirate(settings)
app.init()
app.go()
| 31.425373 | 112 | 0.513576 | [
"MIT"
] | TylerLeite/autopirate | pirate.py | 12,633 | Python |
import time
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.utils.data
from torch import optim
from torch.utils import data
from tqdm import tqdm
from networks import LATENT_CODE_SIZE, device
# from train_autoencoder import SDFSampleDataset, save_checkpoint, SIGMA
class SDFNet(nn.Module):
def __init__(self, latent_code_size=LATENT_CODE_SIZE, dropout_prob=0.2, point_dim=3):
super(SDFNet, self).__init__()
SDF_NET_BREADTH = latent_code_size * 2
# the decoder should only have xyz information, without sdf values
self.layers1 = nn.Sequential(
nn.utils.weight_norm(nn.Linear(point_dim + latent_code_size, SDF_NET_BREADTH)),
nn.ReLU(),
nn.Dropout(dropout_prob),
nn.utils.weight_norm(nn.Linear(SDF_NET_BREADTH, SDF_NET_BREADTH)),
nn.ReLU(),
nn.Dropout(dropout_prob),
nn.utils.weight_norm(nn.Linear(SDF_NET_BREADTH, SDF_NET_BREADTH)),
nn.ReLU(),
nn.Dropout(dropout_prob),
nn.utils.weight_norm(nn.Linear(SDF_NET_BREADTH, SDF_NET_BREADTH - latent_code_size - point_dim)),
nn.ReLU(),
nn.Dropout(dropout_prob),
)
self.layers2 = nn.Sequential(
nn.utils.weight_norm(nn.Linear(SDF_NET_BREADTH, SDF_NET_BREADTH)),
nn.ReLU(),
nn.Dropout(dropout_prob),
nn.utils.weight_norm(nn.Linear(SDF_NET_BREADTH, SDF_NET_BREADTH)),
nn.ReLU(),
nn.Dropout(dropout_prob),
nn.utils.weight_norm(nn.Linear(SDF_NET_BREADTH, SDF_NET_BREADTH)),
nn.ReLU(),
nn.Dropout(dropout_prob),
nn.utils.weight_norm(nn.Linear(SDF_NET_BREADTH, SDF_NET_BREADTH)),
nn.ReLU(),
nn.Dropout(dropout_prob),
nn.Linear(SDF_NET_BREADTH, 1),
nn.Tanh()
)
def forward(self, input):
"""
        :param input: [B, N, latent_code_size + point_dim] - latent code concatenated with the xyz point
        :return: predicted sdf values, [B, N, 1] (Tanh-bounded)
"""
x = self.layers1(input)
x = torch.cat((x, input), dim=-1)
x = self.layers2(x)
return x
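# Minimal usage sketch for SDFNet (shapes are illustrative, not from the original file):
#   decoder = SDFNet()
#   latent = torch.randn(2, 1000, LATENT_CODE_SIZE)      # [B, N, latent]
#   points = torch.rand(2, 1000, 3) * 2 - 1              # [B, N, 3]
#   sdf = decoder(torch.cat((latent, points), dim=-1))   # [B, N, 1], in (-1, 1) from Tanh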
# if __name__ == '__main__':
# experiment = '5_samples_latent_128_no_reg'
# num_epochs = 500
#
# decoder = SDFNet()
#
# optimiser = optim.Adam(decoder.parameters(), lr=1e-5)
# # model, optimiser, start_epoch, training_loss = load_or_init_model(experiment)
# dataset = SDFSampleDataset('data/SdfSamples/ShapeNetV2/03001627/', '5_sample.json')
# batch_size = 5
# normal_distribution = torch.distributions.normal.Normal(0, 0.0001)
# latent_codes = normal_distribution.sample((MODEL_COUNT, LATENT_CODE_SIZE)).to(device)
# latent_codes.requires_grad = True
# train_data = data.DataLoader(dataset, batch_size=batch_size, shuffle=True, num_workers=4)
#
# # training loop starts
# training_loss = []
# for epoch in range(1, num_epochs + 1):
# start_time = time.time()
# running_loss = []
#
# for i_batch, batch in tqdm(enumerate(train_data)):
# optimiser.zero_grad()
# batch = batch.to(device) # [B, point_dim, N]
# sdf_pred, input_trans, latent_code = model(batch)
# sdf_gt = batch[:, -1, :].squeeze()
#
# loss = l1_loss(sdf_gt, sdf_pred) # TODO: experiment with only the l1 loss
# loss += SIGMA**2 * min(1, epoch / 100) * torch.mean(torch.norm(latent_code, dim=1))
# loss.backward()
# optimiser.step()
# running_loss.append(loss.item())
#
# epoch_duration = time.time() - start_time
# epoch_loss = np.mean(running_loss)
# training_loss.append(epoch_loss)
#
# print("Epoch {:d}, {:.1f}s. Loss: {:.8f}".format(epoch, epoch_duration, epoch_loss))
#
# if epoch_loss < 0.02:
# save_checkpoint(epoch, model, optimiser, training_loss, experiment, filename='sub002')
#
# # always save the latest snapshot
# save_checkpoint(epoch, model, optimiser, training_loss, experiment)
# if epoch % 100 == 0:
#         save_checkpoint(epoch, model, optimiser, training_loss, experiment, filename=str(epoch))
| 37.347826 | 109 | 0.622352 | [
"MIT"
] | FrankieYin/master_project | networks/sdf_net_decoder.py | 4,295 | Python |