ext | sha | content |
---|---|---|
py | 1a36ec3cf6e3bc7699584ab57f02e7ca4528da61 | import datetime
from base64 import b64decode, b64encode
from collections import deque
from decimal import Decimal
from enum import Enum
from functools import singledispatch, wraps
from inspect import isfunction
from json import JSONDecoder, JSONEncoder
from modulefinder import Module
from types import FunctionType, MethodType
from typing import Optional
from uuid import UUID
import dateutil.parser
from eventsourcing.exceptions import EncoderTypeError
from eventsourcing.utils.topic import get_topic, resolve_topic
try:
import orjson
except ImportError:
orjson: Optional[Module] = None # type: ignore
JSON_SEPARATORS = (",", ":")
def encoderpolicy(arg=None):
"""
Decorator for encoder policy.
Allows default behaviour to be built up from methods
registered for different types of things, rather than
chain of isinstance() calls in a long if-else block.
"""
def _mutator(func):
wrapped = singledispatch(func)
@wraps(wrapped)
def wrapper(*args, **kwargs):
obj = kwargs.get("obj") or args[-1]
return wrapped.dispatch(type(obj))(*args, **kwargs)
wrapper.register = wrapped.register
return wrapper
assert isfunction(arg), arg
return _mutator(arg)
def decoderpolicy(arg=None):
"""
Decorator for decoder policy.
Allows default behaviour to be built up from methods
registered for different named keys, rather than
chain of "in dict" queries in a long if-else block.
"""
def _mutator(func):
wrapped = func
decoder_map = {}
@wraps(wrapped)
def wrapper(*args, **kwargs):
d = kwargs.get("d") or args[-1]
keys = list(d.keys())
if len(keys) == 1:
try:
decoder_func = decoder_map[keys[0]]
except KeyError:
return d
else:
return decoder_func(d)
else:
return d
def register(key):
def decorator(decoder_func):
decoder_map[key] = decoder_func
return decoder_func
return decorator
wrapper.register = register
return wrapper
assert isfunction(arg), arg
return _mutator(arg)
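# Illustrative note (not part of the library): a policy built with these two
# decorators dispatches on the type of its last positional argument (encoder
# policies, via singledispatch) or on a dict's single key (decoder policies).
# For example, the encoder/decoder pairs registered further below turn a UUID
# into {"UUID": "<hex>"} and back again.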
class ObjectJSONEncoder(JSONEncoder):
def __init__(self, sort_keys=False):
super().__init__(sort_keys=sort_keys, separators=JSON_SEPARATORS)
def encode(self, o) -> bytes:
o = self.encode_object(o)
if self.sort_keys is True or orjson is None:
return super(ObjectJSONEncoder, self).encode(o).encode("utf8")
else:
return orjson.dumps(o)
def encode_object(self, o):
return self.encode_container(encoder(o))
@encoderpolicy
def encode_container(self, o):
return o
@encode_container.register(dict)
def encode_dict(self, o):
if type(o) == dict:
return self.encode_dict_state(o)
else:
return {
"__dict__": {
"topic": get_topic(o.__class__),
"state": self.encode_dict_state(o),
}
}
def encode_dict_state(self, o):
return {k: self.encode_object(v) for (k, v) in o.items()}
@encode_container.register(tuple)
def encode_tuple(self, o):
if type(o) == tuple:
return {"__tuple__": self.encode_object(list(o))}
else:
return {
"__tuple__": {
"state": self.encode_object(list(o)),
"topic": get_topic(o.__class__),
}
}
@encode_container.register(list)
def encode_list(self, o):
if type(o) == list:
return [self.encode_object(i) for i in o]
else:
return {
"__list__": {
"state": [self.encode_object(i) for i in o],
"topic": get_topic(o.__class__),
}
}
@encode_container.register(set)
def encode_set(self, o):
if type(o) == set:
return {"__set__": self.encode_iterable(o)}
else:
return {
"__set__": {
"state": self.encode_iterable(o),
"topic": get_topic(o.__class__),
}
}
def encode_iterable(self, o):
return self.encode_object(self.sort_keys and sorted(o) or list(o))
@encode_container.register(frozenset)
def encode_frozenset(self, o):
if type(o) == frozenset:
return {"__frozenset__": self.encode_iterable(o)}
else:
return {
"__frozenset__": {
"state": self.encode_iterable(o),
"topic": get_topic(o.__class__),
}
}
@encode_container.register(deque)
def encode_deque(self, o):
if type(o) == deque:
return {"__deque__": self.encode_object(list(o))}
else:
return {
"__deque__": {
"state": self.encode_object(list(o)),
"topic": get_topic(o.__class__),
}
}
@encode_container.register(object)
def encode_instance(self, o):
if hasattr(o, "__slots__") and o.__slots__ != ():
topic = get_topic(o.__class__)
state = {k: self.encode_object(getattr(o, k)) for k in o.__slots__}
return {"__class__": {"state": state, "topic": topic}}
elif hasattr(o, "__dict__"):
topic = get_topic(o.__class__)
state = {k: self.encode_object(v) for k, v in o.__dict__.items()}
return {"__class__": {"state": state, "topic": topic}}
else:
return o
@encoderpolicy
def encoder(o):
return o
class ObjectJSONDecoder(JSONDecoder):
def __init__(self, object_hook=None, **kwargs):
super(ObjectJSONDecoder, self).__init__(
object_hook=object_hook or decoder, **kwargs
)
@decoderpolicy
def decoder(d):
return d
@encoder.register(type)
def encode_type(o):
return {"__type__": get_topic(o)}
@encoder.register(MethodType)
def encode_method(o):
raise EncoderTypeError(o)
@encoder.register(FunctionType)
def encode_function(o):
raise EncoderTypeError(o)
@decoder.register("__type__")
def decode_type(d):
return resolve_topic(d["__type__"])
@decoder.register("__class__")
def decode_object(d):
topic = d["__class__"]["topic"]
state = d["__class__"]["state"]
obj_class = resolve_topic(topic)
obj = object.__new__(obj_class)
if hasattr(obj, "__dict__"):
obj.__dict__.update(state)
else:
for k, v in state.items():
object.__setattr__(obj, k, v)
return obj
@encoder.register(UUID)
def encode_uuid(obj):
return {"UUID": obj.hex}
@decoder.register("UUID")
def decode_uuid(d):
return UUID(d["UUID"])
@encoder.register(datetime.datetime)
def encode_datetime(obj):
return {"ISO8601_datetime": obj.strftime("%Y-%m-%dT%H:%M:%S.%f%z")}
@decoder.register("ISO8601_datetime")
def decode_datetime(d):
return dateutil.parser.parse(d["ISO8601_datetime"])
@encoder.register(datetime.date)
def encode_date(obj):
return {"ISO8601_date": obj.isoformat()}
@decoder.register("ISO8601_date")
def decode_date(d):
return datetime.datetime.strptime(d["ISO8601_date"], "%Y-%m-%d").date()
@encoder.register(datetime.time)
def encode_time(obj):
return {"ISO8601_time": obj.strftime("%H:%M:%S.%f")}
@decoder.register("ISO8601_time")
def decode_time(d):
hour, minute, seconds = d["ISO8601_time"].split(":")
second, microsecond = seconds.split(".")
return datetime.time(int(hour), int(minute), int(second), int(microsecond))
@encoder.register(Decimal)
def encode_decimal(obj):
return {"__decimal__": str(obj)}
@decoder.register("__decimal__")
def decode_decimal(d):
return Decimal(d["__decimal__"])
@encoder.register(Enum)
def encode_enum(obj):
return {"__enum__": {"topic": get_topic(type(obj)), "name": obj.name}}
@decoder.register("__enum__")
def decode_enum(d):
topic = d["__enum__"]["topic"]
name = d["__enum__"]["name"]
enum = resolve_topic(topic)
return getattr(enum, name)
@decoder.register("__deque__")
def decode_deque(d):
deque_data = d["__deque__"]
if type(deque_data) == dict:
topic = deque_data["topic"]
try:
state = deque_data["state"]
except KeyError:
state = deque_data["values"]
deque_type = resolve_topic(topic)
return deque_type(state)
else:
return deque(deque_data)
@decoder.register("__tuple__")
def decode_tuple(d):
tuple_data = d["__tuple__"]
if type(tuple_data) == dict:
# For NamedTuple objects.
topic = tuple_data["topic"]
state = tuple_data["state"]
tuple_type = resolve_topic(topic)
obj = tuple_type(*state)
else:
# For standard tuple objects.
obj = tuple(tuple_data)
return obj
@decoder.register("__dict__")
def decode_dict(d):
topic = d["__dict__"]["topic"]
state = d["__dict__"]["state"]
dict_type = resolve_topic(topic)
return dict_type(state)
@decoder.register("__set__")
def decode_set(d):
set_data = d["__set__"]
if isinstance(set_data, dict):
topic = set_data["topic"]
state = set_data["state"]
set_type = resolve_topic(topic)
return set_type(state)
else:
return set(set_data)
@decoder.register("__frozenset__")
def decode_frozenset(d):
set_data = d["__frozenset__"]
if isinstance(set_data, dict):
topic = set_data["topic"]
state = set_data["state"]
set_type = resolve_topic(topic)
return set_type(state)
else:
return frozenset(set_data)
@encoder.register(bytes)
def encode_bytes(o):
return {"__bytes__": b64str_from_bytes(o)}
@decoder.register("__bytes__")
def decode_bytes(d):
return bytes_from_b64str(d["__bytes__"])
def b64str_from_bytes(value: bytes) -> str:
return b64encode(value).decode("utf8")
def bytes_from_b64str(value):
return b64decode(value.encode("utf8"))
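# Illustrative usage (a minimal sketch, not part of the library): round-trip a
# dict containing a UUID and a datetime through the encoder/decoder pair above.
if __name__ == "__main__":
    from uuid import uuid4

    _encoder = ObjectJSONEncoder(sort_keys=True)
    _decoder = ObjectJSONDecoder()
    _original = {"id": uuid4(), "at": datetime.datetime(2020, 1, 1, 12, 30)}
    _encoded = _encoder.encode(_original)  # JSON as bytes
    _decoded = _decoder.decode(_encoded.decode("utf8"))
    assert _decoded["id"] == _original["id"]
    assert _decoded["at"] == _original["at"]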
|
py | 1a36ed2c2c60162b19e673edd056d7310640878e | import CLARK_Automator
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("-f", "--force", action="store_true",
help="Don't ask to update redmine api key")
args = parser.parse_args()
automator = CLARK_Automator.Automate(args.force)
# Try to run the automation tasks; if an error occurs, log the traceback and re-raise it
try:
automator.timed_retrieve()
except Exception as e:
import traceback
automator.timelog.time_print("[Error] Dumping...\n%s" % traceback.format_exc())
raise
|
py | 1a36ed6ceba0d578051616320140f4e78e0c8eeb | import chances
from ..utils.exceptions import TalosDataError
def sample_reducer(self, length, max_value):
'''Sample Reducer (Helper)
NOTE: The Scan() object is in self.main_self because
the object being passed here is ParamGrid() object where
the Scan() object is attached as self.main_self.
Utilize 'grid_downsample', 'shuffle', and 'random_method'
to reduce the param_grid before starting the experiment.
This is the simplest method in Talos for dealing with curse
of dimensionality.
Options are uniform random, stratified random, latin hypercube
sampling, and latin hypercube with sudoku style constraint.
Returns the reduced param_grid as numpy array.
'''
random_method = self.main_self.random_method
# calculate the size of the downsample
n = int(max_value * self.main_self.grid_downsample)
# throw an error if
if n < 1:
raise TalosDataError("No permutations in grid. Incease grid_downsample")
# Initialize Randomizer()
r = chances.Randomizer(max_value, length)
# use the user selected method
if random_method == 'sobol':
out = r.sobol()
elif random_method == 'quantum':
out = r.quantum()
elif random_method == 'halton':
out = r.halton()
elif random_method == 'korobov_matrix':
out = r.korobov_matrix()
elif random_method == 'latin_sudoku':
out = r.latin_sudoku()
elif random_method == 'latin_matrix':
out = r.latin_matrix()
elif random_method == 'latin_improved':
out = r.latin_improved()
elif random_method == 'uniform_mersenne':
out = r.uniform_mersenne()
elif random_method == 'uniform_crypto':
out = r.uniform_crypto()
elif random_method == 'ambience':
out = r.ambience()
return out
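# Illustrative note (not part of Talos): with max_value=50000 permutations and
# grid_downsample=0.01, the reduced sample size is n = int(50000 * 0.01) = 500
# rows, drawn with the Randomizer method selected via random_method.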
|
py | 1a36eedf39cc28855aee253bddc7ec927560267f | """Classes for class-based forward declaration."""
class Forward(object):
"""Base class for autocomplete forward declaration."""
@property
def type(self):
"""Forward type. Should be implemented in subclasses."""
raise NotImplementedError("Please use one of my subclasses")
def to_dict(self):
"""Convert to dictionary which will be rendered as JSON."""
return {
"type": self.type
}
class Field(Forward):
"""Forward field value.
The type of the forwarded value from the field is either string, list of
strings or boolean.
The following rules are used to deduce forwarded type.
- If there is only one field in the form or subform with name ``src``
and this field is a checkbox without ``value`` HTML-attribute,
then boolean value indicating if this checkbox is checked is forwarded.
- If there is only one field in the form or subform with name ``src``
and it has ``multiple`` HTML-attribute, then this field is forwarded as a
list of strings, containing values from this field.
- If there are one or more fields in the form with name ``src`` and all of
them are checkboxes with the HTML-attribute ``value`` set, then the list of
strings containing the values of the checked checkboxes is forwarded.
- Otherwise ``src`` field value forwarded as a string.
.. py:attribute:: src
The name of the form field whose value will be forwarded to a view.
.. py:attribute:: dst
The name of the key of the forwarded value from the src field in the
forwarded dictionary. If this value is ``None``, then the key is
``src``.
"""
type = "field"
def __init__(self, src, dst=None):
"""Instantiate a forwarded field value."""
self.src = src
self.dst = dst
def to_dict(self):
"""Convert to dictionary which will be rendered as JSON."""
d = super(Field, self).to_dict()
d.update(src=self.src)
if self.dst is not None:
d.update(dst=self.dst)
return d
class Const(Forward):
"""Forward arbitrary constant value.
.. py:attribute:: val
The value to forward. Must be JSON-serializable.
.. py:attribute:: dst
The name of the key of the forwarded value.
"""
type = "const"
def __init__(self, val, dst):
"""Instantiate a forwarded constant value."""
self.val = val
self.dst = dst
def to_dict(self):
"""Convert to dictionary which will be rendered as JSON."""
d = super(Const, self).to_dict()
d.update(val=self.val)
d.update(dst=self.dst)
return d
class JavaScript(Forward):
"""Run registered javascript handler and forward its returned value.
You can register custom forward handler in your JS code as follows:
.. code-block:: javascript
yl.registerForwardHandler("your_handler", function (autocompleteElement) {
// your code here
});
Then if your add ``JavaScript("your_handler", "some_value")`` to your
forwards declaration, your function will be called, autocomplete field
HTML element will be passed as ``autocompleteElement`` and returned value
will be added to forward dictionary with ``some_value`` key.
.. py:attribute:: handler
The name of the registered handler.
.. py:attribute:: dst
The name of the key of the forwarded value from the src field in the
forwarded dictionary. If this value is ``None``, then the key is
``handler``
"""
type = "javascript"
def __init__(self, handler, dst=None):
"""Initialize Javascript class."""
self.handler = handler
self.dst = dst
def to_dict(self):
"""Convert to dictionary which will be rendered as JSON."""
d = super(JavaScript, self).to_dict()
d.update(handler=self.handler)
d.update(dst=self.dst)
return d
class Self(Forward):
"""Forward own value.
The same as :class:`Field`, except that `src` is always this field
itself.
.. py:attribute:: dst
The name of the key of the forwarded value from the src field in the
forwarded dictionary. If this value is ``None``, then the key is
``self``.
"""
type = "self"
def __init__(self, dst=None):
"""Instantiate a forwarded field value."""
self.dst = dst
def to_dict(self):
"""Convert to dictionary which will be rendered as JSON."""
d = super(Self, self).to_dict()
if self.dst is not None:
d.update(dst=self.dst)
return d
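# Illustrative example (not part of the library): the JSON-serializable dicts
# produced by the forward declarations above.
if __name__ == "__main__":
    import json

    forwards = [Field("country", "country_id"), Const(42, "answer"), Self()]
    print(json.dumps([f.to_dict() for f in forwards]))
    # [{"type": "field", "src": "country", "dst": "country_id"},
    #  {"type": "const", "val": 42, "dst": "answer"}, {"type": "self"}]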
|
py | 1a36efed6ee33a8d3a8ab1ae364af8fd807d36d0 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import requests
from contextlib import contextmanager
from collections import namedtuple
from typing import Optional, Dict, Iterator, Any, cast
from aruba.errors import RequestError, FormatError
from aruba.common import _ask_input, _ask_pass, _fill, Config, Settings
from aruba import common
def _api_auth_url(api_host: str) -> str:
return "https://{}:4343/v1/api".format(api_host)
def _api_config_url(api_host: str) -> str:
return "https://{}:4343/v1/configuration".format(api_host)
# ------------------------
# Authentication methods
# ------------------------
class Session(common.Session):
def __init__(self, api_host: str, username: str, password: str, verify: bool = True) -> None:
self._api_host = api_host
self._username = username
self._password = password
self._verify = verify
uidaruba = self._login()
super().__init__(_api_config_url(api_host), uidaruba,
headers={ "Cookie": f"SESSION={uidaruba}" },
params={ "UIDARUBA": uidaruba })
def _login(self) -> str:
"""Lanza un intento de autenticación contra un MD/MM, devuelve el UIDARUBA"""
login_url = _api_auth_url(self._api_host) + "/login"
credentials = { "username": self._username, "password": self._password }
response = requests.post(login_url, verify=self._verify, data=credentials)
if response.status_code != 200:
raise RequestError(login_url, None, credentials, response)
# No error; access the tokens
data = response.json()
gres = data.get("_global_result", None)
if not gres:
raise FormatError(login_url, None, credentials, data, "_global_result")
uid = gres.get("UIDARUBA", None)
if not uid:
raise FormatError(login_url, None, credentials, data, "UIDARUBA")
return uid
def _logout(self) -> None:
"""Cierra una sesion REST contra un MD/MM"""
logout_url = _api_auth_url(self._api_host) + "/logout"
credentials = { "UIDARUBA": self.secret }
response = requests.get(logout_url, verify=self._verify, data=credentials)
if response.status_code != 200:
raise RequestError(logout_url, None, credentials, response)
def refresh(self) -> None:
uidaruba = self._login()
headers = cast(Dict[str, str], self._headers)
params = cast(Dict[str, Any], self._params)
headers['Cookie'] = f"SESSION={uidaruba}"
params["UIDARUBA"] = uidaruba
self.secret = uidaruba
@contextmanager
def session(config: Settings, api_host: Optional[str] = None, username: Optional[str] = None,
password: Optional[str] = None, verify: bool = True) -> Iterator[Session]:
"""Obtiene un uidaruba para una MD/MM"""
# Cargo de la config valores por defecto para todos los parámetros
data = _fill({
"api_host": api_host,
"username": username,
"password": password,
}, config.get("controller"))
# Pull the variables out, for convenience
Params = namedtuple('Params', ('api_host', 'username', 'password'))
asserted = Params(data["api_host"], data["username"], data["password"])
# Perform the appropriate authentication
curr = Session(asserted.api_host, asserted.username, asserted.password, verify=verify)
try:
yield curr
finally:
curr._logout()
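# Note: the finally clause above guarantees the REST session is logged out even
# if the body of the with-block raises.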
if __name__ == "__main__":
# Load the configuration file and read default values
defaults = {
"api_host": None,
"username": None,
"password": None
}
config = Config()
controller = config.get('controller')
if controller is not None:
defaults.update(controller)
# Ask for the basic settings
print("-" * 30 + " SETTINGS " + "-" * 30)
data = {
"api_host": _ask_input("Nombre del servidor", defaults["api_host"]),
"username": _ask_input("Nombre de usuario", defaults["username"]),
"password": _ask_pass("Password"),
}
# Test authentication and save the config
config.set("controller", data)
with session(config, verify=False) as curr:
print(f"OK! uidaruba = {curr.secret}")
config.save()
print(f"Tokens guardados en {config.path()}")
|
py | 1a36f0d85a0b03a88f77c44dfb373661ca33c7b4 | def get_prs_chan_count(family_name=""):
"""
Returns the number of available PRS channels for the given family
:param family_name: string representation of the family name
:return: integer representing the number of available PRS channels
"""
return 12
def get_prs_chan_with_gpio_count(family_name=""):
"""
Returns the number of PRS channels with GPIO output available for the given family
:param family_name: string representation of the family name
:return: integer representing the number of PRS channels with GPIO output
"""
return 12
def get_available_modules_for_family():
available_modules_for_family = [
"CMU",
"PRS",
"TIMER0",
"TIMER1",
"WTIMER0",
"USART0",
"USART1",
"USART2",
"LEUART0",
"LETIMER0",
"PCNT0",
"I2C0",
"I2C1",
"ACMP0",
"ACMP1",
"LESENSE",
"GPIO",
"PTI",
"MODEM",
"ADC0",
"VDAC0",
"CSEN",
"LFXO",
"IDAC0",
]
return available_modules_for_family
def em4_pin_to_loc(pin_name=None):
pin_loc_map = {
"PF2": {
"number": 0,
"define": "(1 << 0) << _GPIO_EM4WUEN_EM4WUEN_SHIFT",
},
"PF7": {
"number": 1,
"define": "(1 << 1) << _GPIO_EM4WUEN_EM4WUEN_SHIFT",
},
"PD14": {
"number": 4,
"define": "(1 << 4) << _GPIO_EM4WUEN_EM4WUEN_SHIFT",
},
"PA3": {
"number": 8,
"define": "(1 << 8) << _GPIO_EM4WUEN_EM4WUEN_SHIFT",
},
"PB13": {
"number": 9,
"define": "(1 << 9) << _GPIO_EM4WUEN_EM4WUEN_SHIFT",
},
"PC10": {
"number": 12,
"define": "(1 << 12) << _GPIO_EM4WUEN_EM4WUEN_SHIFT",
},
}
if pin_name is not None:
return pin_loc_map[pin_name]
else:
return pin_loc_map
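# Illustrative usage (not part of the generator): look up the EM4 wake-up
# location for a single pin, or fetch the whole map.
#   em4_pin_to_loc("PF2")  # -> {"number": 0, "define": "(1 << 0) << _GPIO_EM4WUEN_EM4WUEN_SHIFT"}
#   em4_pin_to_loc()       # -> dict of all wake-up-capable pins keyed by pin name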
class stacked_flash(object):
@staticmethod
def items():
props = {
}
return props.items()
def allowed_route_conflicts(route):
allowed_conflicts = {
"BSP_BTL_BUTTON": ['BSP_LED', 'BSP_BUTTON'],
"BSP_BUTTON_COUNT": ['BSP_LED', 'BSP_BTL_BUTTON'],
"BSP_BUTTON0": ['BSP_LED', 'BSP_BTL_BUTTON'],
"BSP_BUTTON1": ['BSP_LED', 'BSP_BTL_BUTTON'],
"BSP_BUTTON2": ['BSP_LED', 'BSP_BTL_BUTTON'],
"BSP_BUTTON3": ['BSP_LED', 'BSP_BTL_BUTTON'],
"BSP_BUTTON4": ['BSP_LED', 'BSP_BTL_BUTTON'],
"BSP_BUTTON5": ['BSP_LED', 'BSP_BTL_BUTTON'],
"BSP_BUTTON6": ['BSP_LED', 'BSP_BTL_BUTTON'],
"BSP_BUTTON7": ['BSP_LED', 'BSP_BTL_BUTTON'],
"BSP_CSEN_SCAN_MASK0": ['BSP_CSEN_BONDED_INPUT', 'BSP_CSEN_SCAN_INPUT'],
"BSP_CSEN_SCAN_MASK1": ['BSP_CSEN_BONDED_INPUT', 'BSP_CSEN_SCAN_INPUT'],
"BSP_CSEN_BONDED_MASK0": ['BSP_CSEN_BONDED_INPUT', 'BSP_CSEN_SCAN_INPUT'],
"BSP_CSEN_BONDED_MASK1": ['BSP_CSEN_BONDED_INPUT', 'BSP_CSEN_SCAN_INPUT'],
"BSP_LED_COUNT": ['BSP_BUTTON', 'BSP_BTL_BUTTON'],
"BSP_LED0": ['BSP_BUTTON', 'BSP_BTL_BUTTON'],
"BSP_LED1": ['BSP_BUTTON', 'BSP_BTL_BUTTON'],
"BSP_LED2": ['BSP_BUTTON', 'BSP_BTL_BUTTON'],
"BSP_LED3": ['BSP_BUTTON', 'BSP_BTL_BUTTON'],
"BSP_LED4": ['BSP_BUTTON', 'BSP_BTL_BUTTON'],
"BSP_LED5": ['BSP_BUTTON', 'BSP_BTL_BUTTON'],
"BSP_LED6": ['BSP_BUTTON', 'BSP_BTL_BUTTON'],
"BSP_LED7": ['BSP_BUTTON', 'BSP_BTL_BUTTON'],
"BSP_SPIDISPLAY_EXTCOMIN": ['PRS_CH'],
}
return allowed_conflicts.get(route, [])
|
py | 1a36f16a2b45ef5de3d68a58141d0f648cb5b79d | # Zulip's main markdown implementation. See docs/subsystems/markdown.md for
# detailed documentation on our markdown syntax.
import functools
import html
import logging
import os
import re
import time
import urllib
import urllib.parse
from collections import defaultdict, deque
from dataclasses import dataclass
from datetime import datetime
from io import StringIO
from typing import (
Any,
Callable,
Dict,
Generic,
Iterable,
List,
Optional,
Set,
Tuple,
TypeVar,
Union,
)
from typing.re import Match, Pattern
from xml.etree import ElementTree as etree
from xml.etree.ElementTree import Element, SubElement
import ahocorasick
import dateutil.parser
import dateutil.tz
import markdown
import requests
from django.conf import settings
from django.db.models import Q
from hyperlink import parse
from markdown.extensions import codehilite, nl2br, sane_lists, tables
from typing_extensions import TypedDict
from zerver.lib import mention as mention
from zerver.lib.bugdown import fenced_code
from zerver.lib.bugdown.fenced_code import FENCE_RE
from zerver.lib.cache import NotFoundInCache, cache_with_key
from zerver.lib.camo import get_camo_url
from zerver.lib.emoji import (
codepoint_to_name,
emoticon_regex,
name_to_codepoint,
translate_emoticons,
)
from zerver.lib.exceptions import BugdownRenderingException
from zerver.lib.mention import extract_user_group, possible_mentions, possible_user_group_mentions
from zerver.lib.tex import render_tex
from zerver.lib.thumbnail import user_uploads_or_external
from zerver.lib.timeout import TimeoutExpired, timeout
from zerver.lib.timezone import get_common_timezones
from zerver.lib.url_encoding import encode_stream, hash_util_encode
from zerver.lib.url_preview import preview as link_preview
from zerver.models import (
MAX_MESSAGE_LENGTH,
Message,
Realm,
UserGroup,
UserGroupMembership,
UserProfile,
all_realm_filters,
get_active_streams,
realm_filters_for_realm,
)
ReturnT = TypeVar('ReturnT')
def one_time(method: Callable[[], ReturnT]) -> Callable[[], ReturnT]:
'''
Use this decorator with extreme caution.
The function you wrap should have no dependency
on any arguments (no args, no kwargs) nor should
it depend on any global state.
'''
val = None
def cache_wrapper() -> ReturnT:
nonlocal val
if val is None:
val = method()
return val
return cache_wrapper
class FullNameInfo(TypedDict):
id: int
email: str
full_name: str
DbData = Dict[str, Any]
# Format version of the bugdown rendering; stored along with rendered
# messages so that we can efficiently determine what needs to be re-rendered
version = 1
_T = TypeVar('_T')
ElementStringNone = Union[Element, Optional[str]]
AVATAR_REGEX = r'!avatar\((?P<email>[^)]*)\)'
GRAVATAR_REGEX = r'!gravatar\((?P<email>[^)]*)\)'
EMOJI_REGEX = r'(?P<syntax>:[\w\-\+]+:)'
def verbose_compile(pattern: str) -> Any:
return re.compile(
f"^(.*?){pattern}(.*?)$",
re.DOTALL | re.UNICODE | re.VERBOSE,
)
def normal_compile(pattern: str) -> Any:
return re.compile(
fr"^(.*?){pattern}(.*)$",
re.DOTALL | re.UNICODE,
)
STREAM_LINK_REGEX = r"""
(?<![^\s'"\(,:<]) # Start after whitespace or specified chars
\#\*\* # and after hash sign followed by double asterisks
(?P<stream_name>[^\*]+) # stream name can contain anything
\*\* # ends by double asterisks
"""
@one_time
def get_compiled_stream_link_regex() -> Pattern:
return verbose_compile(STREAM_LINK_REGEX)
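# Illustrative example: the verbose pattern above matches stream links written
# as "#**design**", capturing "design" in the stream_name group.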
STREAM_TOPIC_LINK_REGEX = r"""
(?<![^\s'"\(,:<]) # Start after whitespace or specified chars
\#\*\* # and after hash sign followed by double asterisks
(?P<stream_name>[^\*>]+) # stream name can contain anything except >
> # > acts as separator
(?P<topic_name>[^\*]+) # topic name can contain anything
\*\* # ends by double asterisks
"""
@one_time
def get_compiled_stream_topic_link_regex() -> Pattern:
return verbose_compile(STREAM_TOPIC_LINK_REGEX)
LINK_REGEX: Pattern = None
def get_web_link_regex() -> str:
# We create this one time, but not at startup. So the
# first message rendered in any process will have some
# extra costs. It's roughly 75ms to run this code, so
# caching the value in LINK_REGEX is super important here.
global LINK_REGEX
if LINK_REGEX is not None:
return LINK_REGEX
tlds = '|'.join(list_of_tlds())
# A link starts at a word boundary, and ends at space, punctuation, or end-of-input.
#
# We detect a url either by the `https?://` or by building around the TLD.
# In lieu of having a recursive regex (which python doesn't support) to match
# arbitrary numbers of nested matching parenthesis, we manually build a regexp that
# can match up to six
# The inner_paren_contents chunk matches the innermore non-parenthesis-holding text,
# and the paren_group matches text with, optionally, a matching set of parens
inner_paren_contents = r"[^\s()\"]*"
paren_group = r"""
[^\s()\"]*? # Containing characters that won't end the URL
(?: \( %s \) # and more characters in matched parens
[^\s()\"]*? # followed by more characters
)* # zero-or-more sets of paired parens
"""
nested_paren_chunk = paren_group
for i in range(6):
nested_paren_chunk = nested_paren_chunk % (paren_group,)
nested_paren_chunk = nested_paren_chunk % (inner_paren_contents,)
file_links = r"| (?:file://(/[^/ ]*)+/?)" if settings.ENABLE_FILE_LINKS else r""
REGEX = fr"""
(?<![^\s'"\(,:<]) # Start after whitespace or specified chars
# (Double-negative lookbehind to allow start-of-string)
(?P<url> # Main group
(?:(?: # Domain part
https?://[\w.:@-]+? # If it has a protocol, anything goes.
|(?: # Or, if not, be more strict to avoid false-positives
(?:[\w-]+\.)+ # One or more domain components, separated by dots
(?:{tlds}) # TLDs (filled in via format from tlds-alpha-by-domain.txt)
)
)
(?:/ # A path, beginning with /
{nested_paren_chunk} # zero-to-6 sets of paired parens
)?) # Path is optional
| (?:[\w.-]+\@[\w.-]+\.[\w]+) # Email is separate, since it can't have a path
{file_links} # File path start with file:///, enable by setting ENABLE_FILE_LINKS=True
| (?:bitcoin:[13][a-km-zA-HJ-NP-Z1-9]{{25,34}}) # Bitcoin address pattern, see https://mokagio.github.io/tech-journal/2014/11/21/regex-bitcoin.html
)
(?= # URL must be followed by (not included in group)
[!:;\?\),\.\'\"\>]* # Optional punctuation characters
(?:\Z|\s) # followed by whitespace or end of string
)
"""
LINK_REGEX = verbose_compile(REGEX)
return LINK_REGEX
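# Illustrative examples of strings the generated pattern is intended to match:
#   https://zulip.com/some/path(with_parens)   (explicit protocol, matched parens)
#   example.org/foo                            (bare domain built from the TLD list)
#   someone@example.com                        (email)
#   bitcoin:1A1zP1eP5QGefi2DMPTfTL5SLmv7DivfNa (bitcoin address)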
def clear_state_for_testing() -> None:
# The link regex never changes in production, but our tests
# try out both sides of ENABLE_FILE_LINKS, so we need
# a way to clear it.
global LINK_REGEX
LINK_REGEX = None
bugdown_logger = logging.getLogger()
def rewrite_local_links_to_relative(db_data: Optional[DbData], link: str) -> str:
"""If the link points to a local destination (e.g. #narrow/...),
generate a relative link that will open it in the current window.
"""
if db_data:
realm_uri_prefix = db_data['realm_uri'] + "/"
if (
link.startswith(realm_uri_prefix)
and urllib.parse.urljoin(realm_uri_prefix, link[len(realm_uri_prefix):])
== link
):
return link[len(realm_uri_prefix):]
return link
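# Illustrative example: with db_data['realm_uri'] == "https://chat.example.com",
# a link "https://chat.example.com/#narrow/stream/42-design" is rewritten to the
# relative "#narrow/stream/42-design"; links to other hosts pass through unchanged.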
def url_embed_preview_enabled(message: Optional[Message]=None,
realm: Optional[Realm]=None,
no_previews: bool=False) -> bool:
if not settings.INLINE_URL_EMBED_PREVIEW:
return False
if no_previews:
return False
if realm is None:
if message is not None:
realm = message.get_realm()
if realm is None:
# realm can be None for odd use cases
# like generating documentation or running
# test code
return True
return realm.inline_url_embed_preview
def image_preview_enabled(message: Optional[Message]=None,
realm: Optional[Realm]=None,
no_previews: bool=False) -> bool:
if not settings.INLINE_IMAGE_PREVIEW:
return False
if no_previews:
return False
if realm is None:
if message is not None:
realm = message.get_realm()
if realm is None:
# realm can be None for odd use cases
# like generating documentation or running
# test code
return True
return realm.inline_image_preview
def list_of_tlds() -> List[str]:
# HACK we manually blacklist a few TLDs
blacklist = ['PY\n', "MD\n"]
# tlds-alpha-by-domain.txt comes from https://data.iana.org/TLD/tlds-alpha-by-domain.txt
tlds_file = os.path.join(os.path.dirname(__file__), 'tlds-alpha-by-domain.txt')
tlds = [tld.lower().strip() for tld in open(tlds_file)
if tld not in blacklist and not tld[0].startswith('#')]
tlds.sort(key=len, reverse=True)
return tlds
def walk_tree(root: Element,
processor: Callable[[Element], Optional[_T]],
stop_after_first: bool=False) -> List[_T]:
results = []
queue = deque([root])
while queue:
currElement = queue.popleft()
for child in currElement:
if child:
queue.append(child)
result = processor(child)
if result is not None:
results.append(result)
if stop_after_first:
return results
return results
@dataclass
class ElementFamily:
grandparent: Optional[Element]
parent: Element
child: Element
in_blockquote: bool
T = TypeVar("T")
class ResultWithFamily(Generic[T]):
family: ElementFamily
result: T
def __init__(self, family: ElementFamily, result: T):
self.family = family
self.result = result
class ElementPair:
parent: Optional["ElementPair"]
value: Element
def __init__(self, parent: Optional["ElementPair"], value: Element):
self.parent = parent
self.value = value
def walk_tree_with_family(root: Element,
processor: Callable[[Element], Optional[_T]],
) -> List[ResultWithFamily[_T]]:
results = []
queue = deque([ElementPair(parent=None, value=root)])
while queue:
currElementPair = queue.popleft()
for child in currElementPair.value:
if child:
queue.append(ElementPair(parent=currElementPair, value=child))
result = processor(child)
if result is not None:
if currElementPair.parent is not None:
grandparent_element = currElementPair.parent
grandparent = grandparent_element.value
else:
grandparent = None
family = ElementFamily(
grandparent=grandparent,
parent=currElementPair.value,
child=child,
in_blockquote=has_blockquote_ancestor(currElementPair),
)
results.append(ResultWithFamily(
family=family,
result=result,
))
return results
def has_blockquote_ancestor(element_pair: Optional[ElementPair]) -> bool:
if element_pair is None:
return False
elif element_pair.value.tag == 'blockquote':
return True
else:
return has_blockquote_ancestor(element_pair.parent)
@cache_with_key(lambda tweet_id: tweet_id, cache_name="database", with_statsd_key="tweet_data")
def fetch_tweet_data(tweet_id: str) -> Optional[Dict[str, Any]]:
if settings.TEST_SUITE:
from . import testing_mocks
res = testing_mocks.twitter(tweet_id)
else:
creds = {
'consumer_key': settings.TWITTER_CONSUMER_KEY,
'consumer_secret': settings.TWITTER_CONSUMER_SECRET,
'access_token_key': settings.TWITTER_ACCESS_TOKEN_KEY,
'access_token_secret': settings.TWITTER_ACCESS_TOKEN_SECRET,
}
if not all(creds.values()):
return None
# We lazily import twitter here because its import process is
# surprisingly slow, and doing so has a significant impact on
# the startup performance of `manage.py` commands.
import twitter
try:
api = twitter.Api(tweet_mode='extended', **creds)
# Sometimes Twitter hangs on responses. Timing out here
# will cause the Tweet to go through as-is with no inline
# preview, rather than having the message be rejected
# entirely. This timeout needs to be less than our overall
# formatting timeout.
tweet = timeout(3, api.GetStatus, tweet_id)
res = tweet.AsDict()
except AttributeError:
bugdown_logger.error('Unable to load twitter api, you may have the wrong '
'library installed, see https://github.com/zulip/zulip/issues/86')
return None
except TimeoutExpired:
# We'd like to try again later and not cache the bad result,
# so we need to re-raise the exception (just as though
# we were being rate-limited)
raise
except twitter.TwitterError as e:
t = e.args[0]
if len(t) == 1 and ('code' in t[0]) and (t[0]['code'] == 34):
# Code 34 means that the message doesn't exist; return
# None so that we will cache the error
return None
elif len(t) == 1 and ('code' in t[0]) and (t[0]['code'] == 88 or
t[0]['code'] == 130):
# Code 88 means that we were rate-limited and 130
# means Twitter is having capacity issues; either way
# just raise the error so we don't cache None and will
# try again later.
raise
else:
# It's not clear what to do in cases of other errors,
# but for now it seems reasonable to log at error
# level (so that we get notified), but then cache the
# failure to proceed with our usual work
bugdown_logger.exception("Unknown error fetching tweet data")
return None
return res
HEAD_START_RE = re.compile('^head[ >]')
HEAD_END_RE = re.compile('^/head[ >]')
META_START_RE = re.compile('^meta[ >]')
META_END_RE = re.compile('^/meta[ >]')
def fetch_open_graph_image(url: str) -> Optional[Dict[str, Any]]:
in_head = False
# HTML will auto-close meta tags; when we start the next tag, add
# a closing tag if the previous one has not been closed yet.
last_closed = True
head = []
# TODO: What if response content is huge? Should we get headers first?
try:
content = requests.get(url, timeout=1).text
except Exception:
return None
# Extract the head and meta tags
# All meta tags are self closing, have no children or are closed
# automatically.
for part in content.split('<'):
if not in_head and HEAD_START_RE.match(part):
# Started the head node; output it so we have a document root
in_head = True
head.append('<head>')
elif in_head and HEAD_END_RE.match(part):
# Found the end of the head; close any remaining tag, then stop
# processing
in_head = False
if not last_closed:
last_closed = True
head.append('</meta>')
head.append('</head>')
break
elif in_head and META_START_RE.match(part):
# Found a meta node copy it
if not last_closed:
head.append('</meta>')
last_closed = True
head.append('<')
head.append(part)
if '/>' not in part:
last_closed = False
elif in_head and META_END_RE.match(part):
# End of a meta node just copy it to close the tag
head.append('<')
head.append(part)
last_closed = True
try:
doc = etree.fromstring(''.join(head))
except etree.ParseError:
return None
og_image = doc.find('meta[@property="og:image"]')
og_title = doc.find('meta[@property="og:title"]')
og_desc = doc.find('meta[@property="og:description"]')
title = None
desc = None
if og_image is not None:
image = og_image.get('content')
else:
return None
if og_title is not None:
title = og_title.get('content')
if og_desc is not None:
desc = og_desc.get('content')
return {'image': image, 'title': title, 'desc': desc}
def get_tweet_id(url: str) -> Optional[str]:
parsed_url = urllib.parse.urlparse(url)
if not (parsed_url.netloc == 'twitter.com' or parsed_url.netloc.endswith('.twitter.com')):
return None
to_match = parsed_url.path
# In old-style twitter.com/#!/wdaher/status/1231241234-style URLs,
# we need to look at the fragment instead
if parsed_url.path == '/' and len(parsed_url.fragment) > 5:
to_match = parsed_url.fragment
tweet_id_match = re.match(r'^!?/.*?/status(es)?/(?P<tweetid>\d{10,30})(/photo/[0-9])?/?$', to_match)
if not tweet_id_match:
return None
return tweet_id_match.group("tweetid")
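# Illustrative example: get_tweet_id("https://twitter.com/#!/wdaher/status/1231241234")
# returns "1231241234"; URLs on hosts other than twitter.com return None.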
class InlineHttpsProcessor(markdown.treeprocessors.Treeprocessor):
def run(self, root: Element) -> None:
# Get all URLs from the blob
found_imgs = walk_tree(root, lambda e: e if e.tag == "img" else None)
for img in found_imgs:
url = img.get("src")
if urllib.parse.urlsplit(url).scheme != "http":
# Don't rewrite images on our own site (e.g. emoji).
continue
img.set("src", get_camo_url(url))
class BacktickPattern(markdown.inlinepatterns.Pattern):
""" Return a `<code>` element containing the matching text. """
def __init__(self, pattern: str) -> None:
markdown.inlinepatterns.Pattern.__init__(self, pattern)
self.ESCAPED_BSLASH = '{}{}{}'.format(markdown.util.STX, ord('\\'), markdown.util.ETX)
self.tag = 'code'
def handleMatch(self, m: Match[str]) -> Union[str, Element]:
if m.group(4):
el = Element(self.tag)
# Modified to not strip whitespace
el.text = markdown.util.AtomicString(m.group(4))
return el
else:
return m.group(2).replace('\\\\', self.ESCAPED_BSLASH)
class InlineInterestingLinkProcessor(markdown.treeprocessors.Treeprocessor):
TWITTER_MAX_IMAGE_HEIGHT = 400
TWITTER_MAX_TO_PREVIEW = 3
INLINE_PREVIEW_LIMIT_PER_MESSAGE = 5
def __init__(self, md: markdown.Markdown) -> None:
markdown.treeprocessors.Treeprocessor.__init__(self, md)
def add_a(
self,
root: Element,
url: str,
link: str,
title: Optional[str]=None,
desc: Optional[str]=None,
class_attr: str="message_inline_image",
data_id: Optional[str]=None,
insertion_index: Optional[int]=None,
already_thumbnailed: bool=False,
) -> None:
desc = desc if desc is not None else ""
# Update message.has_image attribute.
if 'message_inline_image' in class_attr and self.md.zulip_message:
self.md.zulip_message.has_image = True
if insertion_index is not None:
div = Element("div")
root.insert(insertion_index, div)
else:
div = SubElement(root, "div")
div.set("class", class_attr)
a = SubElement(div, "a")
a.set("href", link)
if title is not None:
a.set("title", title)
if data_id is not None:
a.set("data-id", data_id)
img = SubElement(a, "img")
if settings.THUMBNAIL_IMAGES and (not already_thumbnailed) and user_uploads_or_external(url):
# See docs/thumbnailing.md for some high-level documentation.
#
# We strip leading '/' from relative URLs here to ensure
# consistency in what gets passed to /thumbnail
url = url.lstrip('/')
img.set("src", "/thumbnail?url={}&size=thumbnail".format(
urllib.parse.quote(url, safe=''),
))
img.set('data-src-fullsize', "/thumbnail?url={}&size=full".format(
urllib.parse.quote(url, safe=''),
))
else:
img.set("src", url)
if class_attr == "message_inline_ref":
summary_div = SubElement(div, "div")
title_div = SubElement(summary_div, "div")
title_div.set("class", "message_inline_image_title")
title_div.text = title
desc_div = SubElement(summary_div, "desc")
desc_div.set("class", "message_inline_image_desc")
def add_oembed_data(self, root: Element, link: str, extracted_data: Dict[str, Any]) -> bool:
oembed_resource_type = extracted_data.get('type', '')
title = extracted_data.get('title')
if oembed_resource_type == 'photo':
image = extracted_data.get('image')
if image:
self.add_a(root, image, link, title=title)
return True
elif oembed_resource_type == 'video':
html = extracted_data['html']
image = extracted_data['image']
title = extracted_data.get('title')
description = extracted_data.get('description')
self.add_a(root, image, link, title, description,
"embed-video message_inline_image",
html, already_thumbnailed=True)
return True
return False
def add_embed(self, root: Element, link: str, extracted_data: Dict[str, Any]) -> None:
oembed = extracted_data.get('oembed', False)
if oembed and self.add_oembed_data(root, link, extracted_data):
return
img_link = extracted_data.get('image')
if not img_link:
# Don't add an embed if an image is not found
return
container = SubElement(root, "div")
container.set("class", "message_embed")
parsed_img_link = urllib.parse.urlparse(img_link)
# Append domain where relative img_link url is given
if not parsed_img_link.netloc:
parsed_url = urllib.parse.urlparse(link)
domain = '{url.scheme}://{url.netloc}/'.format(url=parsed_url)
img_link = urllib.parse.urljoin(domain, img_link)
img = SubElement(container, "a")
img.set("style", "background-image: url(" + img_link + ")")
img.set("href", link)
img.set("class", "message_embed_image")
data_container = SubElement(container, "div")
data_container.set("class", "data-container")
title = extracted_data.get('title')
if title:
title_elm = SubElement(data_container, "div")
title_elm.set("class", "message_embed_title")
a = SubElement(title_elm, "a")
a.set("href", link)
a.set("title", title)
a.text = title
description = extracted_data.get('description')
if description:
description_elm = SubElement(data_container, "div")
description_elm.set("class", "message_embed_description")
description_elm.text = description
def get_actual_image_url(self, url: str) -> str:
# Add specific per-site cases to convert image-preview urls to image urls.
# See https://github.com/zulip/zulip/issues/4658 for more information
parsed_url = urllib.parse.urlparse(url)
if (parsed_url.netloc == 'github.com' or parsed_url.netloc.endswith('.github.com')):
# https://github.com/zulip/zulip/blob/master/static/images/logo/zulip-icon-128x128.png ->
# https://raw.githubusercontent.com/zulip/zulip/master/static/images/logo/zulip-icon-128x128.png
split_path = parsed_url.path.split('/')
if len(split_path) > 3 and split_path[3] == "blob":
return urllib.parse.urljoin('https://raw.githubusercontent.com',
'/'.join(split_path[0:3] + split_path[4:]))
return url
def is_image(self, url: str) -> bool:
if not self.md.image_preview_enabled:
return False
parsed_url = urllib.parse.urlparse(url)
# Skip HTML URLs that end with image extensions but are not direct images
if parsed_url.netloc == 'pasteboard.co':
return False
# List from https://support.google.com/chromeos/bin/answer.py?hl=en&answer=183093
for ext in [".bmp", ".gif", ".jpe", "jpeg", ".jpg", ".png", ".webp"]:
if parsed_url.path.lower().endswith(ext):
return True
return False
def corrected_image_source(self, url: str) -> str:
# This function adjusts any urls from linx.li and
# wikipedia.org to point to the actual image url. It's
# structurally very similar to dropbox_image, and possibly
# should be rewritten to use open graph, but has some value.
parsed_url = urllib.parse.urlparse(url)
if parsed_url.netloc.lower().endswith('.wikipedia.org'):
# Redirecting from "/wiki/File:" to "/wiki/Special:FilePath/File:"
# A possible alternative, that avoids the redirect after hitting "Special:"
# is using the first characters of md5($filename) to generate the url
domain = parsed_url.scheme + "://" + parsed_url.netloc
correct_url = domain + parsed_url.path[:6] + 'Special:FilePath' + parsed_url.path[5:]
return correct_url
if parsed_url.netloc == 'linx.li':
return 'https://linx.li/s' + parsed_url.path
return None
def dropbox_image(self, url: str) -> Optional[Dict[str, Any]]:
# TODO: The returned Dict could possibly be a TypedDict in future.
parsed_url = urllib.parse.urlparse(url)
if (parsed_url.netloc == 'dropbox.com' or parsed_url.netloc.endswith('.dropbox.com')):
is_album = parsed_url.path.startswith('/sc/') or parsed_url.path.startswith('/photos/')
# Only allow preview Dropbox shared links
if not (parsed_url.path.startswith('/s/') or
parsed_url.path.startswith('/sh/') or
is_album):
return None
# Try to retrieve open graph protocol info for a preview
# This might be redundant right now for shared links for images.
# However, we might want to make use of title and description
# in the future. If the actual image is too big, we might also
# want to use the open graph image.
image_info = fetch_open_graph_image(url)
is_image = is_album or self.is_image(url)
# If it is from an album or not an actual image file,
# just use open graph image.
if is_album or not is_image:
# Failed to follow link to find an image preview so
# use placeholder image and guess filename
if image_info is None:
return None
image_info["is_image"] = is_image
return image_info
# Otherwise, try to retrieve the actual image.
# This is because open graph image from Dropbox may have padding
# and gifs do not work.
# TODO: What if image is huge? Should we get headers first?
if image_info is None:
image_info = dict()
image_info['is_image'] = True
parsed_url_list = list(parsed_url)
parsed_url_list[4] = "dl=1" # Replaces query
image_info["image"] = urllib.parse.urlunparse(parsed_url_list)
return image_info
return None
def youtube_id(self, url: str) -> Optional[str]:
if not self.md.image_preview_enabled:
return None
# Youtube video id extraction regular expression from https://pastebin.com/KyKAFv1s
# Slightly modified to support URLs of the forms
# - youtu.be/<id>
# - youtube.com/playlist?v=<id>&list=<list-id>
# - youtube.com/watch_videos?video_ids=<id1>,<id2>,<id3>
# If it matches, match.group(2) is the video id.
schema_re = r'(?:https?://)'
host_re = r'(?:youtu\.be/|(?:\w+\.)?youtube(?:-nocookie)?\.com/)'
param_re = r'(?:(?:(?:v|embed)/)|' + \
r'(?:(?:(?:watch|playlist)(?:_popup|_videos)?(?:\.php)?)?(?:\?|#!?)(?:.+&)?v(?:ideo_ids)?=))'
id_re = r'([0-9A-Za-z_-]+)'
youtube_re = r'^({schema_re}?{host_re}{param_re}?)?{id_re}(?(1).+)?$'
youtube_re = youtube_re.format(schema_re=schema_re, host_re=host_re, id_re=id_re, param_re=param_re)
match = re.match(youtube_re, url)
# URLs of the form youtube.com/playlist?list=<list-id> are incorrectly matched
if match is None or match.group(2) == 'playlist':
return None
return match.group(2)
def youtube_title(self, extracted_data: Dict[str, Any]) -> Optional[str]:
title = extracted_data.get("title")
if title is not None:
return f"YouTube - {title}"
return None
def youtube_image(self, url: str) -> Optional[str]:
yt_id = self.youtube_id(url)
if yt_id is not None:
return f"https://i.ytimg.com/vi/{yt_id}/default.jpg"
return None
def vimeo_id(self, url: str) -> Optional[str]:
if not self.md.image_preview_enabled:
return None
#(http|https)?:\/\/(www\.)?vimeo.com\/(?:channels\/(?:\w+\/)?|groups\/([^\/]*)\/videos\/|)(\d+)(?:|\/\?)
# If it matches, match.group('id') is the video id.
vimeo_re = r'^((http|https)?:\/\/(www\.)?vimeo.com\/' + \
r'(?:channels\/(?:\w+\/)?|groups\/' + \
r'([^\/]*)\/videos\/|)(\d+)(?:|\/\?))$'
match = re.match(vimeo_re, url)
if match is None:
return None
return match.group(5)
def vimeo_title(self, extracted_data: Dict[str, Any]) -> Optional[str]:
title = extracted_data.get("title")
if title is not None:
return f"Vimeo - {title}"
return None
def twitter_text(self, text: str,
urls: List[Dict[str, str]],
user_mentions: List[Dict[str, Any]],
media: List[Dict[str, Any]]) -> Element:
"""
Use data from the twitter API to turn links, mentions and media into A
tags. Also convert unicode emojis to images.
This works by using the urls, user_mentions and media data from
the twitter API and searching for unicode emojis in the text using
`unicode_emoji_regex`.
The first step is finding the locations of the URLs, mentions, media and
emoji in the text. For each match we build a dictionary with type, the start
location, end location, the URL to link to, and the text (codepoint and title
in case of emojis) to be used in the link (image in case of emojis).
Next we sort the matches by start location, and for each we add the
text from the end of the last link to the start of the current link to
the output. The text needs to be added to the text attribute of the first
node (the P tag) or to the tail of the last link created.
Finally we add any remaining text to the last node.
"""
to_process: List[Dict[str, Any]] = []
# Build dicts for URLs
for url_data in urls:
short_url = url_data["url"]
full_url = url_data["expanded_url"]
for match in re.finditer(re.escape(short_url), text, re.IGNORECASE):
to_process.append({
'type': 'url',
'start': match.start(),
'end': match.end(),
'url': short_url,
'text': full_url,
})
# Build dicts for mentions
for user_mention in user_mentions:
screen_name = user_mention['screen_name']
mention_string = '@' + screen_name
for match in re.finditer(re.escape(mention_string), text, re.IGNORECASE):
to_process.append({
'type': 'mention',
'start': match.start(),
'end': match.end(),
'url': 'https://twitter.com/' + urllib.parse.quote(screen_name),
'text': mention_string,
})
# Build dicts for media
for media_item in media:
short_url = media_item['url']
expanded_url = media_item['expanded_url']
for match in re.finditer(re.escape(short_url), text, re.IGNORECASE):
to_process.append({
'type': 'media',
'start': match.start(),
'end': match.end(),
'url': short_url,
'text': expanded_url,
})
# Build dicts for emojis
for match in re.finditer(unicode_emoji_regex, text, re.IGNORECASE):
orig_syntax = match.group('syntax')
codepoint = unicode_emoji_to_codepoint(orig_syntax)
if codepoint in codepoint_to_name:
display_string = ':' + codepoint_to_name[codepoint] + ':'
to_process.append({
'type': 'emoji',
'start': match.start(),
'end': match.end(),
'codepoint': codepoint,
'title': display_string,
})
to_process.sort(key=lambda x: x['start'])
p = current_node = Element('p')
def set_text(text: str) -> None:
"""
Helper to set the text or the tail of the current_node
"""
if current_node == p:
current_node.text = text
else:
current_node.tail = text
db_data = self.md.zulip_db_data
current_index = 0
for item in to_process:
# If the text we want to link starts inside already-linked text, skip it
if item['start'] < current_index:
continue
# Add text from the end of last link to the start of the current
# link
set_text(text[current_index:item['start']])
current_index = item['end']
if item['type'] != 'emoji':
elem = url_to_a(db_data, item['url'], item['text'])
assert isinstance(elem, Element)
else:
elem = make_emoji(item['codepoint'], item['title'])
current_node = elem
p.append(elem)
# Add any unused text
set_text(text[current_index:])
return p
def twitter_link(self, url: str) -> Optional[Element]:
tweet_id = get_tweet_id(url)
if tweet_id is None:
return None
try:
res = fetch_tweet_data(tweet_id)
if res is None:
return None
user: Dict[str, Any] = res['user']
tweet = Element("div")
tweet.set("class", "twitter-tweet")
img_a = SubElement(tweet, 'a')
img_a.set("href", url)
profile_img = SubElement(img_a, 'img')
profile_img.set('class', 'twitter-avatar')
# For some reason, for, e.g. tweet 285072525413724161,
# python-twitter does not give us a
# profile_image_url_https, but instead puts that URL in
# profile_image_url. So use _https if available, but fall
# back gracefully.
image_url = user.get('profile_image_url_https', user['profile_image_url'])
profile_img.set('src', image_url)
text = html.unescape(res['full_text'])
urls = res.get('urls', [])
user_mentions = res.get('user_mentions', [])
media: List[Dict[str, Any]] = res.get('media', [])
p = self.twitter_text(text, urls, user_mentions, media)
tweet.append(p)
span = SubElement(tweet, 'span')
span.text = "- {} (@{})".format(user['name'], user['screen_name'])
# Add image previews
for media_item in media:
# Only photos have a preview image
if media_item['type'] != 'photo':
continue
# Find the image size that is smaller than
# TWITTER_MAX_IMAGE_HEIGHT px tall or the smallest
size_name_tuples = list(media_item['sizes'].items())
size_name_tuples.sort(reverse=True,
key=lambda x: x[1]['h'])
for size_name, size in size_name_tuples:
if size['h'] < self.TWITTER_MAX_IMAGE_HEIGHT:
break
media_url = '{}:{}'.format(media_item['media_url_https'], size_name)
img_div = SubElement(tweet, 'div')
img_div.set('class', 'twitter-image')
img_a = SubElement(img_div, 'a')
img_a.set('href', media_item['url'])
img = SubElement(img_a, 'img')
img.set('src', media_url)
return tweet
except Exception:
# We put this in its own try-except because it requires external
# connectivity. If Twitter flakes out, we don't want to not-render
# the entire message; we just want to not show the Twitter preview.
bugdown_logger.warning("Error building Twitter link", exc_info=True)
return None
def get_url_data(self, e: Element) -> Optional[Tuple[str, Optional[str]]]:
if e.tag == "a":
return (e.get("href"), e.text)
return None
def handle_image_inlining(
self,
root: Element,
found_url: ResultWithFamily[Tuple[str, Optional[str]]],
) -> None:
grandparent = found_url.family.grandparent
parent = found_url.family.parent
ahref_element = found_url.family.child
(url, text) = found_url.result
actual_url = self.get_actual_image_url(url)
# url != text usually implies a named link, which we opt not to remove
url_eq_text = text is None or url == text
title = None if url_eq_text else text
if parent.tag == 'li':
self.add_a(parent, self.get_actual_image_url(url), url, title=title)
if not parent.text and not ahref_element.tail and url_eq_text:
parent.remove(ahref_element)
elif parent.tag == 'p':
parent_index = None
for index, uncle in enumerate(grandparent):
if uncle is parent:
parent_index = index
break
if parent_index is not None:
ins_index = self.find_proper_insertion_index(grandparent, parent, parent_index)
self.add_a(grandparent, actual_url, url, title=title, insertion_index=ins_index)
else:
# We're not inserting after parent, since parent not found.
# Append to end of list of grandparent's children as normal
self.add_a(grandparent, actual_url, url, title=title)
# If link is alone in a paragraph, delete paragraph containing it
if (len(parent) == 1 and
(not parent.text or parent.text == "\n") and
not ahref_element.tail and
url_eq_text):
grandparent.remove(parent)
else:
# If none of the above criteria match, fall back to old behavior
self.add_a(root, actual_url, url, title=title)
def find_proper_insertion_index(self, grandparent: Element, parent: Element,
parent_index_in_grandparent: int) -> int:
# If there are several inline images from same paragraph, ensure that
# they are in correct (and not opposite) order by inserting after last
# inline image from paragraph 'parent'
parent_links = [ele.attrib['href'] for ele in parent.iter(tag="a")]
insertion_index = parent_index_in_grandparent
while True:
insertion_index += 1
if insertion_index >= len(grandparent):
return insertion_index
uncle = grandparent[insertion_index]
inline_image_classes = ['message_inline_image', 'message_inline_ref']
if (
uncle.tag != 'div' or
'class' not in uncle.keys() or
uncle.attrib['class'] not in inline_image_classes
):
return insertion_index
uncle_link = list(uncle.iter(tag="a"))[0].attrib['href']
if uncle_link not in parent_links:
return insertion_index
def is_absolute_url(self, url: str) -> bool:
return bool(urllib.parse.urlparse(url).netloc)
def run(self, root: Element) -> None:
# Get all URLs from the blob
found_urls = walk_tree_with_family(root, self.get_url_data)
unique_urls = {found_url.result[0] for found_url in found_urls}
# Collect unique URLs which are not quoted as we don't do
# inline previews for links inside blockquotes.
unique_previewable_urls = {found_url.result[0] for found_url in found_urls
if not found_url.family.in_blockquote}
# Set has_link and similar flags whenever a message is processed by bugdown
if self.md.zulip_message:
self.md.zulip_message.has_link = len(found_urls) > 0
self.md.zulip_message.has_image = False # This is updated in self.add_a
self.md.zulip_message.potential_attachment_path_ids = []
for url in unique_urls:
# Due to rewrite_local_links_to_relative, we need to
# handle both relative URLs beginning with
# `/user_uploads` and beginning with `user_uploads`.
# This urllib construction converts the latter into
# the former.
parsed_url = urllib.parse.urlsplit(urllib.parse.urljoin("/", url))
host = parsed_url.netloc
if host != '' and host != self.md.zulip_realm.host:
continue
if not parsed_url.path.startswith("/user_uploads/"):
continue
path_id = parsed_url.path[len("/user_uploads/"):]
self.md.zulip_message.potential_attachment_path_ids.append(path_id)
if len(found_urls) == 0:
return
if len(unique_previewable_urls) > self.INLINE_PREVIEW_LIMIT_PER_MESSAGE:
return
processed_urls: Set[str] = set()
rendered_tweet_count = 0
for found_url in found_urls:
(url, text) = found_url.result
if url in unique_previewable_urls and url not in processed_urls:
processed_urls.add(url)
else:
continue
if not self.is_absolute_url(url):
if self.is_image(url):
self.handle_image_inlining(root, found_url)
# We don't have a strong use case for doing url preview for relative links.
continue
dropbox_image = self.dropbox_image(url)
if dropbox_image is not None:
class_attr = "message_inline_ref"
is_image = dropbox_image["is_image"]
if is_image:
class_attr = "message_inline_image"
# Not making use of title and description of images
self.add_a(root, dropbox_image['image'], url,
title=dropbox_image.get('title'),
desc=dropbox_image.get('desc', ""),
class_attr=class_attr,
already_thumbnailed=True)
continue
if self.is_image(url):
image_source = self.corrected_image_source(url)
if image_source is not None:
found_url = ResultWithFamily(
family=found_url.family,
result=(image_source, image_source),
)
self.handle_image_inlining(root, found_url)
continue
if get_tweet_id(url) is not None:
if rendered_tweet_count >= self.TWITTER_MAX_TO_PREVIEW:
# Only render at most TWITTER_MAX_TO_PREVIEW tweets per message
continue
twitter_data = self.twitter_link(url)
if twitter_data is None:
# This link is not actually a tweet known to twitter
continue
rendered_tweet_count += 1
div = SubElement(root, "div")
div.set("class", "inline-preview-twitter")
div.insert(0, twitter_data)
continue
youtube = self.youtube_image(url)
if youtube is not None:
yt_id = self.youtube_id(url)
self.add_a(root, youtube, url, None, None,
"youtube-video message_inline_image",
yt_id, already_thumbnailed=True)
# NOTE: We don't `continue` here, to allow replacing the URL with
# the title, if INLINE_URL_EMBED_PREVIEW feature is enabled.
# The entire preview would ideally be shown only if the feature
# is enabled, but URL previews are a beta feature and YouTube
# previews are pretty stable.
db_data = self.md.zulip_db_data
if db_data and db_data['sent_by_bot']:
continue
if not self.md.url_embed_preview_enabled:
continue
try:
extracted_data = link_preview.link_embed_data_from_cache(url)
except NotFoundInCache:
self.md.zulip_message.links_for_preview.add(url)
continue
if extracted_data:
if youtube is not None:
title = self.youtube_title(extracted_data)
if title is not None:
found_url.family.child.text = title
continue
self.add_embed(root, url, extracted_data)
if self.vimeo_id(url):
title = self.vimeo_title(extracted_data)
if title:
found_url.family.child.text = title
class Avatar(markdown.inlinepatterns.Pattern):
def handleMatch(self, match: Match[str]) -> Optional[Element]:
img = Element('img')
email_address = match.group('email')
email = email_address.strip().lower()
profile_id = None
db_data = self.md.zulip_db_data
if db_data is not None:
user_dict = db_data['email_info'].get(email)
if user_dict is not None:
profile_id = user_dict['id']
img.set('class', 'message_body_gravatar')
img.set('src', f'/avatar/{profile_id or email}?s=30')
img.set('title', email)
img.set('alt', email)
return img
def possible_avatar_emails(content: str) -> Set[str]:
emails = set()
for REGEX in [AVATAR_REGEX, GRAVATAR_REGEX]:
matches = re.findall(REGEX, content)
for email in matches:
if email:
emails.add(email)
return emails
class Timestamp(markdown.inlinepatterns.Pattern):
def handleMatch(self, match: Match[str]) -> Optional[Element]:
span = Element('span')
span.set('class', 'timestamp')
timestamp = None
try:
timestamp = dateutil.parser.parse(match.group('time'), tzinfos=get_common_timezones())
except ValueError:
try:
timestamp = datetime.fromtimestamp(float(match.group('time')))
except ValueError:
pass
if timestamp:
if timestamp.tzinfo:
timestamp = timestamp - timestamp.utcoffset()
span.set('data-timestamp', timestamp.strftime("%s"))
# Set text to initial input, so even if parsing fails, the data remains intact.
span.text = markdown.util.AtomicString(match.group('time'))
return span
# All of our emojis (non-ZWJ sequences) belong to one of these Unicode blocks:
# \U0001f100-\U0001f1ff - Enclosed Alphanumeric Supplement
# \U0001f200-\U0001f2ff - Enclosed Ideographic Supplement
# \U0001f300-\U0001f5ff - Miscellaneous Symbols and Pictographs
# \U0001f600-\U0001f64f - Emoticons (Emoji)
# \U0001f680-\U0001f6ff - Transport and Map Symbols
# \U0001f900-\U0001f9ff - Supplemental Symbols and Pictographs
# \u2000-\u206f - General Punctuation
# \u2300-\u23ff - Miscellaneous Technical
# \u2400-\u243f - Control Pictures
# \u2440-\u245f - Optical Character Recognition
# \u2460-\u24ff - Enclosed Alphanumerics
# \u2500-\u257f - Box Drawing
# \u2580-\u259f - Block Elements
# \u25a0-\u25ff - Geometric Shapes
# \u2600-\u26ff - Miscellaneous Symbols
# \u2700-\u27bf - Dingbats
# \u2900-\u297f - Supplemental Arrows-B
# \u2b00-\u2bff - Miscellaneous Symbols and Arrows
# \u3000-\u303f - CJK Symbols and Punctuation
# \u3200-\u32ff - Enclosed CJK Letters and Months
unicode_emoji_regex = '(?P<syntax>['\
'\U0001F100-\U0001F64F' \
'\U0001F680-\U0001F6FF' \
'\U0001F900-\U0001F9FF' \
'\u2000-\u206F' \
'\u2300-\u27BF' \
'\u2900-\u297F' \
'\u2B00-\u2BFF' \
'\u3000-\u303F' \
'\u3200-\u32FF' \
'])'
# The equivalent JS regex is \ud83c[\udd00-\udfff]|\ud83d[\udc00-\ude4f]|\ud83d[\ude80-\udeff]|
# \ud83e[\udd00-\uddff]|[\u2000-\u206f]|[\u2300-\u27bf]|[\u2b00-\u2bff]|[\u3000-\u303f]|
# [\u3200-\u32ff]. See below comments for explanation. The JS regex is used by marked.js for
# frontend unicode emoji processing.
# The JS regex \ud83c[\udd00-\udfff]|\ud83d[\udc00-\ude4f] represents U0001f100-\U0001f64f
# The JS regex \ud83d[\ude80-\udeff] represents \U0001f680-\U0001f6ff
# The JS regex \ud83e[\udd00-\uddff] represents \U0001f900-\U0001f9ff
# The JS regex [\u2000-\u206f] represents \u2000-\u206f
# The JS regex [\u2300-\u27bf] represents \u2300-\u27bf
# Similarly other JS regexes can be mapped to the respective unicode blocks.
# For more information, please refer to the following article:
# http://crocodillon.com/blog/parsing-emoji-unicode-in-javascript
def make_emoji(codepoint: str, display_string: str) -> Element:
# Replace underscore in emoji's title with space
title = display_string[1:-1].replace("_", " ")
span = Element('span')
span.set('class', f'emoji emoji-{codepoint}')
span.set('title', title)
span.set('role', 'img')
span.set('aria-label', title)
span.text = markdown.util.AtomicString(display_string)
return span
def make_realm_emoji(src: str, display_string: str) -> Element:
elt = Element('img')
elt.set('src', src)
elt.set('class', 'emoji')
elt.set("alt", display_string)
elt.set("title", display_string[1:-1].replace("_", " "))
return elt
def unicode_emoji_to_codepoint(unicode_emoji: str) -> str:
codepoint = hex(ord(unicode_emoji))[2:]
# Unicode codepoints are a minimum of length 4, padded
# with zeroes if the length is less than 4.
while len(codepoint) < 4:
codepoint = '0' + codepoint
return codepoint
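# Illustrative examples (not part of the original module; inputs assumed):
#   unicode_emoji_to_codepoint('\U0001f600') -> '1f600'
#   unicode_emoji_to_codepoint('\u2764')     -> '2764'
#   unicode_emoji_to_codepoint('\u00a9')     -> '00a9'  (zero-padded to length 4)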
class EmoticonTranslation(markdown.inlinepatterns.Pattern):
""" Translates emoticons like `:)` into emoji like `:smile:`. """
def handleMatch(self, match: Match[str]) -> Optional[Element]:
db_data = self.md.zulip_db_data
if db_data is None or not db_data['translate_emoticons']:
return None
emoticon = match.group('emoticon')
translated = translate_emoticons(emoticon)
name = translated[1:-1]
return make_emoji(name_to_codepoint[name], translated)
class UnicodeEmoji(markdown.inlinepatterns.Pattern):
def handleMatch(self, match: Match[str]) -> Optional[Element]:
orig_syntax = match.group('syntax')
codepoint = unicode_emoji_to_codepoint(orig_syntax)
if codepoint in codepoint_to_name:
display_string = ':' + codepoint_to_name[codepoint] + ':'
return make_emoji(codepoint, display_string)
else:
return None
class Emoji(markdown.inlinepatterns.Pattern):
def handleMatch(self, match: Match[str]) -> Optional[Element]:
orig_syntax = match.group("syntax")
name = orig_syntax[1:-1]
active_realm_emoji: Dict[str, Dict[str, str]] = {}
db_data = self.md.zulip_db_data
if db_data is not None:
active_realm_emoji = db_data['active_realm_emoji']
if self.md.zulip_message and name in active_realm_emoji:
return make_realm_emoji(active_realm_emoji[name]['source_url'], orig_syntax)
elif name == 'zulip':
return make_realm_emoji('/static/generated/emoji/images/emoji/unicode/zulip.png', orig_syntax)
elif name in name_to_codepoint:
return make_emoji(name_to_codepoint[name], orig_syntax)
else:
return orig_syntax
def content_has_emoji_syntax(content: str) -> bool:
return re.search(EMOJI_REGEX, content) is not None
class Tex(markdown.inlinepatterns.Pattern):
def handleMatch(self, match: Match[str]) -> Element:
rendered = render_tex(match.group('body'), is_inline=True)
if rendered is not None:
# We need to give Python-Markdown an ElementTree object, but if we
# give it one with correctly stored XML namespaces, it will mangle
# everything when serializing it. So we play this stupid game to
# store xmlns as a normal attribute. :-[
assert ' zulip-xmlns="' not in rendered
rendered = rendered.replace(' xmlns="', ' zulip-xmlns="')
parsed = etree.iterparse(StringIO(rendered))
for event, elem in parsed:
if 'zulip-xmlns' in elem.attrib:
elem.attrib['xmlns'] = elem.attrib.pop('zulip-xmlns')
root = elem
return root
else: # Something went wrong while rendering
span = Element('span')
span.set('class', 'tex-error')
span.text = '$$' + match.group('body') + '$$'
return span
def sanitize_url(url: str) -> Optional[str]:
"""
Sanitize a URL against XSS attacks.
See the docstring on markdown.inlinepatterns.LinkPattern.sanitize_url.
"""
try:
parts = urllib.parse.urlparse(url.replace(' ', '%20'))
scheme, netloc, path, params, query, fragment = parts
except ValueError:
# Bad url - so bad it couldn't be parsed.
return ''
# If there is no scheme or netloc and there is a '@' in the path,
# treat it as a mailto: and set the appropriate scheme
if scheme == '' and netloc == '' and '@' in path:
scheme = 'mailto'
elif scheme == '' and netloc == '' and len(path) > 0 and path[0] == '/':
# Allow domain-relative links
return urllib.parse.urlunparse(('', '', path, params, query, fragment))
elif (scheme, netloc, path, params, query) == ('', '', '', '', '') and len(fragment) > 0:
# Allow fragment links
return urllib.parse.urlunparse(('', '', '', '', '', fragment))
# Zulip modification: If scheme is not specified, assume http://
# We re-enter sanitize_url because netloc etc. need to be re-parsed.
if not scheme:
return sanitize_url('http://' + url)
locless_schemes = ['mailto', 'news', 'file', 'bitcoin']
if netloc == '' and scheme not in locless_schemes:
# This fails regardless of anything else.
# Return immediately to save additional processing
return None
# Upstream code will accept a URL like javascript://foo because it
# appears to have a netloc. Additionally there are plenty of other
# schemes that do weird things like launch external programs. To be
# on the safe side, we whitelist the scheme.
if scheme not in ('http', 'https', 'ftp', 'mailto', 'file', 'bitcoin'):
return None
# Upstream code scans path, parameters, and query for colon characters
# because
#
# some aliases [for javascript:] will appear to urllib.parse to have
# no scheme. On top of that relative links (i.e.: "foo/bar.html")
# have no scheme.
#
# We already converted an empty scheme to http:// above, so we skip
# the colon check, which would also forbid a lot of legitimate URLs.
# Url passes all tests. Return url as-is.
return urllib.parse.urlunparse((scheme, netloc, path, params, query, fragment))
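# Illustrative behavior (inputs assumed, not from the original source):
#   sanitize_url('zulip.com/help')       -> 'http://zulip.com/help'   (missing scheme added)
#   sanitize_url('/user_uploads/x.png')  -> '/user_uploads/x.png'     (domain-relative link allowed)
#   sanitize_url('javascript:alert(1)')  -> None                      (scheme not in the whitelist)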
def url_to_a(db_data: Optional[DbData], url: str, text: Optional[str]=None) -> Union[Element, str]:
a = Element('a')
href = sanitize_url(url)
if href is None:
# Rejected by sanitize_url; render it as plain text.
return url
if text is None:
text = markdown.util.AtomicString(url)
href = rewrite_local_links_to_relative(db_data, href)
a.set('href', href)
a.text = text
return a
class CompiledPattern(markdown.inlinepatterns.Pattern):
def __init__(self, compiled_re: Pattern, md: markdown.Markdown) -> None:
# This is similar to the superclass's small __init__ function,
# but we skip the compilation step and let the caller give us
# a compiled regex.
self.compiled_re = compiled_re
self.md = md
class AutoLink(CompiledPattern):
def handleMatch(self, match: Match[str]) -> ElementStringNone:
url = match.group('url')
db_data = self.md.zulip_db_data
return url_to_a(db_data, url)
class OListProcessor(sane_lists.SaneOListProcessor):
def __init__(self, parser: Any) -> None:
parser.md.tab_length = 2
super().__init__(parser)
parser.md.tab_length = 4
class UListProcessor(sane_lists.SaneUListProcessor):
""" Unordered lists, but with 2-space indent """
def __init__(self, parser: Any) -> None:
parser.md.tab_length = 2
super().__init__(parser)
parser.md.tab_length = 4
class ListIndentProcessor(markdown.blockprocessors.ListIndentProcessor):
""" Process unordered list blocks.
Based on markdown.blockprocessors.ListIndentProcessor, but with 2-space indent
"""
def __init__(self, parser: Any) -> None:
# HACK: Set the tab length to 2 just for the initialization of
# this class, so that bulleted lists (and only bulleted lists)
# work off 2-space indentation.
parser.md.tab_length = 2
super().__init__(parser)
parser.md.tab_length = 4
class HashHeaderProcessor(markdown.blockprocessors.HashHeaderProcessor):
""" Process Hash Headers.
Based on markdown.blockprocessors.HashHeaderProcessor, but requires space for heading.
"""
# Original regex for hashheader is
# RE = re.compile(r'(?:^|\n)(?P<level>#{1,6})(?P<header>(?:\\.|[^\\])*?)#*(?:\n|$)')
RE = re.compile(r'(?:^|\n)(?P<level>#{1,6})\s(?P<header>(?:\\.|[^\\])*?)#*(?:\n|$)')
class BlockQuoteProcessor(markdown.blockprocessors.BlockQuoteProcessor):
""" Process BlockQuotes.
Based on markdown.blockprocessors.BlockQuoteProcessor, but with 2-space indent
"""
# Original regex for blockquote is RE = re.compile(r'(^|\n)[ ]{0,3}>[ ]?(.*)')
RE = re.compile(r'(^|\n)(?!(?:[ ]{0,3}>\s*(?:$|\n))*(?:$|\n))'
r'[ ]{0,3}>[ ]?(.*)')
mention_re = re.compile(mention.find_mentions)
def clean(self, line: str) -> str:
# Silence all the mentions inside blockquotes
line = re.sub(self.mention_re, lambda m: "@_{}".format(m.group('match')), line)
# And then run the upstream processor's code for removing the '>'
return super().clean(line)
@dataclass
class Fence:
fence_str: str
is_code: bool
class BugdownListPreprocessor(markdown.preprocessors.Preprocessor):
""" Allows list blocks that come directly after another block
to be rendered as a list.
Detects paragraphs that have a matching list item that comes
directly after a line of text, and inserts a newline between
to satisfy Markdown"""
LI_RE = re.compile(r'^[ ]*([*+-]|\d\.)[ ]+(.*)', re.MULTILINE)
def run(self, lines: List[str]) -> List[str]:
""" Insert a newline between a paragraph and ulist if missing """
inserts = 0
in_code_fence: bool = False
open_fences: List[Fence] = []
copy = lines[:]
for i in range(len(lines) - 1):
# Ignore anything that is inside a fenced code block but not quoted.
# We ignore all lines where some parent is a non quote code block.
m = FENCE_RE.match(lines[i])
if m:
fence_str = m.group('fence')
is_code = m.group('lang') not in ('quote', 'quoted')
has_open_fences = len(open_fences) > 0
matches_last_fence = fence_str == open_fences[-1].fence_str if has_open_fences else False
closes_last_fence = not m.group('lang') and matches_last_fence
if closes_last_fence:
open_fences.pop()
else:
open_fences.append(Fence(fence_str, is_code))
in_code_fence = any([fence.is_code for fence in open_fences])
# If we're not in a fenced block and we detect an upcoming list
# hanging off any block (including a list of another type), add
# a newline.
li1 = self.LI_RE.match(lines[i])
li2 = self.LI_RE.match(lines[i+1])
if not in_code_fence and lines[i]:
if (li2 and not li1) or (li1 and li2 and
(len(li1.group(1)) == 1) != (len(li2.group(1)) == 1)):
copy.insert(i+inserts+1, '')
inserts += 1
return copy
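# Illustrative example (input assumed, not from the original source):
#   run(["Some text", "* item"]) returns ["Some text", "", "* item"], inserting
#   the blank line Markdown needs to start a list directly after a paragraph.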
# Name for the outer capture group we use to separate whitespace and
# other delimiters from the actual content. This value won't be an
# option in user-entered capture groups.
OUTER_CAPTURE_GROUP = "linkifier_actual_match"
def prepare_realm_pattern(source: str) -> str:
"""Augment a realm filter so it only matches after start-of-string,
whitespace, or opening delimiters, won't match if there are word
characters directly after, and saves what was matched as
OUTER_CAPTURE_GROUP."""
return fr"""(?<![^\s'"\(,:<])(?P<{OUTER_CAPTURE_GROUP}>{source})(?!\w)"""
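# Illustrative example (hypothetical linkifier pattern, not from the original source):
# prepare_realm_pattern(r'#(?P<id>[0-9]+)') yields a regex that matches "#123" at the
# start of a string or after whitespace/quotes/opening delimiters, rejects matches
# followed directly by a word character (e.g. "#123abc"), and exposes the full match
# through the OUTER_CAPTURE_GROUP named group.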
# Given a regular expression pattern, linkifies groups that match it
# using the provided format string to construct the URL.
class RealmFilterPattern(markdown.inlinepatterns.Pattern):
""" Applied a given realm filter to the input """
def __init__(self, source_pattern: str,
format_string: str,
markdown_instance: Optional[markdown.Markdown]=None) -> None:
self.pattern = prepare_realm_pattern(source_pattern)
self.format_string = format_string
markdown.inlinepatterns.Pattern.__init__(self, self.pattern, markdown_instance)
def handleMatch(self, m: Match[str]) -> Union[Element, str]:
db_data = self.md.zulip_db_data
return url_to_a(db_data,
self.format_string % m.groupdict(),
m.group(OUTER_CAPTURE_GROUP))
class UserMentionPattern(markdown.inlinepatterns.Pattern):
def handleMatch(self, m: Match[str]) -> Optional[Element]:
match = m.group('match')
silent = m.group('silent') == '_'
db_data = self.md.zulip_db_data
if self.md.zulip_message and db_data is not None:
if match.startswith("**") and match.endswith("**"):
name = match[2:-2]
else:
return None
wildcard = mention.user_mention_matches_wildcard(name)
id_syntax_match = re.match(r'.+\|(?P<user_id>\d+)$', name)
if id_syntax_match:
id = id_syntax_match.group("user_id")
user = db_data['mention_data'].get_user_by_id(id)
else:
user = db_data['mention_data'].get_user_by_name(name)
if wildcard:
self.md.zulip_message.mentions_wildcard = True
user_id = "*"
elif user:
if not silent:
self.md.zulip_message.mentions_user_ids.add(user['id'])
name = user['full_name']
user_id = str(user['id'])
else:
# Don't highlight @mentions that don't refer to a valid user
return None
el = Element("span")
el.set('data-user-id', user_id)
text = f"{name}"
if silent:
el.set('class', 'user-mention silent')
else:
el.set('class', 'user-mention')
text = f"@{text}"
el.text = markdown.util.AtomicString(text)
return el
return None
class UserGroupMentionPattern(markdown.inlinepatterns.Pattern):
def handleMatch(self, m: Match[str]) -> Optional[Element]:
match = m.group(2)
db_data = self.md.zulip_db_data
if self.md.zulip_message and db_data is not None:
name = extract_user_group(match)
user_group = db_data['mention_data'].get_user_group(name)
if user_group:
self.md.zulip_message.mentions_user_group_ids.add(user_group.id)
name = user_group.name
user_group_id = str(user_group.id)
else:
# Don't highlight @-mentions that don't refer to a valid user
# group.
return None
el = Element("span")
el.set('class', 'user-group-mention')
el.set('data-user-group-id', user_group_id)
text = f"@{name}"
el.text = markdown.util.AtomicString(text)
return el
return None
class StreamPattern(CompiledPattern):
def find_stream_by_name(self, name: Match[str]) -> Optional[Dict[str, Any]]:
db_data = self.md.zulip_db_data
if db_data is None:
return None
stream = db_data['stream_names'].get(name)
return stream
def handleMatch(self, m: Match[str]) -> Optional[Element]:
name = m.group('stream_name')
if self.md.zulip_message:
stream = self.find_stream_by_name(name)
if stream is None:
return None
el = Element('a')
el.set('class', 'stream')
el.set('data-stream-id', str(stream['id']))
# TODO: We should quite possibly not be specifying the
# href here and instead having the browser auto-add the
# href when it processes a message with one of these, to
# provide more clarity to API clients.
# Also do the same for StreamTopicPattern.
stream_url = encode_stream(stream['id'], name)
el.set('href', f'/#narrow/stream/{stream_url}')
text = f'#{name}'
el.text = markdown.util.AtomicString(text)
return el
return None
class StreamTopicPattern(CompiledPattern):
def find_stream_by_name(self, name: Match[str]) -> Optional[Dict[str, Any]]:
db_data = self.md.zulip_db_data
if db_data is None:
return None
stream = db_data['stream_names'].get(name)
return stream
def handleMatch(self, m: Match[str]) -> Optional[Element]:
stream_name = m.group('stream_name')
topic_name = m.group('topic_name')
if self.md.zulip_message:
stream = self.find_stream_by_name(stream_name)
if stream is None or topic_name is None:
return None
el = Element('a')
el.set('class', 'stream-topic')
el.set('data-stream-id', str(stream['id']))
stream_url = encode_stream(stream['id'], stream_name)
topic_url = hash_util_encode(topic_name)
link = f'/#narrow/stream/{stream_url}/topic/{topic_url}'
el.set('href', link)
text = f'#{stream_name} > {topic_name}'
el.text = markdown.util.AtomicString(text)
return el
return None
def possible_linked_stream_names(content: str) -> Set[str]:
matches = re.findall(STREAM_LINK_REGEX, content, re.VERBOSE)
for match in re.finditer(STREAM_TOPIC_LINK_REGEX, content, re.VERBOSE):
matches.append(match.group('stream_name'))
return set(matches)
class AlertWordNotificationProcessor(markdown.preprocessors.Preprocessor):
allowed_before_punctuation = {' ', '\n', '(', '"', '.', ',', '\'', ';', '[', '*', '`', '>'}
allowed_after_punctuation = {' ', '\n', ')', '",', '?', ':', '.', ',', '\'', ';', ']', '!',
'*', '`'}
def check_valid_start_position(self, content: str, index: int) -> bool:
if index <= 0 or content[index] in self.allowed_before_punctuation:
return True
return False
def check_valid_end_position(self, content: str, index: int) -> bool:
if index >= len(content) or content[index] in self.allowed_after_punctuation:
return True
return False
def run(self, lines: Iterable[str]) -> Iterable[str]:
db_data = self.md.zulip_db_data
if self.md.zulip_message and db_data is not None:
# We check for alert words here, the set of which are
# dependent on which users may see this message.
#
# Our caller passes in the list of possible_words. We
# don't do any special rendering; we just append the alert words
# we find to the set self.md.zulip_message.alert_words.
realm_alert_words_automaton = db_data['realm_alert_words_automaton']
if realm_alert_words_automaton is not None:
content = '\n'.join(lines).lower()
for end_index, (original_value, user_ids) in realm_alert_words_automaton.iter(content):
if self.check_valid_start_position(content, end_index - len(original_value)) and \
self.check_valid_end_position(content, end_index + 1):
self.md.zulip_message.user_ids_with_alert_words.update(user_ids)
return lines
class LinkInlineProcessor(markdown.inlinepatterns.LinkInlineProcessor):
def zulip_specific_link_changes(self, el: Element) -> Union[None, Element]:
href = el.get('href')
# Sanitize url or don't parse link. See linkify_tests in markdown_test_cases for banned syntax.
href = sanitize_url(self.unescape(href.strip()))
if href is None:
return None # no-op; the link is not processed.
# Rewrite local links to be relative
db_data = self.md.zulip_db_data
href = rewrite_local_links_to_relative(db_data, href)
# Make changes to <a> tag attributes
el.set("href", href)
# Show the link href if the link text is empty
if not el.text.strip():
el.text = href
# Prevent realm_filters from running on the content of a Markdown link, breaking up the link.
# This is a monkey-patch, but it might be worth sending a version of this change upstream.
el.text = markdown.util.AtomicString(el.text)
return el
def handleMatch(self, m: Match[str], data: str) -> Tuple[Union[None, Element], int, int]:
el, match_start, index = super().handleMatch(m, data)
if el is not None:
el = self.zulip_specific_link_changes(el)
return el, match_start, index
def get_sub_registry(r: markdown.util.Registry, keys: List[str]) -> markdown.util.Registry:
# Registry is a new class added by py-markdown to replace the old OrderedDict-based registry.
# Since Registry doesn't support .keys(), it is easier to make a new
# object instead of removing keys from the existing object.
new_r = markdown.util.Registry()
for k in keys:
new_r.register(r[k], k, r.get_index_for_name(k))
return new_r
# These are used as keys ("realm_filters_keys") to md_engines and the respective
# realm filter caches
DEFAULT_BUGDOWN_KEY = -1
ZEPHYR_MIRROR_BUGDOWN_KEY = -2
class Bugdown(markdown.Markdown):
def __init__(self, *args: Any, **kwargs: Union[bool, int, List[Any]]) -> None:
# define default configs
self.config = {
"realm_filters": [kwargs['realm_filters'],
"Realm-specific filters for realm_filters_key {}".format(kwargs['realm'])],
"realm": [kwargs['realm'], "Realm id"],
"code_block_processor_disabled": [kwargs['code_block_processor_disabled'],
"Disabled for email gateway"],
}
super().__init__(*args, **kwargs)
self.set_output_format('html')
def build_parser(self) -> markdown.Markdown:
# Build the parser using selected default features from py-markdown.
# The complete list of all available processors can be found in the
# super().build_parser() function.
#
# Note: for any py-markdown updates, manually check if we want any
# of the new features added upstream or not; they wouldn't get
# included by default.
self.preprocessors = self.build_preprocessors()
self.parser = self.build_block_parser()
self.inlinePatterns = self.build_inlinepatterns()
self.treeprocessors = self.build_treeprocessors()
self.postprocessors = self.build_postprocessors()
self.handle_zephyr_mirror()
return self
def build_preprocessors(self) -> markdown.util.Registry:
# We disable the following preprocessors from upstream:
#
# html_block - insecure
# reference - references don't make sense in a chat context.
preprocessors = markdown.util.Registry()
preprocessors.register(BugdownListPreprocessor(self), 'hanging_lists', 35)
preprocessors.register(markdown.preprocessors.NormalizeWhitespace(self), 'normalize_whitespace', 30)
preprocessors.register(fenced_code.FencedBlockPreprocessor(self), 'fenced_code_block', 25)
preprocessors.register(AlertWordNotificationProcessor(self), 'custom_text_notifications', 20)
return preprocessors
def build_block_parser(self) -> markdown.util.Registry:
# We disable the following blockparsers from upstream:
#
# indent - replaced by ours
# setextheader - disabled; we only support hashheaders for headings
# olist - replaced by ours
# ulist - replaced by ours
# quote - replaced by ours
parser = markdown.blockprocessors.BlockParser(self)
parser.blockprocessors.register(markdown.blockprocessors.EmptyBlockProcessor(parser), 'empty', 95)
parser.blockprocessors.register(ListIndentProcessor(parser), 'indent', 90)
if not self.getConfig('code_block_processor_disabled'):
parser.blockprocessors.register(markdown.blockprocessors.CodeBlockProcessor(parser), 'code', 85)
parser.blockprocessors.register(HashHeaderProcessor(parser), 'hashheader', 80)
# We get priority 75 from 'table' extension
parser.blockprocessors.register(markdown.blockprocessors.HRProcessor(parser), 'hr', 70)
parser.blockprocessors.register(OListProcessor(parser), 'olist', 65)
parser.blockprocessors.register(UListProcessor(parser), 'ulist', 60)
parser.blockprocessors.register(BlockQuoteProcessor(parser), 'quote', 55)
parser.blockprocessors.register(markdown.blockprocessors.ParagraphProcessor(parser), 'paragraph', 50)
return parser
def build_inlinepatterns(self) -> markdown.util.Registry:
# We disable the following upstream inline patterns:
#
# backtick - replaced by ours
# escape - probably will re-add at some point.
# link - replaced by ours
# image_link - replaced by ours
# autolink - replaced by ours
# automail - replaced by ours
# linebreak - we use nl2br and consider that good enough
# html - insecure
# reference - references not useful
# image_reference - references not useful
# short_reference - references not useful
# ---------------------------------------------------
# strong_em - for these three patterns,
# strong2 - we have our own versions where
# emphasis2 - we disable _ for bold and emphasis
# Declare regexes for clean single line calls to .register().
NOT_STRONG_RE = markdown.inlinepatterns.NOT_STRONG_RE
# Custom strikethrough syntax: ~~foo~~
DEL_RE = r'(?<!~)(\~\~)([^~\n]+?)(\~\~)(?!~)'
# Custom bold syntax: **foo** but not __foo__
# str inside ** must start and end with a word character
# this is needed for things like "const char *x = (char *)y"
EMPHASIS_RE = r'(\*)(?!\s+)([^\*^\n]+)(?<!\s)\*'
ENTITY_RE = markdown.inlinepatterns.ENTITY_RE
STRONG_EM_RE = r'(\*\*\*)(?!\s+)([^\*^\n]+)(?<!\s)\*\*\*'
# Inline code block without whitespace stripping
BACKTICK_RE = r'(?:(?<!\\)((?:\\{2})+)(?=`+)|(?<!\\)(`+)(.+?)(?<!`)\3(?!`))'
# Add Inline Patterns. We use a custom numbering of the
# rules that preserves the order from upstream but leaves
# space for us to add our own.
reg = markdown.util.Registry()
reg.register(BacktickPattern(BACKTICK_RE), 'backtick', 105)
reg.register(markdown.inlinepatterns.DoubleTagPattern(STRONG_EM_RE, 'strong,em'), 'strong_em', 100)
reg.register(UserMentionPattern(mention.find_mentions, self), 'usermention', 95)
reg.register(Tex(r'\B(?<!\$)\$\$(?P<body>[^\n_$](\\\$|[^$\n])*)\$\$(?!\$)\B'), 'tex', 90)
reg.register(StreamTopicPattern(get_compiled_stream_topic_link_regex(), self), 'topic', 87)
reg.register(StreamPattern(get_compiled_stream_link_regex(), self), 'stream', 85)
reg.register(Avatar(AVATAR_REGEX, self), 'avatar', 80)
reg.register(Timestamp(r'!time\((?P<time>[^)]*)\)'), 'timestamp', 75)
# Note that !gravatar syntax should be deprecated long term.
reg.register(Avatar(GRAVATAR_REGEX, self), 'gravatar', 70)
reg.register(UserGroupMentionPattern(mention.user_group_mentions, self), 'usergroupmention', 65)
reg.register(LinkInlineProcessor(markdown.inlinepatterns.LINK_RE, self), 'link', 60)
reg.register(AutoLink(get_web_link_regex(), self), 'autolink', 55)
# Reserve priority 45-54 for Realm Filters
reg = self.register_realm_filters(reg)
reg.register(markdown.inlinepatterns.HtmlInlineProcessor(ENTITY_RE, self), 'entity', 40)
reg.register(markdown.inlinepatterns.SimpleTagPattern(r'(\*\*)([^\n]+?)\2', 'strong'), 'strong', 35)
reg.register(markdown.inlinepatterns.SimpleTagPattern(EMPHASIS_RE, 'em'), 'emphasis', 30)
reg.register(markdown.inlinepatterns.SimpleTagPattern(DEL_RE, 'del'), 'del', 25)
reg.register(markdown.inlinepatterns.SimpleTextInlineProcessor(NOT_STRONG_RE), 'not_strong', 20)
reg.register(Emoji(EMOJI_REGEX, self), 'emoji', 15)
reg.register(EmoticonTranslation(emoticon_regex, self), 'translate_emoticons', 10)
# We get priority 5 from 'nl2br' extension
reg.register(UnicodeEmoji(unicode_emoji_regex), 'unicodeemoji', 0)
return reg
def register_realm_filters(self, inlinePatterns: markdown.util.Registry) -> markdown.util.Registry:
for (pattern, format_string, id) in self.getConfig("realm_filters"):
inlinePatterns.register(RealmFilterPattern(pattern, format_string, self),
f'realm_filters/{pattern}', 45)
return inlinePatterns
def build_treeprocessors(self) -> markdown.util.Registry:
# Here we build all the processors from upstream, plus a few of our own.
treeprocessors = markdown.util.Registry()
# We get priority 30 from 'hilite' extension
treeprocessors.register(markdown.treeprocessors.InlineProcessor(self), 'inline', 25)
treeprocessors.register(markdown.treeprocessors.PrettifyTreeprocessor(self), 'prettify', 20)
treeprocessors.register(InlineInterestingLinkProcessor(self), 'inline_interesting_links', 15)
if settings.CAMO_URI:
treeprocessors.register(InlineHttpsProcessor(self), 'rewrite_to_https', 10)
return treeprocessors
def build_postprocessors(self) -> markdown.util.Registry:
# These are the default python-markdown processors, unmodified.
postprocessors = markdown.util.Registry()
postprocessors.register(markdown.postprocessors.RawHtmlPostprocessor(self), 'raw_html', 20)
postprocessors.register(markdown.postprocessors.AndSubstitutePostprocessor(), 'amp_substitute', 15)
postprocessors.register(markdown.postprocessors.UnescapePostprocessor(), 'unescape', 10)
return postprocessors
def getConfig(self, key: str, default: str='') -> Any:
""" Return a setting for the given key or an empty string. """
if key in self.config:
return self.config[key][0]
else:
return default
def handle_zephyr_mirror(self) -> None:
if self.getConfig("realm") == ZEPHYR_MIRROR_BUGDOWN_KEY:
# Disable almost all inline patterns for zephyr mirror
# users' traffic that is mirrored. Note that
# inline_interesting_links is a treeprocessor and thus is
# not removed
self.inlinePatterns = get_sub_registry(self.inlinePatterns, ['autolink'])
self.treeprocessors = get_sub_registry(self.treeprocessors, ['inline_interesting_links',
'rewrite_to_https'])
# insert new 'inline' processor because we have changed self.inlinePatterns
# but InlineProcessor copies md as self.md in __init__.
self.treeprocessors.register(markdown.treeprocessors.InlineProcessor(self), 'inline', 25)
self.preprocessors = get_sub_registry(self.preprocessors, ['custom_text_notifications'])
self.parser.blockprocessors = get_sub_registry(self.parser.blockprocessors, ['paragraph'])
md_engines: Dict[Tuple[int, bool], markdown.Markdown] = {}
realm_filter_data: Dict[int, List[Tuple[str, str, int]]] = {}
def make_md_engine(realm_filters_key: int, email_gateway: bool) -> None:
md_engine_key = (realm_filters_key, email_gateway)
if md_engine_key in md_engines:
del md_engines[md_engine_key]
realm_filters = realm_filter_data[realm_filters_key]
md_engines[md_engine_key] = build_engine(
realm_filters=realm_filters,
realm_filters_key=realm_filters_key,
email_gateway=email_gateway,
)
def build_engine(realm_filters: List[Tuple[str, str, int]],
realm_filters_key: int,
email_gateway: bool) -> markdown.Markdown:
engine = Bugdown(
realm_filters=realm_filters,
realm=realm_filters_key,
code_block_processor_disabled=email_gateway,
extensions = [
nl2br.makeExtension(),
tables.makeExtension(),
codehilite.makeExtension(
linenums=False,
guess_lang=False,
),
])
return engine
# Split the topic name into multiple sections so that we can easily use
# our common single link matching regex on it.
basic_link_splitter = re.compile(r'[ !;\?\),\'\"]')
# Security note: We don't do any HTML escaping in this
# function on the URLs; they are expected to be HTML-escaped when
# rendered by clients (just as links rendered into message bodies
# are validated and escaped inside `url_to_a`).
def topic_links(realm_filters_key: int, topic_name: str) -> List[str]:
matches: List[str] = []
realm_filters = realm_filters_for_realm(realm_filters_key)
for realm_filter in realm_filters:
pattern = prepare_realm_pattern(realm_filter[0])
for m in re.finditer(pattern, topic_name):
matches += [realm_filter[1] % m.groupdict()]
# Also make raw urls navigable.
for sub_string in basic_link_splitter.split(topic_name):
link_match = re.match(get_web_link_regex(), sub_string)
if link_match:
url = link_match.group('url')
url_object = parse(url)
if not url_object.scheme:
url = url_object.replace(scheme='https').to_text()
matches.append(url)
return matches
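# Illustrative behavior (hypothetical topic, not from the original source): for a
# topic like "see https://example.com and chat.example.org", each whitespace/
# punctuation-separated piece is matched against the web link regex, pieces parsed
# without a scheme get "https" substituted, and the resulting URLs are returned
# alongside any realm-filter matches.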
def maybe_update_markdown_engines(realm_filters_key: Optional[int], email_gateway: bool) -> None:
# If realm_filters_key is None, load all filters
global realm_filter_data
if realm_filters_key is None:
all_filters = all_realm_filters()
all_filters[DEFAULT_BUGDOWN_KEY] = []
for realm_filters_key, filters in all_filters.items():
realm_filter_data[realm_filters_key] = filters
make_md_engine(realm_filters_key, email_gateway)
# Hack to ensure that getConfig("realm") is right for mirrored Zephyrs
realm_filter_data[ZEPHYR_MIRROR_BUGDOWN_KEY] = []
make_md_engine(ZEPHYR_MIRROR_BUGDOWN_KEY, False)
else:
realm_filters = realm_filters_for_realm(realm_filters_key)
if realm_filters_key not in realm_filter_data or \
realm_filter_data[realm_filters_key] != realm_filters:
# Realm filters data has changed, update `realm_filter_data` and any
# of the existing markdown engines using this set of realm filters.
realm_filter_data[realm_filters_key] = realm_filters
for email_gateway_flag in [True, False]:
if (realm_filters_key, email_gateway_flag) in md_engines:
# Update only existing engines (if any); don't create new ones.
make_md_engine(realm_filters_key, email_gateway_flag)
if (realm_filters_key, email_gateway) not in md_engines:
# A Markdown engine corresponding to this key doesn't exist, so create one.
make_md_engine(realm_filters_key, email_gateway)
# We want to log Markdown parser failures, but shouldn't log the actual input
# message for privacy reasons. The compromise is to replace all alphanumeric
# characters with 'x'.
#
# We also use repr() to improve reproducibility, and to escape terminal control
# codes, which can do surprisingly nasty things.
_privacy_re = re.compile('\\w', flags=re.UNICODE)
def privacy_clean_markdown(content: str) -> str:
return repr(_privacy_re.sub('x', content))
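# Illustrative example (input assumed, not from the original source):
#   privacy_clean_markdown("Hi @alice, see #42!") -> "'xx @xxxxx, xxx #xx!'"
# (alphanumerics become 'x'; repr() adds the surrounding quotes and escapes any
# control characters).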
def log_bugdown_error(msg: str) -> None:
"""We use this unusual logging approach to log the bugdown error, in
order to prevent AdminNotifyHandler from sending the sanitized
original markdown formatting into another Zulip message, which
could cause an infinite exception loop."""
bugdown_logger.error(msg)
def get_email_info(realm_id: int, emails: Set[str]) -> Dict[str, FullNameInfo]:
if not emails:
return dict()
q_list = {
Q(email__iexact=email.strip().lower())
for email in emails
}
rows = UserProfile.objects.filter(
realm_id=realm_id,
).filter(
functools.reduce(lambda a, b: a | b, q_list),
).values(
'id',
'email',
)
dct = {
row['email'].strip().lower(): row
for row in rows
}
return dct
def get_possible_mentions_info(realm_id: int, mention_texts: Set[str]) -> List[FullNameInfo]:
if not mention_texts:
return list()
# Remove the trailing part of the `name|id` mention syntax,
# thus storing only full names in full_names.
full_names = set()
name_re = r'(?P<full_name>.+)\|\d+$'
for mention_text in mention_texts:
name_syntax_match = re.match(name_re, mention_text)
if name_syntax_match:
full_names.add(name_syntax_match.group("full_name"))
else:
full_names.add(mention_text)
q_list = {
Q(full_name__iexact=full_name)
for full_name in full_names
}
rows = UserProfile.objects.filter(
realm_id=realm_id,
is_active=True,
).filter(
functools.reduce(lambda a, b: a | b, q_list),
).values(
'id',
'full_name',
'email',
)
return list(rows)
class MentionData:
def __init__(self, realm_id: int, content: str) -> None:
mention_texts, has_wildcards = possible_mentions(content)
possible_mentions_info = get_possible_mentions_info(realm_id, mention_texts)
self.full_name_info = {
row['full_name'].lower(): row
for row in possible_mentions_info
}
self.user_id_info = {
row['id']: row
for row in possible_mentions_info
}
self.init_user_group_data(realm_id=realm_id, content=content)
self.has_wildcards = has_wildcards
def message_has_wildcards(self) -> bool:
return self.has_wildcards
def init_user_group_data(self,
realm_id: int,
content: str) -> None:
user_group_names = possible_user_group_mentions(content)
self.user_group_name_info = get_user_group_name_info(realm_id, user_group_names)
self.user_group_members: Dict[int, List[int]] = defaultdict(list)
group_ids = [group.id for group in self.user_group_name_info.values()]
if not group_ids:
# Early-return to avoid the cost of hitting the ORM,
# which shows up in profiles.
return
membership = UserGroupMembership.objects.filter(user_group_id__in=group_ids)
for info in membership.values('user_group_id', 'user_profile_id'):
group_id = info['user_group_id']
user_profile_id = info['user_profile_id']
self.user_group_members[group_id].append(user_profile_id)
def get_user_by_name(self, name: str) -> Optional[FullNameInfo]:
# warning: get_user_by_name is not dependable if two
# users with the same full name are mentioned. Use
# get_user_by_id where possible.
return self.full_name_info.get(name.lower(), None)
def get_user_by_id(self, id: str) -> Optional[FullNameInfo]:
return self.user_id_info.get(int(id), None)
def get_user_ids(self) -> Set[int]:
"""
Returns the user IDs that might have been mentioned by this
content. Note that because this data structure has not parsed
the message and does not know about escaping/code blocks, this
will overestimate the list of user ids.
"""
return set(self.user_id_info.keys())
def get_user_group(self, name: str) -> Optional[UserGroup]:
return self.user_group_name_info.get(name.lower(), None)
def get_group_members(self, user_group_id: int) -> List[int]:
return self.user_group_members.get(user_group_id, [])
def get_user_group_name_info(realm_id: int, user_group_names: Set[str]) -> Dict[str, UserGroup]:
if not user_group_names:
return dict()
rows = UserGroup.objects.filter(realm_id=realm_id,
name__in=user_group_names)
dct = {row.name.lower(): row for row in rows}
return dct
def get_stream_name_info(realm: Realm, stream_names: Set[str]) -> Dict[str, FullNameInfo]:
if not stream_names:
return dict()
q_list = {
Q(name=name)
for name in stream_names
}
rows = get_active_streams(
realm=realm,
).filter(
functools.reduce(lambda a, b: a | b, q_list),
).values(
'id',
'name',
)
dct = {
row['name']: row
for row in rows
}
return dct
def do_convert(content: str,
realm_alert_words_automaton: Optional[ahocorasick.Automaton] = None,
message: Optional[Message]=None,
message_realm: Optional[Realm]=None,
sent_by_bot: bool=False,
translate_emoticons: bool=False,
mention_data: Optional[MentionData]=None,
email_gateway: bool=False,
no_previews: bool=False) -> str:
"""Convert Markdown to HTML, with Zulip-specific settings and hacks."""
# This logic is a bit convoluted, but the overall goal is to support a range of use cases:
# * Nothing is passed in other than content -> just run default options (e.g. for docs)
# * message is passed, but no realm is -> look up realm from message
# * message_realm is passed -> use that realm for bugdown purposes
if message is not None:
if message_realm is None:
message_realm = message.get_realm()
if message_realm is None:
realm_filters_key = DEFAULT_BUGDOWN_KEY
else:
realm_filters_key = message_realm.id
if message and hasattr(message, 'id') and message.id:
logging_message_id = 'id# ' + str(message.id)
else:
logging_message_id = 'unknown'
if message is not None and message_realm is not None:
if message_realm.is_zephyr_mirror_realm:
if message.sending_client.name == "zephyr_mirror":
# Use slightly customized Markdown processor for content
# delivered via zephyr_mirror
realm_filters_key = ZEPHYR_MIRROR_BUGDOWN_KEY
maybe_update_markdown_engines(realm_filters_key, email_gateway)
md_engine_key = (realm_filters_key, email_gateway)
if md_engine_key in md_engines:
_md_engine = md_engines[md_engine_key]
else:
if DEFAULT_BUGDOWN_KEY not in md_engines:
maybe_update_markdown_engines(realm_filters_key=None, email_gateway=False)
_md_engine = md_engines[(DEFAULT_BUGDOWN_KEY, email_gateway)]
# Reset the parser; otherwise it will get slower over time.
_md_engine.reset()
# Filters such as UserMentionPattern need a message.
_md_engine.zulip_message = message
_md_engine.zulip_realm = message_realm
_md_engine.zulip_db_data = None # for now
_md_engine.image_preview_enabled = image_preview_enabled(
message, message_realm, no_previews)
_md_engine.url_embed_preview_enabled = url_embed_preview_enabled(
message, message_realm, no_previews)
# Pre-fetch data from the DB that is used in the bugdown thread
if message is not None:
assert message_realm is not None # ensured above if message is not None
# Here we fetch the data structures needed to render
# mentions/avatars/stream mentions from the database, but only
# if there is syntax in the message that might use them, since
# the fetches are somewhat expensive and these types of syntax
# are uncommon enough that it's a useful optimization.
if mention_data is None:
mention_data = MentionData(message_realm.id, content)
emails = possible_avatar_emails(content)
email_info = get_email_info(message_realm.id, emails)
stream_names = possible_linked_stream_names(content)
stream_name_info = get_stream_name_info(message_realm, stream_names)
if content_has_emoji_syntax(content):
active_realm_emoji = message_realm.get_active_emoji()
else:
active_realm_emoji = dict()
_md_engine.zulip_db_data = {
'realm_alert_words_automaton': realm_alert_words_automaton,
'email_info': email_info,
'mention_data': mention_data,
'active_realm_emoji': active_realm_emoji,
'realm_uri': message_realm.uri,
'sent_by_bot': sent_by_bot,
'stream_names': stream_name_info,
'translate_emoticons': translate_emoticons,
}
try:
# Spend at most 5 seconds rendering; this protects the backend
# from being overloaded by bugs (e.g. markdown logic that is
# extremely inefficient in corner cases) as well as user
# errors (e.g. a realm filter that makes some syntax
# infinite-loop).
rendered_content = timeout(5, _md_engine.convert, content)
# Throw an exception if the content is huge; this protects the
# rest of the codebase from any bugs where we end up rendering
# something huge.
if len(rendered_content) > MAX_MESSAGE_LENGTH * 10:
raise BugdownRenderingException(
f'Rendered content exceeds {MAX_MESSAGE_LENGTH * 10} characters (message {logging_message_id})'
)
return rendered_content
except Exception:
cleaned = privacy_clean_markdown(content)
# NOTE: Don't change this message without also changing the
# logic in logging_handlers.py or we can create recursive
# exceptions.
bugdown_logger.exception(
'Exception in Markdown parser; input (sanitized) was: %s\n (message %s)',
cleaned,
logging_message_id,
)
raise BugdownRenderingException()
finally:
# These next three lines are slightly paranoid, since
# we always set these right before actually using the
# engine, but better safe than sorry.
_md_engine.zulip_message = None
_md_engine.zulip_realm = None
_md_engine.zulip_db_data = None
bugdown_time_start = 0.0
bugdown_total_time = 0.0
bugdown_total_requests = 0
def get_bugdown_time() -> float:
return bugdown_total_time
def get_bugdown_requests() -> int:
return bugdown_total_requests
def bugdown_stats_start() -> None:
global bugdown_time_start
bugdown_time_start = time.time()
def bugdown_stats_finish() -> None:
global bugdown_total_time
global bugdown_total_requests
global bugdown_time_start
bugdown_total_requests += 1
bugdown_total_time += (time.time() - bugdown_time_start)
def convert(content: str,
realm_alert_words_automaton: Optional[ahocorasick.Automaton] = None,
message: Optional[Message]=None,
message_realm: Optional[Realm]=None,
sent_by_bot: bool=False,
translate_emoticons: bool=False,
mention_data: Optional[MentionData]=None,
email_gateway: bool=False,
no_previews: bool=False) -> str:
bugdown_stats_start()
ret = do_convert(content, realm_alert_words_automaton,
message, message_realm, sent_by_bot,
translate_emoticons, mention_data, email_gateway,
no_previews=no_previews)
bugdown_stats_finish()
return ret
|
py | 1a36f16aeb7db2e463fd7c2ac56b8866fa64adbc | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import math
import networkx as nx
from collections import defaultdict
from typing import Any, Dict
def _modularity_component(
intra_community_degree: float,
total_community_degree: float,
network_degree_sum: float,
resolution: float,
) -> float:
community_degree_ratio = math.pow(total_community_degree, 2.0) / (
2.0 * network_degree_sum
)
return (intra_community_degree - resolution * community_degree_ratio) / (
2.0 * network_degree_sum
)
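# The component computed above is the usual per-community modularity term
#   Q_c = sigma_in / (2m) - resolution * (sigma_tot / (2m))**2
# where sigma_in is `intra_community_degree` (twice the intra-community edge weight),
# sigma_tot is `total_community_degree`, and m is `network_degree_sum` (the total
# edge weight of the graph).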
def _assertions(
graph: nx.Graph,
partitions: Dict[Any, int],
weight_attribute: str,
resolution: float,
):
if not isinstance(graph, nx.Graph):
raise TypeError("graph must be a networkx undirected graph")
if graph.is_directed():
raise ValueError("The graph must be an undirected graph")
if graph.is_multigraph():
raise ValueError(
"Multigraphs must be provided in the form of a non multigraph."
)
if not nx.is_weighted(graph, weight=weight_attribute):
raise ValueError(
f"weight_attribute {weight_attribute} not found on every edge in the provided graph"
)
if not isinstance(partitions, dict):
raise TypeError("partitions must be a dictionary")
if not isinstance(resolution, float):
raise TypeError("resolution must be a float")
def modularity(
graph: nx.Graph,
partitions: Dict[Any, int],
weight_attribute: str = "weight",
resolution: float = 1.0,
) -> float:
"""
Given an undirected graph and a dictionary of vertices to community ids, calculate
the modularity.
Parameters
----------
graph : nx.Graph
An undirected graph
partitions : Dict[Any, int]
A dictionary representing a community partitioning scheme with the keys being
the vertex and the value being a community id.
weight_attribute : str
The edge data attribute on the graph that contains a float weight for the edge.
resolution : float
The resolution to use when calculating the modularity.
Returns
-------
float
The modularity of the graph for the given partitioning scheme (the sum of the per-community components).
Raises
------
TypeError
If ``graph`` is not a networkx Graph or
If ``partitions`` is not a dictionary or
If ``resolution`` is not a float
ValueError
If ``graph`` is unweighted
If ``graph`` is directed
If ``graph`` is a multigraph
References
----------
.. [1] https://en.wikipedia.org/wiki/Modularity_(networks)
"""
_assertions(graph, partitions, weight_attribute, resolution)
components = modularity_components(graph, partitions, weight_attribute, resolution)
return sum(components.values())
def modularity_components(
graph: nx.Graph,
partitions: Dict[Any, int],
weight_attribute: str = "weight",
resolution: float = 1.0,
) -> Dict[int, float]:
"""
Given an undirected, weighted graph and a community partition dictionary,
calculates a modularity quantum for each community ID. The sum of these quanta
is the modularity of the graph and partitions provided.
Parameters
----------
graph : nx.Graph
An undirected graph
partitions : Dict[Any, int]
A dictionary representing a community partitioning scheme with the keys being
the vertex and the value being a community id.
weight_attribute : str
The edge data attribute on the graph that contains a float weight for the edge.
resolution : float
The resolution to use when calculating the modularity.
Returns
-------
Dict[int, float]
A dictionary of the community id to the modularity component of that community
Raises
------
TypeError
If ``graph`` is not a networkx Graph or
If ``partitions`` is not a dictionary or
If ``resolution`` is not a float
ValueError
If ``graph`` is unweighted
If ``graph`` is directed
If ``graph`` is a multigraph
"""
_assertions(graph, partitions, weight_attribute, resolution)
total_edge_weight = 0.0
communities = set(partitions.values())
degree_sums_within_community: Dict[int, float] = defaultdict(lambda: 0.0)
degree_sums_for_community: Dict[int, float] = defaultdict(lambda: 0.0)
for vertex, neighbor_vertex, weight in graph.edges(data=weight_attribute):
vertex_community = partitions[vertex]
neighbor_community = partitions[neighbor_vertex]
if vertex_community == neighbor_community:
if vertex == neighbor_vertex:
degree_sums_within_community[vertex_community] += weight
else:
degree_sums_within_community[vertex_community] += weight * 2.0
degree_sums_for_community[vertex_community] += weight
degree_sums_for_community[neighbor_community] += weight
total_edge_weight += weight
return {
comm: _modularity_component(
degree_sums_within_community[comm],
degree_sums_for_community[comm],
total_edge_weight,
resolution,
)
for comm in communities
}
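# Minimal usage sketch (not part of the original module; toy graph and values assumed):
if __name__ == "__main__":
    # Two triangles joined by one bridge edge; each triangle is its own community.
    example_graph = nx.Graph()
    example_graph.add_weighted_edges_from([
        ("a", "b", 1.0), ("b", "c", 1.0), ("c", "a", 1.0),
        ("d", "e", 1.0), ("e", "f", 1.0), ("f", "d", 1.0),
        ("c", "d", 1.0),
    ])
    example_partitions = {"a": 0, "b": 0, "c": 0, "d": 1, "e": 1, "f": 1}
    # Each community contributes one quantum; their sum is the graph modularity (~0.357 here).
    print(modularity_components(example_graph, example_partitions))
    print(modularity(example_graph, example_partitions))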
|
py | 1a36f25ed3ae0be12907b4517c0f4785ee58d88a | import os
import logging
from datetime import datetime
from discord import (
Intents,
Activity,
ActivityType
)
from discord.ext import commands
from discord.ext.commands import (
ExtensionNotFound,
ExtensionAlreadyLoaded,
NoEntryPointError,
ExtensionFailed,
ExtensionNotLoaded
)
from motor.motor_asyncio import AsyncIOMotorClient
from bot.utils import config
__all__ = (
'Astro',
)
log = logging.getLogger(__name__)
TOKEN = config()['token']
DESCRIPTION = "A bot for Dimension MC's Discord server."
STICKY_EXTENSIONS = (
'admin',
'handler'
)
def _get_prefix(bot, msg):
return config()['prefixes']
def _get_status():
conf = config()
type_ = conf['status_type']
name = conf['status_name']
url = conf['status_url']
return Activity(type=getattr(ActivityType, type_, ActivityType.playing), name=name, url=url)
class Astro(commands.Bot):
def __init__(self):
self.db = AsyncIOMotorClient('mongodb://localhost:27017/astro')['astro']
self._socket_receive = {}
super().__init__(
command_prefix=_get_prefix,
help_command=None,
description=DESCRIPTION,
case_insensitive=True,
activity=_get_status(),
intents=Intents.all())
def _load_extension(self, name, *, package=None):
try:
super().load_extension(f'bot.extensions.{name}', package=package)
except ExtensionNotFound:
return f'Extension `{name}` not found'
except ExtensionAlreadyLoaded:
return f'Extension `{name}` is already loaded'
except NoEntryPointError:
return f'Extension `{name}` has no setup function'
except ModuleNotFoundError:
return f'Extension `{name}` not found'
except ExtensionFailed as e:
return f'Extension `{name}` couldn\'t be loaded:\n```{e}```'
def _reload_extension(self, name, *, package=None):
try:
super().reload_extension(f'bot.extensions.{name}', package=package)
except ExtensionNotFound:
return f'Extension `{name}` not found'
except ExtensionNotLoaded:
return f'Extension `{name}` not loaded'
except NoEntryPointError:
return f'Extension `{name}` has no setup function'
except ModuleNotFoundError:
return f'Extension `{name}` not found'
except ExtensionFailed as e:
return f'`{name}`:\n```{e.original}```'
def _unload_extension(self, name, *, package=None):
if any(_ for _ in STICKY_EXTENSIONS if (_ in name)):  # crude substring check; sticky extensions must stay loaded
return f'Extension {name} is protected from unloads.'
try:
super().unload_extension(f'bot.extensions.{name}', package=package)
except:
return f'Extension `{name}` not found/loaded'
def _load_all_extensions(self):
ret = ''
for ext in os.listdir('bot/extensions'):
if ext.endswith('.py'):
_ = self._load_extension(ext[:-3])
if _:
ret = f'{ret}\n{_}'
return ret
def _reload_all_extensions(self):
ret = ''
for ext in os.listdir('bot/extensions'):
if ext.endswith('.py'):
_ = self._reload_extension(ext[:-3])
if _:
ret = f'{ret}\n{_}'
return ret
def _unload_all_extensions(self):
ret = ''
for ext in os.listdir('bot/extensions'):
if ext.endswith('.py'):
_ = self._unload_extension(ext[:-3])
if _:
ret = f'{ret}\n{_}'
return ret
async def on_message(self, message):
if not message.guild:
return
return await super().on_message(message)
async def on_connect(self):
print(f'Connected to Discord. Latency: {(self.latency * 1000):.2f}ms')
async def on_disconnect(self):
print('Disconnected. Attempting to reconnect...')
async def on_resume(self):
print('Connection restored.')
async def on_ready(self):
print('Internal cache ready.')
self.uptime = datetime.utcnow()
async def on_error(self, event_method, *args, **kwargs):
log.exception(f'Ignoring exception in event {event_method}')
await super().on_error(event_method, *args, **kwargs)
def run(self):
print('Starting...')
_ = self._load_all_extensions()
if _:
print(_)
loop = self.loop
loop.run_until_complete(super().start(TOKEN)) |
py | 1a36f2859144ca8ea02b8f54597d27923b717f1d | """
Example use of the pyjsgf parse_grammar_string function.
The parse_grammar_file, parse_rule_string and parse_expansion_string functions
are also available and work in a similar way.
"""
from jsgf import parse_grammar_string
# Parse a grammar string with parse_grammar_string and get a Grammar object back.
grammar = parse_grammar_string(
"#JSGF V1.0 UTF-8 en;"
"grammar example;"
"public <greet> = hello world {tag};"
)
# Print it.
print(grammar)
# Get the rule that matches "hello world".
rule = grammar.find_matching_rules("hello world")[0]
print("Matching rule: %s" % rule)
# Tags are also parsed and will work as expected.
print("Matched tags: %s" % rule.matched_tags)
|
py | 1a36f3398fff7717b3c02da1f515d162010b34a7 | # coding: utf-8
"""
OpenAPI Petstore
This spec is mainly for testing Petstore server and contains fake endpoints, models. Please do not use this for any other purpose. Special characters: \" \\ # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
try:
from inspect import getfullargspec
except ImportError:
from inspect import getargspec as getfullargspec
import pprint
import re # noqa: F401
import six
from petstore_api.configuration import Configuration
class Animal(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'class_name': 'str',
'color': 'str'
}
attribute_map = {
'class_name': 'className',
'color': 'color'
}
discriminator_value_class_map = {
'Dog': 'Dog',
'Cat': 'Cat'
}
def __init__(self, class_name=None, color='red', local_vars_configuration=None): # noqa: E501
"""Animal - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration.get_default_copy()
self.local_vars_configuration = local_vars_configuration
self._class_name = None
self._color = None
self.discriminator = 'class_name'
self.class_name = class_name
if color is not None:
self.color = color
@property
def class_name(self):
"""Gets the class_name of this Animal. # noqa: E501
:return: The class_name of this Animal. # noqa: E501
:rtype: str
"""
return self._class_name
@class_name.setter
def class_name(self, class_name):
"""Sets the class_name of this Animal.
:param class_name: The class_name of this Animal. # noqa: E501
:type class_name: str
"""
if self.local_vars_configuration.client_side_validation and class_name is None: # noqa: E501
raise ValueError("Invalid value for `class_name`, must not be `None`") # noqa: E501
self._class_name = class_name
@property
def color(self):
"""Gets the color of this Animal. # noqa: E501
:return: The color of this Animal. # noqa: E501
:rtype: str
"""
return self._color
@color.setter
def color(self, color):
"""Sets the color of this Animal.
:param color: The color of this Animal. # noqa: E501
:type color: str
"""
self._color = color
def get_real_child_model(self, data):
"""Returns the real base class specified by the discriminator"""
discriminator_key = self.attribute_map[self.discriminator]
discriminator_value = data[discriminator_key]
return self.discriminator_value_class_map.get(discriminator_value)
def to_dict(self, serialize=False):
"""Returns the model properties as a dict"""
result = {}
def convert(x):
if hasattr(x, "to_dict"):
args = getfullargspec(x.to_dict).args
if len(args) == 1:
return x.to_dict()
else:
return x.to_dict(serialize)
else:
return x
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
attr = self.attribute_map.get(attr, attr) if serialize else attr
if isinstance(value, list):
result[attr] = list(map(
lambda x: convert(x),
value
))
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], convert(item[1])),
value.items()
))
else:
result[attr] = convert(value)
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, Animal):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, Animal):
return True
return self.to_dict() != other.to_dict()
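# A small usage sketch of the discriminator mechanism defined above
# (hypothetical; 'Dog'/'Cat' refer to sibling generated models, and the call
# below only consults discriminator_value_class_map):
#
#     animal = Animal(class_name='Dog')
#     animal.get_real_child_model({'className': 'Dog'})  # -> 'Dog'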
|
py | 1a36f463f0d935a3d0073ff4a6c37f4c19e6e73c | from __future__ import division
from builtins import zip
from builtins import range
from past.utils import old_div
import sys
sys.path.insert(1,"../../../")
import h2o
from tests import pyunit_utils
import numpy as np
import random
import math
import scipy.special
def expr_math_ops():
sin_cos_tan_atan_sinh_cosh_tanh_asinh_data = [[random.uniform(-10,10) for r in range(10)] for c in range(10)]
asin_acos_atanh_data = [[random.uniform(-1,1) for r in range(10)] for c in range(10)]
acosh_data = [[random.uniform(1,10) for r in range(10)] for c in range(10)]
abs_data = [[random.uniform(-100000,0) for r in range(10)] for c in range(10)]
h2o_data1_1 = h2o.H2OFrame(sin_cos_tan_atan_sinh_cosh_tanh_asinh_data)
h2o_data2_1 = h2o.H2OFrame(asin_acos_atanh_data)
h2o_data3_1 = h2o.H2OFrame(acosh_data)
h2o_data4_1 = h2o.H2OFrame(abs_data)
np_data1 = np.array(sin_cos_tan_atan_sinh_cosh_tanh_asinh_data)
np_data2 = np.array(asin_acos_atanh_data)
np_data3 = np.array(acosh_data)
np_data4 = np.array(abs_data)
h2o_data1 = h2o_data1_1 + 2
h2o_data2 = old_div(h2o_data2_1, 1.01)
h2o_data3 = h2o_data3_1 * 1.5
h2o_data4 = h2o_data4_1 - 1.5
np_data1 = np_data1 + 2
np_data2 = old_div(np_data2, 1.01)
np_data3 = np_data3 * 1.5
np_data4 = np_data4 - 1.5
pyunit_utils.np_comparison_check(h2o_data1.cos(), np.cos(np_data1), 10)
pyunit_utils.np_comparison_check(h2o_data1.sin(), np.sin(np_data1), 10)
pyunit_utils.np_comparison_check(h2o_data1.tan(), np.tan(np_data1), 10)
pyunit_utils.np_comparison_check(h2o_data2.acos(), np.arccos(np_data2), 10)
pyunit_utils.np_comparison_check(h2o_data2.asin(), np.arcsin(np_data2), 10)
pyunit_utils.np_comparison_check(h2o_data1.atan(), np.arctan(np_data1), 10)
pyunit_utils.np_comparison_check(h2o_data1.cosh(), np.cosh(np_data1), 10)
pyunit_utils.np_comparison_check(h2o_data1.sinh(), np.sinh(np_data1), 10)
pyunit_utils.np_comparison_check(h2o_data1.tanh(), np.tanh(np_data1), 10)
pyunit_utils.np_comparison_check(h2o_data3.acosh(), np.arccosh(np_data3), 10)
pyunit_utils.np_comparison_check(h2o_data1.asinh(), np.arcsinh(np_data1), 10)
pyunit_utils.np_comparison_check(h2o_data2.atanh(), np.arctanh(np_data2), 10)
pyunit_utils.np_comparison_check((old_div(h2o_data2,math.pi)).cospi(), np.cos(np_data2), 10)
pyunit_utils.np_comparison_check((old_div(h2o_data2,math.pi)).sinpi(), np.sin(np_data2), 10)
pyunit_utils.np_comparison_check((old_div(h2o_data2,math.pi)).tanpi(), np.tan(np_data2), 10)
pyunit_utils.np_comparison_check(h2o_data4.abs(), np.fabs(np_data4), 10)
pyunit_utils.np_comparison_check(h2o_data2.sign(), np.sign(np_data2), 10)
pyunit_utils.np_comparison_check(h2o_data3.sqrt(), np.sqrt(np_data3), 10)
pyunit_utils.np_comparison_check(h2o_data3.trunc(), np.trunc(np_data3), 10)
pyunit_utils.np_comparison_check(h2o_data3.ceil(), np.ceil(np_data3), 10)
pyunit_utils.np_comparison_check(h2o_data3.floor(), np.floor(np_data3), 10)
pyunit_utils.np_comparison_check(h2o_data3.log(), np.log(np_data3), 10)
pyunit_utils.np_comparison_check(h2o_data3.log10(), np.log10(np_data3), 10)
pyunit_utils.np_comparison_check(h2o_data3.log1p(), np.log1p(np_data3), 10)
pyunit_utils.np_comparison_check(h2o_data3.log2(), np.log2(np_data3), 10)
pyunit_utils.np_comparison_check(h2o_data3.exp(), np.exp(np_data3), 10)
pyunit_utils.np_comparison_check(h2o_data3.expm1(), np.expm1(np_data3), 10)
h2o_val = h2o_data3.gamma()[5,5]
num_val = math.gamma(h2o_data3[5,5])
assert abs(h2o_val - num_val) < max(abs(h2o_val), abs(num_val)) * 1e-6, \
"check unsuccessful! h2o computed {0} and math computed {1}. expected equal gamma values between h2o and " \
"math".format(h2o_val,num_val)
h2o_val = h2o_data3.lgamma()[5,5]
num_val = math.lgamma(h2o_data3[5,5])
assert abs(h2o_val - num_val) < max(abs(h2o_val), abs(num_val)) * 1e-6, \
"check unsuccessful! h2o computed {0} and math computed {1}. expected equal lgamma values between h2o and " \
"math".\
format(h2o_val,num_val)
h2o_val = h2o_data3.digamma()[5,5]
num_val = scipy.special.polygamma(0,h2o_data3[5,5])
assert abs(h2o_val - num_val) < max(abs(h2o_val), abs(num_val)) * 1e-6, \
"check unsuccessful! h2o computed {0} and math computed {1}. expected equal digamma values between h2o and " \
"math"\
.format(h2o_val,num_val)
h2o_val = h2o_data3.trigamma()[5,5]
num_val = float(scipy.special.polygamma(1,h2o_data3[5,5]))
assert abs(h2o_val - num_val) < max(abs(h2o_val), abs(num_val)) * 1e-6, \
"check unsuccessful! h2o computed {0} and math computed {1}. expected equal trigamma values between h2o and " \
"math".format(h2o_val,num_val)
if __name__ == "__main__":
pyunit_utils.standalone_test(expr_math_ops)
else:
expr_math_ops()
|
py | 1a36f4756c20734d310f9a51ee216b0c09f842ce | import log
from machine import I2C
'''
I2C usage example
'''
# Set the log output level
log.basicConfig(level=log.INFO)
i2c_log = log.getLogger("I2C")
I2C_SLAVE_ADDR = 0x1B # i2c device address
WHO_AM_I = bytearray([0x02, 0]) # i2c register address, passed as a buffer; the first value is used, with a length of one byte
data = bytearray([0x12, 0]) # corresponding command to send
i2c_obj = I2C(I2C.I2C0, I2C.STANDARD_MODE) # returns an i2c object
i2c_obj.write(I2C_SLAVE_ADDR, WHO_AM_I, 1, data, 2) # write data
r_data = bytearray(2) # create a 2-byte array to receive the response
i2c_obj.read(I2C_SLAVE_ADDR, WHO_AM_I, 1, r_data, 2, 0) # read
i2c_log.info(r_data[0])
i2c_log.info(r_data[1])
|
py | 1a36f4887071206b1646826e28f7d3be79d5d977 | from composite import *
def test_simple_nested_elements(capfd):
# Creating components
window = GraphicalComposite('Window')
container = GraphicalComposite('Image')
image = GraphicalLeaf('image.jpg')
# Assembling composite tree
window.add(container)
container.add(image)
window.render()
out, _ = capfd.readouterr()
assert out == "Component 'Window' has 'Image'\nComponent 'Image': 'image.jpg'\n"
def test_multiple_nested_elements(capfd):
# Creating components
window = GraphicalComposite('Window')
container = GraphicalComposite('Image')
image = GraphicalLeaf('image.jpg')
panel = GraphicalComposite('Panel')
label = GraphicalComposite('Label')
text = GraphicalLeaf('Hello World!')
# Assembling composite tree
window.add(container)
container.add(image)
window.add(panel)
panel.add(label)
label.add(text)
window.render()
out, _ = capfd.readouterr()
assert out == "Component 'Window' has 'Image'\nComponent 'Image': 'image.jpg'\nComponent 'Window' has 'Panel'\nComponent 'Panel' has 'Label'\nComponent 'Label': 'Hello World!'\n"
|
py | 1a36f57cdf323e86faad2d86f54a171e5e428c33 | ## Cash Machine ##
# Objective: the cash machine will give you the value desired in dollar banknotes
# Input: value you want to withdraw from the cash machine
# Output: banknotes from the cash machine
# 1) Input
# 1.1) Definition of the currency and banknotes
bank_notes = [100, 50, 20, 10, 5, 2, 1]
# 1.2) Definition of the value to be drawn from the cash machine
value = input('Type the value you would like to withdraw: $ ').strip()
# 1.3) Checking if value is an integer
while True:
try:
value = int(value)
except:
value = input('Error found, please type valid value: $ ').strip()
continue
else:
value = int(value)
break
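# For example, withdrawing $ 186 should print one banknote each of $ 100, $ 50,
# $ 20, $ 10, $ 5 and $ 1 (the $ 2 note is skipped because only $ 1 remains).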
# 2) Output
# 2.1) Determination of the number of banknotes
while True:
for note in bank_notes:
if value >= note:
if value % note == 0:
print(f'{int(value / note)} banknotes of $ {note}')
elif value % note != 0:
print(f'{value // note} banknotes of $ {note}')
value = value - int(value / note) * note
if value - note == 0:
break
break |
py | 1a36f58fb8ac7308ec4c59a098f7d98a77fb2fb1 | #
# Copyright (C) Foundry 2020
#
import base64
import binascii
import hashlib
import hmac
import json
import time
import requests
from collections import OrderedDict
from datetime import datetime, timedelta
from typing import Callable, Dict, List, Tuple, Optional
class flix:
"""Flix will handle the login and expose functions to get,
create shows etc.
"""
def __init__(self):
self.reset()
def authenticate(self, hostname: str, login: str, password: str) -> Dict:
"""authenticate will authenticate a user
Arguments:
hostname {str} -- Hostname of the server
login {str} -- Login of the user
password {str} -- Password of the user
Returns:
Dict -- Authenticate
"""
authdata = base64.b64encode((login + ':' + password).encode('UTF-8'))
response = None
header = {
'Content-Type': 'application/json',
'Authorization': 'Basic ' + authdata.decode('UTF-8'),
}
try:
r = requests.post(hostname + '/authenticate', headers=header,
verify=False)
r.raise_for_status()
response = json.loads(r.content)
self.hostname = hostname
self.login = login
self.password = password
except requests.exceptions.RequestException as err:
print('Authentication failed', err)
return None
self.key = response['id']
self.secret = response['secret_access_key']
self.expiry = datetime.strptime(
response['expiry_date'].split('.')[0], '%Y-%m-%dT%H:%M:%S')
return response
def __get_token(self) -> Tuple[str, str]:
"""__get_token will request a token and will reset it
if it is too close to the expiry date
Returns:
Tuple[str, str] -- Key and Secret
"""
if (self.key is None or self.secret is None or self.expiry is None or
datetime.now() + timedelta(hours=2) > self.expiry):
authentificationToken = self.authenticate(
self.hostname, self.login, self.password)
auth_id = authentificationToken['id']
auth_secret_token = authentificationToken['secret_access_key']
auth_expiry_date = authentificationToken['expiry_date']
auth_expiry_date = auth_expiry_date.split('.')[0]
self.key = auth_id
self.secret = auth_secret_token
self.expiry = datetime.strptime(auth_expiry_date,
'%Y-%m-%dT%H:%M:%S')
return self.key, self.secret
def __fn_sign(self,
access_key_id: str,
secret_access_key: str,
url: str,
content: object,
http_method: str,
content_type: str,
dt: str) -> str:
"""After being logged in, you will have a token.
Arguments:
access_key_id {str} -- Access key ID from your token
secret_access_key {str} -- Secret access key from your token
url {str} -- Url of the request
content {object} -- Content of your request
http_method {str} -- Http Method of your request
content_type {str} -- Content Type of your request
dt {str} -- Datetime
Raises:
ValueError: 'You must specify a secret_access_key'
Returns:
str -- Signed header
"""
raw_string = http_method.upper() + '\n'
content_md5 = ''
if content:
if isinstance(content, str):
content_md5 = hashlib.md5(content.encode('utf-8')).hexdigest()
elif isinstance(content, bytes):
hx = binascii.hexlify(content)
content_md5 = hashlib.md5(hx).hexdigest()
elif isinstance(content, dict):
jsoned = json.dumps(content)
content_md5 = hashlib.md5(jsoned.encode('utf-8')).hexdigest()
if content_md5 != '':
raw_string += content_md5 + '\n'
raw_string += content_type + '\n'
else:
raw_string += '\n\n'
raw_string += dt.isoformat().split('.')[0] + 'Z' + '\n'
url_bits = url.split('?')
url_without_query_params = url_bits[0]
raw_string += url_without_query_params
if len(secret_access_key) == 0:
raise ValueError('You must specify a secret_access_key')
digest_created = base64.b64encode(
hmac.new(secret_access_key.encode('utf-8'),
raw_string.encode('utf-8'),
digestmod=hashlib.sha256).digest()
)
return 'FNAUTH ' + access_key_id + ':' + digest_created.decode('utf-8')
def __get_headers(
self, content: object, url: str, method: str = 'POST') -> object:
"""__get_headers will generate the header to make any request
containing the authorization with signature
Arguments:
content {object} -- Content of the request
url {str} -- Url to make the request
method {str} -- Request method (default: {'POST'})
Returns:
object -- Headers
"""
dt = datetime.utcnow()
key, secret = self.__get_token()
return {
'Authorization': self.__fn_sign(
key,
secret,
url,
content,
method,
'application/json',
dt),
'Content-Type': 'application/json',
'Date': dt.strftime('%a, %d %b %Y %H:%M:%S GMT'),
}
def reset(self):
"""reset will reset the user info
"""
self.hostname = None
self.secret = None
self.expiry = None
self.login = None
self.password = None
self.key = None
# Returns sequence revision by given ID
def get_sequence_revision_by_id(self,
show_id: int,
episode_id: int,
sequence_id: int,
revision_id: int
) -> Dict:
"""get_sequence_revision_by_id retrieves sequence revision by given ID
Arguments:
show_id {int} -- Show ID
episode_id {int} -- Episode ID
sequence_id {int} -- Sequence ID
revision_id {int} -- Revision ID
Returns:
Dict -- Sequence revision
"""
url = '/show/{0}/sequence/{1}/revision/{2}'.format(
show_id, sequence_id, revision_id)
if episode_id is not None:
url = '/show/{0}/episode/{1}/sequence/{2}/revision/{3}'.format(
show_id, episode_id, sequence_id, revision_id)
headers = self.__get_headers(None, url, 'GET')
response = None
try:
r = requests.get(self.hostname + url, headers=headers,
verify=False)
response = json.loads(r.content)
if r.status_code == 404:
print('Could not retrieve sequence revision',
response.get('message'))
return None
except requests.exceptions.RequestException as err:
if r is not None and r.status_code == 401:
print('Your token has been revoked')
else:
print('Could not retrieve sequence revision', err)
return None
return response
# Returns the list of panels in the sequence revision
def get_sequence_revision_panels(self,
show_id: int,
episode_id: int,
sequence_id: int,
revision_id: int
) -> Dict:
"""get_sequence_revision_panels retrieves the list of panels in given sequence revision
Arguments:
show_id {int} -- Show ID
episode_id {int} -- Episode ID
sequence_id {int} -- Sequence ID
revision_id {int} -- Revision ID
Returns:
Dict -- Panels
"""
url = '/show/{0}/sequence/{1}/revision/{2}/panels'.format(
show_id, sequence_id, revision_id)
if episode_id is not None:
url = '/show/{0}/episode/{1}/sequence/{2}/revision/{3}/panels'.format(
show_id, episode_id, sequence_id, revision_id)
headers = self.__get_headers(None, url, 'GET')
response = None
try:
r = requests.get(self.hostname + url, headers=headers,
verify=False)
response = json.loads(r.content)
response = response.get('panels')
if r.status_code == 404:
print('Could not retrieve sequence revision panels',
response.get('message'))
return None
except requests.exceptions.RequestException as err:
if r is not None and r.status_code == 401:
print('Your token has been revoked')
else:
print('Could not retrieve sequence revision panels', err)
return None
return response
# Returns list of dialogues in the panel
def get_panel_dialogues(self,
show_id: int,
episode_id: int,
sequence_id: int,
panel_id: int
) -> Dict:
"""get_panel_dialogues retrieves the list of dialogues in given panel ID
Arguments:
show_id {int} -- Show ID
episode_id {int} -- Episode ID
sequence_id {int} -- Sequence ID
panel_id {int} -- Panel ID
Returns:
Dict -- Dialogues
"""
url = '/show/{0}/sequence/{1}/panel/{2}/dialogues'.format(
show_id, sequence_id, panel_id)
if episode_id is not None:
url = '/show/{0}/episode/{1}/sequence/{2}/panel/{3}/dialogues'.format(
show_id, episode_id, sequence_id, panel_id)
headers = self.__get_headers(None, url, 'GET')
response = None
try:
r = requests.get(self.hostname + url, headers=headers,
verify=False)
response = json.loads(r.content)
response = response.get('dialogues')
if r.status_code == 404:
print('Could not retrieve panel dialogues',
response.get('message'))
return None
except requests.exceptions.RequestException as err:
if r is not None and r.status_code == 401:
print('Your token has been revoked')
else:
print('Could not retrieve panel dialogues', err)
return None
return response
# Returns formatted panel object as revisioned panels for POST request
def format_panel_for_revision(self, panel: Dict, dialogue: Dict) -> Dict:
"""format_panel_for_revision will format the panels as revisioned panels
Arguments:
panels {List} -- List of panels
Returns:
List -- Formatted list of panels
"""
revisioned_panel = {
'dialogue': dialogue,
'duration': panel.get('duration'),
'id': panel.get('panel_id'),
'revision_number': panel.get('revision_number')
}
return revisioned_panel
# Makes POST request to create a new sequence revision
def create_new_sequence_revision(
self, show_id: int, episode_id: int, sequence_id: int, revisioned_panels: List[Dict], revision: Dict,
comment: Optional[str]) -> Dict:
"""new_sequence_revision will create a new sequence revision
Arguments:
show_id {int} -- Show ID
episode_id {int} -- Episode ID
sequence_id {int} -- Sequence ID
revisioned_panels {List} -- List of revisioned panels
revision {Object} -- Sequence Revision
comment {str} -- Comment (default: {'Auto Dialogue Relink'})
Returns:
Dict -- Sequence Revision
"""
if not comment:
comment = 'Auto Dialogue Relink'
url = '/show/{0}/sequence/{1}/revision'.format(show_id, sequence_id)
if episode_id is not None:
url = '/show/{0}/episode/{1}/sequence/{2}/revision'.format(
show_id, episode_id, sequence_id)
meta = revision.get('meta_data', {})
content = {
'comment': comment,
'imported': False,
'meta_data': {
'movie_asset_id': meta.get('movie_asset_id', None),
'audio_asset_id': meta.get('audio_asset_id', None),
'annotations': meta.get('annotations', []),
'audio_timings': meta.get('audio_timings', None),
'highlights': meta.get('highlights', None),
'markers': meta.get('markers', None)
},
'revisioned_panels': revisioned_panels
}
headers = self.__get_headers(content, url, 'POST')
response = None
try:
r = requests.post(self.hostname + url, headers=headers,
data=json.dumps(content), verify=False)
response = json.loads(r.content)
except BaseException:
print('Could not create sequence revision')
return None
return response
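# A hedged usage sketch of the methods above (hostname, credentials and IDs are
# placeholders, not real values):
#
#     fx = flix()
#     if fx.authenticate('https://flix.example.com:8080', 'user', 'password'):
#         panels = fx.get_sequence_revision_panels(
#             show_id=1, episode_id=None, sequence_id=2, revision_id=3)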
|
py | 1a36f618cf9694af1068c6cff4e6e3c30230d3e7 | from mindsdb.api.mongo.classes import Responder
import mindsdb.api.mongo.functions as helpers
class Responce(Responder):
when = {'ismaster': helpers.is_true}
result = {
"ismaster": True,
"minWireVersion": 0,
"maxWireVersion": 9,
"ok": 1
}
responder = Responce()
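# A handshake document such as {'ismaster': 1} is expected to satisfy the `when`
# predicate above, and this responder then replies with the fixed `result`.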
|
py | 1a36f64f382769b14333fd0b1cd2b17bd82796d2 | # coding: utf-8
from __future__ import unicode_literals
import base64
import hashlib
import json
import random
import re
from .common import InfoExtractor
from ..compat import (
compat_HTTPError,
compat_str,
)
from ..utils import (
ExtractorError,
float_or_none,
int_or_none,
orderedSet,
str_or_none,
)
class GloboIE(InfoExtractor):
_VALID_URL = r"(?:globo:|https?://.+?\.globo\.com/(?:[^/]+/)*(?:v/(?:[^/]+/)?|videos/))(?P<id>\d{7,})"
_NETRC_MACHINE = "globo"
_TESTS = [
{
"url": "http://g1.globo.com/carros/autoesporte/videos/t/exclusivos-do-g1/v/mercedes-benz-gla-passa-por-teste-de-colisao-na-europa/3607726/",
"md5": "b3ccc801f75cd04a914d51dadb83a78d",
"info_dict": {
"id": "3607726",
"ext": "mp4",
"title": "Mercedes-Benz GLA passa por teste de colisão na Europa",
"duration": 103.204,
"uploader": "Globo.com",
"uploader_id": "265",
},
},
{
"url": "http://globoplay.globo.com/v/4581987/",
"md5": "f36a1ecd6a50da1577eee6dd17f67eff",
"info_dict": {
"id": "4581987",
"ext": "mp4",
"title": "Acidentes de trânsito estão entre as maiores causas de queda de energia em SP",
"duration": 137.973,
"uploader": "Rede Globo",
"uploader_id": "196",
},
},
{
"url": "http://canalbrasil.globo.com/programas/sangue-latino/videos/3928201.html",
"only_matching": True,
},
{
"url": "http://globosatplay.globo.com/globonews/v/4472924/",
"only_matching": True,
},
{
"url": "http://globotv.globo.com/t/programa/v/clipe-sexo-e-as-negas-adeus/3836166/",
"only_matching": True,
},
{
"url": "http://globotv.globo.com/canal-brasil/sangue-latino/t/todos-os-videos/v/ator-e-diretor-argentino-ricado-darin-fala-sobre-utopias-e-suas-perdas/3928201/",
"only_matching": True,
},
{
"url": "http://canaloff.globo.com/programas/desejar-profundo/videos/4518560.html",
"only_matching": True,
},
{
"url": "globo:3607726",
"only_matching": True,
},
]
def _real_initialize(self):
email, password = self._get_login_info()
if email is None:
return
try:
glb_id = (
self._download_json(
"https://login.globo.com/api/authentication",
None,
data=json.dumps(
{
"payload": {
"email": email,
"password": password,
"serviceId": 4654,
},
}
).encode(),
headers={
"Content-Type": "application/json; charset=utf-8",
},
)
or {}
).get("glbId")
if glb_id:
self._set_cookie(".globo.com", "GLBID", glb_id)
except ExtractorError as e:
if isinstance(e.cause, compat_HTTPError) and e.cause.code == 401:
resp = self._parse_json(e.cause.read(), None)
raise ExtractorError(
resp.get("userMessage") or resp["id"], expected=True
)
raise
def _real_extract(self, url):
video_id = self._match_id(url)
video = self._download_json(
"http://api.globovideos.com/videos/%s/playlist" % video_id, video_id
)["videos"][0]
if video.get("encrypted") is True:
raise ExtractorError("This video is DRM protected.", expected=True)
title = video["title"]
formats = []
subtitles = {}
for resource in video["resources"]:
resource_id = resource.get("_id")
resource_url = resource.get("url")
resource_type = resource.get("type")
if (
not resource_url
or (resource_type == "media" and not resource_id)
or resource_type not in ("subtitle", "media")
):
continue
if resource_type == "subtitle":
subtitles.setdefault(resource.get("language") or "por", []).append(
{
"url": resource_url,
}
)
continue
security = self._download_json(
"http://security.video.globo.com/videos/%s/hash" % video_id,
video_id,
"Downloading security hash for %s" % resource_id,
query={
"player": "desktop",
"version": "5.19.1",
"resource_id": resource_id,
},
)
security_hash = security.get("hash")
if not security_hash:
message = security.get("message")
if message:
raise ExtractorError(
"%s returned error: %s" % (self.IE_NAME, message), expected=True
)
continue
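# The security hash encodes a 2-character code, a timestamp and an md5 digest;
# below, a padded signing time plus the "0xAC10FD" salt are md5-hashed and
# urlsafe-base64-encoded to build the signed resource URL.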
hash_code = security_hash[:2]
padding = "%010d" % random.randint(1, 10000000000)
if hash_code in ("04", "14"):
received_time = security_hash[3:13]
received_md5 = security_hash[24:]
hash_prefix = security_hash[:23]
elif hash_code in ("02", "12", "03", "13"):
received_time = security_hash[2:12]
received_md5 = security_hash[22:]
padding += "1"
hash_prefix = "05" + security_hash[:22]
padded_sign_time = compat_str(int(received_time) + 86400) + padding
md5_data = (received_md5 + padded_sign_time + "0xAC10FD").encode()
signed_md5 = (
base64.urlsafe_b64encode(hashlib.md5(md5_data).digest())
.decode()
.strip("=")
)
signed_hash = hash_prefix + padded_sign_time + signed_md5
signed_url = "%s?h=%s&k=html5&a=%s&u=%s" % (
resource_url,
signed_hash,
"F" if video.get("subscriber_only") else "A",
security.get("user") or "",
)
if resource_id.endswith("m3u8") or resource_url.endswith(".m3u8"):
formats.extend(
self._extract_m3u8_formats(
signed_url,
resource_id,
"mp4",
entry_protocol="m3u8_native",
m3u8_id="hls",
fatal=False,
)
)
elif resource_id.endswith("mpd") or resource_url.endswith(".mpd"):
formats.extend(
self._extract_mpd_formats(
signed_url, resource_id, mpd_id="dash", fatal=False
)
)
elif resource_id.endswith("manifest") or resource_url.endswith("/manifest"):
formats.extend(
self._extract_ism_formats(
signed_url, resource_id, ism_id="mss", fatal=False
)
)
else:
formats.append(
{
"url": signed_url,
"format_id": "http-%s" % resource_id,
"height": int_or_none(resource.get("height")),
}
)
self._sort_formats(formats)
duration = float_or_none(video.get("duration"), 1000)
uploader = video.get("channel")
uploader_id = str_or_none(video.get("channel_id"))
return {
"id": video_id,
"title": title,
"duration": duration,
"uploader": uploader,
"uploader_id": uploader_id,
"formats": formats,
"subtitles": subtitles,
}
class GloboArticleIE(InfoExtractor):
_VALID_URL = r"https?://.+?\.globo\.com/(?:[^/]+/)*(?P<id>[^/.]+)(?:\.html)?"
_VIDEOID_REGEXES = [
r'\bdata-video-id=["\'](\d{7,})',
r'\bdata-player-videosids=["\'](\d{7,})',
r'\bvideosIDs\s*:\s*["\']?(\d{7,})',
r'\bdata-id=["\'](\d{7,})',
r'<div[^>]+\bid=["\'](\d{7,})',
]
_TESTS = [
{
"url": "http://g1.globo.com/jornal-nacional/noticia/2014/09/novidade-na-fiscalizacao-de-bagagem-pela-receita-provoca-discussoes.html",
"info_dict": {
"id": "novidade-na-fiscalizacao-de-bagagem-pela-receita-provoca-discussoes",
"title": "Novidade na fiscalização de bagagem pela Receita provoca discussões",
"description": "md5:c3c4b4d4c30c32fce460040b1ac46b12",
},
"playlist_count": 1,
},
{
"url": "http://g1.globo.com/pr/parana/noticia/2016/09/mpf-denuncia-lula-marisa-e-mais-seis-na-operacao-lava-jato.html",
"info_dict": {
"id": "mpf-denuncia-lula-marisa-e-mais-seis-na-operacao-lava-jato",
"title": "Lula era o 'comandante máximo' do esquema da Lava Jato, diz MPF",
"description": "md5:8aa7cc8beda4dc71cc8553e00b77c54c",
},
"playlist_count": 6,
},
{
"url": "http://gq.globo.com/Prazeres/Poder/noticia/2015/10/all-o-desafio-assista-ao-segundo-capitulo-da-serie.html",
"only_matching": True,
},
{
"url": "http://gshow.globo.com/programas/tv-xuxa/O-Programa/noticia/2014/01/xuxa-e-junno-namoram-muuuito-em-luau-de-zeze-di-camargo-e-luciano.html",
"only_matching": True,
},
{
"url": "http://oglobo.globo.com/rio/a-amizade-entre-um-entregador-de-farmacia-um-piano-19946271",
"only_matching": True,
},
]
@classmethod
def suitable(cls, url):
return (
False if GloboIE.suitable(url) else super(GloboArticleIE, cls).suitable(url)
)
def _real_extract(self, url):
display_id = self._match_id(url)
webpage = self._download_webpage(url, display_id)
video_ids = []
for video_regex in self._VIDEOID_REGEXES:
video_ids.extend(re.findall(video_regex, webpage))
entries = [
self.url_result("globo:%s" % video_id, GloboIE.ie_key())
for video_id in orderedSet(video_ids)
]
title = self._og_search_title(webpage, fatal=False)
description = self._html_search_meta("description", webpage)
return self.playlist_result(entries, display_id, title, description)
|
py | 1a36f7b9d0484e125f3cd6c299ece0b094b5aa28 | # %%
import numpy as np
import pandas as pd
import tensorflow.keras as keras
from matplotlib import pyplot as plt
pd.options.display.max_rows = 10
pd.options.display.float_format = "{:.1f}".format
# %% Load datasets from Internet
train_df: pd.DataFrame = pd.read_csv(
"https://download.mlcc.google.com/mledu-datasets/california_housing_train.csv"
)
test_df: pd.DataFrame = pd.read_csv(
"https://download.mlcc.google.com/mledu-datasets/california_housing_test.csv"
)
# %%
scale_label = 1000.
train_df['median_house_value'] /= scale_label
test_df['median_house_value'] /= scale_label
# %%
def build_model(learning_rate: float):
model = keras.models.Sequential()
model.add(keras.layers.Input(shape=(1, )))
model.add(keras.layers.Dense(units=1))
model.compile(
optimizer=keras.optimizers.RMSprop(learning_rate=learning_rate),
loss=keras.losses.MeanSquaredError(),
metrics=[keras.metrics.RootMeanSquaredError()])
return model
def train_model(model: keras.Model,
df: pd.DataFrame,
feature,
label,
epochs: int,
batch_size=None,
validation_split=0.1):
history = model.fit(
x=df[feature],
y=df[label],
batch_size=batch_size,
epochs=epochs,
validation_split=validation_split,
)
params = model.get_weights()
weight = params[0]
bias = params[1]
hist = pd.DataFrame(history.history)
rmse = hist["root_mean_squared_error"]
return history.epoch, rmse, history.history
# %%
def plot_loss_curve(epochs, mae_training, mae_validation):
plt.figure()
plt.xlabel("Epoch")
plt.ylabel("RMSE")
plt.plot(epochs[1:], mae_training[1:], label="training loss")
plt.plot(epochs[1:], mae_validation[1:], label="validation loss")
plt.legend()
# We're not going to plot the first epoch, since the loss on the first epoch
# is often substantially greater than the loss for other epochs.
merged_mae_lists = mae_training[1:] + mae_validation[1:]
highest_loss = max(merged_mae_lists)
lowest_loss = min(merged_mae_lists)
delta = highest_loss - lowest_loss
print(delta)
top_of_y_axis = highest_loss + (delta * 0.05)
bottom_of_y_axis = lowest_loss - (delta * 0.05)
plt.ylim([bottom_of_y_axis, top_of_y_axis])
plt.show()
# %%
learning_rate = 0.05
epoch = 30
batch_size = 100
validation_split = 0.2
my_feature = "median_income"
my_label = "median_house_value"
my_model = None
my_model = build_model(learning_rate)
epochs, rmse, history = train_model(my_model, train_df, my_feature, my_label,
epoch, batch_size, validation_split)
plot_loss_curve(epochs, history["root_mean_squared_error"],
history["val_root_mean_squared_error"])
# %%
# No matter how you split the training set and the validation set, the loss curves differ significantly. Evidently, the data in the training set isn't similar enough to the data in the validation set. Counterintuitive? Yes, but this problem is actually pretty common in machine learning.
# Your task is to determine why the loss curves aren't highly similar. As with most issues in machine learning, the problem is rooted in the data itself. To solve this mystery of why the training set and validation set aren't almost identical, write a line or two of pandas code in the following code cell. Here are a couple of hints:
# The previous code cell split the original training set into:
# a reduced training set (the original training set - the validation set)
# the validation set
# By default, the pandas head method outputs the first 5 rows of the DataFrame. To see more of the training set, specify the n argument to head and assign a large positive integer to n.
train_df.head(1000)
# %%
# shuffle data before splitting
shuffled_train_df = train_df.reindex(np.random.permutation(train_df.index))
epochs, rmse, history = train_model(my_model, shuffled_train_df, my_feature,
my_label, epoch, batch_size,
validation_split)
plot_loss_curve(epochs, history["root_mean_squared_error"],
history["val_root_mean_squared_error"])
# %%
|
py | 1a36faaefaa573e7b1ce3344eb00b70223a0761f | import glob
import os
from torch.utils.data import Dataset, DataLoader
from natsort import natsorted
import numpy as np
import cv2
import torch
import re
from src.utils.io import path_leaf, load_image
fileExtensions = ["jpg", "jpeg", "png", "tiff"]
class ImagesDataset(Dataset):
"""
This dataset returns one image at a time. It would probably be more relevant to return, each time, an image
together with its associated phase, but that requires thinking through how the phase is handled. One way to
do it would be to store that information in a file that pandas can read. For each image
"""
def __init__(self, groundtruth_list, path_weights, path_img, shape=(512, 512), RNN_len=100, recursive=True): # pass the groundtruth table or the path
"""
Possibly to be completed to take as input the path to the phases (groundtruth) file
:param path_img:
:param shape:
:param recursive:
"""
super(ImagesDataset, self).__init__()
if isinstance(shape, int):
shape = (shape, shape)
self.path_img = path_img # path to the image FOLDER
self.shape = shape
self.path_weights = path_weights
self.da_core = None # Data augmentation instance. Only initialized if required
self.groundtruth_list = groundtruth_list
self.RNN_len = RNN_len
self.img_filepath = []
for file in os.listdir(self.path_img):
self.img_filepath.extend(glob.glob(self.path_img + file + '/' + '*.pt', recursive=recursive)) #was .pt
img_filenames = [path_leaf(path).split('.')[0] for path in self.img_filepath] # list of all the images ['frame0', 'frame1', ...]
self.img_filepath = np.asarray(self.img_filepath)
img_argsort = np.argsort(img_filenames)
self.img_filepath = self.img_filepath[img_argsort] # array of all the paths (\data01\frameX.jpg), not in order
self.img_filepath = np.array(natsorted(self.img_filepath))
def set_data_augmentation_core(self, da_core):
# self.da_core = da_core
pass
def subset(self, indices):
self.img_filepath = natsorted(self.img_filepath[indices])
self.img_filepath = np.array(self.img_filepath)
def __len__(self):
return len(self.img_filepath)
# divide and multiply in getitem
# - RNN_len
# padding
def __getitem__(self, item):
"""
Item is an index (integer); the dataset returns the image and the corresponding groundtruth
:param item:
:return:
"""
video = self.video_number(self.img_filepath[item])
sequence_img = torch.FloatTensor()
seq_len = 0
if len(self.img_filepath[item:]) > self.RNN_len:
for tensor in self.img_filepath[item : item + self.RNN_len]:
if self.video_number(tensor) == video:
seq_len += 1
img = torch.load(tensor, map_location = torch.device('cpu')) # the tensor path contains both the folder number and the frame number
img = img.reshape(1,img.shape[-1])
sequence_img = torch.cat((sequence_img, img), 0)
else:
break
sequence_phase = self.read_phase(self.img_filepath[item : item+seq_len])
else:
for tensor in self.img_filepath[item:]:
if self.video_number(tensor) == video:
img = torch.load(tensor, map_location = torch.device('cpu'))
img = img.reshape(1,img.shape[-1])
sequence_img = torch.cat((sequence_img, img), 0) #img.logits
else:
break
sequence_phase = self.read_phase(self.img_filepath[item:])
seq_len = len(sequence_img)
return self.pad_seq(sequence_img), self.pad_seq(sequence_phase), seq_len
def pad_seq(self, array):
shape = array.shape
dtype = array.dtype
pad = self.RNN_len - shape[0]
padding = [(0, pad)] + [(0, 0) for _ in shape[1:]]
padded_array = np.pad(array.detach(), padding, mode='constant', constant_values=-1)
if dtype==int:
return padded_array.astype(dtype)
else:
return padded_array.astype(np.float32)
# padding in getitem
# padding in the trainer
# ignore the padded values in CrossEntropyLoss
def get_classes_weight(self):
""" Fonction à implémenter potentiellement: elle charge ou calcul une pondération par classe permettant de les
équilibrer.
J'ai mis du pseudo-code à compléter selon le besoin, cela dépend de l'utilisation
"""
classes_weight_path = os.path.join(self.path_weights, 'classes_weight.npy') # chemin de sauvergarde des weights
if os.path.exists(classes_weight_path):
print("Loading existing weights for class balance from", classes_weight_path)
class_weights = np.load(classes_weight_path)
else:
print("Building weights for class balance")
classes_counts = np.zeros(128,
dtype=int) # Arbitrary number because the number of classes is unknown at this point
for i in range(len(self.img_filepath)):
phase = self.read_phase(self.img_filepath[i])
u, counts = np.unique(phase, return_counts=True)
classes_counts[u] += counts
classes_counts = classes_counts[
:np.max(np.nonzero(classes_counts)) + 1] # Keep only the classes that have a count
n_classes = len(classes_counts)
n_samples = classes_counts.sum()
class_weights = (n_samples / (n_classes * classes_counts + 1e-8)).astype(np.float32)
np.save(classes_weight_path, class_weights)
print('Weights stored in ', classes_weight_path)
return class_weights
def read_phase(self, filepaths):
Phases = []
for filepath in filepaths:
#find the number X of the video and the number Y of the image, saved in a file dataX with the name frameY
temp = re.findall(r'\d+', filepath)
res = list(map(int, temp))
X = res[-2] - 1 # indices of the groundtruth list start at 0 while the dataX files start at 1
Y = res[-1]
groundtruth = self.groundtruth_list[X]
B = (groundtruth.at[Y,"Frame,Steps"]) # groundtruth is a pandas DataFrame gathering all the Frame,Steps information
temp = re.findall(r'\d+', B)
res = list(map(int, temp)) #getting numbers from the string B = "frame_number,step_number"
#if there was no Steps value specified, then there is no surgical phase on the image
if len(res) == 2:
Phase = res[1]
else:
Phase = 0
Phases.append(Phase)
return torch.LongTensor(Phases)
def video_number(self,filepath):
temp = re.findall(r'\d+', filepath)
res = list(map(int, temp))
return res[-2]
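# A hedged usage sketch (paths and arguments are placeholders) showing what
# __getitem__ yields: a padded feature sequence, a padded phase sequence and the
# true sequence length:
#
#     dataset = ImagesDataset(groundtruth_list, path_weights, path_img, RNN_len=100)
#     loader = DataLoader(dataset, batch_size=4, shuffle=False)
#     for seq_img, seq_phase, seq_len in loader:
#         pass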
|
py | 1a36fc1ab88b00a2288d1f39fc2f92e65acfd864 | """Model-Agnostic Meta-Learning (MAML) algorithm implementation for RL."""
# yapf: disable
import collections
import copy
from dowel import tabular
import numpy as np
import torch
from garage import (_Default, EpisodeBatch, log_multitask_performance,
make_optimizer)
from garage.np import discount_cumsum
from garage.torch import update_module_params
from garage.torch.optimizers import (ConjugateGradientOptimizer,
DifferentiableSGD)
# yapf: enable
class MAML:
"""Model-Agnostic Meta-Learning (MAML).
Args:
inner_algo (garage.torch.algos.VPG): The inner algorithm used for
computing loss.
env (Environment): An environment.
policy (garage.torch.policies.Policy): Policy.
sampler (garage.sampler.Sampler): Sampler.
task_sampler (garage.experiment.TaskSampler): Task sampler.
meta_optimizer (Union[torch.optim.Optimizer, tuple]):
Type of optimizer.
This can be an optimizer type such as `torch.optim.Adam` or a tuple
of type and dictionary, where dictionary contains arguments to
initialize the optimizer e.g. `(torch.optim.Adam, {'lr' : 1e-3})`.
meta_batch_size (int): Number of tasks sampled per batch.
inner_lr (float): Adaptation learning rate.
outer_lr (float): Meta policy learning rate.
num_grad_updates (int): Number of adaptation gradient steps.
meta_evaluator (MetaEvaluator): A meta evaluator for meta-testing. If
None, don't do meta-testing.
evaluate_every_n_epochs (int): Do meta-testing every this epochs.
"""
def __init__(self,
inner_algo,
env,
policy,
sampler,
task_sampler,
meta_optimizer,
meta_batch_size=40,
inner_lr=0.1,
outer_lr=1e-3,
num_grad_updates=1,
meta_evaluator=None,
evaluate_every_n_epochs=1):
self._sampler = sampler
self.max_episode_length = inner_algo.max_episode_length
self._meta_evaluator = meta_evaluator
self._policy = policy
self._env = env
self._task_sampler = task_sampler
self._value_function = copy.deepcopy(inner_algo._value_function)
self._initial_vf_state = self._value_function.state_dict()
self._num_grad_updates = num_grad_updates
self._meta_batch_size = meta_batch_size
self._inner_algo = inner_algo
self._inner_optimizer = DifferentiableSGD(self._policy, lr=inner_lr)
self._meta_optimizer = make_optimizer(meta_optimizer,
module=policy,
lr=_Default(outer_lr),
eps=_Default(1e-5))
self._evaluate_every_n_epochs = evaluate_every_n_epochs
def train(self, trainer):
"""Obtain samples and start training for each epoch.
Args:
trainer (Trainer): Gives the algorithm access to
:method:`~Trainer.step_epochs()`, which provides services
such as snapshotting and sampler control.
Returns:
float: The average return in last epoch cycle.
"""
last_return = None
for _ in trainer.step_epochs():
all_samples, all_params = self._obtain_samples(trainer)
last_return = self._train_once(trainer, all_samples, all_params)
trainer.step_itr += 1
return last_return
def _train_once(self, trainer, all_samples, all_params):
"""Train the algorithm once.
Args:
trainer (Trainer): The experiment runner.
all_samples (list[list[_MAMLEpisodeBatch]]): A two
dimensional list of _MAMLEpisodeBatch of size
[meta_batch_size * (num_grad_updates + 1)]
all_params (list[dict]): A list of named parameter dictionaries.
Each dictionary contains key value pair of names (str) and
parameters (torch.Tensor).
Returns:
float: Average return.
"""
itr = trainer.step_itr
old_theta = dict(self._policy.named_parameters())
kl_before = self._compute_kl_constraint(all_samples,
all_params,
set_grad=False)
meta_objective = self._compute_meta_loss(all_samples, all_params)
self._meta_optimizer.zero_grad()
meta_objective.backward()
self._meta_optimize(all_samples, all_params)
# Log
loss_after = self._compute_meta_loss(all_samples,
all_params,
set_grad=False)
kl_after = self._compute_kl_constraint(all_samples,
all_params,
set_grad=False)
with torch.no_grad():
policy_entropy = self._compute_policy_entropy(
[task_samples[0] for task_samples in all_samples])
average_return = self._log_performance(
itr, all_samples, meta_objective.item(), loss_after.item(),
kl_before.item(), kl_after.item(),
policy_entropy.mean().item())
if self._meta_evaluator and itr % self._evaluate_every_n_epochs == 0:
self._meta_evaluator.evaluate(self)
update_module_params(self._old_policy, old_theta)
return average_return
def _train_value_function(self, paths):
"""Train the value function.
Args:
paths (list[dict]): A list of collected paths.
Returns:
torch.Tensor: Calculated mean scalar value of value function loss
(float).
"""
# MAML resets a value function to its initial state before training.
self._value_function.load_state_dict(self._initial_vf_state)
obs = np.concatenate([path['observations'] for path in paths], axis=0)
returns = np.concatenate([path['returns'] for path in paths])
obs = torch.Tensor(obs)
returns = torch.Tensor(returns)
vf_loss = self._value_function.compute_loss(obs, returns)
# pylint: disable=protected-access
self._inner_algo._vf_optimizer.zero_grad()
vf_loss.backward()
# pylint: disable=protected-access
self._inner_algo._vf_optimizer.step()
return vf_loss
def _obtain_samples(self, trainer):
"""Obtain samples for each task before and after the fast-adaptation.
Args:
trainer (Trainer): A trainer instance to obtain samples.
Returns:
tuple: Tuple of (all_samples, all_params).
all_samples (list[_MAMLEpisodeBatch]): A list of size
[meta_batch_size * (num_grad_updates + 1)]
all_params (list[dict]): A list of named parameter
dictionaries.
"""
tasks = self._task_sampler.sample(self._meta_batch_size)
all_samples = [[] for _ in range(len(tasks))]
all_params = []
theta = dict(self._policy.named_parameters())
for i, env_up in enumerate(tasks):
for j in range(self._num_grad_updates + 1):
episodes = trainer.obtain_episodes(trainer.step_itr,
env_update=env_up)
batch_samples = self._process_samples(episodes)
all_samples[i].append(batch_samples)
# The last iteration does only sampling but no adapting
if j < self._num_grad_updates:
# A grad need to be kept for the next grad update
# Except for the last grad update
require_grad = j < self._num_grad_updates - 1
self._adapt(batch_samples, set_grad=require_grad)
all_params.append(dict(self._policy.named_parameters()))
# Restore to pre-updated policy
update_module_params(self._policy, theta)
return all_samples, all_params
def _adapt(self, batch_samples, set_grad=True):
"""Performs one MAML inner step to update the policy.
Args:
batch_samples (_MAMLEpisodeBatch): Samples data for one
task and one gradient step.
set_grad (bool): if False, update policy parameters in-place.
Else, allow taking gradient of functions of updated parameters
with respect to pre-updated parameters.
"""
# pylint: disable=protected-access
loss = self._inner_algo._compute_loss(*batch_samples[1:])
# Update policy parameters with one SGD step
self._inner_optimizer.zero_grad()
loss.backward(create_graph=set_grad)
with torch.set_grad_enabled(set_grad):
self._inner_optimizer.step()
def _meta_optimize(self, all_samples, all_params):
if isinstance(self._meta_optimizer, ConjugateGradientOptimizer):
self._meta_optimizer.step(
f_loss=lambda: self._compute_meta_loss(
all_samples, all_params, set_grad=False),
f_constraint=lambda: self._compute_kl_constraint(
all_samples, all_params))
else:
self._meta_optimizer.step(lambda: self._compute_meta_loss(
all_samples, all_params, set_grad=False))
def _compute_meta_loss(self, all_samples, all_params, set_grad=True):
"""Compute loss to meta-optimize.
Args:
all_samples (list[list[_MAMLEpisodeBatch]]): A two
dimensional list of _MAMLEpisodeBatch of size
[meta_batch_size * (num_grad_updates + 1)]
all_params (list[dict]): A list of named parameter dictionaries.
Each dictionary contains key value pair of names (str) and
parameters (torch.Tensor).
set_grad (bool): Whether to enable gradient calculation or not.
Returns:
torch.Tensor: Calculated mean value of loss.
"""
theta = dict(self._policy.named_parameters())
old_theta = dict(self._old_policy.named_parameters())
losses = []
for task_samples, task_params in zip(all_samples, all_params):
for i in range(self._num_grad_updates):
require_grad = i < self._num_grad_updates - 1 or set_grad
self._adapt(task_samples[i], set_grad=require_grad)
update_module_params(self._old_policy, task_params)
with torch.set_grad_enabled(set_grad):
# pylint: disable=protected-access
last_update = task_samples[-1]
loss = self._inner_algo._compute_loss(*last_update[1:])
losses.append(loss)
update_module_params(self._policy, theta)
update_module_params(self._old_policy, old_theta)
return torch.stack(losses).mean()
def _compute_kl_constraint(self, all_samples, all_params, set_grad=True):
"""Compute KL divergence.
For each task, compute the KL divergence between the old policy
distribution and current policy distribution.
Args:
all_samples (list[list[_MAMLEpisodeBatch]]): Two
dimensional list of _MAMLEpisodeBatch of size
[meta_batch_size * (num_grad_updates + 1)]
all_params (list[dict]): A list of named parameter dictionaries.
Each dictionary contains key value pair of names (str) and
parameters (torch.Tensor).
set_grad (bool): Whether to enable gradient calculation or not.
Returns:
torch.Tensor: Calculated mean value of KL divergence.
"""
theta = dict(self._policy.named_parameters())
old_theta = dict(self._old_policy.named_parameters())
kls = []
for task_samples, task_params in zip(all_samples, all_params):
for i in range(self._num_grad_updates):
require_grad = i < self._num_grad_updates - 1 or set_grad
self._adapt(task_samples[i], set_grad=require_grad)
update_module_params(self._old_policy, task_params)
with torch.set_grad_enabled(set_grad):
# pylint: disable=protected-access
kl = self._inner_algo._compute_kl_constraint(
task_samples[-1].observations)
kls.append(kl)
update_module_params(self._policy, theta)
update_module_params(self._old_policy, old_theta)
return torch.stack(kls).mean()
def _compute_policy_entropy(self, task_samples):
"""Compute policy entropy.
Args:
task_samples (list[_MAMLEpisodeBatch]): Samples data for
one task.
Returns:
torch.Tensor: Computed entropy value.
"""
obs = torch.cat([samples.observations for samples in task_samples])
# pylint: disable=protected-access
entropies = self._inner_algo._compute_policy_entropy(obs)
return entropies.mean()
@property
def policy(self):
"""Current policy of the inner algorithm.
Returns:
garage.torch.policies.Policy: Current policy of the inner
algorithm.
"""
return self._policy
@property
def _old_policy(self):
"""Old policy of the inner algorithm.
Returns:
garage.torch.policies.Policy: Old policy of the inner algorithm.
"""
# pylint: disable=protected-access
return self._inner_algo._old_policy
def _process_samples(self, episodes):
"""Process sample data based on the collected paths.
Args:
episodes (EpisodeBatch): Collected batch of episodes.
Returns:
_MAMLEpisodeBatch: Processed samples data.
"""
paths = episodes.to_list()
for path in paths:
path['returns'] = discount_cumsum(
path['rewards'], self._inner_algo.discount).copy()
self._train_value_function(paths)
obs = torch.Tensor(episodes.padded_observations)
actions = torch.Tensor(episodes.padded_actions)
rewards = torch.Tensor(episodes.padded_rewards)
valids = torch.Tensor(episodes.lengths).int()
with torch.no_grad():
# pylint: disable=protected-access
baselines = self._inner_algo._value_function(obs)
return _MAMLEpisodeBatch(paths, obs, actions, rewards, valids,
baselines)
def _log_performance(self, itr, all_samples, loss_before, loss_after,
kl_before, kl, policy_entropy):
"""Evaluate performance of this batch.
Args:
itr (int): Iteration number.
all_samples (list[list[_MAMLEpisodeBatch]]): Two
dimensional list of _MAMLEpisodeBatch of size
[meta_batch_size * (num_grad_updates + 1)]
loss_before (float): Loss before optimization step.
loss_after (float): Loss after optimization step.
kl_before (float): KL divergence before optimization step.
kl (float): KL divergence after optimization step.
policy_entropy (float): Policy entropy.
Returns:
float: The average return in last epoch cycle.
"""
tabular.record('Iteration', itr)
name_map = None
if hasattr(self._env, 'all_task_names'):
names = self._env.all_task_names
name_map = dict(zip(names, names))
rtns = log_multitask_performance(
itr,
EpisodeBatch.from_list(
env_spec=self._env.spec,
paths=[
path for task_paths in all_samples
for path in task_paths[self._num_grad_updates].paths
]),
discount=self._inner_algo.discount,
name_map=name_map)
with tabular.prefix(self._policy.name + '/'):
tabular.record('LossBefore', loss_before)
tabular.record('LossAfter', loss_after)
tabular.record('dLoss', loss_before - loss_after)
tabular.record('KLBefore', kl_before)
tabular.record('KLAfter', kl)
tabular.record('Entropy', policy_entropy)
return np.mean(rtns)
def get_exploration_policy(self):
"""Return a policy used before adaptation to a specific task.
Each time it is retrieved, this policy should only be evaluated in one
task.
Returns:
Policy: The policy used to obtain samples that are later used for
meta-RL adaptation.
"""
return copy.deepcopy(self._policy)
def adapt_policy(self, exploration_policy, exploration_episodes):
"""Adapt the policy by one gradient steps for a task.
Args:
exploration_policy (Policy): A policy which was returned from
get_exploration_policy(), and which generated
exploration_episodes by interacting with an environment.
The caller may not use this object after passing it into this
method.
exploration_episodes (EpisodeBatch): Episodes with which to adapt,
generated by exploration_policy exploring the environment.
Returns:
Policy: A policy adapted to the task represented by the
exploration_episodes.
"""
old_policy, self._policy = self._policy, exploration_policy
self._inner_algo.policy = exploration_policy
self._inner_optimizer.module = exploration_policy
batch_samples = self._process_samples(exploration_episodes)
self._adapt(batch_samples, set_grad=False)
self._policy = old_policy
self._inner_algo.policy = self._inner_optimizer.module = old_policy
return exploration_policy
class _MAMLEpisodeBatch(
collections.namedtuple('_MAMLEpisodeBatch', [
'paths', 'observations', 'actions', 'rewards', 'valids',
'baselines'
])):
r"""A tuple representing a batch of whole episodes in MAML.
A :class:`_MAMLEpisodeBatch` represents a batch of whole episodes
produced from one environment.
+-----------------------+-------------------------------------------------+
| Symbol | Description |
+=======================+=================================================+
| :math:`N` | Episode batch dimension |
+-----------------------+-------------------------------------------------+
| :math:`T` | Maximum length of an episode |
+-----------------------+-------------------------------------------------+
| :math:`S^*` | Single-step shape of a time-series tensor |
+-----------------------+-------------------------------------------------+
Attributes:
paths (list[dict[str, np.ndarray or dict[str, np.ndarray]]]):
Nonflatten original paths from sampler.
observations (torch.Tensor): A torch tensor of shape
:math:`(N \bullet T, O^*)` containing the (possibly
multi-dimensional) observations for all time steps in this batch.
These must conform to :obj:`env_spec.observation_space`.
actions (torch.Tensor): A torch tensor of shape
:math:`(N \bullet T, A^*)` containing the (possibly
multi-dimensional) actions for all time steps in this batch. These
must conform to :obj:`env_spec.action_space`.
rewards (torch.Tensor): A torch tensor of shape
:math:`(N \bullet T)` containing the rewards for all time
steps in this batch.
valids (numpy.ndarray): An integer numpy array of shape :math:`(N, )`
containing the length of each episode in this batch. This may be
used to reconstruct the individual episodes.
baselines (numpy.ndarray): An numpy array of shape
:math:`(N \bullet T, )` containing the value function estimation
at all time steps in this batch.
Raises:
ValueError: If any of the above attributes do not conform to their
prescribed types and shapes.
"""
|
py | 1a36fc8d615b3f3951c6b664f230231e1c88cfff | # -*- coding:utf-8 -*-
import hashlib
import json
import os
import shutil
from datetime import datetime
import cloudscraper
from requests.structures import CaseInsensitiveDict
from .utils import PixivError, JsonDict
class BasePixivAPI(object):
client_id = 'MOBrBDS8blbauoSck0ZfDbtuzpyT'
client_secret = 'lsACyCD94FhDUtGTXi3QzcFE2uU1hqtDaKeqrdwj'
hash_secret = '28c1fdd170a5204386cb1313c7077b34f83e4aaf4aa829ce78c231e05b0bae2c'
def __init__(self, **requests_kwargs):
"""initialize requests kwargs if need be"""
self.user_id = 0
self.access_token = None
self.refresh_token = None
# self.requests = requests.Session()
self.requests = cloudscraper.create_scraper() # fix due to #140
self.additional_headers = CaseInsensitiveDict(requests_kwargs.pop('headers', {}))
self.requests_kwargs = requests_kwargs
def set_additional_headers(self, headers):
"""manually specify additional headers. will overwrite API default headers in case of collision"""
self.additional_headers = CaseInsensitiveDict(headers)
# Set the HTTP Accept-Language header (used to get the corresponding translated_name for tags)
# language: en-us, zh-cn, ...
def set_accept_language(self, language):
"""set header Accept-Language for all requests (useful for get tags.translated_name)"""
self.additional_headers['Accept-Language'] = language
@classmethod
def parse_json(cls, json_str):
"""parse str into JsonDict"""
return json.loads(json_str, object_hook=JsonDict)
def require_auth(self):
if self.access_token is None:
raise PixivError('Authentication required! Call login() or set_auth() first!')
def requests_call(self, method, url, headers=None, params=None, data=None, stream=False):
""" requests http/https call for Pixiv API """
merged_headers = self.additional_headers.copy()
if headers:
# Use the headers in the parameter to override the
# additional_headers setting.
merged_headers.update(headers)
try:
if method == 'GET':
return self.requests.get(
url, params=params,
headers=merged_headers, stream=stream,
**self.requests_kwargs
)
elif method == 'POST':
return self.requests.post(
url, params=params, data=data,
headers=merged_headers, stream=stream,
**self.requests_kwargs
)
elif method == 'DELETE':
return self.requests.delete(
url, params=params, data=data,
headers=merged_headers, stream=stream,
**self.requests_kwargs
)
except Exception as e:
raise PixivError('requests %s %s error: %s' % (method, url, e))
raise PixivError('Unknown method: %s' % method)
def set_auth(self, access_token, refresh_token=None):
self.access_token = access_token
self.refresh_token = refresh_token
def login(self, username, password):
return self.auth(username=username, password=password)
def set_client(self, client_id, client_secret):
self.client_id = client_id
self.client_secret = client_secret
def auth(self, username=None, password=None, refresh_token=None, headers=None):
"""Login with password, or use the refresh_token to acquire a new bearer token"""
local_time = datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%S+00:00')
headers = CaseInsensitiveDict(headers or {})
headers['x-client-time'] = local_time
headers['x-client-hash'] = hashlib.md5((local_time + self.hash_secret).encode('utf-8')).hexdigest()
# Allow mock UA due to #171: https://github.com/upbit/pixivpy/issues/171
if 'user-agent' not in headers:
headers['app-os'] = 'ios'
headers['app-os-version'] = '14.6'
headers['user-agent'] = 'PixivIOSApp/7.13.3 (iOS 14.6; iPhone13,2)'
# noinspection PyUnresolvedReferences
if not hasattr(self, 'hosts') or self.hosts == 'https://app-api.pixiv.net':
auth_hosts = 'https://oauth.secure.pixiv.net'
else:
# noinspection PyUnresolvedReferences
auth_hosts = self.hosts  # case where the API host has already been resolved to an IP
headers['host'] = 'oauth.secure.pixiv.net'
url = '%s/auth/token' % auth_hosts
data = {
'get_secure_url': 1,
'client_id': self.client_id,
'client_secret': self.client_secret,
}
if username and password:
data['grant_type'] = 'password'
data['username'] = username
data['password'] = password
elif refresh_token or self.refresh_token:
data['grant_type'] = 'refresh_token'
data['refresh_token'] = refresh_token or self.refresh_token
else:
raise PixivError('[ERROR] auth() but no password or refresh_token is set.')
r = self.requests_call('POST', url, headers=headers, data=data)
if r.status_code not in {200, 301, 302}:
if data['grant_type'] == 'password':
raise PixivError(
'[ERROR] auth() failed! check username and password.\nHTTP %s: %s' % (r.status_code, r.text),
header=r.headers, body=r.text,
)
else:
raise PixivError(
'[ERROR] auth() failed! check refresh_token.\nHTTP %s: %s' % (r.status_code, r.text),
header=r.headers, body=r.text,
)
token = None
try:
# get access_token
token = self.parse_json(r.text)
self.user_id = token.response.user.id
self.access_token = token.response.access_token
self.refresh_token = token.response.refresh_token
except json.JSONDecodeError:
raise PixivError('Get access_token error! Response: %s' % token, header=r.headers, body=r.text)
# return auth/token response
return token
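    # Illustrative usage (assumes `api` is an instance of this class; token values are placeholders):
    #   api.auth(refresh_token='<your_refresh_token>')   # refresh-token flow
    #   api.auth(username='<name>', password='<pass>')   # password flow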
def download(self, url, prefix='', path=os.path.curdir, name=None, replace=False, fname=None,
referer='https://app-api.pixiv.net/'):
"""Download image to file (use 6.0 app-api)"""
if hasattr(fname, 'write'):
# A file-like object has been provided.
file = fname
else:
# Determine file path by parameters.
name = prefix + (name or fname or os.path.basename(url))
file = os.path.join(path, name)
if os.path.exists(file) and not replace:
return False
with self.requests_call('GET', url, headers={'Referer': referer}, stream=True) as response:
if isinstance(file, str):
with open(file, 'wb') as out_file:
shutil.copyfileobj(response.raw, out_file)
else:
shutil.copyfileobj(response.raw, file)
return True
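    # Illustrative usage (URL is a placeholder; assumes `api` is an instance of this class):
    #   api.download('https://i.pximg.net/.../example.jpg', path='./downloads')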
|
py | 1a36fdd0d4e1a604ed7aecae1073d3e3a53fa8d8 | import sendgrid
import json
import os
sg = sendgrid.SendGridAPIClient(apikey=os.environ.get('SENDGRID_API_KEY'))
##################################################
# Retrieve email statistics by client type. #
# GET /clients/stats #
params = {'aggregated_by': 'day',
'start_date': '2016-01-01',
'end_date': '2016-04-01'}
response = sg.client.clients.stats.get(query_params=params)
print(response.status_code)
print(response.body)
print(response.headers)
##################################################
# Retrieve stats by a specific client type. #
# GET /clients/{client_type}/stats #
params = {'aggregated_by': 'day',
'start_date': '2016-01-01',
'end_date': '2016-04-01'}
client_type = "test_url_param"
response = sg.client.clients._(client_type).stats.get(query_params=params)
print(response.status_code)
print(response.body)
print(response.headers)
|
py | 1a36fedc44974ce32679676c36966a7fb9029997 | import pytest
import numpy as np
import functools
from numpy.testing import assert_allclose
from alphacsc.init_dict import init_dictionary
from alphacsc.update_d_multi import prox_uv, prox_d
from alphacsc.learn_d_z_multi import learn_d_z_multi
from alphacsc.utils import check_random_state
@pytest.mark.parametrize("rank1", [True, False])
@pytest.mark.parametrize("uv_constraint", [
'separate', 'joint', 'box'
])
def test_init_array(rank1, uv_constraint):
n_trials, n_channels, n_times = 5, 3, 100
n_times_atom, n_atoms = 10, 4
if rank1:
expected_shape = (n_atoms, n_channels + n_times_atom)
prox = functools.partial(prox_uv, uv_constraint=uv_constraint,
n_channels=n_channels)
else:
expected_shape = (n_atoms, n_channels, n_times_atom)
prox = prox_d
X = np.random.randn(n_trials, n_channels, n_times)
# Test that init_dictionary is doing what we expect for D_init array
D_init = np.random.randn(*expected_shape)
D_hat = init_dictionary(X, n_atoms, n_times_atom, D_init=D_init,
rank1=rank1, uv_constraint=uv_constraint)
D_init = prox(D_init)
assert_allclose(D_hat, D_init)
assert id(D_hat) != id(D_init)
# Test that learn_d_z_multi is doing what we expect for D_init array
D_init = np.random.randn(*expected_shape)
_, _, D_hat, _ = learn_d_z_multi(
X, n_atoms, n_times_atom, D_init=D_init, n_iter=0, rank1=rank1,
uv_constraint=uv_constraint)
D_init = prox(D_init)
assert_allclose(D_hat, D_init)
@pytest.mark.parametrize("rank1", [True, False])
@pytest.mark.parametrize("uv_constraint", [
'separate', 'joint', 'box'
])
def test_init_random(rank1, uv_constraint):
""""""
n_trials, n_channels, n_times = 5, 3, 100
n_times_atom, n_atoms = 10, 4
if rank1:
expected_shape = (n_atoms, n_channels + n_times_atom)
prox = functools.partial(prox_uv, uv_constraint=uv_constraint,
n_channels=n_channels)
else:
expected_shape = (n_atoms, n_channels, n_times_atom)
prox = prox_d
X = np.random.randn(n_trials, n_channels, n_times)
# Test that init_dictionary is doing what we expect for D_init random
random_state = 42
D_hat = init_dictionary(X, n_atoms, n_times_atom, D_init='random',
rank1=rank1, uv_constraint=uv_constraint,
random_state=random_state)
rng = check_random_state(random_state)
D_init = rng.randn(*expected_shape)
D_init = prox(D_init)
    assert_allclose(D_hat, D_init, err_msg="The random state is not correctly "
                    "used in init_dictionary.")
# Test that learn_d_z_multi is doing what we expect for D_init random
random_state = 27
_, _, D_hat, _ = learn_d_z_multi(
X, n_atoms, n_times_atom, D_init='random', n_iter=0,
rank1=rank1, uv_constraint=uv_constraint, random_state=random_state)
rng = check_random_state(random_state)
D_init = rng.randn(*expected_shape)
D_init = prox(D_init)
assert_allclose(D_hat, D_init, err_msg="The random state is not correctly "
"used in learn_d_z_multi.")
@pytest.mark.parametrize("rank1", [True, False])
@pytest.mark.parametrize("D_init", [
None, 'random', 'chunk', 'kmeans'
])
def test_init_shape(D_init, rank1):
n_trials, n_channels, n_times = 5, 3, 100
n_times_atom, n_atoms = 10, 4
X = np.random.randn(n_trials, n_channels, n_times)
expected_shape = (n_atoms, n_channels, n_times_atom)
if rank1:
expected_shape = (n_atoms, n_channels + n_times_atom)
# Test that init_dictionary returns correct shape
uv_hat = init_dictionary(X, n_atoms, n_times_atom, D_init=D_init,
rank1=rank1, uv_constraint='separate',
random_state=42)
assert uv_hat.shape == expected_shape
|
py | 1a36ffcc03d56870da34897f490138973494714d | import copy
import pytest
from multidict._compat import USE_CYTHON
from multidict._multidict_py import CIMultiDict as PyCIMultiDict
from multidict._multidict_py import CIMultiDictProxy as PyCIMultiDictProxy
from multidict._multidict_py import MultiDict as PyMultiDict # noqa: E402
from multidict._multidict_py import MultiDictProxy as PyMultiDictProxy
if USE_CYTHON:
from multidict._multidict import ( # type: ignore
CIMultiDict,
CIMultiDictProxy,
MultiDict,
MultiDictProxy,
)
@pytest.fixture(
params=([MultiDict, CIMultiDict] if USE_CYTHON else [])
+ [PyMultiDict, PyCIMultiDict],
ids=(["MultiDict", "CIMultiDict"] if USE_CYTHON else [])
+ ["PyMultiDict", "PyCIMultiDict"],
)
def cls(request):
return request.param
@pytest.fixture(
params=(
[(MultiDictProxy, MultiDict), (CIMultiDictProxy, CIMultiDict)]
if USE_CYTHON
else []
)
+ [(PyMultiDictProxy, PyMultiDict), (PyCIMultiDictProxy, PyCIMultiDict)],
ids=(["MultiDictProxy", "CIMultiDictProxy"] if USE_CYTHON else [])
+ ["PyMultiDictProxy", "PyCIMultiDictProxy"],
)
def proxy_classes(request):
return request.param
def test_copy(cls):
d = cls()
d["foo"] = 6
d2 = d.copy()
d2["foo"] = 7
assert d["foo"] == 6
assert d2["foo"] == 7
def test_copy_proxy(proxy_classes):
proxy_cls, dict_cls = proxy_classes
d = dict_cls()
d["foo"] = 6
p = proxy_cls(d)
d2 = p.copy()
d2["foo"] = 7
assert d["foo"] == 6
assert p["foo"] == 6
assert d2["foo"] == 7
def test_copy_std_copy(cls):
d = cls()
d["foo"] = 6
d2 = copy.copy(d)
d2["foo"] = 7
assert d["foo"] == 6
assert d2["foo"] == 7
def test_ci_multidict_clone(cls):
d = cls(foo=6)
d2 = cls(d)
d2["foo"] = 7
assert d["foo"] == 6
assert d2["foo"] == 7
|
py | 1a36fff5c9ca9c22f84c52d2ba713d3393b1b121 | #!/usr/bin/env python3
################################################################################
# lev2 sample which renders an instanced model, optionally in VR mode
# Copyright 1996-2020, Michael T. Mayers.
# Distributed under the Boost Software License - Version 1.0 - August 17, 2003
# see http://www.boost.org/LICENSE_1_0.txt
################################################################################
import math, random, argparse
from orkengine.core import *
from orkengine.lev2 import *
################################################################################
parser = argparse.ArgumentParser(description='scenegraph example')
parser.add_argument('--numinstances', metavar="numinstances", help='number of mesh instances' )
parser.add_argument('--vrmode', action="store_true", help='run in vr' )
################################################################################
args = vars(parser.parse_args())
vrmode = (args["vrmode"]==True)
if args["numinstances"]==None:
numinstances = 10000
else:
numinstances = int(args["numinstances"])
################################################################################
class AnimationState(object):
def __init__(self):
super().__init__()
self.curpos = vec3(0,0,-15)
self.dstpos = vec3()
self.currot = quat()
self.cursca = 0.0
self.dstsca = 1.0
self.incrot = quat()
def update(self,deltatime):
self.lerpindex += deltatime*0.33
if self.lerpindex > 1:
self.lerpindex = 1
pos = vec3()
pos.lerp(self.curpos,self.dstpos,self.lerpindex)
sca = self.dstsca*self.lerpindex + self.cursca*(1-self.lerpindex)
mtx = mtx4()
mtx.compose(pos,self.currot,sca)
self.currot = self.currot * self.incrot
done = self.lerpindex>=1
if done:
self.curpos = pos
self.cursca = sca
return mtx,done
################################################################################
class instance_set(object):
########################################################
def __init__(self,model,num_instances,layer):
super().__init__()
self.num_instances = num_instances
self.model = model
self.sgnode = model.createInstancedNode(num_instances,"node1",layer)
self.animated = dict()
self.animstates = dict()
for i in range(num_instances):
self.animstates[i] = AnimationState()
########################################################
def animateInstance(self,deltatime,instance_id):
animstate =self.animstates[instance_id]
self.animated[instance_id] = animstate
########################################
incraxis = vec3(random.uniform(-1,1),
random.uniform(-1,1),
random.uniform(-1,1)).normal()
incrmagn = random.uniform(-0.05,0.05)
########################################
Z = random.uniform(-2.5,-50)
animstate.dstpos = vec3(random.uniform(-2.5,2.5)*Z,
random.uniform(-2.5,2.5)*Z,
Z)
animstate.incrot = quat(incraxis,incrmagn)
animstate.dstsca = random.uniform(0.1,0.65)
animstate.lerpindex = 0.0
########################################################
def update(self,deltatime):
for i in range(5):
instance_id = random.randint(0,numinstances-1)
self.animateInstance(deltatime,instance_id)
keys2del = list()
for id in self.animated.keys():
animstate = self.animstates[id]
matrix, done = animstate.update(deltatime)
self.sgnode.setInstanceMatrix(id,matrix)
if done:
keys2del += [id]
for id in keys2del:
del self.animated[id]
################################################################################
class SceneGraphApp(object):
################################################
def __init__(self):
super().__init__()
self.sceneparams = VarMap()
self.sceneparams.preset = "PBRVR" if vrmode else "PBR"
self.qtapp = OrkEzQtApp.create(self)
self.qtapp.setRefreshPolicy(RefreshFastest, 0)
self.instancesets=[]
##############################################
def onGpuInit(self,ctx):
layer = self.scene.createLayer("layer1")
models = []
#models += [Model("data://tests/pbr1/pbr1")]
#models += [Model("data://tests/pbr_calib.gltf")]
#models += [Model("src://environ/objects/misc/headwalker.obj")]
models += [Model("src://environ/objects/misc/ref/uvsph.glb")]
###################################
for model in models:
self.instancesets += [instance_set(model,numinstances,layer)]
###################################
self.camera = CameraData()
self.cameralut = CameraDataLut()
self.cameralut.addCamera("spawncam",self.camera)
###################################
self.camera.perspective(0.1, 150.0, 45.0)
self.camera.lookAt(vec3(0,0,5), # eye
vec3(0, 0, 0), # tgt
vec3(0, 1, 0)) # up
################################################
def onUpdate(self,updinfo):
###################################
for minst in self.instancesets:
minst.update(updinfo.deltatime)
###################################
self.scene.updateScene(self.cameralut) # update and enqueue all scenenodes
################################################
app = SceneGraphApp()
app.qtapp.exec()
|
py | 1a370021f7731968f3f34b5a4586574198f79c3d | import unittest
import warnings
import tempfile
from tests.core import TestCore
from tests.core import ASSET_DIR
from pyrep.objects.object import Object
from pyrep.objects.shape import Shape
from pyrep.objects.dummy import Dummy
from pyrep.objects.joint import Joint
from pyrep.objects.proximity_sensor import ProximitySensor
from pyrep.objects.force_sensor import ForceSensor
from pyrep.objects.cartesian_path import CartesianPath
from pyrep.errors import WrongObjectTypeError
import os
from os import path
import numpy as np
class TestPyrep(TestCore):
def test_get_object_wrong_type(self):
with self.assertRaises(WrongObjectTypeError):
ProximitySensor('dynamic_cube')
def test_get_shape(self):
cube = Shape('dynamic_cube')
self.assertIsInstance(cube, Shape)
def test_get_joint(self):
cube = Joint('prismatic_joint')
self.assertIsInstance(cube, Joint)
def test_get_proximity_sensor(self):
cube = ProximitySensor('proximity_sensor')
self.assertIsInstance(cube, ProximitySensor)
def test_get_force_sensor(self):
cube = ForceSensor('force_sensor')
self.assertIsInstance(cube, ForceSensor)
def test_get_cartesian_path(self):
cube = CartesianPath('cartesian_path')
self.assertIsInstance(cube, CartesianPath)
def test_step(self):
cube = Shape('dynamic_cube')
start_pos = cube.get_position()
[self.pyrep.step() for _ in range(2)]
end_pos = cube.get_position()
self.assertFalse(np.allclose(start_pos, end_pos))
def test_load_model(self):
m = self.pyrep.import_model(path.join(ASSET_DIR, 'loadable_model.ttm'))
self.assertIsInstance(m, Shape)
def test_export_scene(self):
scene_file = tempfile.mktemp('.ttt')
self.pyrep.export_scene(scene_file)
os.remove(scene_file)
def test_group_objects(self):
top = Dummy('cubes_under_dummy')
self.assertEqual(
len(top.get_objects_in_tree(exclude_base=True)), 3)
cubes = [Shape('cube%d' % i) for i in range(3)]
ob = self.pyrep.group_objects(cubes)
self.assertIsInstance(ob, Object)
self.assertEqual(
len(top.get_objects_in_tree(exclude_base=True)), 1)
def test_merge_objects(self):
top = Dummy('cubes_under_dummy')
self.assertEqual(
len(top.get_objects_in_tree(exclude_base=True)), 3)
cubes = [Shape('cube%d' % i) for i in range(3)]
ob = self.pyrep.merge_objects(cubes)
self.assertIsInstance(ob, Object)
self.assertEqual(
len(top.get_objects_in_tree(exclude_base=True)), 1)
def test_set_configuration_tree(self):
dynamic_cube = Shape('dynamic_cube')
pos = dynamic_cube.get_position()
config = dynamic_cube.get_configuration_tree()
self.assertIsNotNone(config)
[self.pyrep.step() for _ in range(10)]
self.pyrep.set_configuration_tree(config)
self.assertTrue(np.allclose(pos, dynamic_cube.get_position()))
def test_create_texture_and_get_texture(self):
plane, texture = self.pyrep.create_texture(
path.join(ASSET_DIR, 'wood_texture.jpg'))
self.assertGreaterEqual(texture.get_texture_id(), 0)
self.assertEqual(texture.get_texture_id(),
plane.get_texture().get_texture_id())
def test_get_objects_in_tree(self):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
objects = self.pyrep.get_objects_in_tree()
self.assertNotEqual(len(w), 0)
for obj in objects:
self.assertIsInstance(obj, Object)
dummys = [Dummy('nested_dummy%d' % i) for i in range(3)]
for root_obj in [dummys[0], dummys[0].get_handle()]:
objects = self.pyrep.get_objects_in_tree(
root_obj, exclude_base=False, first_generation_only=False)
self.assertListEqual(objects, dummys)
for obj in objects:
self.assertIs(type(obj), Dummy)
self.assertListEqual(
self.pyrep.get_objects_in_tree(
root_obj, exclude_base=True, first_generation_only=False),
dummys[1:])
self.assertListEqual(
self.pyrep.get_objects_in_tree(
root_obj, exclude_base=False,first_generation_only=True),
dummys[:-1])
def test_get_collection_by_name(self):
self.assertIsInstance(self.pyrep.get_collection_handle_by_name('Panda_arm'), int)
if __name__ == '__main__':
unittest.main()
|
py | 1a370035165b19828df3c0a187f5df6ff97bad20 | from django.db import models
class Forms(models.Model):
name = models.CharField(verbose_name="Form Name",max_length=50)
slug = models.SlugField(verbose_name="Form Slug",max_length=50,default="")
description = models.CharField(verbose_name="Form Description",max_length=15,null=True,blank=True)
to_url = models.CharField(verbose_name="URL to ?",max_length=150)
def __str__(self):
return self.name
class FormFields(models.Model):
title = models.CharField(verbose_name="Field Name",max_length=30)
description = models.CharField(verbose_name="Field Description",max_length=50,null=True,blank=True)
is_required = models.BooleanField(verbose_name="Is Required?",default=False)
field_type = models.CharField(verbose_name="Field Type",max_length=50)
placeholder = models.CharField(verbose_name="Field Placeholder",max_length=50)
custom_css = models.TextField(verbose_name="Field Custom CSS",null=True,blank=True)
def __str__(self):
return self.title
class FormFieldsStorage(models.Model):
field = models.ForeignKey("form.FormFields",on_delete=models.CASCADE,verbose_name="Field")
form = models.ForeignKey("form.Forms",on_delete=models.CASCADE,verbose_name="Form")
def __str__(self):
return self.form.name + "@" + self.field.title
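# Illustrative usage (run in a Django shell with migrations applied; values are examples):
#   form = Forms.objects.create(name="Contact", slug="contact", to_url="/thanks/")
#   field = FormFields.objects.create(title="Email", field_type="email", placeholder="you@example.com")
#   FormFieldsStorage.objects.create(form=form, field=field)   # str() -> "Contact@Email"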
|
py | 1a3701c5701a89982833a69f89220fa81831fdd2 | # Copyright (c) 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
import os
from tests.unit import unittest
import boto
from boto.regioninfo import RegionInfo, load_endpoint_json, merge_endpoints
from boto.regioninfo import load_regions, get_regions
class TestRegionInfo(object):
def __init__(self, connection=None, name=None, endpoint=None,
connection_cls=None, provider=None):
self.connection = connection
self.name = name
self.endpoint = endpoint
self.connection_cls = connection_cls
self.provider = provider
class FakeConn(object):
pass
class TestEndpointLoading(unittest.TestCase):
def setUp(self):
super(TestEndpointLoading, self).setUp()
def test_load_endpoint_json(self):
endpoints = load_endpoint_json(boto.ENDPOINTS_PATH)
self.assertTrue('ec2' in endpoints)
self.assertEqual(
endpoints['ec2']['us-east-1'],
'ec2.us-east-1.amazonaws.com'
)
def test_merge_endpoints(self):
defaults = {
'ec2': {
'us-east-1': 'ec2.us-east-1.amazonaws.com',
'us-west-1': 'ec2.us-west-1.amazonaws.com',
}
}
additions = {
# Top-level addition.
's3': {
'us-east-1': 's3.amazonaws.com'
},
'ec2': {
# Overwrite. This doesn't exist, just test data.
'us-east-1': 'ec2.auto-resolve.amazonaws.com',
# Deep addition.
'us-west-2': 'ec2.us-west-2.amazonaws.com',
}
}
endpoints = merge_endpoints(defaults, additions)
self.assertEqual(endpoints, {
'ec2': {
'us-east-1': 'ec2.auto-resolve.amazonaws.com',
'us-west-1': 'ec2.us-west-1.amazonaws.com',
'us-west-2': 'ec2.us-west-2.amazonaws.com',
},
's3': {
'us-east-1': 's3.amazonaws.com'
}
})
def test_load_regions(self):
# Just the defaults.
endpoints = load_regions()
self.assertTrue('us-east-1' in endpoints['ec2'])
self.assertFalse('test-1' in endpoints['ec2'])
# With ENV overrides.
os.environ['BOTO_ENDPOINTS'] = os.path.join(
os.path.dirname(__file__),
'test_endpoints.json'
)
self.addCleanup(os.environ.pop, 'BOTO_ENDPOINTS')
endpoints = load_regions()
self.assertTrue('us-east-1' in endpoints['ec2'])
self.assertTrue('test-1' in endpoints['ec2'])
self.assertEqual(endpoints['ec2']['test-1'], 'ec2.test-1.amazonaws.com')
def test_get_regions(self):
# With defaults.
ec2_regions = get_regions('ec2')
self.assertTrue(len(ec2_regions) >= 10)
west_2 = None
for region_info in ec2_regions:
if region_info.name == 'us-west-2':
west_2 = region_info
break
self.assertNotEqual(west_2, None, "Couldn't find the us-west-2 region!")
self.assertTrue(isinstance(west_2, RegionInfo))
self.assertEqual(west_2.name, 'us-west-2')
self.assertEqual(west_2.endpoint, 'ec2.us-west-2.amazonaws.com')
self.assertEqual(west_2.connection_cls, None)
def test_get_regions_overrides(self):
ec2_regions = get_regions(
'ec2',
region_cls=TestRegionInfo,
connection_cls=FakeConn
)
self.assertTrue(len(ec2_regions) >= 10)
west_2 = None
for region_info in ec2_regions:
if region_info.name == 'us-west-2':
west_2 = region_info
break
self.assertNotEqual(west_2, None, "Couldn't find the us-west-2 region!")
self.assertFalse(isinstance(west_2, RegionInfo))
self.assertTrue(isinstance(west_2, TestRegionInfo))
self.assertEqual(west_2.name, 'us-west-2')
self.assertEqual(west_2.endpoint, 'ec2.us-west-2.amazonaws.com')
self.assertEqual(west_2.connection_cls, FakeConn)
if __name__ == '__main__':
unittest.main()
|
py | 1a3702be15ebcb4bd15bdff000d4d69a1406a580 | # See also the methods already implemented we have in cm for ssh management
# I think you reimplemented things that already exists.
# see and inspect cloudmesh.common
import os
from os.path import expanduser
# see the content of path_expand; it does expanduser as far as I know
from cloudmesh.common.util import path_expand
from cloudmesh.management.configuration.SSHkey import SSHkey
from cloudmesh.mongo.DataBaseDecorator import DatabaseUpdate
from cloudmesh.common.debug import VERBOSE
from pprint import pprint
from cloudmesh.configuration.Config import Config
# noinspection PyPep8Naming
class Key(object):
@classmethod
def get_from_dir(cls, directory=None, store=True):
directory = directory or path_expand("~/.ssh")
        # find a way that also works on Windows; the code must always work on both
        # Windows and Linux, otherwise you need an if condition here
        os.system("chmod 700 $HOME/.ssh")
files = [file for file in os.listdir(expanduser(path_expand(directory)))
if file.lower().endswith(".pub")]
d = []
for file in files:
print(file)
path = directory + "/" + file
            # find a way that also works on Windows; the code must always work on both
            # Windows and Linux, otherwise you need an if condition here
            os.system("chmod 700 $HOME/.ssh")
with open(path) as fd:
for pubkey in map(str.strip, fd):
# skip empty lines
if not pubkey:
continue
print(pubkey)
d.append(pubkey)
return d
@DatabaseUpdate()
def add(self, name, source):
"""
key add [NAME] [--source=FILENAME]
key add [NAME] [--source=git]
key add [NAME] [--source=ssh]
"""
keys = None
if source == "git":
config = Config()
username = config["cloudmesh.profile.github"]
keys = SSHkey().get_from_git(username)
elif source == "ssh":
key = SSHkey(name=name)
keys = [key]
else:
raise NotImplementedError
# source is filename
return keys
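    # Illustrative usage (assumes a configured cloudmesh profile; the DatabaseUpdate
    # decorator is expected to persist the returned keys):
    #   Key().add("mykey", source="git")   # fetch public keys for the configured GitHub user
    #   Key().add("mykey", source="ssh")   # read the local ssh public key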
if __name__ == "__main__":
Key.get_from_dir(None, True)
|
py | 1a370383536998c08936e09d7e3fb8090b1c2eb0 | # coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
from pants.backend.jvm.tasks.jvm_tool_task_mixin import JvmToolTaskMixin
from pants.backend.jvm.tasks.nailgun_task import NailgunTask
from pants.base.exceptions import TaskError
from pants.base.workunit import WorkUnit
from pants.option.options import Options
class ThriftLintError(Exception):
"""Raised on a lint failure."""
class ThriftLinter(NailgunTask, JvmToolTaskMixin):
"""Print linter warnings for thrift files.
"""
_CONFIG_SECTION = 'thrift-linter'
@staticmethod
def _is_thrift(target):
return target.is_thrift
@classmethod
def register_options(cls, register):
super(ThriftLinter, cls).register_options(register)
register('--skip', action='store_true', help='Skip thrift linting.')
register('--strict', default=None, action='store_true',
help='Fail the goal if thrift linter errors are found. Overrides the '
'`strict-default` option.')
register('--strict-default', default=False, advanced=True, action='store_true',
help='Sets the default strictness for targets. The `strict` option overrides '
'this value if it is set.')
register('--linter-args', default=[], advanced=True, type=Options.list,
help='Additional options passed to the linter.')
cls.register_jvm_tool(register, 'scrooge-linter')
@classmethod
def product_types(cls):
# Declare the product of this goal. Gen depends on thrift-linter.
return ['thrift-linter']
@classmethod
def prepare(cls, options, round_manager):
super(ThriftLinter, cls).prepare(options, round_manager)
# Linter depends on ivy running before it.
round_manager.require_data('ivy_imports')
@property
def config_section(self):
return self._CONFIG_SECTION
@staticmethod
def _to_bool(value):
# Converts boolean and string values to boolean.
return str(value) == 'True'
def _is_strict(self, target):
# The strict value is read from the following, in order:
# 1. options, --[no-]strict
# 2. java_thrift_library target in BUILD file, thrift_linter_strict = False,
# 3. options, --[no-]strict-default
cmdline_strict = self.get_options().strict
if cmdline_strict is not None:
return self._to_bool(cmdline_strict)
if target.thrift_linter_strict is not None:
return self._to_bool(target.thrift_linter_strict)
return self._to_bool(self.get_options().strict_default)
def _lint(self, target):
self.context.log.debug('Linting {0}'.format(target.address.spec))
classpath = self.tool_classpath('scrooge-linter')
config_args = []
config_args.extend(self.get_options().linter_args)
if not self._is_strict(target):
config_args.append('--ignore-errors')
paths = target.sources_relative_to_buildroot()
args = config_args + paths
# If runjava returns non-zero, this marks the workunit as a
# FAILURE, and there is no way to wrap this here.
returncode = self.runjava(classpath=classpath,
main='com.twitter.scrooge.linter.Main',
args=args,
workunit_labels=[WorkUnit.COMPILER]) # to let stdout/err through.
if returncode != 0:
raise ThriftLintError(
'Lint errors in target {0} for {1}.'.format(target.address.spec, paths))
def execute(self):
if self.get_options().skip:
return
thrift_targets = self.context.targets(self._is_thrift)
with self.invalidated(thrift_targets) as invalidation_check:
errors = []
for vt in invalidation_check.invalid_vts:
try:
self._lint(vt.target)
except ThriftLintError as e:
errors.append(str(e))
else:
vt.update()
if errors:
raise TaskError('\n'.join(errors))
|
py | 1a37045177e9241a7ff98ad3dfec83659c73778d | # Author: Steven J. Bethard <[email protected]>.
"""Command-line parsing library. Implements argparse for Python 2.6 and below.
This module is an optparse-inspired command-line parsing library that:
- handles both optional and positional arguments
- produces highly informative usage messages
- supports parsers that dispatch to sub-parsers
The following is a simple usage example that sums integers from the
command-line and writes the result to a file::
parser = argparse.ArgumentParser(
description='sum the integers at the command line')
parser.add_argument(
'integers', metavar='int', nargs='+', type=int,
help='an integer to be summed')
parser.add_argument(
'--log', default=sys.stdout, type=argparse.FileType('w'),
help='the file where the sum should be written')
args = parser.parse_args()
args.log.write('%s' % sum(args.integers))
args.log.close()
The module contains the following public classes:
- ArgumentParser -- The main entry point for command-line parsing. As the
example above shows, the add_argument() method is used to populate
the parser with actions for optional and positional arguments. Then
the parse_args() method is invoked to convert the args at the
command-line into an object with attributes.
    - ArgumentError -- The exception raised by ArgumentParser objects when
there are errors with the parser's actions. Errors raised while
parsing the command-line are caught by ArgumentParser and emitted
as command-line messages.
- FileType -- A factory for defining types of files to be created. As the
example above shows, instances of FileType are typically passed as
the type= argument of add_argument() calls.
- Action -- The base class for parser actions. Typically actions are
selected by passing strings like 'store_true' or 'append_const' to
the action= argument of add_argument(). However, for greater
customization of ArgumentParser actions, subclasses of Action may
be defined and passed as the action= argument.
- HelpFormatter, RawDescriptionHelpFormatter, RawTextHelpFormatter,
ArgumentDefaultsHelpFormatter -- Formatter classes which
may be passed as the formatter_class= argument to the
ArgumentParser constructor. HelpFormatter is the default,
RawDescriptionHelpFormatter and RawTextHelpFormatter tell the parser
not to change the formatting for help text, and
ArgumentDefaultsHelpFormatter adds information about argument defaults
to the help.
All other classes in this module are considered implementation details.
(Also note that HelpFormatter and RawDescriptionHelpFormatter are only
considered public as object names -- the API of the formatter structures is
still considered an implementation detail.)
"""
__version__ = '1.3.0' # we use our own version number independent of the
# one in stdlib and we release this on pypi.
__external_lib__ = True # to make sure the tests really test THIS lib,
# not the builtin one in Python stdlib
__all__ = [
'ArgumentParser',
'ArgumentError',
'ArgumentTypeError',
'FileType',
'HelpFormatter',
'ArgumentDefaultsHelpFormatter',
'RawDescriptionHelpFormatter',
'RawTextHelpFormatter',
'Namespace',
'Action',
'ONE_OR_MORE',
'OPTIONAL',
'PARSER',
'REMAINDER',
'SUPPRESS',
'ZERO_OR_MORE',
]
import copy as _copy
import os as _os
import re as _re
import sys as _sys
import textwrap as _textwrap
from gettext import gettext as _
try:
set
except NameError:
# for python < 2.4 compatibility (sets module is there since 2.3):
from sets import Set as set
try:
basestring
except NameError:
basestring = str
try:
sorted
except NameError:
# for python < 2.4 compatibility:
def sorted(iterable, reverse=False):
result = list(iterable)
result.sort()
if reverse:
result.reverse()
return result
def _callable(obj):
return hasattr(obj, '__call__') or hasattr(obj, '__bases__')
SUPPRESS = '==SUPPRESS=='
OPTIONAL = '?'
ZERO_OR_MORE = '*'
ONE_OR_MORE = '+'
PARSER = 'A...'
REMAINDER = '...'
_UNRECOGNIZED_ARGS_ATTR = '_unrecognized_args'
# =============================
# Utility functions and classes
# =============================
class _AttributeHolder(object):
"""Abstract base class that provides __repr__.
The __repr__ method returns a string in the format::
ClassName(attr=name, attr=name, ...)
The attributes are determined either by a class-level attribute,
'_kwarg_names', or by inspecting the instance __dict__.
"""
def __repr__(self):
type_name = type(self).__name__
arg_strings = []
for arg in self._get_args():
arg_strings.append(repr(arg))
for name, value in self._get_kwargs():
arg_strings.append('%s=%r' % (name, value))
return '%s(%s)' % (type_name, ', '.join(arg_strings))
def _get_kwargs(self):
return sorted(self.__dict__.items())
def _get_args(self):
return []
def _ensure_value(namespace, name, value):
if getattr(namespace, name, None) is None:
setattr(namespace, name, value)
return getattr(namespace, name)
# ===============
# Formatting Help
# ===============
class HelpFormatter(object):
"""Formatter for generating usage messages and argument help strings.
Only the name of this class is considered a public API. All the methods
provided by the class are considered an implementation detail.
"""
def __init__(self,
prog,
indent_increment=2,
max_help_position=24,
width=None):
# default setting for width
if width is None:
try:
width = int(_os.environ['COLUMNS'])
except (KeyError, ValueError):
width = 80
width -= 2
self._prog = prog
self._indent_increment = indent_increment
self._max_help_position = max_help_position
self._width = width
self._current_indent = 0
self._level = 0
self._action_max_length = 0
self._root_section = self._Section(self, None)
self._current_section = self._root_section
self._whitespace_matcher = _re.compile(r'\s+')
self._long_break_matcher = _re.compile(r'\n\n\n+')
# ===============================
# Section and indentation methods
# ===============================
def _indent(self):
self._current_indent += self._indent_increment
self._level += 1
def _dedent(self):
self._current_indent -= self._indent_increment
assert self._current_indent >= 0, 'Indent decreased below 0.'
self._level -= 1
class _Section(object):
def __init__(self, formatter, parent, heading=None):
self.formatter = formatter
self.parent = parent
self.heading = heading
self.items = []
def format_help(self):
# format the indented section
if self.parent is not None:
self.formatter._indent()
join = self.formatter._join_parts
for func, args in self.items:
func(*args)
item_help = join([func(*args) for func, args in self.items])
if self.parent is not None:
self.formatter._dedent()
# return nothing if the section was empty
if not item_help:
return ''
# add the heading if the section was non-empty
if self.heading is not SUPPRESS and self.heading is not None:
current_indent = self.formatter._current_indent
heading = '%*s%s:\n' % (current_indent, '', self.heading)
else:
heading = ''
# join the section-initial newline, the heading and the help
return join(['\n', heading, item_help, '\n'])
def _add_item(self, func, args):
self._current_section.items.append((func, args))
# ========================
# Message building methods
# ========================
def start_section(self, heading):
self._indent()
section = self._Section(self, self._current_section, heading)
self._add_item(section.format_help, [])
self._current_section = section
def end_section(self):
self._current_section = self._current_section.parent
self._dedent()
def add_text(self, text):
if text is not SUPPRESS and text is not None:
self._add_item(self._format_text, [text])
def add_usage(self, usage, actions, groups, prefix=None):
if usage is not SUPPRESS:
args = usage, actions, groups, prefix
self._add_item(self._format_usage, args)
def add_argument(self, action):
if action.help is not SUPPRESS:
# find all invocations
get_invocation = self._format_action_invocation
invocations = [get_invocation(action)]
for subaction in self._iter_indented_subactions(action):
invocations.append(get_invocation(subaction))
# update the maximum item length
invocation_length = max([len(s) for s in invocations])
action_length = invocation_length + self._current_indent
self._action_max_length = max(self._action_max_length,
action_length)
# add the item to the list
self._add_item(self._format_action, [action])
def add_arguments(self, actions):
for action in actions:
self.add_argument(action)
# =======================
# Help-formatting methods
# =======================
def format_help(self):
help = self._root_section.format_help()
if help:
help = self._long_break_matcher.sub('\n\n', help)
help = help.strip('\n') + '\n'
return help
def _join_parts(self, part_strings):
return ''.join([part
for part in part_strings
if part and part is not SUPPRESS])
def _format_usage(self, usage, actions, groups, prefix):
if prefix is None:
prefix = _('usage: ')
# if usage is specified, use that
if usage is not None:
usage = usage % dict(prog=self._prog)
# if no optionals or positionals are available, usage is just prog
elif usage is None and not actions:
usage = '%(prog)s' % dict(prog=self._prog)
# if optionals and positionals are available, calculate usage
elif usage is None:
prog = '%(prog)s' % dict(prog=self._prog)
# split optionals from positionals
optionals = []
positionals = []
for action in actions:
if action.option_strings:
optionals.append(action)
else:
positionals.append(action)
# build full usage string
format = self._format_actions_usage
action_usage = format(optionals + positionals, groups)
usage = ' '.join([s for s in [prog, action_usage] if s])
# wrap the usage parts if it's too long
text_width = self._width - self._current_indent
if len(prefix) + len(usage) > text_width:
# break usage into wrappable parts
part_regexp = r'\(.*?\)+|\[.*?\]+|\S+'
opt_usage = format(optionals, groups)
pos_usage = format(positionals, groups)
opt_parts = _re.findall(part_regexp, opt_usage)
pos_parts = _re.findall(part_regexp, pos_usage)
assert ' '.join(opt_parts) == opt_usage
assert ' '.join(pos_parts) == pos_usage
# helper for wrapping lines
def get_lines(parts, indent, prefix=None):
lines = []
line = []
if prefix is not None:
line_len = len(prefix) - 1
else:
line_len = len(indent) - 1
for part in parts:
if line_len + 1 + len(part) > text_width:
lines.append(indent + ' '.join(line))
line = []
line_len = len(indent) - 1
line.append(part)
line_len += len(part) + 1
if line:
lines.append(indent + ' '.join(line))
if prefix is not None:
lines[0] = lines[0][len(indent):]
return lines
# if prog is short, follow it with optionals or positionals
if len(prefix) + len(prog) <= 0.75 * text_width:
indent = ' ' * (len(prefix) + len(prog) + 1)
if opt_parts:
lines = get_lines([prog] + opt_parts, indent, prefix)
lines.extend(get_lines(pos_parts, indent))
elif pos_parts:
lines = get_lines([prog] + pos_parts, indent, prefix)
else:
lines = [prog]
# if prog is long, put it on its own line
else:
indent = ' ' * len(prefix)
parts = opt_parts + pos_parts
lines = get_lines(parts, indent)
if len(lines) > 1:
lines = []
lines.extend(get_lines(opt_parts, indent))
lines.extend(get_lines(pos_parts, indent))
lines = [prog] + lines
# join lines into usage
usage = '\n'.join(lines)
# prefix with 'usage:'
return '%s%s\n\n' % (prefix, usage)
def _format_actions_usage(self, actions, groups):
# find group indices and identify actions in groups
group_actions = set()
inserts = {}
for group in groups:
try:
start = actions.index(group._group_actions[0])
except ValueError:
continue
else:
end = start + len(group._group_actions)
if actions[start:end] == group._group_actions:
for action in group._group_actions:
group_actions.add(action)
if not group.required:
if start in inserts:
inserts[start] += ' ['
else:
inserts[start] = '['
inserts[end] = ']'
else:
if start in inserts:
inserts[start] += ' ('
else:
inserts[start] = '('
inserts[end] = ')'
for i in range(start + 1, end):
inserts[i] = '|'
# collect all actions format strings
parts = []
for i, action in enumerate(actions):
# suppressed arguments are marked with None
# remove | separators for suppressed arguments
if action.help is SUPPRESS:
parts.append(None)
if inserts.get(i) == '|':
inserts.pop(i)
elif inserts.get(i + 1) == '|':
inserts.pop(i + 1)
# produce all arg strings
elif not action.option_strings:
part = self._format_args(action, action.dest)
# if it's in a group, strip the outer []
if action in group_actions:
if part[0] == '[' and part[-1] == ']':
part = part[1:-1]
# add the action string to the list
parts.append(part)
# produce the first way to invoke the option in brackets
else:
option_string = action.option_strings[0]
# if the Optional doesn't take a value, format is:
# -s or --long
if action.nargs == 0:
part = '%s' % option_string
# if the Optional takes a value, format is:
# -s ARGS or --long ARGS
else:
default = action.dest.upper()
args_string = self._format_args(action, default)
part = '%s %s' % (option_string, args_string)
# make it look optional if it's not required or in a group
if not action.required and action not in group_actions:
part = '[%s]' % part
# add the action string to the list
parts.append(part)
# insert things at the necessary indices
for i in sorted(inserts, reverse=True):
parts[i:i] = [inserts[i]]
# join all the action items with spaces
text = ' '.join([item for item in parts if item is not None])
# clean up separators for mutually exclusive groups
open = r'[\[(]'
close = r'[\])]'
text = _re.sub(r'(%s) ' % open, r'\1', text)
text = _re.sub(r' (%s)' % close, r'\1', text)
text = _re.sub(r'%s *%s' % (open, close), r'', text)
text = _re.sub(r'\(([^|]*)\)', r'\1', text)
text = text.strip()
# return the text
return text
def _format_text(self, text):
if '%(prog)' in text:
text = text % dict(prog=self._prog)
text_width = self._width - self._current_indent
indent = ' ' * self._current_indent
return self._fill_text(text, text_width, indent) + '\n\n'
def _format_action(self, action):
# determine the required width and the entry label
help_position = min(self._action_max_length + 2,
self._max_help_position)
help_width = self._width - help_position
action_width = help_position - self._current_indent - 2
action_header = self._format_action_invocation(action)
        # no help; start on same line and add a final newline
if not action.help:
tup = self._current_indent, '', action_header
action_header = '%*s%s\n' % tup
# short action name; start on the same line and pad two spaces
elif len(action_header) <= action_width:
tup = self._current_indent, '', action_width, action_header
action_header = '%*s%-*s ' % tup
indent_first = 0
# long action name; start on the next line
else:
tup = self._current_indent, '', action_header
action_header = '%*s%s\n' % tup
indent_first = help_position
# collect the pieces of the action help
parts = [action_header]
# if there was help for the action, add lines of help text
if action.help:
help_text = self._expand_help(action)
help_lines = self._split_lines(help_text, help_width)
parts.append('%*s%s\n' % (indent_first, '', help_lines[0]))
for line in help_lines[1:]:
parts.append('%*s%s\n' % (help_position, '', line))
# or add a newline if the description doesn't end with one
elif not action_header.endswith('\n'):
parts.append('\n')
# if there are any sub-actions, add their help as well
for subaction in self._iter_indented_subactions(action):
parts.append(self._format_action(subaction))
# return a single string
return self._join_parts(parts)
def _format_action_invocation(self, action):
if not action.option_strings:
metavar, = self._metavar_formatter(action, action.dest)(1)
return metavar
else:
parts = []
# if the Optional doesn't take a value, format is:
# -s, --long
if action.nargs == 0:
parts.extend(action.option_strings)
# if the Optional takes a value, format is:
# -s ARGS, --long ARGS
else:
default = action.dest.upper()
args_string = self._format_args(action, default)
for option_string in action.option_strings:
parts.append('%s %s' % (option_string, args_string))
return ', '.join(parts)
def _metavar_formatter(self, action, default_metavar):
if action.metavar is not None:
result = action.metavar
elif action.choices is not None:
choice_strs = [str(choice) for choice in action.choices]
result = '{%s}' % ','.join(choice_strs)
else:
result = default_metavar
def format(tuple_size):
if isinstance(result, tuple):
return result
else:
return (result, ) * tuple_size
return format
def _format_args(self, action, default_metavar):
get_metavar = self._metavar_formatter(action, default_metavar)
if action.nargs is None:
result = '%s' % get_metavar(1)
elif action.nargs == OPTIONAL:
result = '[%s]' % get_metavar(1)
elif action.nargs == ZERO_OR_MORE:
result = '[%s [%s ...]]' % get_metavar(2)
elif action.nargs == ONE_OR_MORE:
result = '%s [%s ...]' % get_metavar(2)
elif action.nargs == REMAINDER:
result = '...'
elif action.nargs == PARSER:
result = '%s ...' % get_metavar(1)
else:
formats = ['%s' for _ in range(action.nargs)]
result = ' '.join(formats) % get_metavar(action.nargs)
return result
def _expand_help(self, action):
params = dict(vars(action), prog=self._prog)
for name in list(params):
if params[name] is SUPPRESS:
del params[name]
for name in list(params):
if hasattr(params[name], '__name__'):
params[name] = params[name].__name__
if params.get('choices') is not None:
choices_str = ', '.join([str(c) for c in params['choices']])
params['choices'] = choices_str
return self._get_help_string(action) % params
def _iter_indented_subactions(self, action):
try:
get_subactions = action._get_subactions
except AttributeError:
pass
else:
self._indent()
for subaction in get_subactions():
yield subaction
self._dedent()
def _split_lines(self, text, width):
text = self._whitespace_matcher.sub(' ', text).strip()
return _textwrap.wrap(text, width)
def _fill_text(self, text, width, indent):
text = self._whitespace_matcher.sub(' ', text).strip()
return _textwrap.fill(text, width, initial_indent=indent,
subsequent_indent=indent)
def _get_help_string(self, action):
return action.help
class RawDescriptionHelpFormatter(HelpFormatter):
"""Help message formatter which retains any formatting in descriptions.
Only the name of this class is considered a public API. All the methods
provided by the class are considered an implementation detail.
"""
def _fill_text(self, text, width, indent):
return ''.join([indent + line for line in text.splitlines(True)])
class RawTextHelpFormatter(RawDescriptionHelpFormatter):
"""Help message formatter which retains formatting of all help text.
Only the name of this class is considered a public API. All the methods
provided by the class are considered an implementation detail.
"""
def _split_lines(self, text, width):
return text.splitlines()
class ArgumentDefaultsHelpFormatter(HelpFormatter):
"""Help message formatter which adds default values to argument help.
Only the name of this class is considered a public API. All the methods
provided by the class are considered an implementation detail.
"""
def _get_help_string(self, action):
help = action.help
if '%(default)' not in action.help:
if action.default is not SUPPRESS:
defaulting_nargs = [OPTIONAL, ZERO_OR_MORE]
if action.option_strings or action.nargs in defaulting_nargs:
help += ' (default: %(default)s)'
return help
# =====================
# Options and Arguments
# =====================
def _get_action_name(argument):
if argument is None:
return None
elif argument.option_strings:
return '/'.join(argument.option_strings)
elif argument.metavar not in (None, SUPPRESS):
return argument.metavar
elif argument.dest not in (None, SUPPRESS):
return argument.dest
else:
return None
class ArgumentError(Exception):
"""An error from creating or using an argument (optional or positional).
The string value of this exception is the message, augmented with
information about the argument that caused it.
"""
def __init__(self, argument, message):
self.argument_name = _get_action_name(argument)
self.message = message
def __str__(self):
if self.argument_name is None:
format = '%(message)s'
else:
format = 'argument %(argument_name)s: %(message)s'
return format % dict(message=self.message,
argument_name=self.argument_name)
class ArgumentTypeError(Exception):
"""An error from trying to convert a command line string to a type."""
pass
# ==============
# Action classes
# ==============
class Action(_AttributeHolder):
"""Information about how to convert command line strings to Python structures.
Action structures are used by an ArgumentParser to represent the information
needed to parse a single argument from one or more strings from the
command line. The keyword arguments to the Action constructor are also
all attributes of Action instances.
Keyword Arguments:
- option_strings -- A list of command-line option strings which
should be associated with this action.
- dest -- The name of the attribute to hold the created object(s)
- nargs -- The number of command-line arguments that should be
consumed. By default, one argument will be consumed and a single
value will be produced. Other values include::
- N (an integer) consumes N arguments (and produces a list)
- '?' consumes zero or one arguments
- '*' consumes zero or more arguments (and produces a list)
- '+' consumes one or more arguments (and produces a list)
Note that the difference between the default and nargs=1 is that
with the default, a single value will be produced, while with
nargs=1, a list containing a single value will be produced.
- const -- The value to be produced if the option is specified and the
option uses an action that takes no values.
- default -- The value to be produced if the option is not specified.
- type -- The type which the command-line arguments should be converted
to, should be one of 'string', 'int', 'float', 'complex' or a
callable object that accepts a single string argument. If None,
'string' is assumed.
- choices -- A container of values that should be allowed. If not None,
after a command-line argument has been converted to the appropriate
type, an exception will be raised if it is not a member of this
collection.
- required -- True if the action must always be specified at the
command line. This is only meaningful for optional command-line
arguments.
- help -- The help string describing the argument.
- metavar -- The name to be used for the option's argument with the
help string. If None, the 'dest' value will be used as the name.
"""
def __init__(self,
option_strings,
dest,
nargs=None,
const=None,
default=None,
type=None,
choices=None,
required=False,
help=None,
metavar=None):
self.option_strings = option_strings
self.dest = dest
self.nargs = nargs
self.const = const
self.default = default
self.type = type
self.choices = choices
self.required = required
self.help = help
self.metavar = metavar
def _get_kwargs(self):
names = [
'option_strings',
'dest',
'nargs',
'const',
'default',
'type',
'choices',
'help',
'metavar',
]
return [(name, getattr(self, name)) for name in names]
def __call__(self, parser, namespace, values, option_string=None):
raise NotImplementedError(_('.__call__() not defined'))
class _StoreAction(Action):
def __init__(self,
option_strings,
dest,
nargs=None,
const=None,
default=None,
type=None,
choices=None,
required=False,
help=None,
metavar=None):
if nargs == 0:
raise ValueError('nargs for store actions must be > 0; if you '
'have nothing to store, actions such as store '
'true or store const may be more appropriate')
if const is not None and nargs != OPTIONAL:
raise ValueError('nargs must be %r to supply const' % OPTIONAL)
super(_StoreAction, self).__init__(
option_strings=option_strings,
dest=dest,
nargs=nargs,
const=const,
default=default,
type=type,
choices=choices,
required=required,
help=help,
metavar=metavar)
def __call__(self, parser, namespace, values, option_string=None):
setattr(namespace, self.dest, values)
class _StoreConstAction(Action):
def __init__(self,
option_strings,
dest,
const,
default=None,
required=False,
help=None,
metavar=None):
super(_StoreConstAction, self).__init__(
option_strings=option_strings,
dest=dest,
nargs=0,
const=const,
default=default,
required=required,
help=help)
def __call__(self, parser, namespace, values, option_string=None):
setattr(namespace, self.dest, self.const)
class _StoreTrueAction(_StoreConstAction):
def __init__(self,
option_strings,
dest,
default=False,
required=False,
help=None):
super(_StoreTrueAction, self).__init__(
option_strings=option_strings,
dest=dest,
const=True,
default=default,
required=required,
help=help)
class _StoreFalseAction(_StoreConstAction):
def __init__(self,
option_strings,
dest,
default=True,
required=False,
help=None):
super(_StoreFalseAction, self).__init__(
option_strings=option_strings,
dest=dest,
const=False,
default=default,
required=required,
help=help)
class _AppendAction(Action):
def __init__(self,
option_strings,
dest,
nargs=None,
const=None,
default=None,
type=None,
choices=None,
required=False,
help=None,
metavar=None):
if nargs == 0:
raise ValueError('nargs for append actions must be > 0; if arg '
'strings are not supplying the value to append, '
'the append const action may be more appropriate')
if const is not None and nargs != OPTIONAL:
raise ValueError('nargs must be %r to supply const' % OPTIONAL)
super(_AppendAction, self).__init__(
option_strings=option_strings,
dest=dest,
nargs=nargs,
const=const,
default=default,
type=type,
choices=choices,
required=required,
help=help,
metavar=metavar)
def __call__(self, parser, namespace, values, option_string=None):
items = _copy.copy(_ensure_value(namespace, self.dest, []))
items.append(values)
setattr(namespace, self.dest, items)
class _AppendConstAction(Action):
def __init__(self,
option_strings,
dest,
const,
default=None,
required=False,
help=None,
metavar=None):
super(_AppendConstAction, self).__init__(
option_strings=option_strings,
dest=dest,
nargs=0,
const=const,
default=default,
required=required,
help=help,
metavar=metavar)
def __call__(self, parser, namespace, values, option_string=None):
items = _copy.copy(_ensure_value(namespace, self.dest, []))
items.append(self.const)
setattr(namespace, self.dest, items)
class _CountAction(Action):
def __init__(self,
option_strings,
dest,
default=None,
required=False,
help=None):
super(_CountAction, self).__init__(
option_strings=option_strings,
dest=dest,
nargs=0,
default=default,
required=required,
help=help)
def __call__(self, parser, namespace, values, option_string=None):
new_count = _ensure_value(namespace, self.dest, 0) + 1
setattr(namespace, self.dest, new_count)
class _HelpAction(Action):
def __init__(self,
option_strings,
dest=SUPPRESS,
default=SUPPRESS,
help=None):
super(_HelpAction, self).__init__(
option_strings=option_strings,
dest=dest,
default=default,
nargs=0,
help=help)
def __call__(self, parser, namespace, values, option_string=None):
parser.print_help()
parser.exit()
class _VersionAction(Action):
def __init__(self,
option_strings,
version=None,
dest=SUPPRESS,
default=SUPPRESS,
help="show program's version number and exit"):
super(_VersionAction, self).__init__(
option_strings=option_strings,
dest=dest,
default=default,
nargs=0,
help=help)
self.version = version
def __call__(self, parser, namespace, values, option_string=None):
version = self.version
if version is None:
version = parser.version
formatter = parser._get_formatter()
formatter.add_text(version)
parser.exit(message=formatter.format_help())
class _SubParsersAction(Action):
class _ChoicesPseudoAction(Action):
def __init__(self, name, aliases, help):
metavar = dest = name
if aliases:
metavar += ' (%s)' % ', '.join(aliases)
sup = super(_SubParsersAction._ChoicesPseudoAction, self)
sup.__init__(option_strings=[], dest=dest, help=help,
metavar=metavar)
def __init__(self,
option_strings,
prog,
parser_class,
dest=SUPPRESS,
help=None,
metavar=None):
self._prog_prefix = prog
self._parser_class = parser_class
self._name_parser_map = {}
self._choices_actions = []
super(_SubParsersAction, self).__init__(
option_strings=option_strings,
dest=dest,
nargs=PARSER,
choices=self._name_parser_map,
help=help,
metavar=metavar)
def add_parser(self, name, **kwargs):
# set prog from the existing prefix
if kwargs.get('prog') is None:
kwargs['prog'] = '%s %s' % (self._prog_prefix, name)
aliases = kwargs.pop('aliases', ())
# create a pseudo-action to hold the choice help
if 'help' in kwargs:
help = kwargs.pop('help')
choice_action = self._ChoicesPseudoAction(name, aliases, help)
self._choices_actions.append(choice_action)
# create the parser and add it to the map
parser = self._parser_class(**kwargs)
self._name_parser_map[name] = parser
# make parser available under aliases also
for alias in aliases:
self._name_parser_map[alias] = parser
return parser
def _get_subactions(self):
return self._choices_actions
def __call__(self, parser, namespace, values, option_string=None):
parser_name = values[0]
arg_strings = values[1:]
# set the parser name if requested
if self.dest is not SUPPRESS:
setattr(namespace, self.dest, parser_name)
# select the parser
try:
parser = self._name_parser_map[parser_name]
except KeyError:
tup = parser_name, ', '.join(self._name_parser_map)
msg = _('unknown parser %r (choices: %s)' % tup)
raise ArgumentError(self, msg)
# parse all the remaining options into the namespace
# store any unrecognized options on the object, so that the top
# level parser can decide what to do with them
namespace, arg_strings = parser.parse_known_args(arg_strings, namespace)
if arg_strings:
vars(namespace).setdefault(_UNRECOGNIZED_ARGS_ATTR, [])
getattr(namespace, _UNRECOGNIZED_ARGS_ATTR).extend(arg_strings)
# ==============
# Type classes
# ==============
class FileType(object):
"""Factory for creating file object types
Instances of FileType are typically passed as type= arguments to the
ArgumentParser add_argument() method.
Keyword Arguments:
- mode -- A string indicating how the file is to be opened. Accepts the
same values as the builtin open() function.
- bufsize -- The file's desired buffer size. Accepts the same values as
the builtin open() function.
"""
def __init__(self, mode='r', bufsize=None):
self._mode = mode
self._bufsize = bufsize
def __call__(self, string):
# the special argument "-" means sys.std{in,out}
if string == '-':
if 'r' in self._mode:
return _sys.stdin
elif 'w' in self._mode:
return _sys.stdout
else:
msg = _('argument "-" with mode %r' % self._mode)
raise ValueError(msg)
# all other arguments are used as file names
if self._bufsize:
return open(string, self._mode, self._bufsize)
else:
return open(string, self._mode)
def __repr__(self):
args = [self._mode, self._bufsize]
args_str = ', '.join([repr(arg) for arg in args if arg is not None])
return '%s(%s)' % (type(self).__name__, args_str)
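# Illustrative usage sketch (not part of the original module): FileType
# instances are passed as the type= argument of add_argument() so the parser
# opens files on the caller's behalf; the pseudo-filename '-' maps to
# sys.stdin or sys.stdout depending on the mode.  The 'copy' program name
# below is a made-up placeholder.
def _filetype_usage_sketch():
    parser = ArgumentParser(prog='copy')
    parser.add_argument('infile', type=FileType('r'))
    parser.add_argument('outfile', type=FileType('w'))
    # "copy data.txt out.txt" would open both files; '-' '-' uses the
    # standard streams instead
    return parser.parse_args(['-', '-'])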
# ===========================
# Optional and Positional Parsing
# ===========================
class Namespace(_AttributeHolder):
"""Simple object for storing attributes.
Implements equality by attribute names and values, and provides a simple
string representation.
"""
def __init__(self, **kwargs):
for name in kwargs:
setattr(self, name, kwargs[name])
__hash__ = None
def __eq__(self, other):
return vars(self) == vars(other)
def __ne__(self, other):
return not (self == other)
def __contains__(self, key):
return key in self.__dict__
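# Illustrative usage sketch (not part of the original module): Namespace
# objects simply hold attributes, compare equal by attribute names and
# values, and support the "in" operator for attribute presence.  The
# attribute names below are arbitrary examples.
def _namespace_usage_sketch():
    ns = Namespace(verbose=True, count=3)
    assert ns.count == 3
    assert ns == Namespace(count=3, verbose=True)    # order does not matter
    assert 'count' in ns and 'missing' not in ns
    return ns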
class _ActionsContainer(object):
def __init__(self,
description,
prefix_chars,
argument_default,
conflict_handler):
super(_ActionsContainer, self).__init__()
self.description = description
self.argument_default = argument_default
self.prefix_chars = prefix_chars
self.conflict_handler = conflict_handler
# set up registries
self._registries = {}
# register actions
self.register('action', None, _StoreAction)
self.register('action', 'store', _StoreAction)
self.register('action', 'store_const', _StoreConstAction)
self.register('action', 'store_true', _StoreTrueAction)
self.register('action', 'store_false', _StoreFalseAction)
self.register('action', 'append', _AppendAction)
self.register('action', 'append_const', _AppendConstAction)
self.register('action', 'count', _CountAction)
self.register('action', 'help', _HelpAction)
self.register('action', 'version', _VersionAction)
self.register('action', 'parsers', _SubParsersAction)
# raise an exception if the conflict handler is invalid
self._get_handler()
# action storage
self._actions = []
self._option_string_actions = {}
# groups
self._action_groups = []
self._mutually_exclusive_groups = []
# defaults storage
self._defaults = {}
# determines whether an "option" looks like a negative number
self._negative_number_matcher = _re.compile(r'^-\d+$|^-\d*\.\d+$')
# whether or not there are any optionals that look like negative
# numbers -- uses a list so it can be shared and edited
self._has_negative_number_optionals = []
# ====================
# Registration methods
# ====================
def register(self, registry_name, value, object):
registry = self._registries.setdefault(registry_name, {})
registry[value] = object
def _registry_get(self, registry_name, value, default=None):
return self._registries[registry_name].get(value, default)
# ==================================
# Namespace default accessor methods
# ==================================
def set_defaults(self, **kwargs):
self._defaults.update(kwargs)
# if these defaults match any existing arguments, replace
# the previous default on the object with the new one
for action in self._actions:
if action.dest in kwargs:
action.default = kwargs[action.dest]
def get_default(self, dest):
for action in self._actions:
if action.dest == dest and action.default is not None:
return action.default
return self._defaults.get(dest, None)
# =======================
# Adding argument actions
# =======================
def add_argument(self, *args, **kwargs):
"""
add_argument(dest, ..., name=value, ...)
add_argument(option_string, option_string, ..., name=value, ...)
"""
# if no positional args are supplied or only one is supplied and
# it doesn't look like an option string, parse a positional
# argument
chars = self.prefix_chars
if not args or len(args) == 1 and args[0][0] not in chars:
if args and 'dest' in kwargs:
raise ValueError('dest supplied twice for positional argument')
kwargs = self._get_positional_kwargs(*args, **kwargs)
# otherwise, we're adding an optional argument
else:
kwargs = self._get_optional_kwargs(*args, **kwargs)
# if no default was supplied, use the parser-level default
if 'default' not in kwargs:
dest = kwargs['dest']
if dest in self._defaults:
kwargs['default'] = self._defaults[dest]
elif self.argument_default is not None:
kwargs['default'] = self.argument_default
# create the action object, and add it to the parser
action_class = self._pop_action_class(kwargs)
if not _callable(action_class):
raise ValueError('unknown action "%s"' % action_class)
action = action_class(**kwargs)
# raise an error if the action type is not callable
type_func = self._registry_get('type', action.type, action.type)
if not _callable(type_func):
raise ValueError('%r is not callable' % type_func)
return self._add_action(action)
def add_argument_group(self, *args, **kwargs):
group = _ArgumentGroup(self, *args, **kwargs)
self._action_groups.append(group)
return group
def add_mutually_exclusive_group(self, **kwargs):
group = _MutuallyExclusiveGroup(self, **kwargs)
self._mutually_exclusive_groups.append(group)
return group
def _add_action(self, action):
# resolve any conflicts
self._check_conflict(action)
# add to actions list
self._actions.append(action)
action.container = self
# index the action by any option strings it has
for option_string in action.option_strings:
self._option_string_actions[option_string] = action
# set the flag if any option strings look like negative numbers
for option_string in action.option_strings:
if self._negative_number_matcher.match(option_string):
if not self._has_negative_number_optionals:
self._has_negative_number_optionals.append(True)
# return the created action
return action
def _remove_action(self, action):
self._actions.remove(action)
def _add_container_actions(self, container):
# collect groups by titles
title_group_map = {}
for group in self._action_groups:
if group.title in title_group_map:
msg = _('cannot merge actions - two groups are named %r')
raise ValueError(msg % (group.title))
title_group_map[group.title] = group
# map each action to its group
group_map = {}
for group in container._action_groups:
# if a group with the title exists, use that, otherwise
# create a new group matching the container's group
if group.title not in title_group_map:
title_group_map[group.title] = self.add_argument_group(
title=group.title,
description=group.description,
conflict_handler=group.conflict_handler)
# map the actions to their new group
for action in group._group_actions:
group_map[action] = title_group_map[group.title]
# add container's mutually exclusive groups
# NOTE: if add_mutually_exclusive_group ever gains title= and
# description= then this code will need to be expanded as above
for group in container._mutually_exclusive_groups:
mutex_group = self.add_mutually_exclusive_group(
required=group.required)
# map the actions to their new mutex group
for action in group._group_actions:
group_map[action] = mutex_group
# add all actions to this container or their group
for action in container._actions:
group_map.get(action, self)._add_action(action)
def _get_positional_kwargs(self, dest, **kwargs):
# make sure required is not specified
if 'required' in kwargs:
msg = _("'required' is an invalid argument for positionals")
raise TypeError(msg)
# mark positional arguments as required if at least one is
# always required
if kwargs.get('nargs') not in [OPTIONAL, ZERO_OR_MORE]:
kwargs['required'] = True
if kwargs.get('nargs') == ZERO_OR_MORE and 'default' not in kwargs:
kwargs['required'] = True
# return the keyword arguments with no option strings
return dict(kwargs, dest=dest, option_strings=[])
def _get_optional_kwargs(self, *args, **kwargs):
# determine short and long option strings
option_strings = []
long_option_strings = []
for option_string in args:
# error on strings that don't start with an appropriate prefix
if not option_string[0] in self.prefix_chars:
msg = _('invalid option string %r: '
'must start with a character %r')
tup = option_string, self.prefix_chars
raise ValueError(msg % tup)
# strings starting with two prefix characters are long options
option_strings.append(option_string)
if option_string[0] in self.prefix_chars:
if len(option_string) > 1:
if option_string[1] in self.prefix_chars:
long_option_strings.append(option_string)
# infer destination, '--foo-bar' -> 'foo_bar' and '-x' -> 'x'
dest = kwargs.pop('dest', None)
if dest is None:
if long_option_strings:
dest_option_string = long_option_strings[0]
else:
dest_option_string = option_strings[0]
dest = dest_option_string.lstrip(self.prefix_chars)
if not dest:
msg = _('dest= is required for options like %r')
raise ValueError(msg % option_string)
dest = dest.replace('-', '_')
# return the updated keyword arguments
return dict(kwargs, dest=dest, option_strings=option_strings)
def _pop_action_class(self, kwargs, default=None):
action = kwargs.pop('action', default)
return self._registry_get('action', action, action)
def _get_handler(self):
# determine function from conflict handler string
handler_func_name = '_handle_conflict_%s' % self.conflict_handler
try:
return getattr(self, handler_func_name)
except AttributeError:
            msg = _('invalid conflict_handler value: %r')
raise ValueError(msg % self.conflict_handler)
def _check_conflict(self, action):
# find all options that conflict with this option
confl_optionals = []
for option_string in action.option_strings:
if option_string in self._option_string_actions:
confl_optional = self._option_string_actions[option_string]
confl_optionals.append((option_string, confl_optional))
# resolve any conflicts
if confl_optionals:
conflict_handler = self._get_handler()
conflict_handler(action, confl_optionals)
def _handle_conflict_error(self, action, conflicting_actions):
message = _('conflicting option string(s): %s')
conflict_string = ', '.join([option_string
for option_string, action
in conflicting_actions])
raise ArgumentError(action, message % conflict_string)
def _handle_conflict_resolve(self, action, conflicting_actions):
# remove all conflicting options
for option_string, action in conflicting_actions:
# remove the conflicting option
            action.option_strings.remove(option_string)
self._option_string_actions.pop(option_string, None)
# if the option now has no option string, remove it from the
# container holding it
if not action.option_strings:
action.container._remove_action(action)
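# Illustrative usage sketch (not part of the original module): the two
# _handle_conflict_* strategies above are selected with the conflict_handler=
# keyword -- 'error' raises ArgumentError for a duplicate option string,
# while 'resolve' drops the older definition.  Option and program names are
# placeholders.
def _conflict_handler_sketch():
    parser = ArgumentParser(prog='demo', conflict_handler='resolve')
    parser.add_argument('--level', default='info')
    parser.add_argument('--level', default='debug')    # replaces the first
    return parser.parse_args([]).level                 # -> 'debug'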
class _ArgumentGroup(_ActionsContainer):
def __init__(self, container, title=None, description=None, **kwargs):
# add any missing keyword arguments by checking the container
update = kwargs.setdefault
update('conflict_handler', container.conflict_handler)
update('prefix_chars', container.prefix_chars)
update('argument_default', container.argument_default)
super_init = super(_ArgumentGroup, self).__init__
super_init(description=description, **kwargs)
# group attributes
self.title = title
self._group_actions = []
# share most attributes with the container
self._registries = container._registries
self._actions = container._actions
self._option_string_actions = container._option_string_actions
self._defaults = container._defaults
self._has_negative_number_optionals = \
container._has_negative_number_optionals
def _add_action(self, action):
action = super(_ArgumentGroup, self)._add_action(action)
self._group_actions.append(action)
return action
def _remove_action(self, action):
super(_ArgumentGroup, self)._remove_action(action)
self._group_actions.remove(action)
class _MutuallyExclusiveGroup(_ArgumentGroup):
def __init__(self, container, required=False):
super(_MutuallyExclusiveGroup, self).__init__(container)
self.required = required
self._container = container
def _add_action(self, action):
if action.required:
msg = _('mutually exclusive arguments must be optional')
raise ValueError(msg)
action = self._container._add_action(action)
self._group_actions.append(action)
return action
def _remove_action(self, action):
self._container._remove_action(action)
self._group_actions.remove(action)
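# Illustrative usage sketch (not part of the original module): a mutually
# exclusive group only accepts arguments that are not required, and parsing
# rejects command lines that supply more than one member of the group.  The
# option and program names below are placeholders.
def _mutually_exclusive_sketch():
    parser = ArgumentParser(prog='demo')
    group = parser.add_mutually_exclusive_group()
    group.add_argument('--quiet', action='store_true')
    group.add_argument('--verbose', action='store_true')
    # "demo --quiet" parses; "demo --quiet --verbose" calls parser.error()
    # with "not allowed with argument ..."
    return parser.parse_args(['--quiet'])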
class ArgumentParser(_AttributeHolder, _ActionsContainer):
"""Object for parsing command line strings into Python structures.
Keyword Arguments:
- prog -- The name of the program (default: sys.argv[0])
- usage -- A usage message (default: auto-generated from arguments)
- description -- A description of what the program does
- epilog -- Text following the argument descriptions
- parents -- Parsers whose arguments should be copied into this one
- formatter_class -- HelpFormatter class for printing help messages
- prefix_chars -- Characters that prefix optional arguments
- fromfile_prefix_chars -- Characters that prefix files containing
additional arguments
- argument_default -- The default value for all arguments
- conflict_handler -- String indicating how to handle conflicts
        - add_help -- Add a -h/--help option
"""
def __init__(self,
prog=None,
usage=None,
description=None,
epilog=None,
version=None,
parents=[],
formatter_class=HelpFormatter,
prefix_chars='-',
fromfile_prefix_chars=None,
argument_default=None,
conflict_handler='error',
add_help=True):
if version is not None:
import warnings
warnings.warn(
"""The "version" argument to ArgumentParser is deprecated. """
"""Please use """
""""add_argument(..., action='version', version="N", ...)" """
"""instead""", DeprecationWarning)
superinit = super(ArgumentParser, self).__init__
superinit(description=description,
prefix_chars=prefix_chars,
argument_default=argument_default,
conflict_handler=conflict_handler)
# default setting for prog
if prog is None:
prog = _os.path.basename(_sys.argv[0])
self.prog = prog
self.usage = usage
self.epilog = epilog
self.version = version
self.formatter_class = formatter_class
self.fromfile_prefix_chars = fromfile_prefix_chars
self.add_help = add_help
add_group = self.add_argument_group
self._positionals = add_group(_('positional arguments'))
self._optionals = add_group(_('optional arguments'))
self._subparsers = None
# register types
def identity(string):
return string
self.register('type', None, identity)
# add help and version arguments if necessary
# (using explicit default to override global argument_default)
if '-' in prefix_chars:
default_prefix = '-'
else:
default_prefix = prefix_chars[0]
if self.add_help:
self.add_argument(
default_prefix+'h', default_prefix*2+'help',
action='help', default=SUPPRESS,
help=_('show this help message and exit'))
if self.version:
self.add_argument(
default_prefix+'v', default_prefix*2+'version',
action='version', default=SUPPRESS,
version=self.version,
help=_("show program's version number and exit"))
# add parent arguments and defaults
for parent in parents:
self._add_container_actions(parent)
try:
defaults = parent._defaults
except AttributeError:
pass
else:
self._defaults.update(defaults)
# =======================
# Pretty __repr__ methods
# =======================
def _get_kwargs(self):
names = [
'prog',
'usage',
'description',
'version',
'formatter_class',
'conflict_handler',
'add_help',
]
return [(name, getattr(self, name)) for name in names]
# ==================================
# Optional/Positional adding methods
# ==================================
def add_subparsers(self, **kwargs):
if self._subparsers is not None:
self.error(_('cannot have multiple subparser arguments'))
# add the parser class to the arguments if it's not present
kwargs.setdefault('parser_class', type(self))
if 'title' in kwargs or 'description' in kwargs:
title = _(kwargs.pop('title', 'subcommands'))
description = _(kwargs.pop('description', None))
self._subparsers = self.add_argument_group(title, description)
else:
self._subparsers = self._positionals
# prog defaults to the usage message of this parser, skipping
# optional arguments and with no "usage:" prefix
if kwargs.get('prog') is None:
formatter = self._get_formatter()
positionals = self._get_positional_actions()
groups = self._mutually_exclusive_groups
formatter.add_usage(self.usage, positionals, groups, '')
kwargs['prog'] = formatter.format_help().strip()
# create the parsers action and add it to the positionals list
parsers_class = self._pop_action_class(kwargs, 'parsers')
action = parsers_class(option_strings=[], **kwargs)
self._subparsers._add_action(action)
# return the created parsers action
return action
def _add_action(self, action):
if action.option_strings:
self._optionals._add_action(action)
else:
self._positionals._add_action(action)
return action
def _get_optional_actions(self):
return [action
for action in self._actions
if action.option_strings]
def _get_positional_actions(self):
return [action
for action in self._actions
if not action.option_strings]
# =====================================
# Command line argument parsing methods
# =====================================
def parse_args(self, args=None, namespace=None):
args, argv = self.parse_known_args(args, namespace)
if argv:
msg = _('unrecognized arguments: %s')
self.error(msg % ' '.join(argv))
return args
def parse_known_args(self, args=None, namespace=None):
# args default to the system args
if args is None:
args = _sys.argv[1:]
# default Namespace built from parser defaults
if namespace is None:
namespace = Namespace()
# add any action defaults that aren't present
for action in self._actions:
if action.dest is not SUPPRESS:
if not hasattr(namespace, action.dest):
if action.default is not SUPPRESS:
default = action.default
if isinstance(action.default, basestring):
default = self._get_value(action, default)
setattr(namespace, action.dest, default)
# add any parser defaults that aren't present
for dest in self._defaults:
if not hasattr(namespace, dest):
setattr(namespace, dest, self._defaults[dest])
# parse the arguments and exit if there are any errors
try:
namespace, args = self._parse_known_args(args, namespace)
if hasattr(namespace, _UNRECOGNIZED_ARGS_ATTR):
args.extend(getattr(namespace, _UNRECOGNIZED_ARGS_ATTR))
delattr(namespace, _UNRECOGNIZED_ARGS_ATTR)
return namespace, args
except ArgumentError:
err = _sys.exc_info()[1]
self.error(str(err))
def _parse_known_args(self, arg_strings, namespace):
# replace arg strings that are file references
if self.fromfile_prefix_chars is not None:
arg_strings = self._read_args_from_files(arg_strings)
# map all mutually exclusive arguments to the other arguments
# they can't occur with
action_conflicts = {}
for mutex_group in self._mutually_exclusive_groups:
group_actions = mutex_group._group_actions
for i, mutex_action in enumerate(mutex_group._group_actions):
conflicts = action_conflicts.setdefault(mutex_action, [])
conflicts.extend(group_actions[:i])
conflicts.extend(group_actions[i + 1:])
# find all option indices, and determine the arg_string_pattern
# which has an 'O' if there is an option at an index,
# an 'A' if there is an argument, or a '-' if there is a '--'
option_string_indices = {}
arg_string_pattern_parts = []
arg_strings_iter = iter(arg_strings)
for i, arg_string in enumerate(arg_strings_iter):
# all args after -- are non-options
if arg_string == '--':
arg_string_pattern_parts.append('-')
for arg_string in arg_strings_iter:
arg_string_pattern_parts.append('A')
# otherwise, add the arg to the arg strings
# and note the index if it was an option
else:
option_tuple = self._parse_optional(arg_string)
if option_tuple is None:
pattern = 'A'
else:
option_string_indices[i] = option_tuple
pattern = 'O'
arg_string_pattern_parts.append(pattern)
# join the pieces together to form the pattern
arg_strings_pattern = ''.join(arg_string_pattern_parts)
        # converts arg strings to the appropriate type and then takes the action
seen_actions = set()
seen_non_default_actions = set()
def take_action(action, argument_strings, option_string=None):
seen_actions.add(action)
argument_values = self._get_values(action, argument_strings)
# error if this argument is not allowed with other previously
# seen arguments, assuming that actions that use the default
# value don't really count as "present"
if argument_values is not action.default:
seen_non_default_actions.add(action)
for conflict_action in action_conflicts.get(action, []):
if conflict_action in seen_non_default_actions:
msg = _('not allowed with argument %s')
action_name = _get_action_name(conflict_action)
raise ArgumentError(action, msg % action_name)
# take the action if we didn't receive a SUPPRESS value
# (e.g. from a default)
if argument_values is not SUPPRESS:
action(self, namespace, argument_values, option_string)
# function to convert arg_strings into an optional action
def consume_optional(start_index):
# get the optional identified at this index
option_tuple = option_string_indices[start_index]
action, option_string, explicit_arg = option_tuple
# identify additional optionals in the same arg string
# (e.g. -xyz is the same as -x -y -z if no args are required)
match_argument = self._match_argument
action_tuples = []
while True:
# if we found no optional action, skip it
if action is None:
extras.append(arg_strings[start_index])
return start_index + 1
# if there is an explicit argument, try to match the
# optional's string arguments to only this
if explicit_arg is not None:
arg_count = match_argument(action, 'A')
# if the action is a single-dash option and takes no
# arguments, try to parse more single-dash options out
# of the tail of the option string
chars = self.prefix_chars
if arg_count == 0 and option_string[1] not in chars:
action_tuples.append((action, [], option_string))
char = option_string[0]
option_string = char + explicit_arg[0]
new_explicit_arg = explicit_arg[1:] or None
optionals_map = self._option_string_actions
if option_string in optionals_map:
action = optionals_map[option_string]
explicit_arg = new_explicit_arg
else:
msg = _('ignored explicit argument %r')
raise ArgumentError(action, msg % explicit_arg)
                    # if the action expects exactly one argument, we've
# successfully matched the option; exit the loop
elif arg_count == 1:
stop = start_index + 1
args = [explicit_arg]
action_tuples.append((action, args, option_string))
break
# error if a double-dash option did not use the
# explicit argument
else:
msg = _('ignored explicit argument %r')
raise ArgumentError(action, msg % explicit_arg)
# if there is no explicit argument, try to match the
# optional's string arguments with the following strings
# if successful, exit the loop
else:
start = start_index + 1
selected_patterns = arg_strings_pattern[start:]
arg_count = match_argument(action, selected_patterns)
stop = start + arg_count
args = arg_strings[start:stop]
action_tuples.append((action, args, option_string))
break
# add the Optional to the list and return the index at which
# the Optional's string args stopped
assert action_tuples
for action, args, option_string in action_tuples:
take_action(action, args, option_string)
return stop
# the list of Positionals left to be parsed; this is modified
# by consume_positionals()
positionals = self._get_positional_actions()
# function to convert arg_strings into positional actions
def consume_positionals(start_index):
# match as many Positionals as possible
match_partial = self._match_arguments_partial
selected_pattern = arg_strings_pattern[start_index:]
arg_counts = match_partial(positionals, selected_pattern)
# slice off the appropriate arg strings for each Positional
# and add the Positional and its args to the list
for action, arg_count in zip(positionals, arg_counts):
args = arg_strings[start_index: start_index + arg_count]
start_index += arg_count
take_action(action, args)
# slice off the Positionals that we just parsed and return the
# index at which the Positionals' string args stopped
positionals[:] = positionals[len(arg_counts):]
return start_index
# consume Positionals and Optionals alternately, until we have
# passed the last option string
extras = []
start_index = 0
if option_string_indices:
max_option_string_index = max(option_string_indices)
else:
max_option_string_index = -1
while start_index <= max_option_string_index:
# consume any Positionals preceding the next option
next_option_string_index = min([
index
for index in option_string_indices
if index >= start_index])
if start_index != next_option_string_index:
positionals_end_index = consume_positionals(start_index)
# only try to parse the next optional if we didn't consume
# the option string during the positionals parsing
if positionals_end_index > start_index:
start_index = positionals_end_index
continue
else:
start_index = positionals_end_index
# if we consumed all the positionals we could and we're not
# at the index of an option string, there were extra arguments
if start_index not in option_string_indices:
strings = arg_strings[start_index:next_option_string_index]
extras.extend(strings)
start_index = next_option_string_index
# consume the next optional and any arguments for it
start_index = consume_optional(start_index)
# consume any positionals following the last Optional
stop_index = consume_positionals(start_index)
# if we didn't consume all the argument strings, there were extras
extras.extend(arg_strings[stop_index:])
# if we didn't use all the Positional structures, there were too few
# arg strings supplied.
if positionals:
self.error(_('too few arguments'))
# make sure all required actions were present
for action in self._actions:
if action.required:
if action not in seen_actions:
name = _get_action_name(action)
self.error(_('argument %s is required') % name)
# make sure all required groups had one option present
for group in self._mutually_exclusive_groups:
if group.required:
for action in group._group_actions:
if action in seen_non_default_actions:
break
# if no actions were used, report the error
else:
names = [_get_action_name(action)
for action in group._group_actions
if action.help is not SUPPRESS]
msg = _('one of the arguments %s is required')
self.error(msg % ' '.join(names))
# return the updated namespace and the extra arguments
return namespace, extras
def _read_args_from_files(self, arg_strings):
# expand arguments referencing files
new_arg_strings = []
for arg_string in arg_strings:
# for regular arguments, just add them back into the list
if arg_string[0] not in self.fromfile_prefix_chars:
new_arg_strings.append(arg_string)
# replace arguments referencing files with the file content
else:
try:
args_file = open(arg_string[1:])
try:
arg_strings = []
for arg_line in args_file.read().splitlines():
for arg in self.convert_arg_line_to_args(arg_line):
arg_strings.append(arg)
arg_strings = self._read_args_from_files(arg_strings)
new_arg_strings.extend(arg_strings)
finally:
args_file.close()
except IOError:
err = _sys.exc_info()[1]
self.error(str(err))
# return the modified argument list
return new_arg_strings
def convert_arg_line_to_args(self, arg_line):
return [arg_line]
def _match_argument(self, action, arg_strings_pattern):
# match the pattern for this action to the arg strings
nargs_pattern = self._get_nargs_pattern(action)
match = _re.match(nargs_pattern, arg_strings_pattern)
# raise an exception if we weren't able to find a match
if match is None:
nargs_errors = {
None: _('expected one argument'),
OPTIONAL: _('expected at most one argument'),
ONE_OR_MORE: _('expected at least one argument'),
}
default = _('expected %s argument(s)') % action.nargs
msg = nargs_errors.get(action.nargs, default)
raise ArgumentError(action, msg)
# return the number of arguments matched
return len(match.group(1))
def _match_arguments_partial(self, actions, arg_strings_pattern):
# progressively shorten the actions list by slicing off the
# final actions until we find a match
result = []
for i in range(len(actions), 0, -1):
actions_slice = actions[:i]
pattern = ''.join([self._get_nargs_pattern(action)
for action in actions_slice])
match = _re.match(pattern, arg_strings_pattern)
if match is not None:
result.extend([len(string) for string in match.groups()])
break
# return the list of arg string counts
return result
def _parse_optional(self, arg_string):
# if it's an empty string, it was meant to be a positional
if not arg_string:
return None
# if it doesn't start with a prefix, it was meant to be positional
if not arg_string[0] in self.prefix_chars:
return None
# if the option string is present in the parser, return the action
if arg_string in self._option_string_actions:
action = self._option_string_actions[arg_string]
return action, arg_string, None
# if it's just a single character, it was meant to be positional
if len(arg_string) == 1:
return None
# if the option string before the "=" is present, return the action
if '=' in arg_string:
option_string, explicit_arg = arg_string.split('=', 1)
if option_string in self._option_string_actions:
action = self._option_string_actions[option_string]
return action, option_string, explicit_arg
# search through all possible prefixes of the option string
# and all actions in the parser for possible interpretations
option_tuples = self._get_option_tuples(arg_string)
# if multiple actions match, the option string was ambiguous
if len(option_tuples) > 1:
options = ', '.join([option_string
for action, option_string, explicit_arg in option_tuples])
tup = arg_string, options
self.error(_('ambiguous option: %s could match %s') % tup)
# if exactly one action matched, this segmentation is good,
# so return the parsed action
elif len(option_tuples) == 1:
option_tuple, = option_tuples
return option_tuple
# if it was not found as an option, but it looks like a negative
# number, it was meant to be positional
# unless there are negative-number-like options
if self._negative_number_matcher.match(arg_string):
if not self._has_negative_number_optionals:
return None
# if it contains a space, it was meant to be a positional
if ' ' in arg_string:
return None
# it was meant to be an optional but there is no such option
# in this parser (though it might be a valid option in a subparser)
return None, arg_string, None
def _get_option_tuples(self, option_string):
result = []
# option strings starting with two prefix characters are only
# split at the '='
chars = self.prefix_chars
if option_string[0] in chars and option_string[1] in chars:
if '=' in option_string:
option_prefix, explicit_arg = option_string.split('=', 1)
else:
option_prefix = option_string
explicit_arg = None
for option_string in self._option_string_actions:
if option_string.startswith(option_prefix):
action = self._option_string_actions[option_string]
tup = action, option_string, explicit_arg
result.append(tup)
# single character options can be concatenated with their arguments
# but multiple character options always have to have their argument
# separate
elif option_string[0] in chars and option_string[1] not in chars:
option_prefix = option_string
explicit_arg = None
short_option_prefix = option_string[:2]
short_explicit_arg = option_string[2:]
for option_string in self._option_string_actions:
if option_string == short_option_prefix:
action = self._option_string_actions[option_string]
tup = action, option_string, short_explicit_arg
result.append(tup)
elif option_string.startswith(option_prefix):
action = self._option_string_actions[option_string]
tup = action, option_string, explicit_arg
result.append(tup)
# shouldn't ever get here
else:
self.error(_('unexpected option string: %s') % option_string)
# return the collected option tuples
return result
def _get_nargs_pattern(self, action):
# in all examples below, we have to allow for '--' args
# which are represented as '-' in the pattern
nargs = action.nargs
# the default (None) is assumed to be a single argument
if nargs is None:
nargs_pattern = '(-*A-*)'
# allow zero or one arguments
elif nargs == OPTIONAL:
nargs_pattern = '(-*A?-*)'
# allow zero or more arguments
elif nargs == ZERO_OR_MORE:
nargs_pattern = '(-*[A-]*)'
# allow one or more arguments
elif nargs == ONE_OR_MORE:
nargs_pattern = '(-*A[A-]*)'
# allow any number of options or arguments
elif nargs == REMAINDER:
nargs_pattern = '([-AO]*)'
# allow one argument followed by any number of options or arguments
elif nargs == PARSER:
nargs_pattern = '(-*A[-AO]*)'
# all others should be integers
else:
nargs_pattern = '(-*%s-*)' % '-*'.join('A' * nargs)
# if this is an optional action, -- is not allowed
if action.option_strings:
nargs_pattern = nargs_pattern.replace('-*', '')
nargs_pattern = nargs_pattern.replace('-', '')
# return the pattern
return nargs_pattern
# ========================
# Value conversion methods
# ========================
def _get_values(self, action, arg_strings):
# for everything but PARSER args, strip out '--'
if action.nargs not in [PARSER, REMAINDER]:
arg_strings = [s for s in arg_strings if s != '--']
# optional argument produces a default when not present
if not arg_strings and action.nargs == OPTIONAL:
if action.option_strings:
value = action.const
else:
value = action.default
if isinstance(value, basestring):
value = self._get_value(action, value)
self._check_value(action, value)
# when nargs='*' on a positional, if there were no command-line
# args, use the default if it is anything other than None
elif (not arg_strings and action.nargs == ZERO_OR_MORE and
not action.option_strings):
if action.default is not None:
value = action.default
else:
value = arg_strings
self._check_value(action, value)
# single argument or optional argument produces a single value
elif len(arg_strings) == 1 and action.nargs in [None, OPTIONAL]:
arg_string, = arg_strings
value = self._get_value(action, arg_string)
self._check_value(action, value)
# REMAINDER arguments convert all values, checking none
elif action.nargs == REMAINDER:
value = [self._get_value(action, v) for v in arg_strings]
# PARSER arguments convert all values, but check only the first
elif action.nargs == PARSER:
value = [self._get_value(action, v) for v in arg_strings]
self._check_value(action, value[0])
# all other types of nargs produce a list
else:
value = [self._get_value(action, v) for v in arg_strings]
for v in value:
self._check_value(action, v)
# return the converted value
return value
def _get_value(self, action, arg_string):
type_func = self._registry_get('type', action.type, action.type)
if not _callable(type_func):
msg = _('%r is not callable')
raise ArgumentError(action, msg % type_func)
# convert the value to the appropriate type
try:
result = type_func(arg_string)
# ArgumentTypeErrors indicate errors
except ArgumentTypeError:
name = getattr(action.type, '__name__', repr(action.type))
msg = str(_sys.exc_info()[1])
raise ArgumentError(action, msg)
# TypeErrors or ValueErrors also indicate errors
except (TypeError, ValueError):
name = getattr(action.type, '__name__', repr(action.type))
msg = _('invalid %s value: %r')
raise ArgumentError(action, msg % (name, arg_string))
# return the converted value
return result
def _check_value(self, action, value):
# converted value must be one of the choices (if specified)
if action.choices is not None and value not in action.choices:
tup = value, ', '.join(map(repr, action.choices))
msg = _('invalid choice: %r (choose from %s)') % tup
raise ArgumentError(action, msg)
# =======================
# Help-formatting methods
# =======================
def format_usage(self):
formatter = self._get_formatter()
formatter.add_usage(self.usage, self._actions,
self._mutually_exclusive_groups)
return formatter.format_help()
def format_help(self):
formatter = self._get_formatter()
# usage
formatter.add_usage(self.usage, self._actions,
self._mutually_exclusive_groups)
# description
formatter.add_text(self.description)
# positionals, optionals and user-defined groups
for action_group in self._action_groups:
formatter.start_section(action_group.title)
formatter.add_text(action_group.description)
formatter.add_arguments(action_group._group_actions)
formatter.end_section()
# epilog
formatter.add_text(self.epilog)
# determine help from format above
return formatter.format_help()
def format_version(self):
import warnings
warnings.warn(
'The format_version method is deprecated -- the "version" '
'argument to ArgumentParser is no longer supported.',
DeprecationWarning)
formatter = self._get_formatter()
formatter.add_text(self.version)
return formatter.format_help()
def _get_formatter(self):
return self.formatter_class(prog=self.prog)
# =====================
# Help-printing methods
# =====================
def print_usage(self, file=None):
if file is None:
file = _sys.stdout
self._print_message(self.format_usage(), file)
def print_help(self, file=None):
if file is None:
file = _sys.stdout
self._print_message(self.format_help(), file)
def print_version(self, file=None):
import warnings
warnings.warn(
'The print_version method is deprecated -- the "version" '
'argument to ArgumentParser is no longer supported.',
DeprecationWarning)
self._print_message(self.format_version(), file)
def _print_message(self, message, file=None):
if message:
if file is None:
file = _sys.stderr
file.write(message)
# ===============
# Exiting methods
# ===============
def exit(self, status=0, message=None):
if message:
self._print_message(message, _sys.stderr)
_sys.exit(status)
def error(self, message):
"""error(message: string)
Prints a usage message incorporating the message to stderr and
exits.
If you override this in a subclass, it should not return -- it
should either exit or raise an exception.
"""
self.print_usage(_sys.stderr)
self.exit(2, _('%s: error: %s\n') % (self.prog, message))
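# Illustrative usage sketch (not part of the original module): ties the
# pieces together -- a parser with a subcommand, parsed via
# parse_known_args() so unrecognized options are returned to the caller
# instead of being treated as fatal.  The program, subcommand and URL below
# are placeholders.
def _parser_usage_sketch():
    parser = ArgumentParser(prog='tool')
    parser.add_argument('--verbose', action='store_true')
    subparsers = parser.add_subparsers(dest='command')
    fetch = subparsers.add_parser('fetch', help='fetch a resource')
    fetch.add_argument('url')
    namespace, extras = parser.parse_known_args(
        ['--verbose', 'fetch', 'http://example.com', '--unknown'])
    # namespace.command == 'fetch', namespace.url == 'http://example.com',
    # and extras == ['--unknown']
    return namespace, extras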
|
py | 1a370571c844519164b3e7b650f19a2a0e5e377a | # Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Object Server for Swift """
import six.moves.cPickle as pickle
import json
import os
import multiprocessing
import time
import traceback
import socket
import math
from swift import gettext_ as _
from hashlib import md5
from eventlet import sleep, wsgi, Timeout, tpool
from eventlet.greenthread import spawn
from swift.common.utils import public, get_logger, \
config_true_value, timing_stats, replication, \
normalize_delete_at_timestamp, get_log_line, Timestamp, \
get_expirer_container, parse_mime_headers, \
iter_multipart_mime_documents, extract_swift_bytes, safe_json_loads, \
config_auto_int_value
from swift.common.bufferedhttp import http_connect
from swift.common.constraints import check_object_creation, \
valid_timestamp, check_utf8
from swift.common.exceptions import ConnectionTimeout, DiskFileQuarantined, \
DiskFileNotExist, DiskFileCollision, DiskFileNoSpace, DiskFileDeleted, \
DiskFileDeviceUnavailable, DiskFileExpired, ChunkReadTimeout, \
ChunkReadError, DiskFileXattrNotSupported
from swift.obj import ssync_receiver
from swift.common.http import is_success
from swift.common.base_storage_server import BaseStorageServer
from swift.common.header_key_dict import HeaderKeyDict
from swift.common.request_helpers import get_name_and_placement, \
is_user_meta, is_sys_or_user_meta, is_object_transient_sysmeta, \
resolve_etag_is_at_header, is_sys_meta
from swift.common.swob import HTTPAccepted, HTTPBadRequest, HTTPCreated, \
HTTPInternalServerError, HTTPNoContent, HTTPNotFound, \
HTTPPreconditionFailed, HTTPRequestTimeout, HTTPUnprocessableEntity, \
HTTPClientDisconnect, HTTPMethodNotAllowed, Request, Response, \
HTTPInsufficientStorage, HTTPForbidden, HTTPException, HTTPConflict, \
HTTPServerError
from swift.obj.diskfile import DATAFILE_SYSTEM_META, DiskFileRouter
def iter_mime_headers_and_bodies(wsgi_input, mime_boundary, read_chunk_size):
mime_documents_iter = iter_multipart_mime_documents(
wsgi_input, mime_boundary, read_chunk_size)
for file_like in mime_documents_iter:
hdrs = parse_mime_headers(file_like)
yield (hdrs, file_like)
def drain(file_like, read_size, timeout):
"""
Read and discard any bytes from file_like.
:param file_like: file-like object to read from
:param read_size: how big a chunk to read at a time
:param timeout: how long to wait for a read (use None for no timeout)
:raises ChunkReadTimeout: if no chunk was read in time
"""
while True:
with ChunkReadTimeout(timeout):
chunk = file_like.read(read_size)
if not chunk:
break
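# Illustrative usage sketch (not part of the original module): shows how
# drain() pairs with iter_mime_headers_and_bodies() above -- inspect each
# MIME sub-document's headers, then read off any part that is not needed so
# the next part can be reached.  The chunk size and timeout values are
# arbitrary placeholders.
def _drain_usage_sketch(wsgi_input, mime_boundary):
    for hdrs, body in iter_mime_headers_and_bodies(wsgi_input,
                                                   mime_boundary, 65536):
        if hdrs.get('X-Document') == 'put commit':
            return hdrs
        # not the part we want; discard it with a 60 second read timeout
        drain(body, 65536, 60)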
def _make_backend_fragments_header(fragments):
if fragments:
result = {}
for ts, frag_list in fragments.items():
result[ts.internal] = frag_list
return json.dumps(result)
return None
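# Illustrative usage sketch (not part of the original module): the fragments
# argument maps Timestamp objects to lists of fragment indexes, so the
# header value comes out as JSON keyed by each timestamp's internal form.
# The timestamp and index values below are arbitrary.
def _fragments_header_sketch():
    ts = Timestamp(1234567890.12345)
    # -> '{"1234567890.12345": [0, 2]}'
    return _make_backend_fragments_header({ts: [0, 2]})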
class EventletPlungerString(str):
"""
Eventlet won't send headers until it's accumulated at least
eventlet.wsgi.MINIMUM_CHUNK_SIZE bytes or the app iter is exhausted. If we
want to send the response body behind Eventlet's back, perhaps with some
zero-copy wizardry, then we have to unclog the plumbing in eventlet.wsgi
to force the headers out, so we use an EventletPlungerString to empty out
all of Eventlet's buffers.
"""
def __len__(self):
return wsgi.MINIMUM_CHUNK_SIZE + 1
class ObjectController(BaseStorageServer):
"""Implements the WSGI application for the Swift Object Server."""
server_type = 'object-server'
def __init__(self, conf, logger=None):
"""
Creates a new WSGI application for the Swift Object Server. An
example configuration is given at
<source-dir>/etc/object-server.conf-sample or
/etc/swift/object-server.conf-sample.
"""
super(ObjectController, self).__init__(conf)
self.logger = logger or get_logger(conf, log_route='object-server')
self.node_timeout = float(conf.get('node_timeout', 3))
self.container_update_timeout = float(
conf.get('container_update_timeout', 1))
self.conn_timeout = float(conf.get('conn_timeout', 0.5))
self.client_timeout = int(conf.get('client_timeout', 60))
self.disk_chunk_size = int(conf.get('disk_chunk_size', 65536))
self.network_chunk_size = int(conf.get('network_chunk_size', 65536))
self.log_requests = config_true_value(conf.get('log_requests', 'true'))
self.max_upload_time = int(conf.get('max_upload_time', 86400))
self.slow = int(conf.get('slow', 0))
self.keep_cache_private = \
config_true_value(conf.get('keep_cache_private', 'false'))
default_allowed_headers = '''
content-disposition,
content-encoding,
x-delete-at,
x-object-manifest,
x-static-large-object,
'''
extra_allowed_headers = [
header.strip().lower() for header in conf.get(
'allowed_headers', default_allowed_headers).split(',')
if header.strip()
]
self.allowed_headers = set()
for header in extra_allowed_headers:
if header not in DATAFILE_SYSTEM_META:
self.allowed_headers.add(header)
self.auto_create_account_prefix = \
conf.get('auto_create_account_prefix') or '.'
self.expiring_objects_account = self.auto_create_account_prefix + \
(conf.get('expiring_objects_account_name') or 'expiring_objects')
self.expiring_objects_container_divisor = \
int(conf.get('expiring_objects_container_divisor') or 86400)
# Initialization was successful, so now apply the network chunk size
# parameter as the default read / write buffer size for the network
# sockets.
#
# NOTE WELL: This is a class setting, so until we get set this on a
# per-connection basis, this affects reading and writing on ALL
# sockets, those between the proxy servers and external clients, and
# those between the proxy servers and the other internal servers.
#
# ** Because the primary motivation for this is to optimize how data
# is written back to the proxy server, we could use the value from the
# disk_chunk_size parameter. However, it affects all created sockets
# using this class so we have chosen to tie it to the
# network_chunk_size parameter value instead.
socket._fileobject.default_bufsize = self.network_chunk_size
# Provide further setup specific to an object server implementation.
self.setup(conf)
def setup(self, conf):
"""
Implementation specific setup. This method is called at the very end
by the constructor to allow a specific implementation to modify
existing attributes or add its own attributes.
:param conf: WSGI configuration parameter
"""
# Common on-disk hierarchy shared across account, container and object
# servers.
self._diskfile_router = DiskFileRouter(conf, self.logger)
# This is populated by global_conf_callback way below as the semaphore
# is shared by all workers.
if 'replication_semaphore' in conf:
# The value was put in a list so it could get past paste
self.replication_semaphore = conf['replication_semaphore'][0]
else:
self.replication_semaphore = None
self.replication_failure_threshold = int(
conf.get('replication_failure_threshold') or 100)
self.replication_failure_ratio = float(
conf.get('replication_failure_ratio') or 1.0)
servers_per_port = int(conf.get('servers_per_port', '0') or 0)
if servers_per_port:
# The typical servers-per-port deployment also uses one port per
# disk, so you really get N servers per disk. In that case,
# having a pool of 20 threads per server per disk is far too
# much. For example, given a 60-disk chassis and 4 servers per
# disk, the default configuration will give us 21 threads per
# server (the main thread plus the twenty tpool threads), for a
# total of around 60 * 21 * 4 = 5040 threads. This is clearly
# too high.
#
# Instead, we use a tpool size of 1, giving us 2 threads per
# process. In the example above, that's 60 * 2 * 4 = 480
# threads, which is reasonable since there are 240 processes.
default_tpool_size = 1
else:
# If we're not using servers-per-port, then leave the tpool size
# alone. The default (20) is typically good enough for one
# object server handling requests for many disks.
default_tpool_size = None
tpool_size = config_auto_int_value(
conf.get('eventlet_tpool_num_threads'),
default_tpool_size)
if tpool_size:
tpool.set_num_threads(tpool_size)
def get_diskfile(self, device, partition, account, container, obj,
policy, **kwargs):
"""
Utility method for instantiating a DiskFile object supporting a given
REST API.
An implementation of the object server that wants to use a different
DiskFile class would simply over-ride this method to provide that
behavior.
"""
return self._diskfile_router[policy].get_diskfile(
device, partition, account, container, obj, policy, **kwargs)
def async_update(self, op, account, container, obj, host, partition,
contdevice, headers_out, objdevice, policy,
logger_thread_locals=None):
"""
Sends or saves an async update.
:param op: operation performed (ex: 'PUT', or 'DELETE')
:param account: account name for the object
:param container: container name for the object
:param obj: object name
:param host: host that the container is on
:param partition: partition that the container is on
:param contdevice: device name that the container is on
:param headers_out: dictionary of headers to send in the container
request
:param objdevice: device name that the object is in
:param policy: the associated BaseStoragePolicy instance
:param logger_thread_locals: The thread local values to be set on the
self.logger to retain transaction
logging information.
"""
if logger_thread_locals:
self.logger.thread_locals = logger_thread_locals
headers_out['user-agent'] = 'object-server %s' % os.getpid()
full_path = '/%s/%s/%s' % (account, container, obj)
if all([host, partition, contdevice]):
try:
with ConnectionTimeout(self.conn_timeout):
ip, port = host.rsplit(':', 1)
conn = http_connect(ip, port, contdevice, partition, op,
full_path, headers_out)
with Timeout(self.node_timeout):
response = conn.getresponse()
response.read()
if is_success(response.status):
return
else:
self.logger.error(_(
'ERROR Container update failed '
'(saving for async update later): %(status)d '
'response from %(ip)s:%(port)s/%(dev)s'),
{'status': response.status, 'ip': ip, 'port': port,
'dev': contdevice})
except (Exception, Timeout):
self.logger.exception(_(
'ERROR container update failed with '
'%(ip)s:%(port)s/%(dev)s (saving for async update later)'),
{'ip': ip, 'port': port, 'dev': contdevice})
data = {'op': op, 'account': account, 'container': container,
'obj': obj, 'headers': headers_out}
timestamp = headers_out.get('x-meta-timestamp',
headers_out.get('x-timestamp'))
self._diskfile_router[policy].pickle_async_update(
objdevice, account, container, obj, data, timestamp, policy)
def container_update(self, op, account, container, obj, request,
headers_out, objdevice, policy):
"""
Update the container when objects are updated.
:param op: operation performed (ex: 'PUT', or 'DELETE')
:param account: account name for the object
:param container: container name for the object
:param obj: object name
:param request: the original request object driving the update
:param headers_out: dictionary of headers to send in the container
request(s)
:param objdevice: device name that the object is in
:param policy: the BaseStoragePolicy instance
"""
headers_in = request.headers
conthosts = [h.strip() for h in
headers_in.get('X-Container-Host', '').split(',')]
contdevices = [d.strip() for d in
headers_in.get('X-Container-Device', '').split(',')]
contpartition = headers_in.get('X-Container-Partition', '')
if len(conthosts) != len(contdevices):
# This shouldn't happen unless there's a bug in the proxy,
# but if there is, we want to know about it.
self.logger.error(_(
'ERROR Container update failed: different '
'numbers of hosts and devices in request: '
'"%(hosts)s" vs "%(devices)s"') % {
'hosts': headers_in.get('X-Container-Host', ''),
'devices': headers_in.get('X-Container-Device', '')})
return
if contpartition:
updates = zip(conthosts, contdevices)
else:
updates = []
headers_out['x-trans-id'] = headers_in.get('x-trans-id', '-')
headers_out['referer'] = request.as_referer()
headers_out['X-Backend-Storage-Policy-Index'] = int(policy)
update_greenthreads = []
for conthost, contdevice in updates:
gt = spawn(self.async_update, op, account, container, obj,
conthost, contpartition, contdevice, headers_out,
objdevice, policy,
logger_thread_locals=self.logger.thread_locals)
update_greenthreads.append(gt)
# Wait a little bit to see if the container updates are successful.
# If we immediately return after firing off the greenthread above, then
# we're more likely to confuse the end-user who does a listing right
# after getting a successful response to the object create. The
# `container_update_timeout` bounds the length of time we wait so that
# one slow container server doesn't make the entire request lag.
try:
with Timeout(self.container_update_timeout):
for gt in update_greenthreads:
gt.wait()
except Timeout:
# updates didn't go through, log it and return
self.logger.debug(
'Container update timeout (%.4fs) waiting for %s',
self.container_update_timeout, updates)
def delete_at_update(self, op, delete_at, account, container, obj,
request, objdevice, policy):
"""
Update the expiring objects container when objects are updated.
:param op: operation performed (ex: 'PUT', or 'DELETE')
:param delete_at: scheduled delete in UNIX seconds, int
:param account: account name for the object
:param container: container name for the object
:param obj: object name
:param request: the original request driving the update
:param objdevice: device name that the object is in
:param policy: the BaseStoragePolicy instance (used for tmp dir)
"""
if config_true_value(
request.headers.get('x-backend-replication', 'f')):
return
delete_at = normalize_delete_at_timestamp(delete_at)
updates = [(None, None)]
partition = None
hosts = contdevices = [None]
headers_in = request.headers
headers_out = HeaderKeyDict({
# system accounts are always Policy-0
'X-Backend-Storage-Policy-Index': 0,
'x-timestamp': request.timestamp.internal,
'x-trans-id': headers_in.get('x-trans-id', '-'),
'referer': request.as_referer()})
if op != 'DELETE':
delete_at_container = headers_in.get('X-Delete-At-Container', None)
if not delete_at_container:
self.logger.warning(
'X-Delete-At-Container header must be specified for '
'expiring objects background %s to work properly. Making '
'best guess as to the container name for now.' % op)
# TODO(gholt): In a future release, change the above warning to
# a raised exception and remove the guess code below.
delete_at_container = get_expirer_container(
delete_at, self.expiring_objects_container_divisor,
account, container, obj)
partition = headers_in.get('X-Delete-At-Partition', None)
hosts = headers_in.get('X-Delete-At-Host', '')
contdevices = headers_in.get('X-Delete-At-Device', '')
updates = [upd for upd in
zip((h.strip() for h in hosts.split(',')),
(c.strip() for c in contdevices.split(',')))
if all(upd) and partition]
if not updates:
updates = [(None, None)]
headers_out['x-size'] = '0'
headers_out['x-content-type'] = 'text/plain'
headers_out['x-etag'] = 'd41d8cd98f00b204e9800998ecf8427e'
else:
# DELETEs of old expiration data have no way of knowing what the
# old X-Delete-At-Container was at the time of the initial setting
# of the data, so a best guess is made here.
# Worst case is a DELETE is issued now for something that doesn't
# exist there and the original data is left where it is, where
# it will be ignored when the expirer eventually tries to issue the
# object DELETE later since the X-Delete-At value won't match up.
delete_at_container = get_expirer_container(
delete_at, self.expiring_objects_container_divisor,
account, container, obj)
delete_at_container = normalize_delete_at_timestamp(
delete_at_container)
for host, contdevice in updates:
self.async_update(
op, self.expiring_objects_account, delete_at_container,
'%s-%s/%s/%s' % (delete_at, account, container, obj),
host, partition, contdevice, headers_out, objdevice,
policy)
def _make_timeout_reader(self, file_like):
def timeout_reader():
with ChunkReadTimeout(self.client_timeout):
try:
return file_like.read(self.network_chunk_size)
except (IOError, ValueError):
raise ChunkReadError
return timeout_reader
def _read_put_commit_message(self, mime_documents_iter):
rcvd_commit = False
try:
with ChunkReadTimeout(self.client_timeout):
commit_hdrs, commit_iter = next(mime_documents_iter)
if commit_hdrs.get('X-Document', None) == "put commit":
rcvd_commit = True
drain(commit_iter, self.network_chunk_size, self.client_timeout)
except ChunkReadError:
raise HTTPClientDisconnect()
except ChunkReadTimeout:
raise HTTPRequestTimeout()
except StopIteration:
raise HTTPBadRequest(body="couldn't find PUT commit MIME doc")
return rcvd_commit
def _read_metadata_footer(self, mime_documents_iter):
try:
with ChunkReadTimeout(self.client_timeout):
footer_hdrs, footer_iter = next(mime_documents_iter)
except ChunkReadError:
raise HTTPClientDisconnect()
except ChunkReadTimeout:
raise HTTPRequestTimeout()
except StopIteration:
raise HTTPBadRequest(body="couldn't find footer MIME doc")
timeout_reader = self._make_timeout_reader(footer_iter)
try:
footer_body = ''.join(iter(timeout_reader, ''))
except ChunkReadError:
raise HTTPClientDisconnect()
except ChunkReadTimeout:
raise HTTPRequestTimeout()
footer_md5 = footer_hdrs.get('Content-MD5')
if not footer_md5:
raise HTTPBadRequest(body="no Content-MD5 in footer")
if footer_md5 != md5(footer_body).hexdigest():
raise HTTPUnprocessableEntity(body="footer MD5 mismatch")
try:
return HeaderKeyDict(json.loads(footer_body))
except ValueError:
raise HTTPBadRequest("invalid JSON for footer doc")
def _check_container_override(self, update_headers, metadata,
footers=None):
"""
Applies any overrides to the container update headers.
Overrides may be in the x-object-sysmeta-container-update- namespace or
the x-backend-container-update-override- namespace. The former is
preferred and is used by proxy middlewares. The latter is historical
but is still used with EC policy PUT requests; for backwards
compatibility the header names used with EC policy requests have not
been changed to the sysmeta namespace - that way the EC PUT path of a
newer proxy will remain compatible with an object server that pre-dates
the introduction of the x-object-sysmeta-container-update- namespace
and vice-versa.
:param update_headers: a dict of headers used in the container update
        :param metadata: a dict that may contain override items
        :param footers: another dict that may contain override items, at a
higher priority than metadata
"""
footers = footers or {}
# the order of this list is significant:
# x-object-sysmeta-container-update-override-* headers take precedence
# over x-backend-container-update-override-* headers
override_prefixes = ['x-backend-container-update-override-',
'x-object-sysmeta-container-update-override-']
for override_prefix in override_prefixes:
for key, val in metadata.items():
if key.lower().startswith(override_prefix):
override = key.lower().replace(override_prefix, 'x-')
update_headers[override] = val
# apply x-backend-container-update-override* from footers *before*
# x-object-sysmeta-container-update-override-* from headers
for key, val in footers.items():
if key.lower().startswith(override_prefix):
override = key.lower().replace(override_prefix, 'x-')
update_headers[override] = val
def _preserve_slo_manifest(self, update_metadata, orig_metadata):
if 'X-Static-Large-Object' in orig_metadata:
update_metadata['X-Static-Large-Object'] = \
orig_metadata['X-Static-Large-Object']
@public
@timing_stats()
def POST(self, request):
"""Handle HTTP POST requests for the Swift Object Server."""
device, partition, account, container, obj, policy = \
get_name_and_placement(request, 5, 5, True)
req_timestamp = valid_timestamp(request)
new_delete_at = int(request.headers.get('X-Delete-At') or 0)
if new_delete_at and new_delete_at < time.time():
return HTTPBadRequest(body='X-Delete-At in past', request=request,
content_type='text/plain')
next_part_power = request.headers.get('X-Backend-Next-Part-Power')
try:
disk_file = self.get_diskfile(
device, partition, account, container, obj,
policy=policy, open_expired=config_true_value(
request.headers.get('x-backend-replication', 'false')),
next_part_power=next_part_power)
except DiskFileDeviceUnavailable:
return HTTPInsufficientStorage(drive=device, request=request)
try:
orig_metadata = disk_file.read_metadata()
except DiskFileXattrNotSupported:
return HTTPInsufficientStorage(drive=device, request=request)
except (DiskFileNotExist, DiskFileQuarantined):
return HTTPNotFound(request=request)
orig_timestamp = Timestamp(orig_metadata.get('X-Timestamp', 0))
orig_ctype_timestamp = disk_file.content_type_timestamp
req_ctype_time = '0'
req_ctype = request.headers.get('Content-Type')
if req_ctype:
req_ctype_time = request.headers.get('Content-Type-Timestamp',
req_timestamp.internal)
req_ctype_timestamp = Timestamp(req_ctype_time)
if orig_timestamp >= req_timestamp \
and orig_ctype_timestamp >= req_ctype_timestamp:
return HTTPConflict(
request=request,
headers={'X-Backend-Timestamp': orig_timestamp.internal})
if req_timestamp > orig_timestamp:
metadata = {'X-Timestamp': req_timestamp.internal}
self._preserve_slo_manifest(metadata, orig_metadata)
metadata.update(val for val in request.headers.items()
if (is_user_meta('object', val[0]) or
is_object_transient_sysmeta(val[0])))
headers_to_copy = (
request.headers.get(
'X-Backend-Replication-Headers', '').split() +
list(self.allowed_headers))
for header_key in headers_to_copy:
if header_key in request.headers:
header_caps = header_key.title()
metadata[header_caps] = request.headers[header_key]
orig_delete_at = int(orig_metadata.get('X-Delete-At') or 0)
if orig_delete_at != new_delete_at:
if new_delete_at:
self.delete_at_update(
'PUT', new_delete_at, account, container, obj, request,
device, policy)
if orig_delete_at:
self.delete_at_update('DELETE', orig_delete_at, account,
container, obj, request, device,
policy)
else:
# preserve existing metadata, only content-type may be updated
metadata = dict(disk_file.get_metafile_metadata())
if req_ctype_timestamp > orig_ctype_timestamp:
# we have a new content-type, add to metadata and container update
content_type_headers = {
'Content-Type': request.headers['Content-Type'],
'Content-Type-Timestamp': req_ctype_timestamp.internal
}
metadata.update(content_type_headers)
else:
# send existing content-type with container update
content_type_headers = {
'Content-Type': disk_file.content_type,
'Content-Type-Timestamp': orig_ctype_timestamp.internal
}
if orig_ctype_timestamp != disk_file.data_timestamp:
# only add to metadata if it's not the datafile content-type
metadata.update(content_type_headers)
try:
disk_file.write_metadata(metadata)
except (DiskFileXattrNotSupported, DiskFileNoSpace):
return HTTPInsufficientStorage(drive=device, request=request)
if (content_type_headers['Content-Type-Timestamp']
!= disk_file.data_timestamp):
# Current content-type is not from the datafile, but the datafile
# content-type may have a swift_bytes param that was appended by
# SLO and we must continue to send that with the container update.
# Do this (rather than use a separate header) for backwards
# compatibility because there may be 'legacy' container updates in
# async pending that have content-types with swift_bytes params, so
# we have to be able to handle those in container server anyway.
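            # For example (value is an assumed illustration), a datafile
            # Content-Type of 'text/plain;swift_bytes=10485760' yields
            # swift_bytes = '10485760', which is re-appended below.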
_, swift_bytes = extract_swift_bytes(
disk_file.get_datafile_metadata()['Content-Type'])
if swift_bytes:
content_type_headers['Content-Type'] += (';swift_bytes=%s'
% swift_bytes)
update_headers = HeaderKeyDict({
'x-size': orig_metadata['Content-Length'],
'x-content-type': content_type_headers['Content-Type'],
'x-timestamp': disk_file.data_timestamp.internal,
'x-content-type-timestamp':
content_type_headers['Content-Type-Timestamp'],
'x-meta-timestamp': metadata['X-Timestamp'],
'x-etag': orig_metadata['ETag']})
# Special cases for backwards compatibility.
# For EC policy, send X-Object-Sysmeta-Ec-Etag which is same as the
# X-Backend-Container-Update-Override-Etag value sent with the original
# PUT. Similarly send X-Object-Sysmeta-Ec-Content-Length which is the
# same as the X-Backend-Container-Update-Override-Size value. We have
# to send Etag and size with a POST container update because the
# original PUT container update may have failed or be in async_pending.
if 'X-Object-Sysmeta-Ec-Etag' in orig_metadata:
update_headers['X-Etag'] = orig_metadata[
'X-Object-Sysmeta-Ec-Etag']
if 'X-Object-Sysmeta-Ec-Content-Length' in orig_metadata:
update_headers['X-Size'] = orig_metadata[
'X-Object-Sysmeta-Ec-Content-Length']
self._check_container_override(update_headers, orig_metadata)
# object POST updates are PUT to the container server
self.container_update(
'PUT', account, container, obj, request, update_headers,
device, policy)
# Add sysmeta to response
resp_headers = {}
for key, value in orig_metadata.items():
if is_sys_meta('object', key):
resp_headers[key] = value
return HTTPAccepted(request=request, headers=resp_headers)
@public
@timing_stats()
def PUT(self, request):
"""Handle HTTP PUT requests for the Swift Object Server."""
device, partition, account, container, obj, policy = \
get_name_and_placement(request, 5, 5, True)
req_timestamp = valid_timestamp(request)
error_response = check_object_creation(request, obj)
if error_response:
return error_response
new_delete_at = int(request.headers.get('X-Delete-At') or 0)
try:
fsize = request.message_length()
except ValueError as e:
return HTTPBadRequest(body=str(e), request=request,
content_type='text/plain')
# In case of multipart-MIME put, the proxy sends a chunked request,
# but may let us know the real content length so we can verify that
# we have enough disk space to hold the object.
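        # i.e. the proxy may send Transfer-Encoding: chunked plus an
        # X-Backend-Obj-Content-Length header carrying the expected size.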
if fsize is None:
fsize = request.headers.get('X-Backend-Obj-Content-Length')
if fsize is not None:
try:
fsize = int(fsize)
except ValueError as e:
return HTTPBadRequest(body=str(e), request=request,
content_type='text/plain')
# SSYNC will include Frag-Index header for subrequests to primary
# nodes; handoff nodes should 409 subrequests to over-write an
# existing data fragment until they offloaded the existing fragment
frag_index = request.headers.get('X-Backend-Ssync-Frag-Index')
next_part_power = request.headers.get('X-Backend-Next-Part-Power')
try:
disk_file = self.get_diskfile(
device, partition, account, container, obj,
policy=policy, frag_index=frag_index,
next_part_power=next_part_power)
except DiskFileDeviceUnavailable:
return HTTPInsufficientStorage(drive=device, request=request)
try:
orig_metadata = disk_file.read_metadata()
orig_timestamp = disk_file.data_timestamp
except DiskFileXattrNotSupported:
return HTTPInsufficientStorage(drive=device, request=request)
except DiskFileDeleted as e:
orig_metadata = {}
orig_timestamp = e.timestamp
except (DiskFileNotExist, DiskFileQuarantined):
orig_metadata = {}
orig_timestamp = Timestamp(0)
# Checks for If-None-Match
if request.if_none_match is not None and orig_metadata:
if '*' in request.if_none_match:
# File exists already so return 412
return HTTPPreconditionFailed(request=request)
if orig_metadata.get('ETag') in request.if_none_match:
# The current ETag matches, so return 412
return HTTPPreconditionFailed(request=request)
if orig_timestamp >= req_timestamp:
return HTTPConflict(
request=request,
headers={'X-Backend-Timestamp': orig_timestamp.internal})
orig_delete_at = int(orig_metadata.get('X-Delete-At') or 0)
upload_expiration = time.time() + self.max_upload_time
etag = md5()
elapsed_time = 0
try:
with disk_file.create(size=fsize) as writer:
upload_size = 0
# If the proxy wants to send us object metadata after the
# object body, it sets some headers. We have to tell the
# proxy, in the 100 Continue response, that we're able to
# parse a multipart MIME document and extract the object and
# metadata from it. If we don't, then the proxy won't
# actually send the footer metadata.
have_metadata_footer = False
use_multiphase_commit = False
mime_documents_iter = iter([])
obj_input = request.environ['wsgi.input']
hundred_continue_headers = []
if config_true_value(
request.headers.get(
'X-Backend-Obj-Multiphase-Commit')):
use_multiphase_commit = True
hundred_continue_headers.append(
('X-Obj-Multiphase-Commit', 'yes'))
if config_true_value(
request.headers.get('X-Backend-Obj-Metadata-Footer')):
have_metadata_footer = True
hundred_continue_headers.append(
('X-Obj-Metadata-Footer', 'yes'))
if have_metadata_footer or use_multiphase_commit:
obj_input.set_hundred_continue_response_headers(
hundred_continue_headers)
mime_boundary = request.headers.get(
'X-Backend-Obj-Multipart-Mime-Boundary')
if not mime_boundary:
return HTTPBadRequest("no MIME boundary")
try:
with ChunkReadTimeout(self.client_timeout):
mime_documents_iter = iter_mime_headers_and_bodies(
request.environ['wsgi.input'],
mime_boundary, self.network_chunk_size)
_junk_hdrs, obj_input = next(mime_documents_iter)
except ChunkReadError:
return HTTPClientDisconnect(request=request)
except ChunkReadTimeout:
return HTTPRequestTimeout(request=request)
timeout_reader = self._make_timeout_reader(obj_input)
try:
for chunk in iter(timeout_reader, ''):
start_time = time.time()
if start_time > upload_expiration:
self.logger.increment('PUT.timeouts')
return HTTPRequestTimeout(request=request)
etag.update(chunk)
upload_size = writer.write(chunk)
elapsed_time += time.time() - start_time
except ChunkReadError:
return HTTPClientDisconnect(request=request)
except ChunkReadTimeout:
return HTTPRequestTimeout(request=request)
if upload_size:
self.logger.transfer_rate(
'PUT.' + device + '.timing', elapsed_time,
upload_size)
if fsize is not None and fsize != upload_size:
return HTTPClientDisconnect(request=request)
footer_meta = {}
if have_metadata_footer:
footer_meta = self._read_metadata_footer(
mime_documents_iter)
request_etag = (footer_meta.get('etag') or
request.headers.get('etag', '')).lower()
etag = etag.hexdigest()
if request_etag and request_etag != etag:
return HTTPUnprocessableEntity(request=request)
metadata = {
'X-Timestamp': request.timestamp.internal,
'Content-Type': request.headers['content-type'],
'ETag': etag,
'Content-Length': str(upload_size),
}
metadata.update(val for val in request.headers.items()
if (is_sys_or_user_meta('object', val[0]) or
is_object_transient_sysmeta(val[0])))
metadata.update(val for val in footer_meta.items()
if (is_sys_or_user_meta('object', val[0]) or
is_object_transient_sysmeta(val[0])))
headers_to_copy = (
request.headers.get(
'X-Backend-Replication-Headers', '').split() +
list(self.allowed_headers))
for header_key in headers_to_copy:
if header_key in request.headers:
header_caps = header_key.title()
metadata[header_caps] = request.headers[header_key]
writer.put(metadata)
# if the PUT requires a two-phase commit (a data and a commit
# phase) send the proxy server another 100-continue response
# to indicate that we are finished writing object data
if use_multiphase_commit:
request.environ['wsgi.input'].\
send_hundred_continue_response()
if not self._read_put_commit_message(mime_documents_iter):
return HTTPServerError(request=request)
# got 2nd phase confirmation (when required), call commit to
# indicate a successful PUT
writer.commit(request.timestamp)
# Drain any remaining MIME docs from the socket. There
# shouldn't be any, but we must read the whole request body.
try:
while True:
with ChunkReadTimeout(self.client_timeout):
_junk_hdrs, _junk_body = next(mime_documents_iter)
drain(_junk_body, self.network_chunk_size,
self.client_timeout)
except ChunkReadError:
raise HTTPClientDisconnect()
except ChunkReadTimeout:
raise HTTPRequestTimeout()
except StopIteration:
pass
except (DiskFileXattrNotSupported, DiskFileNoSpace):
return HTTPInsufficientStorage(drive=device, request=request)
if orig_delete_at != new_delete_at:
if new_delete_at:
self.delete_at_update(
'PUT', new_delete_at, account, container, obj, request,
device, policy)
if orig_delete_at:
self.delete_at_update(
'DELETE', orig_delete_at, account, container, obj,
request, device, policy)
update_headers = HeaderKeyDict({
'x-size': metadata['Content-Length'],
'x-content-type': metadata['Content-Type'],
'x-timestamp': metadata['X-Timestamp'],
'x-etag': metadata['ETag']})
# apply any container update header overrides sent with request
self._check_container_override(update_headers, request.headers,
footer_meta)
self.container_update(
'PUT', account, container, obj, request,
update_headers,
device, policy)
return HTTPCreated(request=request, etag=etag)
@public
@timing_stats()
def GET(self, request):
"""Handle HTTP GET requests for the Swift Object Server."""
device, partition, account, container, obj, policy = \
get_name_and_placement(request, 5, 5, True)
frag_prefs = safe_json_loads(
request.headers.get('X-Backend-Fragment-Preferences'))
try:
disk_file = self.get_diskfile(
device, partition, account, container, obj,
policy=policy, frag_prefs=frag_prefs)
except DiskFileDeviceUnavailable:
return HTTPInsufficientStorage(drive=device, request=request)
try:
with disk_file.open():
metadata = disk_file.get_metadata()
obj_size = int(metadata['Content-Length'])
file_x_ts = Timestamp(metadata['X-Timestamp'])
keep_cache = (self.keep_cache_private or
('X-Auth-Token' not in request.headers and
'X-Storage-Token' not in request.headers))
conditional_etag = resolve_etag_is_at_header(request, metadata)
response = Response(
app_iter=disk_file.reader(keep_cache=keep_cache),
request=request, conditional_response=True,
conditional_etag=conditional_etag)
response.headers['Content-Type'] = metadata.get(
'Content-Type', 'application/octet-stream')
for key, value in metadata.items():
if (is_sys_or_user_meta('object', key) or
is_object_transient_sysmeta(key) or
key.lower() in self.allowed_headers):
response.headers[key] = value
response.etag = metadata['ETag']
response.last_modified = math.ceil(float(file_x_ts))
response.content_length = obj_size
try:
response.content_encoding = metadata[
'Content-Encoding']
except KeyError:
pass
response.headers['X-Timestamp'] = file_x_ts.normal
response.headers['X-Backend-Timestamp'] = file_x_ts.internal
response.headers['X-Backend-Data-Timestamp'] = \
disk_file.data_timestamp.internal
if disk_file.durable_timestamp:
response.headers['X-Backend-Durable-Timestamp'] = \
disk_file.durable_timestamp.internal
response.headers['X-Backend-Fragments'] = \
_make_backend_fragments_header(disk_file.fragments)
resp = request.get_response(response)
except DiskFileXattrNotSupported:
return HTTPInsufficientStorage(drive=device, request=request)
except (DiskFileNotExist, DiskFileQuarantined) as e:
headers = {}
if hasattr(e, 'timestamp'):
headers['X-Backend-Timestamp'] = e.timestamp.internal
resp = HTTPNotFound(request=request, headers=headers,
conditional_response=True)
return resp
@public
@timing_stats(sample_rate=0.8)
def HEAD(self, request):
"""Handle HTTP HEAD requests for the Swift Object Server."""
device, partition, account, container, obj, policy = \
get_name_and_placement(request, 5, 5, True)
frag_prefs = safe_json_loads(
request.headers.get('X-Backend-Fragment-Preferences'))
try:
disk_file = self.get_diskfile(
device, partition, account, container, obj,
policy=policy, frag_prefs=frag_prefs)
except DiskFileDeviceUnavailable:
return HTTPInsufficientStorage(drive=device, request=request)
try:
metadata = disk_file.read_metadata()
except DiskFileXattrNotSupported:
return HTTPInsufficientStorage(drive=device, request=request)
except (DiskFileNotExist, DiskFileQuarantined) as e:
headers = {}
if hasattr(e, 'timestamp'):
headers['X-Backend-Timestamp'] = e.timestamp.internal
return HTTPNotFound(request=request, headers=headers,
conditional_response=True)
conditional_etag = resolve_etag_is_at_header(request, metadata)
response = Response(request=request, conditional_response=True,
conditional_etag=conditional_etag)
response.headers['Content-Type'] = metadata.get(
'Content-Type', 'application/octet-stream')
for key, value in metadata.items():
if (is_sys_or_user_meta('object', key) or
is_object_transient_sysmeta(key) or
key.lower() in self.allowed_headers):
response.headers[key] = value
response.etag = metadata['ETag']
ts = Timestamp(metadata['X-Timestamp'])
response.last_modified = math.ceil(float(ts))
# Needed for container sync feature
response.headers['X-Timestamp'] = ts.normal
response.headers['X-Backend-Timestamp'] = ts.internal
response.headers['X-Backend-Data-Timestamp'] = \
disk_file.data_timestamp.internal
if disk_file.durable_timestamp:
response.headers['X-Backend-Durable-Timestamp'] = \
disk_file.durable_timestamp.internal
response.headers['X-Backend-Fragments'] = \
_make_backend_fragments_header(disk_file.fragments)
response.content_length = int(metadata['Content-Length'])
try:
response.content_encoding = metadata['Content-Encoding']
except KeyError:
pass
return response
@public
@timing_stats()
def DELETE(self, request):
"""Handle HTTP DELETE requests for the Swift Object Server."""
device, partition, account, container, obj, policy = \
get_name_and_placement(request, 5, 5, True)
req_timestamp = valid_timestamp(request)
next_part_power = request.headers.get('X-Backend-Next-Part-Power')
try:
disk_file = self.get_diskfile(
device, partition, account, container, obj,
policy=policy, next_part_power=next_part_power)
except DiskFileDeviceUnavailable:
return HTTPInsufficientStorage(drive=device, request=request)
try:
orig_metadata = disk_file.read_metadata()
except DiskFileXattrNotSupported:
return HTTPInsufficientStorage(drive=device, request=request)
except DiskFileExpired as e:
orig_timestamp = e.timestamp
orig_metadata = e.metadata
response_class = HTTPNotFound
except DiskFileDeleted as e:
orig_timestamp = e.timestamp
orig_metadata = {}
response_class = HTTPNotFound
except (DiskFileNotExist, DiskFileQuarantined):
orig_timestamp = 0
orig_metadata = {}
response_class = HTTPNotFound
else:
orig_timestamp = disk_file.data_timestamp
if orig_timestamp < req_timestamp:
response_class = HTTPNoContent
else:
response_class = HTTPConflict
response_timestamp = max(orig_timestamp, req_timestamp)
orig_delete_at = int(orig_metadata.get('X-Delete-At') or 0)
try:
req_if_delete_at_val = request.headers['x-if-delete-at']
req_if_delete_at = int(req_if_delete_at_val)
except KeyError:
pass
except ValueError:
return HTTPBadRequest(
request=request,
body='Bad X-If-Delete-At header value')
else:
# request includes x-if-delete-at; we must not place a tombstone
# if we can not verify the x-if-delete-at time
if not orig_timestamp:
# no object found at all
return HTTPNotFound()
if orig_delete_at != req_if_delete_at:
return HTTPPreconditionFailed(
request=request,
body='X-If-Delete-At and X-Delete-At do not match')
else:
# differentiate success from no object at all
response_class = HTTPNoContent
if orig_delete_at:
self.delete_at_update('DELETE', orig_delete_at, account,
container, obj, request, device,
policy)
if orig_timestamp < req_timestamp:
try:
disk_file.delete(req_timestamp)
except DiskFileNoSpace:
return HTTPInsufficientStorage(drive=device, request=request)
self.container_update(
'DELETE', account, container, obj, request,
HeaderKeyDict({'x-timestamp': req_timestamp.internal}),
device, policy)
return response_class(
request=request,
headers={'X-Backend-Timestamp': response_timestamp.internal})
@public
@replication
@timing_stats(sample_rate=0.1)
def REPLICATE(self, request):
"""
Handle REPLICATE requests for the Swift Object Server. This is used
by the object replicator to get hashes for directories.
Note that the name REPLICATE is preserved for historical reasons as
this verb really just returns the hashes information for the specified
parameters and is used, for example, by both replication and EC.
"""
device, partition, suffix_parts, policy = \
get_name_and_placement(request, 2, 3, True)
suffixes = suffix_parts.split('-') if suffix_parts else []
try:
hashes = self._diskfile_router[policy].get_hashes(
device, partition, suffixes, policy)
except DiskFileDeviceUnavailable:
resp = HTTPInsufficientStorage(drive=device, request=request)
else:
resp = Response(body=pickle.dumps(hashes))
return resp
@public
@replication
@timing_stats(sample_rate=0.1)
def SSYNC(self, request):
return Response(app_iter=ssync_receiver.Receiver(self, request)())
def __call__(self, env, start_response):
"""WSGI Application entry point for the Swift Object Server."""
start_time = time.time()
req = Request(env)
self.logger.txn_id = req.headers.get('x-trans-id', None)
if not check_utf8(req.path_info):
res = HTTPPreconditionFailed(body='Invalid UTF8 or contains NULL')
else:
try:
# disallow methods which have not been marked 'public'
if req.method not in self.allowed_methods:
res = HTTPMethodNotAllowed()
else:
res = getattr(self, req.method)(req)
except DiskFileCollision:
res = HTTPForbidden(request=req)
except HTTPException as error_response:
res = error_response
except (Exception, Timeout):
self.logger.exception(_(
'ERROR __call__ error with %(method)s'
' %(path)s '), {'method': req.method, 'path': req.path})
res = HTTPInternalServerError(body=traceback.format_exc())
trans_time = time.time() - start_time
res.fix_conditional_response()
if self.log_requests:
log_line = get_log_line(req, res, trans_time, '')
if req.method in ('REPLICATE', 'SSYNC') or \
'X-Backend-Replication' in req.headers:
self.logger.debug(log_line)
else:
self.logger.info(log_line)
if req.method in ('PUT', 'DELETE'):
slow = self.slow - trans_time
if slow > 0:
sleep(slow)
# To be able to zero-copy send the object, we need a few things.
# First, we have to be responding successfully to a GET, or else we're
# not sending the object. Second, we have to be able to extract the
# socket file descriptor from the WSGI input object. Third, the
# diskfile has to support zero-copy send.
#
# There's a good chance that this could work for 206 responses too,
# but the common case is sending the whole object, so we'll start
# there.
if req.method == 'GET' and res.status_int == 200 and \
isinstance(env['wsgi.input'], wsgi.Input):
app_iter = getattr(res, 'app_iter', None)
checker = getattr(app_iter, 'can_zero_copy_send', None)
if checker and checker():
# For any kind of zero-copy thing like sendfile or splice, we
# need the file descriptor. Eventlet doesn't provide a clean
# way of getting that, so we resort to this.
wsock = env['wsgi.input'].get_socket()
wsockfd = wsock.fileno()
# Don't call zero_copy_send() until after we force the HTTP
# headers out of Eventlet and into the socket.
def zero_copy_iter():
# If possible, set TCP_CORK so that headers don't
# immediately go on the wire, but instead, wait for some
# response body to make the TCP frames as large as
# possible (and hence as few packets as possible).
#
# On non-Linux systems, we might consider TCP_NODELAY, but
# since the only known zero-copy-capable diskfile uses
# Linux-specific syscalls, we'll defer that work until
# someone needs it.
if hasattr(socket, 'TCP_CORK'):
wsock.setsockopt(socket.IPPROTO_TCP,
socket.TCP_CORK, 1)
yield EventletPlungerString()
try:
app_iter.zero_copy_send(wsockfd)
except Exception:
self.logger.exception("zero_copy_send() blew up")
raise
yield ''
# Get headers ready to go out
res(env, start_response)
return zero_copy_iter()
else:
return res(env, start_response)
else:
return res(env, start_response)
def global_conf_callback(preloaded_app_conf, global_conf):
"""
Callback for swift.common.wsgi.run_wsgi during the global_conf
creation so that we can add our replication_semaphore, used to
limit the number of concurrent SSYNC_REQUESTS across all
workers.
:param preloaded_app_conf: The preloaded conf for the WSGI app.
This conf instance will go away, so
just read from it, don't write.
:param global_conf: The global conf that will eventually be
passed to the app_factory function later.
This conf is created before the worker
subprocesses are forked, so can be useful to
set up semaphores, shared memory, etc.
"""
replication_concurrency = int(
preloaded_app_conf.get('replication_concurrency') or 4)
if replication_concurrency:
# Have to put the value in a list so it can get past paste
global_conf['replication_semaphore'] = [
multiprocessing.BoundedSemaphore(replication_concurrency)]
def app_factory(global_conf, **local_conf):
"""paste.deploy app factory for creating WSGI object server apps"""
conf = global_conf.copy()
conf.update(local_conf)
return ObjectController(conf)
|
py | 1a370663b582f2b8497f98cfff06815dd935e237 | #
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class sslpolicy_binding(base_resource):
""" Binding class showing the resources that can be bound to sslpolicy_binding.
"""
def __init__(self) :
self._name = ""
self.sslpolicy_csvserver_binding = []
self.sslpolicy_sslservice_binding = []
self.sslpolicy_lbvserver_binding = []
self.sslpolicy_sslvserver_binding = []
self.sslpolicy_sslpolicylabel_binding = []
self.sslpolicy_sslglobal_binding = []
@property
def name(self) :
ur"""Name of the SSL policy for which to display detailed information.<br/>Minimum length = 1.
"""
try :
return self._name
except Exception as e:
raise e
@name.setter
def name(self, name) :
ur"""Name of the SSL policy for which to display detailed information.<br/>Minimum length = 1
"""
try :
self._name = name
except Exception as e:
raise e
@property
def sslpolicy_sslvserver_bindings(self) :
ur"""sslvserver that can be bound to sslpolicy.
"""
try :
return self._sslpolicy_sslvserver_binding
except Exception as e:
raise e
@property
def sslpolicy_sslservice_bindings(self) :
ur"""sslservice that can be bound to sslpolicy.
"""
try :
return self._sslpolicy_sslservice_binding
except Exception as e:
raise e
@property
def sslpolicy_csvserver_bindings(self) :
ur"""csvserver that can be bound to sslpolicy.
"""
try :
return self._sslpolicy_csvserver_binding
except Exception as e:
raise e
@property
def sslpolicy_lbvserver_bindings(self) :
ur"""lbvserver that can be bound to sslpolicy.
"""
try :
return self._sslpolicy_lbvserver_binding
except Exception as e:
raise e
@property
def sslpolicy_sslpolicylabel_bindings(self) :
ur"""sslpolicylabel that can be bound to sslpolicy.
"""
try :
return self._sslpolicy_sslpolicylabel_binding
except Exception as e:
raise e
@property
def sslpolicy_sslglobal_bindings(self) :
ur"""sslglobal that can be bound to sslpolicy.
"""
try :
return self._sslpolicy_sslglobal_binding
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
ur""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(sslpolicy_binding_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.sslpolicy_binding
except Exception as e :
raise e
def _get_object_name(self) :
ur""" Returns the value of object identifier argument
"""
try :
if self.name is not None :
return str(self.name)
return None
except Exception as e :
raise e
@classmethod
def get(self, service, name) :
ur""" Use this API to fetch sslpolicy_binding resource.
"""
try :
if type(name) is not list :
obj = sslpolicy_binding()
obj.name = name
response = obj.get_resource(service)
else :
			if name and len(name) > 0 :
				obj = [sslpolicy_binding() for _ in range(len(name))]
				response = [None for _ in range(len(name))]
				for i in range(len(name)) :
					obj[i].name = name[i]
					response[i] = obj[i].get_resource(service)
return response
except Exception as e:
raise e
class sslpolicy_binding_response(base_response) :
def __init__(self, length=1) :
self.sslpolicy_binding = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.sslpolicy_binding = [sslpolicy_binding() for _ in range(length)]
|
py | 1a3706e2c40b4bf6fe169bbe4f7519d072775fdd | # Copyright (c) 2020 kamyu. All rights reserved.
#
# Google Code Jam 2020 Round 2 - Problem D. Emacs++
# https://codingcompetitions.withgoogle.com/codejam/round/000000000019ffb9/000000000033893b
#
# Time: O(K * (logK)^2 + QlogK), correct and fully tested against lots of edge cases,
#       optimized to avoid MLE via lazy build,
#       passes test set 1 under PyPy2 but TLEs on test set 2 (time is very tight for Python/PyPy2)
# Space: O(KlogK)
#
# cleaner but slower solution
#
from itertools import izip
from heapq import heappop, heappush
from bisect import bisect_left
def dijkstra(adj, t): # Time: O(KlogK)
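    # Plain Dijkstra over a dict-of-dicts graph: adj[u] maps neighbour -> edge
    # weight, t is the start node; returns shortest distances from t as a dict.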
result, visited = {t:0}, set()
min_heap = [(0, t)]
while min_heap and len(visited) != len(adj):
curr, u = heappop(min_heap)
if u in visited:
continue
visited.add(u)
for v, w in adj[u].iteritems():
if v in visited:
continue
if v in result and result[v] <= curr+w:
continue
result[v] = curr+w
heappush(min_heap, (curr+w, v))
return result
def update_adj(lookup, brackets, adj, is_reversed, front, back, direction, dummy, d): # Time: O(K)
prev = back if front-d != dummy else dummy
for src in direction(brackets):
if prev == dummy:
prev = src
continue
dst, via = prev, src-d
if via == dst:
w = lookup[dst][not is_reversed][src]
else:
w = lookup[via][not is_reversed][src] + lookup[via][is_reversed][dst]
adj[src][dst] = w if dst not in adj[src] else min(adj[src][dst], w)
prev = src
def find_shortest_path(is_undir, pairs, lookup, brackets, t): # Time: O(KlogK)
result = []
for is_reversed in xrange(1 if is_undir else 2):
adj = {}
for src in brackets:
dst = pairs[src]
w = lookup[dst][not is_reversed][src]
if src not in adj:
adj[src] = {}
adj[src][dst] = w if dst not in adj[src] else min(adj[src][dst], w)
update_adj(lookup, brackets, adj, is_reversed,
brackets[0], brackets[-1], lambda x:x,
-1, 1)
update_adj(lookup, brackets, adj, is_reversed,
brackets[-1], brackets[0], lambda x:reversed(x),
len(pairs), -1)
result.append(dijkstra(adj, t))
if is_undir:
result.append(result[-1])
return result
def find_next_bracket(PRG, brackets, curr, d): # Time: O(K)
count = 0
while True:
curr += d
if not (0 <= curr < len(brackets)):
break
if PRG[brackets[curr]] == '(':
count += 1
else:
count -= 1
if count == -d:
break
return curr
def find_partitions(PRG, pairs, parents, brackets): # Time: O(K)
result, mid = [-1]*4, (len(brackets)-1)//2
if PRG[brackets[mid]] == '(':
left, right = mid, find_next_bracket(PRG, brackets, mid, 1)
else:
left, right = find_next_bracket(PRG, brackets, mid, -1), mid
while 2*(right-left+1) <= len(brackets)+2: # including virtual brackets we added
result[1], result[2] = brackets[left], brackets[right]
left, right = find_next_bracket(PRG, brackets, left, -1), find_next_bracket(PRG, brackets, right, 1)
result[0] = brackets[left] if left != -1 else parents[brackets[0]]
result[-1] = brackets[right] if right != len(brackets) else parents[brackets[-1]]
return result
def find_subregions(brackets, partitions):
i, new_brackets = 0, [[] for _ in xrange(4)]
for b in brackets:
if i < 4 and b > partitions[i]:
i += 1
if i < 4 and b == partitions[i]:
continue
new_brackets[i%4].append(b)
return new_brackets
def build(PRG, is_undir, pairs, parents, lookup, tree, node): # Time: O(KlogK)
brackets = tree[node][0]
partitions = find_partitions(PRG, pairs, parents, brackets) # Time: O(K)
children = [0]*4
tree[node] = [partitions, children]
for i in [0, 3]:
if not (brackets[0] <= partitions[i] <= brackets[-1]): # virtual brackets we added
continue
lookup[partitions[i]] = find_shortest_path(is_undir, pairs, lookup, brackets, partitions[i]) # Time: O(KlogK)
middle_brackets = [b for b in brackets if partitions[0] < b < partitions[3]]
for i in [1, 2]:
lookup[partitions[i]] = find_shortest_path(is_undir, pairs, lookup, middle_brackets, partitions[i]) # Time: O(KlogK)
for i, new_brackets in enumerate(find_subregions(brackets, partitions)):
if not new_brackets:
continue
children[i] = len(tree)
tree.append([new_brackets])
def query(PRG, is_undir, pairs, parents, lookup, tree, node, s, e): # Time: O(K * (logK)^2) for lazy build, O(QlogK) for query, run at most O(KlogK) in each depth, at most O(logK) depth
depth, ceil_log2_Kp1 = 0, ((len(PRG)+1)-1).bit_length() # 2^d-1 >= k, d >= ceil(log(k+1))
while True:
depth += 1
assert(depth <= ceil_log2_Kp1)
if len(tree[node]) == 1: # unvisited
build(PRG, is_undir, pairs, parents, lookup, tree, node)
partitions, children = tree[node]
a, b = map(lambda x: bisect_left(partitions, x)%4, (s, e))
if s == partitions[a] or e == partitions[b] or a != b:
break
node = children[a] # same subregion without covering partition nodes, visit subregion
return min(lookup[p][1][s] + lookup[p][0][e] for p in partitions if 0 <= p < len(PRG) and s in lookup[p][1] and e in lookup[p][0]) # find min LCA dist
def find_pairs_and_parents(s): # Time: O(K)
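    # pairs[i] is the index of the bracket matching s[i]; parents tracks the
    # enclosing bracket for each position, with -1 / len(s) used as sentinels
    # for the top level.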
pairs, parents, stk = [0]*len(s), [None]*len(s), []
parent = -1
for right, p in enumerate(s):
if p == '(':
parents[right] = parent
parent = right
stk.append(right)
else:
left = stk.pop()
parent = parents[left]
pairs[left], pairs[right] = right, left
for i in xrange(len(s)):
if parents[i] is None:
parents[i] = pairs[parents[pairs[i]]] if parents[pairs[i]] != -1 else len(s)
return pairs, parents
def init_dist(L, R, P, pairs):
lookup = [0]*len(pairs)
for i in xrange(len(pairs)):
lookup[i] = [{}, {}]
lookup[i][0][pairs[i]] = P[i] if pairs[i] not in lookup[i][0] else min(lookup[i][0][pairs[i]], P[i])
lookup[i][1][pairs[i]] = P[pairs[i]] if pairs[i] not in lookup[i][1] else min(lookup[i][1][pairs[i]], P[pairs[i]])
if i-1 >= 0:
lookup[i][0][i-1] = L[i] if i-1 not in lookup[i][0] else min(lookup[i][0][i-1], L[i])
lookup[i][1][i-1] = R[i-1] if i-1 not in lookup[i][1] else min(lookup[i][1][i-1], R[i-1])
if i+1 < len(pairs):
lookup[i][0][i+1] = R[i] if i+1 not in lookup[i][0] else min(lookup[i][0][i+1], R[i])
lookup[i][1][i+1] = L[i+1] if i+1 not in lookup[i][1] else min(lookup[i][1][i+1], L[i+1])
return lookup
def is_undirected(L, R, P, pairs):
for i in xrange(len(pairs)):
if P[i] != P[pairs[i]]:
break
if i-1 >= 0 and L[i] != R[i-1]:
break
if i+1 < len(pairs) and R[i] != L[i+1]:
break
else:
return True
return False
def emacspp():
K, Q = map(int, raw_input().strip().split())
PRG = raw_input().strip()
L, R, P, S, E = [map(int, raw_input().strip().split()) for _ in xrange(5)]
pairs, parents = find_pairs_and_parents(PRG)
is_undir, lookup, tree = is_undirected(L, R, P, pairs), init_dist(L, R, P, pairs), [[range(len(PRG))]]
return sum(query(PRG, is_undir, pairs, parents, lookup, tree, 0, s-1, e-1) for s, e in izip(S, E))
for case in xrange(input()):
print 'Case #%d: %s' % (case+1, emacspp())
|
py | 1a3707393b3bf9302405937b2662be638a5805d1 | from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import json
import logging
import os
import sys
from abc import ABCMeta
from importlib import import_module
from django.utils.module_loading import module_has_submodule
from six import with_metaclass
from kolibri.utils.build_config.default_plugins import DEFAULT_PLUGINS
from kolibri.utils.conf import KOLIBRI_HOME
logger = logging.getLogger(__name__)
conf_file = os.path.join(KOLIBRI_HOME, "plugins.json")
class ConfigDict(dict):
# These values are encoded on the config dict as sets
# so they need to be treated specially for serialization
# and deserialization to/from JSON
SET_KEYS = ("INSTALLED_PLUGINS", "DISABLED_PLUGINS", "UPDATED_PLUGINS")
def __init__(self):
# If the settings file does not exist or does not contain
# valid JSON then create it
self.set_defaults()
if os.path.isfile(conf_file):
try:
# Open up the config file and load settings
# use default OS encoding
with open(conf_file, "r") as kolibri_conf_file:
self.update(json.load(kolibri_conf_file))
return
except ValueError:
logger.warn(
"Attempted to load plugins.json but encountered a file that could not be decoded as valid JSON."
)
self.save()
logger.info("Initialized plugins.json")
def set_defaults(self):
self.update(
{
#: Everything in this list is added to django.conf.settings.INSTALLED_APPS
# except disabled ones below
"INSTALLED_PLUGINS": DEFAULT_PLUGINS,
#: Everything in this list is removed from the list above
"DISABLED_PLUGINS": [],
# Plugins that have been updated since we last initialized Kolibri
"UPDATED_PLUGINS": [],
# The current versions of plugins (both internal and external)
"PLUGIN_VERSIONS": {},
}
)
@property
def ACTIVE_PLUGINS(self):
return list(self["INSTALLED_PLUGINS"] - self["DISABLED_PLUGINS"])
def update(self, new_values):
"""
Updates current configuration with ``new_values``. Does not save to file.
"""
values_copy = new_values.copy()
for key in self.SET_KEYS:
if key in values_copy:
values_copy[key] = set(values_copy[key])
super(ConfigDict, self).update(values_copy)
def save(self):
# use default OS encoding
config_copy = self.copy()
for key in self.SET_KEYS:
if key in config_copy:
config_copy[key] = list(config_copy[key])
with open(conf_file, "w") as kolibri_conf_file:
json.dump(config_copy, kolibri_conf_file, indent=2, sort_keys=True)
def add_plugin(self, module_path):
if module_path in self.ACTIVE_PLUGINS:
logger.warning("{} already enabled".format(module_path))
return
self["INSTALLED_PLUGINS"].add(module_path)
self["UPDATED_PLUGINS"].add(module_path)
try:
self["DISABLED_PLUGINS"].remove(module_path)
except KeyError:
pass
self.save()
def remove_plugin(self, module_path):
if module_path not in self.ACTIVE_PLUGINS:
logger.warning("{} already disabled".format(module_path))
return
self["DISABLED_PLUGINS"].add(module_path)
try:
self["INSTALLED_PLUGINS"].remove(module_path)
except KeyError:
pass
try:
self["UPDATED_PLUGINS"].remove(module_path)
except KeyError:
pass
self.save()
def clear_plugin(self, module_path):
# Clean up references to plugins that either don't exist
# Or don't import properly.
try:
self["INSTALLED_PLUGINS"].remove(module_path)
except KeyError:
pass
try:
self["DISABLED_PLUGINS"].remove(module_path)
except KeyError:
pass
try:
self["UPDATED_PLUGINS"].remove(module_path)
except KeyError:
pass
self.save()
def update_plugin_version(self, module_path, new_version):
self["PLUGIN_VERSIONS"][module_path] = new_version
try:
self["UPDATED_PLUGINS"].remove(module_path)
except KeyError:
pass
self.save()
#: Set defaults before updating the dict
config = ConfigDict()
class SingletonMeta(ABCMeta):
_instances = {}
# Make all classes using this metaclass singletons
# Taken from here: https://stackoverflow.com/q/6760685
# Should be resistant to the __new__ method on the class object
# being overwritten.
def __call__(cls, *args, **kwargs):
if cls not in cls._instances:
cls._instances[cls] = super(SingletonMeta, cls).__call__(*args, **kwargs)
return cls._instances[cls]
class KolibriPluginBase(with_metaclass(SingletonMeta)):
"""
This is the base class that all Kolibri plugins need to implement.
"""
#: Comment
# Name of a local module that contains url_patterns that define
# URLs for views that do not contain any
# translated content, and hence will not be prefixed
# with a language prefix
untranslated_view_urls = None
#: Comment
# Name of a local module that contains url_patterns that define
# URLs for views that contain
# translated content, and hence will be prefixed
    # with a language prefix
translated_view_urls = None
#: Comment
# Name of a local module that contains url_patterns that define
# URLs for views that should be attached to the domain root.
# Use with caution! The lack of namespacing is dangerous.
root_view_urls = None
#: Comment
# Name of a local module that contains additional settings to augment
# Django settings.
# For settings that take a tuple or list, these will be appended to the value from
# the base settings module set through conventional Django means.
django_settings = None
#: Comment
# Name of a local module, containing a config spec as the 'option_spec' value.
# These options should not override the core config spec. To override default values
# of other options see the attribute below
kolibri_options = None
#: Comment
# Name of a local module, containing a set of options defaults as the 'option_defaults' value.
# Should be of the form:
# option_defaults = {
# "<Section Name>": {
# "<Option Name>": "<New Default Value>",
# }
# }
kolibri_option_defaults = None
# : Suggested property, not yet in use
migrate_on_enable = False
# : Suggested property, not yet in use
collect_static_on_enable = False
def __init__(self):
self.INSTALLED_APPS = []
@classmethod
def class_module_path(self):
return ".".join(self.__module__.split(".")[:-1])
@property
def module_path(self):
return self.class_module_path()
def _installed_apps_add(self):
"""Call this from your enable() method to have the plugin automatically
added to Kolibri configuration"""
config.add_plugin(self.module_path)
def _installed_apps_remove(self):
"""Call this from your enable() method to have the plugin automatically
added to Kolibri configuration"""
config.remove_plugin(self.module_path)
def enable(self):
"""Modify the kolibri config dict to your plugin's needs"""
self._installed_apps_add()
def disable(self):
"""Modify the kolibri config dict to your plugin's needs"""
self._installed_apps_remove()
def _return_module(self, module_name):
if module_has_submodule(sys.modules[self.module_path], module_name):
models_module_name = "%s.%s" % (self.module_path, module_name)
try:
return import_module(models_module_name)
except Exception as e:
logging.warn(
"Tried to import module {module_name} from {plugin} but an error was raised".format(
plugin=self.module_path, module_name=module_name
)
)
logging.exception(e)
return None
@property
def url_module(self):
"""
Return a url module, containing ``urlpatterns = [...]``, a conventional
Django application url module.
URLs are by default accessed through Django's reverse lookups like
this::
reverse('kolibri:mypluginclass:url_name')
To customize "mypluginclass" (which is automatically derived from the
plugin's class name), override ``url_namespace``.
By default this will be discovered based on the translated_view_urls
property.
"""
if self.translated_view_urls:
module = self._return_module(self.translated_view_urls)
if module is None:
logging.warn(
"{plugin} defined {urls} translated view urls but the module was not found".format(
plugin=self.module_path, urls=self.translated_view_urls
)
)
return module
@property
def api_url_module(self):
"""
Return a url module, containing ``urlpatterns = [...]``, a conventional
Django application url module.
Do this separately for API endpoints so that they do not need
to be prefixed by the language code.
URLs are by default accessed through Django's reverse lookups like
this::
reverse('kolibri:mypluginclass:url_name')
To customize "mypluginclass" (which is automatically derived from the
plugin's class name), override ``url_namespace``.
By default this will be discovered based on the untranslated_view_urls
property.
"""
if self.untranslated_view_urls:
module = self._return_module(self.untranslated_view_urls)
if module is None:
logging.warn(
"{plugin} defined {urls} untranslated view urls but the module was not found".format(
plugin=self.module_path, urls=self.untranslated_view_urls
)
)
return module
@property
def root_url_module(self):
"""
Return a url module, containing ``urlpatterns = [...]``, a conventional
Django application url module.
Do this separately for endpoints that need to be attached at the root.
URLs are by default accessed through Django's reverse lookups like
this::
reverse('kolibri:url_name')
By default this will be discovered based on the root_view_urls
property.
"""
if self.root_view_urls:
module = self._return_module(self.root_view_urls)
if module is None:
logging.warn(
"{plugin} defined {urls} root view urls but the module was not found".format(
plugin=self.module_path, urls=self.root_view_urls
)
)
return module
@property
def settings_module(self):
"""
Return a settings module, containing Django settings that this
module wants to apply.
For settings that take a tuple or list, these will be appended to the value from
the base settings module set through conventional Django means.
By default this will be discovered based on the django_settings
property.
"""
if self.django_settings:
module = self._return_module(self.django_settings)
if module is None:
logging.warn(
"{plugin} defined {module} django settings but the module was not found".format(
plugin=self.module_path, module=self.django_settings
)
)
return module
@property
def options_module(self):
"""
Return an options module, containing a config spec as the 'option_spec' value.
These options should not override the core config spec.
By default this will be discovered based on the kolibri_options
property.
"""
if self.kolibri_options:
module = self._return_module(self.kolibri_options)
if module is None:
logging.warn(
"{plugin} defined {module} kolibri options but the module was not found".format(
plugin=self.module_path, module=self.kolibri_options
)
)
return module
@property
def option_defaults_module(self):
"""
Return an option defaults module, containing default overrides as the 'options_default' value.
By default this will be discovered based on the kolibri_options
property.
"""
if self.kolibri_option_defaults:
module = self._return_module(self.kolibri_option_defaults)
if module is None:
logging.warn(
"{plugin} defined {module} kolibri option defaults but the module was not found".format(
plugin=self.module_path, module=self.kolibri_option_defaults
)
)
return module
@property
def url_slug(self):
"""
Where should urls be included? By default, this is a lower-case version
of the class name.
Example::
return r"my-plugin/"
.. warning:: Avoid the empty string, as you might get conflicts.
"""
return self.module_path.split(".")[-1].lower() + "/"
|
py | 1a37073c92b8a73b47300969ae85cf39fd34cd2e | # -*- coding: utf-8 -*-
import numpy as np
from endochrone import Base
from endochrone.stats.measures import euclidean_dist
__author__ = "nickwood"
__copyright__ = "nickwood"
__license__ = "mit"
class KMeans(Base):
def __init__(self, k=3):
self.n_centroids_ = k
super().__init__(properties={'requires_targets': False})
def fit(self, *, features, initial_centroids=None):
self.validate_fit(features=features)
if initial_centroids is None:
self.centroids = self.forgy_centroids_(features=features)
else:
self.centroids = initial_centroids
old_centroids = np.zeros(self.centroids.shape)
while np.any(old_centroids != self.centroids):
old_centroids = self.centroids
self.centroids = self.calculate_step(features=features)
def forgy_centroids_(self, *, features):
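        # Forgy initialization: choose k distinct samples at random and use
        # them directly as the starting centroids.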
k = self.n_centroids_
n_samples = features.shape[0]
_indexes = np.random.choice(range(n_samples), k, replace=False)
return features[_indexes, :]
def calculate_step(self, *, features):
n_c = self.nearest_centroids(features=features)
return self.recalculate_centroids(features=features, assignments=n_c)
def nearest_centroids(self, *, features):
return np.array([self.nearest_centroid(point=p) for p in features])
def nearest_centroid(self, *, point):
return np.argmin([euclidean_dist(point, c) for c in self.centroids])
def recalculate_centroids(self, *, features, assignments):
return np.array([np.mean(features[assignments == i], axis=0)
for i in range(self.n_centroids_)])
def predict(self, *, features):
self.validate_predict(features=features)
return self.nearest_centroids(features=features)
|
py | 1a370796706af963aefbc1a8738f1397626fabf1 | import learner
import recognizer
import dbconn
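# Console front end: option 1 registers a new user and trains a recognizer for
# their face; option 2 looks the user up by email and starts live recognition.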
print("Sign up enter 1")
print("Sign in enter 2\n")
print("Select action from above two.")
print("Press 'q' for exit from camera view.\n")
action = raw_input('Select action : ')
email = raw_input('Enter email : ')
try:
if int(action) == 1:
name = raw_input('Enter name : ')
res = dbconn.create_user(email, name)
if res == True:
id, name = dbconn.get_user(email)
res_train = learner.learn_user(id)
if res_train == True:
print("\nUser sign up successful.")
else:
# delete user if training unsuccessful
dbconn.del_user(id)
print("\nUser sign up unsuccessful.")
else:
        print('\nEmail address already exists.')
elif int(action) == 2:
res = dbconn.get_user(email)
if res != None:
id, name = res
recognizer.recognize_face(id, name)
else:
print('\nPlease sign up.')
except Exception as e:
print("\nInvalid action.")
|
py | 1a370800ff25204304b345e3ad294bc3a96ad3c5 | """
Virtual environment (venv) package for Python. Based on PEP 405.
Copyright (C) 2011-2014 Vinay Sajip.
Licensed to the PSF under a contributor agreement.
"""
import logging
import os
import shutil
import subprocess
import sys
import sysconfig
import types
logger = logging.getLogger(__name__)
class EnvBuilder:
"""
This class exists to allow virtual environment creation to be
customized. The constructor parameters determine the builder's
behaviour when called upon to create a virtual environment.
By default, the builder makes the system (global) site-packages dir
*un*available to the created environment.
If invoked using the Python -m option, the default is to use copying
on Windows platforms but symlinks elsewhere. If instantiated some
other way, the default is to *not* use symlinks.
:param system_site_packages: If True, the system (global) site-packages
dir is available to created environments.
:param clear: If True, delete the contents of the environment directory if
it already exists, before environment creation.
:param symlinks: If True, attempt to symlink rather than copy files into
virtual environment.
:param upgrade: If True, upgrade an existing virtual environment.
:param with_pip: If True, ensure pip is installed in the virtual
environment
:param prompt: Alternative terminal prefix for the environment.
"""
def __init__(self, system_site_packages=False, clear=False,
symlinks=False, upgrade=False, with_pip=False, prompt=None):
self.system_site_packages = system_site_packages
self.clear = clear
self.symlinks = symlinks
self.upgrade = upgrade
self.with_pip = with_pip
self.prompt = prompt
def create(self, env_dir):
"""
Create a virtual environment in a directory.
:param env_dir: The target directory to create an environment in.
"""
env_dir = os.path.abspath(env_dir)
context = self.ensure_directories(env_dir)
# See issue 24875. We need system_site_packages to be False
# until after pip is installed.
true_system_site_packages = self.system_site_packages
self.system_site_packages = False
self.create_configuration(context)
self.setup_python(context)
if self.with_pip:
self._setup_pip(context)
if not self.upgrade:
self.setup_scripts(context)
self.post_setup(context)
if true_system_site_packages:
# We had set it to False before, now
# restore it and rewrite the configuration
self.system_site_packages = True
self.create_configuration(context)
def clear_directory(self, path):
for fn in os.listdir(path):
fn = os.path.join(path, fn)
if os.path.islink(fn) or os.path.isfile(fn):
os.remove(fn)
elif os.path.isdir(fn):
shutil.rmtree(fn)
def ensure_directories(self, env_dir):
"""
Create the directories for the environment.
Returns a context object which holds paths in the environment,
for use by subsequent logic.
"""
def create_if_needed(d):
if not os.path.exists(d):
os.makedirs(d)
elif os.path.islink(d) or os.path.isfile(d):
raise ValueError('Unable to create directory %r' % d)
if os.path.exists(env_dir) and self.clear:
self.clear_directory(env_dir)
context = types.SimpleNamespace()
context.env_dir = env_dir
context.env_name = os.path.split(env_dir)[1]
prompt = self.prompt if self.prompt is not None else context.env_name
context.prompt = '(%s) ' % prompt
create_if_needed(env_dir)
executable = sys._base_executable
dirname, exename = os.path.split(os.path.abspath(executable))
context.executable = executable
context.python_dir = dirname
context.python_exe = exename
if sys.platform == 'win32':
binname = 'Scripts'
incpath = 'Include'
libpath = os.path.join(env_dir, 'Lib', 'site-packages')
else:
binname = 'bin'
incpath = 'include'
libpath = os.path.join(env_dir, 'lib',
'python%d.%d' % sys.version_info[:2],
'site-packages')
context.inc_path = path = os.path.join(env_dir, incpath)
create_if_needed(path)
create_if_needed(libpath)
# Issue 21197: create lib64 as a symlink to lib on 64-bit non-OS X POSIX
if ((sys.maxsize > 2**32) and (os.name == 'posix') and
(sys.platform != 'darwin')):
link_path = os.path.join(env_dir, 'lib64')
if not os.path.exists(link_path): # Issue #21643
os.symlink('lib', link_path)
context.bin_path = binpath = os.path.join(env_dir, binname)
context.bin_name = binname
context.env_exe = os.path.join(binpath, exename)
create_if_needed(binpath)
return context
def create_configuration(self, context):
"""
Create a configuration file indicating where the environment's Python
was copied from, and whether the system site-packages should be made
available in the environment.
:param context: The information for the environment creation request
being processed.
"""
context.cfg_path = path = os.path.join(context.env_dir, 'pyvenv.cfg')
with open(path, 'w', encoding='utf-8') as f:
f.write('home = %s\n' % context.python_dir)
if self.system_site_packages:
incl = 'true'
else:
incl = 'false'
f.write('include-system-site-packages = %s\n' % incl)
f.write('version = %d.%d.%d\n' % sys.version_info[:3])
if self.prompt is not None:
f.write(f'prompt = {self.prompt!r}\n')
if os.name != 'nt':
def symlink_or_copy(self, src, dst, relative_symlinks_ok=False):
"""
Try symlinking a file, and if that fails, fall back to copying.
"""
force_copy = not self.symlinks
if not force_copy:
try:
if not os.path.islink(dst): # can't link to itself!
if relative_symlinks_ok:
assert os.path.dirname(src) == os.path.dirname(dst)
os.symlink(os.path.basename(src), dst)
else:
os.symlink(src, dst)
except Exception: # may need to use a more specific exception
logger.warning('Unable to symlink %r to %r', src, dst)
force_copy = True
if force_copy:
shutil.copyfile(src, dst)
else:
def symlink_or_copy(self, src, dst, relative_symlinks_ok=False):
"""
Try symlinking a file, and if that fails, fall back to copying.
"""
bad_src = os.path.lexists(src) and not os.path.exists(src)
if self.symlinks and not bad_src and not os.path.islink(dst):
try:
if relative_symlinks_ok:
assert os.path.dirname(src) == os.path.dirname(dst)
os.symlink(os.path.basename(src), dst)
else:
os.symlink(src, dst)
return
except Exception: # may need to use a more specific exception
logger.warning('Unable to symlink %r to %r', src, dst)
# On Windows, we rewrite symlinks to our base python.exe into
# copies of venvlauncher.exe
basename, ext = os.path.splitext(os.path.basename(src))
srcfn = os.path.join(os.path.dirname(__file__),
"scripts",
"nt",
basename + ext)
# Builds or venv's from builds need to remap source file
# locations, as we do not put them into Lib/venv/scripts
if sysconfig.is_python_build(True) or not os.path.isfile(srcfn):
if basename.endswith('_d'):
ext = '_d' + ext
basename = basename[:-2]
if basename == 'python':
basename = 'venvlauncher'
elif basename == 'pythonw':
basename = 'venvwlauncher'
src = os.path.join(os.path.dirname(src), basename + ext)
else:
if basename.startswith('python'):
scripts = sys.prefix
else:
scripts = os.path.join(os.path.dirname(__file__), "scripts", "nt")
src = os.path.join(scripts, basename + ext)
if not os.path.exists(src):
if not bad_src:
logger.warning('Unable to copy %r', src)
return
shutil.copyfile(src, dst)
def setup_python(self, context):
"""
Set up a Python executable in the environment.
:param context: The information for the environment creation request
being processed.
"""
binpath = context.bin_path
path = context.env_exe
copier = self.symlink_or_copy
copier(context.executable, path)
dirname = context.python_dir
if os.name != 'nt':
if not os.path.islink(path):
os.chmod(path, 0o755)
for suffix in ('python', 'python3'):
path = os.path.join(binpath, suffix)
if not os.path.exists(path):
# Issue 18807: make copies if
# symlinks are not wanted
copier(context.env_exe, path, relative_symlinks_ok=True)
if not os.path.islink(path):
os.chmod(path, 0o755)
else:
if self.symlinks:
# For symlinking, we need a complete copy of the root directory
# If symlinks fail, you'll get unnecessary copies of files, but
# we assume that if you've opted into symlinks on Windows then
# you know what you're doing.
suffixes = [
f for f in os.listdir(dirname) if
os.path.normcase(os.path.splitext(f)[1]) in ('.exe', '.dll')
]
if sysconfig.is_python_build(True):
suffixes = [
f for f in suffixes if
os.path.normcase(f).startswith(('python', 'vcruntime'))
]
else:
suffixes = ['python.exe', 'python_d.exe', 'pythonw.exe',
'pythonw_d.exe']
for suffix in suffixes:
src = os.path.join(dirname, suffix)
if os.path.lexists(src):
copier(src, os.path.join(binpath, suffix))
if sysconfig.is_python_build(True):
# copy init.tcl
for root, dirs, files in os.walk(context.python_dir):
if 'init.tcl' in files:
tcldir = os.path.basename(root)
tcldir = os.path.join(context.env_dir, 'Lib', tcldir)
if not os.path.exists(tcldir):
os.makedirs(tcldir)
src = os.path.join(root, 'init.tcl')
dst = os.path.join(tcldir, 'init.tcl')
shutil.copyfile(src, dst)
break
def _setup_pip(self, context):
"""Installs or upgrades pip in a virtual environment"""
# We run ensurepip in isolated mode to avoid side effects from
# environment vars, the current directory and anything else
# intended for the global Python environment
cmd = [context.env_exe, '-Im', 'ensurepip', '--upgrade',
'--default-pip']
subprocess.check_output(cmd, stderr=subprocess.STDOUT)
def setup_scripts(self, context):
"""
Set up scripts into the created environment from a directory.
This method installs the default scripts into the environment
being created. You can prevent the default installation by overriding
this method if you really need to, or if you need to specify
a different location for the scripts to install. By default, the
'scripts' directory in the venv package is used as the source of
scripts to install.
"""
path = os.path.abspath(os.path.dirname(__file__))
path = os.path.join(path, 'scripts')
self.install_scripts(context, path)
def post_setup(self, context):
"""
Hook for post-setup modification of the venv. Subclasses may install
additional packages or scripts here, add activation shell scripts, etc.
:param context: The information for the environment creation request
being processed.
"""
pass
def replace_variables(self, text, context):
"""
Replace variable placeholders in script text with context-specific
variables.
        Return the text passed in, but with variables replaced.
:param text: The text in which to replace placeholder variables.
:param context: The information for the environment creation request
being processed.
"""
text = text.replace('__VENV_DIR__', context.env_dir)
text = text.replace('__VENV_NAME__', context.env_name)
text = text.replace('__VENV_PROMPT__', context.prompt)
text = text.replace('__VENV_BIN_NAME__', context.bin_name)
text = text.replace('__VENV_PYTHON__', context.env_exe)
return text
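    # Added illustration (not part of the original module; the paths are made
    # up). With a context whose env_dir is '/tmp/venv' and bin_name is 'bin',
    # the method above rewrites a script line such as
    #     export PATH="__VENV_DIR__/__VENV_BIN_NAME__:$PATH"
    # into
    #     export PATH="/tmp/venv/bin:$PATH"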
def install_scripts(self, context, path):
"""
Install scripts into the created environment from a directory.
:param context: The information for the environment creation request
being processed.
        :param path: Absolute pathname of a directory containing scripts.
Scripts in the 'common' subdirectory of this directory,
and those in the directory named for the platform
being run on, are installed in the created environment.
Placeholder variables are replaced with environment-
specific values.
"""
binpath = context.bin_path
plen = len(path)
for root, dirs, files in os.walk(path):
if root == path: # at top-level, remove irrelevant dirs
for d in dirs[:]:
if d not in ('common', os.name):
dirs.remove(d)
continue # ignore files in top level
for f in files:
if (os.name == 'nt' and f.startswith('python')
and f.endswith(('.exe', '.pdb'))):
continue
srcfile = os.path.join(root, f)
suffix = root[plen:].split(os.sep)[2:]
if not suffix:
dstdir = binpath
else:
dstdir = os.path.join(binpath, *suffix)
if not os.path.exists(dstdir):
os.makedirs(dstdir)
dstfile = os.path.join(dstdir, f)
with open(srcfile, 'rb') as f:
data = f.read()
if not srcfile.endswith(('.exe', '.pdb')):
try:
data = data.decode('utf-8')
data = self.replace_variables(data, context)
data = data.encode('utf-8')
except UnicodeError as e:
data = None
logger.warning('unable to copy script %r, '
'may be binary: %s', srcfile, e)
if data is not None:
with open(dstfile, 'wb') as f:
f.write(data)
shutil.copymode(srcfile, dstfile)
def create(env_dir, system_site_packages=False, clear=False,
symlinks=False, with_pip=False, prompt=None):
"""Create a virtual environment in a directory."""
builder = EnvBuilder(system_site_packages=system_site_packages,
clear=clear, symlinks=symlinks, with_pip=with_pip,
prompt=prompt)
builder.create(env_dir)
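# Added usage sketch (not part of the original module; the target path and
# prompt are made-up example values). The convenience function above can be
# called programmatically, which is equivalent to constructing an EnvBuilder
# with the same options and calling its create() method:
#
#     import venv
#     venv.create('/tmp/demo-env', with_pip=True, prompt='demo')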
def main(args=None):
compatible = True
if sys.version_info < (3, 3):
compatible = False
elif not hasattr(sys, 'base_prefix'):
compatible = False
if not compatible:
raise ValueError('This script is only for use with Python >= 3.3')
else:
import argparse
parser = argparse.ArgumentParser(prog=__name__,
description='Creates virtual Python '
'environments in one or '
'more target '
'directories.',
epilog='Once an environment has been '
'created, you may wish to '
'activate it, e.g. by '
'sourcing an activate script '
'in its bin directory.')
parser.add_argument('dirs', metavar='ENV_DIR', nargs='+',
help='A directory to create the environment in.')
parser.add_argument('--system-site-packages', default=False,
action='store_true', dest='system_site',
help='Give the virtual environment access to the '
'system site-packages dir.')
if os.name == 'nt':
use_symlinks = False
else:
use_symlinks = True
group = parser.add_mutually_exclusive_group()
group.add_argument('--symlinks', default=use_symlinks,
action='store_true', dest='symlinks',
help='Try to use symlinks rather than copies, '
'when symlinks are not the default for '
'the platform.')
group.add_argument('--copies', default=not use_symlinks,
action='store_false', dest='symlinks',
help='Try to use copies rather than symlinks, '
'even when symlinks are the default for '
'the platform.')
parser.add_argument('--clear', default=False, action='store_true',
dest='clear', help='Delete the contents of the '
'environment directory if it '
'already exists, before '
'environment creation.')
parser.add_argument('--upgrade', default=False, action='store_true',
dest='upgrade', help='Upgrade the environment '
'directory to use this version '
'of Python, assuming Python '
'has been upgraded in-place.')
parser.add_argument('--without-pip', dest='with_pip',
default=True, action='store_false',
help='Skips installing or upgrading pip in the '
'virtual environment (pip is bootstrapped '
'by default)')
parser.add_argument('--prompt',
help='Provides an alternative prompt prefix for '
'this environment.')
options = parser.parse_args(args)
if options.upgrade and options.clear:
raise ValueError('you cannot supply --upgrade and --clear together.')
builder = EnvBuilder(system_site_packages=options.system_site,
clear=options.clear,
symlinks=options.symlinks,
upgrade=options.upgrade,
with_pip=options.with_pip,
prompt=options.prompt)
for d in options.dirs:
builder.create(d)
if __name__ == '__main__':
rc = 1
try:
main()
rc = 0
except Exception as e:
print('Error: %s' % e, file=sys.stderr)
sys.exit(rc)
|
py | 1a370892900e5a775b747f48e1eaae8cdd45a656 | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""Implement standard (and unused) TCP protocols.
These protocols are either provided by inetd, or are not provided at all.
"""
# system imports
import time, struct
from zope.interface import implements
# twisted import
from twisted.internet import protocol, interfaces
class Echo(protocol.Protocol):
"""As soon as any data is received, write it back (RFC 862)"""
def dataReceived(self, data):
self.transport.write(data)
class Discard(protocol.Protocol):
"""Discard any received data (RFC 863)"""
def dataReceived(self, data):
# I'm ignoring you, nyah-nyah
pass
class Chargen(protocol.Protocol):
"""Generate repeating noise (RFC 864)"""
noise = r'@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcdefghijklmnopqrstuvwxyz{|}~ !"#$%&?'
implements(interfaces.IProducer)
def connectionMade(self):
self.transport.registerProducer(self, 0)
def resumeProducing(self):
self.transport.write(self.noise)
def pauseProducing(self):
pass
def stopProducing(self):
pass
class QOTD(protocol.Protocol):
"""Return a quote of the day (RFC 865)"""
def connectionMade(self):
self.transport.write(self.getQuote())
self.transport.loseConnection()
def getQuote(self):
"""Return a quote. May be overrriden in subclasses."""
return "An apple a day keeps the doctor away.\r\n"
class Who(protocol.Protocol):
"""Return list of active users (RFC 866)"""
def connectionMade(self):
self.transport.write(self.getUsers())
self.transport.loseConnection()
def getUsers(self):
"""Return active users. Override in subclasses."""
return "root\r\n"
class Daytime(protocol.Protocol):
"""Send back the daytime in ASCII form (RFC 867)"""
def connectionMade(self):
self.transport.write(time.asctime(time.gmtime(time.time())) + '\r\n')
self.transport.loseConnection()
class Time(protocol.Protocol):
"""Send back the time in machine readable form (RFC 868)"""
def connectionMade(self):
# is this correct only for 32-bit machines?
result = struct.pack("!i", int(time.time()))
self.transport.write(result)
self.transport.loseConnection()
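# Added usage sketch (not part of the original Twisted module): one plausible
# way to serve the Echo protocol over TCP with the standard reactor and
# ServerFactory APIs. The port number 8007 is an arbitrary example value.
if __name__ == '__main__':
    from twisted.internet import reactor
    echoFactory = protocol.ServerFactory()
    echoFactory.protocol = Echo
    reactor.listenTCP(8007, echoFactory)
    reactor.run()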
|
py | 1a3708ee8f45cadf4e3cfda8e55e67e28911b47b | import pandas as pd
def to_reise(data):
"""Format data for REISE.
:param pandas.DataFrame data: data frame as returned by
:func:`prereise.gather.solardata.nsrdb.naive.retrieve_data`,
:func:`prereise.gather.solardata.nsrdb.sam.retrieve_data` or
:func:`prereise.gather.solardata.ga_wind.ga_wind.retrieve_data`
:return: (*pandas.DataFrame*) -- data frame formatted for REISE.
:raises TypeError: if *'data'* is not a data frame.
:raises ValueError: if *'Pout'*, *'plant_id'*, *'ts'* and *'ts_id'* are not among
the columns.
"""
if not isinstance(data, pd.DataFrame):
raise TypeError("data must be a pandas.DataFrame")
if not {"Pout", "plant_id", "ts", "ts_id"}.issubset(data.columns):
raise ValueError(
"data frame must have Pout, plant_id, ts and ts_id among columns"
)
ts = data["ts"].unique()
plant_id = data[data.ts_id == 1].plant_id.values
profile = None
for i in range(1, max(data.ts_id) + 1):
data_tmp = pd.DataFrame(
{"Pout": data[data.ts_id == i].Pout.values}, index=plant_id
)
if i == 1:
profile = data_tmp.T
else:
profile = profile.append(data_tmp.T, sort=False, ignore_index=True)
profile.set_index(ts, inplace=True)
profile.index.name = "UTC"
return profile
def get_plant_id_unique_location(plant):
"""Identify unique location among plants.
:param pandas.DataFrame plant: plant data frame.
:return: (*dict*) -- keys are coordinates. Values is a list of *'plant_id'*.
:raises TypeError: if *'plant'* is not a data frame.
:raises ValueError: if *'plant_id'* is not the index and/or *'lat'* and *'lon'* are
not among the columns.
"""
if not isinstance(plant, pd.DataFrame):
raise TypeError("plant must be a pandas.DataFrame")
if not (plant.index.name == "plant_id" and {"lat", "lon"}.issubset(plant.columns)):
raise ValueError(
"data frame must have plant_id as index and lat and lon among columns"
)
return plant.groupby(["lon", "lat"]).groups
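# Added usage sketch (illustrative only; all values below are made up). It
# assumes a pandas version in which DataFrame.append is still available, as
# required by to_reise() above.
if __name__ == "__main__":
    demo = pd.DataFrame(
        {
            "plant_id": [101, 102, 101, 102],
            "ts": pd.to_datetime(
                ["2020-01-01 00:00", "2020-01-01 00:00",
                 "2020-01-01 01:00", "2020-01-01 01:00"]
            ),
            "ts_id": [1, 1, 2, 2],
            "Pout": [0.0, 0.1, 0.2, 0.3],
        }
    )
    print(to_reise(demo))
    plant = pd.DataFrame(
        {"plant_id": [101, 102], "lat": [45.0, 45.1], "lon": [-120.0, -120.0]}
    ).set_index("plant_id")
    print(get_plant_id_unique_location(plant))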
|
py | 1a3708fb74626cab3012a7ece6eb8074446eb1c1 | """
Support for Alexa skill service end point.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/alexa/
"""
import asyncio
import copy
import enum
import logging
import uuid
from datetime import datetime
import voluptuous as vol
from homeassistant.core import callback
from homeassistant.const import HTTP_BAD_REQUEST
from homeassistant.helpers import template, script, config_validation as cv
from homeassistant.components.http import HomeAssistantView
_LOGGER = logging.getLogger(__name__)
INTENTS_API_ENDPOINT = '/api/alexa'
FLASH_BRIEFINGS_API_ENDPOINT = '/api/alexa/flash_briefings/{briefing_id}'
CONF_ACTION = 'action'
CONF_CARD = 'card'
CONF_INTENTS = 'intents'
CONF_SPEECH = 'speech'
CONF_TYPE = 'type'
CONF_TITLE = 'title'
CONF_CONTENT = 'content'
CONF_TEXT = 'text'
CONF_FLASH_BRIEFINGS = 'flash_briefings'
CONF_UID = 'uid'
CONF_TITLE = 'title'
CONF_AUDIO = 'audio'
CONF_TEXT = 'text'
CONF_DISPLAY_URL = 'display_url'
ATTR_UID = 'uid'
ATTR_UPDATE_DATE = 'updateDate'
ATTR_TITLE_TEXT = 'titleText'
ATTR_STREAM_URL = 'streamUrl'
ATTR_MAIN_TEXT = 'mainText'
ATTR_REDIRECTION_URL = 'redirectionURL'
DATE_FORMAT = '%Y-%m-%dT%H:%M:%S.0Z'
DOMAIN = 'alexa'
DEPENDENCIES = ['http']
class SpeechType(enum.Enum):
"""The Alexa speech types."""
plaintext = "PlainText"
ssml = "SSML"
class CardType(enum.Enum):
"""The Alexa card types."""
simple = "Simple"
link_account = "LinkAccount"
CONFIG_SCHEMA = vol.Schema({
DOMAIN: {
CONF_INTENTS: {
cv.string: {
vol.Optional(CONF_ACTION): cv.SCRIPT_SCHEMA,
vol.Optional(CONF_CARD): {
vol.Required(CONF_TYPE): cv.enum(CardType),
vol.Required(CONF_TITLE): cv.template,
vol.Required(CONF_CONTENT): cv.template,
},
vol.Optional(CONF_SPEECH): {
vol.Required(CONF_TYPE): cv.enum(SpeechType),
vol.Required(CONF_TEXT): cv.template,
}
}
},
CONF_FLASH_BRIEFINGS: {
cv.string: vol.All(cv.ensure_list, [{
vol.Required(CONF_UID, default=str(uuid.uuid4())): cv.string,
vol.Required(CONF_TITLE): cv.template,
vol.Optional(CONF_AUDIO): cv.template,
vol.Required(CONF_TEXT, default=""): cv.template,
vol.Optional(CONF_DISPLAY_URL): cv.template,
}]),
}
}
}, extra=vol.ALLOW_EXTRA)
def setup(hass, config):
"""Activate Alexa component."""
intents = config[DOMAIN].get(CONF_INTENTS, {})
flash_briefings = config[DOMAIN].get(CONF_FLASH_BRIEFINGS, {})
hass.http.register_view(AlexaIntentsView(hass, intents))
hass.http.register_view(AlexaFlashBriefingView(hass, flash_briefings))
return True
class AlexaIntentsView(HomeAssistantView):
"""Handle Alexa requests."""
url = INTENTS_API_ENDPOINT
name = 'api:alexa'
def __init__(self, hass, intents):
"""Initialize Alexa view."""
super().__init__()
intents = copy.deepcopy(intents)
template.attach(hass, intents)
for name, intent in intents.items():
if CONF_ACTION in intent:
intent[CONF_ACTION] = script.Script(
hass, intent[CONF_ACTION], "Alexa intent {}".format(name))
self.intents = intents
@asyncio.coroutine
def post(self, request):
"""Handle Alexa."""
data = yield from request.json()
_LOGGER.debug('Received Alexa request: %s', data)
req = data.get('request')
if req is None:
_LOGGER.error('Received invalid data from Alexa: %s', data)
return self.json_message('Expected request value not received',
HTTP_BAD_REQUEST)
req_type = req['type']
if req_type == 'SessionEndedRequest':
return None
intent = req.get('intent')
response = AlexaResponse(request.app['hass'], intent)
if req_type == 'LaunchRequest':
response.add_speech(
SpeechType.plaintext,
"Hello, and welcome to the future. How may I help?")
return self.json(response)
if req_type != 'IntentRequest':
_LOGGER.warning('Received unsupported request: %s', req_type)
return self.json_message(
'Received unsupported request: {}'.format(req_type),
HTTP_BAD_REQUEST)
intent_name = intent['name']
config = self.intents.get(intent_name)
if config is None:
_LOGGER.warning('Received unknown intent %s', intent_name)
response.add_speech(
SpeechType.plaintext,
"This intent is not yet configured within Home Assistant.")
return self.json(response)
speech = config.get(CONF_SPEECH)
card = config.get(CONF_CARD)
action = config.get(CONF_ACTION)
if action is not None:
yield from action.async_run(response.variables)
# pylint: disable=unsubscriptable-object
if speech is not None:
response.add_speech(speech[CONF_TYPE], speech[CONF_TEXT])
if card is not None:
response.add_card(card[CONF_TYPE], card[CONF_TITLE],
card[CONF_CONTENT])
return self.json(response)
class AlexaResponse(object):
"""Help generating the response for Alexa."""
def __init__(self, hass, intent=None):
"""Initialize the response."""
self.hass = hass
self.speech = None
self.card = None
self.reprompt = None
self.session_attributes = {}
self.should_end_session = True
self.variables = {}
if intent is not None and 'slots' in intent:
for key, value in intent['slots'].items():
if 'value' in value:
underscored_key = key.replace('.', '_')
self.variables[underscored_key] = value['value']
def add_card(self, card_type, title, content):
"""Add a card to the response."""
assert self.card is None
card = {
"type": card_type.value
}
if card_type == CardType.link_account:
self.card = card
return
card["title"] = title.async_render(self.variables)
card["content"] = content.async_render(self.variables)
self.card = card
def add_speech(self, speech_type, text):
"""Add speech to the response."""
assert self.speech is None
key = 'ssml' if speech_type == SpeechType.ssml else 'text'
if isinstance(text, template.Template):
text = text.async_render(self.variables)
self.speech = {
'type': speech_type.value,
key: text
}
def add_reprompt(self, speech_type, text):
"""Add reprompt if user does not answer."""
assert self.reprompt is None
key = 'ssml' if speech_type == SpeechType.ssml else 'text'
self.reprompt = {
'type': speech_type.value,
key: text.async_render(self.variables)
}
def as_dict(self):
"""Return response in an Alexa valid dict."""
response = {
'shouldEndSession': self.should_end_session
}
if self.card is not None:
response['card'] = self.card
if self.speech is not None:
response['outputSpeech'] = self.speech
if self.reprompt is not None:
response['reprompt'] = {
'outputSpeech': self.reprompt
}
return {
'version': '1.0',
'sessionAttributes': self.session_attributes,
'response': response,
}
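# Added illustration (not part of the original component): after calling
# add_speech(SpeechType.plaintext, "Hello") on a fresh AlexaResponse, as_dict()
# returns a payload of roughly this shape:
#
#     {
#         "version": "1.0",
#         "sessionAttributes": {},
#         "response": {
#             "shouldEndSession": True,
#             "outputSpeech": {"type": "PlainText", "text": "Hello"},
#         },
#     }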
class AlexaFlashBriefingView(HomeAssistantView):
"""Handle Alexa Flash Briefing skill requests."""
url = FLASH_BRIEFINGS_API_ENDPOINT
name = 'api:alexa:flash_briefings'
def __init__(self, hass, flash_briefings):
"""Initialize Alexa view."""
super().__init__()
self.flash_briefings = copy.deepcopy(flash_briefings)
template.attach(hass, self.flash_briefings)
@callback
def get(self, request, briefing_id):
"""Handle Alexa Flash Briefing request."""
_LOGGER.debug('Received Alexa flash briefing request for: %s',
briefing_id)
if self.flash_briefings.get(briefing_id) is None:
err = 'No configured Alexa flash briefing was found for: %s'
_LOGGER.error(err, briefing_id)
return b'', 404
briefing = []
for item in self.flash_briefings.get(briefing_id, []):
output = {}
if item.get(CONF_TITLE) is not None:
if isinstance(item.get(CONF_TITLE), template.Template):
output[ATTR_TITLE_TEXT] = item[CONF_TITLE].async_render()
else:
output[ATTR_TITLE_TEXT] = item.get(CONF_TITLE)
if item.get(CONF_TEXT) is not None:
if isinstance(item.get(CONF_TEXT), template.Template):
output[ATTR_MAIN_TEXT] = item[CONF_TEXT].async_render()
else:
output[ATTR_MAIN_TEXT] = item.get(CONF_TEXT)
if item.get(CONF_UID) is not None:
output[ATTR_UID] = item.get(CONF_UID)
if item.get(CONF_AUDIO) is not None:
if isinstance(item.get(CONF_AUDIO), template.Template):
output[ATTR_STREAM_URL] = item[CONF_AUDIO].async_render()
else:
output[ATTR_STREAM_URL] = item.get(CONF_AUDIO)
if item.get(CONF_DISPLAY_URL) is not None:
if isinstance(item.get(CONF_DISPLAY_URL),
template.Template):
output[ATTR_REDIRECTION_URL] = \
item[CONF_DISPLAY_URL].async_render()
else:
output[ATTR_REDIRECTION_URL] = item.get(CONF_DISPLAY_URL)
output[ATTR_UPDATE_DATE] = datetime.now().strftime(DATE_FORMAT)
briefing.append(output)
return self.json(briefing)
|
py | 1a370973985b2a8a104b8450003925bf9ea322f0 | #coding:utf-8
#
# id: bugs.core_3919
# title: Improve SIMILAR TO performance
# description:
#               Confirmed normal operation on WI-T4.0.0.1598. Moreover, SIMILAR TO is about 5x faster than the LIKE comparison in this test.
#
# CAUTION.
# This test must be run only on 4.0+, despite that its 'Fix version' = 3.0 Alpha 1.
# Performance of SIMILAR TO statement is extremely poor in comparison with LIKE operator:
# COUNT through the table of 102 records requires 27 seconds vs 16 ms (checked on WI-V6.3.6.33246).
#
# tracker_id: CORE-3919
# min_versions: ['4.0']
# versions: 4.0
# qmid: None
import pytest
from firebird.qa import db_factory, isql_act, Action
# version: 4.0
# resources: None
substitutions_1 = []
init_script_1 = """"""
db_1 = db_factory(from_backup='core3919.fbk', init=init_script_1)
test_script_1 = """
set heading off;
set list on;
set term ^;
execute block returns(
ratio_of_time varchar(255)
) as
declare i int;
declare j int;
declare t0 timestamp;
declare t1 timestamp;
declare elap_ms_using_like int;
declare elap_ms_using_similar_to int;
declare s varchar(32761);
declare ratio_similar_vs_like numeric(15,4);
declare MAX_RATIO numeric(15,4) = 2;
-- ^
-- #############
-- MAX THRESHOLD
-- #############
declare n_count int = 100; -- do not set it to values less than 10: duration should not be zero!
begin
t0 = cast('now' as timestamp);
select count(*) as like_count, sum(char_length(b)) as like_sum_len
from test t, (select 1 i from rdb$types rows (:n_count) ) n
where
t.b like '%a%' or
t.b like '%b%' or
t.b like '%c%' or
t.b like '%d%' or
t.b like '%e%' or
t.b like '%f%' or
t.b like '%g%' or
t.b like '%h%' or
t.b like '%i%' or
t.b like '%j%' or
t.b like '%k%' or
t.b like '%l%' or
t.b like '%m%' or
t.b like '%n%' or
t.b like '%o%' or
t.b like '%p%' or
t.b like '%q%' or
t.b like '%r%' or
t.b like '%s%' or
t.b like '%t%' or
t.b like '%u%' or
t.b like '%v%' or
t.b like '%w%' or
t.b like '%x%' or
t.b like '%y%' or
t.b like '%z%'
into i,j
;
t1 = cast('now' as timestamp);
elap_ms_using_like = datediff(millisecond from t0 to t1);
t0 = cast('now' as timestamp);
select count(*) as similar_to_count, sum(char_length(b)) as similar_to_sum_len
from test t, (select 1 i from rdb$types rows (:n_count) ) n
where t.b similar to '%[a-z]%'
into i,j
;
t1 = cast('now' as timestamp);
elap_ms_using_similar_to = datediff(millisecond from t0 to t1);
ratio_similar_vs_like = 1.0000 * elap_ms_using_similar_to / elap_ms_using_like;
ratio_of_time = iif( ratio_similar_vs_like < MAX_RATIO
,'acceptable'
,'TOO LONG: '|| ratio_similar_vs_like ||' times. This is more than max threshold = ' || MAX_RATIO || ' times'
)
;
suspend;
end
^
"""
act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1)
expected_stdout_1 = """
RATIO_OF_TIME acceptable
"""
@pytest.mark.version('>=4.0')
def test_1(act_1: Action):
act_1.expected_stdout = expected_stdout_1
act_1.execute()
assert act_1.clean_stdout == act_1.clean_expected_stdout
|
py | 1a3709da599434bf24d69e8077036916486b13c6 | info = {
"name": "nn",
"date_order": "DMY",
"january": [
"jan",
"januar"
],
"february": [
"feb",
"februar"
],
"march": [
"mar",
"mars"
],
"april": [
"apr",
"april"
],
"may": [
"mai"
],
"june": [
"jun",
"juni"
],
"july": [
"jul",
"juli"
],
"august": [
"aug",
"august"
],
"september": [
"sep",
"september"
],
"october": [
"okt",
"oktober"
],
"november": [
"nov",
"november"
],
"december": [
"des",
"desember"
],
"monday": [
"må",
"mån",
"måndag"
],
"tuesday": [
"ty",
"tys",
"tysdag"
],
"wednesday": [
"on",
"ons",
"onsdag"
],
"thursday": [
"to",
"tor",
"torsdag"
],
"friday": [
"fr",
"fre",
"fredag"
],
"saturday": [
"la",
"lau",
"laurdag"
],
"sunday": [
"sø",
"søn",
"søndag"
],
"am": [
"fm",
"formiddag"
],
"pm": [
"em",
"ettermiddag"
],
"year": [
"år"
],
"month": [
"månad"
],
"week": [
"veke"
],
"day": [
"dag"
],
"hour": [
"time"
],
"minute": [
"minutt"
],
"second": [
"sekund"
],
"relative-type": {
"0 day ago": [
"i dag"
],
"0 hour ago": [
"this hour"
],
"0 minute ago": [
"this minute"
],
"0 month ago": [
"this month"
],
"0 second ago": [
"now"
],
"0 week ago": [
"this week"
],
"0 year ago": [
"this year"
],
"1 day ago": [
"i går"
],
"1 month ago": [
"last month"
],
"1 week ago": [
"last week"
],
"1 year ago": [
"last year"
],
"in 1 day": [
"i morgon"
],
"in 1 month": [
"next month"
],
"in 1 week": [
"next week"
],
"in 1 year": [
"next year"
]
},
"relative-type-regex": {
"\\1 day ago": [
"for (\\d+) døgn siden"
],
"\\1 hour ago": [
"for (\\d+) time siden",
"for (\\d+) timer siden"
],
"\\1 minute ago": [
"for (\\d+) minutt siden",
"for (\\d+) minutter siden"
],
"\\1 month ago": [
"for (\\d+) måned siden",
"for (\\d+) måneder siden"
],
"\\1 second ago": [
"for (\\d+) sekund siden",
"for (\\d+) sekunder siden"
],
"\\1 week ago": [
"for (\\d+) uke siden",
"for (\\d+) uker siden"
],
"\\1 year ago": [
"for (\\d+) år siden"
],
"in \\1 day": [
"om (\\d+) døgn"
],
"in \\1 hour": [
"om (\\d+) time",
"om (\\d+) timer"
],
"in \\1 minute": [
"om (\\d+) minutt",
"om (\\d+) minutter"
],
"in \\1 month": [
"om (\\d+) måned",
"om (\\d+) måneder"
],
"in \\1 second": [
"om (\\d+) sekund",
"om (\\d+) sekunder"
],
"in \\1 week": [
"om (\\d+) uke",
"om (\\d+) uker"
],
"in \\1 year": [
"om (\\d+) år"
]
},
"locale_specific": {},
"skip": [
" ",
"'",
",",
"-",
".",
"/",
";",
"@",
"[",
"]",
"|",
","
]
}
|
py | 1a370a2746fac0f86d0c987bf1e482feb40a9df3 | from django.db import models
from django.contrib.auth.models import User
from books.constants import BOOK_STATUS_CHOICES, BOOK_GENRE_CHOICES
# Create your models here.
class Book(models.Model):
name = models.CharField(max_length=50)
author = models.CharField(max_length=50)
added_on = models.DateField(null=True)
published = models.DateField(null=True)
class Rating(models.IntegerChoices):
unrated = 0
one = 1
two = 2
three = 3
four = 4
five = 5
rating = models.IntegerField(choices=Rating.choices, null=True)
status = models.CharField(max_length=30, choices=BOOK_STATUS_CHOICES, default="read")
genre = models.CharField(max_length=40, choices=BOOK_GENRE_CHOICES, default="other")
user = models.ForeignKey(User, on_delete=models.CASCADE, null=True)
|
py | 1a370aa1ce8ef1f248c9de35be2ff1ae4800930b | import unittest
from common.CustomUI import FileChooserTextBox
from PyQt5.QtWidgets import (QLineEdit, QLabel, QApplication)
class TestCustomUI(unittest.TestCase):
def setUp(self):
self.app = QApplication([])
self.testWidget = FileChooserTextBox("Test Label", "Test Cue", True)
def tearDown(self):
self.app.quit()
def testFileChooserDefaults(self):
self.assertEqual(self.testWidget.getSelection(), "")
self.assertEqual(self.testWidget.label, "Test Label")
self.assertEqual(self.testWidget.cue, "Test Cue")
self.assertEqual(self.testWidget.dir, True)
self.assertEqual(self.testWidget.findChild(QLineEdit, "txtAddress").text(), "")
self.assertEqual(self.testWidget.findChild(QLabel).text(), "Test Label")
def testFileChooserSelectionChanged(self):
"""
Test logic to simulate QFileDialog choice and assert selection
:return:
"""
pass
|
py | 1a370b883a33680a812a154a9d955f77dfc7f713 |
# copy only files |
py | 1a370c97dd246c37c6f2b85f55ce59f590527a13 | from mpl_toolkits.mplot3d import Axes3D
from matplotlib.ticker import LinearLocator, FormatStrFormatter
from matplotlib import cm
import numpy as np
from scipy import linalg
import matplotlib.pyplot as plt
import time
# Variance
def var(f_model):
n = np.size(f_model)
f_model_mean = np.sum(f_model)/n
#f_model_mean = np.mean(f_model)
return np.sum((f_model-f_model_mean)**2)/n
#================================================================================================================
# Bias
def bias(f_true,f_model):
n = np.size(f_model)
#f_model_mean = np.sum(f_model)/n
f_model_mean = np.mean(f_model)
return np.sum((f_true-f_model_mean)**2)/n
#================================================================================================================
# MSE
def MSE(f_true,f_model):
n = np.size(f_model)
return np.sum((f_true-f_model)**2)/n
#================================================================================================================
# Extra term
def extra_term(f_true,f_model):
n = np.size(f_model)
f_model_mean = np.mean(f_model)
return 2.0/n*np.sum((f_model_mean-f_true)*(f_model-f_model_mean))
#================================================================================================================
# SVD invert
def SVDinv(A):
''' Takes as input a numpy matrix A and returns inv(A) based on singular value decomposition (SVD).
SVD is numerically more stable (at least in our case) than the inversion algorithms provided by
numpy and scipy.linalg at the cost of being slower.
'''
U, s, VT = linalg.svd(A)
D = np.zeros((len(U),len(VT)))
for i in range(0,len(VT)):
D[i,i]=s[i]
UT = np.transpose(U); V = np.transpose(VT); invD = np.linalg.inv(D)
return np.matmul(V,np.matmul(invD,UT))
#================================================================================================================
# R2 score
def R2(x_true,x_predict):
n = np.size(x_true)
x_avg = np.sum(x_true)/n
enumerator = np.sum ((x_true-x_predict)**2)
denominator = np.sum((x_true-x_avg)**2)
return 1.0 - enumerator/denominator
#================================================================================================================
## Mean
#def mean(x):
# n = np.size(x)
# x_avg = np.sum(x)/n
# return x_avg
#================================================================================================================
# get sub-entries of matrix A
def get_subset(A,indices):
    '''Given an indexing set "indices", return the vector consisting of the
    entries A[j,i] for each pair (i,j) in indices (pairs are stored as
    (column index, row index), matching the correspondence list in regdata).'''
N = len(indices)
B = np.zeros(N)
for k in range(0,N):
i = indices[k][0]
j = indices[k][1]
B[k] = A[j,i]
return B
#============================================================================================================================
class k_cross_validation:
    '''A k-cross validation object is initialized by passing to it data of the type regdata,
    and a partition of the data. The class function R2 calculates the mean R2 scores
of test and training data for the given model. The function MSE calculates the mean MSE, bias,
variance and error terms of the test data for the given model. These quantities are stored
as self variables.'''
def __init__(self, data, partition,*args):
self.data = data; self.partition = partition; self.args = args;
#f = data.f; X = data.X; z = data.z; correspondence = data.correspondence;
self.k = len(partition)
self.test_R2, self.test_var, self.test_bias, self.test_MSE, self.test_extra_terms = 0, 0, 0, 0, 0
self.train_R2 = 0
#self.train_var, self.train_bias, self.train_MSE, self.train_extra_terms = 0, 0, 0, 0
def R2(self):
data = self.data
f = data.f; X = data.X; z = data.z; correspondence = data.correspondence; partition = self.partition
k = self.k
args = self.args
test_R2, train_R2 = 0, 0
for i, test_data in enumerate(partition):
train_data = [x for j,x in enumerate(partition) if j!=i]
train_data = sum(train_data, [])
beta = data.get_beta(X[train_data],z[train_data],*args)
freg = data.model(beta)
test_data = [correspondence[j] for j in test_data]
train_data = [correspondence[j] for j in train_data]
# test errors:
ftest = get_subset(f,test_data); fregtest = get_subset(freg,test_data)
test_R2 += R2(ftest,fregtest)
#training errors:
ftrain = get_subset(f,train_data); fregtrain = get_subset(freg,train_data)
train_R2 += R2(ftrain,fregtrain)
# self variables
self.test_R2 = test_R2/k
self.train_R2 = train_R2/k
def MSE(self):
data = self.data
f = data.f; X = data.X; z = data.z; correspondence = data.correspondence; partition = self.partition
k = self.k
args = self.args
test_var, test_bias, test_MSE, test_extra_terms = 0, 0, 0, 0
#train_var, train_bias, train_MSE, train_extra_terms = 0, 0, 0, 0
for i, test_data in enumerate(partition):
train_data = [x for j,x in enumerate(partition) if j!=i]
train_data = sum(train_data, [])
beta = data.get_beta(X[train_data],z[train_data],*args)
freg = data.model(beta)
test_data = [correspondence[j] for j in test_data]
# train_data = [correspondence[j] for j in train_data]
# test errors:
ftest = get_subset(f,test_data); fregtest = get_subset(freg,test_data)
test_var += var(fregtest)
test_bias += bias(ftest,fregtest)
test_MSE += MSE(ftest,fregtest)
test_extra_terms += extra_term(ftest,fregtest)
##training errors:
#ftrain = get_subset(f,train_data); fregtrain = get_subset(freg,train_data)
#train_var += var(fregtrain)
#train_bias += bias(ftrain,fregtrain)
#train_MSE += MSE(ftrain,fregtrain)
#train_extra_terms += extra_term(ftrain,fregtrain)
# self variables
self.test_var = test_var/k
self.test_bias = test_bias/k
self.test_MSE = test_MSE/k
self.test_extra_terms = test_extra_terms/k
#self.train_var = train_var/k
#self.train_bias = train_bias/k
#self.train_MSE = train_MSE/k
#self.train_extra_terms = train_extra_terms/k
#================================================================================================================
class regdata:
def __init__(self, f, degree):
# initializing variables
m = len(f[0,:]); n = len(f); mn = m*n;
x = np.linspace(0, 1, m); y = np.linspace(0, 1, n); z = np.zeros(mn); xy = np.zeros((mn,2));
# initializing some self variables
self.f = f; self.degree = degree; self.xm, self.ym = np.meshgrid(x,y); self.n=n;self.m=m; self.mn = mn; self.correspondence = []
# Making a sequence xy containing the pairs (x_i,y_j) for i,j=0,...,n, and a sequence z with matching pairs z_ij = f(x_i, y_j)
counter = 0
for i in range(0,m):
for j in range(0,n):
                z[counter]=f[j,i]  # f is indexed as [row, column] = [j, i]
xy[counter,:] = [x[i],y[j]]
self.correspondence.append([i,j]) #Saves the 1-1 correspondence: {counter} <-> {(i,j)} for later
counter+=1
self.z = z
# Make X
number_basis_elts=int((degree+2)*(degree+1)/2) #(degree+1)th triangular number (number of basis elements for R[x,y] of degree <= degree)
X = np.zeros((mn,number_basis_elts))
powers = []
for i in range(0,mn):
counter = 0
for j in range(0,degree+1):
k = 0
while j+k <= degree:
xi = xy[i,0]
yi = xy[i,1]
X[i,counter]= (xi**j)*(yi**k)
powers.append([j , k])
k+=1
counter+=1
self.X = X
self.powers = powers
self.number_basis_elts = number_basis_elts
self.invXTX = linalg.inv(np.matmul(np.transpose(X),X))
# Regression
def get_reg(self, *args):
'''Returns the polynomial fit as a numpy array. If *args is empty the fit is based on an ordinary least square.
If *args contains a number LAMBDA, then the fit is found using Ridge for the given bias LAMBDA. If *args contains
        two numbers LAMBDA and epsilon, then the fit is found using Lasso. See the function "get_beta" for more details.'''
X=self.X; z=self.z #relabeling self variables
beta = self.get_beta(X,z,*args) #obtaining beta
reg = self.model(beta) #obtaining model from coefficients beta
return reg
# Get beta (given X and z)
def get_beta(self, X, z,*args):
'''Returns coefficients for a given beta as a numpy array, found using either ordinary least square,
Ridge or Lasso regression depending on the arguments. If *args is empty, then beta is found using
ordinary least square. If *args contains a number it will be treated as a bias LAMBDA for a Ridge regression.
If *args contains two numbers, then the first will count as a LAMBDA and the second as a tolerance epsilon.
In this case beta is found using a shooting algorithm that runs until it converges up to the set tolerance.
'''
XT = np.transpose(X)
beta = np.matmul(XT,X)
if len(args) >= 1: #Ridge parameter LAMBDA
LAMBDA = args[0]
beta[np.diag_indices_from(beta)]+=LAMBDA
beta = SVDinv(beta)
beta = np.matmul(beta,XT)
beta = np.matmul(beta,z)
#Shooting algorithm for Lasso
if len(args)>=2:
epsilon = args[1]
D = self.number_basis_elts
ints = np.arange(0,D,1)
beta_old = 0.0
while np.linalg.norm(beta-beta_old)>=epsilon:
beta_old = np.copy(beta)
for j in range(0,D):
aj = 2*np.sum(X[:,j]**2)
no_j = ints[np.arange(D)!=j]
cj = 2*np.sum(np.multiply(X[:,j],(z-np.matmul(X[:,no_j],beta[no_j]))))
if cj<-LAMBDA:
beta[j]=(cj+LAMBDA)/aj
elif cj > LAMBDA:
beta[j]=(cj-LAMBDA)/aj
else:
beta[j]=0.0
return beta
# Get model given beta
def model(self,beta):
'''Returns heigh values based on the coefficients beta as a matrix
that matches the grid xm, ym. The degree of the polynomial equals self.degree.
'''
xm = self.xm; ym = self.ym; degree = self.degree #relabeling self variables
s=0
counter = 0
# loop that adds terms of the form beta*x^j*y^k such that j+k<=5
for j in range(0,degree + 1):
k = 0
while j+k <= degree:
s+= beta[counter]*(xm**j)*(ym**k)
counter +=1
k+=1
return s
def get_data_partition(self,k):
''' Creates a random partition of k (almost) equally sized parts of the array
        {0,1,...,mn-1}. This can be used to make training/testing data.
'''
mn = self.mn; correspondence = self.correspondence
indices = np.arange(mn)
indices_shuffle = np.arange(mn)
np.random.shuffle(indices_shuffle)
partition = []
for step in range(0,k):
part = list(indices_shuffle[step:mn:k])
#part = [correspondence[i] for i in part]
partition.append(part)
return partition
def bootstrap_step(self, samplesize, *args):
        '''Finds and returns the coefficients that determine a model (OLS, Ridge or Lasso),
        depending on *args.
'''
mn = self.mn; X = self.X; z = self.z; #relabeling self variables
integers = np.random.randint(low=0, high=mn-1, size=samplesize)
znew = z[integers]
Xnew = X[integers,:]
betanew = self.get_beta(Xnew,znew,*args)
return betanew
# Variance/ covariance matrix
def var_covar_matrix(self,reg):
''' Returns the variance/covariance matrix for beta based on the given data.
This matrix is derived from a statistical viewpoint, where one assumes beta to
have a normal distribution.
'''
p = self.number_basis_elts; invXTX = self.invXTX; N = self.mn; f = self.f # Relabeling self variables
sigma2=1.0/(N-p-1)*np.sum((f-reg)*(f-reg))
return sigma2*invXTX # OBS! Based on matrix inversion. Inaccurate for N,p>>0.
#================================================================================================================
def plot_3D(f,plottitle):
''' Simple function to create 3d plot of the given data f,
    with the given title plottitle.
'''
m = len(f[0,:]); n = len(f);
x = np.linspace(0, 1, m)
y = np.linspace(0, 1, n);
xm, ym = np.meshgrid(x,y)
# Plot f
fig = plt.figure()
ax = fig.gca(projection="3d")
surf = ax.plot_surface(xm, ym, f, cmap=cm.coolwarm, linewidth=0, antialiased=False)
# Customize the z axis.
ax.zaxis.set_major_locator(LinearLocator(10))
ax.zaxis.set_major_formatter(FormatStrFormatter("%.02f"))
ax.text2D(0.05, 0.95, plottitle, transform=ax.transAxes)
ax.view_init(30, 60)
# Add a color bar which maps values to colors.
fig.colorbar(surf, shrink=0.5, aspect=5)
plt.show(block=False)
#================================================================================================================
def numerical_error(data,LAMBDA):
'''Rough numerical analysis of matrix inversions for this problem. Comparison of error and time usage
    of SVD (singular value decomposition) for matrix inversion against the scipy.linalg inversion algorithm.
Printing results to terminal.
'''
return_items = []
degree = data.degree; m = data.m; n = data.n
# Study numerical error and time for SVD
print("Polynomial fit of FrankeFunction in x, y of degree ", degree," with grid size ", (m,n)," analysis:")
print("")
X = data.X; XT = np.transpose(X); XTX = np.matmul(XT,X) #Obtaining XTX
start_time = time.time() # start meassuring time
inv_XTX = linalg.inv(XTX) # inversion using scipi.linalg
end_time = time.time()
print("Inverting XTX without SVD", "--- %s seconds ---" % (end_time - start_time)); return_items.append(end_time - start_time)
inv_XTX_ = np.copy(inv_XTX) # storing inversion of XTX for later
start_time = time.time()
inv_XTX = SVDinv(XTX)
end_time = time.time()
print("Inverting XTX with SVD", "--- %s seconds ---" % (end_time - start_time)); return_items.append(end_time - start_time)
print(' ')
I_approx_ = np.matmul(inv_XTX_,XTX); # approximate I (no SVD)
I = np.identity(len(I_approx_)); # obtaining analytical I
output = np.linalg.norm(I_approx_-I)
print("|(X^TX)^-1(X^TX)-I| = ",output, " (no SVD)"); return_items.append(output)
I_approx = np.matmul(inv_XTX,XTX) # approximate I (SVD)
output = np.linalg.norm(I_approx-I)
print("|(X^TX)^-1(X^TX)-I| = ",np.linalg.norm(I_approx-I), " (SVD)"); return_items.append(output)
XTX[np.diag_indices_from(XTX)]+=LAMBDA
inv_XTX = linalg.inv(XTX)
I_approx_ = np.matmul(inv_XTX,XTX) # approximate I (no SVD)
output = np.linalg.norm(I_approx_-I)
print("|(X^TX + I LAMBDA)^-1(X^TX + I LAMBDA)-I| = ",output , ", LAMBDA = ", LAMBDA, " (no SVD)"); return_items.append(output)
inv_XTX = SVDinv(XTX)
I_approx = np.matmul(inv_XTX,XTX)
output = np.linalg.norm(I_approx-I)
print("|(X^TX + I LAMBDA)^-1(X^TX + I LAMBDA)-I| = ",output, ", LAMBDA = ", LAMBDA, " (SVD)"); return_items.append(output)
print(' ')
return return_items
#================================================================================================================
def plot_R2_scores(data,Nstart,Nstop,name, epsilon = 0.001):
''' This function makes a plot of the R2 scores vs Lambda of the different regression methods,
for a given dataset.'''
degree = data.degree; f = data.f # obtaining class data
N = Nstop-Nstart # number of lambdas
lambdas = np.zeros(N)
R2_ols = np.zeros(N)
R2_Ridge = np.zeros(N)
R2_Lasso = np.zeros(N)
for i in range(0,N):
LAMBDA = 10**(Nstart+i)
lambdas[i]=LAMBDA
R2_ols[i]=R2(f, data.get_reg())
R2_Ridge[i]=R2(f, data.get_reg(LAMBDA))
R2_Lasso[i]=R2(f, data.get_reg(LAMBDA,epsilon))
print("Completed lambda: ", LAMBDA, " Completion: {:.1%}".format(float(i)/(N-1)))
plotitle = '$R^2$ score of degree {} polynomial fit on {}'.format(degree,name)
plt.figure()
plt.plot(np.log10(lambdas),R2_ols)
plt.plot(np.log10(lambdas),R2_Ridge)
plt.plot(np.log10(lambdas),R2_Lasso,'--')
plt.axis([Nstart, N+Nstart-1, 0, 1])
plt.xlabel('log $\lambda$')
plt.ylabel('$R^2$ score')
plt.legend(('Ordinary least square','Ridge','Lasso'))
plt.title(plotitle)
plt.grid(True)
plt.show(block=False)
#================================================================================================================
def plot_R2_scores_k_cross_validation(data,Nstart,Nstop,k,name, epsilon = 0.001):
    ''' This function plots the mean test and training R2 scores vs LAMBDA obtained from a k-fold cross
    validation on the given data set. The same partition of the data set is used for each lambda, and
    the OLS, Ridge and Lasso models are refitted on the training folds for every lambda.
    See "k_cross_validation" for more details.'''
degree = data.degree; f = data.f # obtaining class data
N = Nstop-Nstart # number of lambdas
# Comparing R2 scores, regression with fixed degree, variable LAMBDA
lambdas = np.zeros(N)
partition = data.get_data_partition(k)
kval = k_cross_validation(data,partition)
kval.R2()
R2_Lasso_test_data = np.zeros(N)
R2_Lasso_training_data = np.zeros(N)
R2_Ridge_test_data = np.zeros(N)
R2_Ridge_training_data = np.zeros(N)
# OLS R2 score
R2score_ols_test, R2score_ols_train = kval.test_R2, kval.train_R2
R2_ols_test_data = np.ones(N)*R2score_ols_test
R2_ols_training_data = np.ones(N)*R2score_ols_train
for i in range(0,N):
LAMBDA = 10**(Nstart+i)
lambdas[i]=LAMBDA
kval = k_cross_validation(data,partition,LAMBDA)
kval.R2()
# Ridge R2 score
R2score_ridge_test, R2score_ridge_train = kval.test_R2, kval.train_R2
R2_Ridge_test_data[i] = R2score_ridge_test
R2_Ridge_training_data[i] = R2score_ridge_train
kval = k_cross_validation(data,partition,LAMBDA,epsilon)
kval.R2()
# Lasso R2 score
R2score_lasso_test, R2score_lasso_train = kval.test_R2, kval.train_R2
R2_Lasso_test_data[i] = R2score_lasso_test
R2_Lasso_training_data[i] = R2score_lasso_train
print("Completed lambda: ", LAMBDA, " Completion: {:.1%}".format(float(i)/(N-1)))
plotitle = '$R^2$ scores of degree {} polynomial fit on {}, $k=${}'.format(degree,name,k)
plt.figure()
plt.plot(np.log10(lambdas),R2_ols_test_data)
plt.plot(np.log10(lambdas),R2_ols_training_data,'--')
plt.plot(np.log10(lambdas),R2_Ridge_test_data)
plt.plot(np.log10(lambdas),R2_Ridge_training_data,'--')
plt.plot(np.log10(lambdas),R2_Lasso_test_data)
plt.plot(np.log10(lambdas),R2_Lasso_training_data,'--')
plt.axis([Nstart, Nstart+N-2, 0, 1])
plt.xlabel('log $\lambda$')
plt.ylabel('$R^2$ score')
if (np.amax(R2_ols_test_data)> 0 and np.amax(R2_ols_training_data)> 0):
plt.legend(('OLS: test data', 'OLS: training data','Ridge: test data', 'Ridge: training data','Lasso: test data', 'Lasso: training data'))
elif (np.amax(R2_ols_test_data)<= 0 and np.amax(R2_ols_training_data)> 0):
plt.legend(('OLS: test data (negative)', 'OLS: training data','Ridge: test data', 'Ridge: training data','Lasso: test data', 'Lasso: training data'))
elif (np.amax(R2_ols_test_data)> 0 and np.amax(R2_ols_training_data)<= 0):
plt.legend(('OLS: test data', 'OLS: training data (negative)','Ridge: test data', 'Ridge: training data','Lasso: test data', 'Lasso: training data'))
elif (np.amax(R2_ols_test_data)<= 0 and np.amax(R2_ols_training_data)<= 0):
plt.legend(('OLS: test data (negative)', 'OLS: training data (negative)','Ridge: test data', 'Ridge: training data','Lasso: test data', 'Lasso: training data'))
plt.title(plotitle)
plt.grid(True)
plt.show(block=False)
#return ols_best, ridge_best, lasso_best
#================================================================================================================
def plot_R2_complexity(degstart,degend,degstep,f,name, LAMBDA = 0.00001, epsilon = 0.001):
    ''' Compare R2 scores of OLS, Ridge and Lasso regressions with fixed LAMBDA and variable
    polynomial degree, and plot the result.
    '''
degrees = np.arange(degstart,degend+1,degstep)
N = len(degrees)
R2_ols, R2_Ridge, R2_Lasso = np.zeros(N), np.zeros(N), np.zeros(N)
for i, degree in enumerate(degrees):
data_f = regdata(f,degree)
R2_ols[i]=R2(f, data_f.get_reg())
R2_Ridge[i]=R2(f, data_f.get_reg(LAMBDA))
R2_Lasso[i]=R2(f, data_f.get_reg(LAMBDA,epsilon))
print("Completed degree: ", degree, " Completion: {:.1%}".format(float(i)/(N-1)))
plotitle = '$R^2$ score of polynomial fit on {} with $\lambda=${}'.format(name,LAMBDA)
plt.figure()
plt.plot(degrees,R2_ols)
plt.plot(degrees,R2_Ridge)
plt.plot(degrees,R2_Lasso,'--')
plt.xlabel('degree of fitting polynomial')
plt.ylabel('$R^2$ score')
plt.axis([degstart,degend, 0, 1])
plt.legend(('Ordinary least square','Ridge','Lasso'))
plt.title(plotitle)
plt.grid(True)
plt.show(block=False)
#================================================================================================================
def plot_MSE_variance(degstart, degend, degstep, f, LAMBDA = 0.01, epsilon = 0.001, k=10):
    # Comparing MSE, bias, variance and additional terms as a function of complexity.
degrees = np.arange(degstart,degend+1,degstep)
N = len(degrees)
data = regdata(f,5)
fvar = np.zeros(N); fbias = np.zeros(N); fMSE = np.zeros(N); fextra_terms = np.zeros(N)
# function for plotting
def makeplot(methodname, *args, partition = None):
print(methodname)
for i, degree in enumerate(degrees):
data = regdata(f,degree)
if partition == None:
freg = data.get_reg(*args)
fvar[i], fbias[i], fMSE[i], fextra_terms[i] = var(freg), bias(f,freg), MSE(f,freg), extra_term(f,freg)
else:
kval = k_cross_validation(data, partition, *args)
kval.MSE()
fvar[i] = kval.test_var
fbias[i] = kval.test_bias
fMSE[i] = kval.test_MSE
fextra_terms[i] =kval.test_extra_terms
#fvar[i], fbias[i], fMSE[i], fextra_terms[i], train_var, train_bias, train_MSE, train_extra_terms
print("Completed degree: ", degree, " Completion: {:.1%}".format(float(degree-degstart)/(degend-degstart)))
plt.figure()
plt.plot(degrees, fvar)
plt.plot(degrees, fbias)
plt.plot(degrees, fMSE,'--')
plt.plot(degrees, fextra_terms)
plt.xlabel('degree')
plt.ylabel('Variance, bias, and MSE')
plt.legend(('Variance','Bias','MSE','Additional term'))
plt.grid(True)
plt.show(block=False)
    # It is a good idea to comment out the plots that you don't need
## Ordinary least square plot
#makeplot("Ordinary least squares")
#plt.title("Error of ordinary least squares")
## Ridge plot
#makeplot("Ridge regression",LAMBDA)
#plt.title("Error of Ridge regression, $\lambda=${}".format(LAMBDA))
## Lasso plot
#makeplot("Lasso regression",LAMBDA,epsilon)
#plt.title("Error of lasso regression, $\lambda=${}".format(LAMBDA))
# k-cross validation
partition_ = data.get_data_partition(k)
# Ordinary least square plot
# makeplot("Ordinary least squares {}-fold cross validation".format(k), partition = partition_)
# plt.title("Error OLS using {}-fold cross validation".format(k))
## Ridge plot
#makeplot("Ridge regression {}-fold cross validation".format(k), LAMBDA, partition=partition_)
#plt.title("Error Ridge using {}-fold cross validation, $\lambda=${}".format(k,LAMBDA))
# Lasso plot
makeplot("Lasso regression {}-fold cross validation".format(k), LAMBDA, epsilon, partition_)
plt.title("Error Lasso using {}-fold cross validation, $\lambda=${}".format(k,LAMBDA))
|
py | 1a370cea0d651c080adc7afef33c5a4a39ceddd6 | import spacy
from spacy.tokenizer import Tokenizer
from spacy.lang.char_classes import ALPHA, ALPHA_LOWER, ALPHA_UPPER
from spacy.lang.char_classes import CONCAT_QUOTES, LIST_ELLIPSES, LIST_ICONS
from spacy.language import Language
from spacy.pipe_analysis import Doc
from spacy.util import compile_infix_regex
from gensim.corpora.dictionary import Dictionary
from itertools import tee
from enum import Enum
from os import cpu_count
from typing import Iterable
class LangEnum(Enum):
"""
Enum to represent supported language codes
"""
EN = 0
RU = 1
class Preprocessor:
"""
Use this class to encapsulate Spacy models, Gensim stuff and everything
else needed for text preprocessing.
"""
    def __init__(self, language: LangEnum = LangEnum.EN,
stop_words: Iterable[str] = None,
tokenize_ents: bool = True,
workers: int = cpu_count()):
# Preload ready to use spacy language model (tokenizer, lemmatizer, etc)
if language == LangEnum.EN:
self.nlp: Language = spacy.load('en_core_web_sm')
elif language == LangEnum.RU:
self.nlp: Language = spacy.load('ru_core_news_md')
else:
raise NotImplementedError('Only Russian and English '
'languages are supported at the moment')
        # Whether or not to tokenize detected named entities
self.tokenize_ents = tokenize_ents
self.workers = workers
# Modify tokenizer infix patterns
infixes = (
LIST_ELLIPSES
+ LIST_ICONS
+ [
r"(?<=[0-9])[+\-\*^](?=[0-9-])",
r"(?<=[{al}{q}])\.(?=[{au}{q}])".format(
al=ALPHA_LOWER, au=ALPHA_UPPER, q=CONCAT_QUOTES
),
r"(?<=[{a}]),(?=[{a}])".format(a=ALPHA),
# r"(?<=[{a}])(?:{h})(?=[{a}])".format(a=ALPHA, h=HYPHENS),
r"(?<=[{a}0-9])[:<>=/](?=[{a}])".format(a=ALPHA),
]
)
infix_re = compile_infix_regex(infixes)
self.nlp.tokenizer.infix_finditer = infix_re.finditer
# Update the built-in stopwords list
if stop_words is not None:
self.update_stopwords(stop_words)
@spacy.Language.component(name='custom_preproc')
def lemmatize(doc: Doc):
tokens = [token for token in doc
if not (token.is_stop or
token.is_punct or
token.like_email or
token.like_url or
token.is_space or
token.is_currency or
token.like_num or
token.lemma_.lower() in
self.nlp.Defaults.stop_words)]
res_tokens = []
if not self.tokenize_ents and len(doc.ents) > 0:
merged_tokens = ""
for token in tokens:
                    if token.ent_iob == 3:  # Beginning of the entity
# token = "-".join(token.lemma_.lower().split('-'))
merged_tokens = token.lemma_.lower().strip() + "_"
elif token.ent_iob == 1: # Inside the entity
merged_tokens += token.lemma_.lower().strip() + "_"
elif merged_tokens == "":
res_tokens.append(token.lemma_.lower().strip())
else:
res_tokens.append(merged_tokens[:-1])
merged_tokens = ""
else:
res_tokens = [t.lemma_.lower().strip() for t in tokens]
new_doc = Doc(vocab=doc.vocab,
words=res_tokens)
return new_doc
# Add stop words removing to spacy pipeline
self.nlp.add_pipe(
'custom_preproc',
last=True
)
def update_stopwords(self, stop_words: Iterable[str]) -> None:
"""
Update built-in spacy language model stopwords list
:param stop_words: Iterable of strings - target stopwords
:return: None
"""
self.nlp.Defaults.stop_words.update(stop_words)
for word in self.nlp.Defaults.stop_words:
lexeme = self.nlp.vocab[word]
lexeme.is_stop = True
def preprocess_texts(self,
data: Iterable[str]) -> (Iterable[Doc], Dictionary):
"""
Get preprocessed texts
:param data: iterable of strings
(each string is considered to be a single document)
:return: preprocessed documents and
a gensim Dictionary of the given docs
"""
docs = self.__get_preprocessed_docs__(data)
docs, docs_iter_copy = tee(docs)
return docs, Dictionary(map(lambda x: [y.text for y in x], docs_iter_copy))
def __get_preprocessed_docs__(self,
data: Iterable[str]):
"""
Helper function to generate new docs using spacy Language.pipe()
:param data: iterable of strings (1 string = 1 doc)
:return: spacy Document generator
"""
docs = self.nlp.pipe(data, n_process=self.workers)
for doc in docs:
yield doc
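# Added usage sketch (not part of the original module; the sample sentences are
# made up). It assumes the en_core_web_sm model is installed and that the
# pinned spaCy/gensim versions behave as the class above expects.
if __name__ == "__main__":
    preprocessor = Preprocessor(language=LangEnum.EN, workers=1)
    sample_docs, sample_dict = preprocessor.preprocess_texts(
        ["Cats chase mice.", "Dogs chase cats!"]
    )
    for processed_doc in sample_docs:
        print([token.text for token in processed_doc])
    print(sample_dict.token2id)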
|
py | 1a370d9fa0053cfffd9f865894092efbd6934d6b | """
This file offers the methods to automatically retrieve the graph Kocuria polaris.
The graph is automatically retrieved from the STRING repository.
Report
---------------------
At the time of rendering these methods (please see datetime below), the graph
had the following characteristics:
Datetime: 2021-02-02 19:50:41.650070
The undirected graph Kocuria polaris has 2971 nodes and 239488 weighted
edges, of which none are self-loops. The graph is dense as it has a density
of 0.05428 and has 26 connected components, where the component with most
nodes has 2911 nodes and the component with the least nodes has 2 nodes.
The graph median node degree is 127, the mean node degree is 161.22, and
the node degree mode is 1. The top 5 most central nodes are 136273.GY22_03000
(degree 1183), 136273.GY22_03030 (degree 1011), 136273.GY22_10725 (degree
912), 136273.GY22_05440 (degree 904) and 136273.GY22_12710 (degree 893).
References
---------------------
Please cite the following if you use the data:
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
Usage example
----------------------
The usage of this graph is relatively straightforward:
.. code:: python
# First import the function to retrieve the graph from the datasets
from ensmallen_graph.datasets.string import KocuriaPolaris
# Then load the graph
graph = KocuriaPolaris()
# Finally, you can do anything with it, for instance, compute its report:
print(graph)
# If you need to run a link prediction task with validation,
# you can split the graph using a connected holdout as follows:
train_graph, validation_graph = graph.connected_holdout(
# You can use an 80/20 split the holdout, for example.
train_size=0.8,
# The random state is used to reproduce the holdout.
random_state=42,
        # Whether to show a loading bar.
verbose=True
)
# Remember that, if you need, you can enable the memory-time trade-offs:
train_graph.enable(
vector_sources=True,
vector_destinations=True,
vector_outbounds=True
)
# Consider using the methods made available in the Embiggen package
# to run graph embedding or link prediction tasks.
"""
from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen_graph import EnsmallenGraph # pylint: disable=import-error
def KocuriaPolaris(
directed: bool = False,
verbose: int = 2,
cache_path: str = "graphs/string",
**additional_graph_kwargs: Dict
) -> EnsmallenGraph:
"""Return new instance of the Kocuria polaris graph.
The graph is automatically retrieved from the STRING repository.
Parameters
-------------------
directed: bool = False,
Wether to load the graph as directed or undirected.
By default false.
verbose: int = 2,
        Whether to show loading bars during the retrieval and building
of the graph.
cache_path: str = "graphs",
Where to store the downloaded graphs.
additional_graph_kwargs: Dict,
Additional graph kwargs.
Returns
-----------------------
    Instance of Kocuria polaris graph.
Report
---------------------
At the time of rendering these methods (please see datetime below), the graph
had the following characteristics:
Datetime: 2021-02-02 19:50:41.650070
The undirected graph Kocuria polaris has 2971 nodes and 239488 weighted
edges, of which none are self-loops. The graph is dense as it has a density
of 0.05428 and has 26 connected components, where the component with most
nodes has 2911 nodes and the component with the least nodes has 2 nodes.
The graph median node degree is 127, the mean node degree is 161.22, and
the node degree mode is 1. The top 5 most central nodes are 136273.GY22_03000
(degree 1183), 136273.GY22_03030 (degree 1011), 136273.GY22_10725 (degree
912), 136273.GY22_05440 (degree 904) and 136273.GY22_12710 (degree 893).
References
---------------------
Please cite the following if you use the data:
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
Usage example
----------------------
The usage of this graph is relatively straightforward:
.. code:: python
# First import the function to retrieve the graph from the datasets
from ensmallen_graph.datasets.string import KocuriaPolaris
# Then load the graph
graph = KocuriaPolaris()
# Finally, you can do anything with it, for instance, compute its report:
print(graph)
# If you need to run a link prediction task with validation,
# you can split the graph using a connected holdout as follows:
train_graph, validation_graph = graph.connected_holdout(
# You can use an 80/20 split for the holdout, for example.
train_size=0.8,
# The random state is used to reproduce the holdout.
random_state=42,
# Whether to show a loading bar.
verbose=True
)
# Remember that, if you need, you can enable the memory-time trade-offs:
train_graph.enable(
vector_sources=True,
vector_destinations=True,
vector_outbounds=True
)
# Consider using the methods made available in the Embiggen package
# to run graph embedding or link prediction tasks.
"""
return AutomaticallyRetrievedGraph(
graph_name="KocuriaPolaris",
dataset="string",
directed=directed,
verbose=verbose,
cache_path=cache_path,
additional_graph_kwargs=additional_graph_kwargs
)()
|
py | 1a370da2c0df30f94d35f4bf4cc4d0a4851fe0f2 | #
# General Electricity sector Decarbonization Model (GEDM)
# Copyright (C) 2020 Cheng-Ta Chu.
# Licensed under the MIT License (see LICENSE file).
#
# Module note:
# Define commodity, time slice and transmission classes
#
#------------ commodity -------------
class Commodity:
""" Commodity class """
def __init__(self, **kwargs):
self.sCommodityName = str( kwargs["CommodityName"] )
self.sCategory = str( kwargs["Category"] )
self.fHeatRate = float(kwargs["HeatRate"])
self.fEmissionFactor_CO2 = float(kwargs["EmissionFactor_CO2"]) # M.Tonne/PJ = Tonne/GJ
self.fFuelPrice_YS = list() # USD/GJ
return
#------------ time slice -------------
class TimeSlice:
""" time slice class """
def __init__(self, **kwargs):
self.sTSIndex = str(kwargs["TSIndex"])
self.sMonth = str(kwargs["Month"])
self.sDay = str(kwargs["Day"])
self.sHour = str(kwargs["Hour"])
self.iDayIndex = int(kwargs["DayIndex"])
self.iRepDayInYear = int(kwargs["RepDayInYear"])
self.iRepHoursInDay = int(kwargs["RepHoursInDay"])
self.iRepHoursInYear = int(kwargs["RepHoursInYear"])
return
class DayTimeSlice:
""" TS day class """
def __init__(self, **kwargs):
self.MonthDay = str(kwargs["MonthDay"])
self.iDayIndex = int(kwargs["iDayIndex"])
self.lsDiurnalTS = list() # list of DiurnalTimeSlice objects
return
class DiurnalTimeSlice:
""" diurnal time slice class """
def __init__(self, **kwargs):
self.sTSIndex = kwargs["sTSIndex"]
self.iTimeSliceIndex = kwargs["iTimeSliceIndex"]
self.iRepHoursInYear = kwargs["iRepHoursInYear"]
self.iRepHoursInDay = kwargs["iRepHoursInDay"]
self.fValue = 0
return
#------------ transmission -------------
class Transmission:
""" transmission class, links between zones """
def __init__(self, **kwargs):
self.sTransID = str( kwargs["From"] ) + "/" + str( kwargs["To"] )
self.sFrom = str( kwargs["From"] ) # source zone
self.sTo = str( kwargs["To"] ) # destination zone
self.fDistance = float( kwargs["Dist"] ) # KM, distance of the link
self.b2015Conn = int( kwargs["Conn2015"] ) # connection status in base year
self.fBaseCap = float( kwargs["BaseCap"] ) # base year capacity
self.dicTransNewBuild_YS = {} # MW, new capacity by period
self.dicTransAccCap_YS = {} # MW, total capacity by period
return
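#------------ usage sketch (added example) -------------
# Hedged illustration, not part of the original GEDM module: it only exercises
# the constructors defined above to show the keyword arguments each record
# class expects; all values are made up for demonstration.
if __name__ == "__main__":
    gas = Commodity(CommodityName="NaturalGas", Category="Fossil",
                    HeatRate=7.0, EmissionFactor_CO2=0.0561)
    link = Transmission(From="ZoneA", To="ZoneB", Dist=120.5,
                        Conn2015=1, BaseCap=500.0)
    print(gas.sCommodityName, gas.fEmissionFactor_CO2)
    print(link.sTransID, link.fDistance, link.fBaseCap)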
|
py | 1a370e496bb11f9d73159a23798793d296f9b7ce | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# to configure behavior, define $CQL_TEST_HOST to the destination address
# for Thrift connections, and $CQL_TEST_PORT to the associated port.
from __future__ import with_statement
import re
from .basecase import BaseTestCase, cqlsh
from .cassconnect import testrun_cqlsh
import unittest
import sys
BEL = '\x07' # the terminal-bell character
CTRL_C = '\x03'
TAB = '\t'
# completions not printed out in this many seconds may not be acceptable.
# tune if needed for a slow system, etc, but be aware that the test will
# need to wait this long for each completion test, to make sure more info
# isn't coming
COMPLETION_RESPONSE_TIME = 0.5
completion_separation_re = re.compile(r'\s+')
@unittest.skipIf(sys.platform == "win32", 'Tab completion tests not supported on Windows')
class CqlshCompletionCase(BaseTestCase):
def setUp(self):
self.cqlsh_runner = testrun_cqlsh(cqlver=cqlsh.DEFAULT_CQLVER, env={'COLUMNS': '100000'})
self.cqlsh = self.cqlsh_runner.__enter__()
def tearDown(self):
self.cqlsh_runner.__exit__(None, None, None)
def _get_completions(self, inputstring, split_completed_lines=True):
"""
Get results of tab completion in cqlsh. Returns a bare string if a
string completes immediately. Otherwise, returns a set of all
whitespace-separated tokens in the offered completions by default, or a
list of the lines in the offered completions if split_completed_lines is
False.
"""
self.cqlsh.send(inputstring)
self.cqlsh.send(TAB)
immediate = self.cqlsh.read_up_to_timeout(COMPLETION_RESPONSE_TIME)
immediate = immediate.replace(' \b', '')
self.assertEqual(immediate[:len(inputstring)], inputstring)
immediate = immediate[len(inputstring):]
immediate = immediate.replace(BEL, '')
if immediate:
return immediate
self.cqlsh.send(TAB)
choice_output = self.cqlsh.read_up_to_timeout(COMPLETION_RESPONSE_TIME)
if choice_output == BEL:
choice_output = ''
self.cqlsh.send(CTRL_C) # cancel any current line
self.cqlsh.read_to_next_prompt()
choice_lines = choice_output.splitlines()
if choice_lines:
# ensure the last line of the completion is the prompt
prompt_regex = self.cqlsh.prompt.lstrip() + re.escape(inputstring)
msg = ('Double-tab completion '
'does not print prompt for input "{}"'.format(inputstring))
self.assertRegexpMatches(choice_lines[-1], prompt_regex, msg=msg)
choice_lines = [line.strip() for line in choice_lines[:-1]]
choice_lines = [line for line in choice_lines if line]
if split_completed_lines:
completed_lines = map(set, (completion_separation_re.split(line.strip())
for line in choice_lines))
if not completed_lines:
return set()
completed_tokens = set.union(*completed_lines)
return completed_tokens - {''}
else:
return choice_lines
assert False
def _trycompletions_inner(self, inputstring, immediate='', choices=(),
other_choices_ok=False,
split_completed_lines=True):
"""
Test tab completion in cqlsh. Enters in the text in inputstring, then
simulates a tab keypress to see what is immediately completed (this
should only happen when there is only one completion possible). If
there is an immediate completion, the new text is expected to match
'immediate'. If there is no immediate completion, another tab keypress
is simulated in order to get a list of choices, which are expected to
match the items in 'choices' (order is not important, but case is).
"""
completed = self._get_completions(inputstring,
split_completed_lines=split_completed_lines)
if immediate:
msg = 'cqlsh completed %r, but we expected %r' % (completed, immediate)
self.assertEqual(completed, immediate, msg=msg)
return
if other_choices_ok:
self.assertEqual(set(choices), completed.intersection(choices))
else:
self.assertEqual(set(choices), set(completed))
def trycompletions(self, inputstring, immediate='', choices=(),
other_choices_ok=False, split_completed_lines=True):
try:
self._trycompletions_inner(inputstring, immediate, choices,
other_choices_ok=other_choices_ok,
split_completed_lines=split_completed_lines)
finally:
self.cqlsh.send(CTRL_C) # cancel any current line
self.cqlsh.read_to_next_prompt()
def strategies(self):
return self.module.CqlRuleSet.replication_strategies
class TestCqlshCompletion(CqlshCompletionCase):
cqlver = '3.1.6'
module = cqlsh.cql3handling
def test_complete_on_empty_string(self):
self.trycompletions('', choices=('?', 'ALTER', 'BEGIN', 'CAPTURE', 'CONSISTENCY',
'COPY', 'CREATE', 'DEBUG', 'DELETE', 'DESC', 'DESCRIBE',
'DROP', 'GRANT', 'HELP', 'INSERT', 'LIST', 'LOGIN', 'PAGING', 'REVOKE',
'SELECT', 'SHOW', 'SOURCE', 'TRACING', 'EXPAND', 'SERIAL', 'TRUNCATE',
'UPDATE', 'USE', 'exit', 'quit', 'CLEAR', 'CLS'))
def test_complete_command_words(self):
self.trycompletions('alt', '\b\b\bALTER ')
self.trycompletions('I', 'NSERT INTO ')
self.trycompletions('exit', ' ')
def test_complete_in_uuid(self):
pass
def test_complete_in_select(self):
pass
def test_complete_in_insert(self):
self.trycompletions('INSERT INTO ',
choices=('twenty_rows_table',
'ascii_with_special_chars',
'users',
'has_all_types',
'system.',
'empty_composite_table',
'empty_table',
'undefined_values_table',
'dynamic_columns',
'twenty_rows_composite_table',
'utf8_with_special_chars',
'system_traces.',
'songs'),
other_choices_ok=True)
self.trycompletions('INSERT INTO twenty_rows_composite_table',
immediate=' ')
self.trycompletions('INSERT INTO twenty_rows_composite_table ',
choices=['(', 'JSON'])
self.trycompletions('INSERT INTO twenty_rows_composite_table (a, b ',
choices=(')', ','))
self.trycompletions('INSERT INTO twenty_rows_composite_table (a, b, ',
immediate='c ')
self.trycompletions('INSERT INTO twenty_rows_composite_table (a, b, c ',
choices=(',', ')'))
self.trycompletions('INSERT INTO twenty_rows_composite_table (a, b)',
immediate=' VALUES ( ')
self.trycompletions('INSERT INTO twenty_rows_composite_table (a, b, c) VAL',
immediate='UES ( ')
self.trycompletions(
'INSERT INTO twenty_rows_composite_table (a, b, c) VALUES (',
['<value for a (text)>'],
split_completed_lines=False)
self.trycompletions(
"INSERT INTO twenty_rows_composite_table (a, b, c) VALUES ('",
['<value for a (text)>'],
split_completed_lines=False)
self.trycompletions(
"INSERT INTO twenty_rows_composite_table (a, b, c) VALUES ( 'eggs",
['<value for a (text)>'],
split_completed_lines=False)
self.trycompletions(
"INSERT INTO twenty_rows_composite_table (a, b, c) VALUES ('eggs'",
immediate=', ')
self.trycompletions(
("INSERT INTO twenty_rows_composite_table (a, b, c) "
"VALUES ( 'eggs',"),
['<value for b (text)>'],
split_completed_lines=False)
self.trycompletions(
("INSERT INTO twenty_rows_composite_table (a, b, c) "
"VALUES ( 'eggs', 'sausage', 'spam')"),
immediate=' ')
self.trycompletions(
("INSERT INTO twenty_rows_composite_table (a, b, c) "
"VALUES ( 'eggs', 'sausage', 'spam') "),
choices=[';', 'USING', 'IF'])
self.trycompletions(
("INSERT INTO twenty_rows_composite_table (a, b, c) "
"VALUES ( 'eggs', 'sausage', 'spam');"),
choices=['?', 'ALTER', 'BEGIN', 'CAPTURE', 'CONSISTENCY', 'COPY',
'CREATE', 'DEBUG', 'DELETE', 'DESC', 'DESCRIBE', 'DROP',
'EXPAND', 'GRANT', 'HELP', 'INSERT', 'LIST', 'LOGIN', 'PAGING',
'REVOKE', 'SELECT', 'SHOW', 'SOURCE', 'SERIAL', 'TRACING',
'TRUNCATE', 'UPDATE', 'USE', 'exit', 'quit',
'CLEAR', 'CLS'])
self.trycompletions(
("INSERT INTO twenty_rows_composite_table (a, b, c) "
"VALUES ( 'eggs', 'sausage', 'spam') US"),
immediate='ING T')
self.trycompletions(
("INSERT INTO twenty_rows_composite_table (a, b, c) "
"VALUES ( 'eggs', 'sausage', 'spam') USING"),
immediate=' T')
self.trycompletions(
("INSERT INTO twenty_rows_composite_table (a, b, c) "
"VALUES ( 'eggs', 'sausage', 'spam') USING T"),
choices=['TTL', 'TIMESTAMP'])
self.trycompletions(
("INSERT INTO twenty_rows_composite_table (a, b, c) "
"VALUES ( 'eggs', 'sausage', 'spam') USING TT"),
immediate='L ')
self.trycompletions(
("INSERT INTO twenty_rows_composite_table (a, b, c) "
"VALUES ( 'eggs', 'sausage', 'spam') USING TI"),
immediate='MESTAMP ')
self.trycompletions(
("INSERT INTO twenty_rows_composite_table (a, b, c) "
"VALUES ( 'eggs', 'sausage', 'spam') USING TIMESTAMP "),
choices=['<wholenumber>'])
self.trycompletions(
("INSERT INTO twenty_rows_composite_table (a, b, c) "
"VALUES ( 'eggs', 'sausage', 'spam') USING TTL "),
choices=['<wholenumber>'])
self.trycompletions(
("INSERT INTO twenty_rows_composite_table (a, b, c) "
"VALUES ( 'eggs', 'sausage', 'spam') USING TIMESTAMP 0 "),
choices=['AND', ';'])
self.trycompletions(
("INSERT INTO twenty_rows_composite_table (a, b, c) "
"VALUES ( 'eggs', 'sausage', 'spam') USING TTL 0 "),
choices=['AND', ';'])
self.trycompletions(
("INSERT INTO twenty_rows_composite_table (a, b, c) "
"VALUES ( 'eggs', 'sausage', 'spam') USING TIMESTAMP 0 A"),
immediate='ND TTL ')
self.trycompletions(
("INSERT INTO twenty_rows_composite_table (a, b, c) "
"VALUES ( 'eggs', 'sausage', 'spam') USING TTL 0 A"),
immediate='ND TIMESTAMP ')
self.trycompletions(
("INSERT INTO twenty_rows_composite_table (a, b, c) "
"VALUES ( 'eggs', 'sausage', 'spam') USING TTL 0 AND TIMESTAMP "),
choices=['<wholenumber>'])
self.trycompletions(
("INSERT INTO twenty_rows_composite_table (a, b, c) "
"VALUES ( 'eggs', 'sausage', 'spam') USING TTL 0 AND TIMESTAMP 0 "),
choices=['AND', ';'])
self.trycompletions(
("INSERT INTO twenty_rows_composite_table (a, b, c) "
"VALUES ( 'eggs', 'sausage', 'spam') USING TTL 0 AND TIMESTAMP 0 AND "),
choices=[])
def test_complete_in_update(self):
self.trycompletions("UPD", immediate="ATE ")
self.trycompletions("UPDATE ",
choices=['twenty_rows_table',
'users', 'has_all_types', 'system.',
'ascii_with_special_chars',
'empty_composite_table', 'empty_table',
'undefined_values_table',
'dynamic_columns',
'twenty_rows_composite_table',
'utf8_with_special_chars',
'system_traces.', 'songs'],
other_choices_ok=True)
self.trycompletions("UPDATE empty_table ", choices=['USING', 'SET'])
self.trycompletions("UPDATE empty_table S",
immediate='ET lonelycol = ')
self.trycompletions("UPDATE empty_table SET lon",
immediate='elycol = ')
self.trycompletions("UPDATE empty_table SET lonelycol",
immediate=' = ')
self.trycompletions("UPDATE empty_table U", immediate='SING T')
self.trycompletions("UPDATE empty_table USING T",
choices=["TTL", "TIMESTAMP"])
self.trycompletions("UPDATE empty_table SET lonelycol = ",
choices=['<term (text)>'],
split_completed_lines=False)
self.trycompletions("UPDATE empty_table SET lonelycol = 'eg",
choices=['<term (text)>'],
split_completed_lines=False)
self.trycompletions("UPDATE empty_table SET lonelycol = 'eggs'",
choices=[',', 'WHERE'])
self.trycompletions("UPDATE empty_table SET lonelycol = 'eggs' WHERE ",
choices=['TOKEN(', 'lonelykey'])
self.trycompletions("UPDATE empty_table SET lonelycol = 'eggs' WHERE ",
choices=['TOKEN(', 'lonelykey'])
self.trycompletions("UPDATE empty_table SET lonelycol = 'eggs' WHERE lonel",
immediate='ykey ')
self.trycompletions("UPDATE empty_table SET lonelycol = 'eggs' WHERE lonelykey ",
choices=['=', '<=', '>=', '>', '<', 'CONTAINS', 'IN', '['])
self.trycompletions("UPDATE empty_table SET lonelycol = 'eggs' WHERE lonelykey = 0.0 ",
choices=['AND', 'IF', ';'])
self.trycompletions("UPDATE empty_table SET lonelycol = 'eggs' WHERE lonelykey = 0.0 AND ",
choices=['TOKEN(', 'lonelykey'])
self.trycompletions("UPDATE empty_table SET lonelycol = 'eggs' WHERE TOKEN(lonelykey ",
choices=[',', ')'])
self.trycompletions("UPDATE empty_table SET lonelycol = 'eggs' WHERE TOKEN(lonelykey) ",
choices=['=', '<=', '>=', '<', '>'])
self.trycompletions("UPDATE empty_table SET lonelycol = 'eggs' WHERE TOKEN(lonelykey) <= TOKEN(13) ",
choices=[';', 'AND', 'IF'])
self.trycompletions("UPDATE empty_table SET lonelycol = 'eggs' WHERE TOKEN(lonelykey) <= TOKEN(13) IF ",
choices=['EXISTS', '<quotedName>', '<identifier>'])
self.trycompletions("UPDATE empty_table SET lonelycol = 'eggs' WHERE TOKEN(lonelykey) <= TOKEN(13) IF EXISTS ",
choices=['>=', '!=', '<=', 'IN', '[', ';', '=', '<', '>'])
def test_complete_in_delete(self):
self.trycompletions('DELETE F', choices=['FROM', '<identifier>', '<quotedName>'])
self.trycompletions('DELETE a ', choices=['FROM', '[', ','])
self.trycompletions('DELETE a [',
choices=['<wholenumber>', 'false', '-', '<uuid>',
'<pgStringLiteral>', '<float>', 'TOKEN',
'<identifier>', '<quotedStringLiteral>',
'{', '[', 'NULL', 'true', '<blobLiteral>'])
self.trycompletions('DELETE a, ',
choices=['<identifier>', '<quotedName>'])
self.trycompletions('DELETE a FROM ',
choices=['twenty_rows_table',
'ascii_with_special_chars', 'users',
'has_all_types', 'system.',
'empty_composite_table', 'empty_table',
'system_auth.', 'undefined_values_table',
'dynamic_columns',
'twenty_rows_composite_table',
'utf8_with_special_chars',
'system_traces.', 'songs',
'"' + self.cqlsh.keyspace + '".'],
other_choices_ok=True)
self.trycompletions('DELETE FROM ',
choices=['twenty_rows_table',
'ascii_with_special_chars', 'users',
'has_all_types', 'system.',
'empty_composite_table', 'empty_table',
'system_auth.', 'undefined_values_table',
'dynamic_columns',
'twenty_rows_composite_table',
'utf8_with_special_chars',
'system_traces.', 'songs',
'system_auth.', 'system_distributed.',
'system_traces.',
'"' + self.cqlsh.keyspace + '".'],
other_choices_ok=True)
self.trycompletions('DELETE FROM twenty_rows_composite_table ',
choices=['USING', 'WHERE'])
self.trycompletions('DELETE FROM twenty_rows_composite_table U',
immediate='SING TIMESTAMP ')
self.trycompletions('DELETE FROM twenty_rows_composite_table USING TIMESTAMP ',
choices=['<wholenumber>'])
self.trycompletions('DELETE FROM twenty_rows_composite_table USING TIMESTAMP 0',
choices=['<wholenumber>'])
self.trycompletions('DELETE FROM twenty_rows_composite_table USING TIMESTAMP 0 ',
immediate='WHERE ')
self.trycompletions('DELETE FROM twenty_rows_composite_table USING TIMESTAMP 0 WHERE ',
choices=['a', 'b', 'TOKEN('])
self.trycompletions('DELETE FROM twenty_rows_composite_table USING TIMESTAMP 0 WHERE a ',
choices=['<=', '>=', 'CONTAINS', 'IN', '[', '=', '<', '>'])
self.trycompletions('DELETE FROM twenty_rows_composite_table USING TIMESTAMP 0 WHERE TOKEN(',
immediate='a ')
self.trycompletions('DELETE FROM twenty_rows_composite_table USING TIMESTAMP 0 WHERE TOKEN(a',
immediate=' ')
self.trycompletions('DELETE FROM twenty_rows_composite_table USING TIMESTAMP 0 WHERE TOKEN(a ',
choices=[')', ','])
self.trycompletions('DELETE FROM twenty_rows_composite_table USING TIMESTAMP 0 WHERE TOKEN(a) ',
choices=['>=', '<=', '=', '<', '>'])
self.trycompletions('DELETE FROM twenty_rows_composite_table USING TIMESTAMP 0 WHERE TOKEN(a) >= ',
choices=['false', 'true', '<pgStringLiteral>',
'token(', '-', '<float>', 'TOKEN',
'<identifier>', '<uuid>', '{', '[', 'NULL',
'<quotedStringLiteral>', '<blobLiteral>',
'<wholenumber>'])
self.trycompletions(('DELETE FROM twenty_rows_composite_table USING TIMESTAMP 0 WHERE '
'TOKEN(a) >= TOKEN(0) '),
choices=['AND', 'IF', ';'])
self.trycompletions(('DELETE FROM twenty_rows_composite_table USING TIMESTAMP 0 WHERE '
'TOKEN(a) >= TOKEN(0) IF '),
choices=['EXISTS', '<identifier>', '<quotedName>'])
self.trycompletions(('DELETE FROM twenty_rows_composite_table USING TIMESTAMP 0 WHERE '
'TOKEN(a) >= TOKEN(0) IF b '),
choices=['>=', '!=', '<=', 'IN', '[', '=', '<', '>'])
self.trycompletions(('DELETE FROM twenty_rows_composite_table USING TIMESTAMP 0 WHERE '
'TOKEN(a) >= TOKEN(0) IF b < 0 '),
choices=['AND', ';'])
self.trycompletions(('DELETE FROM twenty_rows_composite_table USING TIMESTAMP 0 WHERE '
'TOKEN(a) >= TOKEN(0) IF b < 0 AND '),
choices=['<identifier>', '<quotedName>'])
self.trycompletions(("DELETE FROM twenty_rows_composite_table USING TIMESTAMP 0 WHERE "
"b = 'eggs'"),
choices=['AND', 'IF', ';'])
def test_complete_in_batch(self):
pass
def test_complete_in_create_keyspace(self):
self.trycompletions('create keyspace ', '', choices=('<identifier>', '<quotedName>', 'IF'))
self.trycompletions('create keyspace moo ',
"WITH replication = {'class': '")
self.trycompletions('create keyspace "12SomeName" with ',
"replication = {'class': '")
self.trycompletions("create keyspace fjdkljf with foo=bar ", "",
choices=('AND', ';'))
self.trycompletions("create keyspace fjdkljf with foo=bar AND ",
"replication = {'class': '")
self.trycompletions("create keyspace moo with replication", " = {'class': '")
self.trycompletions("create keyspace moo with replication=", " {'class': '")
self.trycompletions("create keyspace moo with replication={", "'class':'")
self.trycompletions("create keyspace moo with replication={'class'", ":'")
self.trycompletions("create keyspace moo with replication={'class': ", "'")
self.trycompletions("create keyspace moo with replication={'class': '", "",
choices=self.strategies())
# ttl is an "unreserved keyword". should work
self.trycompletions("create keySPACE ttl with replication ="
"{ 'class' : 'SimpleStrategy'", ", 'replication_factor': ")
self.trycompletions("create keyspace ttl with replication ="
"{'class':'SimpleStrategy',", " 'replication_factor': ")
self.trycompletions("create keyspace \"ttl\" with replication ="
"{'class': 'SimpleStrategy', ", "'replication_factor': ")
self.trycompletions("create keyspace \"ttl\" with replication ="
"{'class': 'SimpleStrategy', 'repl", "ication_factor'")
self.trycompletions("create keyspace foo with replication ="
"{'class': 'SimpleStrategy', 'replication_factor': ", '',
choices=('<term>',))
self.trycompletions("create keyspace foo with replication ="
"{'class': 'SimpleStrategy', 'replication_factor': 1", '',
choices=('<term>',))
self.trycompletions("create keyspace foo with replication ="
"{'class': 'SimpleStrategy', 'replication_factor': 1 ", '}')
self.trycompletions("create keyspace foo with replication ="
"{'class': 'SimpleStrategy', 'replication_factor': 1, ",
'', choices=())
self.trycompletions("create keyspace foo with replication ="
"{'class': 'SimpleStrategy', 'replication_factor': 1} ",
'', choices=('AND', ';'))
self.trycompletions("create keyspace foo with replication ="
"{'class': 'NetworkTopologyStrategy', ", '',
choices=('<dc_name>',))
self.trycompletions("create keyspace \"PB and J\" with replication={"
"'class': 'NetworkTopologyStrategy'", ', ')
self.trycompletions("create keyspace PBJ with replication={"
"'class': 'NetworkTopologyStrategy'} and ",
"durable_writes = '")
def test_complete_in_string_literals(self):
# would be great if we could get a space after this sort of completion,
# but readline really wants to make things difficult for us
self.trycompletions("create keyspace blah with replication = {'class': 'Sim",
"pleStrategy'")
def test_complete_in_drop(self):
self.trycompletions('DR', immediate='OP ')
self.trycompletions('DROP ',
choices=['AGGREGATE', 'COLUMNFAMILY', 'FUNCTION',
'INDEX', 'KEYSPACE', 'ROLE', 'TABLE',
'TRIGGER', 'TYPE', 'USER'])
def test_complete_in_drop_keyspace(self):
self.trycompletions('DROP K', immediate='EYSPACE ')
quoted_keyspace = '"' + self.cqlsh.keyspace + '"'
self.trycompletions('DROP KEYSPACE ',
choices=['IF', quoted_keyspace])
self.trycompletions('DROP KEYSPACE ' + quoted_keyspace,
choices=[';'])
self.trycompletions('DROP KEYSPACE I',
immediate='F EXISTS ' + quoted_keyspace + ';')
def create_columnfamily_table_template(self, name):
"""Parameterized test for CREATE COLUMNFAMILY and CREATE TABLE. Since
they're synonyms, they should have the same completion behavior, so this
test avoids duplication between tests for the two statements."""
prefix = 'CREATE ' + name + ' '
quoted_keyspace = '"' + self.cqlsh.keyspace + '"'
self.trycompletions(prefix + '',
choices=['IF', quoted_keyspace, '<new_table_name>'])
self.trycompletions(prefix + 'IF ',
immediate='NOT EXISTS ')
self.trycompletions(prefix + 'IF NOT EXISTS ',
choices=['<new_table_name>', quoted_keyspace])
self.trycompletions(prefix + 'IF NOT EXISTS new_table ',
immediate='( ')
self.trycompletions(prefix + quoted_keyspace, choices=['.', '('])
self.trycompletions(prefix + quoted_keyspace + '( ',
choices=['<new_column_name>', '<identifier>',
'<quotedName>'])
self.trycompletions(prefix + quoted_keyspace + '.',
choices=['<new_table_name>'])
self.trycompletions(prefix + quoted_keyspace + '.new_table ',
immediate='( ')
self.trycompletions(prefix + quoted_keyspace + '.new_table ( ',
choices=['<new_column_name>', '<identifier>',
'<quotedName>'])
self.trycompletions(prefix + ' new_table ( ',
choices=['<new_column_name>', '<identifier>',
'<quotedName>'])
self.trycompletions(prefix + ' new_table (col_a ine',
immediate='t ')
self.trycompletions(prefix + ' new_table (col_a int ',
choices=[',', 'PRIMARY'])
self.trycompletions(prefix + ' new_table (col_a int P',
immediate='RIMARY KEY ')
self.trycompletions(prefix + ' new_table (col_a int PRIMARY KEY ',
choices=[')', ','])
self.trycompletions(prefix + ' new_table (col_a int PRIMARY KEY,',
choices=['<identifier>', '<quotedName>'])
self.trycompletions(prefix + ' new_table (col_a int PRIMARY KEY)',
immediate=' ')
self.trycompletions(prefix + ' new_table (col_a int PRIMARY KEY) ',
choices=[';', 'WITH'])
self.trycompletions(prefix + ' new_table (col_a int PRIMARY KEY) W',
immediate='ITH ')
self.trycompletions(prefix + ' new_table (col_a int PRIMARY KEY) WITH ',
choices=['bloom_filter_fp_chance', 'compaction',
'compression',
'dclocal_read_repair_chance',
'default_time_to_live', 'gc_grace_seconds',
'max_index_interval',
'memtable_flush_period_in_ms',
'read_repair_chance', 'CLUSTERING',
'COMPACT', 'caching', 'comment',
'min_index_interval', 'speculative_retry'])
self.trycompletions(prefix + ' new_table (col_a int PRIMARY KEY) WITH ',
choices=['bloom_filter_fp_chance', 'compaction',
'compression',
'dclocal_read_repair_chance',
'default_time_to_live', 'gc_grace_seconds',
'max_index_interval',
'memtable_flush_period_in_ms',
'read_repair_chance', 'CLUSTERING',
'COMPACT', 'caching', 'comment',
'min_index_interval', 'speculative_retry'])
self.trycompletions(prefix + ' new_table (col_a int PRIMARY KEY) WITH bloom_filter_fp_chance ',
immediate='= ')
self.trycompletions(prefix + ' new_table (col_a int PRIMARY KEY) WITH bloom_filter_fp_chance = ',
choices=['<float_between_0_and_1>'])
self.trycompletions(prefix + ' new_table (col_a int PRIMARY KEY) WITH compaction ',
immediate="= {'class': '")
self.trycompletions(prefix + " new_table (col_a int PRIMARY KEY) WITH compaction = "
+ "{'class': '",
choices=['SizeTieredCompactionStrategy',
'LeveledCompactionStrategy',
'DateTieredCompactionStrategy',
'TimeWindowCompactionStrategy'])
self.trycompletions(prefix + " new_table (col_a int PRIMARY KEY) WITH compaction = "
+ "{'class': 'S",
immediate="izeTieredCompactionStrategy'")
self.trycompletions(prefix + " new_table (col_a int PRIMARY KEY) WITH compaction = "
+ "{'class': 'SizeTieredCompactionStrategy",
immediate="'")
self.trycompletions(prefix + " new_table (col_a int PRIMARY KEY) WITH compaction = "
+ "{'class': 'SizeTieredCompactionStrategy'",
choices=['}', ','])
self.trycompletions(prefix + " new_table (col_a int PRIMARY KEY) WITH compaction = "
+ "{'class': 'SizeTieredCompactionStrategy', ",
immediate="'")
self.trycompletions(prefix + " new_table (col_a int PRIMARY KEY) WITH compaction = "
+ "{'class': 'SizeTieredCompactionStrategy', '",
choices=['bucket_high', 'bucket_low', 'class',
'enabled', 'max_threshold',
'min_sstable_size', 'min_threshold',
'tombstone_compaction_interval',
'tombstone_threshold',
'unchecked_tombstone_compaction'])
self.trycompletions(prefix + " new_table (col_a int PRIMARY KEY) WITH compaction = "
+ "{'class': 'SizeTieredCompactionStrategy'}",
choices=[';', 'AND'])
self.trycompletions(prefix + " new_table (col_a int PRIMARY KEY) WITH compaction = "
+ "{'class': 'SizeTieredCompactionStrategy'} AND ",
choices=['bloom_filter_fp_chance', 'compaction',
'compression',
'dclocal_read_repair_chance',
'default_time_to_live', 'gc_grace_seconds',
'max_index_interval',
'memtable_flush_period_in_ms',
'read_repair_chance', 'CLUSTERING',
'COMPACT', 'caching', 'comment',
'min_index_interval', 'speculative_retry'])
self.trycompletions(prefix + " new_table (col_a int PRIMARY KEY) WITH compaction = "
+ "{'class': 'DateTieredCompactionStrategy', '",
choices=['base_time_seconds', 'max_sstable_age_days',
'timestamp_resolution', 'min_threshold', 'class', 'max_threshold',
'tombstone_compaction_interval', 'tombstone_threshold',
'enabled', 'unchecked_tombstone_compaction',
'max_window_size_seconds'])
self.trycompletions(prefix + " new_table (col_a int PRIMARY KEY) WITH compaction = "
+ "{'class': 'TimeWindowCompactionStrategy', '",
choices=['compaction_window_unit', 'compaction_window_size',
'timestamp_resolution', 'min_threshold', 'class', 'max_threshold',
'tombstone_compaction_interval', 'tombstone_threshold',
'enabled', 'unchecked_tombstone_compaction',
'only_purge_repaired_tombstones'])
def test_complete_in_create_columnfamily(self):
self.trycompletions('CREATE C', choices=['COLUMNFAMILY', 'CUSTOM'])
self.trycompletions('CREATE CO', immediate='LUMNFAMILY ')
self.create_columnfamily_table_template('COLUMNFAMILY')
def test_complete_in_create_table(self):
self.trycompletions('CREATE T', choices=['TRIGGER', 'TABLE', 'TYPE'])
self.trycompletions('CREATE TA', immediate='BLE ')
self.create_columnfamily_table_template('TABLE')
def test_complete_in_describe(self):
"""
Tests for Cassandra-10733
"""
self.trycompletions('DES', immediate='C')
# quoted_keyspace = '"' + self.cqlsh.keyspace + '"'
self.trycompletions('DESCR', immediate='IBE ')
self.trycompletions('DESC TABLE ',
choices=['twenty_rows_table',
'ascii_with_special_chars', 'users',
'has_all_types', 'system.',
'empty_composite_table', 'empty_table',
'system_auth.', 'undefined_values_table',
'dynamic_columns',
'twenty_rows_composite_table',
'utf8_with_special_chars',
'system_traces.', 'songs',
'system_distributed.',
'"' + self.cqlsh.keyspace + '".'],
other_choices_ok=True)
self.trycompletions('DESC TYPE ',
choices=['system.',
'system_auth.',
'system_traces.',
'system_distributed.',
'address',
'phone_number',
'band_info_type',
'tags'],
other_choices_ok=True)
self.trycompletions('DESC FUNCTION ',
choices=['system.',
'system_auth.',
'system_traces.',
'system_distributed.',
'fbestband',
'fbestsong',
'fmax',
'fmin',
'"' + self.cqlsh.keyspace + '".'],
other_choices_ok=True)
self.trycompletions('DESC AGGREGATE ',
choices=['system.',
'system_auth.',
'system_traces.',
'system_distributed.',
'aggmin',
'aggmax',
'"' + self.cqlsh.keyspace + '".'],
other_choices_ok=True)
# Unfortunately these commented tests will not work. This is due to the keyspace name containing quotes;
# cqlsh auto-completes a DESC differently when the keyspace contains quotes. I'll leave the
# test here though in case we ever change this script to test using keyspace names without
# quotes
# self.trycompletions('DESC TABLE ' + '"' + self.cqlsh.keyspace + '"', immediate='.')
self.trycompletions('DESC TABLE ' + '"' + self.cqlsh.keyspace + '".',
choices=['twenty_rows_table',
'ascii_with_special_chars',
'users',
'has_all_types',
'empty_composite_table',
'empty_table',
'undefined_values_table',
'dynamic_columns',
'twenty_rows_composite_table',
'utf8_with_special_chars',
'songs'],
other_choices_ok=True)
# See comment above for DESC TABLE
# self.trycompletions('DESC TYPE ' + '"' + self.cqlsh.keyspace + '"', immediate='.')
self.trycompletions('DESC TYPE ' + '"' + self.cqlsh.keyspace + '".',
choices=['address',
'phone_number',
'band_info_type',
'tags'],
other_choices_ok=True)
# See comment above for DESC TABLE
# self.trycompletions('DESC FUNCTION ' + '"' + self.cqlsh.keyspace + '"', immediate='.f')
self.trycompletions('DESC FUNCTION ' + '"' + self.cqlsh.keyspace + '".', immediate='f')
self.trycompletions('DESC FUNCTION ' + '"' + self.cqlsh.keyspace + '".f',
choices=['fbestband',
'fbestsong',
'fmax',
'fmin'],
other_choices_ok=True)
# See comment above for DESC TABLE
# self.trycompletions('DESC AGGREGATE ' + '"' + self.cqlsh.keyspace + '"', immediate='.aggm')
self.trycompletions('DESC AGGREGATE ' + '"' + self.cqlsh.keyspace + '".', immediate='aggm')
self.trycompletions('DESC AGGREGATE ' + '"' + self.cqlsh.keyspace + '".aggm',
choices=['aggmin',
'aggmax'],
other_choices_ok=True)
def test_complete_in_drop_columnfamily(self):
pass
def test_complete_in_truncate(self):
pass
def test_complete_in_alter_columnfamily(self):
pass
def test_complete_in_use(self):
pass
def test_complete_in_create_index(self):
pass
def test_complete_in_drop_index(self):
pass
|
py | 1a370eba296ea9565dfce486af0b9571197624fc | from setuptools import find_packages, setup
setup(
name='src',
packages=find_packages(),
version='0.0.1',
description='Analysis of various TMS-induced networks',
author='Manjari Narayan',
license='BSD-3',
)
|
py | 1a370ecf31f1f67b678b087177b97bd8ad6b896a | '''
Examples of using binary_threshold mask
Modification History:
'''
## import packages
import numpy as np
import matplotlib.pyplot as plt
import gzip
import pickle
from pyechoplot.plotting import plot_Sv, plot_mask, save_png_plot
## import pyechomask modules
from pyechomask.masks import binary_pulse, binary_threshold
from pyechomask.manipulate import merge_binary
## read raw multi-frequency EK60 data
def getSv(filepath):
f = gzip.open(filepath,'rb')
obj = pickle.load(f,encoding = 'bytes')
f.close()
return obj
## read Sv
Sv18 = getSv('./data/PS_Sv18.pklz')
Sv38 = getSv('./data/PS_Sv38.pklz')
## plot 18 kHz echogram
plot_Sv(Sv18)
plt.title("Sv18")
plt.show()
## create masks
pulse_mask_18 = binary_pulse(Sv18)
threshold_mask_18 = binary_threshold(Sv18,-75)
threshold_mask_38 = binary_threshold(Sv38,-85)
## plot 18 kHz echogram with pulse mask
plot_Sv(Sv18,mask = pulse_mask_18)
plt.title("18 kHz echogram with pulse mask")
plt.show()
#### create composite masks
## presence absence mask
pa_mask = threshold_mask_18 + threshold_mask_38
pa_mask[pa_mask > 0] = 1
plot_Sv(Sv18,mask = pa_mask)
plt.title("Presence or absense mask")
plt.show()
## merge masks
merged_mask = merge_binary([threshold_mask_18,threshold_mask_38])
## this time, plot just the mask
plot_mask(merged_mask)
plt.title("Merged mask")
plt.show()
# save
save_png_plot('./','merged masks')
#In this example, the merged_mask has 4 values (0,1,2,3).
#Their binary representations are:
for i in np.unique(merged_mask):
print(i,bin(i)[2:].ljust(2,'0'))
#For example, cells with a value of 3 (binary 11) have a value of 1 in the
#first two binary masks.
#In that case, the Sv value is larger than -75 dB at 18 kHz and larger
#than -85 dB at 38 kHz.
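# Hedged follow-up (added example, not in the original script): decode one merged
# value back into its per-mask flags, assuming -- as the ljust padding above
# suggests -- that the first character corresponds to the first mask passed to
# merge_binary (here the 18 kHz threshold mask) and the second to the 38 kHz one.
merged_value = 3
bits = bin(merged_value)[2:].ljust(2, '0')
print("18 kHz threshold mask flagged:", bits[0] == '1')
print("38 kHz threshold mask flagged:", bits[1] == '1')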
|
py | 1a371007d34466e75431b9399af850acd057f015 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from os import environ
from _internal_utils import exec_bash, pf, lines
from fabric.api import cd, settings, sudo
if not environ.get("PY2"):
environ["PY2"] = "2.7.15"
if not environ.get("PY3"):
environ["PY3"] = "3.6.8"
@exec_bash
def depend_redhat():
"""
yum install -y gcc make
yum install -y zlib-devel bzip2-devel openssl-devel ncurses-devel sqlite-devel readline-devel gdbm-devel xz-devel libffi-devel
"""
@exec_bash
def depend_debian():
"""
apt-get update
apt-get install -y gcc make
apt-get install -y libreadline-gplv2-dev libncursesw5-dev libssl-dev libsqlite3-dev tk-dev libgdbm-dev libc6-dev libbz2-dev
"""
def download_py2():
"""
curl -sL https://www.python.org/ftp/python/{var}/Python-{var}.tgz | tar -xz
"""
with cd("/usr/src"), settings(warn_only=True):
for line in lines(download_py2):
sudo(line.format(var=environ["PY2"]))
def download_py3():
"""
curl -sL https://www.python.org/ftp/python/{var}/Python-{var}.tgz | tar -xz
"""
with cd("/usr/src"), settings(warn_only=True):
for line in lines(download_py3):
sudo(line.format(var=environ["PY3"]))
def depend():
depend_map = [
("debian", depend_debian),
("redhat", depend_redhat),
]
dict(depend_map)[pf()]()
def setup_pip():
"""
curl -o get-pip.py https://bootstrap.pypa.io/get-pip.py
python3 get-pip.py
python get-pip.py
"""
with cd("/usr/src"), settings(warn_only=True):
for line in lines(setup_pip):
sudo(line)
def install_py2():
"""
./configure --enable-optimizations --enable-shared
make -s -j2
make install
ln -sf /usr/local/bin/python /usr/bin/python
echo "/usr/local/lib/" > /etc/ld.so.conf.d/python3.conf
ldconfig
"""
depend()
download_py2()
with cd("/usr/src/Python-{var}".format(var=environ["PY2"])), settings(warn_only=True):
for line in lines(install_py2):
sudo(line)
def install_py3():
"""
./configure --enable-optimizations --enable-shared
make -s -j2
make install
ln -sf /usr/local/bin/python3 /usr/bin/python3
echo "/usr/local/lib/" > /etc/ld.so.conf.d/python3.conf
ldconfig
"""
depend()
download_py3()
with cd("/usr/src/Python-{var}".format(var=environ["PY3"])), settings(warn_only=True):
for line in lines(install_py3):
sudo(line)
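# Usage note (assumption, not part of the original fabfile): with Fabric 1.x the
# functions above are plain fab tasks, so they would typically be run from a
# shell, for example:
#   fab --list                      # show available tasks
#   PY3=3.6.8 fab -H root@buildhost install_py3
# The PY2/PY3 environment variables defined at the top select which CPython
# tarball is downloaded and built.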
|
py | 1a37111f547c88bf2b12ebe98f37babd2eb63091 | import noise
import numpy as np
from PIL import Image
import math
import io
import json
from scipy.misc import toimage
shape = (1024, 1024)
scale = 150
octaves = 4
persistence = 0.5
lacunarity = 2.0
threshold = 0.05
seed = np.random.randint(0, 500)
black = [0, 0, 0]
blue = [65,105,225]
green = [34,139,34]
beach = [238, 214, 175]
snow = [255, 250, 250]
mountain = [139, 137, 137]
lightblue = [0,191,255]
darkgreen = [0,100,0]
sandy = [210,180,140]
def add_color2(world):
color_world = np.zeros(world.shape+(3,))
for i in range(shape[0]):
for j in range(shape[1]):
if world[i][j] < threshold + 0.05:
color_world[i][j] = blue
elif world[i][j] < threshold + 0.055:
color_world[i][j] = sandy
elif world[i][j] < threshold + 0.1:
color_world[i][j] = beach
elif world[i][j] < threshold + 0.25:
color_world[i][j] = green
elif world[i][j] < threshold + 0.6:
color_world[i][j] = darkgreen
elif world[i][j] < threshold + 0.7:
color_world[i][j] = mountain
elif world[i][j] < threshold + 1.0:
color_world[i][j] = snow
return color_world
world = np.zeros(shape)
for i in range(shape[0]):
for j in range(shape[1]):
world[i][j] = noise.pnoise2(i / scale,
j / scale,
octaves=octaves,
persistence=persistence,
lacunarity=lacunarity,
repeatx=2048,
repeaty=2048,
base=seed)
center_x, center_y = shape[1] // 2, shape[0] // 2
circle_grad = np.zeros_like(world)
for y in range(world.shape[0]):
for x in range(world.shape[1]):
distx = abs(x - center_x)
disty = abs(y - center_y)
dist = math.sqrt(distx*distx + disty*disty)
circle_grad[y][x] = dist
# get it between -1 and 1
max_grad = np.max(circle_grad)
circle_grad = circle_grad / max_grad
circle_grad -= 0.5
circle_grad *= 2.0
circle_grad = -circle_grad
# shrink gradient
for y in range(world.shape[0]):
for x in range(world.shape[1]):
if circle_grad[y][x] > 0:
circle_grad[y][x] *= 20
# get it between 0 and 1
max_grad = np.max(circle_grad)
circle_grad = circle_grad / max_grad
with io.open("grad.json", "w") as file:
file.write(json.dumps({ "grad": circle_grad.tolist()}))
toimage(circle_grad).show()
world_noise = np.zeros_like(world)
for i in range(shape[0]):
for j in range(shape[1]):
world_noise[i][j] = (world[i][j] * circle_grad[i][j])
if world_noise[i][j] > 0:
world_noise[i][j] *= 20
# get it between 0 and 1
max_grad = np.max(world_noise)
world_noise = world_noise / max_grad
island_world_grad = add_color2(world_noise)
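# Hedged addition (not in the original script): persist the rendered island and
# the blended noise field so they can be inspected later, mirroring the
# grad.json dump above. toimage() returns a PIL Image, so save() writes a PNG.
toimage(island_world_grad).save("island_world_grad.png")
with io.open("world_noise.json", "w") as file:
    file.write(json.dumps({"world_noise": world_noise.tolist()}))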
toimage(island_world_grad).show() |
py | 1a37114b654f284255bcce4de68dffab841505c3 | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
#
# Astropy documentation build configuration file.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this file.
#
# All configuration values have a default. Some values are defined in
# the global Astropy configuration which is loaded here before anything else.
# See astropy.sphinx.conf for which values are set there.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('..'))
# IMPORTANT: the above commented section was generated by sphinx-quickstart, but
# is *NOT* appropriate for astropy or Astropy affiliated packages. It is left
# commented out with this explanation to make it clear why this should not be
# done. If the sys.path entry above is added, when the astropy.sphinx.conf
# import occurs, it will import the *source* version of astropy instead of the
# version installed (if invoked as "make html" or directly with sphinx), or the
# version in the build directory (if "python setup.py build_sphinx" is used).
# Thus, any C-extensions that are needed to build the documentation will *not*
# be accessible, and the documentation will not build correctly.
import datetime
import os
import sys
try:
import astropy_helpers
except ImportError:
# Building from inside the docs/ directory?
if os.path.basename(os.getcwd()) == 'docs':
a_h_path = os.path.abspath(os.path.join('..', 'astropy_helpers'))
if os.path.isdir(a_h_path):
sys.path.insert(1, a_h_path)
# Load all of the global Astropy configuration
from astropy_helpers.sphinx.conf import *
# Get configuration information from setup.cfg
try:
from ConfigParser import ConfigParser
except ImportError:
from configparser import ConfigParser
conf = ConfigParser()
conf.read([os.path.join(os.path.dirname(__file__), '..', 'setup.cfg')])
setup_cfg = dict(conf.items('metadata'))
# -- General configuration ----------------------------------------------------
del intersphinx_mapping['scipy']
del intersphinx_mapping['h5py']
intersphinx_mapping['healpy'] = ('http://healpy.readthedocs.io/en/latest/', None)
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.2'
# To perform a Sphinx version check that needs to be more specific than
# major.minor, call `check_sphinx_version("x.y.z")` here.
# check_sphinx_version("1.2.1")
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns.append('_templates')
# This is added to the end of RST files - a good place to put substitutions to
# be used globally.
rst_epilog += """
"""
# -- Project information ------------------------------------------------------
# This does not *have* to match the package name, but typically does
project = setup_cfg['package_name']
author = setup_cfg['author']
copyright = '{0}, {1}'.format(
datetime.datetime.now().year, setup_cfg['author'])
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
__import__(setup_cfg['package_name'])
package = sys.modules[setup_cfg['package_name']]
# The short X.Y version.
version = package.__version__.split('-', 1)[0]
# The full version, including alpha/beta/rc tags.
release = package.__version__
# -- Options for HTML output ---------------------------------------------------
# A NOTE ON HTML THEMES
# The global astropy configuration uses a custom theme, 'bootstrap-astropy',
# which is installed along with astropy. A different theme can be used or
# the options for this theme can be modified by overriding some of the
# variables set in the global configuration. The variables set in the
# global configuration are listed below, commented out.
# Add any paths that contain custom themes here, relative to this directory.
# To use a different custom theme, add the directory containing the theme.
# html_theme_path = []
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes. To override the custom theme, set this to the
# name of a builtin theme or the name of a custom theme in html_theme_path.
#html_theme = None
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = ''
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = ''
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = '{0} v{1}'.format(project, release)
# Output file base name for HTML help builder.
htmlhelp_basename = project + 'doc'
# -- Options for LaTeX output --------------------------------------------------
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [('index', project + '.tex', project + u' Documentation',
author, 'manual')]
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [('index', project.lower(), project + u' Documentation',
[author], 1)]
## -- Options for the edit_on_github extension ----------------------------------------
if eval(setup_cfg.get('edit_on_github')):
extensions += ['astropy_helpers.sphinx.ext.edit_on_github']
versionmod = __import__(setup_cfg['package_name'] + '.version')
edit_on_github_project = setup_cfg['github_project']
if versionmod.version.release:
edit_on_github_branch = "v" + versionmod.version.version
else:
edit_on_github_branch = "master"
edit_on_github_source_root = ""
edit_on_github_doc_root = "docs"
|
py | 1a371287871cb971040cfdb8b5b00cced4cfb53c | """
Copyright (c) Contributors to the Open 3D Engine Project.
For complete copyright and license terms please see the LICENSE at the root of this distribution.
SPDX-License-Identifier: Apache-2.0 OR MIT
"""
from __future__ import annotations
import typing
from aws_metrics.aws_metrics_stack import AWSMetricsStack
from aws_metrics.real_time_data_processing import RealTimeDataProcessing
from aws_metrics.data_ingestion import DataIngestion
from aws_metrics.dashboard import Dashboard
from aws_metrics.data_lake_integration import DataLakeIntegration
from aws_metrics.batch_processing import BatchProcessing
from aws_metrics.batch_analytics import BatchAnalytics
class PolicyStatementsBuilderInterface:
"""
Build the policy statement list for the AWSMetrics gem
"""
def __init__(self):
self._policy_statement_mapping = dict()
def add_aws_metrics_stack_policy_statements(self, component: AWSMetricsStack) -> PolicyStatementsBuilderInterface:
"""
Add the policy statements related with the CloudFormation stack for basic users.
:param component: CloudFormation stack created by the metrics gem.
:return: The policy statement builder itself.
"""
return self
def add_data_ingestion_policy_statements(self, component: DataIngestion) -> PolicyStatementsBuilderInterface:
"""
Add the policy statement related with the data ingestion component for basic users.
:param component: Data ingestion component created by the metrics gem.
:return: The policy statement builder itself.
"""
return self
def add_real_time_data_processing_policy_statements(
self,
component: RealTimeDataProcessing) -> PolicyStatementsBuilderInterface:
"""
Add the policy statements related with the real-time data processing component for basic users.
:param component: Real-time data processing component created by the metrics gem.
:return: The policy statement builder itself.
"""
return self
def add_dashboard_policy_statements(self, component: Dashboard) -> PolicyStatementsBuilderInterface:
"""
Add the policy statements related with the CloudWatch dashboard component for basic users.
:param component: CloudWatch dashboard component created by the metrics gem.
:return: The policy statement builder itself.
"""
return self
def add_data_lake_integration_policy_statements(
self,
component: DataLakeIntegration) -> PolicyStatementsBuilderInterface:
"""
Add the policy statements related with the data lake integration component for basic users.
:param component: Data lake integration component created by the metrics gem.
:return: The policy statement builder itself.
"""
return self
def add_batch_processing_policy_statements(self, component: BatchProcessing) -> PolicyStatementsBuilderInterface:
"""
Add the policy statements related with the batch processing component for basic users.
:param component: Batch processing component created by the metrics gem.
:return: The policy statement builder itself.
"""
return self
def add_batch_analytics_policy_statements(self, component: BatchAnalytics) -> PolicyStatementsBuilderInterface:
"""
Add the policy statements related with the batch analytics component for basic users.
:param component: Batch analytics component created by the metrics gem.
:return: The policy statement builder itself.
"""
return self
def build(self) -> typing.List:
"""
Retrieve the policy statement list generated by the builder.
:return: The policy statement list.
"""
return [value for key, value in self._policy_statement_mapping.items()]
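# Hedged usage sketch (added, not part of the original module): each add_* method
# returns the builder itself, so a concrete subclass that populates
# self._policy_statement_mapping inside the add_* methods can be chained, e.g.
#   statements = (builder.add_data_ingestion_policy_statements(data_ingestion)
#                        .add_dashboard_policy_statements(dashboard)
#                        .build())
# where `builder`, `data_ingestion` and `dashboard` are hypothetical instances.
if __name__ == "__main__":
    # The bare interface holds an empty mapping, so build() yields no statements.
    assert PolicyStatementsBuilderInterface().build() == []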
|
py | 1a3712931b1fd87c2c29b0013b70d1f599c60766 | """This program unit tests the command-line processing capabilities of the
drake_cc_googletest bazel macro, by running
`bazel-bin/drake/common/drake_cc_googletest_main_test`
with a variety of command-line flags.
"""
import re
import subprocess
import os
import sys
import unittest
class TestGtestMain(unittest.TestCase):
def setUp(self):
self._main_exe, = sys.argv[1:]
self.assertTrue(
os.path.exists(self._main_exe),
"Could not find " + self._main_exe)
def _check_call(self, args, expected_returncode=0):
"""Run _main_exe with the given args; return output.
"""
try:
output = subprocess.check_output(
[self._main_exe] + args,
stderr=subprocess.STDOUT)
returncode = 0
except subprocess.CalledProcessError as e:
output = e.output
returncode = e.returncode
self.assertEqual(
returncode, expected_returncode,
"Expected returncode %r from %r but got %r with output %r" % (
expected_returncode, args, returncode, output))
return output.decode('utf8')
def test_pass(self):
# The device under test should pass when -magic_number=1.0 is present.
self._check_call(["-magic_number=1.0"], expected_returncode=0)
def test_no_arguments(self):
# The device under test should fail when -magic_number=1.0 is missing.
output = self._check_call([], expected_returncode=1)
self.assertTrue("Expected equality of these values:\n"
" FLAGS_magic_number" in output)
def test_help(self):
# The help string should mention all options. Just spot-check for one
# option from each expected contributor.
output = self._check_call([
"--help",
], expected_returncode=1)
self.assertGreater(len(output), 1000)
self.assertTrue("Using drake_cc_googletest_main" in output)
self.assertTrue("-gtest_list_tests" in output)
self.assertTrue("-spdlog_level" in output)
self.assertTrue("-magic_number" in output)
def test_logging(self):
# The spdlog flags should be able to enable debug logging.
# By default, there is no debug log.
log_message = "[debug] Cross your fingers for the magic_number 1"
args = ["-magic_number=1.0"]
output = self._check_call(args, expected_returncode=0)
self.assertFalse(log_message in output, output)
# Once enabled, we see a debug log.
args.append("-spdlog_level=debug")
output = self._check_call(args, expected_returncode=0)
self.assertTrue(log_message in output, output)
|
py | 1a371474e2ad097d0a2a93fb2e01d71d9577be6e | # Copyright (c) 2019-2020, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import cudf
import numpy as np
import pytest
import random
import rmm
from numba import cuda
from cuml.ensemble import RandomForestClassifier as curfc
from cuml.ensemble import RandomForestRegressor as curfr
from cuml.metrics import r2_score
from cuml.test.utils import get_handle, unit_param, \
quality_param, stress_param
import cuml.common.logger as logger
from sklearn.ensemble import RandomForestClassifier as skrfc
from sklearn.ensemble import RandomForestRegressor as skrfr
from sklearn.metrics import accuracy_score, mean_squared_error
from sklearn.datasets import fetch_california_housing, \
make_classification, make_regression
from sklearn.model_selection import train_test_split
@pytest.fixture(
scope="session",
params=[
unit_param({'n_samples': 350, 'n_features': 20, 'n_informative': 10}),
quality_param({'n_samples': 5000, 'n_features': 200,
'n_informative': 80}),
stress_param({'n_samples': 500000, 'n_features': 400,
'n_informative': 180})
])
def small_clf(request):
X, y = make_classification(n_samples=request.param['n_samples'],
n_features=request.param['n_features'],
n_clusters_per_class=1,
n_informative=request.param['n_informative'],
random_state=123, n_classes=2)
return X, y
@pytest.fixture(
scope="session",
params=[
unit_param({'n_samples': 500, 'n_features': 20, 'n_informative': 10}),
quality_param({'n_samples': 5000, 'n_features': 200,
'n_informative': 50}),
stress_param({'n_samples': 500000, 'n_features': 400,
'n_informative': 100})
])
def large_clf(request):
X, y = make_classification(n_samples=request.param['n_samples'],
n_features=request.param['n_features'],
n_clusters_per_class=1,
n_informative=request.param['n_informative'],
random_state=123, n_classes=2)
return X, y
@pytest.fixture(
scope="session",
params=[
unit_param({'n_samples': 1500, 'n_features': 20, 'n_informative': 10}),
quality_param({'n_samples': 12000, 'n_features': 200,
'n_informative': 100}),
stress_param({'n_samples': 500000, 'n_features': 500,
'n_informative': 350})
])
def large_reg(request):
X, y = make_regression(n_samples=request.param['n_samples'],
n_features=request.param['n_features'],
n_informative=request.param['n_informative'],
random_state=123)
return X, y
special_reg_params = [
unit_param({'mode': 'unit', 'n_samples': 500,
'n_features': 20, 'n_informative': 10}),
quality_param({'mode': 'quality', 'n_samples': 500,
'n_features': 20, 'n_informative': 10}),
quality_param({'mode': 'quality', 'n_features': 200,
'n_informative': 50}),
stress_param({'mode': 'stress', 'n_samples': 500,
'n_features': 20, 'n_informative': 10}),
stress_param({'mode': 'stress', 'n_features': 200,
'n_informative': 50}),
stress_param({'mode': 'stress', 'n_samples': 1000,
'n_features': 400, 'n_informative': 100})
]
@pytest.fixture(
scope="session",
params=special_reg_params)
def special_reg(request):
if request.param['mode'] == 'quality':
X, y = fetch_california_housing(return_X_y=True)
else:
X, y = make_regression(n_samples=request.param['n_samples'],
n_features=request.param['n_features'],
n_informative=request.param['n_informative'],
random_state=123)
return X, y
@pytest.mark.parametrize('rows_sample', [unit_param(1.0), quality_param(0.90),
stress_param(0.95)])
@pytest.mark.parametrize('datatype', [np.float32])
@pytest.mark.parametrize('split_algo', [0, 1])
@pytest.mark.parametrize('max_features', [1.0, 'auto', 'log2', 'sqrt'])
def test_rf_classification(small_clf, datatype, split_algo,
rows_sample, max_features):
use_handle = True
X, y = small_clf
X = X.astype(datatype)
y = y.astype(np.int32)
X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.8,
random_state=0)
# Create a handle for the cuml model
handle, stream = get_handle(use_handle, n_streams=1)
# Initialize, fit and predict using cuML's
# random forest classification model
cuml_model = curfc(max_features=max_features, rows_sample=rows_sample,
n_bins=16, split_algo=split_algo, split_criterion=0,
min_rows_per_node=2, seed=123, n_streams=1,
n_estimators=40, handle=handle, max_leaves=-1,
max_depth=16)
cuml_model.fit(X_train, y_train)
fil_preds = cuml_model.predict(X_test,
predict_model="GPU",
output_class=True,
threshold=0.5,
algo='auto')
cu_preds = cuml_model.predict(X_test, predict_model="CPU")
fil_preds = np.reshape(fil_preds, np.shape(cu_preds))
cuml_acc = accuracy_score(y_test, cu_preds)
fil_acc = accuracy_score(y_test, fil_preds)
if X.shape[0] < 500000:
sk_model = skrfc(n_estimators=40,
max_depth=16,
min_samples_split=2, max_features=max_features,
random_state=10)
sk_model.fit(X_train, y_train)
sk_preds = sk_model.predict(X_test)
sk_acc = accuracy_score(y_test, sk_preds)
assert fil_acc >= (sk_acc - 0.07)
assert fil_acc >= (cuml_acc - 0.02)
@pytest.mark.parametrize('rows_sample', [unit_param(1.0), quality_param(0.90),
stress_param(0.95)])
@pytest.mark.parametrize('datatype', [np.float32])
@pytest.mark.parametrize('split_algo', [0, 1])
@pytest.mark.parametrize('max_features', [1.0, 'auto', 'log2', 'sqrt'])
def test_rf_regression(special_reg, datatype, split_algo, max_features,
rows_sample):
use_handle = True
X, y = special_reg
X = X.astype(datatype)
y = y.astype(datatype)
X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.8,
random_state=0)
# Create a handle for the cuml model
handle, stream = get_handle(use_handle, n_streams=1)
# Initialize and fit using cuML's random forest regression model
cuml_model = curfr(max_features=max_features, rows_sample=rows_sample,
n_bins=16, split_algo=split_algo, split_criterion=2,
min_rows_per_node=2, seed=123, n_streams=1,
n_estimators=50, handle=handle, max_leaves=-1,
max_depth=16, accuracy_metric='mse')
cuml_model.fit(X_train, y_train)
# predict using FIL
fil_preds = cuml_model.predict(X_test, predict_model="GPU")
cu_preds = cuml_model.predict(X_test, predict_model="CPU")
fil_preds = np.reshape(fil_preds, np.shape(cu_preds))
cu_r2 = r2_score(y_test, cu_preds, convert_dtype=datatype)
fil_r2 = r2_score(y_test, fil_preds, convert_dtype=datatype)
# Initialize, fit and predict using
# sklearn's random forest regression model
if X.shape[0] < 1000: # mode != "stress"
sk_model = skrfr(n_estimators=50, max_depth=16,
min_samples_split=2, max_features=max_features,
random_state=10)
sk_model.fit(X_train, y_train)
sk_preds = sk_model.predict(X_test)
sk_r2 = r2_score(y_test, sk_preds, convert_dtype=datatype)
assert fil_r2 >= (sk_r2 - 0.07)
assert fil_r2 >= (cu_r2 - 0.02)
@pytest.mark.parametrize('datatype', [np.float32])
def test_rf_classification_seed(small_clf, datatype):
X, y = small_clf
X = X.astype(datatype)
y = y.astype(np.int32)
X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.8,
random_state=0)
for i in range(8):
seed = random.randint(100, 1e5)
# Initialize, fit and predict using cuML's
# random forest classification model
cu_class = curfc(seed=seed, n_streams=1)
cu_class.fit(X_train, y_train)
# predict using FIL
fil_preds_orig = cu_class.predict(X_test,
predict_model="GPU")
cu_preds_orig = cu_class.predict(X_test,
predict_model="CPU")
cu_acc_orig = accuracy_score(y_test, cu_preds_orig)
fil_preds_orig = np.reshape(fil_preds_orig, np.shape(cu_preds_orig))
fil_acc_orig = accuracy_score(y_test, fil_preds_orig)
# Initialize, fit and predict using cuML's
# random forest classification model
cu_class2 = curfc(seed=seed, n_streams=1)
cu_class2.fit(X_train, y_train)
# predict using FIL
fil_preds_rerun = cu_class2.predict(X_test,
predict_model="GPU")
cu_preds_rerun = cu_class2.predict(X_test, predict_model="CPU")
cu_acc_rerun = accuracy_score(y_test, cu_preds_rerun)
fil_preds_rerun = np.reshape(fil_preds_rerun, np.shape(cu_preds_rerun))
fil_acc_rerun = accuracy_score(y_test, fil_preds_rerun)
assert fil_acc_orig == fil_acc_rerun
assert cu_acc_orig == cu_acc_rerun
assert (fil_preds_orig == fil_preds_rerun).all()
assert (cu_preds_orig == cu_preds_rerun).all()
@pytest.mark.parametrize('datatype', [(np.float64, np.float32),
(np.float32, np.float64)])
@pytest.mark.parametrize('convert_dtype', [True, False])
def test_rf_classification_float64(small_clf, datatype, convert_dtype):
X, y = small_clf
X = X.astype(datatype[0])
y = y.astype(np.int32)
X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.8,
random_state=0)
X_test = X_test.astype(datatype[1])
# Initialize, fit and predict using cuML's
# random forest classification model
cuml_model = curfc()
cuml_model.fit(X_train, y_train)
cu_preds = cuml_model.predict(X_test, predict_model="CPU")
cu_acc = accuracy_score(y_test, cu_preds)
# sklearn random forest classification model
# initialization, fit and predict
if X.shape[0] < 500000:
sk_model = skrfc(max_depth=16, random_state=10)
sk_model.fit(X_train, y_train)
sk_preds = sk_model.predict(X_test)
sk_acc = accuracy_score(y_test, sk_preds)
assert cu_acc >= (sk_acc - 0.07)
# predict using cuML's GPU based prediction
if datatype[0] == np.float32 and convert_dtype:
fil_preds = cuml_model.predict(X_test, predict_model="GPU",
convert_dtype=convert_dtype)
fil_preds = np.reshape(fil_preds, np.shape(cu_preds))
fil_acc = accuracy_score(y_test, fil_preds)
assert fil_acc >= (cu_acc - 0.02)
else:
with pytest.raises(TypeError):
fil_preds = cuml_model.predict(X_test, predict_model="GPU",
convert_dtype=convert_dtype)
@pytest.mark.parametrize('datatype', [(np.float64, np.float32),
(np.float32, np.float64)])
def test_rf_regression_float64(large_reg, datatype):
X, y = large_reg
X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.8,
random_state=0)
X_train = X_train.astype(datatype[0])
y_train = y_train.astype(datatype[0])
X_test = X_test.astype(datatype[1])
y_test = y_test.astype(datatype[1])
# Initialize, fit and predict using cuML's
# random forest classification model
cuml_model = curfr()
cuml_model.fit(X_train, y_train)
cu_preds = cuml_model.predict(X_test, predict_model="CPU")
cu_r2 = r2_score(y_test, cu_preds, convert_dtype=datatype[0])
# sklearn random forest classification model
# initialization, fit and predict
if X.shape[0] < 500000:
sk_model = skrfr(max_depth=16, random_state=10)
sk_model.fit(X_train, y_train)
sk_preds = sk_model.predict(X_test)
sk_r2 = r2_score(y_test, sk_preds, convert_dtype=datatype[0])
assert cu_r2 >= (sk_r2 - 0.09)
# predict using cuML's GPU based prediction
if datatype[0] == np.float32:
fil_preds = cuml_model.predict(X_test, predict_model="GPU",
convert_dtype=True)
fil_preds = np.reshape(fil_preds, np.shape(cu_preds))
fil_r2 = r2_score(y_test, fil_preds, convert_dtype=datatype[0])
assert fil_r2 >= (cu_r2 - 0.02)
    # convert_dtype=False must raise a TypeError here because X_test's dtype differs from the training dtype
with pytest.raises(TypeError):
fil_preds = cuml_model.predict(X_test, predict_model="GPU",
convert_dtype=False)
@pytest.mark.parametrize('datatype', [(np.float32, np.float32)])
@pytest.mark.parametrize('column_info', [unit_param([20, 10]),
quality_param([200, 100]),
stress_param([500, 350])])
@pytest.mark.parametrize('nrows', [unit_param(500), quality_param(5000),
stress_param(500000)])
@pytest.mark.parametrize('n_classes', [10])
@pytest.mark.parametrize('type', ['dataframe', 'numpy'])
def test_rf_classification_multi_class(datatype, column_info, nrows,
n_classes, type):
ncols, n_info = column_info
X, y = make_classification(n_samples=nrows, n_features=ncols,
n_clusters_per_class=1, n_informative=n_info,
random_state=0, n_classes=n_classes)
X = X.astype(datatype[0])
y = y.astype(np.int32)
X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.8,
random_state=0)
X_test = X_test.astype(datatype[1])
# Initialize, fit and predict using cuML's
# random forest classification model
cuml_model = curfc()
if type == 'dataframe':
X_train_df = cudf.DataFrame.from_gpu_matrix(rmm.to_device(X_train))
y_train_df = cudf.Series(y_train)
X_test_df = cudf.DataFrame.from_gpu_matrix(rmm.to_device(X_test))
cuml_model.fit(X_train_df, y_train_df)
cu_preds = cuml_model.predict(X_test_df,
predict_model="CPU").to_array()
else:
cuml_model.fit(X_train, y_train)
cu_preds = cuml_model.predict(X_test, predict_model="CPU")
cu_acc = accuracy_score(y_test, cu_preds)
# sklearn random forest classification model
# initialization, fit and predict
if nrows < 500000:
sk_model = skrfc(max_depth=16, random_state=10)
sk_model.fit(X_train, y_train)
sk_preds = sk_model.predict(X_test)
sk_acc = accuracy_score(y_test, sk_preds)
assert cu_acc >= (sk_acc - 0.07)
@pytest.mark.parametrize('datatype', [np.float32])
@pytest.mark.parametrize('fil_sparse_format', ['not_supported', True,
'auto', False])
@pytest.mark.parametrize('algo', ['auto', 'naive', 'tree_reorg',
'batch_tree_reorg'])
def test_rf_classification_sparse(small_clf, datatype,
fil_sparse_format, algo):
use_handle = True
num_treees = 50
X, y = small_clf
X = X.astype(datatype)
y = y.astype(np.int32)
X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.8,
random_state=0)
# Create a handle for the cuml model
handle, stream = get_handle(use_handle, n_streams=1)
# Initialize, fit and predict using cuML's
# random forest classification model
cuml_model = curfc(n_bins=16, split_criterion=0,
min_rows_per_node=2, seed=123, n_streams=1,
n_estimators=num_treees, handle=handle, max_leaves=-1,
max_depth=40)
cuml_model.fit(X_train, y_train)
if ((not fil_sparse_format or algo == 'tree_reorg' or
algo == 'batch_tree_reorg') or
fil_sparse_format == 'not_supported'):
with pytest.raises(ValueError):
fil_preds = cuml_model.predict(X_test,
predict_model="GPU",
output_class=True,
threshold=0.5,
fil_sparse_format=fil_sparse_format,
algo=algo)
else:
fil_preds = cuml_model.predict(X_test,
predict_model="GPU",
output_class=True,
threshold=0.5,
fil_sparse_format=fil_sparse_format,
algo=algo)
fil_preds = np.reshape(fil_preds, np.shape(y_test))
fil_acc = accuracy_score(y_test, fil_preds)
fil_model = cuml_model.convert_to_fil_model()
input_type = 'numpy'
fil_model_preds = fil_model.predict(X_test,
output_type=input_type)
fil_model_acc = accuracy_score(y_test, fil_model_preds)
assert fil_acc == fil_model_acc
tl_model = cuml_model.convert_to_treelite_model()
assert num_treees == tl_model.num_trees
assert X.shape[1] == tl_model.num_features
if X.shape[0] < 500000:
sk_model = skrfc(n_estimators=50,
max_depth=40,
min_samples_split=2,
random_state=10)
sk_model.fit(X_train, y_train)
sk_preds = sk_model.predict(X_test)
sk_acc = accuracy_score(y_test, sk_preds)
assert fil_acc >= (sk_acc - 0.07)
@pytest.mark.parametrize('datatype', [np.float32])
@pytest.mark.parametrize('fil_sparse_format', ['not_supported', True,
'auto', False])
@pytest.mark.parametrize('algo', ['auto', 'naive', 'tree_reorg',
'batch_tree_reorg'])
def test_rf_regression_sparse(special_reg, datatype, fil_sparse_format, algo):
use_handle = True
num_treees = 50
X, y = special_reg
X = X.astype(datatype)
y = y.astype(datatype)
X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.8,
random_state=0)
# Create a handle for the cuml model
handle, stream = get_handle(use_handle, n_streams=1)
# Initialize and fit using cuML's random forest regression model
cuml_model = curfr(n_bins=16, split_criterion=2,
min_rows_per_node=2, seed=123, n_streams=1,
n_estimators=num_treees, handle=handle, max_leaves=-1,
max_depth=40, accuracy_metric='mse')
cuml_model.fit(X_train, y_train)
# predict using FIL
if ((not fil_sparse_format or algo == 'tree_reorg' or
algo == 'batch_tree_reorg') or
fil_sparse_format == 'not_supported'):
with pytest.raises(ValueError):
fil_preds = cuml_model.predict(X_test, predict_model="GPU",
fil_sparse_format=fil_sparse_format,
algo=algo)
else:
fil_preds = cuml_model.predict(X_test, predict_model="GPU",
fil_sparse_format=fil_sparse_format,
algo=algo)
fil_preds = np.reshape(fil_preds, np.shape(y_test))
fil_r2 = r2_score(y_test, fil_preds, convert_dtype=datatype)
fil_model = cuml_model.convert_to_fil_model()
input_type = 'numpy'
fil_model_preds = fil_model.predict(X_test,
output_type=input_type)
fil_model_preds = np.reshape(fil_model_preds, np.shape(y_test))
fil_model_r2 = r2_score(y_test, fil_model_preds,
convert_dtype=datatype)
assert fil_r2 == fil_model_r2
tl_model = cuml_model.convert_to_treelite_model()
assert num_treees == tl_model.num_trees
assert X.shape[1] == tl_model.num_features
# Initialize, fit and predict using
# sklearn's random forest regression model
if X.shape[0] < 1000: # mode != "stress":
sk_model = skrfr(n_estimators=50, max_depth=40,
min_samples_split=2,
random_state=10)
sk_model.fit(X_train, y_train)
sk_preds = sk_model.predict(X_test)
sk_r2 = r2_score(y_test, sk_preds, convert_dtype=datatype)
assert fil_r2 >= (sk_r2 - 0.07)
@pytest.mark.memleak
@pytest.mark.parametrize('fil_sparse_format', [True, False, 'auto'])
@pytest.mark.parametrize('n_iter', [unit_param(5), quality_param(30),
stress_param(80)])
def test_rf_memory_leakage(small_clf, fil_sparse_format, n_iter):
datatype = np.float32
use_handle = True
X, y = small_clf
X = X.astype(datatype)
y = y.astype(np.int32)
X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.8,
random_state=0)
# Create a handle for the cuml model
handle, stream = get_handle(use_handle, n_streams=1)
# Warmup. Some modules that are used in RF allocate space on the device
# and consume memory. This is to make sure that the allocation is done
# before the first call to get_memory_info.
base_model = curfc(handle=handle)
base_model.fit(X_train, y_train)
handle.sync() # just to be sure
free_mem = cuda.current_context().get_memory_info()[0]
def test_for_memory_leak():
cuml_mods = curfc(handle=handle)
cuml_mods.fit(X_train, y_train)
handle.sync() # just to be sure
# Calculate the memory free after fitting the cuML model
delta_mem = free_mem - cuda.current_context().get_memory_info()[0]
assert delta_mem == 0
for i in range(2):
cuml_mods.predict(X_test, predict_model="GPU",
fil_sparse_format=fil_sparse_format)
handle.sync() # just to be sure
# Calculate the memory free after predicting the cuML model
delta_mem = free_mem - cuda.current_context().get_memory_info()[0]
assert delta_mem == 0
for i in range(n_iter):
test_for_memory_leak()
@pytest.mark.parametrize('max_features', [1.0, 'auto', 'log2', 'sqrt'])
@pytest.mark.parametrize('max_depth', [10, 13, 16])
@pytest.mark.parametrize('n_estimators', [10, 20, 100])
@pytest.mark.parametrize('n_bins', [8, 9, 10])
def test_create_classification_model(max_features,
max_depth, n_estimators, n_bins):
# random forest classification model
cuml_model = curfc(max_features=max_features,
n_bins=n_bins,
n_estimators=n_estimators,
max_depth=max_depth)
params = cuml_model.get_params()
cuml_model2 = curfc()
cuml_model2.set_params(**params)
    verify_params = cuml_model2.get_params()
    assert params['max_features'] == verify_params['max_features']
    assert params['max_depth'] == verify_params['max_depth']
    assert params['n_estimators'] == verify_params['n_estimators']
    assert params['n_bins'] == verify_params['n_bins']
@pytest.mark.parametrize('n_estimators', [10, 20, 100])
@pytest.mark.parametrize('n_bins', [8, 9, 10])
def test_multiple_fits_classification(large_clf, n_estimators, n_bins):
datatype = np.float32
X, y = large_clf
X = X.astype(datatype)
y = y.astype(np.int32)
cuml_model = curfc(n_bins=n_bins,
n_estimators=n_estimators,
max_depth=10)
# Calling multiple fits
cuml_model.fit(X, y)
cuml_model.fit(X, y)
# Check if params are still intact
params = cuml_model.get_params()
assert params['n_estimators'] == n_estimators
assert params['n_bins'] == n_bins
@pytest.mark.parametrize('column_info', [unit_param([100, 50]),
quality_param([200, 100]),
stress_param([500, 350])])
@pytest.mark.parametrize('nrows', [unit_param(500), quality_param(5000),
stress_param(500000)])
@pytest.mark.parametrize('n_estimators', [10, 20, 100])
@pytest.mark.parametrize('n_bins', [8, 9, 10])
def test_multiple_fits_regression(column_info, nrows, n_estimators, n_bins):
datatype = np.float32
ncols, n_info = column_info
X, y = make_regression(n_samples=nrows, n_features=ncols,
n_informative=n_info,
random_state=123)
X = X.astype(datatype)
y = y.astype(np.int32)
cuml_model = curfr(n_bins=n_bins,
n_estimators=n_estimators,
max_depth=10)
# Calling multiple fits
cuml_model.fit(X, y)
cuml_model.fit(X, y)
cuml_model.fit(X, y)
# Check if params are still intact
params = cuml_model.get_params()
assert params['n_estimators'] == n_estimators
assert params['n_bins'] == n_bins
@pytest.mark.parametrize('rows_sample', [unit_param(1.0),
stress_param(0.95)])
@pytest.mark.parametrize('datatype', [np.float32])
@pytest.mark.parametrize('max_features', [1.0, 'auto', 'log2', 'sqrt'])
def test_rf_classification_proba(small_clf, datatype,
rows_sample, max_features):
use_handle = True
X, y = small_clf
X = X.astype(datatype)
y = y.astype(np.int32)
X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.8,
random_state=0)
# Create a handle for the cuml model
handle, stream = get_handle(use_handle, n_streams=1)
# Initialize, fit and predict using cuML's
# random forest classification model
cuml_model = curfc(max_features=max_features, rows_sample=rows_sample,
n_bins=16, split_criterion=0,
min_rows_per_node=2, seed=123, n_streams=1,
n_estimators=40, handle=handle, max_leaves=-1,
max_depth=16)
cuml_model.fit(X_train, y_train)
fil_preds_proba = cuml_model.predict_proba(X_test,
output_class=True,
threshold=0.5,
algo='auto')
y_proba = np.zeros(np.shape(fil_preds_proba))
y_proba[:, 1] = y_test
y_proba[:, 0] = 1.0 - y_test
fil_mse = mean_squared_error(y_proba, fil_preds_proba)
if X.shape[0] < 500000:
sk_model = skrfc(n_estimators=40,
max_depth=16,
min_samples_split=2, max_features=max_features,
random_state=10)
sk_model.fit(X_train, y_train)
sk_preds_proba = sk_model.predict_proba(X_test)
sk_mse = mean_squared_error(y_proba, sk_preds_proba)
        # A maximum difference of 0.0061 has been observed between the MSE
        # values of FIL's and sklearn's predict_proba outputs
assert fil_mse <= (sk_mse + 0.0061)
@pytest.mark.parametrize('n_estimators', [5, 10, 20])
@pytest.mark.parametrize('detailed_printing', [True, False])
def test_rf_printing(capfd, n_estimators, detailed_printing):
X, y = make_classification(n_samples=500, n_features=10,
n_clusters_per_class=1, n_informative=5,
random_state=94929, n_classes=2)
X = X.astype(np.float32)
y = y.astype(np.int32)
# Create a handle for the cuml model
handle, stream = get_handle(True, n_streams=1)
# Initialize cuML Random Forest classification model
cuml_model = curfc(handle=handle, max_features=1.0, rows_sample=1.0,
n_bins=16, split_algo=0, split_criterion=0,
min_rows_per_node=2, seed=23707, n_streams=1,
n_estimators=n_estimators, max_leaves=-1,
max_depth=16)
# Train model on the data
cuml_model.fit(X, y)
if detailed_printing:
cuml_model.print_detailed()
else:
cuml_model.print_summary()
# Read the captured output
printed_output = capfd.readouterr().out
# Test 1: Output is non-zero
assert '' != printed_output
# Count the number of trees printed
tree_count = 0
for line in printed_output.split('\n'):
if line.strip().startswith('Tree #'):
tree_count += 1
# Test 2: Correct number of trees are printed
assert n_estimators == tree_count
@pytest.mark.memleak
@pytest.mark.parametrize('estimator_type', ['classification'])
def test_rf_host_memory_leak(large_clf, estimator_type):
import gc
import os
try:
import psutil
except ImportError:
pytest.skip("psutil not installed")
process = psutil.Process(os.getpid())
X, y = large_clf
X = X.astype(np.float32)
if estimator_type == 'classification':
base_model = curfc(max_depth=10,
n_estimators=100,
seed=123)
y = y.astype(np.int32)
else:
base_model = curfr(max_depth=10,
n_estimators=100,
seed=123)
y = y.astype(np.float32)
# Pre-fit once - this is our baseline and memory usage
# should not significantly exceed it after later fits
base_model.fit(X, y)
gc.collect()
initial_baseline_mem = process.memory_info().rss
for i in range(5):
base_model.fit(X, y)
gc.collect()
final_mem = process.memory_info().rss
    # Some tiny allocations may occur, but we should not leak
    # without bounds, which previously happened
assert (final_mem - initial_baseline_mem) < 2e6
@pytest.mark.memleak
@pytest.mark.parametrize('estimator_type', ['regression', 'classification'])
def test_concat_memory_leak(large_clf, estimator_type):
import gc
import os
try:
import psutil
except ImportError:
pytest.skip("psutil not installed")
process = psutil.Process(os.getpid())
X, y = large_clf
X = X.astype(np.float32)
# Build a series of RF models
n_models = 10
if estimator_type == 'classification':
base_models = [curfc(max_depth=10,
n_estimators=100,
seed=123) for i in range(n_models)]
y = y.astype(np.int32)
elif estimator_type == 'regression':
base_models = [curfr(max_depth=10,
n_estimators=100,
seed=123) for i in range(n_models)]
y = y.astype(np.float32)
else:
assert False
# Pre-fit once - this is our baseline and memory usage
# should not significantly exceed it after later fits
for model in base_models:
model.fit(X, y)
# Just concatenate over and over in a loop
concat_models = base_models[1:]
init_model = base_models[0]
other_handles = [
model._obtain_treelite_handle() for model in concat_models
]
init_model._concatenate_treelite_handle(other_handles)
gc.collect()
initial_baseline_mem = process.memory_info().rss
for i in range(10):
init_model._concatenate_treelite_handle(other_handles)
gc.collect()
used_mem = process.memory_info().rss
logger.debug("memory at rep %2d: %d m" % (
i, (used_mem - initial_baseline_mem)/1e6))
gc.collect()
used_mem = process.memory_info().rss
logger.info("Final memory delta: %d" % (
(used_mem - initial_baseline_mem)/1e6))
assert (used_mem - initial_baseline_mem) < 1e6
@pytest.mark.xfail(strict=True, raises=ValueError)
def test_rf_nbins_small(small_clf):
X, y = small_clf
X = X.astype(np.float32)
y = y.astype(np.int32)
X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.8,
random_state=0)
# Initialize, fit and predict using cuML's
# random forest classification model
cuml_model = curfc()
cuml_model.fit(X_train[0:3, :], y_train[0:3])
|
py | 1a3714aea800131148c2df7d920c14992f5b5bc3 | # coding: utf-8
from __future__ import unicode_literals
from six import PY2
class BoxException(Exception):
"""
Base class exception for all errors raised from the SDK.
"""
def __str__(self):
# pylint:disable=no-member
# <https://github.com/box/box-python-sdk/issues/117>
return self.__unicode__().encode('utf-8') if PY2 else self.__unicode__()
class BoxNetworkException(BoxException):
"""
Exception raised from the network layer.
"""
pass
class BoxAPIException(BoxException):
"""
Exception raised from the box session layer.
"""
def __init__(self, status, code=None, message=None, request_id=None, headers=None, url=None, method=None, context_info=None):
"""
:param status:
HTTP status code of the failed response
:type status:
`int`
:param code:
The 'code' field of the failed response
:type code:
`unicode`
:param message:
A message to associate with the exception, e.g. 'message' field of the json in the failed response
:type message:
`unicode`
:param request_id:
The 'request_id' field of the json in the failed response
:type request_id:
`unicode`
:param headers:
The HTTP headers in the failed response
:type headers:
`dict`
:param url:
The url which raised the exception
:type url:
`unicode`
:param method:
The HTTP verb used to make the request.
:type method:
`unicode`
:param context_info:
The context_info returned in the failed response.
:type context_info:
`dict`
"""
super(BoxAPIException, self).__init__()
self._status = status
self._code = code
self._message = message
self._request_id = request_id
self._headers = headers
self._url = url
self._method = method
self._context_info = context_info
def __unicode__(self):
return '\nMessage: {0}\nStatus: {1}\nCode: {2}\nRequest id: {3}\nHeaders: {4}\nURL: {5}\nMethod: {6}\nContext info: {7}'.format(
self._message,
self._status,
self._code,
self._request_id,
self._headers,
self._url,
self._method,
self._context_info,
)
@property
def status(self):
"""
The status code of the network response that is responsible for the exception.
:rtype: `int`
"""
return self._status
@property
def code(self):
"""
The explanation of the status code of the network response that is responsible for the exception.
        :rtype: `unicode`
"""
return self._code
@property
    def message(self):
        """
        The message associated with the exception, e.g. the 'message' field of the failed response.
        :rtype: `unicode`
        """
        return self._message
@property
def request_id(self):
"""
The id the network request that is responsible for the exception.
:rtype: `unicode`
"""
return self._request_id
@property
def url(self):
"""
The URL of the network request that is responsible for the exception.
:rtype: `unicode`
"""
return self._url
@property
def method(self):
"""
The HTTP verb of the request that is responsible for the exception.
:rtype: `unicode`
"""
return self._method
@property
def context_info(self):
"""
The context_info returned in the failed response.
:rtype: `dict`
"""
return self._context_info
class BoxOAuthException(BoxException):
"""
Exception raised during auth.
"""
def __init__(self, status, message=None, url=None, method=None):
"""
:param status:
HTTP status code of the auth response
:type status:
`int`
:param message:
A message to associate with the exception, e.g. HTTP content of the auth response
:type message:
`unicode`
:param url:
The url which raised the exception
:type url:
`unicode`
:param method:
The HTTP verb used to make the request.
:type method:
`unicode`
"""
super(BoxOAuthException, self).__init__()
self._status = status
self._message = message
self._url = url
self._method = method
def __unicode__(self):
return '\nMessage: {0}\nStatus: {1}\nURL: {2}\nMethod: {3}'.format(
self._message,
self._status,
self._url,
self._method,
)
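# Hedged usage sketch: how a caller might read BoxAPIException's properties after
# a failed request. `client` is assumed to be a boxsdk Client-like object and the
# call below is illustrative only; the property names (status, code, request_id)
# come from the class defined above.
def _example_handle_api_error(client, file_id):
    try:
        return client.file(file_id).get()
    except BoxAPIException as error:
        # status/code/request_id mirror the fields of the failed HTTP response
        print('Box API call failed: status={0} code={1} request_id={2}'.format(
            error.status, error.code, error.request_id))
        raise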
|
py | 1a3714ce5bc41ab38a2b185664a65927412b729c | import os
import time
import cv2
import imageio
from tensorboardX import SummaryWriter
from NeRF import *
from load_llff import load_llff_data
from run_nerf_helpers import *
from metrics import compute_img_metric
# np.random.seed(0)
DEBUG = False
def config_parser():
import configargparse
parser = configargparse.ArgumentParser()
parser.add_argument('--config', is_config_file=True,
help='config file path')
parser.add_argument("--expname", type=str,
help='experiment name')
parser.add_argument("--basedir", type=str, default='./logs/', required=True,
help='where to store ckpts and logs')
parser.add_argument("--datadir", type=str, required=True,
help='input data directory')
parser.add_argument("--datadownsample", type=float, default=-1,
help='if downsample > 0, means downsample the image to scale=datadownsample')
parser.add_argument("--tbdir", type=str, required=True,
help="tensorboard log directory")
parser.add_argument("--num_gpu", type=int, default=1,
help=">1 will use DataParallel")
parser.add_argument("--torch_hub_dir", type=str, default='',
help=">1 will use DataParallel")
# training options
parser.add_argument("--netdepth", type=int, default=8,
help='layers in network')
parser.add_argument("--netwidth", type=int, default=256,
help='channels per layer')
parser.add_argument("--netdepth_fine", type=int, default=8,
help='layers in fine network')
parser.add_argument("--netwidth_fine", type=int, default=256,
help='channels per layer in fine network')
parser.add_argument("--N_rand", type=int, default=32 * 32 * 4,
help='batch size (number of random rays per gradient step)')
parser.add_argument("--lrate", type=float, default=5e-4,
help='learning rate')
parser.add_argument("--lrate_decay", type=int, default=250,
help='exponential learning rate decay (in 1000 steps)')
# generate N_rand # of rays, divide into chunk # of batch
# then generate chunk * N_samples # of points, divide into netchunk # of batch
parser.add_argument("--chunk", type=int, default=1024 * 32,
help='number of rays processed in parallel, decrease if running out of memory')
parser.add_argument("--netchunk", type=int, default=1024 * 64,
help='number of pts sent through network in parallel, decrease if running out of memory')
parser.add_argument("--no_reload", action='store_true',
help='do not reload weights from saved ckpt')
parser.add_argument("--ft_path", type=str, default=None,
help='specific weights npy file to reload for coarse network')
# rendering options
parser.add_argument("--N_iters", type=int, default=50000,
help='number of iteration')
parser.add_argument("--N_samples", type=int, default=64,
help='number of coarse samples per ray')
parser.add_argument("--N_importance", type=int, default=0,
help='number of additional fine samples per ray')
parser.add_argument("--perturb", type=float, default=1.,
help='set to 0. for no jitter, 1. for jitter')
parser.add_argument("--use_viewdirs", action='store_true',
help='use full 5D input instead of 3D')
parser.add_argument("--i_embed", type=int, default=0,
help='set 0 for default positional encoding, -1 for none')
parser.add_argument("--multires", type=int, default=10,
help='log2 of max freq for positional encoding (3D location)')
parser.add_argument("--multires_views", type=int, default=4,
help='log2 of max freq for positional encoding (2D direction)')
parser.add_argument("--raw_noise_std", type=float, default=0.,
help='std dev of noise added to regularize sigma_a output, 1e0 recommended')
parser.add_argument("--rgb_activate", type=str, default='sigmoid',
help='activate function for rgb output, choose among "none", "sigmoid"')
parser.add_argument("--sigma_activate", type=str, default='relu',
help='activate function for sigma output, choose among "relu", "softplue"')
# ===============================
# Kernel optimizing
# ===============================
parser.add_argument("--kernel_type", type=str, default='kernel',
help='choose among <none>, <itsampling>, <sparsekernel>')
parser.add_argument("--kernel_isglobal", action='store_true',
help='if specified, the canonical kernel position is global')
parser.add_argument("--kernel_start_iter", type=int, default=0,
help='start training kernel after # iteration')
parser.add_argument("--kernel_ptnum", type=int, default=5,
help='the number of sparse locations in the kernels '
'that involves computing the final color of ray')
parser.add_argument("--kernel_random_hwindow", type=float, default=0.25,
help='randomly displace the predicted ray position')
parser.add_argument("--kernel_img_embed", type=int, default=32,
help='the dim of image laten code')
parser.add_argument("--kernel_rand_dim", type=int, default=2,
help='dimensions of input random number which uniformly sample from (0, 1)')
parser.add_argument("--kernel_rand_embed", type=int, default=3,
help='embed frequency of input kernel coordinate')
parser.add_argument("--kernel_rand_mode", type=str, default='float',
help='<float>, <<int#, such as<int5>>>, <fix>')
parser.add_argument("--kernel_random_mode", type=str, default='input',
help='<input>, <output>')
parser.add_argument("--kernel_spatial_embed", type=int, default=0,
help='the dim of spatial coordinate embedding')
parser.add_argument("--kernel_depth_embed", type=int, default=0,
help='the dim of depth coordinate embedding')
parser.add_argument("--kernel_hwindow", type=int, default=10,
help='the max window of the kernel (sparse location will lie inside the window')
parser.add_argument("--kernel_pattern_init_radius", type=float, default=0.1,
help='the initialize radius of init pattern')
parser.add_argument("--kernel_num_hidden", type=int, default=3,
help='the number of hidden layer')
parser.add_argument("--kernel_num_wide", type=int, default=64,
help='the wide of hidden layer')
parser.add_argument("--kernel_shortcut", action='store_true',
help='if yes, add a short cut to the network')
parser.add_argument("--align_start_iter", type=int, default=0,
help='start iteration of the align loss')
parser.add_argument("--align_end_iter", type=int, default=1e10,
help='end iteration of the align loss')
parser.add_argument("--kernel_align_weight", type=float, default=0,
help='align term weight')
parser.add_argument("--prior_start_iter", type=int, default=0,
help='start iteration of the prior loss')
parser.add_argument("--prior_end_iter", type=int, default=1e10,
help='end iteration of the prior loss')
parser.add_argument("--kernel_prior_weight", type=float, default=0,
help='weight of prior loss (regularization)')
parser.add_argument("--sparsity_start_iter", type=int, default=0,
help='start iteration of the sparsity loss')
parser.add_argument("--sparsity_end_iter", type=int, default=1e10,
help='end iteration of the sparsity loss')
parser.add_argument("--kernel_sparsity_type", type=str, default='tv',
help='type of sparse gradient loss', choices=['tv', 'normalize', 'robust'])
parser.add_argument("--kernel_sparsity_weight", type=float, default=0,
help='weight of sparsity loss')
parser.add_argument("--kernel_spatialvariant_trans", action='store_true',
help='if true, optimize spatial variant 3D translation of each sampling point')
parser.add_argument("--kernel_global_trans", action='store_true',
help='if true, optimize global 3D translation of each sampling point')
parser.add_argument("--tone_mapping_type", type=str, default='none',
help='the tone mapping of linear to LDR color space, <none>, <gamma>, <learn>')
####### render option, will not effect training ########
parser.add_argument("--render_only", action='store_true',
help='do not optimize, reload weights and render out render_poses path')
parser.add_argument("--render_test", action='store_true',
help='render the test set instead of render_poses path')
parser.add_argument("--render_multipoints", action='store_true',
help='render sub image that reconstruct the blur image')
parser.add_argument("--render_rmnearplane", type=int, default=0,
help='when render, set the density of nearest plane to 0')
parser.add_argument("--render_focuspoint_scale", type=float, default=1.,
help='scale the focal point when render')
parser.add_argument("--render_radius_scale", type=float, default=1.,
help='scale the radius of the camera path')
parser.add_argument("--render_factor", type=int, default=0,
help='downsampling factor to speed up rendering, set 4 or 8 for fast preview')
parser.add_argument("--render_epi", action='store_true',
help='render the video with epi path')
## llff flags
parser.add_argument("--factor", type=int, default=None,
help='downsample factor for LLFF images')
parser.add_argument("--no_ndc", action='store_true',
help='do not use normalized device coordinates (set for non-forward facing scenes)')
parser.add_argument("--lindisp", action='store_true',
help='sampling linearly in disparity rather than depth')
parser.add_argument("--spherify", action='store_true',
help='set for spherical 360 scenes')
parser.add_argument("--llffhold", type=int, default=8,
help='will take every 1/N images as LLFF test set, paper uses 8')
# ######### Unused params from the original ###########
parser.add_argument("--precrop_iters", type=int, default=0,
help='number of steps to train on central crops')
parser.add_argument("--precrop_frac", type=float,
default=.5, help='fraction of img taken for central crops')
# dataset options
parser.add_argument("--dataset_type", type=str, default='llff',
help='options: llff / blender / deepvoxels')
parser.add_argument("--testskip", type=int, default=8,
help='will load 1/N images from test/val sets, useful for large datasets like deepvoxels')
## deepvoxels flags
parser.add_argument("--shape", type=str, default='greek',
help='options : armchair / cube / greek / vase')
## blender flags
parser.add_argument("--white_bkgd", action='store_true',
help='set to render synthetic data on a white bkgd (always use for dvoxels)')
parser.add_argument("--half_res", action='store_true',
help='load blender synthetic data at 400x400 instead of 800x800')
################# logging/saving options ##################
parser.add_argument("--i_print", type=int, default=200,
help='frequency of console printout and metric loggin')
parser.add_argument("--i_tensorboard", type=int, default=200,
help='frequency of tensorboard image logging')
parser.add_argument("--i_weights", type=int, default=20000,
help='frequency of weight ckpt saving')
parser.add_argument("--i_testset", type=int, default=20000,
help='frequency of testset saving')
parser.add_argument("--i_video", type=int, default=20000,
help='frequency of render_poses video saving')
return parser
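# Usage note (assumption): config_parser is built on configargparse, so every
# option can be given on the command line or collected in a text file passed via
# --config (one "key = value" per line). A hypothetical invocation:
#   python <this_script>.py --config configs/scene.txt --expname scene \
#       --basedir ./logs --tbdir ./tb --datadir ./data/scene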
def train():
parser = config_parser()
args = parser.parse_args()
if len(args.torch_hub_dir) > 0:
print(f"Change torch hub cache to {args.torch_hub_dir}")
torch.hub.set_dir(args.torch_hub_dir)
# Load data
K = None
if args.dataset_type == 'llff':
images, poses, bds, render_poses, i_test = load_llff_data(args, args.datadir, args.factor,
recenter=True, bd_factor=.75,
spherify=args.spherify,
path_epi=args.render_epi)
hwf = poses[0, :3, -1]
poses = poses[:, :3, :4]
print('Loaded llff', images.shape, render_poses.shape, hwf, args.datadir)
if not isinstance(i_test, list):
i_test = [i_test]
print('LLFF holdout,', args.llffhold)
i_test = np.arange(images.shape[0])[::args.llffhold]
i_val = i_test
i_train = np.array([i for i in np.arange(int(images.shape[0])) if
(i not in i_test and i not in i_val)])
print('DEFINING BOUNDS')
if args.no_ndc:
near = np.min(bds) * 0.9
far = np.max(bds) * 1.0
else:
near = 0.
far = 1.
print('NEAR FAR', near, far)
else:
print('Unknown dataset type', args.dataset_type, 'exiting')
return
imagesf = images
images = (images * 255).astype(np.uint8)
images_idx = np.arange(0, len(images))
# Cast intrinsics to right types
H, W, focal = hwf
H, W = int(H), int(W)
hwf = [H, W, focal]
if K is None:
K = np.array([
[focal, 0, 0.5 * W],
[0, focal, 0.5 * H],
[0, 0, 1]
])
if args.render_test:
render_poses = np.array(poses)
# Create log dir and copy the config file
basedir = args.basedir
tensorboardbase = args.tbdir
expname = args.expname
test_metric_file = os.path.join(basedir, expname, 'test_metrics.txt')
os.makedirs(os.path.join(basedir, expname), exist_ok=True)
os.makedirs(os.path.join(tensorboardbase, expname), exist_ok=True)
tensorboard = SummaryWriter(os.path.join(tensorboardbase, expname))
f = os.path.join(basedir, expname, 'args.txt')
with open(f, 'w') as file:
for arg in sorted(vars(args)):
attr = getattr(args, arg)
file.write('{} = {}\n'.format(arg, attr))
if args.config is not None and not args.render_only:
f = os.path.join(basedir, expname, 'config.txt')
with open(f, 'w') as file:
file.write(open(args.config, 'r').read())
with open(test_metric_file, 'a') as file:
file.write(open(args.config, 'r').read())
file.write("\n============================\n"
"||\n"
"\\/\n")
# The DSK module
if args.kernel_type == 'deformablesparsekernel':
kernelnet = DSKnet(len(images), torch.tensor(poses[:, :3, :4]),
args.kernel_ptnum, args.kernel_hwindow,
random_hwindow=args.kernel_random_hwindow, in_embed=args.kernel_rand_embed,
random_mode=args.kernel_random_mode,
img_embed=args.kernel_img_embed,
spatial_embed=args.kernel_spatial_embed,
depth_embed=args.kernel_depth_embed,
num_hidden=args.kernel_num_hidden,
num_wide=args.kernel_num_wide,
short_cut=args.kernel_shortcut,
pattern_init_radius=args.kernel_pattern_init_radius,
isglobal=args.kernel_isglobal,
optim_trans=args.kernel_global_trans,
optim_spatialvariant_trans=args.kernel_spatialvariant_trans)
elif args.kernel_type == 'none':
kernelnet = None
else:
raise RuntimeError(f"kernel_type {args.kernel_type} not recognized")
# Create nerf model
nerf = NeRFAll(args, kernelnet)
nerf = nn.DataParallel(nerf, list(range(args.num_gpu)))
optim_params = nerf.parameters()
optimizer = torch.optim.Adam(params=optim_params,
lr=args.lrate,
betas=(0.9, 0.999))
start = 0
# Load Checkpoints
if args.ft_path is not None and args.ft_path != 'None':
ckpts = [args.ft_path]
else:
ckpts = [os.path.join(basedir, expname, f) for f in sorted(os.listdir(os.path.join(basedir, expname))) if
'.tar' in f]
print('Found ckpts', ckpts)
if len(ckpts) > 0 and not args.no_reload:
ckpt_path = ckpts[-1]
print('Reloading from', ckpt_path)
ckpt = torch.load(ckpt_path)
start = ckpt['global_step']
optimizer.load_state_dict(ckpt['optimizer_state_dict'])
# Load model
smart_load_state_dict(nerf, ckpt)
# figuring out the train/test configuration
render_kwargs_train = {
'perturb': args.perturb,
'N_importance': args.N_importance,
'N_samples': args.N_samples,
'use_viewdirs': args.use_viewdirs,
'white_bkgd': args.white_bkgd,
'raw_noise_std': args.raw_noise_std,
}
# NDC only good for LLFF-style forward facing data
if args.no_ndc: # args.dataset_type != 'llff' or
print('Not ndc!')
render_kwargs_train['ndc'] = False
render_kwargs_train['lindisp'] = args.lindisp
render_kwargs_test = {k: render_kwargs_train[k] for k in render_kwargs_train}
render_kwargs_test['perturb'] = False
render_kwargs_test['raw_noise_std'] = 0.
# visualize_motionposes(H, W, K, nerf, 2)
# visualize_kernel(H, W, K, nerf, 5)
# visualize_itsample(H, W, K, nerf)
# visualize_kmap(H, W, K, nerf, img_idx=1)
bds_dict = {
'near': near,
'far': far,
}
render_kwargs_train.update(bds_dict)
render_kwargs_test.update(bds_dict)
global_step = start
# Move testing data to GPU
render_poses = torch.tensor(render_poses[:, :3, :4]).cuda()
nerf = nerf.cuda()
# Short circuit if only rendering out from trained model
if args.render_only:
print('RENDER ONLY')
with torch.no_grad():
testsavedir = os.path.join(basedir, expname,
f"renderonly"
f"_{'test' if args.render_test else 'path'}"
f"_{start:06d}")
os.makedirs(testsavedir, exist_ok=True)
print('test poses shape', render_poses.shape)
dummy_num = ((len(poses) - 1) // args.num_gpu + 1) * args.num_gpu - len(poses)
dummy_poses = torch.eye(3, 4).unsqueeze(0).expand(dummy_num, 3, 4).type_as(render_poses)
print(f"Append {dummy_num} # of poses to fill all the GPUs")
nerf.eval()
rgbshdr, disps = nerf(
hwf[0], hwf[1], K, args.chunk,
poses=torch.cat([render_poses, dummy_poses], dim=0),
render_kwargs=render_kwargs_test,
render_factor=args.render_factor,
)
rgbshdr = rgbshdr[:len(rgbshdr) - dummy_num]
disps = (1. - disps)
disps = disps[:len(disps) - dummy_num].cpu().numpy()
rgbs = rgbshdr
rgbs = to8b(rgbs.cpu().numpy())
disps = to8b(disps / disps.max())
if args.render_test:
for rgb_idx, rgb8 in enumerate(rgbs):
imageio.imwrite(os.path.join(testsavedir, f'{rgb_idx:03d}.png'), rgb8)
imageio.imwrite(os.path.join(testsavedir, f'{rgb_idx:03d}_disp.png'), disps[rgb_idx])
else:
prefix = 'epi_' if args.render_epi else ''
imageio.mimwrite(os.path.join(testsavedir, f'{prefix}video.mp4'), rgbs, fps=30, quality=9)
imageio.mimwrite(os.path.join(testsavedir, f'{prefix}video_disp.mp4'), disps, fps=30, quality=9)
if args.render_test and args.render_multipoints:
for pti in range(args.kernel_ptnum):
nerf.eval()
poses_num = len(poses) + dummy_num
imgidx = torch.arange(poses_num, dtype=torch.long).to(render_poses.device).reshape(poses_num, 1)
rgbs, weights = nerf(
hwf[0], hwf[1], K, args.chunk,
poses=torch.cat([render_poses, dummy_poses], dim=0),
render_kwargs=render_kwargs_test,
render_factor=args.render_factor,
render_point=pti,
images_indices=imgidx
)
rgbs = rgbs[:len(rgbs) - dummy_num]
weights = weights[:len(weights) - dummy_num]
rgbs = to8b(rgbs.cpu().numpy())
weights = to8b(weights.cpu().numpy())
for rgb_idx, rgb8 in enumerate(rgbs):
imageio.imwrite(os.path.join(testsavedir, f'{rgb_idx:03d}_pt{pti}.png'), rgb8)
imageio.imwrite(os.path.join(testsavedir, f'w_{rgb_idx:03d}_pt{pti}.png'), weights[rgb_idx])
return
# ============================================
# Prepare ray dataset if batching random rays
# ============================================
N_rand = args.N_rand
train_datas = {}
# if downsample, downsample the images
if args.datadownsample > 0:
images_train = np.stack([cv2.resize(img_, None, None,
1 / args.datadownsample, 1 / args.datadownsample,
cv2.INTER_AREA) for img_ in imagesf], axis=0)
else:
images_train = imagesf
num_img, hei, wid, _ = images_train.shape
print(f"train on image sequence of len = {num_img}, {wid}x{hei}")
k_train = np.array([K[0, 0] * wid / W, 0, K[0, 2] * wid / W,
0, K[1, 1] * hei / H, K[1, 2] * hei / H,
0, 0, 1]).reshape(3, 3).astype(K.dtype)
# For random ray batching
print('get rays')
rays = np.stack([get_rays_np(hei, wid, k_train, p) for p in poses[:, :3, :4]], 0) # [N, ro+rd, H, W, 3]
rays = np.transpose(rays, [0, 2, 3, 1, 4])
train_datas['rays'] = rays[i_train].reshape(-1, 2, 3)
xs, ys = np.meshgrid(np.arange(wid, dtype=np.float32), np.arange(hei, dtype=np.float32), indexing='xy')
xs = np.tile((xs[None, ...] + HALF_PIX) * W / wid, [num_img, 1, 1])
ys = np.tile((ys[None, ...] + HALF_PIX) * H / hei, [num_img, 1, 1])
train_datas['rays_x'], train_datas['rays_y'] = xs[i_train].reshape(-1, 1), ys[i_train].reshape(-1, 1)
train_datas['rgbsf'] = images_train[i_train].reshape(-1, 3)
images_idx_tile = images_idx.reshape((num_img, 1, 1))
images_idx_tile = np.tile(images_idx_tile, [1, hei, wid])
train_datas['images_idx'] = images_idx_tile[i_train].reshape(-1, 1).astype(np.int64)
print('shuffle rays')
shuffle_idx = np.random.permutation(len(train_datas['rays']))
train_datas = {k: v[shuffle_idx] for k, v in train_datas.items()}
print('done')
i_batch = 0
# Move training data to GPU
images = torch.tensor(images).cuda()
imagesf = torch.tensor(imagesf).cuda()
poses = torch.tensor(poses).cuda()
train_datas = {k: torch.tensor(v).cuda() for k, v in train_datas.items()}
N_iters = args.N_iters + 1
print('Begin')
print('TRAIN views are', i_train)
print('TEST views are', i_test)
print('VAL views are', i_val)
# Summary writers
# writer = SummaryWriter(os.path.join(basedir, 'summaries', expname))
start = start + 1
for i in range(start, N_iters):
time0 = time.time()
# Sample random ray batch
iter_data = {k: v[i_batch:i_batch + N_rand] for k, v in train_datas.items()}
batch_rays = iter_data.pop('rays').permute(0, 2, 1)
i_batch += N_rand
if i_batch >= len(train_datas['rays']):
print("Shuffle data after an epoch!")
shuffle_idx = np.random.permutation(len(train_datas['rays']))
train_datas = {k: v[shuffle_idx] for k, v in train_datas.items()}
i_batch = 0
##### Core optimization loop #####
nerf.train()
if i == args.kernel_start_iter:
torch.cuda.empty_cache()
rgb, rgb0, extra_loss = nerf(H, W, K, chunk=args.chunk,
rays=batch_rays, rays_info=iter_data,
retraw=True, force_naive=i < args.kernel_start_iter,
**render_kwargs_train)
# Compute Losses
# =====================
target_rgb = iter_data['rgbsf'].squeeze(-2)
img_loss = img2mse(rgb, target_rgb)
loss = img_loss
psnr = mse2psnr(img_loss)
img_loss0 = img2mse(rgb0, target_rgb)
loss = loss + img_loss0
extra_loss = {k: torch.mean(v) for k, v in extra_loss.items()}
if len(extra_loss) > 0:
for k, v in extra_loss.items():
if f"kernel_{k}_weight" in vars(args).keys():
if vars(args)[f"{k}_start_iter"] <= i <= vars(args)[f"{k}_end_iter"]:
loss = loss + v * vars(args)[f"kernel_{k}_weight"]
optimizer.zero_grad()
loss.backward()
optimizer.step()
# NOTE: IMPORTANT!
### update learning rate ###
decay_rate = 0.1
decay_steps = args.lrate_decay * 1000
new_lrate = args.lrate * (decay_rate ** (global_step / decay_steps))
for param_group in optimizer.param_groups:
param_group['lr'] = new_lrate
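        # e.g. with the defaults above (lrate=5e-4, lrate_decay=250), decay_steps is
        # 250,000, so the learning rate decays by 10x (to 5e-5) every 250k steps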
################################
# dt = time.time() - time0
# print(f"Step: {global_step}, Loss: {loss}, Time: {dt}")
##### end #####
# Rest is logging
if i % args.i_weights == 0:
path = os.path.join(basedir, expname, '{:06d}.tar'.format(i))
torch.save({
'global_step': global_step,
'network_state_dict': nerf.state_dict(),
'optimizer_state_dict': optimizer.state_dict(),
}, path)
print('Saved checkpoints at', path)
if i % args.i_video == 0 and i > 0:
# Turn on testing mode
with torch.no_grad():
nerf.eval()
rgbs, disps = nerf(H, W, K, args.chunk, poses=render_poses, render_kwargs=render_kwargs_test)
print('Done, saving', rgbs.shape, disps.shape)
moviebase = os.path.join(basedir, expname, '{}_spiral_{:06d}_'.format(expname, i))
rgbs = (rgbs - rgbs.min()) / (rgbs.max() - rgbs.min())
rgbs = rgbs.cpu().numpy()
disps = disps.cpu().numpy()
# disps_max_idx = int(disps.size * 0.9)
# disps_max = disps.reshape(-1)[np.argpartition(disps.reshape(-1), disps_max_idx)[disps_max_idx]]
imageio.mimwrite(moviebase + 'rgb.mp4', to8b(rgbs), fps=30, quality=8)
imageio.mimwrite(moviebase + 'disp.mp4', to8b(disps / disps.max()), fps=30, quality=8)
# if args.use_viewdirs:
# render_kwargs_test['c2w_staticcam'] = render_poses[0][:3,:4]
# with torch.no_grad():
# rgbs_still, _ = render_path(render_poses, hwf, args.chunk, render_kwargs_test)
# render_kwargs_test['c2w_staticcam'] = None
# imageio.mimwrite(moviebase + 'rgb_still.mp4', to8b(rgbs_still), fps=30, quality=8)
if i % args.i_testset == 0 and i > 0:
testsavedir = os.path.join(basedir, expname, 'testset_{:06d}'.format(i))
os.makedirs(testsavedir, exist_ok=True)
print('test poses shape', poses.shape)
dummy_num = ((len(poses) - 1) // args.num_gpu + 1) * args.num_gpu - len(poses)
dummy_poses = torch.eye(3, 4).unsqueeze(0).expand(dummy_num, 3, 4).type_as(render_poses)
print(f"Append {dummy_num} # of poses to fill all the GPUs")
with torch.no_grad():
nerf.eval()
rgbs, _ = nerf(H, W, K, args.chunk, poses=torch.cat([poses, dummy_poses], dim=0).cuda(),
render_kwargs=render_kwargs_test)
rgbs = rgbs[:len(rgbs) - dummy_num]
rgbs_save = rgbs # (rgbs - rgbs.min()) / (rgbs.max() - rgbs.min())
# saving
for rgb_idx, rgb in enumerate(rgbs_save):
rgb8 = to8b(rgb.cpu().numpy())
filename = os.path.join(testsavedir, f'{rgb_idx:03d}.png')
imageio.imwrite(filename, rgb8)
# evaluation
rgbs = rgbs[i_test]
target_rgb_ldr = imagesf[i_test]
test_mse = compute_img_metric(rgbs, target_rgb_ldr, 'mse')
test_psnr = compute_img_metric(rgbs, target_rgb_ldr, 'psnr')
test_ssim = compute_img_metric(rgbs, target_rgb_ldr, 'ssim')
test_lpips = compute_img_metric(rgbs, target_rgb_ldr, 'lpips')
if isinstance(test_lpips, torch.Tensor):
test_lpips = test_lpips.item()
tensorboard.add_scalar("Test MSE", test_mse, global_step)
tensorboard.add_scalar("Test PSNR", test_psnr, global_step)
tensorboard.add_scalar("Test SSIM", test_ssim, global_step)
tensorboard.add_scalar("Test LPIPS", test_lpips, global_step)
with open(test_metric_file, 'a') as outfile:
outfile.write(f"iter{i}/globalstep{global_step}: MSE:{test_mse:.8f} PSNR:{test_psnr:.8f}"
f" SSIM:{test_ssim:.8f} LPIPS:{test_lpips:.8f}\n")
print('Saved test set')
if i % args.i_tensorboard == 0:
tensorboard.add_scalar("Loss", loss.item(), global_step)
tensorboard.add_scalar("PSNR", psnr.item(), global_step)
for k, v in extra_loss.items():
tensorboard.add_scalar(k, v.item(), global_step)
if i % args.i_print == 0:
print(f"[TRAIN] Iter: {i} Loss: {loss.item()} PSNR: {psnr.item()}")
global_step += 1
if __name__ == '__main__':
torch.set_default_tensor_type('torch.cuda.FloatTensor')
train()
|
py | 1a37160c9960d2ebd96f14e849e53523656a0f2b | from ..action import Action
from db.models.user import User
from db.models.account import Account
from db.models.artist import Artist
from db.serializers.user_serializer import UserSerializer
from db.serializers.account_serializer import AccountSerializer
class ArtistAccount(Action):
arguments = ['user']
def perform(self):
user = self.user
user_info = User.objects.get(id=user.id)
artist_info = Artist.objects.get(user_id=user.id)
try:
account_info = Account.objects.get(artist_id=artist_info.id)
        except Account.DoesNotExist:
self.fail(dict(account_error='Please add your account information'))
serialize_user = UserSerializer(user_info)
serialize_account = AccountSerializer(account_info)
account_information = {
'email': serialize_user.data.get('email', ''),
'account_number': serialize_account.data.get('account_number', ''),
'account_name': serialize_account.data.get('account_name', ''),
'bank_name': serialize_account.data.get('bank_name', ''),
'bank_code': serialize_account.data.get('bank_code', '')
}
return account_information
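# Hedged usage sketch (assumption): Action subclasses appear to declare their
# inputs via `arguments` and return their result from perform(); a hypothetical
# call site could look like:
#   info = ArtistAccount(user=request.user).perform()
#   # -> {'email': ..., 'account_number': ..., 'account_name': ..., 'bank_name': ..., 'bank_code': ...}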
|
py | 1a37160dc038408eaceb8c7e1fe0e60fab5091ba | # coding:utf-8
"""
author:Qiu Yurui
"""
import tensorflow as tf
import numpy as np
import os
import sys
import cv2
import argparse
import glob
import tensorflow.contrib.slim as slim
import matplotlib.pyplot as plt
from tensorflow.python.framework import graph_util
from tensorflow.python import pywrap_tensorflow
# inception_v3 is used in exportpb_fromckpt below but was not imported here;
# this import assumes the TF-Slim models repo (tensorflow/models/research/slim)
# is available on PYTHONPATH.
from nets import inception_v3
def getweightpath(wdir):
ckpath = os.path.join(wdir, 'checkpoint')
fr = open(ckpath, 'rt')
fline = fr.readline()
fr.close()
ckname = fline.split('"')[1]
return os.path.join(wdir, ckname)
def exportpb_fromckpt(input_checkpoint, output_graph, output_node_names):
"""
:param input_checkpoint: ckpt model path
:param output_graph: save path of pb model
:return:
"""
with tf.Graph().as_default():
with tf.Session() as sess:
gbs = tf.Variable(0, trainable=False)
input_image = tf.placeholder(tf.float32, shape=[None, 224, 224, 3], name='images')
label_target = tf.placeholder(tf.int32, shape=[None, ], name='labels')
logits, end_points = inception_v3.inception_v3(input_image,
num_classes=2,
is_training=False,
dropout_keep_prob=0.0,
depth_multiplier=0.5)
# output = tf.identity(logits, name=output_node_names)
saver = tf.train.Saver()
saver.restore(sess, input_checkpoint)
output_graph_def = graph_util.convert_variables_to_constants(
sess=sess,
input_graph_def=sess.graph_def,
output_node_names=output_node_names.split(','))
with tf.gfile.GFile(output_graph, "wb") as f:
f.write(output_graph_def.SerializeToString())
print("%d ops in the final graph." % len(output_graph_def.node))
def freeze_graph(input_checkpoint, output_graph):
    '''
    :param input_checkpoint:
    :param output_graph: path where the frozen PB model will be saved
    :return:
    '''
    # checkpoint = tf.train.get_checkpoint_state(model_folder)  # check whether the ckpt files under the directory are usable
    # input_checkpoint = checkpoint.model_checkpoint_path  # get the ckpt file path
    # Specify the output node names; they must be nodes that exist in the original model
    output_node_names = "InceptionV3/Logits/SpatialSqueeze"
    saver = tf.train.import_meta_graph(input_checkpoint + '.meta', clear_devices=True)
    with tf.Session() as sess:
        saver.restore(sess, input_checkpoint)  # restore the graph and load the weights
        output_graph_def = graph_util.convert_variables_to_constants(  # freeze the model by turning variables into constants
            sess=sess,
            input_graph_def=sess.graph_def,  # same as sess.graph_def
            output_node_names=output_node_names.split(","))  # separate multiple output nodes with commas
        with tf.gfile.GFile(output_graph, "wb") as f:  # save the model
            f.write(output_graph_def.SerializeToString())  # serialize and write the graph
        print("%d ops in the final graph." % len(output_graph_def.node))  # number of op nodes in the final graph
if __name__ == '__main__':
root_path_model = ''
root_path_pb = ''
output_node_names = ''
checkpoint_path = os.path.join('/Users/qiuyurui/Downloads/model.ckpt-1758393')
reader = pywrap_tensorflow.NewCheckpointReader(checkpoint_path)
var_to_shape_map = reader.get_variable_to_shape_map()
for key in var_to_shape_map:
print('tensor_name: ', key)
freeze_graph('/Users/qiuyurui/Downloads/model.ckpt-1758393', 'test.pb')
if not os.path.exists(root_path_pb):
os.makedirs(root_path_pb)
dirs = glob.glob(root_path_model + '/*')
for dir in dirs:
if dir.startswith('.'):
continue
if not os.path.isdir(dir):
continue
number = dir.split('/')[-1].split('_')[-1]
ckpath = getweightpath(dir)
pbpath = os.path.join(root_path_pb, '{0}.pb'.format(number))
exportpb_fromckpt(ckpath, pbpath, output_node_names)
|
py | 1a371730738c26fb623caa2c7779dcd1f5d02e8f | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
@Project: Deep-Learning-in-Action
@File: __init__.py.py
@Author: 轩名
@Date: 2021/8/13 5:11 PM
"""
from .cnn import Q_network
from .replay_buff import Experience, ReplayBuffer
from .dataset import RLDataset
__all__ = [
"Q_network",
"Experience", "ReplayBuffer",
"RLDataset"
]
|
py | 1a3717e04ec2049d7fa9a27596fd1b7d35cbc337 | # data loader
from __future__ import print_function, division
import glob
import torch
from skimage import io, transform, color
import numpy as np
import random
import math
import matplotlib.pyplot as plt
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms, utils
from PIL import Image
# ==========================dataset load==========================
class RescaleT(object):
def __init__(self, output_size):
assert isinstance(output_size, (int, tuple))
self.output_size = output_size
def __call__(self, image):
new_size = (self.output_size, self.output_size)
print(image.shape)
print(new_size)
img = transform.resize(image, new_size, mode='constant')
print('rescale T')
return img
class Rescale(object):
def __init__(self, output_size):
assert isinstance(output_size, (int, tuple))
self.output_size = output_size
def __call__(self, image):
if random.random() >= 0.5:
image = image[::-1]
h, w = image.shape[:2]
if isinstance(self.output_size, int):
if h > w:
new_h, new_w = self.output_size * h / w, self.output_size
else:
new_h, new_w = self.output_size, self.output_size * w / h
else:
new_h, new_w = self.output_size
new_h, new_w = int(new_h), int(new_w)
# #resize the image to new_h x new_w and convert image from range [0,255] to [0,1]
img = transform.resize(image, (new_h, new_w), mode='constant')
return img
class RandomCrop(object):
def __init__(self, output_size):
assert isinstance(output_size, (int, tuple))
if isinstance(output_size, int):
self.output_size = (output_size, output_size)
else:
assert len(output_size) == 2
self.output_size = output_size
def __call__(self, image):
if random.random() >= 0.5:
image = image[::-1]
h, w = image.shape[:2]
new_h, new_w = self.output_size
top = np.random.randint(0, h - new_h)
left = np.random.randint(0, w - new_w)
image = image[top: top + new_h, left: left + new_w]
return image
class ToTensor(object):
"""Convert ndarrays in sample to Tensors."""
def __call__(self, image):
tmpImg = np.zeros((image.shape[0], image.shape[1], 3))
image = image / np.max(image)
if image.shape[2] == 1:
tmpImg[:, :, 0] = (image[:, :, 0] - 0.485) / 0.229
tmpImg[:, :, 1] = (image[:, :, 0] - 0.485) / 0.229
tmpImg[:, :, 2] = (image[:, :, 0] - 0.485) / 0.229
else:
tmpImg[:, :, 0] = (image[:, :, 0] - 0.485) / 0.229
tmpImg[:, :, 1] = (image[:, :, 1] - 0.456) / 0.224
tmpImg[:, :, 2] = (image[:, :, 2] - 0.406) / 0.225
        # transpose from HWC to CHW layout for PyTorch
tmpImg = tmpImg.transpose((2, 0, 1))
image = torch.from_numpy(tmpImg)
return image
class ToTensorLab(object):
"""Convert ndarrays in sample to Tensors."""
def __init__(self, flag=0):
self.flag = flag
def __call__(self, image):
# change the color space
if self.flag == 2: # with rgb and Lab colors
tmpImg = np.zeros((image.shape[0], image.shape[1], 6))
tmpImgt = np.zeros((image.shape[0], image.shape[1], 3))
if image.shape[2] == 1:
tmpImgt[:, :, 0] = image[:, :, 0]
tmpImgt[:, :, 1] = image[:, :, 0]
tmpImgt[:, :, 2] = image[:, :, 0]
else:
tmpImgt = image
tmpImgtl = color.rgb2lab(tmpImgt)
# nomalize image to range [0,1]
tmpImg[:, :, 0] = (tmpImgt[:, :, 0] - np.min(tmpImgt[:, :, 0])) / (
np.max(tmpImgt[:, :, 0]) - np.min(tmpImgt[:, :, 0]))
tmpImg[:, :, 1] = (tmpImgt[:, :, 1] - np.min(tmpImgt[:, :, 1])) / (
np.max(tmpImgt[:, :, 1]) - np.min(tmpImgt[:, :, 1]))
tmpImg[:, :, 2] = (tmpImgt[:, :, 2] - np.min(tmpImgt[:, :, 2])) / (
np.max(tmpImgt[:, :, 2]) - np.min(tmpImgt[:, :, 2]))
tmpImg[:, :, 3] = (tmpImgtl[:, :, 0] - np.min(
tmpImgtl[:, :, 0])) / (np.max(tmpImgtl[:, :, 0]) - np.min(
tmpImgtl[:, :, 0]))
tmpImg[:, :, 4] = (tmpImgtl[:, :, 1] - np.min(
tmpImgtl[:, :, 1])) / (np.max(tmpImgtl[:, :, 1]) - np.min(
tmpImgtl[:, :, 1]))
tmpImg[:, :, 5] = (tmpImgtl[:, :, 2] - np.min(
tmpImgtl[:, :, 2])) / (np.max(tmpImgtl[:, :, 2]) - np.min(
tmpImgtl[:, :, 2]))
# tmpImg = tmpImg/(np.max(tmpImg)-np.min(tmpImg))
tmpImg[:, :, 0] = (tmpImg[:, :, 0] - np.mean(
tmpImg[:, :, 0])) / np.std(tmpImg[:, :, 0])
tmpImg[:, :, 1] = (tmpImg[:, :, 1] - np.mean(
tmpImg[:, :, 1])) / np.std(tmpImg[:, :, 1])
tmpImg[:, :, 2] = (tmpImg[:, :, 2] - np.mean(
tmpImg[:, :, 2])) / np.std(tmpImg[:, :, 2])
tmpImg[:, :, 3] = (tmpImg[:, :, 3] - np.mean(
tmpImg[:, :, 3])) / np.std(tmpImg[:, :, 3])
tmpImg[:, :, 4] = (tmpImg[:, :, 4] - np.mean(
tmpImg[:, :, 4])) / np.std(tmpImg[:, :, 4])
tmpImg[:, :, 5] = (tmpImg[:, :, 5] - np.mean(
tmpImg[:, :, 5])) / np.std(tmpImg[:, :, 5])
elif self.flag == 1: # with Lab color
tmpImg = np.zeros((image.shape[0], image.shape[1], 3))
if image.shape[2] == 1:
tmpImg[:, :, 0] = image[:, :, 0]
tmpImg[:, :, 1] = image[:, :, 0]
tmpImg[:, :, 2] = image[:, :, 0]
else:
tmpImg = image
tmpImg = color.rgb2lab(tmpImg)
# tmpImg = tmpImg/(np.max(tmpImg)-np.min(tmpImg))
tmpImg[:, :, 0] = (tmpImg[:, :, 0] - np.min(tmpImg[:, :, 0])) / (
np.max(tmpImg[:, :, 0]) - np.min(tmpImg[:, :, 0]))
tmpImg[:, :, 1] = (tmpImg[:, :, 1] - np.min(tmpImg[:, :, 1])) / (
np.max(tmpImg[:, :, 1]) - np.min(tmpImg[:, :, 1]))
tmpImg[:, :, 2] = (tmpImg[:, :, 2] - np.min(tmpImg[:, :, 2])) / (
np.max(tmpImg[:, :, 2]) - np.min(tmpImg[:, :, 2]))
tmpImg[:, :, 0] = (tmpImg[:, :, 0] - np.mean(
tmpImg[:, :, 0])) / np.std(tmpImg[:, :, 0])
tmpImg[:, :, 1] = (tmpImg[:, :, 1] - np.mean(
tmpImg[:, :, 1])) / np.std(tmpImg[:, :, 1])
tmpImg[:, :, 2] = (tmpImg[:, :, 2] - np.mean(
tmpImg[:, :, 2])) / np.std(tmpImg[:, :, 2])
else: # with rgb color
tmpImg = np.zeros((image.shape[0], image.shape[1], 3))
print(f"tmpimg shape: {tmpImg.shape}")
image = image / np.max(image)
if image.shape[2] == 1:
tmpImg[:, :, 0] = (image[:, :, 0] - 0.485) / 0.229
tmpImg[:, :, 1] = (image[:, :, 0] - 0.485) / 0.229
tmpImg[:, :, 2] = (image[:, :, 0] - 0.485) / 0.229
else:
tmpImg[:, :, 0] = (image[:, :, 0] - 0.485) / 0.229
tmpImg[:, :, 1] = (image[:, :, 1] - 0.456) / 0.224
tmpImg[:, :, 2] = (image[:, :, 2] - 0.406) / 0.225
        # transpose from HWC to CHW layout for PyTorch
        # (equivalent torchvision call for the RGB branch above:
        #  transforms.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)))
tmpImg = tmpImg.transpose((2, 0, 1))
image = torch.from_numpy(tmpImg)
print('totensorlab')
print(f"final image shape: {image.shape}")
return image
class SalObjDataset(Dataset):
def __init__(self, img_name_list, lbl_name_list, transform=None):
# self.root_dir = root_dir
# self.image_name_list = glob.glob(image_dir+'*.png')
# self.label_name_list = glob.glob(label_dir+'*.png')
self.image_name_list = img_name_list
self.label_name_list = lbl_name_list
self.transform = transform
def __len__(self):
return len(self.image_name_list)
def __getitem__(self, idx):
# image = Image.open(self.image_name_list[idx])#io.imread(self.image_name_list[idx])
# label = Image.open(self.label_name_list[idx])#io.imread(self.label_name_list[idx])
image = io.imread(self.image_name_list[idx])
imname = self.image_name_list[idx]
imidx = np.array([idx])
if (0 == len(self.label_name_list)):
label_3 = np.zeros(image.shape)
else:
label_3 = io.imread(self.label_name_list[idx])
label = np.zeros(label_3.shape[0:2])
if (3 == len(label_3.shape)):
label = label_3[:, :, 0]
elif (2 == len(label_3.shape)):
label = label_3
if (3 == len(image.shape) and 2 == len(label.shape)):
label = label[:, :, np.newaxis]
elif (2 == len(image.shape) and 2 == len(label.shape)):
image = image[:, :, np.newaxis]
label = label[:, :, np.newaxis]
        # The transforms defined above operate on the image array only, so apply them
        # to the image before assembling the sample dict (passing the whole dict would
        # break RescaleT/ToTensorLab, which expect an ndarray).
        if self.transform:
            image = self.transform(image)
        sample = {'imidx': imidx, 'image': image, 'label': label}
        return sample
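# Illustrative usage sketch (not part of the original file): composing the transforms
# above with a DataLoader for label-free loading. The glob pattern, output size and
# batch size are assumptions.
if __name__ == '__main__':
    example_images = glob.glob('./test_images/*.png')
    example_dataset = SalObjDataset(
        img_name_list=example_images,
        lbl_name_list=[],
        transform=transforms.Compose([RescaleT(320), ToTensorLab(flag=0)]),
    )
    example_loader = DataLoader(example_dataset, batch_size=1, shuffle=False)
    for example_sample in example_loader:
        print(example_sample['image'].shape)  # expected: torch.Size([1, 3, 320, 320])
        break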
|
py | 1a37182b0f64521ae63fd19ea4e658caaeb4cb91 | from cw02.file_manager import FileManager
print(FileManager.read_file("file_manager_test.txt"))
FileManager.update_file("file_manager_test.txt", "\nBardzo")
|
py | 1a3718bef07736895c81927aa62c9cdcf4062fc1 | #!/usr/bin/env python
# coding: utf-8
from __future__ import print_function
from __future__ import absolute_import
import os
import tct
import sys
params = tct.readjson(sys.argv[1])
binabspath = sys.argv[2]
facts = tct.readjson(params['factsfile'])
milestones = tct.readjson(params['milestonesfile'])
resultfile = params['resultfile']
result = tct.readjson(resultfile)
loglist = result['loglist'] = result.get('loglist', [])
toolname = params['toolname']
toolname_pure = params['toolname_pure']
workdir = params['workdir']
exitcode = CONTINUE = 0
# ==================================================
# Make a copy of milestones for later inspection?
# --------------------------------------------------
if 0 or milestones.get('debug_always_make_milestones_snapshot'):
tct.make_snapshot_of_milestones(params['milestonesfile'], sys.argv[1])
# ==================================================
# Get and check required milestone(s)
# --------------------------------------------------
def milestones_get(name, default=None):
result = milestones.get(name, default)
loglist.append((name, result))
return result
def facts_get(name, default=None):
result = facts.get(name, default)
loglist.append((name, result))
return result
def params_get(name, default=None):
result = params.get(name, default)
loglist.append((name, result))
return result
# ==================================================
# define
# --------------------------------------------------
xeq_name_cnt = 0
reason = ''  # reason string passed to tct.save_the_result() at the end of the script
# ==================================================
# Check params
# --------------------------------------------------
if exitcode == CONTINUE:
loglist.append('CHECK PARAMS')
latex_file = milestones_get('latex_file')
if not (latex_file):
exitcode = 22
if exitcode == CONTINUE:
loglist.append('PARAMS are ok')
else:
loglist.append('PROBLEMS with params')
if CONTINUE != 0:
loglist.append({'CONTINUE': CONTINUE})
loglist.append('NOTHING to do')
# ==================================================
# work
# --------------------------------------------------
if exitcode == CONTINUE:
latex_make_file = os.path.join(os.path.split(latex_file)[0], 'Makefile')
import subprocess
def cmdline(cmd, cwd=None):
if cwd is None:
cwd = os.getcwd()
process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True, cwd=cwd)
out, err = process.communicate()
exitcode = process.returncode
return exitcode, cmd, out, err
destfile = latex_make_file
# a list of pairs for textreplacements to be done in latex
# sed -i"" 's/pdflatex /pdflatex -interaction=nonstopmode -halt-on-error /' $BUILDDIR/latex/Makefile
#-interaction=STRING set interaction mode (STRING=batchmode/nonstopmode/scrollmode/errorstopmode)
sed_replacements = [(r'PDFLATEX = pdflatex', r'PDFLATEX = pdflatex -interaction=nonstopmode -halt-on-error ')]
for searchstring, replacement in sed_replacements:
if exitcode != CONTINUE:
break
x = searchstring
x = searchstring.replace(r'~', r'\~')
y = replacement
y = replacement.replace(r'~', r'\~')
cmdlist = [
'sed',
'--in-place',
"'s~%s~%s~'" % (x, y),
destfile
]
exitcode, cmd, out, err = cmdline(' '.join(cmdlist))
loglist.append([exitcode, cmd, out, err])
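        # For illustration (assumed Makefile content), the joined command looks like:
        #   sed --in-place 's~PDFLATEX = pdflatex~PDFLATEX = pdflatex -interaction=nonstopmode -halt-on-error ~' <path>/Makefile
        # '~' serves as the sed delimiter, which is why it is escaped in x and y above.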
# ==================================================
# Set MILESTONE
# --------------------------------------------------
if exitcode == CONTINUE:
builds_successful = milestones.get('builds_successful', [])
builds_successful.append('latex')
result['MILESTONES'].append({
'latex_make_file': latex_make_file,
'latex_make_file_tweaked': True,
'builds_successful': builds_successful,
})
# ==================================================
# save result
# --------------------------------------------------
tct.save_the_result(result, resultfile, params, facts, milestones, exitcode, CONTINUE, reason)
# ==================================================
# Return with proper exitcode
# --------------------------------------------------
sys.exit(exitcode)
|
py | 1a3718ddb34acfc8e98947caf9274fbad6021e0f | import datetime
from json.decoder import JSONDecodeError
import python_http_client.exceptions
from twilio.base.exceptions import TwilioException
import tornado
from tornado.ioloop import IOLoop
import io
import math
from dateutil.parser import isoparse
from sqlalchemy.orm import joinedload
from sqlalchemy import func, or_, tuple_
import arrow
from marshmallow import Schema, fields
from marshmallow.exceptions import ValidationError
import functools
import healpix_alchemy as ha
from baselayer.app.access import permissions, auth_or_token
from baselayer.app.env import load_env
from baselayer.app.model_util import recursive_to_dict
from ..base import BaseHandler
from ...models import (
DBSession,
Allocation,
Annotation,
Comment,
Instrument,
Obj,
Source,
Token,
Photometry,
Group,
FollowupRequest,
ClassicalAssignment,
ObservingRun,
SourceNotification,
Classification,
Taxonomy,
Listing,
Spectrum,
SourceView,
)
from ...utils.offset import (
get_nearby_offset_stars,
facility_parameters,
source_image_parameters,
get_finding_chart,
_calculate_best_position_for_offset_stars,
)
from .candidate import grab_query_results, update_redshift_history_if_relevant
from .photometry import serialize
from .color_mag import get_color_mag
SOURCES_PER_PAGE = 100
_, cfg = load_env()
def apply_active_or_requested_filtering(query, include_requested, requested_only):
if include_requested:
query = query.filter(or_(Source.requested.is_(True), Source.active.is_(True)))
elif not requested_only:
query = query.filter(Source.active.is_(True))
if requested_only:
query = query.filter(Source.active.is_(False)).filter(
Source.requested.is_(True)
)
return query
def add_ps1_thumbnail_and_push_ws_msg(obj_id, request_handler):
try:
obj = Obj.get_if_accessible_by(obj_id, request_handler.current_user)
obj.add_ps1_thumbnail()
request_handler.push_all(
action="skyportal/REFRESH_SOURCE", payload={"obj_key": obj.internal_key}
)
request_handler.push_all(
action="skyportal/REFRESH_CANDIDATE", payload={"id": obj.internal_key}
)
except Exception as e:
return request_handler.error(f"Unable to generate PS1 thumbnail URL: {e}")
finally:
DBSession.remove()
class SourceHandler(BaseHandler):
@auth_or_token
def head(self, obj_id=None):
"""
---
single:
description: Check if a Source exists
tags:
- sources
parameters:
- in: path
name: obj_id
required: true
schema:
type: string
responses:
200:
content:
application/json:
schema: Success
404:
content:
application/json:
schema: Error
"""
user_group_ids = [g.id for g in self.associated_user_object.accessible_groups]
num_s = (
DBSession()
.query(Source)
.filter(Source.obj_id == obj_id)
.filter(Source.group_id.in_(user_group_ids))
.count()
)
self.verify_and_commit()
if num_s > 0:
return self.success()
else:
self.set_status(404)
self.finish()
@auth_or_token
def get(self, obj_id=None):
"""
---
single:
description: Retrieve a source
tags:
- sources
parameters:
- in: path
name: obj_id
required: false
schema:
type: string
- in: query
name: includePhotometry
nullable: true
schema:
type: boolean
description: |
Boolean indicating whether to include associated photometry. Defaults to
false.
- in: query
name: includeComments
nullable: true
schema:
type: boolean
description: |
Boolean indicating whether to include comment metadata in response.
Defaults to false.
- in: query
name: includePhotometryExists
nullable: true
schema:
type: boolean
description: |
Boolean indicating whether to return if a source has any photometry points. Defaults to false.
- in: query
name: includeSpectrumExists
nullable: true
schema:
type: boolean
description: |
                Boolean indicating whether to return if a source has a spectrum. Defaults to false.
responses:
200:
content:
application/json:
schema: SingleObj
400:
content:
application/json:
schema: Error
multiple:
description: Retrieve all sources
tags:
- sources
parameters:
- in: query
name: ra
nullable: true
schema:
type: number
description: RA for spatial filtering (in decimal degrees)
- in: query
name: dec
nullable: true
schema:
type: number
description: Declination for spatial filtering (in decimal degrees)
- in: query
name: radius
nullable: true
schema:
type: number
description: Radius for spatial filtering if ra & dec are provided (in decimal degrees)
- in: query
name: sourceID
nullable: true
schema:
type: string
description: Portion of ID to filter on
- in: query
name: simbadClass
nullable: true
schema:
type: string
description: Simbad class to filter on
- in: query
name: hasTNSname
nullable: true
schema:
type: boolean
description: If true, return only those matches with TNS names
- in: query
name: numPerPage
nullable: true
schema:
type: integer
description: |
Number of sources to return per paginated request. Defaults to 100. Max 1000.
- in: query
name: pageNumber
nullable: true
schema:
type: integer
description: Page number for paginated query results. Defaults to 1
- in: query
name: totalMatches
nullable: true
schema:
type: integer
description: |
Used only in the case of paginating query results - if provided, this
allows for avoiding a potentially expensive query.count() call.
- in: query
name: startDate
nullable: true
schema:
type: string
description: |
Arrow-parseable date string (e.g. 2020-01-01). If provided, filter by
last_detected_at >= startDate
- in: query
name: endDate
nullable: true
schema:
type: string
description: |
Arrow-parseable date string (e.g. 2020-01-01). If provided, filter by
last_detected_at <= endDate
- in: query
name: listName
nullable: true
schema:
type: string
description: |
Get only sources saved to the querying user's list, e.g., "favorites".
- in: query
name: group_ids
nullable: true
schema:
type: list
items:
type: integer
description: |
If provided, filter only sources saved to one of these group IDs.
- in: query
name: includePhotometry
nullable: true
schema:
type: boolean
description: |
Boolean indicating whether to include associated photometry. Defaults to
false.
- in: query
name: includeColorMagnitude
nullable: true
schema:
type: boolean
description: |
Boolean indicating whether to include the color-magnitude data from Gaia.
This will only include data for objects that have an annotation
with the appropriate format: a key named Gaia that contains a dictionary
with keys named Mag_G, Mag_Bp, Mag_Rp, and Plx
(underscores and case are ignored when matching all the above keys).
The result is saved in a field named 'color_magnitude'.
If no data is available, returns an empty array.
Defaults to false (do not search for nor include this info).
- in: query
name: includeRequested
nullable: true
schema:
type: boolean
description: |
Boolean indicating whether to include requested saves. Defaults to
false.
- in: query
name: pendingOnly
nullable: true
schema:
type: boolean
description: |
Boolean indicating whether to only include requested/pending saves.
Defaults to false.
- in: query
name: savedBefore
nullable: true
schema:
type: string
description: |
Only return sources that were saved before this UTC datetime.
- in: query
name: savedAfter
nullable: true
schema:
type: string
description: |
Only return sources that were saved after this UTC datetime.
- in: query
name: saveSummary
nullable: true
schema:
type: boolean
description: |
Boolean indicating whether to only return the source save
information in the response (defaults to false). If true,
the response will contain a list of dicts with the following
schema under `response['data']['sources']`:
```
{
"group_id": 2,
"created_at": "2020-11-13T22:11:25.910271",
"saved_by_id": 1,
"saved_at": "2020-11-13T22:11:25.910271",
"requested": false,
"unsaved_at": null,
"modified": "2020-11-13T22:11:25.910271",
"obj_id": "16fil",
"active": true,
"unsaved_by_id": null
}
```
- in: query
name: sortBy
nullable: true
schema:
type: string
description: |
The field to sort by. Currently allowed options are ["id", "ra", "dec", "redshift", "saved_at"]
- in: query
name: sortOrder
nullable: true
schema:
type: string
description: |
The sort order - either "asc" or "desc". Defaults to "asc"
- in: query
name: includeComments
nullable: true
schema:
type: boolean
description: |
Boolean indicating whether to include comment metadata in response.
Defaults to false.
- in: query
name: includePhotometryExists
nullable: true
schema:
type: boolean
description: |
Boolean indicating whether to return if a source has any photometry points. Defaults to false.
- in: query
name: includeSpectrumExists
nullable: true
schema:
type: boolean
description: |
                Boolean indicating whether to return if a source has a spectrum. Defaults to false.
- in: query
name: classifications
nullable: true
schema:
type: array
items:
type: string
explode: false
style: simple
description: |
Comma-separated string of "taxonomy: classification" pair(s) to filter for sources matching
that/those classification(s), i.e. "Sitewide Taxonomy: Type II, Sitewide Taxonomy: AGN"
- in: query
name: minRedshift
nullable: true
schema:
type: number
description: |
If provided, return only sources with a redshift of at least this value
- in: query
name: maxRedshift
nullable: true
schema:
type: number
description: |
If provided, return only sources with a redshift of at most this value
- in: query
name: minPeakMagnitude
nullable: true
schema:
type: number
description: |
If provided, return only sources with a peak photometry magnitude of at least this value
- in: query
name: maxPeakMagnitude
nullable: true
schema:
type: number
description: |
If provided, return only sources with a peak photometry magnitude of at most this value
- in: query
name: minLatestMagnitude
nullable: true
schema:
type: number
description: |
If provided, return only sources whose latest photometry magnitude is at least this value
- in: query
name: maxLatestMagnitude
nullable: true
schema:
type: number
description: |
If provided, return only sources whose latest photometry magnitude is at most this value
- in: query
name: hasSpectrum
nullable: true
schema:
type: boolean
description: If true, return only those matches with at least one associated spectrum
responses:
200:
content:
application/json:
schema:
allOf:
- $ref: '#/components/schemas/Success'
- type: object
properties:
data:
type: object
properties:
sources:
type: array
items:
$ref: '#/components/schemas/Obj'
totalMatches:
type: integer
pageNumber:
type: integer
numPerPage:
type: integer
400:
content:
application/json:
schema: Error
"""
page_number = self.get_query_argument('pageNumber', None)
num_per_page = min(
int(self.get_query_argument("numPerPage", SOURCES_PER_PAGE)), 100
)
ra = self.get_query_argument('ra', None)
dec = self.get_query_argument('dec', None)
radius = self.get_query_argument('radius', None)
start_date = self.get_query_argument('startDate', None)
end_date = self.get_query_argument('endDate', None)
list_name = self.get_query_argument('listName', None)
sourceID = self.get_query_argument('sourceID', None) # Partial ID to match
include_photometry = self.get_query_argument("includePhotometry", False)
include_color_mag = self.get_query_argument("includeColorMagnitude", False)
include_requested = self.get_query_argument("includeRequested", False)
requested_only = self.get_query_argument("pendingOnly", False)
saved_after = self.get_query_argument('savedAfter', None)
saved_before = self.get_query_argument('savedBefore', None)
save_summary = self.get_query_argument('saveSummary', False)
sort_by = self.get_query_argument("sortBy", None)
sort_order = self.get_query_argument("sortOrder", "asc")
include_comments = self.get_query_argument("includeComments", False)
include_photometry_exists = self.get_query_argument(
"includePhotometryExists", False
)
include_spectrum_exists = self.get_query_argument(
"includeSpectrumExists", False
)
classifications = self.get_query_argument("classifications", None)
min_redshift = self.get_query_argument("minRedshift", None)
max_redshift = self.get_query_argument("maxRedshift", None)
min_peak_magnitude = self.get_query_argument("minPeakMagnitude", None)
max_peak_magnitude = self.get_query_argument("maxPeakMagnitude", None)
min_latest_magnitude = self.get_query_argument("minLatestMagnitude", None)
max_latest_magnitude = self.get_query_argument("maxLatestMagnitude", None)
has_spectrum = self.get_query_argument("hasSpectrum", False)
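        # Illustrative query string exercising several of the parameters parsed above
        # (all values are assumptions):
        #   ?ra=10.5&dec=-20.1&radius=0.5&hasSpectrum=true&sortBy=saved_at&sortOrder=desc&numPerPage=50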
# These are just throwaway helper classes to help with deserialization
class UTCTZnaiveDateTime(fields.DateTime):
"""
DateTime object that deserializes both timezone aware iso8601
strings and naive iso8601 strings into naive datetime objects
in utc
See discussion in https://github.com/Scille/umongo/issues/44#issuecomment-244407236
"""
def _deserialize(self, value, attr, data, **kwargs):
value = super()._deserialize(value, attr, data, **kwargs)
if value and value.tzinfo:
value = (value - value.utcoffset()).replace(tzinfo=None)
return value
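            # Illustrative behavior (not from the original code): "2021-01-01T02:00:00+02:00"
            # and "2021-01-01T00:00:00" both deserialize to the naive UTC value
            # datetime.datetime(2021, 1, 1, 0, 0).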
class Validator(Schema):
saved_after = UTCTZnaiveDateTime(required=False, missing=None)
saved_before = UTCTZnaiveDateTime(required=False, missing=None)
save_summary = fields.Boolean()
validator_instance = Validator()
params_to_be_validated = {}
if saved_after is not None:
params_to_be_validated['saved_after'] = saved_after
if saved_before is not None:
params_to_be_validated['saved_before'] = saved_before
if save_summary is not None:
params_to_be_validated['save_summary'] = save_summary
try:
validated = validator_instance.load(params_to_be_validated)
except ValidationError as e:
return self.error(f'Error parsing query params: {e.args[0]}.')
saved_after = validated['saved_after']
saved_before = validated['saved_before']
save_summary = validated['save_summary']
# parse the group ids:
group_ids = self.get_query_argument('group_ids', None)
if group_ids is not None:
try:
group_ids = [int(gid) for gid in group_ids.split(',')]
except ValueError:
return self.error(
                    f'Invalid group ids field ({group_ids}); could not parse all elements to integers'
)
user_accessible_group_ids = [g.id for g in self.current_user.accessible_groups]
simbad_class = self.get_query_argument('simbadClass', None)
has_tns_name = self.get_query_argument('hasTNSname', None)
total_matches = self.get_query_argument('totalMatches', None)
is_token_request = isinstance(self.current_user, Token)
if obj_id is not None:
s = Obj.get_if_accessible_by(
obj_id, self.current_user, options=[joinedload(Obj.thumbnails)]
)
if s is None:
return self.error("Source not found", status=404)
source_info = s.to_dict()
source_info["followup_requests"] = (
FollowupRequest.query_records_accessible_by(
self.current_user,
options=[
joinedload(FollowupRequest.allocation).joinedload(
Allocation.instrument
),
joinedload(FollowupRequest.allocation).joinedload(
Allocation.group
),
joinedload(FollowupRequest.requester),
],
)
.filter(FollowupRequest.obj_id == obj_id)
.filter(FollowupRequest.status != "deleted")
.all()
)
source_info["assignments"] = (
ClassicalAssignment.query_records_accessible_by(
self.current_user,
options=[
joinedload(ClassicalAssignment.run)
.joinedload(ObservingRun.instrument)
.joinedload(Instrument.telescope)
],
)
.filter(ClassicalAssignment.obj_id == obj_id)
.all()
)
if is_token_request:
# Logic determining whether to register front-end request as view lives in front-end
sv = SourceView(
obj_id=obj_id,
username_or_token_id=self.current_user.id,
is_token=True,
)
DBSession.add(sv)
# To keep loaded relationships from being cleared in verify_and_commit:
source_info = recursive_to_dict(source_info)
self.verify_and_commit()
if "ps1" not in [thumb.type for thumb in s.thumbnails]:
IOLoop.current().add_callback(
lambda: add_ps1_thumbnail_and_push_ws_msg(obj_id, self)
)
if include_comments:
comments = (
Comment.query_records_accessible_by(
self.current_user,
options=[
joinedload(Comment.author),
joinedload(Comment.groups),
],
)
.filter(Comment.obj_id == obj_id)
.all()
)
source_info["comments"] = sorted(
[
{
**{
k: v
for k, v in c.to_dict().items()
if k != "attachment_bytes"
},
"author": {
**c.author.to_dict(),
"gravatar_url": c.author.gravatar_url,
},
}
for c in comments
],
key=lambda x: x["created_at"],
reverse=True,
)
source_info["annotations"] = sorted(
Annotation.query_records_accessible_by(
self.current_user, options=[joinedload(Annotation.author)]
)
.filter(Annotation.obj_id == obj_id)
.all(),
key=lambda x: x.origin,
)
readable_classifications = (
Classification.query_records_accessible_by(self.current_user)
.filter(Classification.obj_id == obj_id)
.all()
)
readable_classifications_json = []
for classification in readable_classifications:
classification_dict = classification.to_dict()
classification_dict['groups'] = [
g.to_dict() for g in classification.groups
]
readable_classifications_json.append(classification_dict)
source_info["classifications"] = readable_classifications_json
source_info["last_detected_at"] = s.last_detected_at(self.current_user)
source_info["last_detected_mag"] = s.last_detected_mag(self.current_user)
source_info["peak_detected_at"] = s.peak_detected_at(self.current_user)
source_info["peak_detected_mag"] = s.peak_detected_mag(self.current_user)
source_info["gal_lat"] = s.gal_lat_deg
source_info["gal_lon"] = s.gal_lon_deg
source_info["luminosity_distance"] = s.luminosity_distance
source_info["dm"] = s.dm
source_info["angular_diameter_distance"] = s.angular_diameter_distance
if include_photometry:
photometry = (
Photometry.query_records_accessible_by(self.current_user)
.filter(Photometry.obj_id == obj_id)
.all()
)
source_info["photometry"] = [
serialize(phot, 'ab', 'flux') for phot in photometry
]
if include_photometry_exists:
source_info["photometry_exists"] = (
len(
Photometry.query_records_accessible_by(self.current_user)
.filter(Photometry.obj_id == obj_id)
.all()
)
> 0
)
if include_spectrum_exists:
source_info["spectrum_exists"] = (
len(
Spectrum.query_records_accessible_by(self.current_user)
.filter(Spectrum.obj_id == obj_id)
.all()
)
> 0
)
source_query = Source.query_records_accessible_by(self.current_user).filter(
Source.obj_id == source_info["id"]
)
source_query = apply_active_or_requested_filtering(
source_query, include_requested, requested_only
)
source_subquery = source_query.subquery()
groups = (
Group.query_records_accessible_by(self.current_user)
.join(source_subquery, Group.id == source_subquery.c.group_id)
.all()
)
source_info["groups"] = [g.to_dict() for g in groups]
for group in source_info["groups"]:
source_table_row = (
Source.query_records_accessible_by(self.current_user)
.filter(Source.obj_id == s.id, Source.group_id == group["id"])
.first()
)
if source_table_row is not None:
group["active"] = source_table_row.active
group["requested"] = source_table_row.requested
group["saved_at"] = source_table_row.saved_at
group["saved_by"] = (
source_table_row.saved_by.to_dict()
if source_table_row.saved_by is not None
else None
)
if include_color_mag:
source_info["color_magnitude"] = get_color_mag(
source_info["annotations"]
)
source_info = recursive_to_dict(source_info)
self.verify_and_commit()
return self.success(data=source_info)
# Fetch multiple sources
obj_query_options = [joinedload(Obj.thumbnails)]
obj_query = Obj.query_records_accessible_by(
self.current_user, options=obj_query_options
)
source_query = Source.query_records_accessible_by(self.current_user)
if list_name:
listing_subquery = Listing.query_records_accessible_by(
self.current_user
).subquery()
obj_query = obj_query.join(
listing_subquery, Obj.id == listing_subquery.c.obj_id
)
if classifications is not None or sort_by == "classification":
classification_subquery = Classification.query_records_accessible_by(
self.current_user
)
if classifications is not None:
taxonomy_subquery = Taxonomy.query_records_accessible_by(
self.current_user
).subquery()
classification_subquery = classification_subquery.join(
taxonomy_subquery,
Classification.taxonomy_id == taxonomy_subquery.c.id,
)
classification_subquery = classification_subquery.subquery()
obj_query = obj_query.join(
classification_subquery,
Obj.id == classification_subquery.c.obj_id,
isouter=True,
)
if sourceID:
obj_query = obj_query.filter(Obj.id.contains(sourceID.strip()))
if any([ra, dec, radius]):
if not all([ra, dec, radius]):
return self.error(
"If any of 'ra', 'dec' or 'radius' are "
"provided, all three are required."
)
try:
ra = float(ra)
dec = float(dec)
radius = float(radius)
except ValueError:
return self.error(
"Invalid values for ra, dec or radius - could not convert to float"
)
other = ha.Point(ra=ra, dec=dec)
obj_query = obj_query.filter(Obj.within(other, radius))
if start_date:
start_date = arrow.get(start_date.strip()).datetime
obj_query = obj_query.filter(
Obj.last_detected_at(self.current_user) >= start_date
)
if end_date:
end_date = arrow.get(end_date.strip()).datetime
obj_query = obj_query.filter(
Obj.last_detected_at(self.current_user) <= end_date
)
if saved_before:
source_query = source_query.filter(Source.saved_at <= saved_before)
if saved_after:
source_query = source_query.filter(Source.saved_at >= saved_after)
if list_name:
obj_query = obj_query.filter(
listing_subquery.c.list_name == list_name,
listing_subquery.c.user_id == self.associated_user_object.id,
)
if simbad_class:
obj_query = obj_query.filter(
func.lower(Obj.altdata['simbad']['class'].astext)
== simbad_class.lower()
)
if has_tns_name in ['true', True]:
obj_query = obj_query.filter(Obj.altdata['tns']['name'].isnot(None))
if has_spectrum in ["true", True]:
spectrum_subquery = Spectrum.query_records_accessible_by(
self.current_user
).subquery()
obj_query = obj_query.join(
spectrum_subquery, Obj.id == spectrum_subquery.c.obj_id
)
if min_redshift is not None:
try:
min_redshift = float(min_redshift)
except ValueError:
return self.error(
"Invalid values for minRedshift - could not convert to float"
)
obj_query = obj_query.filter(Obj.redshift >= min_redshift)
if max_redshift is not None:
try:
max_redshift = float(max_redshift)
except ValueError:
return self.error(
"Invalid values for maxRedshift - could not convert to float"
)
obj_query = obj_query.filter(Obj.redshift <= max_redshift)
if min_peak_magnitude is not None:
try:
min_peak_magnitude = float(min_peak_magnitude)
except ValueError:
return self.error(
"Invalid values for minPeakMagnitude - could not convert to float"
)
obj_query = obj_query.filter(
Obj.peak_detected_mag(self.current_user) >= min_peak_magnitude
)
if max_peak_magnitude is not None:
try:
max_peak_magnitude = float(max_peak_magnitude)
except ValueError:
return self.error(
"Invalid values for maxPeakMagnitude - could not convert to float"
)
obj_query = obj_query.filter(
Obj.peak_detected_mag(self.current_user) <= max_peak_magnitude
)
if min_latest_magnitude is not None:
try:
min_latest_magnitude = float(min_latest_magnitude)
except ValueError:
return self.error(
"Invalid values for minLatestMagnitude - could not convert to float"
)
obj_query = obj_query.filter(
Obj.last_detected_mag(self.current_user) >= min_latest_magnitude
)
if max_latest_magnitude is not None:
try:
max_latest_magnitude = float(max_latest_magnitude)
except ValueError:
return self.error(
"Invalid values for maxLatestMagnitude - could not convert to float"
)
obj_query = obj_query.filter(
Obj.last_detected_mag(self.current_user) <= max_latest_magnitude
)
if classifications is not None:
if isinstance(classifications, str) and "," in classifications:
classifications = [c.strip() for c in classifications.split(",")]
elif isinstance(classifications, str):
classifications = [classifications]
else:
return self.error(
"Invalid classifications value -- must provide at least one string value"
)
# Parse into tuples of taxonomy: classification
classifications = list(
map(
lambda c: (c.split(":")[0].strip(), c.split(":")[1].strip()),
classifications,
)
)
obj_query = obj_query.filter(
tuple_(
taxonomy_subquery.c.name, classification_subquery.c.classification
).in_(classifications)
)
source_query = apply_active_or_requested_filtering(
source_query, include_requested, requested_only
)
if group_ids is not None:
if not all(gid in user_accessible_group_ids for gid in group_ids):
return self.error(
f"One of the requested groups in '{group_ids}' is inaccessible to user."
)
source_query = source_query.filter(Source.group_id.in_(group_ids))
source_subquery = source_query.subquery()
query = obj_query.join(source_subquery, Obj.id == source_subquery.c.obj_id)
order_by = None
if sort_by is not None:
if sort_by == "id":
order_by = [Obj.id] if sort_order == "asc" else [Obj.id.desc()]
elif sort_by == "ra":
order_by = (
[Obj.ra.nullslast()]
if sort_order == "asc"
else [Obj.ra.desc().nullslast()]
)
elif sort_by == "dec":
order_by = (
[Obj.dec.nullslast()]
if sort_order == "asc"
else [Obj.dec.desc().nullslast()]
)
elif sort_by == "redshift":
order_by = (
[Obj.redshift.nullslast()]
if sort_order == "asc"
else [Obj.redshift.desc().nullslast()]
)
elif sort_by == "saved_at":
order_by = (
[source_subquery.c.saved_at]
if sort_order == "asc"
else [source_subquery.c.saved_at.desc()]
)
elif sort_by == "classification":
order_by = (
[classification_subquery.c.classification.nullslast()]
if sort_order == "asc"
else [classification_subquery.c.classification.desc().nullslast()]
)
if page_number:
try:
page_number = int(page_number)
except ValueError:
return self.error("Invalid page number value.")
try:
query_results = grab_query_results(
query,
total_matches,
page_number,
num_per_page,
"sources",
order_by=order_by,
)
except ValueError as e:
if "Page number out of range" in str(e):
return self.error("Page number out of range.")
raise
elif save_summary:
query_results = {"sources": source_query.all()}
else:
query_results = grab_query_results(
query,
total_matches,
None,
None,
"sources",
order_by=order_by,
)
if not save_summary:
# Records are Objs, not Sources
obj_list = []
for obj in query_results["sources"]:
obj_list.append(obj.to_dict())
if include_comments:
obj_list[-1]["comments"] = sorted(
[
{
k: v
for k, v in c.to_dict().items()
if k != "attachment_bytes"
}
for c in Comment.query_records_accessible_by(
self.current_user
)
.filter(Comment.obj_id == obj.id)
.all()
],
key=lambda x: x["created_at"],
reverse=True,
)
readable_classifications = (
Classification.query_records_accessible_by(self.current_user)
.filter(Classification.obj_id == obj.id)
.all()
)
readable_classifications_json = []
for classification in readable_classifications:
classification_dict = classification.to_dict()
classification_dict['groups'] = [
g.to_dict() for g in classification.groups
]
readable_classifications_json.append(classification_dict)
obj_list[-1]["classifications"] = readable_classifications_json
obj_list[-1]["annotations"] = sorted(
Annotation.query_records_accessible_by(self.current_user).filter(
Annotation.obj_id == obj.id
),
key=lambda x: x.origin,
)
obj_list[-1]["last_detected_at"] = obj.last_detected_at(
self.current_user
)
obj_list[-1]["last_detected_mag"] = obj.last_detected_mag(
self.current_user
)
obj_list[-1]["peak_detected_at"] = obj.peak_detected_at(
self.current_user
)
obj_list[-1]["peak_detected_mag"] = obj.peak_detected_mag(
self.current_user
)
obj_list[-1]["gal_lon"] = obj.gal_lon_deg
obj_list[-1]["gal_lat"] = obj.gal_lat_deg
obj_list[-1]["luminosity_distance"] = obj.luminosity_distance
obj_list[-1]["dm"] = obj.dm
obj_list[-1][
"angular_diameter_distance"
] = obj.angular_diameter_distance
if include_photometry:
photometry = Photometry.query_records_accessible_by(
self.current_user
).filter(Photometry.obj_id == obj.id)
obj_list[-1]["photometry"] = [
serialize(phot, 'ab', 'flux') for phot in photometry
]
if include_photometry_exists:
obj_list[-1]["photometry_exists"] = (
len(
Photometry.query_records_accessible_by(self.current_user)
.filter(Photometry.obj_id == obj.id)
.all()
)
> 0
)
if include_spectrum_exists:
obj_list[-1]["spectrum_exists"] = (
len(
Spectrum.query_records_accessible_by(self.current_user)
.filter(Spectrum.obj_id == obj.id)
.all()
)
> 0
)
source_query = Source.query_records_accessible_by(
self.current_user
).filter(Source.obj_id == obj_list[-1]["id"])
source_query = apply_active_or_requested_filtering(
source_query, include_requested, requested_only
)
source_subquery = source_query.subquery()
groups = (
Group.query_records_accessible_by(self.current_user)
.join(source_subquery, Group.id == source_subquery.c.group_id)
.all()
)
obj_list[-1]["groups"] = [g.to_dict() for g in groups]
for group in obj_list[-1]["groups"]:
source_table_row = (
Source.query_records_accessible_by(self.current_user)
.filter(
Source.obj_id == obj_list[-1]["id"],
Source.group_id == group["id"],
)
.first()
)
if source_table_row is not None:
group["active"] = source_table_row.active
group["requested"] = source_table_row.requested
group["saved_at"] = source_table_row.saved_at
group["saved_by"] = (
source_table_row.saved_by.to_dict()
if source_table_row.saved_by is not None
else None
)
if include_color_mag:
obj_list[-1]["color_magnitude"] = get_color_mag(
obj_list[-1]["annotations"]
)
query_results["sources"] = obj_list
query_results = recursive_to_dict(query_results)
self.verify_and_commit()
return self.success(data=query_results)
@permissions(['Upload data'])
def post(self):
"""
---
description: Add a new source
tags:
- sources
requestBody:
content:
application/json:
schema:
allOf:
- $ref: '#/components/schemas/ObjPost'
- type: object
properties:
group_ids:
type: array
items:
type: integer
description: |
List of associated group IDs. If not specified, all of the
user or token's groups will be used.
responses:
200:
content:
application/json:
schema:
allOf:
- $ref: '#/components/schemas/Success'
- type: object
properties:
data:
type: object
properties:
id:
type: string
description: New source ID
"""
data = self.get_json()
obj_already_exists = (
Obj.get_if_accessible_by(data["id"], self.current_user) is not None
)
schema = Obj.__schema__()
ra = data.get('ra', None)
dec = data.get('dec', None)
if ra is None and not obj_already_exists:
return self.error("RA must not be null for a new Obj")
if dec is None and not obj_already_exists:
return self.error("Dec must not be null for a new Obj")
user_group_ids = [g.id for g in self.current_user.groups]
user_accessible_group_ids = [g.id for g in self.current_user.accessible_groups]
if not user_group_ids:
return self.error(
"You must belong to one or more groups before " "you can add sources."
)
try:
group_ids = [
int(id)
for id in data.pop('group_ids')
if int(id) in user_accessible_group_ids
]
except KeyError:
group_ids = user_group_ids
if not group_ids:
return self.error(
"Invalid group_ids field. Please specify at least "
"one valid group ID that you belong to."
)
try:
obj = schema.load(data)
except ValidationError as e:
return self.error(
'Invalid/missing parameters: ' f'{e.normalized_messages()}'
)
groups = (
Group.query_records_accessible_by(self.current_user)
.filter(Group.id.in_(group_ids))
.all()
)
if not groups:
return self.error(
"Invalid group_ids field. Please specify at least "
"one valid group ID that you belong to."
)
update_redshift_history_if_relevant(data, obj, self.associated_user_object)
DBSession().add(obj)
for group in groups:
source = (
Source.query_records_accessible_by(self.current_user)
.filter(Source.obj_id == obj.id)
.filter(Source.group_id == group.id)
.first()
)
if source is not None:
source.active = True
source.saved_by = self.associated_user_object
else:
DBSession().add(
Source(
obj=obj, group=group, saved_by_id=self.associated_user_object.id
)
)
self.verify_and_commit()
if not obj_already_exists:
obj.add_linked_thumbnails()
self.push_all(
action="skyportal/REFRESH_SOURCE", payload={"obj_key": obj.internal_key}
)
self.push_all(
action="skyportal/REFRESH_CANDIDATE", payload={"id": obj.internal_key}
)
return self.success(data={"id": obj.id})
@permissions(['Upload data'])
def patch(self, obj_id):
"""
---
description: Update a source
tags:
- sources
parameters:
- in: path
name: obj_id
required: True
schema:
type: string
requestBody:
content:
application/json:
schema: ObjNoID
responses:
200:
content:
application/json:
schema: Success
400:
content:
application/json:
schema: Error
"""
data = self.get_json()
data['id'] = obj_id
schema = Obj.__schema__()
try:
obj = schema.load(data)
except ValidationError as e:
return self.error(
'Invalid/missing parameters: ' f'{e.normalized_messages()}'
)
update_redshift_history_if_relevant(data, obj, self.associated_user_object)
self.verify_and_commit()
self.push_all(
action="skyportal/REFRESH_SOURCE",
payload={"obj_key": obj.internal_key},
)
return self.success(action='skyportal/FETCH_SOURCES')
@permissions(['Manage sources'])
def delete(self, obj_id, group_id):
"""
---
description: Delete a source
tags:
- sources
parameters:
- in: path
name: obj_id
required: true
schema:
type: string
- in: path
name: group_id
required: true
schema:
type: string
responses:
200:
content:
application/json:
schema: Success
"""
if group_id not in [g.id for g in self.current_user.accessible_groups]:
return self.error("Inadequate permissions.")
s = (
Source.query_records_accessible_by(self.current_user, mode="update")
.filter(Source.obj_id == obj_id)
.filter(Source.group_id == group_id)
.first()
)
s.active = False
s.unsaved_by = self.current_user
self.verify_and_commit()
return self.success(action='skyportal/FETCH_SOURCES')
class SourceOffsetsHandler(BaseHandler):
@auth_or_token
async def get(self, obj_id):
"""
---
description: Retrieve offset stars to aid in spectroscopy
tags:
- sources
parameters:
- in: path
name: obj_id
required: true
schema:
type: string
- in: query
name: facility
nullable: true
schema:
type: string
enum: [Keck, Shane, P200]
description: Which facility to generate the starlist for
- in: query
name: num_offset_stars
nullable: true
schema:
type: integer
minimum: 0
maximum: 10
description: |
Requested number of offset stars (set to zero to get starlist
of just the source itself)
- in: query
name: obstime
nullable: True
schema:
type: string
description: |
datetime of observation in isoformat (e.g. 2020-12-30T12:34:10)
- in: query
name: use_ztfref
required: false
schema:
type: boolean
description: |
Use ZTFref catalog for offset star positions, otherwise Gaia DR2
responses:
200:
content:
application/json:
schema:
allOf:
- $ref: '#/components/schemas/Success'
- type: object
properties:
data:
type: object
properties:
facility:
type: string
enum: [Keck, Shane, P200]
description: Facility queried for starlist
starlist_str:
type: string
description: formatted starlist in facility format
starlist_info:
type: array
description: |
list of source and offset star information
items:
type: object
properties:
str:
type: string
description: single-line starlist format per object
ra:
type: number
format: float
description: object RA in degrees (J2000)
dec:
type: number
format: float
description: object DEC in degrees (J2000)
name:
type: string
description: object name
dras:
type: string
description: offset from object to source in RA
ddecs:
type: string
description: offset from object to source in DEC
mag:
type: number
format: float
description: |
magnitude of object (from
Gaia phot_rp_mean_mag)
ra:
type: number
format: float
description: source RA in degrees (J2000)
dec:
type: number
format: float
description: source DEC in degrees (J2000)
queries_issued:
type: integer
description: |
Number of times the catalog was queried to find
noffsets
noffsets:
type: integer
description: |
                                Number of suitable offset stars found (may be less
                                than requested)
query:
type: string
description: SQL query submitted to Gaia
400:
content:
application/json:
schema: Error
"""
source = Obj.get_if_accessible_by(obj_id, self.current_user)
if source is None:
return self.error('Source not found', status=404)
initial_pos = (source.ra, source.dec)
try:
best_ra, best_dec = _calculate_best_position_for_offset_stars(
Photometry.query_records_accessible_by(self.current_user)
.filter(Photometry.obj_id == source.id)
.all(),
fallback=(initial_pos[0], initial_pos[1]),
how="snr2",
)
except JSONDecodeError:
self.push_notification(
'Source position using photometry points failed.'
' Reverting to discovery position.'
)
best_ra, best_dec = initial_pos[0], initial_pos[1]
facility = self.get_query_argument('facility', 'Keck')
num_offset_stars = self.get_query_argument('num_offset_stars', '3')
use_ztfref = self.get_query_argument('use_ztfref', True)
if isinstance(use_ztfref, str):
use_ztfref = use_ztfref in ['t', 'True', 'true', 'yes', 'y']
obstime = self.get_query_argument(
'obstime', datetime.datetime.utcnow().isoformat()
)
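        # Illustrative query string for the parameters parsed above (values are assumptions):
        #   ?facility=Keck&num_offset_stars=3&use_ztfref=true&obstime=2021-01-01T00:00:00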
if not isinstance(isoparse(obstime), datetime.datetime):
return self.error('obstime is not valid isoformat')
if facility not in facility_parameters:
return self.error('Invalid facility')
radius_degrees = facility_parameters[facility]["radius_degrees"]
mag_limit = facility_parameters[facility]["mag_limit"]
min_sep_arcsec = facility_parameters[facility]["min_sep_arcsec"]
mag_min = facility_parameters[facility]["mag_min"]
try:
num_offset_stars = int(num_offset_stars)
except ValueError:
# could not handle inputs
return self.error('Invalid argument for `num_offset_stars`')
offset_func = functools.partial(
get_nearby_offset_stars,
best_ra,
best_dec,
obj_id,
how_many=num_offset_stars,
radius_degrees=radius_degrees,
mag_limit=mag_limit,
min_sep_arcsec=min_sep_arcsec,
starlist_type=facility,
mag_min=mag_min,
obstime=obstime,
allowed_queries=2,
use_ztfref=use_ztfref,
)
try:
(
starlist_info,
query_string,
queries_issued,
noffsets,
used_ztfref,
) = await IOLoop.current().run_in_executor(None, offset_func)
except ValueError:
return self.error("Error querying for nearby offset stars")
starlist_str = "\n".join(
[x["str"].replace(" ", " ") for x in starlist_info]
)
self.verify_and_commit()
return self.success(
data={
'facility': facility,
'starlist_str': starlist_str,
'starlist_info': starlist_info,
'ra': source.ra,
'dec': source.dec,
'noffsets': noffsets,
'queries_issued': queries_issued,
'query': query_string,
}
)
class SourceFinderHandler(BaseHandler):
@auth_or_token
async def get(self, obj_id):
"""
---
description: Generate a PDF/PNG finding chart to aid in spectroscopy
tags:
- sources
parameters:
- in: path
name: obj_id
required: true
schema:
type: string
- in: query
name: imsize
schema:
type: float
minimum: 2
maximum: 15
description: Image size in arcmin (square)
- in: query
name: facility
nullable: true
schema:
type: string
enum: [Keck, Shane, P200]
- in: query
name: image_source
nullable: true
schema:
type: string
enum: [desi, dss, ztfref]
description: Source of the image used in the finding chart
- in: query
name: use_ztfref
required: false
schema:
type: boolean
description: |
Use ZTFref catalog for offset star positions, otherwise DR2
- in: query
name: obstime
nullable: True
schema:
type: string
description: |
datetime of observation in isoformat (e.g. 2020-12-30T12:34:10)
- in: query
name: type
nullable: true
schema:
type: string
enum: [png, pdf]
description: |
output type
- in: query
name: num_offset_stars
schema:
type: integer
minimum: 0
maximum: 4
description: |
output desired number of offset stars [0,5] (default: 3)
responses:
200:
description: A PDF/PNG finding chart file
content:
application/pdf:
schema:
type: string
format: binary
image/png:
schema:
type: string
format: binary
400:
content:
application/json:
schema: Error
"""
source = Obj.get_if_accessible_by(obj_id, self.current_user)
if source is None:
return self.error('Source not found', status=404)
output_type = self.get_query_argument('type', 'pdf')
if output_type not in ["png", "pdf"]:
return self.error(f'Invalid argument for `type`: {output_type}')
imsize = self.get_query_argument('imsize', '4.0')
try:
imsize = float(imsize)
except ValueError:
# could not handle inputs
return self.error('Invalid argument for `imsize`')
if imsize < 2.0 or imsize > 15.0:
return self.error('The value for `imsize` is outside the allowed range')
initial_pos = (source.ra, source.dec)
try:
best_ra, best_dec = _calculate_best_position_for_offset_stars(
Photometry.query_records_accessible_by(self.current_user)
.filter(Photometry.obj_id == source.id)
.all(),
fallback=(initial_pos[0], initial_pos[1]),
how="snr2",
)
except JSONDecodeError:
self.push_notification(
'Source position using photometry points failed.'
' Reverting to discovery position.'
)
best_ra, best_dec = initial_pos[0], initial_pos[1]
facility = self.get_query_argument('facility', 'Keck')
image_source = self.get_query_argument('image_source', 'ztfref')
use_ztfref = self.get_query_argument('use_ztfref', True)
if isinstance(use_ztfref, str):
use_ztfref = use_ztfref in ['t', 'True', 'true', 'yes', 'y']
num_offset_stars = self.get_query_argument('num_offset_stars', '3')
try:
num_offset_stars = int(num_offset_stars)
except ValueError:
# could not handle inputs
return self.error('Invalid argument for `num_offset_stars`')
obstime = self.get_query_argument(
'obstime', datetime.datetime.utcnow().isoformat()
)
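        # Illustrative query string for the parameters parsed above (values are assumptions):
        #   ?type=pdf&imsize=4.0&facility=Keck&image_source=ztfref&num_offset_stars=3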
if not isinstance(isoparse(obstime), datetime.datetime):
return self.error('obstime is not valid isoformat')
if facility not in facility_parameters:
return self.error('Invalid facility')
if image_source not in source_image_parameters:
return self.error('Invalid source image')
radius_degrees = facility_parameters[facility]["radius_degrees"]
mag_limit = facility_parameters[facility]["mag_limit"]
min_sep_arcsec = facility_parameters[facility]["min_sep_arcsec"]
mag_min = facility_parameters[facility]["mag_min"]
finder = functools.partial(
get_finding_chart,
best_ra,
best_dec,
obj_id,
image_source=image_source,
output_format=output_type,
imsize=imsize,
how_many=num_offset_stars,
radius_degrees=radius_degrees,
mag_limit=mag_limit,
mag_min=mag_min,
min_sep_arcsec=min_sep_arcsec,
starlist_type=facility,
obstime=obstime,
use_source_pos_in_starlist=True,
allowed_queries=2,
queries_issued=0,
use_ztfref=use_ztfref,
)
self.push_notification(
'Finding chart generation in progress. Download will start soon.'
)
rez = await IOLoop.current().run_in_executor(None, finder)
filename = rez["name"]
image = io.BytesIO(rez["data"])
# Adapted from
# https://bhch.github.io/posts/2017/12/serving-large-files-with-tornado-safely-without-blocking/
mb = 1024 * 1024 * 1
chunk_size = 1 * mb
max_file_size = 15 * mb
if not (image.getbuffer().nbytes < max_file_size):
return self.error(
f"Refusing to send files larger than {max_file_size / mb:.2f} MB"
)
# do not send result via `.success`, since that creates a JSON
self.set_status(200)
if output_type == "pdf":
self.set_header("Content-Type", "application/pdf; charset='utf-8'")
self.set_header("Content-Disposition", f"attachment; filename={filename}")
else:
self.set_header("Content-type", f"image/{output_type}")
self.set_header(
'Cache-Control', 'no-store, no-cache, must-revalidate, max-age=0'
)
self.verify_and_commit()
for i in range(math.ceil(max_file_size / chunk_size)):
chunk = image.read(chunk_size)
if not chunk:
break
try:
self.write(chunk) # write the chunk to response
await self.flush() # send the chunk to client
except tornado.iostream.StreamClosedError:
# this means the client has closed the connection
# so break the loop
break
finally:
# deleting the chunk is very important because
# if many clients are downloading files at the
# same time, the chunks in memory will keep
# increasing and will eat up the RAM
del chunk
# pause the coroutine so other handlers can run
await tornado.gen.sleep(1e-9) # 1 ns
class SourceNotificationHandler(BaseHandler):
@auth_or_token
def post(self):
"""
---
description: Send out a new source notification
tags:
- notifications
requestBody:
content:
application/json:
schema:
type: object
properties:
additionalNotes:
type: string
description: |
Notes to append to the message sent out
groupIds:
type: array
items:
type: integer
description: |
List of IDs of groups whose members should get the notification (if they've opted in)
sourceId:
type: string
description: |
The ID of the Source's Obj the notification is being sent about
level:
type: string
description: |
Either 'soft' or 'hard', determines whether to send an email or email+SMS notification
required:
- groupIds
- sourceId
- level
responses:
200:
content:
application/json:
schema:
allOf:
- $ref: '#/components/schemas/Success'
- type: object
properties:
data:
type: object
properties:
id:
type: string
description: New SourceNotification ID
"""
if not cfg["notifications.enabled"]:
return self.error("Notifications are not enabled in current deployment.")
data = self.get_json()
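        # Illustrative request body (IDs and values are assumptions):
        #   {"groupIds": [1, 2], "sourceId": "ZTF21example", "level": "soft",
        #    "additionalNotes": "Bright and rising."}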
additional_notes = data.get("additionalNotes")
if isinstance(additional_notes, str):
additional_notes = data["additionalNotes"].strip()
else:
if additional_notes is not None:
return self.error(
"Invalid parameter `additionalNotes`: should be a string"
)
if data.get("groupIds") is None:
return self.error("Missing required parameter `groupIds`")
try:
group_ids = [int(gid) for gid in data["groupIds"]]
except ValueError:
return self.error(
"Invalid value provided for `groupIDs`; unable to parse "
"all list items to integers."
)
groups = (
Group.query_records_accessible_by(self.current_user)
.filter(Group.id.in_(group_ids))
.all()
)
if data.get("sourceId") is None:
return self.error("Missing required parameter `sourceId`")
source = Obj.get_if_accessible_by(data["sourceId"], self.current_user)
if source is None:
return self.error('Source not found', status=404)
source_id = data["sourceId"]
source_group_ids = [
row[0]
for row in Source.query_records_accessible_by(
self.current_user, columns=[Source.group_id]
)
.filter(Source.obj_id == source_id)
.all()
]
if bool(set(group_ids).difference(set(source_group_ids))):
forbidden_groups = list(set(group_ids) - set(source_group_ids))
return self.error(
"Insufficient recipient group access permissions. Not a member of "
f"group IDs: {forbidden_groups}."
)
if data.get("level") is None:
return self.error("Missing required parameter `level`")
if data["level"] not in ["soft", "hard"]:
return self.error(
"Invalid value provided for `level`: should be either 'soft' or 'hard'"
)
level = data["level"]
new_notification = SourceNotification(
source_id=source_id,
groups=groups,
additional_notes=additional_notes,
sent_by=self.associated_user_object,
level=level,
)
DBSession().add(new_notification)
try:
self.verify_and_commit()
except python_http_client.exceptions.UnauthorizedError:
return self.error(
"Twilio Sendgrid authorization error. Please ensure "
"valid Sendgrid API key is set in server environment as "
"per their setup docs."
)
except TwilioException:
return self.error(
"Twilio Communication SMS API authorization error. Please ensure "
"valid Twilio API key is set in server environment as "
"per their setup docs."
)
return self.success(data={'id': new_notification.id})
class PS1ThumbnailHandler(BaseHandler):
@auth_or_token
def post(self):
data = self.get_json()
obj_id = data.get("objID")
if obj_id is None:
            return self.error("Missing required parameter objID")
IOLoop.current().add_callback(
lambda: add_ps1_thumbnail_and_push_ws_msg(obj_id, self)
)
return self.success()
|
py | 1a3719233a1da2652b0e65dd0b55bd484f6d83ec | from __future__ import annotations
from functools import wraps
from sys import getsizeof
from typing import (
TYPE_CHECKING,
Any,
Callable,
Collection,
Hashable,
Iterable,
List,
Sequence,
Tuple,
cast,
)
import warnings
import numpy as np
from pandas._config import get_option
from pandas._libs import (
algos as libalgos,
index as libindex,
lib,
)
from pandas._libs.hashtable import duplicated
from pandas._typing import (
AnyArrayLike,
DtypeObj,
Scalar,
Shape,
)
from pandas.compat.numpy import function as nv
from pandas.errors import (
InvalidIndexError,
PerformanceWarning,
UnsortedIndexError,
)
from pandas.util._decorators import (
Appender,
cache_readonly,
doc,
)
from pandas.core.dtypes.cast import coerce_indexer_dtype
from pandas.core.dtypes.common import (
ensure_int64,
ensure_platform_int,
is_categorical_dtype,
is_hashable,
is_integer,
is_iterator,
is_list_like,
is_object_dtype,
is_scalar,
pandas_dtype,
)
from pandas.core.dtypes.dtypes import ExtensionDtype
from pandas.core.dtypes.generic import (
ABCDataFrame,
ABCDatetimeIndex,
ABCTimedeltaIndex,
)
from pandas.core.dtypes.missing import (
array_equivalent,
isna,
)
import pandas.core.algorithms as algos
from pandas.core.arrays import Categorical
from pandas.core.arrays.categorical import factorize_from_iterables
import pandas.core.common as com
from pandas.core.indexers import is_empty_indexer
import pandas.core.indexes.base as ibase
from pandas.core.indexes.base import (
Index,
_index_shared_docs,
ensure_index,
get_unanimous_names,
)
from pandas.core.indexes.frozen import FrozenList
from pandas.core.indexes.numeric import Int64Index
from pandas.core.ops.invalid import make_invalid_op
from pandas.core.sorting import (
get_group_index,
indexer_from_factorized,
lexsort_indexer,
)
from pandas.io.formats.printing import (
format_object_attrs,
format_object_summary,
pprint_thing,
)
if TYPE_CHECKING:
from pandas import (
CategoricalIndex,
DataFrame,
Series,
)
_index_doc_kwargs = dict(ibase._index_doc_kwargs)
_index_doc_kwargs.update(
{"klass": "MultiIndex", "target_klass": "MultiIndex or list of tuples"}
)
class MultiIndexUIntEngine(libindex.BaseMultiIndexCodesEngine, libindex.UInt64Engine):
"""
This class manages a MultiIndex by mapping label combinations to positive
integers.
"""
_base = libindex.UInt64Engine
def _codes_to_ints(self, codes):
"""
Transform combination(s) of uint64 in one uint64 (each), in a strictly
monotonic way (i.e. respecting the lexicographic order of integer
combinations): see BaseMultiIndexCodesEngine documentation.
Parameters
----------
codes : 1- or 2-dimensional array of dtype uint64
Combinations of integers (one per row)
Returns
-------
scalar or 1-dimensional array, of dtype uint64
Integer(s) representing one combination (each).
"""
# Shift the representation of each level by the pre-calculated number
# of bits:
codes <<= self.offsets
# Now sum and OR are in fact interchangeable. This is a simple
# composition of the (disjunct) significant bits of each level (i.e.
# each column in "codes") in a single positive integer:
if codes.ndim == 1:
# Single key
return np.bitwise_or.reduce(codes)
# Multiple keys
return np.bitwise_or.reduce(codes, axis=1)
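# Editor's sketch (not part of pandas): a rough worked example of the packing
# above.  With two levels of sizes 3 and 7, the per-level bit widths are
# ceil(log2(4)) == 2 and ceil(log2(8)) == 3, so ``self.offsets`` would be
# [3, 0]; the code row [2, 5] then packs to (2 << 3) | (5 << 0) == 21, and the
# packed integers sort in the same order as the original code tuples.
#
#   >>> import numpy as np
#   >>> codes = np.array([2, 5], dtype="uint64")
#   >>> offsets = np.array([3, 0], dtype="uint64")
#   >>> int(np.bitwise_or.reduce(codes << offsets))
#   21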
class MultiIndexPyIntEngine(libindex.BaseMultiIndexCodesEngine, libindex.ObjectEngine):
"""
This class manages those (extreme) cases in which the number of possible
label combinations overflows the 64 bits integers, and uses an ObjectEngine
containing Python integers.
"""
_base = libindex.ObjectEngine
def _codes_to_ints(self, codes):
"""
Transform combination(s) of uint64 in one Python integer (each), in a
strictly monotonic way (i.e. respecting the lexicographic order of
integer combinations): see BaseMultiIndexCodesEngine documentation.
Parameters
----------
codes : 1- or 2-dimensional array of dtype uint64
Combinations of integers (one per row)
Returns
-------
int, or 1-dimensional array of dtype object
Integer(s) representing one combination (each).
"""
# Shift the representation of each level by the pre-calculated number
# of bits. Since this can overflow uint64, first make sure we are
# working with Python integers:
codes = codes.astype("object") << self.offsets
# Now sum and OR are in fact interchangeable. This is a simple
# composition of the (disjunct) significant bits of each level (i.e.
# each column in "codes") in a single positive integer (per row):
if codes.ndim == 1:
# Single key
return np.bitwise_or.reduce(codes)
# Multiple keys
return np.bitwise_or.reduce(codes, axis=1)
def names_compat(meth):
"""
A decorator to allow either `name` or `names` keyword but not both.
This makes it easier to share code with base class.
"""
@wraps(meth)
def new_meth(self_or_cls, *args, **kwargs):
if "name" in kwargs and "names" in kwargs:
raise TypeError("Can only provide one of `names` and `name`")
elif "name" in kwargs:
kwargs["names"] = kwargs.pop("name")
return meth(self_or_cls, *args, **kwargs)
return new_meth
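# Editor's note (illustrative, not in the original source): on a decorated
# constructor such as ``MultiIndex.from_tuples`` the singular keyword is simply
# remapped, so the following two calls are equivalent, while passing both
# ``name`` and ``names`` raises ``TypeError``.
#
#   >>> pd.MultiIndex.from_tuples([(1, "a")], name=["x", "y"])
#   MultiIndex([(1, 'a')],
#              names=['x', 'y'])
#   >>> pd.MultiIndex.from_tuples([(1, "a")], names=["x", "y"])
#   MultiIndex([(1, 'a')],
#              names=['x', 'y'])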
class MultiIndex(Index):
"""
A multi-level, or hierarchical, index object for pandas objects.
Parameters
----------
levels : sequence of arrays
The unique labels for each level.
codes : sequence of arrays
Integers for each level designating which label at each location.
.. versionadded:: 0.24.0
sortorder : optional int
Level of sortedness (must be lexicographically sorted by that
level).
names : optional sequence of objects
Names for each of the index levels. (name is accepted for compat).
copy : bool, default False
Copy the meta-data.
verify_integrity : bool, default True
Check that the levels/codes are consistent and valid.
Attributes
----------
names
levels
codes
nlevels
levshape
Methods
-------
from_arrays
from_tuples
from_product
from_frame
set_levels
set_codes
to_frame
to_flat_index
sortlevel
droplevel
swaplevel
reorder_levels
remove_unused_levels
get_locs
See Also
--------
MultiIndex.from_arrays : Convert list of arrays to MultiIndex.
MultiIndex.from_product : Create a MultiIndex from the cartesian product
of iterables.
MultiIndex.from_tuples : Convert list of tuples to a MultiIndex.
MultiIndex.from_frame : Make a MultiIndex from a DataFrame.
Index : The base pandas Index type.
Notes
-----
See the `user guide
<https://pandas.pydata.org/pandas-docs/stable/user_guide/advanced.html>`__
for more.
Examples
--------
A new ``MultiIndex`` is typically constructed using one of the helper
methods :meth:`MultiIndex.from_arrays`, :meth:`MultiIndex.from_product`
and :meth:`MultiIndex.from_tuples`. For example (using ``.from_arrays``):
>>> arrays = [[1, 1, 2, 2], ['red', 'blue', 'red', 'blue']]
>>> pd.MultiIndex.from_arrays(arrays, names=('number', 'color'))
MultiIndex([(1, 'red'),
(1, 'blue'),
(2, 'red'),
(2, 'blue')],
names=['number', 'color'])
See further examples for how to construct a MultiIndex in the doc strings
of the mentioned helper methods.
"""
_hidden_attrs = Index._hidden_attrs | frozenset()
# initialize to zero-length tuples to make everything work
_typ = "multiindex"
_names = FrozenList()
_levels = FrozenList()
_codes = FrozenList()
_comparables = ["names"]
rename = Index.set_names
sortorder: int | None
# --------------------------------------------------------------------
# Constructors
def __new__(
cls,
levels=None,
codes=None,
sortorder=None,
names=None,
dtype=None,
copy=False,
name=None,
verify_integrity: bool = True,
):
# compat with Index
if name is not None:
names = name
if levels is None or codes is None:
raise TypeError("Must pass both levels and codes")
if len(levels) != len(codes):
raise ValueError("Length of levels and codes must be the same.")
if len(levels) == 0:
raise ValueError("Must pass non-zero number of levels/codes")
result = object.__new__(cls)
result._cache = {}
# we've already validated levels and codes, so shortcut here
result._set_levels(levels, copy=copy, validate=False)
result._set_codes(codes, copy=copy, validate=False)
result._names = [None] * len(levels)
if names is not None:
# handles name validation
result._set_names(names)
if sortorder is not None:
result.sortorder = int(sortorder)
else:
result.sortorder = sortorder
if verify_integrity:
new_codes = result._verify_integrity()
result._codes = new_codes
result._reset_identity()
return result
def _validate_codes(self, level: list, code: list):
"""
Reassign code values as -1 if their corresponding levels are NaN.
Parameters
----------
code : list
Code to reassign.
level : list
Level to check for missing values (NaN, NaT, None).
Returns
-------
new code where code value = -1 if it corresponds
to a level with missing values (NaN, NaT, None).
"""
null_mask = isna(level)
if np.any(null_mask):
code = np.where(null_mask[code], -1, code)
return code
def _verify_integrity(self, codes: list | None = None, levels: list | None = None):
"""
Parameters
----------
codes : optional list
Codes to check for validity. Defaults to current codes.
levels : optional list
Levels to check for validity. Defaults to current levels.
Raises
------
ValueError
If length of levels and codes don't match, if the codes for any
level would exceed level bounds, or there are any duplicate levels.
Returns
-------
new codes where code value = -1 if it corresponds to a
NaN level.
"""
# NOTE: Currently does not check, among other things, that cached
        # nlevels matches, nor that sortorder matches the actual sortorder.
codes = codes or self.codes
levels = levels or self.levels
if len(levels) != len(codes):
raise ValueError(
"Length of levels and codes must match. NOTE: "
"this index is in an inconsistent state."
)
codes_length = len(codes[0])
for i, (level, level_codes) in enumerate(zip(levels, codes)):
if len(level_codes) != codes_length:
raise ValueError(
f"Unequal code lengths: {[len(code_) for code_ in codes]}"
)
if len(level_codes) and level_codes.max() >= len(level):
raise ValueError(
f"On level {i}, code max ({level_codes.max()}) >= length of "
f"level ({len(level)}). NOTE: this index is in an "
"inconsistent state"
)
if len(level_codes) and level_codes.min() < -1:
raise ValueError(f"On level {i}, code value ({level_codes.min()}) < -1")
if not level.is_unique:
raise ValueError(
f"Level values must be unique: {list(level)} on level {i}"
)
if self.sortorder is not None:
if self.sortorder > _lexsort_depth(self.codes, self.nlevels):
raise ValueError(
"Value for sortorder must be inferior or equal to actual "
f"lexsort_depth: sortorder {self.sortorder} "
f"with lexsort_depth {_lexsort_depth(self.codes, self.nlevels)}"
)
codes = [
self._validate_codes(level, code) for level, code in zip(levels, codes)
]
new_codes = FrozenList(codes)
return new_codes
@classmethod
def from_arrays(cls, arrays, sortorder=None, names=lib.no_default) -> MultiIndex:
"""
Convert arrays to MultiIndex.
Parameters
----------
arrays : list / sequence of array-likes
Each array-like gives one level's value for each data point.
len(arrays) is the number of levels.
sortorder : int or None
Level of sortedness (must be lexicographically sorted by that
level).
names : list / sequence of str, optional
Names for the levels in the index.
Returns
-------
MultiIndex
See Also
--------
MultiIndex.from_tuples : Convert list of tuples to MultiIndex.
MultiIndex.from_product : Make a MultiIndex from cartesian product
of iterables.
MultiIndex.from_frame : Make a MultiIndex from a DataFrame.
Examples
--------
>>> arrays = [[1, 1, 2, 2], ['red', 'blue', 'red', 'blue']]
>>> pd.MultiIndex.from_arrays(arrays, names=('number', 'color'))
MultiIndex([(1, 'red'),
(1, 'blue'),
(2, 'red'),
(2, 'blue')],
names=['number', 'color'])
"""
error_msg = "Input must be a list / sequence of array-likes."
if not is_list_like(arrays):
raise TypeError(error_msg)
elif is_iterator(arrays):
arrays = list(arrays)
# Check if elements of array are list-like
for array in arrays:
if not is_list_like(array):
raise TypeError(error_msg)
# Check if lengths of all arrays are equal or not,
# raise ValueError, if not
for i in range(1, len(arrays)):
if len(arrays[i]) != len(arrays[i - 1]):
raise ValueError("all arrays must be same length")
codes, levels = factorize_from_iterables(arrays)
if names is lib.no_default:
names = [getattr(arr, "name", None) for arr in arrays]
return cls(
levels=levels,
codes=codes,
sortorder=sortorder,
names=names,
verify_integrity=False,
)
@classmethod
@names_compat
def from_tuples(
cls,
tuples: Iterable[tuple[Hashable, ...]],
sortorder: int | None = None,
names: Sequence[Hashable] | None = None,
) -> MultiIndex:
"""
Convert list of tuples to MultiIndex.
Parameters
----------
tuples : list / sequence of tuple-likes
Each tuple is the index of one row/column.
sortorder : int or None
Level of sortedness (must be lexicographically sorted by that
level).
names : list / sequence of str, optional
Names for the levels in the index.
Returns
-------
MultiIndex
See Also
--------
MultiIndex.from_arrays : Convert list of arrays to MultiIndex.
MultiIndex.from_product : Make a MultiIndex from cartesian product
of iterables.
MultiIndex.from_frame : Make a MultiIndex from a DataFrame.
Examples
--------
>>> tuples = [(1, 'red'), (1, 'blue'),
... (2, 'red'), (2, 'blue')]
>>> pd.MultiIndex.from_tuples(tuples, names=('number', 'color'))
MultiIndex([(1, 'red'),
(1, 'blue'),
(2, 'red'),
(2, 'blue')],
names=['number', 'color'])
"""
if not is_list_like(tuples):
raise TypeError("Input must be a list / sequence of tuple-likes.")
elif is_iterator(tuples):
tuples = list(tuples)
tuples = cast(Collection[Tuple[Hashable, ...]], tuples)
arrays: list[Sequence[Hashable]]
if len(tuples) == 0:
if names is None:
raise TypeError("Cannot infer number of levels from empty list")
arrays = [[]] * len(names)
elif isinstance(tuples, (np.ndarray, Index)):
if isinstance(tuples, Index):
tuples = np.asarray(tuples._values)
arrays = list(lib.tuples_to_object_array(tuples).T)
elif isinstance(tuples, list):
arrays = list(lib.to_object_array_tuples(tuples).T)
else:
arrs = zip(*tuples)
arrays = cast(List[Sequence[Hashable]], arrs)
return cls.from_arrays(arrays, sortorder=sortorder, names=names)
@classmethod
def from_product(
cls, iterables, sortorder=None, names=lib.no_default
) -> MultiIndex:
"""
Make a MultiIndex from the cartesian product of multiple iterables.
Parameters
----------
iterables : list / sequence of iterables
Each iterable has unique labels for each level of the index.
sortorder : int or None
Level of sortedness (must be lexicographically sorted by that
level).
names : list / sequence of str, optional
Names for the levels in the index.
.. versionchanged:: 1.0.0
If not explicitly provided, names will be inferred from the
elements of iterables if an element has a name attribute
Returns
-------
MultiIndex
See Also
--------
MultiIndex.from_arrays : Convert list of arrays to MultiIndex.
MultiIndex.from_tuples : Convert list of tuples to MultiIndex.
MultiIndex.from_frame : Make a MultiIndex from a DataFrame.
Examples
--------
>>> numbers = [0, 1, 2]
>>> colors = ['green', 'purple']
>>> pd.MultiIndex.from_product([numbers, colors],
... names=['number', 'color'])
MultiIndex([(0, 'green'),
(0, 'purple'),
(1, 'green'),
(1, 'purple'),
(2, 'green'),
(2, 'purple')],
names=['number', 'color'])
"""
from pandas.core.reshape.util import cartesian_product
if not is_list_like(iterables):
raise TypeError("Input must be a list / sequence of iterables.")
elif is_iterator(iterables):
iterables = list(iterables)
codes, levels = factorize_from_iterables(iterables)
if names is lib.no_default:
names = [getattr(it, "name", None) for it in iterables]
# codes are all ndarrays, so cartesian_product is lossless
codes = cartesian_product(codes)
return cls(levels, codes, sortorder=sortorder, names=names)
@classmethod
def from_frame(cls, df: DataFrame, sortorder=None, names=None) -> MultiIndex:
"""
Make a MultiIndex from a DataFrame.
.. versionadded:: 0.24.0
Parameters
----------
df : DataFrame
DataFrame to be converted to MultiIndex.
sortorder : int, optional
Level of sortedness (must be lexicographically sorted by that
level).
names : list-like, optional
If no names are provided, use the column names, or tuple of column
names if the columns is a MultiIndex. If a sequence, overwrite
names with the given sequence.
Returns
-------
MultiIndex
The MultiIndex representation of the given DataFrame.
See Also
--------
MultiIndex.from_arrays : Convert list of arrays to MultiIndex.
MultiIndex.from_tuples : Convert list of tuples to MultiIndex.
MultiIndex.from_product : Make a MultiIndex from cartesian product
of iterables.
Examples
--------
>>> df = pd.DataFrame([['HI', 'Temp'], ['HI', 'Precip'],
... ['NJ', 'Temp'], ['NJ', 'Precip']],
... columns=['a', 'b'])
>>> df
a b
0 HI Temp
1 HI Precip
2 NJ Temp
3 NJ Precip
>>> pd.MultiIndex.from_frame(df)
MultiIndex([('HI', 'Temp'),
('HI', 'Precip'),
('NJ', 'Temp'),
('NJ', 'Precip')],
names=['a', 'b'])
Using explicit names, instead of the column names
>>> pd.MultiIndex.from_frame(df, names=['state', 'observation'])
MultiIndex([('HI', 'Temp'),
('HI', 'Precip'),
('NJ', 'Temp'),
('NJ', 'Precip')],
names=['state', 'observation'])
"""
if not isinstance(df, ABCDataFrame):
raise TypeError("Input must be a DataFrame")
column_names, columns = zip(*df.items())
names = column_names if names is None else names
return cls.from_arrays(columns, sortorder=sortorder, names=names)
# --------------------------------------------------------------------
@cache_readonly
def _values(self) -> np.ndarray:
# We override here, since our parent uses _data, which we don't use.
values = []
for i in range(self.nlevels):
vals = self._get_level_values(i)
if is_categorical_dtype(vals.dtype):
vals = cast("CategoricalIndex", vals)
vals = vals._data._internal_get_values()
if isinstance(vals.dtype, ExtensionDtype) or isinstance(
vals, (ABCDatetimeIndex, ABCTimedeltaIndex)
):
vals = vals.astype(object)
# error: Incompatible types in assignment (expression has type "ndarray",
# variable has type "Index")
vals = np.array(vals, copy=False) # type: ignore[assignment]
values.append(vals)
arr = lib.fast_zip(values)
return arr
@property
def values(self) -> np.ndarray:
return self._values
@property
def array(self):
"""
Raises a ValueError for `MultiIndex` because there's no single
array backing a MultiIndex.
Raises
------
ValueError
"""
raise ValueError(
"MultiIndex has no single backing array. Use "
"'MultiIndex.to_numpy()' to get a NumPy array of tuples."
)
@cache_readonly
def dtypes(self) -> Series:
"""
Return the dtypes as a Series for the underlying MultiIndex
"""
from pandas import Series
return Series(
{
f"level_{idx}" if level.name is None else level.name: level.dtype
for idx, level in enumerate(self.levels)
}
)
def __len__(self) -> int:
return len(self.codes[0])
# --------------------------------------------------------------------
# Levels Methods
@cache_readonly
def levels(self) -> FrozenList:
# Use cache_readonly to ensure that self.get_locs doesn't repeatedly
# create new IndexEngine
# https://github.com/pandas-dev/pandas/issues/31648
result = [x._rename(name=name) for x, name in zip(self._levels, self._names)]
for level in result:
# disallow midx.levels[0].name = "foo"
level._no_setting_name = True
return FrozenList(result)
def _set_levels(
self,
levels,
level=None,
copy: bool = False,
validate: bool = True,
verify_integrity: bool = False,
) -> None:
        # This is NOT part of the levels property because it should not be
        # possible to set levels externally. User beware if you change
# _levels directly
if validate:
if len(levels) == 0:
raise ValueError("Must set non-zero number of levels.")
if level is None and len(levels) != self.nlevels:
raise ValueError("Length of levels must match number of levels.")
if level is not None and len(levels) != len(level):
raise ValueError("Length of levels must match length of level.")
if level is None:
new_levels = FrozenList(
ensure_index(lev, copy=copy)._view() for lev in levels
)
else:
level_numbers = [self._get_level_number(lev) for lev in level]
new_levels_list = list(self._levels)
for lev_num, lev in zip(level_numbers, levels):
new_levels_list[lev_num] = ensure_index(lev, copy=copy)._view()
new_levels = FrozenList(new_levels_list)
if verify_integrity:
new_codes = self._verify_integrity(levels=new_levels)
self._codes = new_codes
names = self.names
self._levels = new_levels
if any(names):
self._set_names(names)
self._reset_cache()
def set_levels(
self, levels, level=None, inplace=None, verify_integrity: bool = True
):
"""
Set new levels on MultiIndex. Defaults to returning new index.
Parameters
----------
levels : sequence or list of sequence
New level(s) to apply.
level : int, level name, or sequence of int/level names (default None)
Level(s) to set (None for all levels).
inplace : bool
If True, mutates in place.
.. deprecated:: 1.2.0
verify_integrity : bool, default True
If True, checks that levels and codes are compatible.
Returns
-------
new index (of same type and class...etc) or None
The same type as the caller or None if ``inplace=True``.
Examples
--------
>>> idx = pd.MultiIndex.from_tuples(
... [
... (1, "one"),
... (1, "two"),
... (2, "one"),
... (2, "two"),
... (3, "one"),
... (3, "two")
... ],
... names=["foo", "bar"]
... )
>>> idx
MultiIndex([(1, 'one'),
(1, 'two'),
(2, 'one'),
(2, 'two'),
(3, 'one'),
(3, 'two')],
names=['foo', 'bar'])
>>> idx.set_levels([['a', 'b', 'c'], [1, 2]])
MultiIndex([('a', 1),
('a', 2),
('b', 1),
('b', 2),
('c', 1),
('c', 2)],
names=['foo', 'bar'])
>>> idx.set_levels(['a', 'b', 'c'], level=0)
MultiIndex([('a', 'one'),
('a', 'two'),
('b', 'one'),
('b', 'two'),
('c', 'one'),
('c', 'two')],
names=['foo', 'bar'])
>>> idx.set_levels(['a', 'b'], level='bar')
MultiIndex([(1, 'a'),
(1, 'b'),
(2, 'a'),
(2, 'b'),
(3, 'a'),
(3, 'b')],
names=['foo', 'bar'])
If any of the levels passed to ``set_levels()`` exceeds the
existing length, all of the values from that argument will
be stored in the MultiIndex levels, though the values will
be truncated in the MultiIndex output.
>>> idx.set_levels([['a', 'b', 'c'], [1, 2, 3, 4]], level=[0, 1])
MultiIndex([('a', 1),
('a', 2),
('b', 1),
('b', 2),
('c', 1),
('c', 2)],
names=['foo', 'bar'])
>>> idx.set_levels([['a', 'b', 'c'], [1, 2, 3, 4]], level=[0, 1]).levels
FrozenList([['a', 'b', 'c'], [1, 2, 3, 4]])
"""
if inplace is not None:
warnings.warn(
"inplace is deprecated and will be removed in a future version.",
FutureWarning,
stacklevel=2,
)
else:
inplace = False
if is_list_like(levels) and not isinstance(levels, Index):
levels = list(levels)
level, levels = _require_listlike(level, levels, "Levels")
if inplace:
idx = self
else:
idx = self._view()
idx._reset_identity()
idx._set_levels(
levels, level=level, validate=True, verify_integrity=verify_integrity
)
if not inplace:
return idx
@property
def nlevels(self) -> int:
"""
Integer number of levels in this MultiIndex.
Examples
--------
>>> mi = pd.MultiIndex.from_arrays([['a'], ['b'], ['c']])
>>> mi
MultiIndex([('a', 'b', 'c')],
)
>>> mi.nlevels
3
"""
return len(self._levels)
@property
def levshape(self) -> Shape:
"""
A tuple with the length of each level.
Examples
--------
>>> mi = pd.MultiIndex.from_arrays([['a'], ['b'], ['c']])
>>> mi
MultiIndex([('a', 'b', 'c')],
)
>>> mi.levshape
(1, 1, 1)
"""
return tuple(len(x) for x in self.levels)
# --------------------------------------------------------------------
# Codes Methods
@property
def codes(self):
return self._codes
def _set_codes(
self,
codes,
level=None,
copy: bool = False,
validate: bool = True,
verify_integrity: bool = False,
) -> None:
if validate:
if level is None and len(codes) != self.nlevels:
raise ValueError("Length of codes must match number of levels")
if level is not None and len(codes) != len(level):
raise ValueError("Length of codes must match length of levels.")
if level is None:
new_codes = FrozenList(
_coerce_indexer_frozen(level_codes, lev, copy=copy).view()
for lev, level_codes in zip(self._levels, codes)
)
else:
level_numbers = [self._get_level_number(lev) for lev in level]
new_codes_list = list(self._codes)
for lev_num, level_codes in zip(level_numbers, codes):
lev = self.levels[lev_num]
new_codes_list[lev_num] = _coerce_indexer_frozen(
level_codes, lev, copy=copy
)
new_codes = FrozenList(new_codes_list)
if verify_integrity:
new_codes = self._verify_integrity(codes=new_codes)
self._codes = new_codes
self._reset_cache()
def set_codes(self, codes, level=None, inplace=None, verify_integrity: bool = True):
"""
Set new codes on MultiIndex. Defaults to returning new index.
.. versionadded:: 0.24.0
New name for deprecated method `set_labels`.
Parameters
----------
codes : sequence or list of sequence
New codes to apply.
level : int, level name, or sequence of int/level names (default None)
Level(s) to set (None for all levels).
inplace : bool
If True, mutates in place.
.. deprecated:: 1.2.0
verify_integrity : bool, default True
If True, checks that levels and codes are compatible.
Returns
-------
new index (of same type and class...etc) or None
The same type as the caller or None if ``inplace=True``.
Examples
--------
>>> idx = pd.MultiIndex.from_tuples(
... [(1, "one"), (1, "two"), (2, "one"), (2, "two")], names=["foo", "bar"]
... )
>>> idx
MultiIndex([(1, 'one'),
(1, 'two'),
(2, 'one'),
(2, 'two')],
names=['foo', 'bar'])
>>> idx.set_codes([[1, 0, 1, 0], [0, 0, 1, 1]])
MultiIndex([(2, 'one'),
(1, 'one'),
(2, 'two'),
(1, 'two')],
names=['foo', 'bar'])
>>> idx.set_codes([1, 0, 1, 0], level=0)
MultiIndex([(2, 'one'),
(1, 'two'),
(2, 'one'),
(1, 'two')],
names=['foo', 'bar'])
>>> idx.set_codes([0, 0, 1, 1], level='bar')
MultiIndex([(1, 'one'),
(1, 'one'),
(2, 'two'),
(2, 'two')],
names=['foo', 'bar'])
>>> idx.set_codes([[1, 0, 1, 0], [0, 0, 1, 1]], level=[0, 1])
MultiIndex([(2, 'one'),
(1, 'one'),
(2, 'two'),
(1, 'two')],
names=['foo', 'bar'])
"""
if inplace is not None:
warnings.warn(
"inplace is deprecated and will be removed in a future version.",
FutureWarning,
stacklevel=2,
)
else:
inplace = False
level, codes = _require_listlike(level, codes, "Codes")
if inplace:
idx = self
else:
idx = self._view()
idx._reset_identity()
idx._set_codes(codes, level=level, verify_integrity=verify_integrity)
if not inplace:
return idx
# --------------------------------------------------------------------
# Index Internals
@cache_readonly
def _engine(self):
# Calculate the number of bits needed to represent labels in each
# level, as log2 of their sizes (including -1 for NaN):
sizes = np.ceil(np.log2([len(level) + 1 for level in self.levels]))
# Sum bit counts, starting from the _right_....
lev_bits = np.cumsum(sizes[::-1])[::-1]
# ... in order to obtain offsets such that sorting the combination of
# shifted codes (one for each level, resulting in a unique integer) is
# equivalent to sorting lexicographically the codes themselves. Notice
# that each level needs to be shifted by the number of bits needed to
# represent the _previous_ ones:
offsets = np.concatenate([lev_bits[1:], [0]]).astype("uint64")
# Check the total number of bits needed for our representation:
if lev_bits[0] > 64:
# The levels would overflow a 64 bit uint - use Python integers:
return MultiIndexPyIntEngine(self.levels, self.codes, offsets)
return MultiIndexUIntEngine(self.levels, self.codes, offsets)
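    # Editor's sketch (not part of pandas): for levels of sizes 3 and 7, the
    # computation above yields sizes == [2., 3.], lev_bits == [5., 3.] and
    # offsets == [3, 0]; since lev_bits[0] == 5 <= 64 the UInt64-based engine
    # is chosen, and only very wide/deep indexes (total bits > 64) fall back
    # to the object-based engine.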
@property
def _constructor(self) -> Callable[..., MultiIndex]:
return type(self).from_tuples
@doc(Index._shallow_copy)
def _shallow_copy(self, values: np.ndarray, name=lib.no_default) -> MultiIndex:
names = name if name is not lib.no_default else self.names
return type(self).from_tuples(values, sortorder=None, names=names)
def _view(self) -> MultiIndex:
result = type(self)(
levels=self.levels,
codes=self.codes,
sortorder=self.sortorder,
names=self.names,
verify_integrity=False,
)
result._cache = self._cache.copy()
result._cache.pop("levels", None) # GH32669
return result
# --------------------------------------------------------------------
def copy(
self,
names=None,
dtype=None,
levels=None,
codes=None,
deep=False,
name=None,
):
"""
Make a copy of this object. Names, dtype, levels and codes can be
passed and will be set on new copy.
Parameters
----------
names : sequence, optional
dtype : numpy dtype or pandas type, optional
.. deprecated:: 1.2.0
levels : sequence, optional
.. deprecated:: 1.2.0
codes : sequence, optional
.. deprecated:: 1.2.0
deep : bool, default False
name : Label
Kept for compatibility with 1-dimensional Index. Should not be used.
Returns
-------
MultiIndex
Notes
-----
In most cases, there should be no functional difference from using
``deep``, but if ``deep`` is passed it will attempt to deepcopy.
This could be potentially expensive on large MultiIndex objects.
"""
names = self._validate_names(name=name, names=names, deep=deep)
if levels is not None:
warnings.warn(
"parameter levels is deprecated and will be removed in a future "
"version. Use the set_levels method instead.",
FutureWarning,
stacklevel=2,
)
if codes is not None:
warnings.warn(
"parameter codes is deprecated and will be removed in a future "
"version. Use the set_codes method instead.",
FutureWarning,
stacklevel=2,
)
if deep:
from copy import deepcopy
if levels is None:
levels = deepcopy(self.levels)
if codes is None:
codes = deepcopy(self.codes)
levels = levels if levels is not None else self.levels
codes = codes if codes is not None else self.codes
new_index = type(self)(
levels=levels,
codes=codes,
sortorder=self.sortorder,
names=names,
verify_integrity=False,
)
new_index._cache = self._cache.copy()
new_index._cache.pop("levels", None) # GH32669
if dtype:
warnings.warn(
"parameter dtype is deprecated and will be removed in a future "
"version. Use the astype method instead.",
FutureWarning,
stacklevel=2,
)
new_index = new_index.astype(dtype)
return new_index
def __array__(self, dtype=None) -> np.ndarray:
""" the array interface, return my values """
return self.values
def view(self, cls=None):
""" this is defined as a copy with the same identity """
result = self.copy()
result._id = self._id
return result
@doc(Index.__contains__)
def __contains__(self, key: Any) -> bool:
hash(key)
try:
self.get_loc(key)
return True
except (LookupError, TypeError, ValueError):
return False
@cache_readonly
def dtype(self) -> np.dtype:
return np.dtype("O")
def _is_memory_usage_qualified(self) -> bool:
""" return a boolean if we need a qualified .info display """
def f(level):
return "mixed" in level or "string" in level or "unicode" in level
return any(f(level) for level in self._inferred_type_levels)
@doc(Index.memory_usage)
def memory_usage(self, deep: bool = False) -> int:
# we are overwriting our base class to avoid
# computing .values here which could materialize
# a tuple representation unnecessarily
return self._nbytes(deep)
@cache_readonly
def nbytes(self) -> int:
""" return the number of bytes in the underlying data """
return self._nbytes(False)
def _nbytes(self, deep: bool = False) -> int:
"""
return the number of bytes in the underlying data
deeply introspect the level data if deep=True
include the engine hashtable
        *this is an internal routine*
"""
# for implementations with no useful getsizeof (PyPy)
objsize = 24
level_nbytes = sum(i.memory_usage(deep=deep) for i in self.levels)
label_nbytes = sum(i.nbytes for i in self.codes)
names_nbytes = sum(getsizeof(i, objsize) for i in self.names)
result = level_nbytes + label_nbytes + names_nbytes
# include our engine hashtable
result += self._engine.sizeof(deep=deep)
return result
# --------------------------------------------------------------------
# Rendering Methods
def _formatter_func(self, tup):
"""
Formats each item in tup according to its level's formatter function.
"""
formatter_funcs = [level._formatter_func for level in self.levels]
return tuple(func(val) for func, val in zip(formatter_funcs, tup))
def _format_data(self, name=None) -> str:
"""
Return the formatted data as a unicode string
"""
return format_object_summary(
self, self._formatter_func, name=name, line_break_each_value=True
)
def _format_attrs(self):
"""
Return a list of tuples of the (attr,formatted_value).
"""
return format_object_attrs(self, include_dtype=False)
def _format_native_types(self, na_rep="nan", **kwargs):
new_levels = []
new_codes = []
# go through the levels and format them
for level, level_codes in zip(self.levels, self.codes):
level_strs = level._format_native_types(na_rep=na_rep, **kwargs)
# add nan values, if there are any
mask = level_codes == -1
if mask.any():
nan_index = len(level_strs)
# numpy 1.21 deprecated implicit string casting
level_strs = level_strs.astype(str)
level_strs = np.append(level_strs, na_rep)
assert not level_codes.flags.writeable # i.e. copy is needed
level_codes = level_codes.copy() # make writeable
level_codes[mask] = nan_index
new_levels.append(level_strs)
new_codes.append(level_codes)
if len(new_levels) == 1:
# a single-level multi-index
return Index(new_levels[0].take(new_codes[0]))._format_native_types()
else:
# reconstruct the multi-index
mi = MultiIndex(
levels=new_levels,
codes=new_codes,
names=self.names,
sortorder=self.sortorder,
verify_integrity=False,
)
return mi._values
def format(
self,
name: bool | None = None,
formatter: Callable | None = None,
na_rep: str | None = None,
names: bool = False,
space: int = 2,
sparsify=None,
adjoin: bool = True,
) -> list:
if name is not None:
names = name
if len(self) == 0:
return []
stringified_levels = []
for lev, level_codes in zip(self.levels, self.codes):
na = na_rep if na_rep is not None else _get_na_rep(lev.dtype.type)
if len(lev) > 0:
formatted = lev.take(level_codes).format(formatter=formatter)
# we have some NA
mask = level_codes == -1
if mask.any():
formatted = np.array(formatted, dtype=object)
formatted[mask] = na
formatted = formatted.tolist()
else:
# weird all NA case
formatted = [
pprint_thing(na if isna(x) else x, escape_chars=("\t", "\r", "\n"))
for x in algos.take_nd(lev._values, level_codes)
]
stringified_levels.append(formatted)
result_levels = []
for lev, lev_name in zip(stringified_levels, self.names):
level = []
if names:
level.append(
pprint_thing(lev_name, escape_chars=("\t", "\r", "\n"))
if lev_name is not None
else ""
)
level.extend(np.array(lev, dtype=object))
result_levels.append(level)
if sparsify is None:
sparsify = get_option("display.multi_sparse")
if sparsify:
sentinel = ""
# GH3547 use value of sparsify as sentinel if it's "Falsey"
assert isinstance(sparsify, bool) or sparsify is lib.no_default
if sparsify in [False, lib.no_default]:
sentinel = sparsify
# little bit of a kludge job for #1217
result_levels = sparsify_labels(
result_levels, start=int(names), sentinel=sentinel
)
if adjoin:
from pandas.io.formats.format import get_adjustment
adj = get_adjustment()
return adj.adjoin(space, *result_levels).split("\n")
else:
return result_levels
# --------------------------------------------------------------------
# Names Methods
def _get_names(self) -> FrozenList:
return FrozenList(self._names)
def _set_names(self, names, level=None, validate: bool = True):
"""
Set new names on index. Each name has to be a hashable type.
Parameters
----------
values : str or sequence
name(s) to set
level : int, level name, or sequence of int/level names (default None)
If the index is a MultiIndex (hierarchical), level(s) to set (None
for all levels). Otherwise level must be None
validate : bool, default True
validate that the names match level lengths
Raises
------
TypeError if each name is not hashable.
Notes
-----
sets names on levels. WARNING: mutates!
Note that you generally want to set this *after* changing levels, so
that it only acts on copies
"""
# GH 15110
# Don't allow a single string for names in a MultiIndex
if names is not None and not is_list_like(names):
raise ValueError("Names should be list-like for a MultiIndex")
names = list(names)
if validate:
if level is not None and len(names) != len(level):
raise ValueError("Length of names must match length of level.")
if level is None and len(names) != self.nlevels:
raise ValueError(
"Length of names must match number of levels in MultiIndex."
)
if level is None:
level = range(self.nlevels)
else:
level = [self._get_level_number(lev) for lev in level]
# set the name
for lev, name in zip(level, names):
if name is not None:
# GH 20527
# All items in 'names' need to be hashable:
if not is_hashable(name):
raise TypeError(
f"{type(self).__name__}.name must be a hashable type"
)
# error: Cannot determine type of '__setitem__'
self._names[lev] = name # type: ignore[has-type]
# If .levels has been accessed, the names in our cache will be stale.
self._reset_cache()
names = property(
fset=_set_names,
fget=_get_names,
doc="""
Names of levels in MultiIndex.
Examples
--------
>>> mi = pd.MultiIndex.from_arrays(
... [[1, 2], [3, 4], [5, 6]], names=['x', 'y', 'z'])
>>> mi
MultiIndex([(1, 3, 5),
(2, 4, 6)],
names=['x', 'y', 'z'])
>>> mi.names
FrozenList(['x', 'y', 'z'])
""",
)
# --------------------------------------------------------------------
@doc(Index._get_grouper_for_level)
def _get_grouper_for_level(self, mapper, level):
indexer = self.codes[level]
level_index = self.levels[level]
if mapper is not None:
# Handle group mapping function and return
level_values = self.levels[level].take(indexer)
grouper = level_values.map(mapper)
return grouper, None, None
codes, uniques = algos.factorize(indexer, sort=True)
if len(uniques) > 0 and uniques[0] == -1:
# Handle NAs
mask = indexer != -1
ok_codes, uniques = algos.factorize(indexer[mask], sort=True)
codes = np.empty(len(indexer), dtype=indexer.dtype)
codes[mask] = ok_codes
codes[~mask] = -1
if len(uniques) < len(level_index):
# Remove unobserved levels from level_index
level_index = level_index.take(uniques)
else:
# break references back to us so that setting the name
# on the output of a groupby doesn't reflect back here.
level_index = level_index.copy()
if level_index._can_hold_na:
grouper = level_index.take(codes, fill_value=True)
else:
grouper = level_index.take(codes)
return grouper, codes, level_index
@cache_readonly
def inferred_type(self) -> str:
return "mixed"
def _get_level_number(self, level) -> int:
count = self.names.count(level)
if (count > 1) and not is_integer(level):
raise ValueError(
f"The name {level} occurs multiple times, use a level number"
)
try:
level = self.names.index(level)
except ValueError as err:
if not is_integer(level):
raise KeyError(f"Level {level} not found") from err
elif level < 0:
level += self.nlevels
if level < 0:
orig_level = level - self.nlevels
raise IndexError(
f"Too many levels: Index has only {self.nlevels} levels, "
f"{orig_level} is not a valid level number"
) from err
# Note: levels are zero-based
elif level >= self.nlevels:
raise IndexError(
f"Too many levels: Index has only {self.nlevels} levels, "
f"not {level + 1}"
) from err
return level
@property
def _has_complex_internals(self) -> bool:
# used to avoid libreduction code paths, which raise or require conversion
return True
@cache_readonly
def is_monotonic_increasing(self) -> bool:
"""
return if the index is monotonic increasing (only equal or
increasing) values.
"""
if any(-1 in code for code in self.codes):
return False
if all(level.is_monotonic for level in self.levels):
# If each level is sorted, we can operate on the codes directly. GH27495
return libalgos.is_lexsorted(
[x.astype("int64", copy=False) for x in self.codes]
)
# reversed() because lexsort() wants the most significant key last.
values = [
self._get_level_values(i)._values for i in reversed(range(len(self.levels)))
]
try:
sort_order = np.lexsort(values)
return Index(sort_order).is_monotonic
except TypeError:
# we have mixed types and np.lexsort is not happy
return Index(self._values).is_monotonic
@cache_readonly
def is_monotonic_decreasing(self) -> bool:
"""
return if the index is monotonic decreasing (only equal or
decreasing) values.
"""
# monotonic decreasing if and only if reverse is monotonic increasing
return self[::-1].is_monotonic_increasing
@cache_readonly
def _inferred_type_levels(self) -> list[str]:
""" return a list of the inferred types, one for each level """
return [i.inferred_type for i in self.levels]
@doc(Index.duplicated)
def duplicated(self, keep="first") -> np.ndarray:
shape = tuple(len(lev) for lev in self.levels)
ids = get_group_index(self.codes, shape, sort=False, xnull=False)
return duplicated(ids, keep)
# error: Cannot override final attribute "_duplicated"
# (previously declared in base class "IndexOpsMixin")
_duplicated = duplicated # type: ignore[misc]
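    # Illustrative doctest (editor's addition): duplicates are detected on the
    # integer codes via get_group_index, without materializing the tuples.
    #
    #   >>> pd.MultiIndex.from_arrays([[1, 1, 2], ["a", "a", "b"]]).duplicated()
    #   array([False,  True, False])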
def fillna(self, value=None, downcast=None):
"""
fillna is not implemented for MultiIndex
"""
raise NotImplementedError("isna is not defined for MultiIndex")
@doc(Index.dropna)
def dropna(self, how: str = "any") -> MultiIndex:
nans = [level_codes == -1 for level_codes in self.codes]
if how == "any":
indexer = np.any(nans, axis=0)
elif how == "all":
indexer = np.all(nans, axis=0)
else:
raise ValueError(f"invalid how option: {how}")
new_codes = [level_codes[~indexer] for level_codes in self.codes]
return self.set_codes(codes=new_codes)
def _get_level_values(self, level: int, unique: bool = False) -> Index:
"""
Return vector of label values for requested level,
equal to the length of the index
**this is an internal method**
Parameters
----------
level : int
unique : bool, default False
if True, drop duplicated values
Returns
-------
Index
"""
lev = self.levels[level]
level_codes = self.codes[level]
name = self._names[level]
if unique:
level_codes = algos.unique(level_codes)
filled = algos.take_nd(lev._values, level_codes, fill_value=lev._na_value)
return lev._shallow_copy(filled, name=name)
def get_level_values(self, level):
"""
Return vector of label values for requested level.
Length of returned vector is equal to the length of the index.
Parameters
----------
level : int or str
``level`` is either the integer position of the level in the
MultiIndex, or the name of the level.
Returns
-------
values : Index
Values is a level of this MultiIndex converted to
a single :class:`Index` (or subclass thereof).
Examples
--------
Create a MultiIndex:
>>> mi = pd.MultiIndex.from_arrays((list('abc'), list('def')))
>>> mi.names = ['level_1', 'level_2']
Get level values by supplying level as either integer or name:
>>> mi.get_level_values(0)
Index(['a', 'b', 'c'], dtype='object', name='level_1')
>>> mi.get_level_values('level_2')
Index(['d', 'e', 'f'], dtype='object', name='level_2')
"""
level = self._get_level_number(level)
values = self._get_level_values(level)
return values
@doc(Index.unique)
def unique(self, level=None):
if level is None:
return super().unique()
else:
level = self._get_level_number(level)
return self._get_level_values(level=level, unique=True)
def to_frame(self, index: bool = True, name=None) -> DataFrame:
"""
Create a DataFrame with the levels of the MultiIndex as columns.
Column ordering is determined by the DataFrame constructor with data as
a dict.
.. versionadded:: 0.24.0
Parameters
----------
index : bool, default True
Set the index of the returned DataFrame as the original MultiIndex.
name : list / sequence of str, optional
The passed names should substitute index level names.
Returns
-------
DataFrame : a DataFrame containing the original MultiIndex data.
See Also
--------
DataFrame : Two-dimensional, size-mutable, potentially heterogeneous
tabular data.
Examples
--------
>>> mi = pd.MultiIndex.from_arrays([['a', 'b'], ['c', 'd']])
>>> mi
MultiIndex([('a', 'c'),
('b', 'd')],
)
>>> df = mi.to_frame()
>>> df
0 1
a c a c
b d b d
>>> df = mi.to_frame(index=False)
>>> df
0 1
0 a c
1 b d
>>> df = mi.to_frame(name=['x', 'y'])
>>> df
x y
a c a c
b d b d
"""
from pandas import DataFrame
if name is not None:
if not is_list_like(name):
raise TypeError("'name' must be a list / sequence of column names.")
if len(name) != len(self.levels):
raise ValueError(
"'name' should have same length as number of levels on index."
)
idx_names = name
else:
idx_names = self.names
# Guarantee resulting column order - PY36+ dict maintains insertion order
result = DataFrame(
{
(level if lvlname is None else lvlname): self._get_level_values(level)
for lvlname, level in zip(idx_names, range(len(self.levels)))
},
copy=False,
)
if index:
result.index = self
return result
def to_flat_index(self) -> Index:
"""
Convert a MultiIndex to an Index of Tuples containing the level values.
.. versionadded:: 0.24.0
Returns
-------
pd.Index
Index with the MultiIndex data represented in Tuples.
See Also
--------
MultiIndex.from_tuples : Convert flat index back to MultiIndex.
Notes
-----
This method will simply return the caller if called by anything other
than a MultiIndex.
Examples
--------
>>> index = pd.MultiIndex.from_product(
... [['foo', 'bar'], ['baz', 'qux']],
... names=['a', 'b'])
>>> index.to_flat_index()
Index([('foo', 'baz'), ('foo', 'qux'),
('bar', 'baz'), ('bar', 'qux')],
dtype='object')
"""
return Index(self._values, tupleize_cols=False)
@property
def _is_all_dates(self) -> bool:
return False
def is_lexsorted(self) -> bool:
warnings.warn(
"MultiIndex.is_lexsorted is deprecated as a public function, "
"users should use MultiIndex.is_monotonic_increasing instead.",
FutureWarning,
stacklevel=2,
)
return self._is_lexsorted()
def _is_lexsorted(self) -> bool:
"""
Return True if the codes are lexicographically sorted.
Returns
-------
bool
Examples
--------
In the below examples, the first level of the MultiIndex is sorted because
a<b<c, so there is no need to look at the next level.
>>> pd.MultiIndex.from_arrays([['a', 'b', 'c'], ['d', 'e', 'f']]).is_lexsorted()
True
>>> pd.MultiIndex.from_arrays([['a', 'b', 'c'], ['d', 'f', 'e']]).is_lexsorted()
True
In case there is a tie, the lexicographical sorting looks
at the next level of the MultiIndex.
>>> pd.MultiIndex.from_arrays([[0, 1, 1], ['a', 'b', 'c']]).is_lexsorted()
True
>>> pd.MultiIndex.from_arrays([[0, 1, 1], ['a', 'c', 'b']]).is_lexsorted()
False
>>> pd.MultiIndex.from_arrays([['a', 'a', 'b', 'b'],
... ['aa', 'bb', 'aa', 'bb']]).is_lexsorted()
True
>>> pd.MultiIndex.from_arrays([['a', 'a', 'b', 'b'],
... ['bb', 'aa', 'aa', 'bb']]).is_lexsorted()
False
"""
return self._lexsort_depth == self.nlevels
@property
def lexsort_depth(self):
warnings.warn(
"MultiIndex.is_lexsorted is deprecated as a public function, "
"users should use MultiIndex.is_monotonic_increasing instead.",
FutureWarning,
stacklevel=2,
)
return self._lexsort_depth
@cache_readonly
def _lexsort_depth(self) -> int:
"""
Compute and return the lexsort_depth, the number of levels of the
MultiIndex that are sorted lexically
Returns
-------
int
"""
if self.sortorder is not None:
return self.sortorder
return _lexsort_depth(self.codes, self.nlevels)
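    # Illustrative doctest (editor's addition): a fully sorted index has a
    # lexsort depth equal to nlevels, while one whose first level is out of
    # order has depth 0.
    #
    #   >>> pd.MultiIndex.from_arrays([["a", "b"], ["x", "y"]])._lexsort_depth
    #   2
    #   >>> pd.MultiIndex.from_arrays([["b", "a"], ["x", "y"]])._lexsort_depth
    #   0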
def _sort_levels_monotonic(self) -> MultiIndex:
"""
This is an *internal* function.
Create a new MultiIndex from the current to monotonically sorted
items IN the levels. This does not actually make the entire MultiIndex
monotonic, JUST the levels.
The resulting MultiIndex will have the same outward
appearance, meaning the same .values and ordering. It will also
be .equals() to the original.
Returns
-------
MultiIndex
Examples
--------
>>> mi = pd.MultiIndex(levels=[['a', 'b'], ['bb', 'aa']],
... codes=[[0, 0, 1, 1], [0, 1, 0, 1]])
>>> mi
MultiIndex([('a', 'bb'),
('a', 'aa'),
('b', 'bb'),
('b', 'aa')],
)
>>> mi.sort_values()
MultiIndex([('a', 'aa'),
('a', 'bb'),
('b', 'aa'),
('b', 'bb')],
)
"""
if self._is_lexsorted() and self.is_monotonic:
return self
new_levels = []
new_codes = []
for lev, level_codes in zip(self.levels, self.codes):
if not lev.is_monotonic:
try:
# indexer to reorder the levels
indexer = lev.argsort()
except TypeError:
pass
else:
lev = lev.take(indexer)
# indexer to reorder the level codes
indexer = ensure_platform_int(indexer)
ri = lib.get_reverse_indexer(indexer, len(indexer))
level_codes = algos.take_nd(ri, level_codes)
new_levels.append(lev)
new_codes.append(level_codes)
return MultiIndex(
new_levels,
new_codes,
names=self.names,
sortorder=self.sortorder,
verify_integrity=False,
)
def remove_unused_levels(self) -> MultiIndex:
"""
Create new MultiIndex from current that removes unused levels.
Unused level(s) means levels that are not expressed in the
labels. The resulting MultiIndex will have the same outward
appearance, meaning the same .values and ordering. It will
also be .equals() to the original.
Returns
-------
MultiIndex
Examples
--------
>>> mi = pd.MultiIndex.from_product([range(2), list('ab')])
>>> mi
MultiIndex([(0, 'a'),
(0, 'b'),
(1, 'a'),
(1, 'b')],
)
>>> mi[2:]
MultiIndex([(1, 'a'),
(1, 'b')],
)
The 0 from the first level is not represented
and can be removed
>>> mi2 = mi[2:].remove_unused_levels()
>>> mi2.levels
FrozenList([[1], ['a', 'b']])
"""
new_levels = []
new_codes = []
changed = False
for lev, level_codes in zip(self.levels, self.codes):
# Since few levels are typically unused, bincount() is more
# efficient than unique() - however it only accepts positive values
# (and drops order):
uniques = np.where(np.bincount(level_codes + 1) > 0)[0] - 1
has_na = int(len(uniques) and (uniques[0] == -1))
if len(uniques) != len(lev) + has_na:
if lev.isna().any() and len(uniques) == len(lev):
break
# We have unused levels
changed = True
# Recalculate uniques, now preserving order.
# Can easily be cythonized by exploiting the already existing
# "uniques" and stop parsing "level_codes" when all items
# are found:
uniques = algos.unique(level_codes)
if has_na:
na_idx = np.where(uniques == -1)[0]
# Just ensure that -1 is in first position:
uniques[[0, na_idx[0]]] = uniques[[na_idx[0], 0]]
# codes get mapped from uniques to 0:len(uniques)
# -1 (if present) is mapped to last position
code_mapping = np.zeros(len(lev) + has_na)
# ... and reassigned value -1:
code_mapping[uniques] = np.arange(len(uniques)) - has_na
level_codes = code_mapping[level_codes]
# new levels are simple
lev = lev.take(uniques[has_na:])
new_levels.append(lev)
new_codes.append(level_codes)
result = self.view()
if changed:
result._reset_identity()
result._set_levels(new_levels, validate=False)
result._set_codes(new_codes, validate=False)
return result
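    # Editor's sketch of the bincount trick used above (not part of pandas):
    # shifting the codes by one lets -1 (missing) be counted as well, and the
    # nonzero bins, shifted back, are exactly the observed code values.
    #
    #   >>> level_codes = np.array([2, 2, 3])
    #   >>> np.where(np.bincount(level_codes + 1) > 0)[0] - 1
    #   array([2, 3])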
# --------------------------------------------------------------------
# Pickling Methods
def __reduce__(self):
"""Necessary for making this object picklable"""
d = {
"levels": list(self.levels),
"codes": list(self.codes),
"sortorder": self.sortorder,
"names": list(self.names),
}
return ibase._new_Index, (type(self), d), None
# --------------------------------------------------------------------
def __getitem__(self, key):
if is_scalar(key):
key = com.cast_scalar_indexer(key, warn_float=True)
retval = []
for lev, level_codes in zip(self.levels, self.codes):
if level_codes[key] == -1:
retval.append(np.nan)
else:
retval.append(lev[level_codes[key]])
return tuple(retval)
else:
# in general cannot be sure whether the result will be sorted
sortorder = None
if com.is_bool_indexer(key):
key = np.asarray(key, dtype=bool)
sortorder = self.sortorder
elif isinstance(key, slice):
if key.step is None or key.step > 0:
sortorder = self.sortorder
elif isinstance(key, Index):
key = np.asarray(key)
new_codes = [level_codes[key] for level_codes in self.codes]
return MultiIndex(
levels=self.levels,
codes=new_codes,
names=self.names,
sortorder=sortorder,
verify_integrity=False,
)
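    # Illustrative doctest (editor's addition): a scalar key returns a plain
    # tuple of level values, while a slice or array-like key returns a new
    # MultiIndex built from the sliced codes.
    #
    #   >>> mi = pd.MultiIndex.from_arrays([["a", "b"], ["x", "y"]])
    #   >>> mi[0]
    #   ('a', 'x')
    #   >>> mi[1:]
    #   MultiIndex([('b', 'y')],
    #              )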
def _getitem_slice(self: MultiIndex, slobj: slice) -> MultiIndex:
"""
Fastpath for __getitem__ when we know we have a slice.
"""
sortorder = None
if slobj.step is None or slobj.step > 0:
sortorder = self.sortorder
new_codes = [level_codes[slobj] for level_codes in self.codes]
return type(self)(
levels=self.levels,
codes=new_codes,
names=self._names,
sortorder=sortorder,
verify_integrity=False,
)
@Appender(_index_shared_docs["take"] % _index_doc_kwargs)
def take(
self: MultiIndex,
indices,
axis: int = 0,
allow_fill: bool = True,
fill_value=None,
**kwargs,
) -> MultiIndex:
nv.validate_take((), kwargs)
indices = ensure_platform_int(indices)
# only fill if we are passing a non-None fill_value
allow_fill = self._maybe_disallow_fill(allow_fill, fill_value, indices)
na_value = -1
taken = [lab.take(indices) for lab in self.codes]
if allow_fill:
mask = indices == -1
if mask.any():
masked = []
for new_label in taken:
label_values = new_label
label_values[mask] = na_value
masked.append(np.asarray(label_values))
taken = masked
return MultiIndex(
levels=self.levels, codes=taken, names=self.names, verify_integrity=False
)
def append(self, other):
"""
Append a collection of Index options together
Parameters
----------
other : Index or list/tuple of indices
Returns
-------
appended : Index
"""
if not isinstance(other, (list, tuple)):
other = [other]
if all(
(isinstance(o, MultiIndex) and o.nlevels >= self.nlevels) for o in other
):
arrays = []
for i in range(self.nlevels):
label = self._get_level_values(i)
appended = [o._get_level_values(i) for o in other]
arrays.append(label.append(appended))
return MultiIndex.from_arrays(arrays, names=self.names)
to_concat = (self._values,) + tuple(k._values for k in other)
new_tuples = np.concatenate(to_concat)
# if all(isinstance(x, MultiIndex) for x in other):
try:
return MultiIndex.from_tuples(new_tuples, names=self.names)
except (TypeError, IndexError):
return Index(new_tuples)
def argsort(self, *args, **kwargs) -> np.ndarray:
return self._values.argsort(*args, **kwargs)
@Appender(_index_shared_docs["repeat"] % _index_doc_kwargs)
def repeat(self, repeats: int, axis=None) -> MultiIndex:
nv.validate_repeat((), {"axis": axis})
# error: Incompatible types in assignment (expression has type "ndarray",
# variable has type "int")
repeats = ensure_platform_int(repeats) # type: ignore[assignment]
return MultiIndex(
levels=self.levels,
codes=[
level_codes.view(np.ndarray).astype(np.intp).repeat(repeats)
for level_codes in self.codes
],
names=self.names,
sortorder=self.sortorder,
verify_integrity=False,
)
def drop(self, codes, level=None, errors="raise"):
"""
Make new MultiIndex with passed list of codes deleted
Parameters
----------
codes : array-like
Must be a list of tuples when level is not specified
level : int or level name, default None
errors : str, default 'raise'
Returns
-------
dropped : MultiIndex
"""
if level is not None:
return self._drop_from_level(codes, level, errors)
if not isinstance(codes, (np.ndarray, Index)):
try:
codes = com.index_labels_to_array(codes, dtype=np.dtype("object"))
except ValueError:
pass
inds = []
for level_codes in codes:
try:
loc = self.get_loc(level_codes)
# get_loc returns either an integer, a slice, or a boolean
# mask
if isinstance(loc, int):
inds.append(loc)
elif isinstance(loc, slice):
step = loc.step if loc.step is not None else 1
inds.extend(range(loc.start, loc.stop, step))
elif com.is_bool_indexer(loc):
if self._lexsort_depth == 0:
warnings.warn(
"dropping on a non-lexsorted multi-index "
"without a level parameter may impact performance.",
PerformanceWarning,
stacklevel=3,
)
loc = loc.nonzero()[0]
inds.extend(loc)
else:
msg = f"unsupported indexer of type {type(loc)}"
raise AssertionError(msg)
except KeyError:
if errors != "ignore":
raise
return self.delete(inds)
def _drop_from_level(self, codes, level, errors="raise") -> MultiIndex:
codes = com.index_labels_to_array(codes)
i = self._get_level_number(level)
index = self.levels[i]
values = index.get_indexer(codes)
# If nan should be dropped it will equal -1 here. We have to check which values
        # are not NaN yet equal -1; those labels are missing from the index
nan_codes = isna(codes)
values[(np.equal(nan_codes, False)) & (values == -1)] = -2
if index.shape[0] == self.shape[0]:
values[np.equal(nan_codes, True)] = -2
not_found = codes[values == -2]
if len(not_found) != 0 and errors != "ignore":
raise KeyError(f"labels {not_found} not found in level")
mask = ~algos.isin(self.codes[i], values)
return self[mask]
def swaplevel(self, i=-2, j=-1) -> MultiIndex:
"""
Swap level i with level j.
Calling this method does not change the ordering of the values.
Parameters
----------
i : int, str, default -2
First level of index to be swapped. Can pass level name as string.
Type of parameters can be mixed.
j : int, str, default -1
Second level of index to be swapped. Can pass level name as string.
Type of parameters can be mixed.
Returns
-------
MultiIndex
A new MultiIndex.
See Also
--------
Series.swaplevel : Swap levels i and j in a MultiIndex.
Dataframe.swaplevel : Swap levels i and j in a MultiIndex on a
particular axis.
Examples
--------
>>> mi = pd.MultiIndex(levels=[['a', 'b'], ['bb', 'aa']],
... codes=[[0, 0, 1, 1], [0, 1, 0, 1]])
>>> mi
MultiIndex([('a', 'bb'),
('a', 'aa'),
('b', 'bb'),
('b', 'aa')],
)
>>> mi.swaplevel(0, 1)
MultiIndex([('bb', 'a'),
('aa', 'a'),
('bb', 'b'),
('aa', 'b')],
)
"""
new_levels = list(self.levels)
new_codes = list(self.codes)
new_names = list(self.names)
i = self._get_level_number(i)
j = self._get_level_number(j)
new_levels[i], new_levels[j] = new_levels[j], new_levels[i]
new_codes[i], new_codes[j] = new_codes[j], new_codes[i]
new_names[i], new_names[j] = new_names[j], new_names[i]
return MultiIndex(
levels=new_levels, codes=new_codes, names=new_names, verify_integrity=False
)
def reorder_levels(self, order) -> MultiIndex:
"""
Rearrange levels using input order. May not drop or duplicate levels.
Parameters
----------
order : list of int or list of str
List representing new level order. Reference level by number
(position) or by key (label).
Returns
-------
MultiIndex
Examples
--------
>>> mi = pd.MultiIndex.from_arrays([[1, 2], [3, 4]], names=['x', 'y'])
>>> mi
MultiIndex([(1, 3),
(2, 4)],
names=['x', 'y'])
>>> mi.reorder_levels(order=[1, 0])
MultiIndex([(3, 1),
(4, 2)],
names=['y', 'x'])
>>> mi.reorder_levels(order=['y', 'x'])
MultiIndex([(3, 1),
(4, 2)],
names=['y', 'x'])
"""
order = [self._get_level_number(i) for i in order]
if len(order) != self.nlevels:
raise AssertionError(
f"Length of order must be same as number of levels ({self.nlevels}), "
f"got {len(order)}"
)
new_levels = [self.levels[i] for i in order]
new_codes = [self.codes[i] for i in order]
new_names = [self.names[i] for i in order]
return MultiIndex(
levels=new_levels, codes=new_codes, names=new_names, verify_integrity=False
)
def _get_codes_for_sorting(self) -> list[Categorical]:
"""
we are categorizing our codes by using the
available categories (all, not just observed)
excluding any missing ones (-1); this is in preparation
for sorting, where we need to disambiguate that -1 is not
        a valid value
"""
def cats(level_codes):
return np.arange(
np.array(level_codes).max() + 1 if len(level_codes) else 0,
dtype=level_codes.dtype,
)
return [
Categorical.from_codes(level_codes, cats(level_codes), ordered=True)
for level_codes in self.codes
]
def sortlevel(
self, level=0, ascending: bool = True, sort_remaining: bool = True
) -> tuple[MultiIndex, np.ndarray]:
"""
Sort MultiIndex at the requested level.
The result will respect the original ordering of the associated
factor at that level.
Parameters
----------
level : list-like, int or str, default 0
If a string is given, must be a name of the level.
If list-like must be names or ints of levels.
ascending : bool, default True
False to sort in descending order.
Can also be a list to specify a directed ordering.
        sort_remaining : bool, default True
            If True, sort by the remaining levels after sorting by ``level``.
Returns
-------
sorted_index : pd.MultiIndex
Resulting index.
indexer : np.ndarray
Indices of output values in original index.
Examples
--------
>>> mi = pd.MultiIndex.from_arrays([[0, 0], [2, 1]])
>>> mi
MultiIndex([(0, 2),
(0, 1)],
)
>>> mi.sortlevel()
(MultiIndex([(0, 1),
(0, 2)],
), array([1, 0]))
>>> mi.sortlevel(sort_remaining=False)
(MultiIndex([(0, 2),
(0, 1)],
), array([0, 1]))
>>> mi.sortlevel(1)
(MultiIndex([(0, 1),
(0, 2)],
), array([1, 0]))
>>> mi.sortlevel(1, ascending=False)
(MultiIndex([(0, 2),
(0, 1)],
), array([0, 1]))
"""
if isinstance(level, (str, int)):
level = [level]
level = [self._get_level_number(lev) for lev in level]
sortorder = None
# we have a directed ordering via ascending
if isinstance(ascending, list):
if not len(level) == len(ascending):
raise ValueError("level must have same length as ascending")
indexer = lexsort_indexer(
[self.codes[lev] for lev in level], orders=ascending
)
# level ordering
else:
codes = list(self.codes)
shape = list(self.levshape)
# partition codes and shape
primary = tuple(codes[lev] for lev in level)
primshp = tuple(shape[lev] for lev in level)
            # Pop in reverse-sorted order so that removing an entry does not
            # shift the smaller indices that still need to be removed.
for lev in sorted(level, reverse=True):
codes.pop(lev)
shape.pop(lev)
if sort_remaining:
primary += primary + tuple(codes)
primshp += primshp + tuple(shape)
else:
sortorder = level[0]
indexer = indexer_from_factorized(primary, primshp, compress=False)
if not ascending:
indexer = indexer[::-1]
indexer = ensure_platform_int(indexer)
new_codes = [level_codes.take(indexer) for level_codes in self.codes]
new_index = MultiIndex(
codes=new_codes,
levels=self.levels,
names=self.names,
sortorder=sortorder,
verify_integrity=False,
)
return new_index, indexer
def reindex(
self, target, method=None, level=None, limit=None, tolerance=None
) -> tuple[MultiIndex, np.ndarray | None]:
"""
Create index with target's values (move/add/delete values as necessary)
Returns
-------
new_index : pd.MultiIndex
Resulting index
indexer : np.ndarray[np.intp] or None
Indices of output values in original index.
"""
# GH6552: preserve names when reindexing to non-named target
# (i.e. neither Index nor Series).
preserve_names = not hasattr(target, "names")
if level is not None:
if method is not None:
raise TypeError("Fill method not supported if level passed")
# GH7774: preserve dtype/tz if target is empty and not an Index.
# target may be an iterator
target = ibase.ensure_has_len(target)
if len(target) == 0 and not isinstance(target, Index):
idx = self.levels[level]
attrs = idx._get_attributes_dict()
attrs.pop("freq", None) # don't preserve freq
target = type(idx)._simple_new(np.empty(0, dtype=idx.dtype), **attrs)
else:
target = ensure_index(target)
target, indexer, _ = self._join_level(
target, level, how="right", keep_order=False
)
else:
target = ensure_index(target)
if self.equals(target):
indexer = None
else:
if self.is_unique:
indexer = self.get_indexer(
target, method=method, limit=limit, tolerance=tolerance
)
else:
raise ValueError("cannot handle a non-unique multi-index!")
if not isinstance(target, MultiIndex):
if indexer is None:
target = self
elif (indexer >= 0).all():
target = self.take(indexer)
else:
# hopefully?
target = MultiIndex.from_tuples(target)
if (
preserve_names
and target.nlevels == self.nlevels
and target.names != self.names
):
target = target.copy(deep=False)
target.names = self.names
return target, indexer
# --------------------------------------------------------------------
# Indexing Methods
def _check_indexing_error(self, key):
if not is_hashable(key) or is_iterator(key):
# We allow tuples if they are hashable, whereas other Index
# subclasses require scalar.
# We have to explicitly exclude generators, as these are hashable.
raise InvalidIndexError(key)
def _should_fallback_to_positional(self) -> bool:
"""
Should integer key(s) be treated as positional?
"""
# GH#33355
return self.levels[0]._should_fallback_to_positional()
def _get_values_for_loc(self, series: Series, loc, key):
"""
Do a positional lookup on the given Series, returning either a scalar
or a Series.
Assumes that `series.index is self`
"""
new_values = series._values[loc]
if is_scalar(loc):
return new_values
if len(new_values) == 1 and not self.nlevels > 1:
# If more than one level left, we can not return a scalar
return new_values[0]
new_index = self[loc]
new_index = maybe_droplevels(new_index, key)
new_ser = series._constructor(new_values, index=new_index, name=series.name)
return new_ser.__finalize__(series)
def _convert_listlike_indexer(self, keyarr):
"""
Parameters
----------
keyarr : list-like
Indexer to convert.
Returns
-------
tuple (indexer, keyarr)
indexer is an ndarray or None if cannot convert
keyarr are tuple-safe keys
"""
indexer, keyarr = super()._convert_listlike_indexer(keyarr)
# are we indexing a specific level
if indexer is None and len(keyarr) and not isinstance(keyarr[0], tuple):
level = 0
_, indexer = self.reindex(keyarr, level=level)
# take all
if indexer is None:
indexer = np.arange(len(self))
check = self.levels[0].get_indexer(keyarr)
mask = check == -1
if mask.any():
raise KeyError(f"{keyarr[mask]} not in index")
elif is_empty_indexer(indexer, keyarr):
# We get here when levels still contain values which are not
# actually in Index anymore
raise KeyError(f"{keyarr} not in index")
return indexer, keyarr
def _get_partial_string_timestamp_match_key(self, key):
"""
Translate any partial string timestamp matches in key, returning the
new key.
Only relevant for MultiIndex.
"""
# GH#10331
if isinstance(key, str) and self.levels[0]._supports_partial_string_indexing:
# Convert key '2016-01-01' to
# ('2016-01-01'[, slice(None, None, None)]+)
key = (key,) + (slice(None),) * (len(self.levels) - 1)
if isinstance(key, tuple):
# Convert (..., '2016-01-01', ...) in tuple to
# (..., slice('2016-01-01', '2016-01-01', None), ...)
new_key = []
for i, component in enumerate(key):
if (
isinstance(component, str)
and self.levels[i]._supports_partial_string_indexing
):
new_key.append(slice(component, component, None))
else:
new_key.append(component)
key = tuple(new_key)
return key
def _get_indexer(
self,
target: Index,
method: str | None = None,
limit: int | None = None,
tolerance=None,
) -> np.ndarray:
# returned ndarray is np.intp
# empty indexer
if not len(target):
return ensure_platform_int(np.array([]))
if not isinstance(target, MultiIndex):
try:
target = MultiIndex.from_tuples(target)
except (TypeError, ValueError):
# let's instead try with a straight Index
if method is None:
return Index(self._values).get_indexer(
target, method=method, limit=limit, tolerance=tolerance
)
# TODO: explicitly raise here? we only have one test that
# gets here, and it is checking that we raise with method="nearest"
if method == "pad" or method == "backfill":
if tolerance is not None:
raise NotImplementedError(
"tolerance not implemented yet for MultiIndex"
)
# TODO: get_indexer_with_fill docstring says values must be _sorted_
# but that doesn't appear to be enforced
indexer = self._engine.get_indexer_with_fill(
target=target._values, values=self._values, method=method, limit=limit
)
elif method == "nearest":
raise NotImplementedError(
"method='nearest' not implemented yet "
"for MultiIndex; see GitHub issue 9365"
)
else:
indexer = self._engine.get_indexer(target._values)
# Note: we only get here (in extant tests at least) with
# target.nlevels == self.nlevels
return ensure_platform_int(indexer)
def get_slice_bound(
self, label: Hashable | Sequence[Hashable], side: str, kind: str | None = None
) -> int:
"""
For an ordered MultiIndex, compute slice bound
that corresponds to given label.
        Returns leftmost (one-past-the-rightmost if ``side=='right'``) position
of given label.
Parameters
----------
label : object or tuple of objects
side : {'left', 'right'}
kind : {'loc', 'getitem', None}
Returns
-------
int
Index of label.
Notes
-----
This method only works if level 0 index of the MultiIndex is lexsorted.
Examples
--------
>>> mi = pd.MultiIndex.from_arrays([list('abbc'), list('gefd')])
Get the locations from the leftmost 'b' in the first level
until the end of the multiindex:
>>> mi.get_slice_bound('b', side="left")
1
Like above, but if you get the locations from the rightmost
'b' in the first level and 'f' in the second level:
>>> mi.get_slice_bound(('b','f'), side="right")
3
See Also
--------
MultiIndex.get_loc : Get location for a label or a tuple of labels.
MultiIndex.get_locs : Get location for a label/slice/list/mask or a
sequence of such.
"""
if not isinstance(label, tuple):
label = (label,)
return self._partial_tup_index(label, side=side)
def slice_locs(self, start=None, end=None, step=None, kind=None):
"""
For an ordered MultiIndex, compute the slice locations for input
labels.
The input labels can be tuples representing partial levels, e.g. for a
MultiIndex with 3 levels, you can pass a single value (corresponding to
the first level), or a 1-, 2-, or 3-tuple.
Parameters
----------
start : label or tuple, default None
If None, defaults to the beginning
end : label or tuple
If None, defaults to the end
step : int or None
Slice step
        kind : str, optional, default None
Returns
-------
(start, end) : (int, int)
Notes
-----
This method only works if the MultiIndex is properly lexsorted. So,
if only the first 2 levels of a 3-level MultiIndex are lexsorted,
you can only pass two levels to ``.slice_locs``.
Examples
--------
>>> mi = pd.MultiIndex.from_arrays([list('abbd'), list('deff')],
... names=['A', 'B'])
Get the slice locations from the beginning of 'b' in the first level
until the end of the multiindex:
>>> mi.slice_locs(start='b')
(1, 4)
Like above, but stop at the end of 'b' in the first level and 'f' in
the second level:
>>> mi.slice_locs(start='b', end=('b', 'f'))
(1, 3)
See Also
--------
MultiIndex.get_loc : Get location for a label or a tuple of labels.
MultiIndex.get_locs : Get location for a label/slice/list/mask or a
sequence of such.
"""
# This function adds nothing to its parent implementation (the magic
# happens in get_slice_bound method), but it adds meaningful doc.
return super().slice_locs(start, end, step)
def _partial_tup_index(self, tup, side="left"):
if len(tup) > self._lexsort_depth:
raise UnsortedIndexError(
f"Key length ({len(tup)}) was greater than MultiIndex lexsort depth "
f"({self._lexsort_depth})"
)
n = len(tup)
start, end = 0, len(self)
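        # Descriptive note (added): walk the key level by level, narrowing the
        # [start, end) window to the block of rows whose leading codes match
        # the prefix seen so far; the last element is then located with a
        # searchsorted on that block.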
zipped = zip(tup, self.levels, self.codes)
for k, (lab, lev, labs) in enumerate(zipped):
section = labs[start:end]
if lab not in lev and not isna(lab):
if not lev.is_type_compatible(lib.infer_dtype([lab], skipna=False)):
raise TypeError(f"Level type mismatch: {lab}")
# short circuit
loc = lev.searchsorted(lab, side=side)
if side == "right" and loc >= 0:
loc -= 1
return start + section.searchsorted(loc, side=side)
idx = self._get_loc_single_level_index(lev, lab)
if isinstance(idx, slice) and k < n - 1:
# Get start and end value from slice, necessary when a non-integer
# interval is given as input GH#37707
start = idx.start
end = idx.stop
elif k < n - 1:
end = start + section.searchsorted(idx, side="right")
start = start + section.searchsorted(idx, side="left")
elif isinstance(idx, slice):
idx = idx.start
return start + section.searchsorted(idx, side=side)
else:
return start + section.searchsorted(idx, side=side)
def _get_loc_single_level_index(self, level_index: Index, key: Hashable) -> int:
"""
If key is NA value, location of index unify as -1.
Parameters
----------
level_index: Index
key : label
Returns
-------
loc : int
If key is NA value, loc is -1
Else, location of key in index.
See Also
--------
Index.get_loc : The get_loc method for (single-level) index.
"""
if is_scalar(key) and isna(key):
return -1
else:
return level_index.get_loc(key)
def get_loc(self, key, method=None):
"""
Get location for a label or a tuple of labels.
The location is returned as an integer/slice or boolean
mask.
Parameters
----------
key : label or tuple of labels (one for each level)
method : None
Returns
-------
loc : int, slice object or boolean mask
If the key is past the lexsort depth, the return may be a
boolean mask array, otherwise it is always a slice or int.
See Also
--------
Index.get_loc : The get_loc method for (single-level) index.
MultiIndex.slice_locs : Get slice location given start label(s) and
end label(s).
MultiIndex.get_locs : Get location for a label/slice/list/mask or a
sequence of such.
Notes
-----
The key cannot be a slice, list of same-level labels, a boolean mask,
or a sequence of such. If you want to use those, use
:meth:`MultiIndex.get_locs` instead.
Examples
--------
>>> mi = pd.MultiIndex.from_arrays([list('abb'), list('def')])
>>> mi.get_loc('b')
slice(1, 3, None)
>>> mi.get_loc(('b', 'e'))
1
"""
if method is not None:
raise NotImplementedError(
"only the default get_loc method is "
"currently supported for MultiIndex"
)
hash(key)
def _maybe_to_slice(loc):
"""convert integer indexer to boolean mask or slice if possible"""
if not isinstance(loc, np.ndarray) or loc.dtype != np.intp:
return loc
loc = lib.maybe_indices_to_slice(loc, len(self))
if isinstance(loc, slice):
return loc
mask = np.empty(len(self), dtype="bool")
mask.fill(False)
mask[loc] = True
return mask
if not isinstance(key, tuple):
loc = self._get_level_indexer(key, level=0)
return _maybe_to_slice(loc)
keylen = len(key)
if self.nlevels < keylen:
raise KeyError(
f"Key length ({keylen}) exceeds index depth ({self.nlevels})"
)
if keylen == self.nlevels and self.is_unique:
return self._engine.get_loc(key)
# -- partial selection or non-unique index
# break the key into 2 parts based on the lexsort_depth of the index;
# the first part returns a continuous slice of the index; the 2nd part
# needs linear search within the slice
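        # Illustrative example (not from the original source): with
        # lexsort_depth == 1 and key == ("b", "e"), lead_key == ("b",) narrows
        # the index to a contiguous slice via slice_locs, and follow_key ==
        # ("e",) is then matched by scanning the codes inside that slice.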
i = self._lexsort_depth
lead_key, follow_key = key[:i], key[i:]
start, stop = (
self.slice_locs(lead_key, lead_key) if lead_key else (0, len(self))
)
if start == stop:
raise KeyError(key)
if not follow_key:
return slice(start, stop)
warnings.warn(
"indexing past lexsort depth may impact performance.",
PerformanceWarning,
stacklevel=10,
)
loc = np.arange(start, stop, dtype=np.intp)
for i, k in enumerate(follow_key, len(lead_key)):
mask = self.codes[i][loc] == self._get_loc_single_level_index(
self.levels[i], k
)
if not mask.all():
loc = loc[mask]
if not len(loc):
raise KeyError(key)
return _maybe_to_slice(loc) if len(loc) != stop - start else slice(start, stop)
def get_loc_level(self, key, level=0, drop_level: bool = True):
"""
Get location and sliced index for requested label(s)/level(s).
Parameters
----------
key : label or sequence of labels
level : int/level name or list thereof, optional
drop_level : bool, default True
If ``False``, the resulting index will not drop any level.
Returns
-------
loc : A 2-tuple where the elements are:
Element 0: int, slice object or boolean array
Element 1: The resulting sliced multiindex/index. If the key
contains all levels, this will be ``None``.
See Also
--------
MultiIndex.get_loc : Get location for a label or a tuple of labels.
MultiIndex.get_locs : Get location for a label/slice/list/mask or a
sequence of such.
Examples
--------
>>> mi = pd.MultiIndex.from_arrays([list('abb'), list('def')],
... names=['A', 'B'])
>>> mi.get_loc_level('b')
(slice(1, 3, None), Index(['e', 'f'], dtype='object', name='B'))
>>> mi.get_loc_level('e', level='B')
(array([False, True, False]), Index(['b'], dtype='object', name='A'))
>>> mi.get_loc_level(['b', 'e'])
(1, None)
"""
if not isinstance(level, (list, tuple)):
level = self._get_level_number(level)
else:
level = [self._get_level_number(lev) for lev in level]
return self._get_loc_level(key, level=level, drop_level=drop_level)
def _get_loc_level(self, key, level: int | list[int] = 0, drop_level: bool = True):
"""
get_loc_level but with `level` known to be positional, not name-based.
"""
# different name to distinguish from maybe_droplevels
def maybe_mi_droplevels(indexer, levels, drop_level: bool):
if not drop_level:
return self[indexer]
# kludge around
orig_index = new_index = self[indexer]
for i in sorted(levels, reverse=True):
try:
new_index = new_index._drop_level_numbers([i])
except ValueError:
# no dropping here
return orig_index
return new_index
if isinstance(level, (tuple, list)):
if len(key) != len(level):
raise AssertionError(
"Key for location must have same length as number of levels"
)
result = None
for lev, k in zip(level, key):
loc, new_index = self._get_loc_level(k, level=lev)
if isinstance(loc, slice):
mask = np.zeros(len(self), dtype=bool)
mask[loc] = True
loc = mask
result = loc if result is None else result & loc
return result, maybe_mi_droplevels(result, level, drop_level)
# kludge for #1796
if isinstance(key, list):
key = tuple(key)
if isinstance(key, tuple) and level == 0:
try:
if key in self.levels[0]:
indexer = self._get_level_indexer(key, level=level)
new_index = maybe_mi_droplevels(indexer, [0], drop_level)
return indexer, new_index
except (TypeError, InvalidIndexError):
pass
if not any(isinstance(k, slice) for k in key):
# partial selection
# optionally get indexer to avoid re-calculation
def partial_selection(key, indexer=None):
if indexer is None:
indexer = self.get_loc(key)
ilevels = [
i for i in range(len(key)) if key[i] != slice(None, None)
]
return indexer, maybe_mi_droplevels(indexer, ilevels, drop_level)
if len(key) == self.nlevels and self.is_unique:
# Complete key in unique index -> standard get_loc
try:
return (self._engine.get_loc(key), None)
except KeyError as e:
raise KeyError(key) from e
else:
return partial_selection(key)
else:
indexer = None
for i, k in enumerate(key):
if not isinstance(k, slice):
k = self._get_level_indexer(k, level=i)
if isinstance(k, slice):
# everything
if k.start == 0 and k.stop == len(self):
k = slice(None, None)
else:
k_index = k
if isinstance(k, slice):
if k == slice(None, None):
continue
else:
raise TypeError(key)
if indexer is None:
indexer = k_index
else: # pragma: no cover
indexer &= k_index
if indexer is None:
indexer = slice(None, None)
ilevels = [i for i in range(len(key)) if key[i] != slice(None, None)]
return indexer, maybe_mi_droplevels(indexer, ilevels, drop_level)
else:
indexer = self._get_level_indexer(key, level=level)
return indexer, maybe_mi_droplevels(indexer, [level], drop_level)
def _get_level_indexer(self, key, level: int = 0, indexer=None):
# `level` kwarg is _always_ positional, never name
# return an indexer, boolean array or a slice showing where the key is
# in the totality of values
# if the indexer is provided, then use this
level_index = self.levels[level]
level_codes = self.codes[level]
def convert_indexer(start, stop, step, indexer=indexer, codes=level_codes):
# given the inputs and the codes/indexer, compute an indexer set
# if we have a provided indexer, then this need not consider
# the entire labels set
if step is not None and step < 0:
# Switch elements for negative step size
start, stop = stop - 1, start - 1
r = np.arange(start, stop, step)
if indexer is not None and len(indexer) != len(codes):
                # We have an indexer which maps the locations in the labels
                # that we have already selected (and which is not an indexer
                # for the entire set); scanning everything would be wasteful,
                # so we only examine locations that are in this set. The only
                # magic here is that the result holds the mappings back to the
                # set that we have selected.
from pandas import Series
mapper = Series(indexer)
indexer = codes.take(ensure_platform_int(indexer))
result = Series(Index(indexer).isin(r).nonzero()[0])
m = result.map(mapper)
# error: Incompatible types in assignment (expression has type
# "ndarray", variable has type "Series")
m = np.asarray(m) # type: ignore[assignment]
else:
# error: Incompatible types in assignment (expression has type
# "ndarray", variable has type "Series")
m = np.zeros(len(codes), dtype=bool) # type: ignore[assignment]
m[np.in1d(codes, r, assume_unique=Index(codes).is_unique)] = True
return m
if isinstance(key, slice):
# handle a slice, returning a slice if we can
# otherwise a boolean indexer
try:
if key.start is not None:
start = level_index.get_loc(key.start)
else:
start = 0
if key.stop is not None:
stop = level_index.get_loc(key.stop)
elif isinstance(start, slice):
stop = len(level_index)
else:
stop = len(level_index) - 1
step = key.step
except KeyError:
# we have a partial slice (like looking up a partial date
# string)
start = stop = level_index.slice_indexer(key.start, key.stop, key.step)
step = start.step
if isinstance(start, slice) or isinstance(stop, slice):
# we have a slice for start and/or stop
# a partial date slicer on a DatetimeIndex generates a slice
# note that the stop ALREADY includes the stopped point (if
# it was a string sliced)
start = getattr(start, "start", start)
stop = getattr(stop, "stop", stop)
return convert_indexer(start, stop, step)
elif level > 0 or self._lexsort_depth == 0 or step is not None:
                # we need right-inclusive semantics here, like when
                # searching with a slice,
                # so include stop + 1 (so that stop itself is included)
return convert_indexer(start, stop + 1, step)
else:
# sorted, so can return slice object -> view
i = level_codes.searchsorted(start, side="left")
j = level_codes.searchsorted(stop, side="right")
return slice(i, j, step)
else:
idx = self._get_loc_single_level_index(level_index, key)
if level > 0 or self._lexsort_depth == 0:
# Desired level is not sorted
locs = np.array(level_codes == idx, dtype=bool, copy=False)
if not locs.any():
# The label is present in self.levels[level] but unused:
raise KeyError(key)
return locs
if isinstance(idx, slice):
start = idx.start
end = idx.stop
else:
start = level_codes.searchsorted(idx, side="left")
end = level_codes.searchsorted(idx, side="right")
if start == end:
# The label is present in self.levels[level] but unused:
raise KeyError(key)
return slice(start, end)
def get_locs(self, seq):
"""
Get location for a sequence of labels.
Parameters
----------
seq : label, slice, list, mask or a sequence of such
You should use one of the above for each level.
If a level should not be used, set it to ``slice(None)``.
Returns
-------
numpy.ndarray
NumPy array of integers suitable for passing to iloc.
See Also
--------
MultiIndex.get_loc : Get location for a label or a tuple of labels.
MultiIndex.slice_locs : Get slice location given start label(s) and
end label(s).
Examples
--------
>>> mi = pd.MultiIndex.from_arrays([list('abb'), list('def')])
>>> mi.get_locs('b') # doctest: +SKIP
array([1, 2], dtype=int64)
>>> mi.get_locs([slice(None), ['e', 'f']]) # doctest: +SKIP
array([1, 2], dtype=int64)
>>> mi.get_locs([[True, False, True], slice('e', 'f')]) # doctest: +SKIP
array([2], dtype=int64)
"""
# must be lexsorted to at least as many levels
true_slices = [i for (i, s) in enumerate(com.is_true_slices(seq)) if s]
if true_slices and true_slices[-1] >= self._lexsort_depth:
raise UnsortedIndexError(
"MultiIndex slicing requires the index to be lexsorted: slicing "
f"on levels {true_slices}, lexsort depth {self._lexsort_depth}"
)
        # indexer: the list of all positions that we want to select
n = len(self)
indexer = None
def _convert_to_indexer(r) -> Int64Index:
# return an indexer
if isinstance(r, slice):
m = np.zeros(n, dtype=bool)
m[r] = True
r = m.nonzero()[0]
elif com.is_bool_indexer(r):
if len(r) != n:
raise ValueError(
"cannot index with a boolean indexer "
"that is not the same length as the "
"index"
)
r = r.nonzero()[0]
return Int64Index(r)
def _update_indexer(idxr: Index | None, indexer: Index | None, key) -> Index:
if indexer is None:
indexer = Index(np.arange(n))
if idxr is None:
return indexer
indexer_intersection = indexer.intersection(idxr)
if indexer_intersection.empty and not idxr.empty and not indexer.empty:
raise KeyError(key)
return indexer_intersection
for i, k in enumerate(seq):
if com.is_bool_indexer(k):
# a boolean indexer, must be the same length!
k = np.asarray(k)
indexer = _update_indexer(
_convert_to_indexer(k), indexer=indexer, key=seq
)
elif is_list_like(k):
# a collection of labels to include from this level (these
# are or'd)
indexers: Int64Index | None = None
for x in k:
try:
idxrs = _convert_to_indexer(
self._get_level_indexer(x, level=i, indexer=indexer)
)
indexers = (idxrs if indexers is None else indexers).union(
idxrs, sort=False
)
except KeyError:
# ignore not founds
continue
if indexers is not None:
indexer = _update_indexer(indexers, indexer=indexer, key=seq)
else:
# no matches we are done
return np.array([], dtype=np.int64)
elif com.is_null_slice(k):
# empty slice
indexer = _update_indexer(None, indexer=indexer, key=seq)
elif isinstance(k, slice):
# a slice, include BOTH of the labels
indexer = _update_indexer(
_convert_to_indexer(
self._get_level_indexer(k, level=i, indexer=indexer)
),
indexer=indexer,
key=seq,
)
else:
# a single label
indexer = _update_indexer(
_convert_to_indexer(
self.get_loc_level(k, level=i, drop_level=False)[0]
),
indexer=indexer,
key=seq,
)
# empty indexer
if indexer is None:
return np.array([], dtype=np.int64)
assert isinstance(indexer, Int64Index), type(indexer)
indexer = self._reorder_indexer(seq, indexer)
return indexer._values
# --------------------------------------------------------------------
def _reorder_indexer(
self,
seq: tuple[Scalar | Iterable | AnyArrayLike, ...],
indexer: Int64Index,
) -> Int64Index:
"""
        Reorder an indexer of a MultiIndex (self) so that the labels are in the
same order as given in seq
Parameters
----------
seq : label/slice/list/mask or a sequence of such
indexer: an Int64Index indexer of self
Returns
-------
indexer : a sorted Int64Index indexer of self ordered as seq
"""
# If the index is lexsorted and the list_like label in seq are sorted
# then we do not need to sort
if self._is_lexsorted():
need_sort = False
for i, k in enumerate(seq):
if is_list_like(k):
if not need_sort:
k_codes = self.levels[i].get_indexer(k)
k_codes = k_codes[k_codes >= 0] # Filter absent keys
# True if the given codes are not ordered
need_sort = (k_codes[:-1] > k_codes[1:]).any()
elif isinstance(k, slice) and k.step is not None and k.step < 0:
need_sort = True
# Bail out if both index and seq are sorted
if not need_sort:
return indexer
n = len(self)
keys: tuple[np.ndarray, ...] = ()
        # For each level of the sequence in seq, map the level codes to the
        # order in which they appear in the list-like sequence.
        # This mapping is then used to reorder the indexer.
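        # Illustrative example (not from the original source): when seq asks
        # for ["b", "a"] on a level ["a", "b"], key_order_map gives code 1
        # ("b") order 0 and code 0 ("a") order 1, so the lexsort below puts
        # the "b" rows before the "a" rows.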
for i, k in enumerate(seq):
if is_scalar(k):
# GH#34603 we want to treat a scalar the same as an all equal list
k = [k]
if com.is_bool_indexer(k):
new_order = np.arange(n)[indexer]
elif is_list_like(k):
# Generate a map with all level codes as sorted initially
key_order_map = np.ones(len(self.levels[i]), dtype=np.uint64) * len(
self.levels[i]
)
# Set order as given in the indexer list
level_indexer = self.levels[i].get_indexer(k)
level_indexer = level_indexer[level_indexer >= 0] # Filter absent keys
key_order_map[level_indexer] = np.arange(len(level_indexer))
new_order = key_order_map[self.codes[i][indexer]]
elif isinstance(k, slice) and k.step is not None and k.step < 0:
new_order = np.arange(n)[k][indexer]
elif isinstance(k, slice) and k.start is None and k.stop is None:
# slice(None) should not determine order GH#31330
new_order = np.ones((n,))[indexer]
else:
# For all other case, use the same order as the level
new_order = np.arange(n)[indexer]
keys = (new_order,) + keys
# Find the reordering using lexsort on the keys mapping
ind = np.lexsort(keys)
return indexer[ind]
def truncate(self, before=None, after=None) -> MultiIndex:
"""
Slice index between two labels / tuples, return new MultiIndex
Parameters
----------
before : label or tuple, can be partial. Default None
None defaults to start
after : label or tuple, can be partial. Default None
None defaults to end
Returns
-------
truncated : MultiIndex
"""
if after and before and after < before:
raise ValueError("after < before")
i, j = self.levels[0].slice_locs(before, after)
left, right = self.slice_locs(before, after)
new_levels = list(self.levels)
new_levels[0] = new_levels[0][i:j]
new_codes = [level_codes[left:right] for level_codes in self.codes]
new_codes[0] = new_codes[0] - i
return MultiIndex(
levels=new_levels,
codes=new_codes,
names=self._names,
verify_integrity=False,
)
def equals(self, other: object) -> bool:
"""
Determines if two MultiIndex objects have the same labeling information
(the levels themselves do not necessarily have to be the same)
See Also
--------
equal_levels
"""
if self.is_(other):
return True
if not isinstance(other, Index):
return False
if len(self) != len(other):
return False
if not isinstance(other, MultiIndex):
# d-level MultiIndex can equal d-tuple Index
if not self._should_compare(other):
# object Index or Categorical[object] may contain tuples
return False
return array_equivalent(self._values, other._values)
if self.nlevels != other.nlevels:
return False
for i in range(self.nlevels):
self_codes = self.codes[i]
other_codes = other.codes[i]
self_mask = self_codes == -1
other_mask = other_codes == -1
if not np.array_equal(self_mask, other_mask):
return False
self_codes = self_codes[~self_mask]
self_values = self.levels[i]._values.take(self_codes)
other_codes = other_codes[~other_mask]
other_values = other.levels[i]._values.take(other_codes)
            # Since we use NaT for both datetime64 and timedelta64 we can have a
            # situation where a level is typed, say, timedelta64 in self (i.e. it
            # has values other than NaT) but typed datetime64 in other (where
            # it is all NaT); these are still considered equivalent.
if len(self_values) == 0 and len(other_values) == 0:
continue
if not array_equivalent(self_values, other_values):
return False
return True
def equal_levels(self, other: MultiIndex) -> bool:
"""
Return True if the levels of both MultiIndex objects are the same
"""
if self.nlevels != other.nlevels:
return False
for i in range(self.nlevels):
if not self.levels[i].equals(other.levels[i]):
return False
return True
# --------------------------------------------------------------------
# Set Methods
def _union(self, other, sort) -> MultiIndex:
other, result_names = self._convert_can_do_setop(other)
# We could get here with CategoricalIndex other
rvals = other._values.astype(object, copy=False)
uniq_tuples = lib.fast_unique_multiple([self._values, rvals], sort=sort)
return MultiIndex.from_arrays(
zip(*uniq_tuples), sortorder=0, names=result_names
)
def _is_comparable_dtype(self, dtype: DtypeObj) -> bool:
return is_object_dtype(dtype)
def _get_reconciled_name_object(self, other) -> MultiIndex:
"""
If the result of a set operation will be self,
return self, unless the names change, in which
case make a shallow copy of self.
"""
names = self._maybe_match_names(other)
if self.names != names:
return self.rename(names)
return self
def _maybe_match_names(self, other):
"""
Try to find common names to attach to the result of an operation between
a and b. Return a consensus list of names if they match at least partly
or list of None if they have completely different names.
"""
if len(self.names) != len(other.names):
return [None] * len(self.names)
names = []
for a_name, b_name in zip(self.names, other.names):
if a_name == b_name:
names.append(a_name)
else:
# TODO: what if they both have np.nan for their names?
names.append(None)
return names
def _intersection(self, other, sort=False) -> MultiIndex:
other, result_names = self._convert_can_do_setop(other)
other = other.astype(object, copy=False)
uniq_tuples = None # flag whether _inner_indexer was successful
if self.is_monotonic and other.is_monotonic:
try:
inner_tuples = self._inner_indexer(other)[0]
sort = False # inner_tuples is already sorted
except TypeError:
pass
else:
uniq_tuples = algos.unique(inner_tuples)
if uniq_tuples is None:
left_unique = self.drop_duplicates()
indexer = left_unique.get_indexer(other.drop_duplicates())
uniq_tuples = left_unique.take(np.sort(indexer[indexer != -1]))
if sort is None:
uniq_tuples = sorted(uniq_tuples)
if len(uniq_tuples) == 0:
return MultiIndex(
levels=self.levels,
codes=[[]] * self.nlevels,
names=result_names,
verify_integrity=False,
)
else:
return MultiIndex.from_arrays(
zip(*uniq_tuples), sortorder=0, names=result_names
)
def _difference(self, other, sort) -> MultiIndex:
other, result_names = self._convert_can_do_setop(other)
this = self._get_unique_index()
indexer = this.get_indexer(other)
indexer = indexer.take((indexer != -1).nonzero()[0])
label_diff = np.setdiff1d(np.arange(this.size), indexer, assume_unique=True)
difference = this._values.take(label_diff)
if sort is None:
difference = sorted(difference)
if len(difference) == 0:
return MultiIndex(
levels=[[]] * self.nlevels,
codes=[[]] * self.nlevels,
names=result_names,
verify_integrity=False,
)
else:
return MultiIndex.from_tuples(difference, sortorder=0, names=result_names)
def _convert_can_do_setop(self, other):
result_names = self.names
if not isinstance(other, Index):
if len(other) == 0:
return self[:0], self.names
else:
msg = "other must be a MultiIndex or a list of tuples"
try:
other = MultiIndex.from_tuples(other, names=self.names)
except (ValueError, TypeError) as err:
# ValueError raised by tuples_to_object_array if we
# have non-object dtype
raise TypeError(msg) from err
else:
result_names = get_unanimous_names(self, other)
return other, result_names
def symmetric_difference(self, other, result_name=None, sort=None):
        # For equal MultiIndexes the symmetric difference is empty.
# Therefore, an empty MultiIndex is returned GH13490
tups = Index.symmetric_difference(self, other, result_name, sort)
if len(tups) == 0:
return type(self)(
levels=[[] for _ in range(self.nlevels)],
codes=[[] for _ in range(self.nlevels)],
names=tups.name,
)
return type(self).from_tuples(tups, names=tups.name)
# --------------------------------------------------------------------
@doc(Index.astype)
def astype(self, dtype, copy: bool = True):
dtype = pandas_dtype(dtype)
if is_categorical_dtype(dtype):
msg = "> 1 ndim Categorical are not supported at this time"
raise NotImplementedError(msg)
elif not is_object_dtype(dtype):
raise TypeError(
"Setting a MultiIndex dtype to anything other than object "
"is not supported"
)
elif copy is True:
return self._view()
return self
def _validate_fill_value(self, item):
if not isinstance(item, tuple):
# Pad the key with empty strings if lower levels of the key
# aren't specified:
item = (item,) + ("",) * (self.nlevels - 1)
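            # e.g. for a 3-level index, "x" becomes ("x", "", "")
            # (illustrative example, not from the original source)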
elif len(item) != self.nlevels:
raise ValueError("Item must have length equal to number of levels.")
return item
def insert(self, loc: int, item) -> MultiIndex:
"""
Make new MultiIndex inserting new item at location
Parameters
----------
loc : int
item : tuple
Must be same length as number of levels in the MultiIndex
Returns
-------
new_index : Index
"""
item = self._validate_fill_value(item)
new_levels = []
new_codes = []
for k, level, level_codes in zip(item, self.levels, self.codes):
if k not in level:
# have to insert into level
# must insert at end otherwise you have to recompute all the
# other codes
lev_loc = len(level)
level = level.insert(lev_loc, k)
else:
lev_loc = level.get_loc(k)
new_levels.append(level)
new_codes.append(np.insert(ensure_int64(level_codes), loc, lev_loc))
return MultiIndex(
levels=new_levels, codes=new_codes, names=self.names, verify_integrity=False
)
def delete(self, loc) -> MultiIndex:
"""
Make new index with passed location deleted
Returns
-------
new_index : MultiIndex
"""
new_codes = [np.delete(level_codes, loc) for level_codes in self.codes]
return MultiIndex(
levels=self.levels,
codes=new_codes,
names=self.names,
verify_integrity=False,
)
@doc(Index.isin)
def isin(self, values, level=None) -> np.ndarray:
if level is None:
values = MultiIndex.from_tuples(values, names=self.names)._values
return algos.isin(self._values, values)
else:
num = self._get_level_number(level)
levs = self.get_level_values(num)
if levs.size == 0:
return np.zeros(len(levs), dtype=np.bool_)
return levs.isin(values)
# ---------------------------------------------------------------
# Arithmetic/Numeric Methods - Disabled
__add__ = make_invalid_op("__add__")
__radd__ = make_invalid_op("__radd__")
__iadd__ = make_invalid_op("__iadd__")
__sub__ = make_invalid_op("__sub__")
__rsub__ = make_invalid_op("__rsub__")
__isub__ = make_invalid_op("__isub__")
__pow__ = make_invalid_op("__pow__")
__rpow__ = make_invalid_op("__rpow__")
__mul__ = make_invalid_op("__mul__")
__rmul__ = make_invalid_op("__rmul__")
__floordiv__ = make_invalid_op("__floordiv__")
__rfloordiv__ = make_invalid_op("__rfloordiv__")
__truediv__ = make_invalid_op("__truediv__")
__rtruediv__ = make_invalid_op("__rtruediv__")
__mod__ = make_invalid_op("__mod__")
__rmod__ = make_invalid_op("__rmod__")
__divmod__ = make_invalid_op("__divmod__")
__rdivmod__ = make_invalid_op("__rdivmod__")
# Unary methods disabled
__neg__ = make_invalid_op("__neg__")
__pos__ = make_invalid_op("__pos__")
__abs__ = make_invalid_op("__abs__")
__inv__ = make_invalid_op("__inv__")
def _lexsort_depth(codes: list[np.ndarray], nlevels: int) -> int:
"""Count depth (up to a maximum of `nlevels`) with which codes are lexsorted."""
int64_codes = [ensure_int64(level_codes) for level_codes in codes]
for k in range(nlevels, 0, -1):
if libalgos.is_lexsorted(int64_codes[:k]):
return k
return 0
def sparsify_labels(label_list, start: int = 0, sentinel=""):
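    # Descriptive note (added): replaces repeated leading labels with
    # ``sentinel`` for display. Illustrative example: per-level label lists
    # [["a", "a"], ["x", "y"]] come back as [("a", ""), ("x", "y")].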
pivoted = list(zip(*label_list))
k = len(label_list)
result = pivoted[: start + 1]
prev = pivoted[start]
for cur in pivoted[start + 1 :]:
sparse_cur = []
for i, (p, t) in enumerate(zip(prev, cur)):
if i == k - 1:
sparse_cur.append(t)
result.append(sparse_cur)
break
if p == t:
sparse_cur.append(sentinel)
else:
sparse_cur.extend(cur[i:])
result.append(sparse_cur)
break
prev = cur
return list(zip(*result))
def _get_na_rep(dtype) -> str:
return {np.datetime64: "NaT", np.timedelta64: "NaT"}.get(dtype, "NaN")
def maybe_droplevels(index: Index, key) -> Index:
"""
Attempt to drop level or levels from the given index.
Parameters
----------
index: Index
key : scalar or tuple
Returns
-------
Index
"""
# drop levels
original_index = index
if isinstance(key, tuple):
for _ in key:
try:
index = index._drop_level_numbers([0])
except ValueError:
# we have dropped too much, so back out
return original_index
else:
try:
index = index._drop_level_numbers([0])
except ValueError:
pass
return index
def _coerce_indexer_frozen(array_like, categories, copy: bool = False) -> np.ndarray:
"""
Coerce the array_like indexer to the smallest integer dtype that can encode all
of the given categories.
Parameters
----------
array_like : array-like
categories : array-like
copy : bool
Returns
-------
np.ndarray
Non-writeable.
"""
array_like = coerce_indexer_dtype(array_like, categories)
if copy:
array_like = array_like.copy()
array_like.flags.writeable = False
return array_like
def _require_listlike(level, arr, arrname: str):
"""
Ensure that level is either None or listlike, and arr is list-of-listlike.
"""
if level is not None and not is_list_like(level):
if not is_list_like(arr):
raise TypeError(f"{arrname} must be list-like")
if is_list_like(arr[0]):
raise TypeError(f"{arrname} must be list-like")
level = [level]
arr = [arr]
elif level is None or is_list_like(level):
if not is_list_like(arr) or not is_list_like(arr[0]):
raise TypeError(f"{arrname} must be list of lists-like")
return level, arr
|
py | 1a3719755e1183e123bdbdf4e0127b023326e7c9 | from wtforms.validators import ValidationError
from taqueriaposapp.user.models import UserModel
|
py | 1a3719d0580b822e53179cd9bbfc19d56d70fff6 | # -*- coding: utf-8 -*-
"""Code for maintaining the background process and for running
user programs
Commands get executed via shell, this way the command line in the
shell becomes kind of title for the execution.
"""
import collections
import logging
import os.path
import re
import shlex
import signal
import subprocess
import sys
import time
import tkinter as tk
import warnings
from logging import debug
from threading import Thread
from time import sleep
from tkinter import messagebox, ttk
from typing import Any, List, Optional, Set, Union, Callable # @UnusedImport; @UnusedImport
import thonny
from thonny import THONNY_USER_DIR, common, get_runner, get_shell, get_workbench
from thonny.common import (
BackendEvent,
CommandToBackend,
DebuggerCommand,
DebuggerResponse,
EOFCommand,
InlineCommand,
InputSubmission,
ToplevelCommand,
ToplevelResponse,
UserError,
is_same_path,
normpath_with_actual_case,
parse_message,
path_startswith,
serialize_message,
update_system_path,
MessageFromBackend,
universal_relpath,
)
from thonny.editors import (
get_current_breakpoints,
get_saved_current_script_filename,
is_remote_path,
is_local_path,
get_target_dirname_from_editor_filename,
extract_target_path,
)
from thonny.languages import tr
from thonny.misc_utils import construct_cmd_line, running_on_mac_os, running_on_windows
from thonny.ui_utils import CommonDialogEx, select_sequence, show_dialog
from thonny.workdlg import WorkDialog
logger = logging.getLogger(__name__)
WINDOWS_EXE = "python.exe"
OUTPUT_MERGE_THRESHOLD = 1000
RUN_COMMAND_LABEL = "" # init later when gettext is ready
RUN_COMMAND_CAPTION = ""
EDITOR_CONTENT_TOKEN = "$EDITOR_CONTENT"
EXPECTED_TERMINATION_CODE = 123
INTERRUPT_SEQUENCE = "<Control-c>"
ANSI_CODE_TERMINATOR = re.compile("[@-~]")
# other components may turn it on in order to avoid grouping output lines into one event
io_animation_required = False
_console_allocated = False
class Runner:
def __init__(self) -> None:
get_workbench().set_default("run.auto_cd", True)
self._init_commands()
self._state = "starting"
self._proxy = None # type: BackendProxy
self._publishing_events = False
self._polling_after_id = None
self._postponed_commands = [] # type: List[CommandToBackend]
def _remove_obsolete_jedi_copies(self) -> None:
# Thonny 2.1 used to copy jedi in order to make it available
# for the backend. Get rid of it now
for item in os.listdir(THONNY_USER_DIR):
if item.startswith("jedi_0."):
import shutil
shutil.rmtree(os.path.join(THONNY_USER_DIR, item), True)
def start(self) -> None:
global _console_allocated
try:
self._check_alloc_console()
_console_allocated = True
except Exception:
logger.exception("Problem allocating console")
_console_allocated = False
self.restart_backend(False, True)
# temporary
self._remove_obsolete_jedi_copies()
def _init_commands(self) -> None:
global RUN_COMMAND_CAPTION, RUN_COMMAND_LABEL
RUN_COMMAND_LABEL = tr("Run current script")
RUN_COMMAND_CAPTION = tr("Run")
get_workbench().set_default("run.run_in_terminal_python_repl", False)
get_workbench().set_default("run.run_in_terminal_keep_open", True)
try:
import thonny.plugins.debugger # @UnusedImport
debugger_available = True
except ImportError:
debugger_available = False
get_workbench().add_command(
"run_current_script",
"run",
RUN_COMMAND_LABEL,
caption=RUN_COMMAND_CAPTION,
handler=self.cmd_run_current_script,
default_sequence="<F5>",
extra_sequences=[select_sequence("<Control-r>", "<Command-r>")],
tester=self.cmd_run_current_script_enabled,
group=10,
image="run-current-script",
include_in_toolbar=not (get_workbench().in_simple_mode() and debugger_available),
show_extra_sequences=True,
)
get_workbench().add_command(
"run_current_script_in_terminal",
"run",
tr("Run current script in terminal"),
caption="RunT",
handler=self._cmd_run_current_script_in_terminal,
default_sequence="<Control-t>",
extra_sequences=["<<CtrlTInText>>"],
tester=self._cmd_run_current_script_in_terminal_enabled,
group=35,
image="terminal",
)
get_workbench().add_command(
"restart",
"run",
tr("Stop/Restart backend"),
caption=tr("Stop"),
handler=self.cmd_stop_restart,
default_sequence="<Control-F2>",
group=100,
image="stop",
include_in_toolbar=True,
)
get_workbench().add_command(
"interrupt",
"run",
tr("Interrupt execution"),
handler=self._cmd_interrupt,
tester=self._cmd_interrupt_enabled,
default_sequence=INTERRUPT_SEQUENCE,
skip_sequence_binding=True, # Sequence will be bound differently
group=100,
bell_when_denied=False,
)
get_workbench().bind(INTERRUPT_SEQUENCE, self._cmd_interrupt_with_shortcut, True)
get_workbench().add_command(
"ctrld",
"run",
tr("Send EOF / Soft reboot"),
self.ctrld,
self.ctrld_enabled,
group=100,
default_sequence="<Control-d>",
extra_sequences=["<<CtrlDInText>>"],
)
get_workbench().add_command(
"disconnect",
"run",
tr("Disconnect"),
self.disconnect,
self.disconnect_enabled,
group=100,
)
def get_state(self) -> str:
"""State is one of "running", "waiting_debugger_command", "waiting_toplevel_command" """
return self._state
def _set_state(self, state: str) -> None:
if self._state != state:
logging.debug("Runner state changed: %s ==> %s" % (self._state, state))
self._state = state
def is_running(self):
return self._state == "running"
def is_waiting(self):
return self._state.startswith("waiting")
def is_waiting_toplevel_command(self):
return self._state == "waiting_toplevel_command"
def is_waiting_debugger_command(self):
return self._state == "waiting_debugger_command"
def get_sys_path(self) -> List[str]:
return self._proxy.get_sys_path()
def send_command(self, cmd: CommandToBackend) -> None:
if self._proxy is None:
return
if self._publishing_events:
# allow all event handlers to complete before sending the commands
# issued by first event handlers
self._postpone_command(cmd)
return
# First sanity check
if (
isinstance(cmd, ToplevelCommand)
and not self.is_waiting_toplevel_command()
and cmd.name not in ["Reset", "Run", "Debug"]
or isinstance(cmd, DebuggerCommand)
and not self.is_waiting_debugger_command()
):
get_workbench().bell()
logging.warning(
"RUNNER: Command %s was attempted at state %s" % (cmd, self.get_state())
)
return
# Attach extra info
if "debug" in cmd.name.lower():
cmd["breakpoints"] = get_current_breakpoints()
if "id" not in cmd:
cmd["id"] = generate_command_id()
cmd["local_cwd"] = get_workbench().get_local_cwd()
# Offer the command
logging.debug("RUNNER Sending: %s, %s", cmd.name, cmd)
response = self._proxy.send_command(cmd)
if response == "discard":
return None
elif response == "postpone":
self._postpone_command(cmd)
return
else:
assert response is None
get_workbench().event_generate("CommandAccepted", command=cmd)
if isinstance(cmd, (ToplevelCommand, DebuggerCommand)):
self._set_state("running")
if cmd.name[0].isupper():
# This may be only logical restart, which does not look like restart to the runner
get_workbench().event_generate("BackendRestart", full=False)
def send_command_and_wait(self, cmd: CommandToBackend, dialog_title: str) -> MessageFromBackend:
dlg = InlineCommandDialog(get_workbench(), cmd, title=dialog_title + " ...")
show_dialog(dlg)
return dlg.response
def _postpone_command(self, cmd: CommandToBackend) -> None:
# in case of InlineCommands, discard older same type command
if isinstance(cmd, InlineCommand):
for older_cmd in self._postponed_commands:
if older_cmd.name == cmd.name:
self._postponed_commands.remove(older_cmd)
if len(self._postponed_commands) > 10:
logging.warning("Can't pile up too many commands. This command will be just ignored")
else:
self._postponed_commands.append(cmd)
def _send_postponed_commands(self) -> None:
todo = self._postponed_commands
self._postponed_commands = []
for cmd in todo:
logging.debug("Sending postponed command: %s", cmd)
self.send_command(cmd)
def send_program_input(self, data: str) -> None:
assert self.is_running()
self._proxy.send_program_input(data)
def execute_script(
self,
script_path: str,
args: List[str],
working_directory: Optional[str] = None,
command_name: str = "Run",
) -> None:
if self._proxy.get_cwd() != working_directory:
# create compound command
# start with %cd
cd_cmd_line = construct_cd_command(working_directory) + "\n"
else:
# create simple command
cd_cmd_line = ""
rel_filename = universal_relpath(script_path, working_directory)
cmd_parts = ["%" + command_name, rel_filename] + args
exe_cmd_line = construct_cmd_line(cmd_parts, [EDITOR_CONTENT_TOKEN]) + "\n"
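        # Illustrative example (not from the original source): the text
        # submitted to the shell may end up looking like
        #   %cd /home/user/project
        #   %Run script.py arg1 arg2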
# submit to shell (shell will execute it)
get_shell().submit_magic_command(cd_cmd_line + exe_cmd_line)
def execute_editor_content(self, command_name, args):
get_shell().submit_magic_command(
construct_cmd_line(
["%" + command_name, "-c", EDITOR_CONTENT_TOKEN] + args, [EDITOR_CONTENT_TOKEN]
)
)
def execute_current(self, command_name: str) -> None:
"""
This method's job is to create a command for running/debugging
current file/script and submit it to shell
"""
if not self.is_waiting_toplevel_command():
self.restart_backend(True, False, 2)
filename = get_saved_current_script_filename()
if not filename:
# user has cancelled file saving
return
if (
is_remote_path(filename)
and not self._proxy.can_run_remote_files()
or is_local_path(filename)
and not self._proxy.can_run_local_files()
):
self.execute_editor_content(command_name, self._get_active_arguments())
else:
if get_workbench().get_option("run.auto_cd") and command_name[0].isupper():
working_directory = get_target_dirname_from_editor_filename(filename)
else:
working_directory = self._proxy.get_cwd()
if is_local_path(filename):
target_path = filename
else:
target_path = extract_target_path(filename)
self.execute_script(
target_path, self._get_active_arguments(), working_directory, command_name
)
def _get_active_arguments(self):
if get_workbench().get_option("view.show_program_arguments"):
args_str = get_workbench().get_option("run.program_arguments")
get_workbench().log_program_arguments_string(args_str)
return shlex.split(args_str)
else:
return []
def cmd_run_current_script_enabled(self) -> bool:
return (
get_workbench().get_editor_notebook().get_current_editor() is not None
and "run" in get_runner().get_supported_features()
)
def _cmd_run_current_script_in_terminal_enabled(self) -> bool:
return (
self._proxy
and "run_in_terminal" in self._proxy.get_supported_features()
and self.cmd_run_current_script_enabled()
)
def cmd_run_current_script(self) -> None:
if get_workbench().in_simple_mode():
get_workbench().hide_view("VariablesView")
self.execute_current("Run")
def _cmd_run_current_script_in_terminal(self) -> None:
filename = get_saved_current_script_filename()
if not filename:
return
self._proxy.run_script_in_terminal(
filename,
self._get_active_arguments(),
get_workbench().get_option("run.run_in_terminal_python_repl"),
get_workbench().get_option("run.run_in_terminal_keep_open"),
)
def _cmd_interrupt(self) -> None:
if self._proxy is not None:
if _console_allocated:
self._proxy.interrupt()
else:
messagebox.showerror(
"No console",
"Can't interrupt as console was not allocated.\n\nUse Stop/Restart instead.",
master=self,
)
else:
logging.warning("User tried interrupting without proxy")
def _cmd_interrupt_with_shortcut(self, event=None):
if not self._cmd_interrupt_enabled():
return None
if not running_on_mac_os(): # on Mac Ctrl+C is not used for Copy.
# Disable Ctrl+C interrupt in editor and shell, when some text is selected
# (assuming user intended to copy instead of interrupting)
widget = get_workbench().focus_get()
if isinstance(widget, tk.Text):
if len(widget.tag_ranges("sel")) > 0:
# this test is reliable, unlike selection_get below
return None
elif isinstance(widget, (tk.Listbox, ttk.Entry, tk.Entry, tk.Spinbox)):
try:
selection = widget.selection_get()
if isinstance(selection, str) and len(selection) > 0:
# Assuming user meant to copy, not interrupt
# (IDLE seems to follow same logic)
# NB! This is not perfect, as in Linux the selection can be in another app
# ie. there may be no selection in Thonny actually.
# In other words, Ctrl+C interrupt may be dropped without reason
# when given inside the widgets listed above.
return None
except Exception:
# widget either doesn't have selection_get or it
# gave error (can happen without selection on Ubuntu)
pass
self._cmd_interrupt()
return "break"
def _cmd_interrupt_enabled(self) -> bool:
return self._proxy and self._proxy.is_connected()
def cmd_stop_restart(self) -> None:
if get_workbench().in_simple_mode():
get_workbench().hide_view("VariablesView")
self.restart_backend(True)
def disconnect(self):
proxy = self.get_backend_proxy()
assert hasattr(proxy, "disconnect")
proxy.disconnect()
def disconnect_enabled(self):
return hasattr(self.get_backend_proxy(), "disconnect")
def ctrld(self):
proxy = self.get_backend_proxy()
if not proxy:
return
if get_shell().has_pending_input():
messagebox.showerror(
"Can't perform this action",
"Ctrl+D only has effect on an empty line / prompt.\n"
+ "Submit current input (press ENTER) and try again",
master=self,
)
return
proxy.send_command(EOFCommand())
self._set_state("running")
def ctrld_enabled(self):
proxy = self.get_backend_proxy()
return proxy and proxy.is_connected()
def _poll_backend_messages(self) -> None:
"""I chose polling instead of event_generate in listener thread,
because event_generate across threads is not reliable
http://www.thecodingforums.com/threads/more-on-tk-event_generate-and-threads.359615/
"""
self._polling_after_id = None
if self._pull_backend_messages() is False:
return
self._polling_after_id = get_workbench().after(20, self._poll_backend_messages)
def _pull_backend_messages(self):
while self._proxy is not None:
try:
msg = self._proxy.fetch_next_message()
if not msg:
break
logging.debug(
"RUNNER GOT: %s, %s in state: %s", msg.event_type, msg, self.get_state()
)
except BackendTerminatedError as exc:
self._report_backend_crash(exc)
self.destroy_backend()
return False
if msg.get("SystemExit", False):
self.restart_backend(True)
return False
# change state
if isinstance(msg, ToplevelResponse):
self._set_state("waiting_toplevel_command")
elif isinstance(msg, DebuggerResponse):
self._set_state("waiting_debugger_command")
else:
"other messages don't affect the state"
# Publish the event
# NB! This may cause another command to be sent before we get to postponed commands.
try:
self._publishing_events = True
class_event_type = type(msg).__name__
get_workbench().event_generate(class_event_type, event=msg) # more general event
if msg.event_type != class_event_type:
# more specific event
get_workbench().event_generate(msg.event_type, event=msg)
finally:
self._publishing_events = False
# TODO: is it necessary???
# https://stackoverflow.com/a/13520271/261181
# get_workbench().update()
self._send_postponed_commands()
def _report_backend_crash(self, exc: Exception) -> None:
returncode = getattr(exc, "returncode", "?")
err = "Backend terminated or disconnected."
try:
faults_file = os.path.join(THONNY_USER_DIR, "backend_faults.log")
if os.path.exists(faults_file):
with open(faults_file, encoding="ASCII") as fp:
err += fp.read()
except Exception:
logging.exception("Failed retrieving backend faults")
err = err.strip() + " Use 'Stop/Restart' to restart.\n"
if returncode != EXPECTED_TERMINATION_CODE:
get_workbench().event_generate("ProgramOutput", stream_name="stderr", data="\n" + err)
get_workbench().become_active_window(False)
def restart_backend(self, clean: bool, first: bool = False, wait: float = 0) -> None:
"""Recreate (or replace) backend proxy / backend process."""
if not first:
get_shell().restart()
get_shell().update_idletasks()
self.destroy_backend()
backend_name = get_workbench().get_option("run.backend_name")
if backend_name not in get_workbench().get_backends():
raise UserError(
"Can't find backend '{}'. Please select another backend from options".format(
backend_name
)
)
backend_class = get_workbench().get_backends()[backend_name].proxy_class
self._set_state("running")
self._proxy = None
self._proxy = backend_class(clean)
self._poll_backend_messages()
if wait:
start_time = time.time()
while not self.is_waiting_toplevel_command() and time.time() - start_time <= wait:
# self._pull_backend_messages()
get_workbench().update()
sleep(0.01)
get_workbench().event_generate("BackendRestart", full=True)
def destroy_backend(self) -> None:
if self._polling_after_id is not None:
get_workbench().after_cancel(self._polling_after_id)
self._polling_after_id = None
self._postponed_commands = []
if self._proxy:
self._proxy.destroy()
self._proxy = None
get_workbench().event_generate("BackendTerminated")
def get_local_executable(self) -> Optional[str]:
if self._proxy is None:
return None
else:
return self._proxy.get_local_executable()
def get_backend_proxy(self) -> "BackendProxy":
return self._proxy
def _check_alloc_console(self) -> None:
if sys.executable.endswith("pythonw.exe"):
# These don't have console allocated.
# Console is required for sending interrupts.
# AllocConsole would be easier but flashes console window
import ctypes
kernel32 = ctypes.WinDLL("kernel32", use_last_error=True)
exe = sys.executable.replace("pythonw.exe", "python.exe")
cmd = [exe, "-c", "print('Hi!'); input()"]
child = subprocess.Popen(
cmd,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=True,
)
child.stdout.readline()
result = kernel32.AttachConsole(child.pid)
if not result:
err = ctypes.get_last_error()
logging.info("Could not allocate console. Error code: " + str(err))
child.stdin.write(b"\n")
try:
child.stdin.flush()
except Exception:
# May happen eg. when installation path has "&" in it
# See https://bitbucket.org/plas/thonny/issues/508/cant-allocate-windows-console-when
# Without flush the console window becomes visible, but Thonny can still be used
logger.exception("Problem with finalizing console allocation")
def ready_for_remote_file_operations(self, show_message=False):
if not self._proxy or not self.supports_remote_files():
return False
ready = self._proxy.ready_for_remote_file_operations()
if not ready and show_message:
if not self._proxy.is_connected():
msg = "Device is not connected"
else:
msg = (
"Device is busy -- can't perform this action now."
+ "\nPlease wait or cancel current work and try again!"
)
messagebox.showerror("Can't complete", msg, master=self)
return ready
def get_supported_features(self) -> Set[str]:
if self._proxy is None:
return set()
else:
return self._proxy.get_supported_features()
def supports_remote_files(self):
if self._proxy is None:
return False
else:
return self._proxy.supports_remote_files()
def supports_remote_directories(self):
if self._proxy is None:
return False
else:
return self._proxy.supports_remote_directories()
def get_node_label(self):
if self._proxy is None:
return "Back-end"
else:
return self._proxy.get_node_label()
def using_venv(self) -> bool:
from thonny.plugins.cpython import CPythonProxy
return isinstance(self._proxy, CPythonProxy) and self._proxy._in_venv
class BackendProxy:
"""Communicates with backend process.
All communication methods must be non-blocking,
ie. suitable for calling from GUI thread."""
# backend_name will be overwritten on Workbench.add_backend
# Subclasses don't need to worry about it.
backend_name = None
backend_description = None
def __init__(self, clean: bool) -> None:
"""Initializes (or starts the initialization of) the backend process.
Backend is considered ready when the runner gets a ToplevelResponse
with attribute "welcome_text" from fetch_next_message.
"""
def send_command(self, cmd: CommandToBackend) -> Optional[str]:
"""Send the command to backend. Return None, 'discard' or 'postpone'"""
raise NotImplementedError()
def send_program_input(self, data: str) -> None:
"""Send input data to backend"""
raise NotImplementedError()
def fetch_next_message(self):
"""Read next message from the queue or None if queue is empty"""
raise NotImplementedError()
def run_script_in_terminal(self, script_path, args, interactive, keep_open):
raise NotImplementedError()
def get_sys_path(self):
"backend's sys.path"
return []
def get_backend_name(self):
return type(self).backend_name
def get_pip_gui_class(self):
return None
def interrupt(self):
"""Tries to interrupt current command without reseting the backend"""
pass
def destroy(self):
"""Called when Thonny no longer needs this instance
(Thonny gets closed or new backend gets selected)
"""
pass
def is_connected(self):
return True
def get_local_executable(self):
"""Return system command for invoking current interpreter"""
return None
def get_supported_features(self):
return {"run"}
def get_node_label(self):
"""Used as files caption if back-end has separate files"""
return "Back-end"
def get_full_label(self):
"""Used in pip GUI title"""
return self.get_node_label()
def supports_remote_files(self):
"""Whether remote file browser should be presented with this back-end"""
return False
def uses_local_filesystem(self):
"""Whether it runs code from local files"""
return True
def supports_remote_directories(self):
return False
def supports_trash(self):
return True
def can_run_remote_files(self):
raise NotImplementedError()
def can_run_local_files(self):
raise NotImplementedError()
def ready_for_remote_file_operations(self):
return False
def get_cwd(self):
return None
def get_clean_description(self):
return self.backend_description
@classmethod
def get_current_switcher_configuration(cls):
"""returns the dict of configuration entries that distinguish current backend conf from other
items in the backend switcher"""
return {"run.backend_name": cls.backend_name}
@classmethod
def get_switcher_entries(cls):
"""
Each returned entry creates one item in the backend switcher menu.
"""
return [(cls.get_current_switcher_configuration(), cls.backend_description)]
def has_custom_system_shell(self):
return False
def open_custom_system_shell(self):
raise NotImplementedError()
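# --- Illustrative sketch (not part of the original module) ---
# Hedged example of the minimal surface a concrete proxy needs: queue up
# messages and answer send_command() / fetch_next_message().  Real proxies
# (e.g. SubprocessProxy below) talk to an actual backend process instead of
# a local deque; this echo proxy exists only to show the contract.
class _DemoEchoProxy(BackendProxy):
    def __init__(self, clean):
        super(_DemoEchoProxy, self).__init__(clean)
        self._queue = collections.deque()

    def send_command(self, cmd):
        # pretend the backend echoed the command name back as program output
        self._queue.append(
            BackendEvent("ProgramOutput", data=cmd.name + "\n", stream_name="stdout")
        )
        return None

    def fetch_next_message(self):
        return self._queue.popleft() if self._queue else None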
class SubprocessProxy(BackendProxy):
def __init__(self, clean: bool, executable: Optional[str] = None) -> None:
super().__init__(clean)
if executable:
self._executable = executable
else:
self._executable = get_interpreter_for_subprocess()
if not os.path.isfile(self._executable):
raise UserError(
"Interpreter '%s' does not exist. Please check the configuration!"
% self._executable
)
self._welcome_text = ""
self._proc = None
self._response_queue = None
self._sys_path = []
self._usersitepackages = None
self._gui_update_loop_id = None
self._in_venv = None
self._cwd = self._get_initial_cwd() # pylint: disable=assignment-from-none
self._start_background_process(clean=clean)
def _get_initial_cwd(self):
return None
def _start_background_process(self, clean=None, extra_args=[]):
# deque, because on one occasion messages need to be put back
self._response_queue = collections.deque()
# prepare environment
env = get_environment_for_python_subprocess(self._executable)
# variables controlling communication with the back-end process
env["PYTHONIOENCODING"] = "utf-8"
# because cmd line option -u won't reach child processes
# see https://github.com/thonny/thonny/issues/808
env["PYTHONUNBUFFERED"] = "1"
# Let back-end know about plug-ins
env["THONNY_USER_DIR"] = THONNY_USER_DIR
env["THONNY_FRONTEND_SYS_PATH"] = repr(sys.path)
env["THONNY_LANGUAGE"] = get_workbench().get_option("general.language")
env["FRIENDLY_TRACEBACK_LEVEL"] = str(
get_workbench().get_option("assistance.friendly_traceback_level")
)
if thonny.in_debug_mode():
env["THONNY_DEBUG"] = "1"
elif "THONNY_DEBUG" in env:
del env["THONNY_DEBUG"]
if not os.path.exists(self._executable):
raise UserError(
"Interpreter (%s) not found. Please recheck corresponding option!"
% self._executable
)
cmd_line = (
[
self._executable,
"-u", # unbuffered IO
"-B", # don't write pyo/pyc files
# (to avoid problems when using different Python versions without write permissions)
]
+ self._get_launcher_with_args()
+ extra_args
)
creationflags = 0
if running_on_windows():
creationflags = subprocess.CREATE_NEW_PROCESS_GROUP
debug("Starting the backend: %s %s", cmd_line, get_workbench().get_local_cwd())
extra_params = {}
if sys.version_info >= (3, 6):
extra_params["encoding"] = "utf-8"
self._proc = subprocess.Popen(
cmd_line,
bufsize=0,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd=self._get_launch_cwd(),
env=env,
universal_newlines=True,
creationflags=creationflags,
**extra_params
)
# setup asynchronous output listeners
Thread(target=self._listen_stdout, args=(self._proc.stdout,), daemon=True).start()
Thread(target=self._listen_stderr, args=(self._proc.stderr,), daemon=True).start()
def _get_launch_cwd(self):
return self.get_cwd() if self.uses_local_filesystem() else None
def _get_launcher_with_args(self):
raise NotImplementedError()
def send_command(self, cmd: CommandToBackend) -> Optional[str]:
"""Send the command to backend. Return None, 'discard' or 'postpone'"""
if isinstance(cmd, ToplevelCommand) and cmd.name[0].isupper():
self._clear_environment()
if isinstance(cmd, ToplevelCommand):
# required by SshCPythonBackend for creating fresh target process
cmd["expected_cwd"] = self._cwd
method_name = "_cmd_" + cmd.name
if hasattr(self, method_name):
getattr(self, method_name)(cmd)
else:
self._send_msg(cmd)
def _send_msg(self, msg):
self._proc.stdin.write(serialize_message(msg) + "\n")
self._proc.stdin.flush()
def _clear_environment(self):
pass
def send_program_input(self, data):
self._send_msg(InputSubmission(data))
def process_is_alive(self):
return self._proc is not None and self._proc.poll() is None
def is_terminated(self):
return not self.process_is_alive()
def is_connected(self):
return self.process_is_alive()
def get_sys_path(self):
return self._sys_path
def destroy(self):
self._close_backend()
def _close_backend(self):
if self._proc is not None and self._proc.poll() is None:
self._proc.kill()
self._proc = None
self._response_queue = None
def _listen_stdout(self, stdout):
# debug("... started listening to stdout")
# will be called from separate thread
message_queue = self._response_queue
def publish_as_msg(data):
msg = parse_message(data)
if "cwd" in msg:
self.cwd = msg["cwd"]
message_queue.append(msg)
if len(message_queue) > 50:
# Probably backend runs an infinite/long print loop.
# Throttle message throughput in order to keep GUI thread responsive.
while len(message_queue) > 0:
sleep(0.1)
while self.process_is_alive():
try:
data = stdout.readline()
except IOError:
sleep(0.1)
continue
# debug("... read some stdout data", repr(data))
if data == "":
break
else:
try:
publish_as_msg(data)
except Exception:
# Can mean the line was from subprocess,
# which can't be captured by stream faking.
# NB! If the subprocess printed it without a linebreak,
# then the suffix can be a Thonny message
parts = data.rsplit(common.MESSAGE_MARKER, maxsplit=1)
# print first part as it is
message_queue.append(
BackendEvent("ProgramOutput", data=parts[0], stream_name="stdout")
)
if len(parts) == 2:
second_part = common.MESSAGE_MARKER + parts[1]
try:
publish_as_msg(second_part)
except Exception:
# just print ...
message_queue.append(
BackendEvent(
"ProgramOutput", data=second_part, stream_name="stdout"
)
)
def _listen_stderr(self, stderr):
# stderr is used only for debugger debugging
while self.process_is_alive():
data = stderr.readline()
if data == "":
break
else:
self._response_queue.append(
BackendEvent("ProgramOutput", stream_name="stderr", data=data)
)
def _store_state_info(self, msg):
if "cwd" in msg:
self._cwd = msg["cwd"]
self._publish_cwd(msg["cwd"])
if msg.get("welcome_text"):
self._welcome_text = msg["welcome_text"]
if "in_venv" in msg:
self._in_venv = msg["in_venv"]
if "sys_path" in msg:
self._sys_path = msg["sys_path"]
if "usersitepackages" in msg:
self._usersitepackages = msg["usersitepackages"]
if "prefix" in msg:
self._sys_prefix = msg["prefix"]
if "exe_dirs" in msg:
self._exe_dirs = msg["exe_dirs"]
if msg.get("executable"):
self._reported_executable = msg["executable"]
def _publish_cwd(self, cwd):
if self.uses_local_filesystem():
get_workbench().set_local_cwd(cwd)
def get_supported_features(self):
return {"run"}
def get_site_packages(self):
# NB! site.sitepackages may not be present in virtualenv
for d in self._sys_path:
if ("site-packages" in d or "dist-packages" in d) and path_startswith(
d, self._sys_prefix
):
return d
return None
def get_user_site_packages(self):
return self._usersitepackages
def get_cwd(self):
return self._cwd
def get_exe_dirs(self):
return self._exe_dirs
def fetch_next_message(self):
if not self._response_queue or len(self._response_queue) == 0:
if self.is_terminated():
raise BackendTerminatedError(self._proc.returncode if self._proc else None)
else:
return None
msg = self._response_queue.popleft()
self._store_state_info(msg)
if not hasattr(msg, "event_type"):
print("gotww", msg)
if msg.event_type == "ProgramOutput":
# combine available small output messages into a single message,
# in order to put less pressure on UI code
wait_time = 0.01
total_wait_time = 0
while True:
if len(self._response_queue) == 0:
if _ends_with_incomplete_ansi_code(msg["data"]) and total_wait_time < 0.1:
# Allow reader to send the remaining part
sleep(wait_time)
total_wait_time += wait_time
continue
else:
return msg
else:
next_msg = self._response_queue.popleft()
if (
next_msg.event_type == "ProgramOutput"
and next_msg["stream_name"] == msg["stream_name"]
and (
len(msg["data"]) + len(next_msg["data"]) <= OUTPUT_MERGE_THRESHOLD
and ("\n" not in msg["data"] or not io_animation_required)
or _ends_with_incomplete_ansi_code(msg["data"])
)
):
msg["data"] += next_msg["data"]
else:
# not to be sent in the same block, put it back
self._response_queue.appendleft(next_msg)
return msg
else:
return msg
def _ends_with_incomplete_ansi_code(data):
pos = data.rfind("\033")
if pos == -1:
return False
# note ANSI_CODE_TERMINATOR also includes [
params_and_terminator = data[pos + 2 :]
return not ANSI_CODE_TERMINATOR.search(params_and_terminator)
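# --- Illustrative sketch (not part of the original module) ---
# Hedged, self-contained demo of the "incomplete ANSI code" check above.
# It assumes ANSI_CODE_TERMINATOR is roughly a regex matching the letter that
# terminates an escape sequence; the pattern below is only a stand-in.
import re as _demo_re

_DEMO_TERMINATOR = _demo_re.compile("[@-~]")  # stand-in for ANSI_CODE_TERMINATOR

def _demo_ends_with_incomplete_ansi_code(data):
    pos = data.rfind("\033")
    if pos == -1:
        return False
    return not _DEMO_TERMINATOR.search(data[pos + 2:])

assert _demo_ends_with_incomplete_ansi_code("red: \033[31") is True    # terminator not seen yet
assert _demo_ends_with_incomplete_ansi_code("red: \033[31m") is False  # sequence is complete
assert _demo_ends_with_incomplete_ansi_code("plain text") is False     # no escape at all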
def is_bundled_python(executable):
return os.path.exists(os.path.join(os.path.dirname(executable), "thonny_python.ini"))
def create_backend_python_process(
args, stdin=None, stdout=subprocess.PIPE, stderr=subprocess.STDOUT
):
"""Used for running helper commands (eg. pip) on CPython backend.
Assumes current backend is CPython."""
# TODO: if backend == frontend, then delegate to create_frontend_python_process
python_exe = get_runner().get_local_executable()
env = get_environment_for_python_subprocess(python_exe)
env["PYTHONIOENCODING"] = "utf-8"
env["PYTHONUNBUFFERED"] = "1"
# TODO: remove frontend python from path and add backend python to it
return _create_python_process(python_exe, args, stdin, stdout, stderr, env=env)
def create_frontend_python_process(
args, stdin=None, stdout=subprocess.PIPE, stderr=subprocess.STDOUT
):
"""Used for running helper commands (eg. for installing plug-ins on by the plug-ins)"""
if _console_allocated:
python_exe = get_interpreter_for_subprocess().replace("pythonw.exe", "python.exe")
else:
python_exe = get_interpreter_for_subprocess().replace("python.exe", "pythonw.exe")
env = get_environment_for_python_subprocess(python_exe)
env["PYTHONIOENCODING"] = "utf-8"
env["PYTHONUNBUFFERED"] = "1"
return _create_python_process(python_exe, args, stdin, stdout, stderr)
def _create_python_process(
python_exe,
args,
stdin=None,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
shell=False,
env=None,
universal_newlines=True,
):
cmd = [python_exe] + args
if running_on_windows():
creationflags = subprocess.CREATE_NEW_PROCESS_GROUP
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
else:
startupinfo = None
creationflags = 0
proc = subprocess.Popen(
cmd,
stdin=stdin,
stdout=stdout,
stderr=stderr,
shell=shell,
env=env,
universal_newlines=universal_newlines,
startupinfo=startupinfo,
creationflags=creationflags,
)
proc.cmd = cmd
return proc
class BackendTerminatedError(Exception):
def __init__(self, returncode=None):
Exception.__init__(self)
self.returncode = returncode
def is_venv_interpreter_of_current_interpreter(executable):
for location in [".", ".."]:
cfg_path = os.path.join(location, "pyvenv.cfg")
if os.path.isfile(cfg_path):
with open(cfg_path) as fp:
content = fp.read()
for line in content.splitlines():
if line.replace(" ", "").startswith("home="):
_, home = line.split("=", maxsplit=1)
home = home.strip()
if os.path.isdir(home) and os.path.samefile(home, sys.prefix):
return True
return False
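# --- Illustrative sketch (not part of the original module) ---
# is_venv_interpreter_of_current_interpreter() above looks for a pyvenv.cfg in
# the current directory or one level up and compares its "home" entry with
# sys.prefix.  A typical pyvenv.cfg looks roughly like this (paths are
# made-up examples):
#
#   home = /usr/local/bin
#   include-system-site-packages = false
#   version = 3.8.10
#
# The snippet below mirrors the "home=" parsing used in that function.
_demo_cfg_line = "home = /usr/local/bin"
if _demo_cfg_line.replace(" ", "").startswith("home="):
    _, _demo_home = _demo_cfg_line.split("=", maxsplit=1)
    assert _demo_home.strip() == "/usr/local/bin"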
def get_environment_for_python_subprocess(target_executable):
overrides = get_environment_overrides_for_python_subprocess(target_executable)
return get_environment_with_overrides(overrides)
def get_environment_with_overrides(overrides):
env = os.environ.copy()
for key in overrides:
if overrides[key] is None and key in env:
del env[key]
else:
assert isinstance(overrides[key], str)
if key.upper() == "PATH":
update_system_path(env, overrides[key])
else:
env[key] = overrides[key]
return env
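# --- Illustrative sketch (not part of the original module) ---
# Hedged usage example for get_environment_with_overrides(): a value of None
# removes a variable from the copied environment, any string value replaces
# it, and "PATH" is routed through update_system_path().  The keys in the
# override dict below are only examples.
def _demo_environment_overrides():
    env = get_environment_with_overrides({
        "PYTHONHOME": None,   # ensure the child does not inherit PYTHONHOME
        "THONNY_DEBUG": "1",  # force the child into debug mode
    })
    assert "PYTHONHOME" not in env
    assert env["THONNY_DEBUG"] == "1"
    return env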
def get_environment_overrides_for_python_subprocess(target_executable):
"""Take care of not not confusing different interpreter
with variables meant for bundled interpreter"""
# At the moment I'm tweaking the environment only if current
# exe is bundled for Thonny.
# In remaining cases it is user's responsibility to avoid
# calling Thonny with environment which may be confusing for
# different Pythons called in a subprocess.
this_executable = sys.executable.replace("pythonw.exe", "python.exe")
target_executable = target_executable.replace("pythonw.exe", "python.exe")
interpreter_specific_keys = [
"TCL_LIBRARY",
"TK_LIBRARY",
"LD_LIBRARY_PATH",
"DYLD_LIBRARY_PATH",
"SSL_CERT_DIR",
"SSL_CERT_FILE",
"PYTHONHOME",
"PYTHONPATH",
"PYTHONNOUSERSITE",
"PYTHONUSERBASE",
]
result = {}
if os.path.samefile(
target_executable, this_executable
) or is_venv_interpreter_of_current_interpreter(target_executable):
# bring out some important variables so that they can
# be explicitly set in macOS Terminal
# (If they are set then it's most likely because current exe is in Thonny bundle)
for key in interpreter_specific_keys:
if key in os.environ:
result[key] = os.environ[key]
# never pass some variables to different interpreter
# (even if it's venv or symlink to current one)
if not is_same_path(target_executable, this_executable):
for key in ["PYTHONPATH", "PYTHONHOME", "PYTHONNOUSERSITE", "PYTHONUSERBASE"]:
if key in os.environ:
result[key] = None
else:
# interpreters are not related
# interpreter specific keys most likely would confuse other interpreter
for key in interpreter_specific_keys:
if key in os.environ:
result[key] = None
# some keys should be never passed
for key in [
"PYTHONSTARTUP",
"PYTHONBREAKPOINT",
"PYTHONDEBUG",
"PYTHONNOUSERSITE",
"PYTHONASYNCIODEBUG",
]:
if key in os.environ:
result[key] = None
# venv may not find (correct) Tk without assistance (eg. in Ubuntu)
if is_venv_interpreter_of_current_interpreter(target_executable):
try:
if "TCL_LIBRARY" not in os.environ or "TK_LIBRARY" not in os.environ:
result["TCL_LIBRARY"] = get_workbench().tk.exprstring("$tcl_library")
result["TK_LIBRARY"] = get_workbench().tk.exprstring("$tk_library")
except Exception:
logging.exception("Can't compute Tcl/Tk library location")
return result
def construct_cd_command(path) -> str:
return construct_cmd_line(["%cd", path])
_command_id_counter = 0
def generate_command_id():
global _command_id_counter
_command_id_counter += 1
return "cmd_" + str(_command_id_counter)
class InlineCommandDialog(WorkDialog):
def __init__(
self,
master,
cmd: Union[InlineCommand, Callable],
title,
instructions=None,
output_prelude=None,
autostart=True,
):
self.response = None
self._title = title
self._instructions = instructions
self._output_prelude = output_prelude
self._cmd = cmd
self.returncode = None
get_shell().set_ignore_program_output(True)
get_workbench().bind("InlineResponse", self._on_response, True)
get_workbench().bind("InlineProgress", self._on_progress, True)
get_workbench().bind("ProgramOutput", self._on_output, True)
super().__init__(master, autostart=autostart)
def get_title(self):
return self._title
def get_instructions(self) -> Optional[str]:
return self._instructions or self._cmd.get("description", "Working...")
def _on_response(self, response):
if response.get("command_id") == getattr(self._cmd, "id"):
logger.debug("Dialog got response: %s", response)
self.response = response
self.returncode = response.get("returncode", None)
success = (
not self.returncode and not response.get("error") and not response.get("errors")
)
if success:
self.set_action_text("Done!")
else:
self.set_action_text("Error")
if response.get("error"):
self.append_text("Error %s\n" % response["error"], stream_name="stderr")
if response.get("errors"):
self.append_text("Errors %s\n" % response["errors"], stream_name="stderr")
if self.returncode:
self.append_text(
"Process returned with code %s\n" % self.returncode, stream_name="stderr"
)
self.report_done(success)
def _on_progress(self, msg):
if msg.get("command_id") != getattr(self._cmd, "id"):
return
if msg.get("value", None) is not None and msg.get("maximum", None) is not None:
self.report_progress(msg["value"], msg["maximum"])
if msg.get("description"):
self.set_action_text(msg["description"])
def _on_output(self, msg):
stream_name = msg.get("stream_name", "stdout")
self.append_text(msg["data"], stream_name)
self.set_action_text_smart(msg["data"])
def start_work(self):
self.send_command_to_backend()
def send_command_to_backend(self):
if not isinstance(self._cmd, CommandToBackend):
# it was a lazy definition
self._cmd = self._cmd()
logger.debug("Starting command in dialog: %s", self._cmd)
get_runner().send_command(self._cmd)
def cancel_work(self):
super(InlineCommandDialog, self).cancel_work()
get_runner()._cmd_interrupt()
def close(self):
get_workbench().unbind("InlineResponse", self._on_response)
get_workbench().unbind("InlineProgress", self._on_progress)
super(InlineCommandDialog, self).close()
get_shell().set_ignore_program_output(False)
def get_frontend_python():
# TODO: deprecated (name can be misleading)
warnings.warn("get_frontend_python is deprecated")
return get_interpreter_for_subprocess(sys.executable)
def get_interpreter_for_subprocess(candidate=None):
if candidate is None:
candidate = sys.executable
pythonw = candidate.replace("python.exe", "pythonw.exe")
if not _console_allocated and os.path.exists(pythonw):
return pythonw
else:
return candidate.replace("pythonw.exe", "python.exe")
|
py | 1a371bc7d10adf3830bd83bec053bce401158419 | # Copyright (c) 2017 Intel Corporation. All rights reserved.
# Use of this source code is governed by a MIT-style
# license that can be found in the LICENSE file.
from collections import defaultdict
from tastypie.exceptions import NotFound
from django.core.exceptions import ObjectDoesNotExist
import tastypie.http as http
from tastypie import fields
from tastypie.authorization import DjangoAuthorization
from tastypie.constants import ALL_WITH_RELATIONS
from chroma_core.services.job_scheduler.job_scheduler_client import JobSchedulerClient
from chroma_core.services import log_register
from chroma_api.utils import dehydrate_command
from chroma_api.utils import custom_response
from chroma_api.network_interface import NetworkInterfaceResource
from chroma_api.lnet_configuration import LNetConfigurationResource
from chroma_api.authentication import AnonymousAuthentication
from chroma_core.models import Command
from chroma_core.models import Nid
from chroma_api.validation_utils import ChromaValidation, validate
from chroma_api.chroma_model_resource import ChromaModelResource
log = log_register(__name__)
class NidValidation(ChromaValidation):
mandatory_message = "This field is mandatory"
def is_valid(self, bundle, request=None, **kwargs):
errors = defaultdict(list)
if request.method != 'POST':
return errors
for nids_data in bundle.data.get('objects', [bundle.data]):
if 'lnd_network' not in nids_data:
errors['lnd_network'] = ["Field lnd_network not present in data"]
if not errors:
self.validate_object(nids_data,
errors,
{"lnd_network": self.Expectation(True),
"network_interface": self.Expectation(True),
"lnd_type": self.Expectation(int(nids_data['lnd_network'] != -1)),
"resource_uri": self.Expectation(False),
"lnet_configuration": self.Expectation(False)})
if not errors:
self.validate_resources([self.URIInfo(nids_data.get('lnet_configuration', None), LNetConfigurationResource),
self.URIInfo(nids_data['network_interface'], NetworkInterfaceResource)],
errors, request)
if not errors:
# Check the lnd_type passed is valid for the network_interface
if ('lnd_type' in nids_data) and (nids_data['lnd_type'] not in NetworkInterfaceResource().get_via_uri(nids_data['network_interface'], request).lnd_types):
errors['lnd_type'].append("lnd_type %s not valid for interface %s" % (nids_data['lnd_type'], NetworkInterfaceResource().get_via_uri(nids_data['network_interface'], request)))
return errors
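# --- Illustrative sketch (not part of the original module) ---
# A hedged example of a POST body that the validation above would accept.
# The resource URI and lnd_* values are made up for illustration only; lnd_type
# must be one of the lnd_types reported by the referenced network interface.
EXAMPLE_NID_POST_BODY = {
    "objects": [
        {
            "network_interface": "/api/network_interface/1/",
            "lnd_network": 0,
            "lnd_type": 2,
        }
    ]
}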
###
# Allows read and update of Nid
#
# Responds to
#
# Get
# https://localhost:8000/api/nid/1/
# https://localhost:8000/api/nid/
#
# Put
# https://localhost:8000/api/nid/1
#
# Post
# https://localhost:8000/api/nid/
#
# Delete
# https://localhost:8000/api/nid/1/
# https://localhost:8000/api/nid/
class NidResource(ChromaModelResource):
"""
Nid information.
"""
network_interface = fields.ToOneField('chroma_api.network_interface.NetworkInterfaceResource', 'network_interface')
lnet_configuration = fields.ToOneField('chroma_api.lnet_configuration.LNetConfigurationResource', 'lnet_configuration')
class Meta:
queryset = Nid.objects.select_related('network_interface', 'lnet_configuration').all()
authorization = DjangoAuthorization()
authentication = AnonymousAuthentication()
validation = NidValidation()
resource_name = 'nid'
list_allowed_methods = ['get', 'post', 'delete']
detail_allowed_methods = ['get', 'post', 'put', 'delete']
filtering = {'network_interface': ALL_WITH_RELATIONS,
'lnet_configuration': ALL_WITH_RELATIONS,
'id': ['exact']}
@validate
def obj_create(self, bundle, **kwargs):
request = bundle.request
if 'objects' in bundle.data:
nids_data = bundle.data['objects']
else:
nids_data = [bundle.data]
for nid_data in nids_data:
nid_data['network_interface'] = NetworkInterfaceResource().get_via_uri(nid_data['network_interface'], bundle.request).id
command_id = JobSchedulerClient.update_nids(nids_data)
try:
command = Command.objects.get(pk = command_id)
except ObjectDoesNotExist:
command = None
raise custom_response(self, request, http.HttpAccepted,
{
'command': dehydrate_command(command)
})
@validate
def obj_update(self, bundle, **kwargs):
self.obj_create(bundle, **kwargs)
def obj_delete_list(self, bundle, **kwargs):
"""
An ORM-specific implementation of ``obj_delete_list``.
Takes optional ``kwargs``, which are used to narrow the query to find
the instance.
"""
try:
obj_list = self.obj_get_list(bundle, **kwargs)
except ObjectDoesNotExist:
raise NotFound("A model instance matching the provided arguments could not be found.")
self._nids_delete(obj_list)
def obj_delete(self, bundle, **kwargs):
"""
An ORM-specific implementation of ``obj_delete``.
Takes optional ``kwargs``, which are used to narrow the query to find
the instance.
"""
try:
obj = self.obj_get(bundle, **kwargs)
except ObjectDoesNotExist:
raise NotFound("A model instance matching the provided arguments could not be found.")
self._nids_delete([obj])
def _nids_delete(self, obj_list):
delete_list = []
for nid in obj_list:
delete_list.append({'network_interface': nid.network_interface_id, 'lnd_network': -1})
if (len(delete_list) > 0):
JobSchedulerClient.update_nids(delete_list)
|
py | 1a371bc845386673def4d60bcb340213e9f66565 | from .port import Port, PortManager, Category, Maintainer, Variant, Dependency, LastPortIndexUpdate
from .buildhistory import BuildHistory, Builder
from .stats import UUID, PortInstallation, Submission
|
py | 1a371cff3dc8f8a7f45c1e1b3e0b280949ed7ac6 | #!/usr/bin/python
"""Utility to generate the header files for BOOST_METAPARSE_STRING"""
# Copyright Abel Sinkovics ([email protected]) 2016.
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
import argparse
import math
import os
import sys
VERSION = 1
class Namespace(object):
"""Generate namespace definition"""
def __init__(self, out_f, names):
self.out_f = out_f
self.names = names
def begin(self):
"""Generate the beginning part"""
self.out_f.write('\n')
for depth, name in enumerate(self.names):
self.out_f.write(
'{0}namespace {1}\n{0}{{\n'.format(self.prefix(depth), name)
)
def end(self):
"""Generate the closing part"""
for depth in xrange(len(self.names) - 1, -1, -1):
self.out_f.write('{0}}}\n'.format(self.prefix(depth)))
def prefix(self, depth=None):
"""Returns the prefix of a given depth. Returns the prefix code inside
the namespace should use when depth is None."""
if depth is None:
depth = len(self.names)
return ' ' * depth
def __enter__(self):
self.begin()
return self
def __exit__(self, typ, value, traceback):
self.end()
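# --- Illustrative sketch (not part of the original script) ---
# Hedged usage example for the Namespace context manager above: writing to a
# small in-memory sink, it emits nested "namespace x {" / "}" blocks and
# exposes the indentation prefix for code generated inside them.
def _demo_namespace_usage():
    class _Sink(object):
        def __init__(self):
            self.text = ''
        def write(self, chunk):
            self.text += chunk
    out = _Sink()
    with Namespace(out, ['boost', 'metaparse']) as nsp:
        out.write('{0}// generated code goes here\n'.format(nsp.prefix()))
    return out.text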
def write_autogen_info(out_f):
"""Write the comment about the file being autogenerated"""
out_f.write(
'\n'
'// This is an automatically generated header file.\n'
'// Generated with the tools/string_headers.py utility of\n'
'// Boost.Metaparse\n'
)
class IncludeGuard(object):
"""Generate include guards"""
def __init__(self, out_f):
self.out_f = out_f
def begin(self):
"""Generate the beginning part"""
name = 'BOOST_METAPARSE_V1_CPP11_IMPL_STRING_HPP'
self.out_f.write('#ifndef {0}\n#define {0}\n'.format(name))
write_autogen_info(self.out_f)
def end(self):
"""Generate the closing part"""
self.out_f.write('\n#endif\n')
def __enter__(self):
self.begin()
return self
def __exit__(self, typ, value, traceback):
self.end()
def macro_name(name):
"""Generate the full macro name"""
return 'BOOST_METAPARSE_V{0}_{1}'.format(VERSION, name)
def define_macro(out_f, (name, args, body), undefine=False, check=True):
"""Generate a macro definition or undefinition"""
if undefine:
out_f.write(
'#undef {0}\n'
.format(macro_name(name))
)
else:
if args:
arg_list = '({0})'.format(', '.join(args))
else:
arg_list = ''
if check:
out_f.write(
'#ifdef {0}\n'
'# error {0} already defined.\n'
'#endif\n'
.format(macro_name(name))
)
out_f.write(
'#define {0}{1} {2}\n'.format(macro_name(name), arg_list, body)
)
def filename(out_dir, name, undefine=False):
"""Generate the filename"""
if undefine:
prefix = 'undef_'
else:
prefix = ''
return os.path.join(out_dir, '{0}{1}.hpp'.format(prefix, name.lower()))
def length_limits(max_length_limit, length_limit_step):
"""Generates the length limits"""
string_len = len(str(max_length_limit))
return [
str(i).zfill(string_len) for i in
xrange(
length_limit_step,
max_length_limit + length_limit_step - 1,
length_limit_step
)
]
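# --- Illustrative sketch (not part of the original script) ---
# Hedged example of what length_limits() produces: one zero-padded string per
# step up to (and including) the maximum limit.
def _demo_length_limits():
    assert length_limits(512, 128) == ['128', '256', '384', '512']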
def unique_names(count):
"""Generate count unique variable name"""
return ('C{0}'.format(i) for i in xrange(0, count))
def generate_take(out_f, steps, line_prefix):
"""Generate the take function"""
out_f.write(
'{0}constexpr inline int take(int n_)\n'
'{0}{{\n'
'{0} return {1} 0 {2};\n'
'{0}}}\n'
'\n'.format(
line_prefix,
''.join('n_ >= {0} ? {0} : ('.format(s) for s in steps),
')' * len(steps)
)
)
def generate_make_string(out_f, max_step):
"""Generate the make_string template"""
steps = [2 ** n for n in xrange(int(math.log(max_step, 2)), -1, -1)]
with Namespace(
out_f,
['boost', 'metaparse', 'v{0}'.format(VERSION), 'impl']
) as nsp:
generate_take(out_f, steps, nsp.prefix())
out_f.write(
'{0}template <int LenNow, int LenRemaining, char... Cs>\n'
'{0}struct make_string;\n'
'\n'
'{0}template <char... Cs>'
' struct make_string<0, 0, Cs...> : string<> {{}};\n'
.format(nsp.prefix())
)
disable_sun = False
for i in reversed(steps):
if i > 64 and not disable_sun:
out_f.write('#ifndef __SUNPRO_CC\n')
disable_sun = True
out_f.write(
'{0}template <int LenRemaining,{1}char... Cs>'
' struct make_string<{2},LenRemaining,{3}Cs...> :'
' concat<string<{4}>,'
' typename make_string<take(LenRemaining),'
'LenRemaining-take(LenRemaining),Cs...>::type> {{}};\n'
.format(
nsp.prefix(),
''.join('char {0},'.format(n) for n in unique_names(i)),
i,
''.join('{0},'.format(n) for n in unique_names(i)),
','.join(unique_names(i))
)
)
if disable_sun:
out_f.write('#endif\n')
def generate_string(out_dir, limits):
"""Generate string.hpp"""
max_limit = max((int(v) for v in limits))
with open(filename(out_dir, 'string'), 'wb') as out_f:
with IncludeGuard(out_f):
out_f.write(
'\n'
'#include <boost/metaparse/v{0}/cpp11/impl/concat.hpp>\n'
'#include <boost/preprocessor/cat.hpp>\n'
.format(VERSION)
)
generate_make_string(out_f, 512)
out_f.write(
'\n'
'#ifndef BOOST_METAPARSE_LIMIT_STRING_SIZE\n'
'# error BOOST_METAPARSE_LIMIT_STRING_SIZE not defined\n'
'#endif\n'
'\n'
'#if BOOST_METAPARSE_LIMIT_STRING_SIZE > {0}\n'
'# error BOOST_METAPARSE_LIMIT_STRING_SIZE is greater than'
' {0}. To increase the limit run tools/string_headers.py of'
' Boost.Metaparse against your Boost headers.\n'
'#endif\n'
'\n'
.format(max_limit)
)
define_macro(out_f, (
'STRING',
['s'],
'{0}::make_string< '
'{0}::take(sizeof(s)-1), sizeof(s)-1-{0}::take(sizeof(s)-1),'
'BOOST_PP_CAT({1}, BOOST_METAPARSE_LIMIT_STRING_SIZE)(s)'
'>::type'
.format(
'::boost::metaparse::v{0}::impl'.format(VERSION),
macro_name('I')
)
))
out_f.write('\n')
for limit in xrange(0, max_limit + 1):
out_f.write(
'#define {0} {1}\n'
.format(
macro_name('I{0}'.format(limit)),
macro_name('INDEX_STR{0}'.format(
min(int(l) for l in limits if int(l) >= limit)
))
)
)
out_f.write('\n')
prev_macro = None
prev_limit = 0
for length_limit in (int(l) for l in limits):
this_macro = macro_name('INDEX_STR{0}'.format(length_limit))
out_f.write(
'#define {0}(s) {1}{2}\n'
.format(
this_macro,
'{0}(s),'.format(prev_macro) if prev_macro else '',
','.join(
'{0}((s), {1})'
.format(macro_name('STRING_AT'), i)
for i in xrange(prev_limit, length_limit)
)
)
)
prev_macro = this_macro
prev_limit = length_limit
def positive_integer(value):
"""Throws when the argument is not a positive integer"""
val = int(value)
if val > 0:
return val
else:
raise argparse.ArgumentTypeError("A positive number is expected")
def existing_path(value):
"""Throws when the path does not exist"""
if os.path.exists(value):
return value
else:
raise argparse.ArgumentTypeError("Path {0} not found".format(value))
def main():
"""The main function of the script"""
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
'--boost_dir',
required=False,
type=existing_path,
help='The path to the include/boost directory of Metaparse'
)
parser.add_argument(
'--max_length_limit',
required=False,
default=2048,
type=positive_integer,
help='The maximum supported length limit'
)
parser.add_argument(
'--length_limit_step',
required=False,
default=128,
type=positive_integer,
help='The longest step at which headers are generated'
)
args = parser.parse_args()
if args.boost_dir is None:
tools_path = os.path.dirname(os.path.abspath(__file__))
boost_dir = os.path.join(
os.path.dirname(tools_path),
'include',
'boost'
)
else:
boost_dir = args.boost_dir
if args.max_length_limit < 1:
sys.stderr.write('Invalid maximum length limit')
sys.exit(-1)
generate_string(
os.path.join(
boost_dir,
'metaparse',
'v{0}'.format(VERSION),
'cpp11',
'impl'
),
length_limits(args.max_length_limit, args.length_limit_step)
)
if __name__ == '__main__':
main()
|
py | 1a371d80bd06ae6d47556f7eff098b5578fdf63d | import win32gui
from errors import IdenticalWindowsError, WindowNotFound
from typing import Any
class WindowsHandler:
"""
Extract a handler for a specific active window.
Parameters
----------
screen_name : str
A ``string`` of the title to match from the list of enumerated windows.
"""
def __init__(self, screen_name: str) -> None:
self._windows = self._enumerate_screens()
_hwdl = self._extract_window(self._windows, screen_name)
self._set_windows_foreground(_hwdl)
self.handler = _hwdl
def _enumerate_callback(self, hwdl: Any, windows: list) -> None:
"""
Enumerate all running windows.
Create a list of tuples representing the window handle plus the text
corresponding to the window.
Parameters
----------
hwdl : Any
A handler pointing to a single active window.
windows : list
A ``list`` of ``tuples`` where each item represents the handler to
the window and the corresponding text for the window.
Notes
-----
Appends a ``tuple`` of the window handle and its title text to
``windows`` in place; the callback itself returns ``None``.
"""
windows.append((hwdl, win32gui.GetWindowText(hwdl)))
def _enumerate_screens(self) -> list:
"""
Enumerate all active screens.
Get a list of all active screens running on the PC including the window
handler and the corresponding text.
Returns
-------
list
Returns a ``list`` of ``tuples`` where each item represents the
handler to a window and the corresponding text for the window.
"""
windows = []
win32gui.GetDesktopWindow()
win32gui.EnumWindows(self._enumerate_callback, windows)
return windows
def _extract_window(self, windows: list, screen_name: str) -> Any:
"""
Retrieve the handle for a specific window.
Iterate through a list of enumerated active windows on the system and
attempt to find a match for a specific window with a given title. If
multiple windows exist with the same name, throw an error that the specific
window can't be identified. If no matching windows can be found, throw an
error that it can't be found.
Parameters
----------
windows : list
A ``list`` of ``tuples`` where each item represents the handler to a
window and the corresponding text for the window.
screen_name : str
A ``string`` of the title to match from the list of enumerated windows.
Returns
-------
Any
Returns a handler to the requested window if found.
Raises
------
WindowNotFound
Raises a ``WindowNotFound`` error if no windows match the requested
title.
IdenticalWindowsError
Raises an ``IdenticalWindowsError`` when there are multiple running
windows with the same name and a unique instance cannot be found.
"""
window = [(hwdl, title) for hwdl, title in windows
if screen_name.lower() in title.lower()]
if not len(window):
raise WindowNotFound(f'Screen "{screen_name}" not found. Ensure a '
f'window with name "{screen_name}" is '
'running.')
elif len(window) > 1:
# Multiple windows have the screen name included in at least part
# of the title. Check for an exact copy of the name excluding case.
window = [(hwdl, title) for hwdl, title in window
if screen_name.lower() == title.lower()]
if len(window) != 1:
raise IdenticalWindowsError('Multiple windows contain the '
f'name {screen_name}. Unable to '
'identify unique window.')
# The first and only element is the requested window at this point.
hwdl, _ = window[0]
return hwdl
def _set_windows_foreground(self, hwdl: Any) -> None:
"""
Set the requested window to the foreground.
In order to capture screenshots, the window needs to be placed in the
foreground as the screen grabber captures the specified dimensions for
the top-most windows.
hwdl : Any
A handler to the requested window.
"""
win32gui.SetForegroundWindow(hwdl)
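# --- Illustrative sketch (not part of the original module) ---
# Hedged usage example: constructing a WindowsHandler brings the first window
# whose title matches the given name to the foreground and stores its handle.
# "Untitled - Notepad" is only an example title; any running window works.
def _demo_windows_handler():
    handler = WindowsHandler("Untitled - Notepad")
    return handler.handler  # raw win32 window handle (an int)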
|
py | 1a371e50847aa59fe3b9fe6353a805c01eb8ec2c | # -*- coding: utf-8 -*-
"""
CSV related help functions
"""
import csv
import codecs
from setup import eol, encoding, delimiter
def dict2csv(csv_path, a_dict, sort=None):
"""
Writes a dictionary to a csv file, optionally sorted by key (sort=0) or
value (sort=1)
"""
with codecs.open(csv_path, 'w', encoding) as csv_file:
dictitems = a_dict.items()
if sort in [0, 1]:
dictitems.sort(key=lambda x:x[sort])
for (k, v) in dictitems:
csv_file.write(u'%s%s%s%s' % (k, delimiter, v, eol))
def csv2dict(csv_path, a_dict, encoding=encoding):
"""Read a dictionary from a csv file"""
with open(csv_path) as csv_file:
csv_reader = csv.reader(csv_file, delimiter=delimiter)
for row in csv_reader:
if len(row) < 2:
raise IOError(_("Failed to load CSV file '%s'") % csv_file.name)
else:
a_dict[row[0].decode(encoding)] = row[1].decode(encoding)
return a_dict
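# --- Illustrative sketch (not part of the original module) ---
# Hedged round-trip example for the two helpers above.  The temporary path and
# sample data are made up; `delimiter`, `eol` and `encoding` come from setup.
def _demo_csv_roundtrip(tmp_path='/tmp/demo_dict.csv'):
    original = {u'pi': u'3.14', u'e': u'2.72'}
    dict2csv(tmp_path, original, sort=0)   # written sorted by key
    restored = csv2dict(tmp_path, {})
    assert restored == original
    return restored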
|
py | 1a371e58b6e46d42fb30bfc6a2944b0ecec7560a | # Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Why our own memcache client?
By Michael Barton
python-memcached doesn't use consistent hashing, so adding or
removing a memcache server from the pool invalidates a huge
percentage of cached items.
If you keep a pool of python-memcached client objects, each client
object has its own connection to every memcached server, only one of
which is ever in use. So you wind up with n * m open sockets and
almost all of them idle. This client effectively has a pool for each
server, so the number of backend connections is hopefully greatly
reduced.
python-memcache uses pickle to store things, and there was already a
huge stink about Swift using pickles in memcache
(http://osvdb.org/show/osvdb/86581). That seemed sort of unfair,
since nova and keystone and everyone else use pickles for memcache
too, but it's hidden behind a "standard" library. But changing would
be a security regression at this point.
Also, pylibmc wouldn't work for us because it needs to use python
sockets in order to play nice with eventlet.
Lucid comes with memcached: v1.4.2. Protocol documentation for that
version is at:
http://github.com/memcached/memcached/blob/1.4.2/doc/protocol.txt
"""
import six
import six.moves.cPickle as pickle
import json
import logging
import time
from bisect import bisect
from eventlet.green import socket
from eventlet.pools import Pool
from eventlet import Timeout
from six.moves import range
from swift.common import utils
from swift.common.utils import md5, human_readable
DEFAULT_MEMCACHED_PORT = 11211
CONN_TIMEOUT = 0.3
POOL_TIMEOUT = 1.0 # WAG
IO_TIMEOUT = 2.0
PICKLE_FLAG = 1
JSON_FLAG = 2
NODE_WEIGHT = 50
PICKLE_PROTOCOL = 2
TRY_COUNT = 3
# if ERROR_LIMIT_COUNT errors occur in ERROR_LIMIT_TIME seconds, the server
# will be considered failed for ERROR_LIMIT_DURATION seconds.
ERROR_LIMIT_COUNT = 10
ERROR_LIMIT_TIME = ERROR_LIMIT_DURATION = 60
DEFAULT_ITEM_SIZE_WARNING_THRESHOLD = -1
def md5hash(key):
if not isinstance(key, bytes):
if six.PY2:
key = key.encode('utf-8')
else:
key = key.encode('utf-8', errors='surrogateescape')
return md5(key, usedforsecurity=False).hexdigest().encode('ascii')
def sanitize_timeout(timeout):
"""
Sanitize a timeout value to use an absolute expiration time if the delta
is greater than 30 days (in seconds). Note that the memcached server
translates negative values to mean a delta of 30 days in seconds (and 1
additional second), client beware.
"""
if timeout > (30 * 24 * 60 * 60):
timeout += time.time()
return int(timeout)
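# --- Illustrative sketch (not part of the original module) ---
# Hedged example of sanitize_timeout(): deltas up to 30 days pass through
# unchanged, anything larger is converted to an absolute Unix timestamp
# (now + delta), matching the memcached expiration convention noted above.
def _demo_sanitize_timeout():
    assert sanitize_timeout(300) == 300           # short TTL kept as a delta
    month_and_a_bit = 31 * 24 * 60 * 60
    absolute = sanitize_timeout(month_and_a_bit)
    assert absolute > time.time()                 # became an absolute expiry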
def set_msg(key, flags, timeout, value):
if not isinstance(key, bytes):
raise TypeError('key must be bytes')
if not isinstance(value, bytes):
raise TypeError('value must be bytes')
return b' '.join([
b'set',
key,
str(flags).encode('ascii'),
str(timeout).encode('ascii'),
str(len(value)).encode('ascii'),
]) + (b'\r\n' + value + b'\r\n')
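# --- Illustrative sketch (not part of the original module) ---
# Hedged example of the ASCII-protocol "set" command built by set_msg() above:
# b"set <key> <flags> <exptime> <bytes>\r\n<data block>\r\n".
def _demo_set_msg():
    msg = set_msg(b'mykey', JSON_FLAG, 0, b'"value"')
    assert msg == b'set mykey 2 0 7\r\n"value"\r\n'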
class MemcacheConnectionError(Exception):
pass
class MemcachePoolTimeout(Timeout):
pass
class MemcacheConnPool(Pool):
"""
Connection pool for Memcache Connections
The *server* parameter can be a hostname, an IPv4 address, or an IPv6
address with an optional port. See
:func:`swift.common.utils.parse_socket_string` for details.
"""
def __init__(self, server, size, connect_timeout, tls_context=None):
Pool.__init__(self, max_size=size)
self.host, self.port = utils.parse_socket_string(
server, DEFAULT_MEMCACHED_PORT)
self._connect_timeout = connect_timeout
self._tls_context = tls_context
def create(self):
addrs = socket.getaddrinfo(self.host, self.port, socket.AF_UNSPEC,
socket.SOCK_STREAM)
family, socktype, proto, canonname, sockaddr = addrs[0]
sock = socket.socket(family, socket.SOCK_STREAM)
sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
try:
with Timeout(self._connect_timeout):
sock.connect(sockaddr)
if self._tls_context:
sock = self._tls_context.wrap_socket(sock,
server_hostname=self.host)
except (Exception, Timeout):
sock.close()
raise
return (sock.makefile('rwb'), sock)
def get(self):
fp, sock = super(MemcacheConnPool, self).get()
try:
if fp is None:
# An error happened previously, so we need a new connection
fp, sock = self.create()
return fp, sock
except MemcachePoolTimeout:
# This is the only place that knows an item was successfully taken
# from the pool, so it has to be responsible for repopulating it.
# Any other errors should get handled in _get_conns(); see the
# comment about timeouts during create() there.
self.put((None, None))
raise
class MemcacheRing(object):
"""
Simple, consistent-hashed memcache client.
"""
def __init__(
self, servers, connect_timeout=CONN_TIMEOUT,
io_timeout=IO_TIMEOUT, pool_timeout=POOL_TIMEOUT,
tries=TRY_COUNT, allow_pickle=False, allow_unpickle=False,
max_conns=2, tls_context=None, logger=None,
error_limit_count=ERROR_LIMIT_COUNT,
error_limit_time=ERROR_LIMIT_TIME,
error_limit_duration=ERROR_LIMIT_DURATION,
item_size_warning_threshold=DEFAULT_ITEM_SIZE_WARNING_THRESHOLD):
self._ring = {}
self._errors = dict(((serv, []) for serv in servers))
self._error_limited = dict(((serv, 0) for serv in servers))
self._error_limit_count = error_limit_count
self._error_limit_time = error_limit_time
self._error_limit_duration = error_limit_duration
for server in sorted(servers):
for i in range(NODE_WEIGHT):
self._ring[md5hash('%s-%s' % (server, i))] = server
self._tries = tries if tries <= len(servers) else len(servers)
self._sorted = sorted(self._ring)
self._client_cache = dict((
(server, MemcacheConnPool(server, max_conns, connect_timeout,
tls_context=tls_context))
for server in servers))
self._connect_timeout = connect_timeout
self._io_timeout = io_timeout
self._pool_timeout = pool_timeout
self._allow_pickle = allow_pickle
self._allow_unpickle = allow_unpickle or allow_pickle
if logger is None:
self.logger = logging.getLogger()
else:
self.logger = logger
self.item_size_warning_threshold = item_size_warning_threshold
def _exception_occurred(self, server, e, action='talking',
sock=None, fp=None, got_connection=True):
if isinstance(e, Timeout):
self.logger.error("Timeout %(action)s to memcached: %(server)s",
{'action': action, 'server': server})
elif isinstance(e, (socket.error, MemcacheConnectionError)):
self.logger.error(
"Error %(action)s to memcached: %(server)s: %(err)s",
{'action': action, 'server': server, 'err': e})
else:
self.logger.exception("Error %(action)s to memcached: %(server)s",
{'action': action, 'server': server})
try:
if fp:
fp.close()
del fp
except Exception:
pass
try:
if sock:
sock.close()
del sock
except Exception:
pass
if got_connection:
# We need to return something to the pool
# A new connection will be created the next time it is retrieved
self._return_conn(server, None, None)
if self._error_limit_time <= 0 or self._error_limit_duration <= 0:
return
now = time.time()
self._errors[server].append(now)
if len(self._errors[server]) > self._error_limit_count:
self._errors[server] = [err for err in self._errors[server]
if err > now - self._error_limit_time]
if len(self._errors[server]) > self._error_limit_count:
self._error_limited[server] = now + self._error_limit_duration
self.logger.error('Error limiting server %s', server)
def _get_conns(self, key):
"""
Retrieves a server conn from the pool, or connects a new one.
Chooses the server based on a consistent hash of "key".
:return: generator to serve memcached connection
"""
pos = bisect(self._sorted, key)
served = []
while len(served) < self._tries:
pos = (pos + 1) % len(self._sorted)
server = self._ring[self._sorted[pos]]
if server in served:
continue
served.append(server)
if self._error_limited[server] > time.time():
continue
sock = None
try:
with MemcachePoolTimeout(self._pool_timeout):
fp, sock = self._client_cache[server].get()
yield server, fp, sock
except MemcachePoolTimeout as e:
self._exception_occurred(
server, e, action='getting a connection',
got_connection=False)
except (Exception, Timeout) as e:
# Typically a Timeout exception caught here is the one raised
# by the create() method of this server's MemcacheConnPool
# object.
self._exception_occurred(
server, e, action='connecting', sock=sock)
def _return_conn(self, server, fp, sock):
"""Returns a server connection to the pool."""
self._client_cache[server].put((fp, sock))
def set(self, key, value, serialize=True, time=0,
min_compress_len=0):
"""
Set a key/value pair in memcache
:param key: key
:param value: value
:param serialize: if True, value is serialized with JSON before sending
to memcache, or with pickle if configured to use
pickle instead of JSON (to avoid cache poisoning)
:param time: the time to live
:param min_compress_len: minimum compress length, this parameter was
added to keep the signature compatible with
python-memcached interface. This
implementation ignores it.
"""
key = md5hash(key)
timeout = sanitize_timeout(time)
flags = 0
if serialize and self._allow_pickle:
value = pickle.dumps(value, PICKLE_PROTOCOL)
flags |= PICKLE_FLAG
elif serialize:
if isinstance(value, bytes):
value = value.decode('utf8')
value = json.dumps(value).encode('ascii')
flags |= JSON_FLAG
elif not isinstance(value, bytes):
value = str(value).encode('utf-8')
for (server, fp, sock) in self._get_conns(key):
try:
with Timeout(self._io_timeout):
sock.sendall(set_msg(key, flags, timeout, value))
# Wait for the set to complete
msg = fp.readline().strip()
if msg != b'STORED':
if not six.PY2:
msg = msg.decode('ascii')
self.logger.error(
"Error setting value in memcached: "
"%(server)s: %(msg)s",
{'server': server, 'msg': msg})
if 0 <= self.item_size_warning_threshold <= len(value):
self.logger.warning(
"Item size larger than warning threshold: "
"%d (%s) >= %d (%s)", len(value),
human_readable(len(value)),
self.item_size_warning_threshold,
human_readable(self.item_size_warning_threshold))
self._return_conn(server, fp, sock)
return
except (Exception, Timeout) as e:
self._exception_occurred(server, e, sock=sock, fp=fp)
def get(self, key):
"""
Gets the object specified by key. It will also unserialize the object
before returning if it is serialized in memcache with JSON, or if it
is pickled and unpickling is allowed.
:param key: key
:returns: value of the key in memcache
"""
key = md5hash(key)
value = None
for (server, fp, sock) in self._get_conns(key):
try:
with Timeout(self._io_timeout):
sock.sendall(b'get ' + key + b'\r\n')
line = fp.readline().strip().split()
while True:
if not line:
raise MemcacheConnectionError('incomplete read')
if line[0].upper() == b'END':
break
if line[0].upper() == b'VALUE' and line[1] == key:
size = int(line[3])
value = fp.read(size)
if int(line[2]) & PICKLE_FLAG:
if self._allow_unpickle:
value = pickle.loads(value)
else:
value = None
elif int(line[2]) & JSON_FLAG:
value = json.loads(value)
fp.readline()
line = fp.readline().strip().split()
self._return_conn(server, fp, sock)
return value
except (Exception, Timeout) as e:
self._exception_occurred(server, e, sock=sock, fp=fp)
def incr(self, key, delta=1, time=0):
"""
Increments a key which has a numeric value by delta.
If the key can't be found, it's added as delta or 0 if delta < 0.
If passed a negative number, will use memcached's decr. Returns
the int stored in memcached
Note: The data memcached stores as the result of incr/decr is
an unsigned int. decr's that result in a number below 0 are
stored as 0.
:param key: key
:param delta: amount to add to the value of key (or set as the value
if the key is not found) will be cast to an int
:param time: the time to live
:returns: result of incrementing
:raises MemcacheConnectionError:
"""
key = md5hash(key)
command = b'incr'
if delta < 0:
command = b'decr'
delta = str(abs(int(delta))).encode('ascii')
timeout = sanitize_timeout(time)
for (server, fp, sock) in self._get_conns(key):
try:
with Timeout(self._io_timeout):
sock.sendall(b' '.join([
command, key, delta]) + b'\r\n')
line = fp.readline().strip().split()
if not line:
raise MemcacheConnectionError('incomplete read')
if line[0].upper() == b'NOT_FOUND':
add_val = delta
if command == b'decr':
add_val = b'0'
sock.sendall(b' '.join([
b'add', key, b'0', str(timeout).encode('ascii'),
str(len(add_val)).encode('ascii')
]) + b'\r\n' + add_val + b'\r\n')
line = fp.readline().strip().split()
if line[0].upper() == b'NOT_STORED':
sock.sendall(b' '.join([
command, key, delta]) + b'\r\n')
line = fp.readline().strip().split()
ret = int(line[0].strip())
else:
ret = int(add_val)
else:
ret = int(line[0].strip())
self._return_conn(server, fp, sock)
return ret
except (Exception, Timeout) as e:
self._exception_occurred(server, e, sock=sock, fp=fp)
raise MemcacheConnectionError("No Memcached connections succeeded.")
def decr(self, key, delta=1, time=0):
"""
Decrements a key which has a numeric value by delta. Calls incr with
-delta.
:param key: key
:param delta: amount to subtract to the value of key (or set the
value to 0 if the key is not found) will be cast to
an int
:param time: the time to live
:returns: result of decrementing
:raises MemcacheConnectionError:
"""
return self.incr(key, delta=-delta, time=time)
def delete(self, key, server_key=None):
"""
Deletes a key/value pair from memcache.
:param key: key to be deleted
:param server_key: key to use in determining which server in the ring
is used
"""
key = md5hash(key)
server_key = md5hash(server_key) if server_key else key
for (server, fp, sock) in self._get_conns(server_key):
try:
with Timeout(self._io_timeout):
sock.sendall(b'delete ' + key + b'\r\n')
# Wait for the delete to complete
fp.readline()
self._return_conn(server, fp, sock)
return
except (Exception, Timeout) as e:
self._exception_occurred(server, e, sock=sock, fp=fp)
def set_multi(self, mapping, server_key, serialize=True, time=0,
min_compress_len=0):
"""
Sets multiple key/value pairs in memcache.
:param mapping: dictionary of keys and values to be set in memcache
:param server_key: key to use in determining which server in the ring
is used
:param serialize: if True, value is serialized with JSON before sending
to memcache, or with pickle if configured to use
pickle instead of JSON (to avoid cache poisoning)
:param time: the time to live
:param min_compress_len: minimum compress length, this parameter was added
to keep the signature compatible with
python-memcached interface. This implementation
ignores it
"""
server_key = md5hash(server_key)
timeout = sanitize_timeout(time)
msg = []
for key, value in mapping.items():
key = md5hash(key)
flags = 0
if serialize and self._allow_pickle:
value = pickle.dumps(value, PICKLE_PROTOCOL)
flags |= PICKLE_FLAG
elif serialize:
if isinstance(value, bytes):
value = value.decode('utf8')
value = json.dumps(value).encode('ascii')
flags |= JSON_FLAG
msg.append(set_msg(key, flags, timeout, value))
for (server, fp, sock) in self._get_conns(server_key):
try:
with Timeout(self._io_timeout):
sock.sendall(b''.join(msg))
# Wait for the set to complete
for line in range(len(mapping)):
fp.readline()
self._return_conn(server, fp, sock)
return
except (Exception, Timeout) as e:
self._exception_occurred(server, e, sock=sock, fp=fp)
def get_multi(self, keys, server_key):
"""
Gets multiple values from memcache for the given keys.
:param keys: keys for values to be retrieved from memcache
:param server_key: key to use in determining which server in the ring
is used
:returns: list of values
"""
server_key = md5hash(server_key)
keys = [md5hash(key) for key in keys]
for (server, fp, sock) in self._get_conns(server_key):
try:
with Timeout(self._io_timeout):
sock.sendall(b'get ' + b' '.join(keys) + b'\r\n')
line = fp.readline().strip().split()
responses = {}
while True:
if not line:
raise MemcacheConnectionError('incomplete read')
if line[0].upper() == b'END':
break
if line[0].upper() == b'VALUE':
size = int(line[3])
value = fp.read(size)
if int(line[2]) & PICKLE_FLAG:
if self._allow_unpickle:
value = pickle.loads(value)
else:
value = None
elif int(line[2]) & JSON_FLAG:
value = json.loads(value)
responses[line[1]] = value
fp.readline()
line = fp.readline().strip().split()
values = []
for key in keys:
if key in responses:
values.append(responses[key])
else:
values.append(None)
self._return_conn(server, fp, sock)
return values
except (Exception, Timeout) as e:
self._exception_occurred(server, e, sock=sock, fp=fp)
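# --- Illustrative sketch (not part of the original module) ---
# Hedged usage example of MemcacheRing.  The server address is an assumption;
# the calls only succeed if a memcached instance is actually listening there.
def _demo_memcache_ring():
    cache = MemcacheRing(['127.0.0.1:11211'])
    cache.set('token/abc', {'ttl': 300}, time=60)   # JSON-serialized by default
    value = cache.get('token/abc')                  # -> {'ttl': 300}, or None on miss
    cache.incr('counter', delta=1)                  # creates the key if missing
    cache.delete('token/abc')
    return value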
|
py | 1a3720078df9d4faa9057001ea134e076dacd6e1 | """
Module to provide for a simple summarization of relevant output files from a build.
"""
import argparse
import os
import runpy
import sys
from shutil import copyfile
from project_summarizer.cobertura_plugin import CoberturaPlugin
from project_summarizer.junit_plugin import JUnitPlugin
from project_summarizer.project_summarizer_plugin import ProjectSummarizerPlugin
class ProjectSummarizer:
"""
Class to provide for a simple summarization of relevant output files from a build.
"""
def __init__(self):
self.__version_number = ProjectSummarizer.__get_semantic_version()
self.test_summary_publish_path = ProjectSummarizerPlugin.SUMMARY_PUBLISH_PATH
self.debug = False
self.__available_plugins = None
self.__plugin_argument_names = {}
self.__plugin_variable_names = {}
@staticmethod
def __get_semantic_version():
file_path = __file__
assert os.path.isabs(file_path)
file_path = file_path.replace(os.sep, "/")
last_index = file_path.rindex("/")
file_path = file_path[: last_index + 1] + "version.py"
version_meta = runpy.run_path(file_path)
return version_meta["__version__"]
def __parse_arguments(self):
"""
Handle any arguments for the program.
"""
parser = argparse.ArgumentParser(
description="Summarize Python files.", allow_abbrev=False
)
parser.add_argument(
"--version",
action="version",
version="%(prog)s " + self.__version_number,
)
for next_plugin_instance in self.__available_plugins:
(
plugin_argument_name,
plugin_variable_name,
) = next_plugin_instance.add_command_line_arguments(parser)
self.__plugin_argument_names[plugin_argument_name] = next_plugin_instance
self.__plugin_variable_names[plugin_argument_name] = plugin_variable_name
parser.add_argument(
"--only-changes",
dest="only_changes",
action="store_true",
default=False,
help="only_changes",
)
parser.add_argument(
"--publish",
dest="publish_summaries",
action="store_true",
default=False,
help="publish",
)
args = parser.parse_args()
if not args.publish_summaries and not args.test_report_file:
are_plugin_arguments_present = False
arguments_as_dictionary = vars(args)
for next_plugin_argument in self.__plugin_argument_names:
plugin_variable_name = self.__plugin_variable_names[
next_plugin_argument
]
assert plugin_variable_name in arguments_as_dictionary
argument_value = arguments_as_dictionary[plugin_variable_name]
are_plugin_arguments_present = bool(argument_value.strip())
if are_plugin_arguments_present:
break
if not are_plugin_arguments_present:
parser.print_help()
sys.exit(2)
return args
def __publish_file(self, file_to_publish):
"""
Publish the specified file to the set publish directory.
"""
if not os.path.exists(self.test_summary_publish_path):
print(
f"Publish directory '{self.test_summary_publish_path}' does not exist. Creating."
)
os.makedirs(self.test_summary_publish_path)
elif not os.path.isdir(self.test_summary_publish_path):
print(
f"Publish directory '{self.test_summary_publish_path}' already exists, but as a file."
)
sys.exit(1)
if os.path.exists(file_to_publish):
try:
copyfile(
file_to_publish,
ProjectSummarizerPlugin.compute_published_path_to_file(
file_to_publish
),
)
except IOError as ex:
print(f"Publishing file '{file_to_publish}' failed ({ex}).")
sys.exit(1)
def publish_summaries(self):
"""
Respond to a request to publish any existing summaries.
"""
valid_paths = []
for plugin_instance in self.__available_plugins:
plugin_output_path = plugin_instance.get_output_path()
if os.path.exists(plugin_output_path) and not os.path.isfile(
plugin_output_path
):
print(f"Summary path '{plugin_output_path}' is not a file.")
sys.exit(1)
valid_paths.append(plugin_output_path)
for plugin_output_path in valid_paths:
self.__publish_file(plugin_output_path)
def main(self):
"""
Main entrance point.
"""
self.__available_plugins = [CoberturaPlugin(), JUnitPlugin()]
args = self.__parse_arguments()
if args.publish_summaries:
self.publish_summaries()
sys.exit(0)
arguments_as_dictionary = vars(args)
for next_command_line_argument in sys.argv:
if next_command_line_argument in self.__plugin_argument_names:
plugin_instance = self.__plugin_argument_names[
next_command_line_argument
]
plugin_variable_name = self.__plugin_variable_names[
next_command_line_argument
]
plugin_instance.generate_report(
args.only_changes, arguments_as_dictionary[plugin_variable_name]
)
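# Illustrative sketch (not part of the original module): the plugin contract that
# ProjectSummarizer relies on, inferred from how the class above drives its plugins.
# The base-class details and the '--my-report' argument are assumptions, not part of
# the real CoberturaPlugin/JUnitPlugin implementations.
class _ExampleSummarizerPlugin(ProjectSummarizerPlugin):
    """
    Minimal plugin: declare one command line argument and handle its report file.
    """
    def add_command_line_arguments(self, parser):
        # Return the argument name and the argparse destination so ProjectSummarizer
        # can route the parsed value back to generate_report(). The empty-string
        # default matters because __parse_arguments() calls .strip() on the value.
        parser.add_argument(
            "--my-report",
            dest="my_report_file",
            action="store",
            default="",
            help="source report file for the example summary",
        )
        return "--my-report", "my_report_file"
    def get_output_path(self):
        # Path of the summary file this plugin writes; publish_summaries() copies it
        # into the publish directory.
        return os.path.join("report", "my-summary.json")
    def generate_report(self, only_changes, report_file):
        # Translate report_file into the summary written at get_output_path(); the
        # real plugins (CoberturaPlugin, JUnitPlugin) do the actual work here.
        pass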
if __name__ == "__main__":
ProjectSummarizer().main()
|
py | 1a3720563dcfbede27217e4e4af48fb685d737db | import catboost
import csv
import filecmp
import json
import numpy as np
import os
import pytest
import re
import yatest.common
from copy import deepcopy
from catboost_pytest_lib import (
append_params_to_cmdline,
apply_catboost,
data_file,
execute,
execute_catboost_fit,
get_limited_precision_dsv_diff_tool,
local_canonical_file,
)
CATBOOST_PATH = yatest.common.binary_path("catboost/app/catboost")
BOOSTING_TYPE = ['Ordered', 'Plain']
MULTICLASS_LOSSES = ['MultiClass', 'MultiClassOneVsAll']
def generate_random_labeled_set(nrows, nvals, labels, seed=20181219, prng=None):
if prng is None:
prng = np.random.RandomState(seed=seed)
label = prng.choice(labels, [nrows, 1])
feature = prng.random_sample([nrows, nvals])
return np.concatenate([label, feature], axis=1)
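# Illustrative sketch (not part of the original tests): how generate_random_labeled_set
# is consumed further below (e.g. test_multiclass_baseline) - column 0 is the label,
# the remaining columns are float features, and the matrix is written out as a TSV
# pool. The output path is hypothetical.
def _example_random_pool(path):
    prng = np.random.RandomState(seed=0)
    pool = generate_random_labeled_set(nrows=100, nvals=5, labels=[0, 1, 2], prng=prng)
    # pool.shape == (100, 6): one label column followed by five feature columns.
    np.savetxt(path, pool, fmt='%s', delimiter='\t')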
BY_CLASS_METRICS = ['AUC', 'Precision', 'Recall', 'F1']
def compare_evals(custom_metric, fit_eval, calc_eval, eps=1e-7):
csv_fit = csv.reader(open(fit_eval, "r"), dialect='excel-tab')
csv_calc = csv.reader(open(calc_eval, "r"), dialect='excel-tab')
head_fit = next(csv_fit)
head_calc = next(csv_calc)
    if isinstance(custom_metric, str):
custom_metric = [custom_metric]
for metric_name in deepcopy(custom_metric):
if metric_name in BY_CLASS_METRICS:
custom_metric.remove(metric_name)
for fit_metric_name in head_fit:
if fit_metric_name[:len(metric_name)] == metric_name:
custom_metric.append(fit_metric_name)
col_idx_fit = {}
col_idx_calc = {}
for metric_name in custom_metric:
col_idx_fit[metric_name] = head_fit.index(metric_name)
col_idx_calc[metric_name] = head_calc.index(metric_name)
while True:
try:
line_fit = next(csv_fit)
line_calc = next(csv_calc)
for metric_name in custom_metric:
fit_value = float(line_fit[col_idx_fit[metric_name]])
calc_value = float(line_calc[col_idx_calc[metric_name]])
max_abs = max(abs(fit_value), abs(calc_value))
err = abs(fit_value - calc_value) / max_abs if max_abs > 0 else 0
if err > eps:
raise Exception('{}, iter {}: fit vs calc = {} vs {}, err = {} > eps = {}'.format(
metric_name, line_fit[0], fit_value, calc_value, err, eps))
except StopIteration:
break
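# Illustrative sketch (not part of the original tests): the tab-separated layout
# compare_evals() expects - a header row naming the metric columns, then one row per
# iteration with the iteration id in column 0. The file contents below are made up;
# identical values keep the relative error at 0, while a mismatch beyond eps raises
# an Exception naming the metric and the iteration.
def _example_compare_evals(tmp_dir):
    fit_eval = os.path.join(tmp_dir, 'fit_metrics.tsv')
    calc_eval = os.path.join(tmp_dir, 'calc_metrics.tsv')
    with open(fit_eval, 'w') as f:
        f.write('iter\tAccuracy\n0\t0.50\n1\t0.75\n')
    with open(calc_eval, 'w') as f:
        f.write('iter\tAccuracy\n0\t0.50\n1\t0.75\n')
    compare_evals(['Accuracy'], fit_eval, calc_eval, eps=1e-7)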
def diff_tool(threshold=2e-7):
return get_limited_precision_dsv_diff_tool(threshold, True)
@pytest.fixture(scope='module', autouse=True)
def skipif_no_cuda():
for flag in pytest.config.option.flags:
if re.match('HAVE_CUDA=(0|no|false)', flag, flags=re.IGNORECASE):
return pytest.mark.skipif(True, reason=flag)
return pytest.mark.skipif(False, reason='None')
pytestmark = skipif_no_cuda()
def fit_catboost_gpu(params, devices='0', input_data=None, output_data=None):
execute_catboost_fit(
task_type='GPU',
params=params,
devices=devices,
input_data=input_data,
output_data=output_data
)
# currently only works on CPU
def fstr_catboost_cpu(params):
cmd = list()
cmd.append(CATBOOST_PATH)
cmd.append('fstr')
append_params_to_cmdline(cmd, params)
execute(cmd)
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
@pytest.mark.parametrize('qwise_loss', ['QueryRMSE', 'RMSE'])
def test_queryrmse(boosting_type, qwise_loss):
output_model_path = yatest.common.test_output_path('model.bin')
test_error_path = yatest.common.test_output_path('test_error.tsv')
learn_error_path = yatest.common.test_output_path('learn_error.tsv')
predictions_path_learn = yatest.common.test_output_path('predictions_learn.tsv')
predictions_path_test = yatest.common.test_output_path('predictions_test.tsv')
learn_file = data_file('querywise', 'train')
cd_file = data_file('querywise', 'train.cd')
test_file = data_file('querywise', 'test')
params = {"--loss-function": qwise_loss,
"-f": learn_file,
"-t": test_file,
'--column-description': cd_file,
'--boosting-type': boosting_type,
'-i': '100',
'-T': '4',
'-m': output_model_path,
'--learn-err-log': learn_error_path,
'--test-err-log': test_error_path,
'--use-best-model': 'false'
}
fit_catboost_gpu(params)
apply_catboost(output_model_path, learn_file, cd_file, predictions_path_learn)
apply_catboost(output_model_path, test_file, cd_file, predictions_path_test)
return [local_canonical_file(learn_error_path, diff_tool=diff_tool()),
local_canonical_file(test_error_path, diff_tool=diff_tool()),
local_canonical_file(predictions_path_learn, diff_tool=diff_tool()),
local_canonical_file(predictions_path_test, diff_tool=diff_tool()),
]
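# Note on the pattern shared by most tests in this file: build a dict/tuple of
# `catboost fit` CLI parameters, train on GPU via fit_catboost_gpu(), apply the saved
# model to the learn/test pools with apply_catboost() to produce prediction files, and
# return local_canonical_file() handles so the test framework can compare those files
# against canonized results, with diff_tool() absorbing small floating point
# differences between runs.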
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
def test_boosting_type(boosting_type):
output_model_path = yatest.common.test_output_path('model.bin')
output_eval_path = yatest.common.test_output_path('test.eval')
train_file = data_file('adult', 'train_small')
test_file = data_file('adult', 'test_small')
cd_file = data_file('adult', 'train.cd')
params = {
'--use-best-model': 'false',
'--loss-function': 'Logloss',
'-f': train_file,
'-t': test_file,
'--column-description': cd_file,
'--boosting-type': boosting_type,
'-i': '10',
'-w': '0.03',
'-T': '4',
'-m': output_model_path,
}
fit_catboost_gpu(params)
apply_catboost(output_model_path, test_file, cd_file, output_eval_path)
return [local_canonical_file(output_eval_path)]
def combine_dicts(first, *vargs):
combined = first.copy()
for rest in vargs:
combined.update(rest)
return combined
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
def test_bootstrap(boosting_type):
bootstrap_option = {
'no': {'--bootstrap-type': 'No'},
'bayes': {'--bootstrap-type': 'Bayesian', '--bagging-temperature': '0.0'},
'bernoulli': {'--bootstrap-type': 'Bernoulli', '--subsample': '1.0'}
}
test_file = data_file('adult', 'test_small')
cd_file = data_file('adult', 'train.cd')
params = {
'--use-best-model': 'false',
'--loss-function': 'Logloss',
'-f': data_file('adult', 'train_small'),
'-t': test_file,
'--column-description': cd_file,
'--boosting-type': boosting_type,
'-i': '10',
'-w': '0.03',
'-T': '4',
}
for bootstrap in bootstrap_option:
model_path = yatest.common.test_output_path('model_' + bootstrap + '.bin')
eval_path = yatest.common.test_output_path('test_' + bootstrap + '.eval')
model_option = {'-m': model_path}
run_params = combine_dicts(params,
bootstrap_option[bootstrap],
model_option)
fit_catboost_gpu(run_params)
apply_catboost(model_path, test_file, cd_file, eval_path)
ref_eval_path = yatest.common.test_output_path('test_no.eval')
assert (filecmp.cmp(ref_eval_path, yatest.common.test_output_path('test_bayes.eval')))
assert (filecmp.cmp(ref_eval_path, yatest.common.test_output_path('test_bernoulli.eval')))
return [local_canonical_file(ref_eval_path)]
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
def test_nan_mode_forbidden(boosting_type):
output_model_path = yatest.common.test_output_path('model.bin')
output_eval_path = yatest.common.test_output_path('test.eval')
test_file = data_file('adult', 'test_small')
learn_file = data_file('adult', 'train_small')
cd_file = data_file('adult', 'train.cd')
params = {
'-f': learn_file,
'-t': test_file,
'--column-description': cd_file,
'--boosting-type': boosting_type,
'-i': '20',
'-T': '4',
'-m': output_model_path,
'--nan-mode': 'Forbidden',
'--use-best-model': 'false',
}
fit_catboost_gpu(params)
apply_catboost(output_model_path, test_file, cd_file, output_eval_path)
return [local_canonical_file(output_eval_path, diff_tool=diff_tool())]
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
def test_overfit_detector_iter(boosting_type):
output_model_path = yatest.common.test_output_path('model.bin')
output_eval_path = yatest.common.test_output_path('test.eval')
cd_file = data_file('adult', 'train.cd')
test_file = data_file('adult', 'test_small')
params = {
'--use-best-model': 'false',
'--loss-function': 'Logloss',
'-f': data_file('adult', 'train_small'),
'-t': test_file,
'--column-description': cd_file,
'--boosting-type': boosting_type,
'-i': '2000',
'-T': '4',
'-m': output_model_path,
'-x': '1',
'-n': '8',
'-w': '0.5',
'--od-type': 'Iter',
'--od-wait': '2',
}
fit_catboost_gpu(params)
apply_catboost(output_model_path, test_file, cd_file, output_eval_path)
return [local_canonical_file(output_eval_path)]
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
def test_overfit_detector_inc_to_dec(boosting_type):
output_model_path = yatest.common.test_output_path('model.bin')
output_eval_path = yatest.common.test_output_path('test.eval')
cd_file = data_file('adult', 'train.cd')
test_file = data_file('adult', 'test_small')
params = {
'--use-best-model': 'false',
'--loss-function': 'Logloss',
'-f': data_file('adult', 'train_small'),
'-t': test_file,
'--column-description': cd_file,
'--boosting-type': boosting_type,
'-i': '2000',
'-T': '4',
'-m': output_model_path,
'-x': '1',
'-n': '8',
'-w': '0.5',
'--od-pval': '0.5',
'--od-type': 'IncToDec',
'--od-wait': '2',
}
fit_catboost_gpu(params)
apply_catboost(output_model_path, test_file, cd_file, output_eval_path)
return [local_canonical_file(output_eval_path)]
NAN_MODE = ['Min', 'Max']
@pytest.mark.parametrize('nan_mode', NAN_MODE)
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
def test_nan_mode(nan_mode, boosting_type):
output_model_path = yatest.common.test_output_path('model.bin')
output_eval_path = yatest.common.test_output_path('test.eval')
test_file = data_file('adult_nan', 'test_small')
cd_file = data_file('adult_nan', 'train.cd')
params = {
'--use-best-model': 'false',
'-f': data_file('adult_nan', 'train_small'),
'-t': test_file,
'--column-description': cd_file,
'--boosting-type': boosting_type,
'-i': '20',
'-T': '4',
'-m': output_model_path,
'--nan-mode': nan_mode
}
fit_catboost_gpu(params)
apply_catboost(output_model_path, test_file, cd_file, output_eval_path)
return [local_canonical_file(output_eval_path)]
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
def test_use_best_model(boosting_type):
output_model_path = yatest.common.test_output_path('model.bin')
output_eval_path = yatest.common.test_output_path('test.eval')
cd_file = data_file('adult', 'train.cd')
test_file = data_file('adult', 'test_small')
params = {
'--loss-function': 'Logloss',
'-f': data_file('adult', 'train_small'),
'-t': test_file,
'--column-description': cd_file,
'--boosting-type': boosting_type,
'-i': '100',
'-T': '4',
'-m': output_model_path,
'-x': '1',
'-n': '8',
'-w': '1',
'--od-pval': '0.99',
'--use-best-model': 'true'
}
fit_catboost_gpu(params)
apply_catboost(output_model_path, test_file, cd_file, output_eval_path)
return [local_canonical_file(output_eval_path)]
LOSS_FUNCTIONS = ['RMSE', 'Logloss', 'MAE', 'CrossEntropy', 'Quantile', 'LogLinQuantile', 'Poisson', 'MAPE']
LEAF_ESTIMATION_METHOD = ['Gradient', 'Newton']
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
def test_crossentropy(boosting_type):
output_model_path = yatest.common.test_output_path('model.bin')
output_eval_path = yatest.common.test_output_path('test.eval')
cd_file = data_file('adult_crossentropy', 'train.cd')
test_file = data_file('adult_crossentropy', 'test_proba')
params = {
'--loss-function': 'CrossEntropy',
'-f': data_file('adult_crossentropy', 'train_proba'),
'-t': test_file,
'--column-description': cd_file,
'--boosting-type': boosting_type,
'-i': '10',
'-w': '0.03',
'-T': '4',
'-m': output_model_path,
}
fit_catboost_gpu(params)
apply_catboost(output_model_path, test_file, cd_file, output_eval_path)
return [local_canonical_file(output_eval_path)]
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
def test_permutation_block(boosting_type):
output_model_path = yatest.common.test_output_path('model.bin')
output_eval_path = yatest.common.test_output_path('test.eval')
cd_file = data_file('adult_crossentropy', 'train.cd')
test_file = data_file('adult_crossentropy', 'test_proba')
params = {
'--loss-function': 'CrossEntropy',
'-f': data_file('adult_crossentropy', 'train_proba'),
'-t': test_file,
'--column-description': cd_file,
'--boosting-type': boosting_type,
'-i': '10',
'-w': '0.03',
'-T': '4',
'--fold-permutation-block': '8',
'-m': output_model_path,
}
fit_catboost_gpu(params)
apply_catboost(output_model_path, test_file, cd_file, output_eval_path)
return [local_canonical_file(output_eval_path)]
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
def test_ignored_features(boosting_type):
output_model_path = yatest.common.test_output_path('model.bin')
output_eval_path = yatest.common.test_output_path('test.eval')
test_file = data_file('adult', 'test_small')
cd_file = data_file('adult', 'train.cd')
params = {
'--loss-function': 'Logloss',
'-f': data_file('adult', 'train_small'),
'-t': test_file,
'--column-description': cd_file,
'--boosting-type': boosting_type,
'-i': '10',
'-w': '0.03',
'-T': '4',
'-m': output_model_path,
'-I': '0:1:3:5-7:10000',
'--use-best-model': 'false',
}
fit_catboost_gpu(params)
apply_catboost(output_model_path, test_file, cd_file, output_eval_path)
return [local_canonical_file(output_eval_path, diff_tool=diff_tool())]
def test_ignored_features_not_read():
output_model_path = yatest.common.test_output_path('model.bin')
input_cd_path = data_file('adult', 'train.cd')
cd_file = yatest.common.test_output_path('train.cd')
with open(input_cd_path, "rt") as f:
cd_lines = f.readlines()
with open(cd_file, "wt") as f:
for cd_line in cd_lines:
# Corrupt some features by making them 'Num'
            if cd_line.split() == ['5', 'Categ']:  # column 5 --> feature 4
                cd_line = cd_line.replace('Categ', 'Num')
            if cd_line.split() == ['7', 'Categ']:  # column 7 --> feature 6
                cd_line = cd_line.replace('Categ', 'Num')
f.write(cd_line)
test_file = data_file('adult', 'test_small')
params = {
'--loss-function': 'Logloss',
'-f': data_file('adult', 'train_small'),
'-t': test_file,
'--column-description': cd_file,
'--boosting-type': 'Plain',
'-i': '10',
'-T': '4',
'-m': output_model_path,
'-I': '4:6',
'--use-best-model': 'false',
}
fit_catboost_gpu(params)
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
def test_baseline(boosting_type):
output_model_path = yatest.common.test_output_path('model.bin')
output_eval_path = yatest.common.test_output_path('test.eval')
cd_file = data_file('train_adult_baseline.cd')
test_file = data_file('adult_weight', 'test_weight')
params = {
'--loss-function': 'Logloss',
'-f': data_file('adult_weight', 'train_weight'),
'-t': test_file,
'--column-description': cd_file,
'--boosting-type': boosting_type,
'-i': '10',
'-w': '0.03',
'-T': '4',
'-m': output_model_path,
'--use-best-model': 'false',
}
fit_catboost_gpu(params)
apply_catboost(output_model_path, test_file, cd_file, output_eval_path)
return [local_canonical_file(output_eval_path, diff_tool=diff_tool())]
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
def test_weights(boosting_type):
output_model_path = yatest.common.test_output_path('model.bin')
output_eval_path = yatest.common.test_output_path('test.eval')
cd_file = data_file('adult_weight', 'train.cd')
test_file = data_file('adult_weight', 'test_weight')
params = {
'--use-best-model': 'false',
'--loss-function': 'Logloss',
'-f': data_file('adult_weight', 'train_weight'),
'-t': test_file,
'--column-description': cd_file,
'--boosting-type': boosting_type,
'-i': '10',
'-w': '0.03',
'-T': '4',
'-m': output_model_path,
}
fit_catboost_gpu(params)
apply_catboost(output_model_path, test_file, cd_file, output_eval_path)
return [local_canonical_file(output_eval_path)]
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
def test_weights_without_bootstrap(boosting_type):
output_model_path = yatest.common.test_output_path('model.bin')
output_eval_path = yatest.common.test_output_path('test.eval')
cd_file = data_file('adult_weight', 'train.cd')
test_file = data_file('adult_weight', 'test_weight')
params = {
'--use-best-model': 'false',
'--loss-function': 'Logloss',
'-f': data_file('adult_weight', 'train_weight'),
'-t': test_file,
'--column-description': cd_file,
'--boosting-type': boosting_type,
'-i': '10',
'-w': '0.03',
'-T': '4',
'--bootstrap-type': 'No',
'-m': output_model_path,
}
fit_catboost_gpu(params)
apply_catboost(output_model_path, test_file, cd_file, output_eval_path)
return [local_canonical_file(output_eval_path, diff_tool=diff_tool())]
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
@pytest.mark.parametrize('leaf_estimation', ["Newton", "Gradient"])
def test_weighted_pool_leaf_estimation_method(boosting_type, leaf_estimation):
output_model_path = yatest.common.test_output_path('model.bin')
output_eval_path = yatest.common.test_output_path('test.eval')
cd_file = data_file('adult_weight', 'train.cd')
test_file = data_file('adult_weight', 'test_weight')
params = {
'--use-best-model': 'false',
'--loss-function': 'Logloss',
'-f': data_file('adult_weight', 'train_weight'),
'-t': test_file,
'--column-description': cd_file,
'--boosting-type': boosting_type,
'-i': '10',
'-T': '4',
'--leaf-estimation-method': leaf_estimation,
'-m': output_model_path,
}
fit_catboost_gpu(params)
apply_catboost(output_model_path, test_file, cd_file, output_eval_path)
return [local_canonical_file(output_eval_path)]
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
@pytest.mark.parametrize('leaf_estimation', ["Newton", "Gradient"])
def test_leaf_estimation_method(boosting_type, leaf_estimation):
output_model_path = yatest.common.test_output_path('model.bin')
output_eval_path = yatest.common.test_output_path('test.eval')
cd_file = data_file('adult', 'train.cd')
test_file = data_file('adult', 'test_small')
params = {
'--use-best-model': 'false',
'--loss-function': 'Logloss',
'-f': data_file('adult', 'train_small'),
'-t': test_file,
'--column-description': cd_file,
'--boosting-type': boosting_type,
'-i': '10',
'-T': '4',
'--leaf-estimation-method': leaf_estimation,
'-m': output_model_path,
}
fit_catboost_gpu(params)
apply_catboost(output_model_path, test_file, cd_file, output_eval_path)
return [local_canonical_file(output_eval_path)]
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
def test_one_hot_max_size(boosting_type):
output_model_path = yatest.common.test_output_path('model.bin')
output_eval_path = yatest.common.test_output_path('test.eval')
cd_file = data_file('adult', 'train.cd')
test_file = data_file('adult', 'test_small')
params = {
'--use-best-model': 'false',
'--loss-function': 'Logloss',
'-f': data_file('adult', 'train_small'),
'-t': test_file,
'--column-description': cd_file,
'--boosting-type': boosting_type,
'-i': '10',
'-w': '0.03',
'-T': '4',
'--one-hot-max-size': 64,
'-m': output_model_path,
}
fit_catboost_gpu(params)
apply_catboost(output_model_path, test_file, cd_file, output_eval_path)
return [local_canonical_file(output_eval_path)]
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
def test_l2_reg_size(boosting_type):
output_model_path = yatest.common.test_output_path('model.bin')
output_eval_path = yatest.common.test_output_path('test.eval')
cd_file = data_file('adult', 'train.cd')
test_file = data_file('adult', 'test_small')
params = {
'--use-best-model': 'false',
'--loss-function': 'Logloss',
'-f': data_file('adult', 'train_small'),
'-t': test_file,
'--column-description': cd_file,
'--boosting-type': boosting_type,
'-i': '10',
'-T': '4',
'--l2-leaf-reg': 10,
'-m': output_model_path,
}
fit_catboost_gpu(params)
apply_catboost(output_model_path, test_file, cd_file, output_eval_path)
return [local_canonical_file(output_eval_path)]
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
def test_has_time(boosting_type):
output_model_path = yatest.common.test_output_path('model.bin')
output_eval_path = yatest.common.test_output_path('test.eval')
cd_file = data_file('adult', 'train.cd')
test_file = data_file('adult', 'test_small')
params = (
'--use-best-model', 'false',
'--loss-function', 'Logloss',
'-f', data_file('adult', 'train_small'),
'-t', test_file,
'--column-description', cd_file,
'--boosting-type', boosting_type,
'-i', '10',
'-w', '0.03',
'-T', '4',
'--has-time',
'-m', output_model_path,
)
fit_catboost_gpu(params)
apply_catboost(output_model_path, test_file, cd_file, output_eval_path)
return [local_canonical_file(output_eval_path)]
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
def test_logloss_with_not_binarized_target(boosting_type):
output_model_path = yatest.common.test_output_path('model.bin')
output_eval_path = yatest.common.test_output_path('test.eval')
cd_file = data_file('adult_not_binarized', 'train.cd')
test_file = data_file('adult_not_binarized', 'test_small')
params = {
'--use-best-model': 'false',
'--loss-function': 'Logloss',
'-f': data_file('adult_not_binarized', 'train_small'),
'-t': test_file,
'--column-description': cd_file,
'--boosting-type': boosting_type,
'-i': '10',
'-w': '0.03',
'-T': '4',
'-m': output_model_path,
}
fit_catboost_gpu(params)
apply_catboost(output_model_path, test_file, cd_file, output_eval_path)
return [local_canonical_file(output_eval_path)]
def test_fold_len_mult():
output_model_path = yatest.common.test_output_path('model.bin')
output_eval_path = yatest.common.test_output_path('test.eval')
cd_file = data_file('adult_not_binarized', 'train.cd')
test_file = data_file('adult_not_binarized', 'test_small')
params = {
'--use-best-model': 'false',
'--loss-function': 'Logloss',
'-f': data_file('adult_not_binarized', 'train_small'),
'-t': test_file,
'--column-description': cd_file,
'--boosting-type': 'Ordered',
'-i': '10',
'-w': '0.03',
'-T': '4',
'--fold-len-multiplier': 1.2,
'-m': output_model_path,
}
fit_catboost_gpu(params)
apply_catboost(output_model_path, test_file, cd_file, output_eval_path)
return [local_canonical_file(output_eval_path)]
def test_random_strength():
output_model_path = yatest.common.test_output_path('model.bin')
output_eval_path = yatest.common.test_output_path('test.eval')
cd_file = data_file('adult_not_binarized', 'train.cd')
test_file = data_file('adult_not_binarized', 'test_small')
params = {
'--use-best-model': 'false',
'--loss-function': 'Logloss',
'-f': data_file('adult_not_binarized', 'train_small'),
'-t': test_file,
'--column-description': cd_file,
'--boosting-type': 'Ordered',
'-i': '10',
'-w': '0.03',
'-T': '4',
'--random-strength': 122,
'-m': output_model_path,
}
fit_catboost_gpu(params)
apply_catboost(output_model_path, test_file, cd_file, output_eval_path)
return [local_canonical_file(output_eval_path)]
@pytest.mark.parametrize('loss_function', LOSS_FUNCTIONS)
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
def test_all_targets(loss_function, boosting_type):
output_model_path = yatest.common.test_output_path('model.bin')
output_eval_path = yatest.common.test_output_path('test.eval')
test_file = data_file('adult', 'test_small')
cd_file = data_file('adult', 'train.cd')
params = (
'--use-best-model', 'false',
'--loss-function', loss_function,
'-f', data_file('adult', 'train_small'),
'-t', test_file,
'--column-description', cd_file,
'--boosting-type', boosting_type,
'-i', '10',
'-w', '0.03',
'-T', '4',
'-m', output_model_path,
)
fit_catboost_gpu(params)
apply_catboost(output_model_path, test_file, cd_file, output_eval_path)
return [local_canonical_file(output_eval_path)]
@pytest.mark.parametrize('is_inverted', [False, True], ids=['', 'inverted'])
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
def test_cv(is_inverted, boosting_type):
output_model_path = yatest.common.test_output_path('model.bin')
output_eval_path = yatest.common.test_output_path('test.eval')
params = (
'--use-best-model', 'false',
'--loss-function', 'Logloss',
'-f', data_file('adult', 'train_small'),
'--column-description', data_file('adult', 'train.cd'),
'--boosting-type', boosting_type,
'-i', '10',
'-w', '0.03',
'-T', '4',
'-m', output_model_path,
('-Y' if is_inverted else '-X'), '2/10',
'--eval-file', output_eval_path,
)
fit_catboost_gpu(params)
return [local_canonical_file(output_eval_path, diff_tool=diff_tool())]
@pytest.mark.parametrize('is_inverted', [False, True], ids=['', 'inverted'])
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
def test_cv_for_query(is_inverted, boosting_type):
output_model_path = yatest.common.test_output_path('model.bin')
output_eval_path = yatest.common.test_output_path('test.eval')
params = (
'--use-best-model', 'false',
'--loss-function', 'QueryRMSE',
'-f', data_file('querywise', 'train'),
'--column-description', data_file('querywise', 'train.cd'),
'--boosting-type', boosting_type,
'-i', '10',
'-T', '4',
'-m', output_model_path,
('-Y' if is_inverted else '-X'), '2/7',
'--eval-file', output_eval_path,
)
fit_catboost_gpu(params)
return [local_canonical_file(output_eval_path, diff_tool=diff_tool())]
@pytest.mark.parametrize('is_inverted', [False, True], ids=['', 'inverted'])
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
def test_cv_for_pairs(is_inverted, boosting_type):
output_model_path = yatest.common.test_output_path('model.bin')
output_eval_path = yatest.common.test_output_path('test.eval')
params = (
'--use-best-model', 'false',
'--loss-function', 'PairLogit',
'-f', data_file('querywise', 'train'),
'--column-description', data_file('querywise', 'train.cd'),
'--learn-pairs', data_file('querywise', 'train.pairs'),
'--boosting-type', boosting_type,
'-i', '10',
'-T', '4',
'-m', output_model_path,
('-Y' if is_inverted else '-X'), '2/7',
'--eval-file', output_eval_path,
)
fit_catboost_gpu(params)
return [local_canonical_file(output_eval_path, diff_tool=diff_tool())]
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
def test_custom_priors(boosting_type):
output_model_path = yatest.common.test_output_path('model.bin')
output_eval_path = yatest.common.test_output_path('test.eval')
test_file = data_file('adult', 'test_small')
cd_file = data_file('adult', 'train.cd')
params = (
'--use-best-model', 'false',
'--loss-function', 'Logloss',
'-f', data_file('adult', 'train_small'),
'-t', test_file,
'--column-description', cd_file,
'--boosting-type', boosting_type,
'-i', '10',
'-w', '0.03',
'-T', '4',
'-m', output_model_path,
'--ctr', 'Borders:Prior=-2:Prior=0:Prior=8/3:Prior=1:Prior=-1:Prior=3,'
'FeatureFreq:Prior=0',
'--per-feature-ctr', '4:Borders:Prior=0.444,FeatureFreq:Prior=0.444;'
'6:Borders:Prior=0.666,FeatureFreq:Prior=0.666;'
'8:Borders:Prior=-0.888:Prior=2/3,FeatureFreq:Prior=-0.888:Prior=0.888'
)
fit_catboost_gpu(params)
apply_catboost(output_model_path, test_file, cd_file, output_eval_path)
return [local_canonical_file(output_eval_path)]
CTR_TYPES = ['Borders', 'Buckets', 'FloatTargetMeanValue',
'Borders,FloatTargetMeanValue', 'Buckets,Borders']
@pytest.mark.parametrize('ctr_type', CTR_TYPES)
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
def test_ctr_type(ctr_type, boosting_type):
output_model_path = yatest.common.test_output_path('model.bin')
output_eval_path = yatest.common.test_output_path('test.eval')
cd_file = data_file('adult_crossentropy', 'train.cd')
test_file = data_file('adult_crossentropy', 'test_proba')
params = (
'--use-best-model', 'false',
'--loss-function', 'RMSE',
'-f', data_file('adult_crossentropy', 'train_proba'),
'-t', test_file,
'--column-description', cd_file,
'--boosting-type', boosting_type,
'-i', '3',
'-T', '4',
'-m', output_model_path,
'--ctr', ctr_type
)
fit_catboost_gpu(params)
apply_catboost(output_model_path, test_file, cd_file, output_eval_path)
return [local_canonical_file(output_eval_path)]
@pytest.mark.parametrize('loss_function', LOSS_FUNCTIONS)
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
def test_meta(loss_function, boosting_type):
output_model_path = yatest.common.test_output_path('model.bin')
meta_path = 'meta.tsv'
params = (
'--use-best-model', 'false',
'--loss-function', loss_function,
'-f', data_file('adult', 'train_small'),
'-t', data_file('adult', 'test_small'),
'--column-description', data_file('adult', 'train.cd'),
'--boosting-type', boosting_type,
'-i', '10',
'-T', '4',
'-m', output_model_path,
'--name', 'test experiment',
)
# meta_path is implicit output file
fit_catboost_gpu(params, output_data={meta_path: meta_path})
return [local_canonical_file(meta_path)]
def test_train_dir():
output_model_path = 'model.bin'
train_dir_path = 'trainDir'
params = (
'--use-best-model', 'false',
'--loss-function', 'RMSE',
'-f', data_file('adult', 'train_small'),
'-t', data_file('adult', 'test_small'),
'--column-description', data_file('adult', 'train.cd'),
'-i', '10',
'-T', '4',
'-m', output_model_path,
'--train-dir', train_dir_path,
)
fit_catboost_gpu(params, output_data={train_dir_path: train_dir_path, output_model_path: output_model_path})
outputs = ['time_left.tsv', 'learn_error.tsv', 'test_error.tsv', 'meta.tsv', output_model_path]
for output in outputs:
assert os.path.isfile(train_dir_path + '/' + output)
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
@pytest.mark.parametrize('qwise_loss', ['QueryRMSE', 'RMSE'])
def test_train_on_binarized_equal_train_on_float(boosting_type, qwise_loss):
output_model_path = yatest.common.test_output_path('model.bin')
output_model_path_binarized = yatest.common.test_output_path('model_binarized.bin')
test_error_path = yatest.common.test_output_path('test_error.tsv')
learn_error_path = yatest.common.test_output_path('learn_error.tsv')
borders_file = yatest.common.test_output_path('borders.tsv')
borders_file_output = borders_file + '.out'
predictions_path_learn = yatest.common.test_output_path('predictions_learn.tsv')
predictions_path_learn_binarized = yatest.common.test_output_path('predictions_learn_binarized.tsv')
predictions_path_test = yatest.common.test_output_path('predictions_test.tsv')
predictions_path_test_binarized = yatest.common.test_output_path('predictions_test_binarized.tsv')
learn_file = data_file('querywise', 'train')
cd_file = data_file('querywise', 'train.cd')
test_file = data_file('querywise', 'test')
params = {"--loss-function": qwise_loss,
"-f": learn_file,
"-t": test_file,
'--column-description': cd_file,
'--boosting-type': boosting_type,
'-i': '100',
'-T': '4',
'-m': output_model_path,
'--learn-err-log': learn_error_path,
'--test-err-log': test_error_path,
'--use-best-model': 'false',
'--output-borders-file': borders_file_output,
}
params_binarized = dict(params)
params_binarized['--input-borders-file'] = borders_file_output
params_binarized['--output-borders-file'] = borders_file
params_binarized['-m'] = output_model_path_binarized
fit_catboost_gpu(params)
apply_catboost(output_model_path, learn_file, cd_file, predictions_path_learn)
apply_catboost(output_model_path, test_file, cd_file, predictions_path_test)
# learn_error_path and test_error_path already exist after first fit_catboost_gpu() call
# and would be automatically marked as input_data for YT operation,
# which will lead to error, because input files are available only for reading.
# That's why we explicitly drop files from input_data and implicitly add them to output_data.
fit_catboost_gpu(params_binarized, input_data={learn_error_path: None, test_error_path: None})
apply_catboost(output_model_path_binarized, learn_file, cd_file, predictions_path_learn_binarized)
apply_catboost(output_model_path_binarized, test_file, cd_file, predictions_path_test_binarized)
assert (filecmp.cmp(predictions_path_learn, predictions_path_learn_binarized))
assert (filecmp.cmp(predictions_path_test, predictions_path_test_binarized))
return [local_canonical_file(learn_error_path, diff_tool=diff_tool()),
local_canonical_file(test_error_path, diff_tool=diff_tool()),
local_canonical_file(predictions_path_test, diff_tool=diff_tool()),
local_canonical_file(predictions_path_learn, diff_tool=diff_tool()),
local_canonical_file(borders_file, diff_tool=diff_tool())]
FSTR_TYPES = ['PredictionValuesChange', 'InternalFeatureImportance', 'InternalInteraction', 'Interaction', 'ShapValues']
@pytest.mark.parametrize('fstr_type', FSTR_TYPES)
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
def test_fstr(fstr_type, boosting_type):
model_path = yatest.common.test_output_path('adult_model.bin')
output_fstr_path = yatest.common.test_output_path('fstr.tsv')
fit_params = (
'--use-best-model', 'false',
'--loss-function', 'Logloss',
'-f', data_file('adult', 'train_small'),
'--column-description', data_file('adult', 'train.cd'),
'--boosting-type', boosting_type,
'-i', '10',
'-w', '0.03',
'-T', '4',
'--one-hot-max-size', '10',
'-m', model_path
)
if fstr_type == 'ShapValues':
fit_params += ('--max-ctr-complexity', '1')
fit_catboost_gpu(fit_params)
fstr_params = (
'--input-path', data_file('adult', 'train_small'),
'--column-description', data_file('adult', 'train.cd'),
'-m', model_path,
'-o', output_fstr_path,
'--fstr-type', fstr_type
)
fstr_catboost_cpu(fstr_params)
return local_canonical_file(output_fstr_path)
LOSS_FUNCTIONS_NO_MAPE = ['RMSE', 'Logloss', 'MAE', 'CrossEntropy', 'Quantile', 'LogLinQuantile', 'Poisson']
@pytest.mark.parametrize('loss_function', LOSS_FUNCTIONS_NO_MAPE)
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
def test_quantized_pool(loss_function, boosting_type):
output_model_path = yatest.common.test_output_path('model.bin')
output_eval_path = yatest.common.test_output_path('test.eval')
quantized_train_file = 'quantized://' + data_file('quantized_adult', 'train.qbin')
quantized_test_file = 'quantized://' + data_file('quantized_adult', 'test.qbin')
params = (
'--use-best-model', 'false',
'--loss-function', loss_function,
'-f', quantized_train_file,
'-t', quantized_test_file,
'--boosting-type', boosting_type,
'-i', '10',
'-w', '0.03',
'-T', '4',
'-m', output_model_path,
)
fit_catboost_gpu(params)
cd_file = data_file('quantized_adult', 'pool.cd')
test_file = data_file('quantized_adult', 'test_small.tsv')
apply_catboost(output_model_path, test_file, cd_file, output_eval_path)
return [local_canonical_file(output_eval_path)]
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
@pytest.mark.parametrize('used_ram_limit', ['1Kb', '550Mb'])
def test_allow_writing_files_and_used_ram_limit(boosting_type, used_ram_limit):
output_model_path = yatest.common.test_output_path('model.bin')
output_eval_path = yatest.common.test_output_path('test.eval')
cd_file = data_file('airlines_5K', 'cd')
params = (
'--use-best-model', 'false',
'--allow-writing-files', 'false',
'--used-ram-limit', used_ram_limit,
'--loss-function', 'Logloss',
'--max-ctr-complexity', '8',
'--depth', '10',
'-f', data_file('airlines_5K', 'train'),
'-t', data_file('airlines_5K', 'test'),
'--column-description', cd_file,
'--has-header',
'--boosting-type', boosting_type,
'-i', '20',
'-w', '0.03',
'-T', '4',
'-m', output_model_path,
'--eval-file', output_eval_path,
)
fit_catboost_gpu(params)
test_file = data_file('airlines_5K', 'test')
apply_catboost(output_model_path, test_file, cd_file,
output_eval_path, has_header=True)
return [local_canonical_file(output_eval_path, diff_tool=diff_tool())]
def test_pairs_generation():
output_model_path = yatest.common.test_output_path('model.bin')
test_error_path = yatest.common.test_output_path('test_error.tsv')
learn_error_path = yatest.common.test_output_path('learn_error.tsv')
predictions_path_learn = yatest.common.test_output_path('predictions_learn.tsv')
predictions_path_test = yatest.common.test_output_path('predictions_test.tsv')
cd_file = data_file('querywise', 'train.cd')
learn_file = data_file('querywise', 'train')
test_file = data_file('querywise', 'test')
params = [
'--loss-function', 'PairLogit',
'--eval-metric', 'PairAccuracy',
'-f', learn_file,
'-t', test_file,
'--column-description', cd_file,
'--l2-leaf-reg', '0',
'-i', '20',
'-T', '4',
'-m', output_model_path,
'--learn-err-log', learn_error_path,
'--test-err-log', test_error_path,
'--use-best-model', 'false'
]
fit_catboost_gpu(params)
apply_catboost(output_model_path, learn_file, cd_file, predictions_path_learn)
apply_catboost(output_model_path, test_file, cd_file, predictions_path_test)
return [local_canonical_file(learn_error_path, diff_tool=diff_tool()),
local_canonical_file(test_error_path, diff_tool=diff_tool()),
local_canonical_file(predictions_path_learn, diff_tool=diff_tool()),
local_canonical_file(predictions_path_test, diff_tool=diff_tool()),
]
def test_pairs_generation_with_max_pairs():
output_model_path = yatest.common.test_output_path('model.bin')
test_error_path = yatest.common.test_output_path('test_error.tsv')
learn_error_path = yatest.common.test_output_path('learn_error.tsv')
predictions_path_learn = yatest.common.test_output_path('predictions_learn.tsv')
predictions_path_test = yatest.common.test_output_path('predictions_test.tsv')
cd_file = data_file('querywise', 'train.cd')
learn_file = data_file('querywise', 'train')
test_file = data_file('querywise', 'test')
params = [
'--loss-function', 'PairLogit:max_pairs=30',
'--eval-metric', 'PairAccuracy',
'-f', learn_file,
'-t', test_file,
'--column-description', cd_file,
'--l2-leaf-reg', '0',
'-i', '20',
'-T', '4',
'-m', output_model_path,
'--learn-err-log', learn_error_path,
'--test-err-log', test_error_path,
'--use-best-model', 'false'
]
fit_catboost_gpu(params)
apply_catboost(output_model_path, learn_file, cd_file, predictions_path_learn)
apply_catboost(output_model_path, test_file, cd_file, predictions_path_test)
return [local_canonical_file(learn_error_path, diff_tool=diff_tool()),
local_canonical_file(test_error_path, diff_tool=diff_tool()),
local_canonical_file(predictions_path_learn, diff_tool=diff_tool()),
local_canonical_file(predictions_path_test, diff_tool=diff_tool()),
]
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
def test_pairlogit_no_target(boosting_type):
output_model_path = yatest.common.test_output_path('model.bin')
output_eval_path = yatest.common.test_output_path('test.eval')
params = [
'--loss-function', 'PairLogit',
'-f', data_file('querywise', 'train'),
'-t', data_file('querywise', 'test'),
'--column-description', data_file('querywise', 'train.cd.no_target'),
'--learn-pairs', data_file('querywise', 'train.pairs'),
'--test-pairs', data_file('querywise', 'test.pairs'),
'--boosting-type', boosting_type,
'-i', '20',
'-T', '4',
'-m', output_model_path,
'--eval-file', output_eval_path,
'--use-best-model', 'false',
]
fit_catboost_gpu(params)
return [
local_canonical_file(
output_eval_path,
# TODO(akhropov): why such result instability for Plain. MLTOOLS-2801
diff_tool=diff_tool(threshold={'Plain': 0.07, 'Ordered': 1.e-7}[boosting_type])
)
]
@pytest.mark.parametrize('task_type', ['CPU', 'GPU'])
def test_learn_without_header_eval_with_header(task_type):
train_path = yatest.common.test_output_path('airlines_without_header')
with open(data_file('airlines_5K', 'train'), 'r') as with_header_file:
with open(train_path, 'w') as without_header_file:
without_header_file.writelines(with_header_file.readlines()[1:])
model_path = yatest.common.test_output_path('model.bin')
fit_params = [
'--loss-function', 'Logloss',
'-f', train_path,
'--cd', data_file('airlines_5K', 'cd'),
'-i', '10',
'-m', model_path
]
execute_catboost_fit(
task_type=task_type,
params=fit_params,
devices='0'
)
cmd_calc = (
CATBOOST_PATH,
'calc',
'--input-path', data_file('airlines_5K', 'test'),
'--cd', data_file('airlines_5K', 'cd'),
'-m', model_path,
'--has-header'
)
yatest.common.execute(cmd_calc)
def test_group_weights_file():
first_eval_path = yatest.common.test_output_path('first.eval')
second_eval_path = yatest.common.test_output_path('second.eval')
first_model_path = yatest.common.test_output_path('first_model.bin')
second_model_path = yatest.common.test_output_path('second_model.bin')
def run_catboost(eval_path, model_path, cd_file, is_additional_query_weights):
cd_file_path = data_file('querywise', cd_file)
fit_params = [
'--use-best-model', 'false',
'--loss-function', 'QueryRMSE',
'-f', data_file('querywise', 'train'),
'--column-description', cd_file_path,
'-i', '5',
'-T', '4',
'-m', model_path,
]
if is_additional_query_weights:
fit_params += [
'--learn-group-weights', data_file('querywise', 'train.group_weights'),
'--test-group-weights', data_file('querywise', 'test.group_weights'),
]
fit_catboost_gpu(fit_params)
apply_catboost(model_path, data_file('querywise', 'test'), cd_file_path, eval_path)
run_catboost(first_eval_path, first_model_path, 'train.cd', True)
run_catboost(second_eval_path, second_model_path, 'train.cd.group_weight', False)
assert filecmp.cmp(first_eval_path, second_eval_path)
return [local_canonical_file(first_eval_path)]
def test_group_weights_file_quantized():
first_eval_path = yatest.common.test_output_path('first.eval')
second_eval_path = yatest.common.test_output_path('second.eval')
first_model_path = yatest.common.test_output_path('first_model.bin')
second_model_path = yatest.common.test_output_path('second_model.bin')
def run_catboost(eval_path, model_path, train, is_additional_query_weights):
fit_params = [
'--use-best-model', 'false',
'--loss-function', 'QueryRMSE',
'-f', 'quantized://' + data_file('querywise', train),
'-i', '5',
'-T', '4',
'-m', model_path,
]
if is_additional_query_weights:
fit_params += [
'--learn-group-weights', data_file('querywise', 'train.group_weights'),
'--test-group-weights', data_file('querywise', 'test.group_weights'),
]
fit_catboost_gpu(fit_params)
apply_catboost(model_path, data_file('querywise', 'test'), data_file('querywise', 'train.cd.group_weight'), eval_path)
run_catboost(first_eval_path, first_model_path, 'train.quantized', True)
run_catboost(second_eval_path, second_model_path, 'train.quantized.group_weight', False)
assert filecmp.cmp(first_eval_path, second_eval_path)
return [local_canonical_file(first_eval_path)]
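# The three constants below are shared by the multiclass CPU-vs-GPU comparison tests
# that follow. As used there, NO_RANDOM_PARAMS presumably removes every source of
# training randomness (no random strength, no bootstrap, fixed object order via
# --has-time) so CPU and GPU runs can be compared directly; CAT_COMPARE_PARAMS pins the
# categorical-feature CTR settings to a configuration both implementations are expected
# to compute identically; METRIC_CHECKING_MULTICLASS is the metric whose per-iteration
# values are cross-checked between the training error log and a separate eval-metrics
# run.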
NO_RANDOM_PARAMS = {
'--random-strength': '0',
'--bootstrap-type': 'No',
'--has-time': '',
'--set-metadata-from-freeargs': ''
}
METRIC_CHECKING_MULTICLASS = 'Accuracy:use_weights=false'
CAT_COMPARE_PARAMS = {
'--counter-calc-method': 'SkipTest',
'--simple-ctr': 'Buckets',
'--max-ctr-complexity': 1
}
def eval_metric(model_path, metrics, data_path, cd_path, output_log, eval_period='1'):
cmd = [
CATBOOST_PATH,
'eval-metrics',
'--metrics', metrics,
'-m', model_path,
'--input-path', data_path,
'--cd', cd_path,
'--output-path', output_log,
'--eval-period', eval_period
]
yatest.common.execute(cmd)
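# The multiclass tests below share one cross-check flow: train on GPU while logging
# METRIC_CHECKING_MULTICLASS into --test-err-log, recompute the same metric from the
# saved model with eval_metric() (the `catboost eval-metrics` mode), and let
# compare_evals() verify that the two per-iteration logs agree within a small relative
# tolerance before the error logs are canonized.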
@pytest.mark.parametrize('loss_function', MULTICLASS_LOSSES)
def test_class_weight_multiclass(loss_function):
model_path = yatest.common.test_output_path('model.bin')
test_error_path = yatest.common.test_output_path('test_error.tsv')
learn_error_path = yatest.common.test_output_path('learn_error.tsv')
eval_error_path = yatest.common.test_output_path('eval_error.tsv')
learn_path = data_file('adult', 'train_small')
test_path = data_file('adult', 'test_small')
cd_path = data_file('adult', 'train.cd')
fit_params = {
'--use-best-model': 'false',
'--loss-function': loss_function,
'-f': learn_path,
'-t': test_path,
'--column-description': cd_path,
'--boosting-type': 'Plain',
'-i': '10',
'-T': '4',
'-m': model_path,
'--class-weights': '0.5,2',
'--learn-err-log': learn_error_path,
'--test-err-log': test_error_path,
'--custom-metric': METRIC_CHECKING_MULTICLASS
}
fit_params.update(CAT_COMPARE_PARAMS)
fit_catboost_gpu(fit_params)
eval_metric(model_path, METRIC_CHECKING_MULTICLASS, test_path, cd_path, eval_error_path)
compare_evals(METRIC_CHECKING_MULTICLASS, test_error_path, eval_error_path)
return [local_canonical_file(learn_error_path), local_canonical_file(test_error_path)]
@pytest.mark.parametrize('leaf_estimation_method', LEAF_ESTIMATION_METHOD)
def test_multi_leaf_estimation_method(leaf_estimation_method):
output_model_path = yatest.common.test_output_path('model.bin')
learn_error_path = yatest.common.test_output_path('learn_error.tsv')
test_error_path = yatest.common.test_output_path('test_error.tsv')
eval_test_error_path = yatest.common.test_output_path('eval_test_error.tsv')
train_path = data_file('cloudness_small', 'train_small')
test_path = data_file('cloudness_small', 'test_small')
cd_path = data_file('cloudness_small', 'train.cd')
fit_params = {
'--loss-function': 'MultiClass',
'-f': train_path,
'-t': test_path,
'--column-description': cd_path,
'--boosting-type': 'Plain',
'-i': '10',
'-T': '4',
'-m': output_model_path,
'--leaf-estimation-method': leaf_estimation_method,
'--leaf-estimation-iterations': '2',
'--use-best-model': 'false',
'--learn-err-log': learn_error_path,
'--test-err-log': test_error_path,
'--custom-metric': METRIC_CHECKING_MULTICLASS
}
fit_params.update(CAT_COMPARE_PARAMS)
fit_catboost_gpu(fit_params)
eval_metric(output_model_path, METRIC_CHECKING_MULTICLASS, test_path, cd_path, eval_test_error_path)
compare_evals(METRIC_CHECKING_MULTICLASS, test_error_path, eval_test_error_path)
return [local_canonical_file(learn_error_path), local_canonical_file(test_error_path)]
@pytest.mark.parametrize('loss_function', MULTICLASS_LOSSES)
def test_multiclass_baseline(loss_function):
labels = [0, 1, 2, 3]
cd_path = yatest.common.test_output_path('cd.txt')
np.savetxt(cd_path, [[0, 'Target'], [1, 'Baseline'], [2, 'Baseline'], [3, 'Baseline'], [4, 'Baseline']], fmt='%s', delimiter='\t')
prng = np.random.RandomState(seed=0)
train_path = yatest.common.test_output_path('train.txt')
np.savetxt(train_path, generate_random_labeled_set(100, 1000, labels, prng=prng), fmt='%s', delimiter='\t')
test_path = yatest.common.test_output_path('test.txt')
np.savetxt(test_path, generate_random_labeled_set(100, 1000, labels, prng=prng), fmt='%s', delimiter='\t')
learn_error_path = yatest.common.test_output_path('learn_error.tsv')
test_error_path = yatest.common.test_output_path('test_error.tsv')
eval_error_path = yatest.common.test_output_path('eval_error.tsv')
fit_params = {
'--loss-function': loss_function,
'-f': train_path,
'-t': test_path,
'--column-description': cd_path,
'--boosting-type': 'Plain',
'-i': '10',
'-T': '4',
'--use-best-model': 'false',
'--classes-count': '4',
'--custom-metric': METRIC_CHECKING_MULTICLASS,
'--test-err-log': eval_error_path
}
fit_params.update(NO_RANDOM_PARAMS)
execute_catboost_fit('CPU', fit_params)
fit_params['--learn-err-log'] = learn_error_path
fit_params['--test-err-log'] = test_error_path
fit_catboost_gpu(fit_params)
compare_evals(METRIC_CHECKING_MULTICLASS, test_error_path, eval_error_path)
return [local_canonical_file(learn_error_path), local_canonical_file(test_error_path)]
@pytest.mark.parametrize('loss_function', MULTICLASS_LOSSES)
def test_multiclass_baseline_lost_class(loss_function):
num_objects = 1000
cd_path = yatest.common.test_output_path('cd.txt')
np.savetxt(cd_path, [[0, 'Target'], [1, 'Baseline'], [2, 'Baseline']], fmt='%s', delimiter='\t')
prng = np.random.RandomState(seed=0)
train_path = yatest.common.test_output_path('train.txt')
np.savetxt(train_path, generate_random_labeled_set(num_objects, 10, labels=[1, 2], prng=prng), fmt='%.5f', delimiter='\t')
test_path = yatest.common.test_output_path('test.txt')
np.savetxt(test_path, generate_random_labeled_set(num_objects, 10, labels=[0, 1, 2, 3], prng=prng), fmt='%.5f', delimiter='\t')
eval_error_path = yatest.common.test_output_path('eval_error.tsv')
custom_metric = 'Accuracy:use_weights=false'
fit_params = {
'--loss-function': loss_function,
'-f': train_path,
'-t': test_path,
'--column-description': cd_path,
'--boosting-type': 'Plain',
'-i': '10',
'-T': '4',
'--custom-metric': custom_metric,
'--test-err-log': eval_error_path,
'--use-best-model': 'false',
'--classes-count': '4'
}
fit_params.update(NO_RANDOM_PARAMS)
with pytest.raises(yatest.common.ExecutionError):
execute_catboost_fit('CPU', fit_params)
def test_ctr_buckets():
model_path = yatest.common.test_output_path('model.bin')
learn_error_path = yatest.common.test_output_path('learn_error.tsv')
test_error_path = yatest.common.test_output_path('test_error.tsv')
eval_error_path = yatest.common.test_output_path('eval_error.tsv')
learn_path = data_file('adult', 'train_small')
test_path = data_file('adult', 'test_small')
cd_path = data_file('adult', 'train.cd')
fit_params = {
'--use-best-model': 'false',
'--loss-function': 'MultiClass',
'-f': learn_path,
'-t': test_path,
'--column-description': cd_path,
'--boosting-type': 'Plain',
'-i': '10',
'-T': '4',
'-m': model_path,
'--learn-err-log': learn_error_path,
'--test-err-log': test_error_path,
'--custom-metric': METRIC_CHECKING_MULTICLASS
}
fit_params.update(CAT_COMPARE_PARAMS)
fit_catboost_gpu(fit_params)
eval_metric(model_path, METRIC_CHECKING_MULTICLASS, test_path, cd_path, eval_error_path)
compare_evals(METRIC_CHECKING_MULTICLASS, test_error_path, eval_error_path)
return [local_canonical_file(learn_error_path), local_canonical_file(test_error_path)]
@pytest.mark.parametrize('loss_function', MULTICLASS_LOSSES)
def test_multi_targets(loss_function):
model_path = yatest.common.test_output_path('model.bin')
learn_error_path = yatest.common.test_output_path('learn_error.tsv')
test_error_path = yatest.common.test_output_path('test_error.tsv')
eval_error_path = yatest.common.test_output_path('eval_error.tsv')
learn_path = data_file('cloudness_small', 'train_small')
test_path = data_file('cloudness_small', 'test_small')
cd_path = data_file('cloudness_small', 'train.cd')
fit_params = {
'--use-best-model': 'false',
'--loss-function': loss_function,
'-f': learn_path,
'-t': test_path,
'--column-description': cd_path,
'--boosting-type': 'Plain',
'-i': '10',
'-T': '4',
'-m': model_path,
'--learn-err-log': learn_error_path,
'--test-err-log': test_error_path,
'--custom-metric': METRIC_CHECKING_MULTICLASS
}
fit_params.update(CAT_COMPARE_PARAMS)
fit_catboost_gpu(fit_params)
eval_metric(model_path, METRIC_CHECKING_MULTICLASS, test_path, cd_path, eval_error_path)
compare_evals(METRIC_CHECKING_MULTICLASS, test_error_path, eval_error_path)
return [local_canonical_file(learn_error_path), local_canonical_file(test_error_path)]
def test_custom_loss_for_multiclassification():
model_path = yatest.common.test_output_path('model.bin')
learn_error_path = yatest.common.test_output_path('learn_error.tsv')
test_error_path = yatest.common.test_output_path('test_error.tsv')
eval_error_path = yatest.common.test_output_path('eval_error.tsv')
learn_path = data_file('cloudness_small', 'train_small')
test_path = data_file('cloudness_small', 'test_small')
cd_path = data_file('cloudness_small', 'train.cd')
custom_metric = [
'Accuracy',
'Precision',
'Recall',
'F1',
'TotalF1',
'MCC',
'Kappa',
'WKappa',
'ZeroOneLoss',
'HammingLoss',
'HingeLoss'
]
custom_metric_string = ','.join(custom_metric)
fit_params = {
'--use-best-model': 'false',
'--loss-function': 'MultiClass',
'-f': learn_path,
'-t': test_path,
'--column-description': cd_path,
'--boosting-type': 'Plain',
'-i': '10',
'-T': '4',
'-m': model_path,
'--custom-metric': custom_metric_string,
'--learn-err-log': learn_error_path,
'--test-err-log': test_error_path,
}
fit_params.update(CAT_COMPARE_PARAMS)
fit_catboost_gpu(fit_params)
eval_metric(model_path, custom_metric_string, test_path, cd_path, eval_error_path)
compare_evals(custom_metric, test_error_path, eval_error_path)
return [local_canonical_file(learn_error_path), local_canonical_file(test_error_path)]
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
def test_custom_loss_for_classification(boosting_type):
learn_error_path = yatest.common.test_output_path('learn_error.tsv')
test_error_path = yatest.common.test_output_path('test_error.tsv')
eval_error_path = yatest.common.test_output_path('eval_error.tsv')
model_path = yatest.common.test_output_path('model.bin')
learn_path = data_file('adult', 'train_small')
test_path = data_file('adult', 'test_small')
cd_path = data_file('adult', 'train.cd')
custom_metric = [
'AUC',
'CrossEntropy',
'Accuracy',
'Precision',
'Recall',
'F1',
'TotalF1',
'MCC',
'BalancedAccuracy',
'BalancedErrorRate',
'Kappa',
'WKappa',
'BrierScore',
'ZeroOneLoss',
'HammingLoss',
'HingeLoss'
]
custom_metric_string = ','.join(custom_metric)
fit_params = {
'--use-best-model': 'false',
'--loss-function': 'Logloss',
'-f': learn_path,
'-t': test_path,
'--column-description': cd_path,
'--boosting-type': boosting_type,
'-w': '0.03',
'-i': '10',
'-T': '4',
'-m': model_path,
'--custom-metric': custom_metric_string,
'--learn-err-log': learn_error_path,
'--test-err-log': test_error_path
}
fit_params.update(CAT_COMPARE_PARAMS)
fit_catboost_gpu(fit_params)
eval_metric(model_path, custom_metric_string, test_path, cd_path, eval_error_path)
compare_evals(custom_metric, test_error_path, eval_error_path)
return [local_canonical_file(learn_error_path), local_canonical_file(test_error_path)]
@pytest.mark.parametrize('loss_function', MULTICLASS_LOSSES)
def test_class_names_multiclass(loss_function):
model_path = yatest.common.test_output_path('model.bin')
learn_error_path = yatest.common.test_output_path('learn_error.tsv')
test_error_path = yatest.common.test_output_path('test_error.tsv')
eval_error_path = yatest.common.test_output_path('eval_error.tsv')
learn_path = data_file('precipitation_small', 'train_small')
test_path = data_file('precipitation_small', 'test_small')
cd_path = data_file('precipitation_small', 'train.cd')
fit_params = {
'--use-best-model': 'false',
'--loss-function': loss_function,
'-f': learn_path,
'-t': test_path,
'--column-description': cd_path,
'--boosting-type': 'Plain',
'-i': '10',
'-T': '4',
'-m': model_path,
'--learn-err-log': learn_error_path,
'--test-err-log': test_error_path,
'--custom-metric': METRIC_CHECKING_MULTICLASS,
'--class-names': '0.,0.5,1.,0.25,0.75'
}
fit_params.update(CAT_COMPARE_PARAMS)
fit_catboost_gpu(fit_params)
eval_metric(model_path, METRIC_CHECKING_MULTICLASS, test_path, cd_path, eval_error_path)
compare_evals(METRIC_CHECKING_MULTICLASS, test_error_path, eval_error_path)
return [local_canonical_file(learn_error_path), local_canonical_file(test_error_path)]
@pytest.mark.parametrize('loss_function', MULTICLASS_LOSSES)
def test_lost_class(loss_function):
model_path = yatest.common.test_output_path('model.bin')
learn_error_path = yatest.common.test_output_path('learn_error.tsv')
test_error_path = yatest.common.test_output_path('test_error.tsv')
eval_error_path = yatest.common.test_output_path('eval_error.tsv')
learn_path = data_file('cloudness_lost_class', 'train_small')
test_path = data_file('cloudness_lost_class', 'test_small')
cd_path = data_file('cloudness_lost_class', 'train.cd')
fit_params = {
'--use-best-model': 'false',
'--loss-function': loss_function,
'-f': learn_path,
'-t': test_path,
'--column-description': cd_path,
'--boosting-type': 'Plain',
'-i': '10',
'-T': '4',
'-m': model_path,
'--custom-metric': METRIC_CHECKING_MULTICLASS,
'--learn-err-log': learn_error_path,
'--test-err-log': test_error_path,
'--classes-count': '3'
}
fit_params.update(CAT_COMPARE_PARAMS)
fit_catboost_gpu(fit_params)
eval_metric(model_path, METRIC_CHECKING_MULTICLASS, test_path, cd_path, eval_error_path)
compare_evals(METRIC_CHECKING_MULTICLASS, test_error_path, eval_error_path)
return [local_canonical_file(learn_error_path), local_canonical_file(test_error_path)]
def test_class_weight_with_lost_class():
model_path = yatest.common.test_output_path('model.bin')
learn_error_path = yatest.common.test_output_path('learn_error.tsv')
test_error_path = yatest.common.test_output_path('test_error.tsv')
eval_error_path = yatest.common.test_output_path('eval_error.tsv')
learn_path = data_file('cloudness_lost_class', 'train_small')
test_path = data_file('cloudness_lost_class', 'test_small')
cd_path = data_file('cloudness_lost_class', 'train.cd')
fit_params = {
'--use-best-model': 'false',
'--loss-function': 'MultiClass',
'-f': learn_path,
'-t': test_path,
'--column-description': cd_path,
'--boosting-type': 'Plain',
'-i': '10',
'-T': '4',
'-m': model_path,
'--classes-count': '3',
'--class-weights': '0.5,2,2',
'--learn-err-log': learn_error_path,
'--test-err-log': test_error_path,
'--custom-metric': METRIC_CHECKING_MULTICLASS
}
fit_params.update(CAT_COMPARE_PARAMS)
fit_catboost_gpu(fit_params)
eval_metric(model_path, METRIC_CHECKING_MULTICLASS, test_path, cd_path, eval_error_path)
compare_evals(METRIC_CHECKING_MULTICLASS, test_error_path, eval_error_path)
return [local_canonical_file(eval_error_path)]
@pytest.mark.parametrize('metric_period', ['1', '2'])
@pytest.mark.parametrize('metric', ['MultiClass', 'MultiClassOneVsAll', 'F1', 'Accuracy', 'TotalF1', 'MCC', 'Precision', 'Recall'])
@pytest.mark.parametrize('loss_function', MULTICLASS_LOSSES)
@pytest.mark.parametrize('dataset', ['cloudness_small', 'cloudness_lost_class'])
def test_eval_metrics_multiclass(metric, loss_function, dataset, metric_period):
    if (loss_function == 'MultiClass' and metric == 'MultiClassOneVsAll') or (loss_function == 'MultiClassOneVsAll' and metric == 'MultiClass'):
return
learn_path = data_file(dataset, 'train_small')
test_path = data_file(dataset, 'test_small')
cd_path = data_file(dataset, 'train.cd')
model_path = yatest.common.test_output_path('model.bin')
learn_error_path = yatest.common.test_output_path('learn_error.tsv')
test_error_path = yatest.common.test_output_path('test_error.tsv')
eval_error_path = yatest.common.test_output_path('eval_error.tsv')
fit_params = {
'--loss-function': loss_function,
'--custom-metric': metric,
'--boosting-type': 'Plain',
'-f': learn_path,
'-t': test_path,
'--column-description': cd_path,
'-i': '10',
'-T': '4',
'-m': model_path,
'--learn-err-log': learn_error_path,
'--test-err-log': test_error_path,
'--use-best-model': 'false',
'--classes-count': '3',
'--metric-period': metric_period
}
fit_params.update(CAT_COMPARE_PARAMS)
fit_catboost_gpu(fit_params)
eval_metric(model_path, metric, test_path, cd_path, eval_error_path, metric_period)
idx_test_metric = 1 if metric == loss_function else 2
first_metrics = np.round(np.loadtxt(test_error_path, skiprows=1)[:, idx_test_metric], 5)
second_metrics = np.round(np.loadtxt(eval_error_path, skiprows=1)[:, 1], 5)
assert np.all(first_metrics == second_metrics)
return [local_canonical_file(learn_error_path), local_canonical_file(test_error_path)]
def test_eval_metrics_class_names():
labels = ['a', 'b', 'c', 'd']
model_path = yatest.common.test_output_path('model.bin')
cd_path = yatest.common.test_output_path('cd.txt')
np.savetxt(cd_path, [[0, 'Target']], fmt='%s', delimiter='\t')
prng = np.random.RandomState(seed=0)
train_path = yatest.common.test_output_path('train.txt')
np.savetxt(train_path, generate_random_labeled_set(100, 10, labels, prng=prng), fmt='%s', delimiter='\t')
test_path = yatest.common.test_output_path('test.txt')
np.savetxt(test_path, generate_random_labeled_set(100, 10, labels, prng=prng), fmt='%s', delimiter='\t')
learn_error_path = yatest.common.test_output_path('learn_error.tsv')
test_error_path = yatest.common.test_output_path('test_error.tsv')
eval_error_path = yatest.common.test_output_path('eval_error.tsv')
custom_metric = 'TotalF1,MultiClass'
fit_params = {
'--loss-function': 'MultiClass',
'--custom-metric': custom_metric,
'--boosting-type': 'Plain',
'-f': train_path,
'-t': test_path,
'--column-description': cd_path,
'-i': '10',
'-T': '4',
'-m': model_path,
'--learn-err-log': learn_error_path,
'--test-err-log': test_error_path,
'--use-best-model': 'false',
'--class-names': ','.join(labels)
}
fit_catboost_gpu(fit_params)
eval_metric(model_path, custom_metric, test_path, cd_path, eval_error_path)
first_metrics = np.round(np.loadtxt(test_error_path, skiprows=1)[:, 2], 5)
second_metrics = np.round(np.loadtxt(eval_error_path, skiprows=1)[:, 1], 5)
assert np.all(first_metrics == second_metrics)
return [local_canonical_file(learn_error_path), local_canonical_file(test_error_path)]
def test_fit_multiclass_with_class_names():
labels = ['a', 'b', 'c', 'd']
model_path = yatest.common.test_output_path('model.bin')
cd_path = yatest.common.test_output_path('cd.txt')
np.savetxt(cd_path, [[0, 'Target']], fmt='%s', delimiter='\t')
prng = np.random.RandomState(seed=0)
learn_path = yatest.common.test_output_path('train.txt')
np.savetxt(learn_path, generate_random_labeled_set(100, 10, labels, prng=prng), fmt='%s', delimiter='\t')
test_path = yatest.common.test_output_path('test.txt')
np.savetxt(test_path, generate_random_labeled_set(100, 10, labels, prng=prng), fmt='%s', delimiter='\t')
test_error_path = yatest.common.test_output_path('test_error.tsv')
eval_error_path = yatest.common.test_output_path('eval_error.tsv')
fit_params = {
'--loss-function': 'MultiClass',
'--boosting-type': 'Plain',
'--custom-metric': METRIC_CHECKING_MULTICLASS,
'--class-names': ','.join(labels),
'-f': learn_path,
'-t': test_path,
'--column-description': cd_path,
'-i': '10',
'-T': '4',
'-m': model_path,
'--use-best-model': 'false',
'--test-err-log': test_error_path
}
fit_catboost_gpu(fit_params)
eval_metric(model_path, METRIC_CHECKING_MULTICLASS, test_path, cd_path, eval_error_path)
compare_evals(METRIC_CHECKING_MULTICLASS, test_error_path, eval_error_path)
return [local_canonical_file(test_error_path)]
def test_extract_multiclass_labels_from_class_names():
labels = ['a', 'b', 'c', 'd']
model_path = yatest.common.test_output_path('model.bin')
cd_path = yatest.common.test_output_path('cd.txt')
np.savetxt(cd_path, [[0, 'Target']], fmt='%s', delimiter='\t')
prng = np.random.RandomState(seed=0)
train_path = yatest.common.test_output_path('train.txt')
np.savetxt(train_path, generate_random_labeled_set(100, 10, labels, prng=prng), fmt='%s', delimiter='\t')
test_path = yatest.common.test_output_path('test.txt')
np.savetxt(test_path, generate_random_labeled_set(100, 10, labels, prng=prng), fmt='%s', delimiter='\t')
test_error_path = yatest.common.test_output_path('test_error.tsv')
eval_error_path = yatest.common.test_output_path('eval_error.tsv')
fit_params = {
'--loss-function': 'MultiClass',
'--class-names': ','.join(labels),
'--boosting-type': 'Plain',
'--custom-metric': METRIC_CHECKING_MULTICLASS,
'-f': train_path,
'-t': test_path,
'--column-description': cd_path,
'-i': '10',
'-T': '4',
'-m': model_path,
'--use-best-model': 'false',
'--test-err-log': test_error_path
}
fit_catboost_gpu(fit_params)
eval_metric(model_path, METRIC_CHECKING_MULTICLASS, test_path, cd_path, eval_error_path)
compare_evals(METRIC_CHECKING_MULTICLASS, test_error_path, eval_error_path)
py_catboost = catboost.CatBoost()
py_catboost.load_model(model_path)
assert json.loads(py_catboost.get_metadata()['multiclass_params'])['class_to_label'] == [0, 1, 2, 3]
assert json.loads(py_catboost.get_metadata()['multiclass_params'])['class_names'] == ['a', 'b', 'c', 'd']
assert json.loads(py_catboost.get_metadata()['multiclass_params'])['classes_count'] == 0
assert json.loads(py_catboost.get_metadata()['params'])['data_processing_options']['class_names'] == ['a', 'b', 'c', 'd']
return [local_canonical_file(test_error_path)]
@pytest.mark.parametrize('loss_function', MULTICLASS_LOSSES)
@pytest.mark.parametrize('prediction_type', ['Probability', 'RawFormulaVal', 'Class'])
def test_save_and_apply_multiclass_labels_from_classes_count(loss_function, prediction_type):
model_path = yatest.common.test_output_path('model.bin')
cd_path = yatest.common.test_output_path('cd.txt')
np.savetxt(cd_path, [[0, 'Target']], fmt='%s', delimiter='\t')
prng = np.random.RandomState(seed=0)
train_path = yatest.common.test_output_path('train.txt')
np.savetxt(train_path, generate_random_labeled_set(100, 10, [1, 2], prng=prng), fmt='%s', delimiter='\t')
test_path = yatest.common.test_output_path('test.txt')
np.savetxt(test_path, generate_random_labeled_set(100, 10, [0, 1, 2, 3], prng=prng), fmt='%s', delimiter='\t')
eval_path = yatest.common.test_output_path('eval.txt')
fit_params = {
'--loss-function': loss_function,
'--boosting-type': 'Plain',
'--classes-count': '4',
'-f': train_path,
'--column-description': cd_path,
'-i': '10',
'-T': '4',
'-m': model_path,
'--use-best-model': 'false'
}
fit_catboost_gpu(fit_params)
py_catboost = catboost.CatBoost()
py_catboost.load_model(model_path)
assert json.loads(py_catboost.get_metadata()['multiclass_params'])['class_to_label'] == [1, 2]
assert json.loads(py_catboost.get_metadata()['multiclass_params'])['classes_count'] == 4
assert json.loads(py_catboost.get_metadata()['multiclass_params'])['class_names'] == []
calc_cmd = (
CATBOOST_PATH,
'calc',
'--input-path', test_path,
'--column-description', cd_path,
'-m', model_path,
'--output-path', eval_path,
'--prediction-type', prediction_type
)
yatest.common.execute(calc_cmd)
if prediction_type == 'RawFormulaVal':
with open(eval_path, "rt") as f:
for i, line in enumerate(f):
if i == 0:
assert line[:-1] == 'DocId\t{}:Class=0\t{}:Class=1\t{}:Class=2\t{}:Class=3' \
.format(prediction_type, prediction_type, prediction_type, prediction_type)
else:
assert float(line[:-1].split()[1]) == float('-inf') and float(line[:-1].split()[4]) == float('-inf') # fictitious approxes must be negative infinity
if prediction_type == 'Probability':
with open(eval_path, "rt") as f:
for i, line in enumerate(f):
if i == 0:
assert line[:-1] == 'DocId\t{}:Class=0\t{}:Class=1\t{}:Class=2\t{}:Class=3' \
.format(prediction_type, prediction_type, prediction_type, prediction_type)
else:
assert abs(float(line[:-1].split()[1])) < 1e-307 \
and abs(float(line[:-1].split()[4])) < 1e-307 # fictitious probabilities must be virtually zero
if prediction_type == 'Class':
with open(eval_path, "rt") as f:
for i, line in enumerate(f):
if i == 0:
assert line[:-1] == 'DocId\tClass'
else:
assert float(line[:-1].split()[1]) in [1, 2] # probability of 0,3 classes appearance must be zero
return [local_canonical_file(eval_path)]
REG_LOSS_FUNCTIONS = ['RMSE', 'MAE', 'Lq:q=1', 'Lq:q=1.5', 'Lq:q=3']
CUSTOM_METRIC = ["MAE,Lq:q=2.5,NumErrors:greater_than=0.1,NumErrors:greater_than=0.01,NumErrors:greater_than=0.5"]
@pytest.mark.parametrize('loss_function', REG_LOSS_FUNCTIONS)
@pytest.mark.parametrize('custom_metric', CUSTOM_METRIC)
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
def test_reg_targets(loss_function, boosting_type, custom_metric):
test_error_path = yatest.common.test_output_path("test_error.tsv")
params = [
'--use-best-model', 'false',
'--loss-function', loss_function,
'-f', data_file('adult_crossentropy', 'train_proba'),
'-t', data_file('adult_crossentropy', 'test_proba'),
'--column-description', data_file('adult_crossentropy', 'train.cd'),
'-i', '10',
'-T', '4',
'--counter-calc-method', 'SkipTest',
'--custom-metric', custom_metric,
'--test-err-log', test_error_path,
'--boosting-type', boosting_type
]
fit_catboost_gpu(params)
return [local_canonical_file(test_error_path, diff_tool=diff_tool(1e-5))]
def test_eval_result_on_different_pool_type():
output_eval_path = yatest.common.test_output_path('test.eval')
output_quantized_eval_path = yatest.common.test_output_path('test.eval.quantized')
def get_params(train, test, eval_path):
return (
'--use-best-model', 'false',
'--loss-function', 'Logloss',
'-f', train,
'-t', test,
'--cd', data_file('querywise', 'train.cd'),
'-i', '10',
'-T', '4',
'--eval-file', eval_path,
)
def get_pool_path(set_name, is_quantized=False):
path = data_file('querywise', set_name)
return 'quantized://' + path + '.quantized' if is_quantized else path
fit_catboost_gpu(get_params(get_pool_path('train'), get_pool_path('test'), output_eval_path))
fit_catboost_gpu(get_params(get_pool_path('train', True), get_pool_path('test', True), output_quantized_eval_path))
assert filecmp.cmp(output_eval_path, output_quantized_eval_path)
return [local_canonical_file(output_eval_path)]
def compare_evals_with_precision(fit_eval, calc_eval):
array_fit = np.genfromtxt(fit_eval, delimiter='\t', skip_header=True)
array_calc = np.genfromtxt(calc_eval, delimiter='\t', skip_header=True)
if open(fit_eval, "r").readline().split()[:-1] != open(calc_eval, "r").readline().split():
return False
array_fit = np.delete(array_fit, np.s_[-1], 1)
return np.all(np.isclose(array_fit, array_calc, rtol=1e-6))
def test_convert_model_to_json_without_cat_features():
output_model_path = yatest.common.test_output_path('model.json')
output_eval_path = yatest.common.test_output_path('test.eval')
fit_params = [
'--use-best-model', 'false',
'-f', data_file('higgs', 'train_small'),
'-t', data_file('higgs', 'test_small'),
'--column-description', data_file('higgs', 'train.cd'),
'-i', '20',
'-T', '4',
'-r', '0',
'--eval-file', output_eval_path,
'-m', output_model_path,
'--model-format', 'Json'
]
fit_catboost_gpu(fit_params)
formula_predict_path = yatest.common.test_output_path('predict_test.eval')
calc_cmd = (
CATBOOST_PATH,
'calc',
'--input-path', data_file('higgs', 'test_small'),
'--column-description', data_file('higgs', 'train.cd'),
'-m', output_model_path,
'--model-format', 'Json',
'--output-path', formula_predict_path
)
execute(calc_cmd)
assert (compare_evals_with_precision(output_eval_path, formula_predict_path))
return [local_canonical_file(output_eval_path, diff_tool=diff_tool())]
@pytest.mark.parametrize(
'loss_function,eval_metric,boosting_type',
[
('QueryRMSE', 'NDCG', 'Plain'),
('QueryRMSE', 'NDCG', 'Ordered'),
# Boosting type 'Ordered' is not supported for YetiRankPairwise and PairLogitPairwise
('YetiRankPairwise', 'NDCG', 'Plain'),
('PairLogit', 'PairAccuracy', 'Plain'),
('PairLogitPairwise', 'NDCG', 'Plain'),
('PairLogitPairwise', 'PairAccuracy', 'Plain'),
],
ids=[
'loss_function=QueryRMSE,eval_metric=NDCG,boosting_type=Plain',
'loss_function=QueryRMSE,eval_metric=NDCG,boosting_type=Ordered',
'loss_function=YetiRankPairwise,eval_metric=NDCG,boosting_type=Plain',
'loss_function=PairLogit,eval_metric=PairAccuracy,boosting_type=Plain',
'loss_function=PairLogitPairwise,eval_metric=NDCG,boosting_type=Plain',
'loss_function=PairLogitPairwise,eval_metric=PairAccuracy,boosting_type=Plain'
]
)
def test_groupwise_with_cat_features(loss_function, eval_metric, boosting_type):
output_model_path = yatest.common.test_output_path('model.bin')
output_eval_path = yatest.common.test_output_path('test.eval')
train_file = data_file('black_friday', 'train')
test_file = data_file('black_friday', 'test')
cd_file = data_file('black_friday', 'cd')
params = [
'--loss-function', loss_function,
'--has-header',
'-f', train_file,
'-t', test_file,
'--column-description', cd_file,
'--boosting-type', boosting_type,
'-i', '10',
'-T', '4',
'--eval-metric', eval_metric,
'-m', output_model_path,
]
fit_catboost_gpu(params)
apply_catboost(output_model_path, test_file, cd_file, output_eval_path)
diff_precision = 1e-2 if loss_function == 'YetiRankPairwise' else 1e-5
return [local_canonical_file(output_eval_path, diff_tool=diff_tool(diff_precision))]
@pytest.mark.parametrize(
'border_count',
[1, 3, 10],
ids=lambda border_count: 'border_count=%d' % border_count
)
@pytest.mark.parametrize(
'boosting_type',
BOOSTING_TYPE,
ids=lambda boosting_type: 'boosting_type=%s' % boosting_type
)
def test_ctr_target_quantization(border_count, boosting_type):
output_model_path = yatest.common.test_output_path('model.bin')
output_eval_path = yatest.common.test_output_path('test.eval')
train_file = data_file('adult_crossentropy', 'train_proba')
test_file = data_file('adult_crossentropy', 'test_proba')
cd_file = data_file('adult_crossentropy', 'train.cd')
params = {
'--use-best-model': 'false',
'--loss-function': 'RMSE',
'-f': train_file,
'-t': test_file,
'--column-description': cd_file,
'--boosting-type': boosting_type,
'-i': '3',
'-T': '4',
'-m': output_model_path,
'--ctr-target-border-count': str(border_count)
}
fit_catboost_gpu(params)
apply_catboost(output_model_path, test_file, cd_file, output_eval_path)
return [local_canonical_file(output_eval_path, diff_tool=diff_tool())]
def test_train_on_quantized_pool_with_large_grid():
# Dataset with 2 random columns, first is Target, second is Num, used Uniform grid with 10000
# borders
#
# There are 20 rows in a dataset.
cmd = (
CATBOOST_PATH, 'fit',
'--task-type', 'GPU',
'-f', 'quantized://' + data_file('quantized_with_large_grid', 'train.qbin'),
'-t', 'quantized://' + data_file('quantized_with_large_grid', 'test.qbin'),
'-i', '10')
yatest.common.execute(cmd)
|
py | 1a3720b758e16ced211bb670f190af559d299d91 | import numpy as np
from utils import *
from images import *
np.random.seed(2)
class Imager(object):
def __init__(self, input_size, labels):
if type(input_size) is int:
self.input_size = (input_size, input_size)
else:
self.input_size = input_size
self.labels = labels
self.palette = np.random.randint(0, 256, (len(self.labels), 3)).tolist()
def imset_from_path(self, path):
ims = np.array(imread_from_path(path))
if len(ims.shape) == 3:
ims = [ims]
self.ims = ims
def imset(self, ims):
ims = np.array(ims)
if len(ims.shape) == 3:
ims = [ims]
self.ims = ims
def preprocess(self):
return improcess(self.ims, self.input_size)
def ncs_preprocess(self):
ims = improcess(self.ims, self.input_size, to_rgb=False, normalise=False) # ims are normalised by the ncs.
ims = np.transpose(np.array(ims), [0, 3, 1, 2])
return np.expand_dims(ims, 1)
def visualise_preds(self, pred_list):
self.ims = visualise(self.ims, pred_list, self.input_size, self.labels, self.palette)
return self.ims
def ncs_visualise_preds(self, objects_list):
imlist = list()
for im, objects in zip(self.ims, objects_list):
if not objects:
imlist.append(im)
continue
for obj in objects:
add_overlays_v2(obj, im, self.labels, self.palette)
imlist.append(im)
self.ims = imlist
return self.ims
def imsave(self, ims):
imwrite(ims)
|
py | 1a37211c9d49187dd70c1c131e564046b631cd61 | """
License: Apache-2.0
Author: Huadong Liao
E-mail: [email protected]
"""
import numpy as np
import tensorflow as tf
from core.utils import *
epsilon = 1e-9
class CapsLayer(object):
''' Capsule layer.
Args:
input: A 4-D tensor.
        num_outputs: the number of capsules in this layer.
        vec_len: integer, the length of the output vector of a capsule.
        layer_type: string, one of 'FC' or 'CONV', the type of this layer,
            fully connected or convolutional (the distinction is kept for future expansion).
        with_routing: boolean, whether this capsule layer routes from the
            lower-level capsule layer.
Returns:
A 4-D tensor.
'''
def __init__(self, num_outputs, vec_len, batch_size, stddev, iter_routing, with_routing=True, layer_type='FC'):
self.num_outputs = num_outputs
self.vec_len = vec_len
self.with_routing = with_routing
self.layer_type = layer_type
self.batch_size = batch_size
self.stddev = stddev
self.iter_routing = iter_routing
def __call__(self, input, kernel_size=None, stride=None):
'''
        The parameters 'kernel_size' and 'stride' are only used when 'layer_type' equals 'CONV'.
'''
if self.layer_type == 'CONV':
self.kernel_size = kernel_size
self.stride = stride
if not self.with_routing:
# the PrimaryCaps layer, a convolutional layer
# input: [batch_size, 20, 20, 256]
# assert input.get_shape() == [cfg.batch_size, 20, 20, 256]
                # NOTE: I could not find anything in the paper about whether the
                # PrimaryCaps convolution applies a ReLU activation before the
                # squashing function, but experiments show that using ReLU gives a
                # higher test accuracy. So, which one to use is your choice.
capsules = tf.contrib.layers.conv1d(input, self.num_outputs * self.vec_len,
self.kernel_size, self.stride, padding="VALID",
activation_fn=tf.nn.relu)
# capsules = tf.contrib.layers.conv2d(input, self.num_outputs * self.vec_len,
# self.kernel_size, self.stride,padding="VALID",
# activation_fn=None)
                capsules = tf.reshape(capsules, (-1, capsules.shape[1] * capsules.shape[2] // self.vec_len, self.vec_len, 1))
# return tensor with shape [batch_size, 1152, 8, 1]
capsules = squash(capsules)
return(capsules)
if self.layer_type == 'FC':
if self.with_routing:
# the DigitCaps layer, a fully connected layer
# Reshape the input into [batch_size, 1152, 1, 8, 1]
self.input = tf.reshape(input, shape=(-1, input.shape[1], 1, input.shape[2], 1))
with tf.variable_scope('routing'):
# b_IJ: [batch_size, num_caps_l, num_caps_l_plus_1, 1, 1],
# about the reason of using 'batch_size', see issue #21
b_IJ = tf.constant(np.zeros([self.batch_size, input.shape[1].value, self.num_outputs, 1, 1], dtype=np.float32))
capsules = routing(self.input, b_IJ, self.stddev, self.iter_routing, num_outputs=self.num_outputs, num_dims=self.vec_len)
capsules = tf.squeeze(capsules, axis=1)
return(capsules)
def routing(input, b_IJ, stddev, iter_routing, num_outputs=10, num_dims=16):
''' The routing algorithm.
Args:
input: A Tensor with [batch_size, num_caps_l=1152, 1, length(u_i)=8, 1]
               shape, where num_caps_l is the number of capsules in layer l.
num_outputs: the number of output capsules.
num_dims: the number of dimensions for output capsule.
Returns:
A Tensor of shape [batch_size, num_caps_l_plus_1, length(v_j)=16, 1]
representing the vector output `v_j` in the layer l+1
Notes:
u_i represents the vector output of capsule i in the layer l, and
v_j the vector output of capsule j in the layer l+1.
'''
# W: [1, num_caps_i, num_caps_j * len_v_j, len_u_j, 1]
input_shape = get_shape(input)
W = tf.get_variable('Weight', shape=[1, input_shape[1], num_dims * num_outputs] + input_shape[-2:],
dtype=tf.float32, initializer=tf.random_normal_initializer(stddev=stddev))
biases = tf.get_variable('bias', shape=(1, 1, num_outputs, num_dims, 1))
# Eq.2, calc u_hat
# Since tf.matmul is a time-consuming op,
# A better solution is using element-wise multiply, reduce_sum and reshape
# ops instead. Matmul [a, b] x [b, c] is equal to a series ops as
# element-wise multiply [a*c, b] * [a*c, b], reduce_sum at axis=1 and
# reshape to [a, c]
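    # Concretely, with the default shapes used in the asserts below: W is
    # [1, 1152, 160, 8, 1], the tiled input is [batch_size, 1152, 160, 8, 1],
    # their element-wise product reduced over axis=3 is [batch_size, 1152, 160, 1, 1],
    # and reshaping gives u_hat of shape [batch_size, 1152, 10, 16, 1] (10 * 16 = 160).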
input = tf.tile(input, [1, 1, num_dims * num_outputs, 1, 1])
# assert input.get_shape() == [cfg.batch_size, 1152, 160, 8, 1]
u_hat = reduce_sum(W * input, axis=3, keepdims=True)
u_hat = tf.reshape(u_hat, shape=[-1, input_shape[1], num_outputs, num_dims, 1])
# assert u_hat.get_shape() == [cfg.batch_size, 1152, 10, 16, 1]
# In forward, u_hat_stopped = u_hat; in backward, no gradient passed back from u_hat_stopped to u_hat
u_hat_stopped = tf.stop_gradient(u_hat, name='stop_gradient')
    # line 3: for r iterations do
for r_iter in range(iter_routing):
with tf.variable_scope('iter_' + str(r_iter)):
# line 4:
# => [batch_size, 1152, 10, 1, 1]
c_IJ = softmax(b_IJ, axis=2)
# At last iteration, use `u_hat` in order to receive gradients from the following graph
if r_iter == iter_routing - 1:
# line 5:
# weighting u_hat with c_IJ, element-wise in the last two dims
# => [batch_size, 1152, 10, 16, 1]
s_J = tf.multiply(c_IJ, u_hat)
# then sum in the second dim, resulting in [batch_size, 1, 10, 16, 1]
s_J = reduce_sum(s_J, axis=1, keepdims=True) + biases
# assert s_J.get_shape() == [cfg.batch_size, 1, num_outputs, num_dims, 1]
# line 6:
# squash using Eq.1,
v_J = squash(s_J)
# assert v_J.get_shape() == [cfg.batch_size, 1, 10, 16, 1]
elif r_iter < iter_routing - 1: # Inner iterations, do not apply backpropagation
s_J = tf.multiply(c_IJ, u_hat_stopped)
s_J = reduce_sum(s_J, axis=1, keepdims=True) + biases
v_J = squash(s_J)
# line 7:
            # reshape & tile v_j from [batch_size, 1, 10, 16, 1] to [batch_size, 1152, 10, 16, 1],
            # then matmul in the last two dims: [16, 1].T x [16, 1] => [1, 1] (implemented as an
            # element-wise product reduced over axis=3), giving a [batch_size, 1152, 10, 1, 1] tensor
v_J_tiled = tf.tile(v_J, [1, input_shape[1], 1, 1, 1])
u_produce_v = reduce_sum(u_hat_stopped * v_J_tiled, axis=3, keepdims=True)
# assert u_produce_v.get_shape() == [cfg.batch_size, 1152, 10, 1, 1]
# b_IJ += tf.reduce_sum(u_produce_v, axis=0, keep_dims=True)
b_IJ += u_produce_v
return(v_J)
def squash(vector):
'''Squashing function corresponding to Eq. 1
Args:
vector: A tensor with shape [batch_size, 1, num_caps, vec_len, 1] or [batch_size, num_caps, vec_len, 1].
Returns:
A tensor with the same shape as vector but squashed in 'vec_len' dimension.
'''
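    # Eq. 1 as implemented below: v = (|s|^2 / (1 + |s|^2)) * (s / sqrt(|s|^2 + epsilon)),
    # where the small epsilon guards against division by zero for near-zero vectors.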
vec_squared_norm = reduce_sum(tf.square(vector), -2, keepdims=True)
scalar_factor = vec_squared_norm / (1 + vec_squared_norm) / tf.sqrt(vec_squared_norm + epsilon)
vec_squashed = scalar_factor * vector # element-wise
return(vec_squashed)
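# A minimal usage sketch added for illustration (not part of the original module);
# it assumes a TF1.x graph-mode environment and the core.utils helpers imported above.
# It wires a convolutional PrimaryCaps layer into a fully connected DigitCaps layer
# with dynamic routing, following the shapes documented in the comments above.
if __name__ == '__main__':
    conv_features = tf.placeholder(tf.float32, shape=(128, 20, 256))  # e.g. conv1d feature maps
    primary_caps = CapsLayer(num_outputs=32, vec_len=8, batch_size=128, stddev=0.01,
                             iter_routing=3, with_routing=False, layer_type='CONV')
    caps1 = primary_caps(conv_features, kernel_size=9, stride=2)  # -> [128, 192, 8, 1]
    digit_caps = CapsLayer(num_outputs=10, vec_len=16, batch_size=128, stddev=0.01,
                           iter_routing=3, with_routing=True, layer_type='FC')
    caps2 = digit_caps(caps1)  # -> [128, 10, 16, 1] after routing
    print(caps1.get_shape(), caps2.get_shape())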
|
py | 1a37223284ba7453b9e32c470fe3037d40da4a74 | import wx
from .icons import icons8_keyboard_50
from .mwindow import MWindow
_ = wx.GetTranslation
class KeymapPanel(wx.Panel):
def __init__(self, *args, context=None, **kwds):
kwds["style"] = kwds.get("style", 0) | wx.TAB_TRAVERSAL
wx.Panel.__init__(self, *args, **kwds)
self.context = context
self.list_keymap = wx.ListCtrl(
self, wx.ID_ANY, style=wx.LC_HRULES | wx.LC_REPORT | wx.LC_VRULES
)
self.button_add = wx.Button(self, wx.ID_ANY, _("Add Hotkey"))
self.text_key_name = wx.TextCtrl(self, wx.ID_ANY, "")
self.text_command_name = wx.TextCtrl(self, wx.ID_ANY, "")
self.__set_properties()
self.__do_layout()
self.Bind(wx.EVT_BUTTON, self.on_button_add_hotkey, self.button_add)
# end wxGlade
self.Bind(
wx.EVT_LIST_ITEM_RIGHT_CLICK, self.on_item_rightclick, self.list_keymap
)
self.Bind(wx.EVT_LIST_ITEM_SELECTED, self.on_item_activated, self.list_keymap)
self.text_key_name.Bind(wx.EVT_KEY_DOWN, self.on_key_press)
def initialize(self):
self.reload_keymap()
self.Children[0].SetFocus()
def finalize(self):
pass
def __set_properties(self):
self.list_keymap.SetToolTip(_("What keys are bound to which actions?"))
self.list_keymap.AppendColumn(_("Key"), format=wx.LIST_FORMAT_LEFT, width=114)
self.list_keymap.AppendColumn(
_("Command"), format=wx.LIST_FORMAT_LEFT, width=348
)
self.button_add.SetToolTip(_("Add a new hotkey"))
# end wxGlade
def __do_layout(self):
# begin wxGlade: Keymap.__do_layout
sizer_1 = wx.BoxSizer(wx.VERTICAL)
sizer_2 = wx.BoxSizer(wx.HORIZONTAL)
sizer_1.Add(self.list_keymap, 1, wx.EXPAND, 0)
sizer_2.Add(self.button_add, 0, 0, 0)
sizer_2.Add(self.text_key_name, 1, 0, 0)
sizer_2.Add(self.text_command_name, 2, 0, 0)
sizer_1.Add(sizer_2, 0, wx.EXPAND, 0)
self.SetSizer(sizer_1)
self.Layout()
# end wxGlade
def on_item_activated(self, event):
element = event.Text
self.text_key_name.SetValue(element)
self.text_command_name.SetValue(self.context.keymap[element])
def on_item_rightclick(self, event):
element = event.Text
menu = wx.Menu()
convert = menu.Append(
wx.ID_ANY, _("Remove %s") % str(element)[:16], "", wx.ITEM_NORMAL
)
self.Bind(wx.EVT_MENU, self.on_tree_popup_delete(element), convert)
convert = menu.Append(wx.ID_ANY, _("Reset Default"), "", wx.ITEM_NORMAL)
self.Bind(wx.EVT_MENU, self.on_tree_popup_clear(element), convert)
self.PopupMenu(menu)
menu.Destroy()
def on_tree_popup_clear(self, element):
def delete(event=None):
self.context.default_keymap()
self.list_keymap.DeleteAllItems()
self.reload_keymap()
return delete
def on_tree_popup_delete(self, element):
def delete(event=None):
try:
del self.context.keymap[element]
self.list_keymap.DeleteAllItems()
self.reload_keymap()
except KeyError:
pass
return delete
def reload_keymap(self):
i = 0
for key in self.context.keymap:
value = self.context.keymap[key]
m = self.list_keymap.InsertItem(i, str(key))
i += 1
if m != -1:
self.list_keymap.SetItem(m, 1, str(value))
def on_button_add_hotkey(self, event=None): # wxGlade: Keymap.<event_handler>
keystroke = self.text_key_name.GetValue()
if len(keystroke) == 0:
dlg = wx.MessageDialog(
None,
_("Missing Keystroke"),
_("No Keystroke for binding."),
wx.OK | wx.ICON_WARNING,
)
dlg.ShowModal()
dlg.Destroy()
self.text_key_name.SetFocus()
return
if len(self.text_command_name.GetValue()) == 0:
dlg = wx.MessageDialog(
None,
_("Missing Command"),
_("No Command for binding."),
wx.OK | wx.ICON_WARNING,
)
dlg.ShowModal()
dlg.Destroy()
self.text_command_name.SetFocus()
return
self.context.keymap[
self.text_key_name.GetValue()
] = self.text_command_name.GetValue()
self.text_key_name.SetValue("")
self.text_command_name.SetValue("")
self.list_keymap.DeleteAllItems()
self.reload_keymap()
def on_key_press(self, event):
from meerk40t.gui.wxutils import get_key_name
keyvalue = get_key_name(event)
self.text_command_name.SetValue("")
if keyvalue is None:
self.text_key_name.SetValue("")
else:
self.text_key_name.SetValue(keyvalue)
for i, key in enumerate(self.context.keymap):
if key == keyvalue:
self.list_keymap.Select(i, True)
self.list_keymap.Focus(i)
self.text_command_name.SetValue(self.context.keymap[key])
else:
self.list_keymap.Select(i, False)
class Keymap(MWindow):
def __init__(self, *args, **kwds):
super().__init__(500, 530, *args, **kwds)
self.panel = KeymapPanel(self, wx.ID_ANY, context=self.context)
_icon = wx.NullIcon
_icon.CopyFromBitmap(icons8_keyboard_50.GetBitmap())
self.SetIcon(_icon)
# begin wxGlade: Keymap.__set_properties
self.SetTitle(_("Keymap Settings"))
def window_open(self):
self.panel.initialize()
def window_close(self):
self.panel.finalize()
|
py | 1a37228b7e74d36d3487a5c4504e711779bea030 | import logging
from threading import Thread
from time import sleep, time
from test.cl_node.errors import NonZeroExitCodeError
from test.cl_node.wait import wait_for_block_hashes_propagated_to_all_nodes
from test.cl_node.casperlabsnode import extract_block_hash_from_propose_output
CONTRACT_1 = 'old_wasm/helloname_invalid_just_1.wasm'
CONTRACT_2 = 'old_wasm/helloname_invalid_just_2.wasm'
class TimedThread(Thread):
def __init__(self,
docker_node: 'DockerNode',
command_kwargs: dict,
start_time: float) -> None:
Thread.__init__(self)
self.name = docker_node.name
self.node = docker_node
self.kwargs = command_kwargs
self.start_time = start_time
def run(self) -> None:
if self.start_time <= time():
raise Exception(f'start_time: {self.start_time} is past current time: {time()}')
while self.start_time > time():
sleep(0.001)
self.my_call(self.kwargs)
def my_call(self, kwargs):
raise NotImplementedError()
class DeployTimedTread(TimedThread):
def my_call(self, kwargs):
self.node.client.deploy(**kwargs)
class ProposeTimedThread(TimedThread):
def my_call(self, kwargs):
self.block_hash = None
try:
self.block_hash = extract_block_hash_from_propose_output(self.node.client.propose())
except NonZeroExitCodeError:
# Ignore error for no new deploys
pass
def test_neglected_invalid_block(three_node_network):
"""
Feature file: neglected_invalid_justification.feature
Scenario: 3 Nodes doing simultaneous deploys and proposes do not have neglected invalid blocks
"""
bootstrap, node1, node2 = three_node_network.docker_nodes
for cycle_count in range(4):
logging.info(f'DEPLOY_PROPOSE CYCLE COUNT: {cycle_count + 1}')
start_time = time() + 1
boot_deploy = DeployTimedTread(bootstrap,
{'session_contract': CONTRACT_1,
'payment_contract': CONTRACT_1},
start_time)
node1_deploy = DeployTimedTread(node1,
{'session_contract': CONTRACT_2,
'payment_contract': CONTRACT_2},
start_time)
node2_deploy = DeployTimedTread(node2,
{'session_contract': CONTRACT_2,
'payment_contract': CONTRACT_2},
start_time)
# Simultaneous Deploy
node1_deploy.start()
boot_deploy.start()
node2_deploy.start()
boot_deploy.join()
node1_deploy.join()
node2_deploy.join()
start_time = time() + 1
boot_deploy = ProposeTimedThread(bootstrap, {}, start_time)
node1_deploy = ProposeTimedThread(node1, {}, start_time)
node2_deploy = ProposeTimedThread(node2, {}, start_time)
# Simultaneous Propose
node1_deploy.start()
boot_deploy.start()
node2_deploy.start()
boot_deploy.join()
node1_deploy.join()
node2_deploy.join()
# Assure deploy and proposes occurred
block_hashes = [h for h in [boot_deploy.block_hash, node1_deploy.block_hash, node2_deploy.block_hash] if h]
wait_for_block_hashes_propagated_to_all_nodes(three_node_network.docker_nodes, block_hashes)
assert ' for NeglectedInvalidBlock.' not in bootstrap.logs()
assert ' for NeglectedInvalidBlock.' not in node1.logs()
assert ' for NeglectedInvalidBlock.' not in node2.logs()
|
py | 1a37236fae4cd96cb02ca384adf49ac60d5c4e60 | """Support for deCONZ binary sensors."""
from __future__ import annotations
from collections.abc import Callable, ValuesView
from dataclasses import dataclass
from pydeconz.sensor import (
Alarm,
CarbonMonoxide,
DeconzSensor as PydeconzSensor,
Fire,
GenericFlag,
OpenClose,
Presence,
Vibration,
Water,
)
from homeassistant.components.binary_sensor import (
DOMAIN,
BinarySensorDeviceClass,
BinarySensorEntity,
BinarySensorEntityDescription,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import ATTR_TEMPERATURE
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.entity import EntityCategory
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from .const import ATTR_DARK, ATTR_ON
from .deconz_device import DeconzDevice
from .gateway import DeconzGateway, get_gateway_from_config_entry
ATTR_ORIENTATION = "orientation"
ATTR_TILTANGLE = "tiltangle"
ATTR_VIBRATIONSTRENGTH = "vibrationstrength"
PROVIDES_EXTRA_ATTRIBUTES = (
"alarm",
"carbon_monoxide",
"fire",
"flag",
"open",
"presence",
"vibration",
"water",
)
@dataclass
class DeconzBinarySensorDescriptionMixin:
"""Required values when describing secondary sensor attributes."""
suffix: str
update_key: str
value_fn: Callable[[PydeconzSensor], bool | None]
@dataclass
class DeconzBinarySensorDescription(
BinarySensorEntityDescription,
DeconzBinarySensorDescriptionMixin,
):
"""Class describing deCONZ binary sensor entities."""
ENTITY_DESCRIPTIONS = {
Alarm: [
DeconzBinarySensorDescription(
key="alarm",
value_fn=lambda device: device.alarm, # type: ignore[no-any-return]
suffix="",
update_key="alarm",
device_class=BinarySensorDeviceClass.SAFETY,
)
],
CarbonMonoxide: [
DeconzBinarySensorDescription(
key="carbon_monoxide",
value_fn=lambda device: device.carbon_monoxide, # type: ignore[no-any-return]
suffix="",
update_key="carbonmonoxide",
device_class=BinarySensorDeviceClass.CO,
)
],
Fire: [
DeconzBinarySensorDescription(
key="fire",
value_fn=lambda device: device.fire, # type: ignore[no-any-return]
suffix="",
update_key="fire",
device_class=BinarySensorDeviceClass.SMOKE,
),
DeconzBinarySensorDescription(
key="in_test_mode",
value_fn=lambda device: device.in_test_mode, # type: ignore[no-any-return]
suffix="Test Mode",
update_key="test",
device_class=BinarySensorDeviceClass.SMOKE,
entity_category=EntityCategory.DIAGNOSTIC,
),
],
GenericFlag: [
DeconzBinarySensorDescription(
key="flag",
value_fn=lambda device: device.flag, # type: ignore[no-any-return]
suffix="",
update_key="flag",
)
],
OpenClose: [
DeconzBinarySensorDescription(
key="open",
value_fn=lambda device: device.open, # type: ignore[no-any-return]
suffix="",
update_key="open",
device_class=BinarySensorDeviceClass.OPENING,
)
],
Presence: [
DeconzBinarySensorDescription(
key="presence",
value_fn=lambda device: device.presence, # type: ignore[no-any-return]
suffix="",
update_key="presence",
device_class=BinarySensorDeviceClass.MOTION,
)
],
Vibration: [
DeconzBinarySensorDescription(
key="vibration",
value_fn=lambda device: device.vibration, # type: ignore[no-any-return]
suffix="",
update_key="vibration",
device_class=BinarySensorDeviceClass.VIBRATION,
)
],
Water: [
DeconzBinarySensorDescription(
key="water",
value_fn=lambda device: device.water, # type: ignore[no-any-return]
suffix="",
update_key="water",
device_class=BinarySensorDeviceClass.MOISTURE,
)
],
}
BINARY_SENSOR_DESCRIPTIONS = [
DeconzBinarySensorDescription(
key="tampered",
value_fn=lambda device: device.tampered, # type: ignore[no-any-return]
suffix="Tampered",
update_key="tampered",
device_class=BinarySensorDeviceClass.TAMPER,
entity_category=EntityCategory.DIAGNOSTIC,
),
DeconzBinarySensorDescription(
key="low_battery",
value_fn=lambda device: device.low_battery, # type: ignore[no-any-return]
suffix="Low Battery",
update_key="lowbattery",
device_class=BinarySensorDeviceClass.BATTERY,
entity_category=EntityCategory.DIAGNOSTIC,
),
]
async def async_setup_entry(
hass: HomeAssistant,
config_entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
"""Set up the deCONZ binary sensor."""
gateway = get_gateway_from_config_entry(hass, config_entry)
gateway.entities[DOMAIN] = set()
@callback
def async_add_sensor(
sensors: list[PydeconzSensor]
| ValuesView[PydeconzSensor] = gateway.api.sensors.values(),
) -> None:
"""Add binary sensor from deCONZ."""
entities: list[DeconzBinarySensor] = []
for sensor in sensors:
if not gateway.option_allow_clip_sensor and sensor.type.startswith("CLIP"):
continue
known_entities = set(gateway.entities[DOMAIN])
for description in (
ENTITY_DESCRIPTIONS.get(type(sensor), []) + BINARY_SENSOR_DESCRIPTIONS
):
if (
not hasattr(sensor, description.key)
or description.value_fn(sensor) is None
):
continue
new_sensor = DeconzBinarySensor(sensor, gateway, description)
if new_sensor.unique_id not in known_entities:
entities.append(new_sensor)
if entities:
async_add_entities(entities)
config_entry.async_on_unload(
async_dispatcher_connect(
hass,
gateway.signal_new_sensor,
async_add_sensor,
)
)
async_add_sensor(
[gateway.api.sensors[key] for key in sorted(gateway.api.sensors, key=int)]
)
class DeconzBinarySensor(DeconzDevice, BinarySensorEntity):
"""Representation of a deCONZ binary sensor."""
TYPE = DOMAIN
_device: PydeconzSensor
entity_description: DeconzBinarySensorDescription
def __init__(
self,
device: PydeconzSensor,
gateway: DeconzGateway,
description: DeconzBinarySensorDescription,
) -> None:
"""Initialize deCONZ binary sensor."""
self.entity_description: DeconzBinarySensorDescription = description
super().__init__(device, gateway)
if description.suffix:
self._attr_name = f"{self._device.name} {description.suffix}"
self._update_keys = {description.update_key, "reachable"}
if self.entity_description.key in PROVIDES_EXTRA_ATTRIBUTES:
self._update_keys.update({"on", "state"})
@property
def unique_id(self) -> str:
"""Return a unique identifier for this device."""
if self.entity_description.suffix:
return f"{self.serial}-{self.entity_description.suffix.lower()}"
return super().unique_id
@callback
def async_update_callback(self) -> None:
"""Update the sensor's state."""
if self._device.changed_keys.intersection(self._update_keys):
super().async_update_callback()
@property
def is_on(self) -> bool | None:
"""Return the state of the sensor."""
return self.entity_description.value_fn(self._device)
@property
def extra_state_attributes(self) -> dict[str, bool | float | int | list | None]:
"""Return the state attributes of the sensor."""
attr: dict[str, bool | float | int | list | None] = {}
if self.entity_description.key not in PROVIDES_EXTRA_ATTRIBUTES:
return attr
if self._device.on is not None:
attr[ATTR_ON] = self._device.on
if self._device.secondary_temperature is not None:
attr[ATTR_TEMPERATURE] = self._device.secondary_temperature
if isinstance(self._device, Presence):
if self._device.dark is not None:
attr[ATTR_DARK] = self._device.dark
elif isinstance(self._device, Vibration):
attr[ATTR_ORIENTATION] = self._device.orientation
attr[ATTR_TILTANGLE] = self._device.tilt_angle
attr[ATTR_VIBRATIONSTRENGTH] = self._device.vibration_strength
return attr
|
py | 1a37254c541a13717851f52ec8b5a9b6be436637 | """
This file offers the methods to automatically retrieve the graph Chloroflexi bacterium RBG_16_70_13.
The graph is automatically retrieved from the STRING repository.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen import Graph # pylint: disable=import-error
def ChloroflexiBacteriumRbg167013(
directed: bool = False,
preprocess: bool = True,
load_nodes: bool = True,
verbose: int = 2,
cache: bool = True,
cache_path: str = "graphs/string",
version: str = "links.v11.5",
**additional_graph_kwargs: Dict
) -> Graph:
"""Return new instance of the Chloroflexi bacterium RBG_16_70_13 graph.
The graph is automatically retrieved from the STRING repository.
Parameters
-------------------
directed: bool = False
        Whether to load the graph as directed or undirected.
By default false.
preprocess: bool = True
Whether to preprocess the graph to be loaded in
optimal time and memory.
load_nodes: bool = True,
Whether to load the nodes vocabulary or treat the nodes
simply as a numeric range.
verbose: int = 2,
        Whether to show loading bars during the retrieval and building
of the graph.
cache: bool = True
Whether to use cache, i.e. download files only once
and preprocess them only once.
cache_path: str = "graphs"
Where to store the downloaded graphs.
version: str = "links.v11.5"
The version of the graph to retrieve.
The available versions are:
- homology.v11.5
- physical.links.v11.5
- links.v11.5
additional_graph_kwargs: Dict
Additional graph kwargs.
Returns
-----------------------
    Instance of Chloroflexi bacterium RBG_16_70_13 graph.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
return AutomaticallyRetrievedGraph(
graph_name="ChloroflexiBacteriumRbg167013",
repository="string",
version=version,
directed=directed,
preprocess=preprocess,
load_nodes=load_nodes,
verbose=verbose,
cache=cache,
cache_path=cache_path,
additional_graph_kwargs=additional_graph_kwargs
)()
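# A minimal usage sketch (assumed, not part of the original file): calling the
# retrieval function downloads and caches the edge list, then returns an
# ensmallen Graph instance.
#
#   graph = ChloroflexiBacteriumRbg167013(version="links.v11.5")
#   print(graph)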
|
py | 1a37254f04febb7e8f40ca7cfdab12e2462b6cf5 | # Common utility functions used by various script execution tests
# e.g. test_cmd_line, test_cmd_line_script and test_runpy
import sys
import os
import re
import os.path
import tempfile
import subprocess
import py_compile
import contextlib
import shutil
try:
import zipfile
except ImportError:
    # If Python is built without Unicode support, importing _io will
# fail, which, in turn, means that zipfile cannot be imported
# Most of this module can then still be used.
pass
from test.test_support import strip_python_stderr
# Executing the interpreter in a subprocess
def _assert_python(expected_success, *args, **env_vars):
cmd_line = [sys.executable]
if not env_vars:
cmd_line.append('-E')
cmd_line.extend(args)
# Need to preserve the original environment, for in-place testing of
# shared library builds.
env = os.environ.copy()
env.update(env_vars)
p = subprocess.Popen(cmd_line, stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
env=env)
try:
out, err = p.communicate()
finally:
subprocess._cleanup()
p.stdout.close()
p.stderr.close()
rc = p.returncode
err = strip_python_stderr(err)
if (rc and expected_success) or (not rc and not expected_success):
raise AssertionError(
"Process return code is %d, "
"stderr follows:\n%s" % (rc, err.decode('ascii', 'ignore')))
return rc, out, err
def assert_python_ok(*args, **env_vars):
"""
Assert that running the interpreter with `args` and optional environment
variables `env_vars` is ok and return a (return code, stdout, stderr) tuple.
"""
return _assert_python(True, *args, **env_vars)
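# A minimal usage sketch (assumed, not from the original module): run a one-liner
# in a child interpreter and inspect the captured return code and streams.
#
#   rc, out, err = assert_python_ok('-c', 'print("hello")')
#   assert rc == 0 and out.strip() == 'hello'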
def assert_python_failure(*args, **env_vars):
"""
Assert that running the interpreter with `args` and optional environment
variables `env_vars` fails and return a (return code, stdout, stderr) tuple.
"""
return _assert_python(False, *args, **env_vars)
def python_exit_code(*args):
cmd_line = [sys.executable, '-E']
cmd_line.extend(args)
with open(os.devnull, 'w') as devnull:
return subprocess.call(cmd_line, stdout=devnull,
stderr=subprocess.STDOUT)
def spawn_python(*args, **kwargs):
cmd_line = [sys.executable, '-E']
cmd_line.extend(args)
return subprocess.Popen(cmd_line, stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
**kwargs)
def kill_python(p):
p.stdin.close()
data = p.stdout.read()
p.stdout.close()
# try to cleanup the child so we don't appear to leak when running
# with regrtest -R.
p.wait()
subprocess._cleanup()
return data
def run_python(*args, **kwargs):
if __debug__:
p = spawn_python(*args, **kwargs)
else:
p = spawn_python('-O', *args, **kwargs)
stdout_data = kill_python(p)
return p.wait(), stdout_data
# Script creation utilities
@contextlib.contextmanager
def temp_dir():
dirname = tempfile.mkdtemp()
dirname = os.path.realpath(dirname)
try:
yield dirname
finally:
shutil.rmtree(dirname)
def make_script(script_dir, script_basename, source):
script_filename = script_basename+os.extsep+'py'
script_name = os.path.join(script_dir, script_filename)
script_file = open(script_name, 'w')
script_file.write(source)
script_file.close()
return script_name
def compile_script(script_name):
py_compile.compile(script_name, doraise=True)
if __debug__:
compiled_name = script_name + 'c'
else:
compiled_name = script_name + 'o'
return compiled_name
def make_zip_script(zip_dir, zip_basename, script_name, name_in_zip=None):
zip_filename = zip_basename+os.extsep+'zip'
zip_name = os.path.join(zip_dir, zip_filename)
zip_file = zipfile.ZipFile(zip_name, 'w')
if name_in_zip is None:
name_in_zip = os.path.basename(script_name)
zip_file.write(script_name, name_in_zip)
zip_file.close()
#if test.test_support.verbose:
# zip_file = zipfile.ZipFile(zip_name, 'r')
# print 'Contents of %r:' % zip_name
# zip_file.printdir()
# zip_file.close()
return zip_name, os.path.join(zip_name, name_in_zip)
def make_pkg(pkg_dir):
os.mkdir(pkg_dir)
make_script(pkg_dir, '__init__', '')
def make_zip_pkg(zip_dir, zip_basename, pkg_name, script_basename,
source, depth=1, compiled=False):
unlink = []
init_name = make_script(zip_dir, '__init__', '')
unlink.append(init_name)
init_basename = os.path.basename(init_name)
script_name = make_script(zip_dir, script_basename, source)
unlink.append(script_name)
if compiled:
init_name = compile_script(init_name)
script_name = compile_script(script_name)
unlink.extend((init_name, script_name))
pkg_names = [os.sep.join([pkg_name]*i) for i in range(1, depth+1)]
script_name_in_zip = os.path.join(pkg_names[-1], os.path.basename(script_name))
zip_filename = zip_basename+os.extsep+'zip'
zip_name = os.path.join(zip_dir, zip_filename)
zip_file = zipfile.ZipFile(zip_name, 'w')
for name in pkg_names:
init_name_in_zip = os.path.join(name, init_basename)
zip_file.write(init_name, init_name_in_zip)
zip_file.write(script_name, script_name_in_zip)
zip_file.close()
for name in unlink:
os.unlink(name)
#if test.test_support.verbose:
# zip_file = zipfile.ZipFile(zip_name, 'r')
# print 'Contents of %r:' % zip_name
# zip_file.printdir()
# zip_file.close()
return zip_name, os.path.join(zip_name, script_name_in_zip)
|
py | 1a37262adaf1cdebf270afbff388d75573c9733f | # Copyright (C) 2021, Mindee.
# This program is licensed under the Apache License version 2.
# See LICENSE or go to <https://www.apache.org/licenses/LICENSE-2.0.txt> for full license details.
from weasyprint import HTML
from typing import Any
__all__ = ['read_html']
def read_html(url: str, **kwargs: Any) -> bytes:
"""Read a PDF file and convert it into an image in numpy format
Example::
>>> from doctr.documents import read_html
>>> doc = read_html("https://www.yoursite.com")
Args:
url: URL of the target web page
Returns:
        the rendered PDF as a bytes stream
"""
return HTML(url, **kwargs).write_pdf()
|
bzl | 1a3726cf466596d4700f0454ce1911f9128e6e8e | """A module defining the third party dependency zlib"""
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
load("@bazel_tools//tools/build_defs/repo:utils.bzl", "maybe")
def zlib_repositories():
maybe(
http_archive,
name = "zlib",
build_file = Label("@cargo_raze//third_party/zlib:BUILD.zlib.bazel"),
sha256 = "c3e5e9fdd5004dcb542feda5ee4f0ff0744628baf8ed2dd5d66f8ca1197cb1a1",
strip_prefix = "zlib-1.2.11",
urls = [
"https://zlib.net/zlib-1.2.11.tar.gz",
"https://storage.googleapis.com/mirror.tensorflow.org/zlib.net/zlib-1.2.11.tar.gz",
],
)
maybe(
http_archive,
name = "rules_cc",
url = "https://github.com/bazelbuild/rules_cc/archive/624b5d59dfb45672d4239422fa1e3de1822ee110.zip",
sha256 = "8c7e8bf24a2bf515713445199a677ee2336e1c487fa1da41037c6026de04bbc3",
strip_prefix = "rules_cc-624b5d59dfb45672d4239422fa1e3de1822ee110",
type = "zip",
)
|
py | 1a3727124c3e8229a08110d0b686bffab4498c2e | # Copyright (c) 2019 Eric Steinberger
import pdb
import time
from os.path import dirname, abspath
import numpy as np
import sys
from DeepCFR.EvalAgentDeepCFR import EvalAgentDeepCFR
# These two eval agents HAVE TO come from the same training run and iteration for this analysis to make sense.
if len(sys.argv) < 2:
path_to_dcfr_eval_agent = dirname(abspath(__file__)) + "/trained_agents/Example_FHP_SINGLE.pkl"
else:
path_to_dcfr_eval_agent = sys.argv[1]
if len(sys.argv) == 3:
img_name = sys.argv[2]
else:
img_name = ''
N_DECK = 52
N_HOLE = 169 # 13 * 12 + 13
def hand2rep(hand):
card1_rank = hand[0][0]
card1_suit = hand[0][1]
card2_rank = hand[1][0]
card2_suit = hand[1][1]
suited = (card2_suit == card1_suit)
high_rank = max(card1_rank, card2_rank)
low_rank = min(card1_rank, card2_rank)
return (high_rank, low_rank, suited)
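# For example, assuming the env encodes cards as (rank, suit) tuples as used above:
#   hand2rep([(12, 0), (10, 0)]) -> (12, 10, True)   # suited
#   hand2rep([(12, 0), (10, 1)]) -> (12, 10, False)  # offsuit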
#--------------- Generate p0 strat -------------------------
#Loading EvalAgents and checking if hey have same experiment name
eval_agent_dcfr = EvalAgentDeepCFR.load_from_disk(path_to_eval_agent=path_to_dcfr_eval_agent)
#get an env bldr from the agent and create an env
env_bldr = eval_agent_dcfr.env_bldr
env = env_bldr.get_new_env(is_evaluating=False)
start_time = time.time()
hands = {}
while len(hands) < N_HOLE:
obs, rew, done, info = env.reset()
eval_agent_dcfr.reset(deck_state_dict=env.cards_state_dict())
hole_hand = hand2rep(env.seats[0].hand)
if hole_hand not in hands:
hands[hole_hand] = eval_agent_dcfr.get_a_probs()
'''
print(f"Computed {N_HOLE} possible hands in {time.time()-start_time} sec")
for hand in hands.keys():
print(f"for hand: {hand}, the probabilities are {hands[hand]}")
'''
#----------------------------store data for p0
import pickle
f = open(img_name + 'p0_strat.pkl', 'ab')
pickle.dump(hands, f)
f.close()
#----------------------- Generate and Store Image for p0
import plot_strat
plot_strat.np2img(hands,img_name + 'p0_strat_img.png')
#----------------------- Generate Data for p1
eval_agent_dcfr = EvalAgentDeepCFR.load_from_disk(path_to_eval_agent=path_to_dcfr_eval_agent)
env_bldr = eval_agent_dcfr.env_bldr
env = env_bldr.get_new_env(is_evaluating=False)
start_time = time.time()
hands = {}
while len(hands) < N_HOLE:
obs, rew, done, info = env.reset()
eval_agent_dcfr.reset(deck_state_dict=env.cards_state_dict())
obs, rew, done, info = env.step(2)
eval_agent_dcfr.notify_of_action(p_id_acted=0, action_he_did=2)
hole_hand = hand2rep(env.seats[1].hand)
if hole_hand not in hands:
hands[hole_hand] = eval_agent_dcfr.get_a_probs()
#----------------------------store data for p1
import pickle
f = open(img_name + 'p1_strat.pkl', 'ab')
pickle.dump(hands, f)
f.close()
#----------------------- Generate and Store Image for p1
import plot_strat
plot_strat.np2img(hands, img_name + 'p1_strat_img.png')
pdb.set_trace() |
py | 1a3727159612f69b1575c3d47523f5792b5bea78 | # Apache License
#
# Version 2.0, January 2004
#
# http://www.apache.org/licenses/
#
# TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
#
# 1. Definitions.
#
# "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document.
#
# "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License.
#
# "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity.
#
# "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License.
#
# "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files.
#
# "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types.
#
# "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below).
#
# "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof.
#
# "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution."
#
# "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work.
#
# 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form.
#
# 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed.
#
# 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions:
#
# You must give any other recipients of the Work or Derivative Works a copy of this License; and
# You must cause any modified files to carry prominent notices stating that You changed the files; and
# You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and
# If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License.
#
# You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License.
# 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions.
#
# 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file.
#
# 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License.
#
# 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages.
#
# 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability.
#
# END OF TERMS AND CONDITIONS
"""
WiderFace evaluation code
author: wondervictor
mail: [email protected]
copyright@wondervictor
"""
from distutils.core import setup, Extension
from Cython.Build import cythonize
import numpy
package = Extension('bbox', ['box_overlaps.pyx'], include_dirs=[numpy.get_include()])
setup(ext_modules=cythonize([package]))
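# Illustrative build invocation (an assumption, not part of the original script):
#     python setup.py build_ext --inplace
# This compiles box_overlaps.pyx into an importable `bbox` extension module.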
|
py | 1a3727ea030691d9cb294c1fec5f811413f7d1b4 | from build.management.commands.build_nhs import Command as BuildNHS
class Command(BuildNHS):
pass
|
py | 1a3727fd9ae09cedd36ec99b200148e668a0f98e | from PySide2 import QtCore, QtWidgets
from keychain.ui import constants
class SettingsMenu(QtWidgets.QWidget):
def __init__(self, settings, parent=None):
super(SettingsMenu, self).__init__(parent)
self.settings = settings
# self.setWindowFlags(QtCore.Qt.Dialog)
self.setWindowFlags(QtCore.Qt.FramelessWindowHint | QtCore.Qt.Popup)
self.build_ui()
def build_ui(self):
main_layout = QtWidgets.QVBoxLayout()
self.setLayout(main_layout)
for setting, attrs in self.settings.items():
widget = constants.MAPPING[attrs.get("type")]
attr_widget = widget(**attrs)
main_layout.addWidget(attr_widget)
# Connect signal
if hasattr(attr_widget, "value_signal"):
attr_widget.value_signal.value_changed_signal.connect(lambda value, setting=setting: self._on_value_changed(setting, value))
def _on_value_changed(self, item, value):
self.settings.as_dict()[item] = value
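# Illustrative usage sketch (not part of the original module; assumes a settings
# object that exposes ``items()`` and ``as_dict()`` as consumed above):
#     app = QtWidgets.QApplication([])
#     menu = SettingsMenu(my_settings)
#     menu.show()
#     app.exec_()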
|
py | 1a37287432256791578c59686e635cb1facdc7a4 | import streamlit as st
from pathlib import Path
import base64
from modules.toc import *
# Initial page config
page_title ='Postgres Cheatsheet for Python'
# st.set_page_config(
# page_title='Postgres Cheatsheet for Python',
# layout="wide",
# # initial_sidebar_state="expanded",
# )
def img_to_bytes(img_path):
img_bytes = Path(img_path).read_bytes()
encoded = base64.b64encode(img_bytes).decode()
return encoded
##########################
# Main body of cheat sheet
##########################
def cs_body():
page_title = 'Postgres for Python Cheatsheet'
col1, col2= st.columns(2)
col1.subheader('Getting Started')
col1.markdown('''macOS: if you installed Postgres with Homebrew
**Start, Stop, Restart, Login**
```Bash
# START, STOP, RESTART postgres
brew services start postgres
pg_ctl -D /opt/homebrew/var/postgres start
brew services stop postgres
brew services restart postgres
# when starting for a new database
psql postgres
psql postgres -U myuser
# Login to Postgres database
# enters into postgres command line
psql <database>
# POSTGRES login and DB permissions
CREATE ROLE myuser WITH LOGIN;
ALTER ROLE myuser CREATEDB;
# in .env file for NodeJS
PG_CONNECTION_STRING=postgres://myuser@localhost/mydatabase
```
Commands work after logging into postgres
Prompt should be postgres=#
''')
# Display data
col1.subheader('Creating a Table')
col1.markdown('''
```sql
mydb=# CREATE TABLE users (
id BIGSERIAL PRIMARY KEY,
firstName VARCHAR(200) NOT NULL,
middleName VARCHAR(200) DEFAULT NULL,
lastName VARCHAR(200) DEFAULT NULL
);
# Another convention
CREATE TABLE Student (
roll INT,
student_name VARCHAR,
course VARCHAR,
PRIMARY KEY(roll)
);
```
```sql
# Get DB hostname
SELECT boot_val, reset_val
FROM pg_settings
WHERE name = 'listen_addresses';
# Get Ports
SELECT *
FROM pg_settings
WHERE name = 'port';
# FROM BASH GET POSTGRES PORT
sudo netstat -plunt | grep postgres
# changing password for user
# log into postgres then
cd /data
psql postgres postgres
\password <user>
```
''')
# Managing Databases
col1.subheader('Managing Databases')
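# The original leaves this section empty; a minimal sketch of common
# database-management statements (standard Postgres SQL, assumed content):
col1.markdown('''
```sql
-- create and drop a database
CREATE DATABASE mydatabase;
DROP DATABASE mydatabase;
-- rename a database
ALTER DATABASE mydatabase RENAME TO newdatabase;
-- grant privileges to a user
GRANT ALL PRIVILEGES ON DATABASE mydatabase TO myuser;
```
''')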
# Control flow
col2.subheader('Control flow')
col2.code('''
st.stop()
''')
# Lay out your app
col2.subheader('Lay out your app')
col2.code('''
st.form('my_form_identifier')
st.form_submit_button('Submit to me')
st.container()
st.columns(spec)
>>> col1, col2 = st.columns(2)
>>> col1.subheader('Columnisation')
st.expander('Expander')
>>> with st.expander('Expand'):
>>> st.write('Juicy deets')
''')
col2.write('Batch widgets together in a form:')
col2.code('''
>>> with st.form(key='my_form'):
>>> text_input = st.text_input(label='Enter some text')
>>> submit_button = st.form_submit_button(label='Submit')
''')
# Display code
col2.subheader('Display code')
col2.code('''
st.echo()
>>> with st.echo():
>>> st.write('Code will be executed and printed')
''')
# Display progress and status
col2.subheader('Display progress and status')
col2.code('''
st.progress(progress_variable_1_to_100)
st.spinner()
>>> with st.spinner(text='In progress'):
>>> time.sleep(5)
>>> st.success('Done')
st.balloons()
st.error('Error message')
st.warning('Warning message')
st.info('Info message')
st.success('Success message')
st.exception(e)
''')
# Placeholders, help, and options
col2.subheader('Placeholders, help, and options')
col2.code('''
st.empty()
>>> my_placeholder = st.empty()
>>> my_placeholder.text('Replaced!')
st.help(pandas.DataFrame)
st.get_option(key)
st.set_option(key, value)
st.set_page_config(layout='wide')
''')
# Mutate data
col2.subheader('Mutate data')
col2.code('''
DeltaGenerator.add_rows(data)
>>> my_table = st.table(df1)
>>> my_table.add_rows(df2)
>>> my_chart = st.line_chart(df1)
>>> my_chart.add_rows(df2)
''')
# Optimize performance
col2.subheader('Optimize performance')
col2.code('''
@st.cache
>>> @st.cache
... def fetch_and_clean_data(url):
... # Mutate data at url
... return data
>>> # Executes the function the first time
>>> d1 = fetch_and_clean_data(ref1)
>>> # Does not re-execute; returns the cached value, so d1 == d2
>>> d2 = fetch_and_clean_data(ref1)
>>> # Different argument, so the function executes again
>>> d3 = fetch_and_clean_data(ref2)
''')
col2.subheader('Other key parts of the API')
col2.markdown('''
<small>[State API](https://docs.streamlit.io/en/stable/session_state_api.html)</small><br>
<small>[Theme option reference](https://docs.streamlit.io/en/stable/theme_options.html)</small><br>
<small>[Components API reference](https://docs.streamlit.io/en/stable/develop_streamlit_components.html)</small><br>
<small>[API cheat sheet](https://share.streamlit.io/daniellewisdl/streamlit-cheat-sheet/app.py)</small><br>
''', unsafe_allow_html=True)
st.subheader("PSQL CLI Commands")
st.markdown('''
Commands work after logging into postgres
Prompt should be postgres=#
| **Command** | **Description** | **Additional Information** |
| ------------------------------------------------ | ------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------- |
| psql -d database -U user -W | Connects to a database under a specific user | \-d: used to state the database name <br>-U:used to state the database user |
| psql -h host -d database -U user -W | Connect to a database that resides on another host | \-h: used to state the host <br>-d: used to state the database name <br>-U:used to state the database user |
| psql -U user -h host "dbname=db sslmode=require" | Use SSL mode for the connection | \-h: used to state the host <br>-U:used to state the database user |
| \c <dbname> | Switch connection to a new database | |
| CREATE DATABASE <name> | Create a database | |
| \l | List available databases | |
| \d or \d+ | List all tables in database | |
| \dt or \dt+ | List available tables | |
| \d table_name | Describe a table such as a column, type, modifiers of columns, etc. | |
| \dn | List all schemes of the currently connected database | |
| \df | List available functions in the current database | |
| \dv | List available views in the current database | |
| \du                                              | List all users and their assigned roles                              |                                                                                                              |
| SELECT version(); | Retrieve the current version of PostgreSQL server | |
| \g | Execute the last command again | |
| \s | Display command history | |
| \s filename | Save the command history to a file | |
| \i filename | Execute psql commands from a file | |
| ? | Know all available psql commands | |
| \h | Get help | Eg:to get detailed information on ALTER TABLE statement use the \h ALTER TABLE |
| \e | Edit command in your own editor | |
| \ a | Switch from aligned to non-aligned column output | |
| \H | Switch the output to HTML format | |
| \q | Exit psql shell | |
| select pg_gethostname(); | PG Hostname | *BROKEN* |
| \ x                                               | show query output in expanded (pretty) format                        | NOTE: Escape sequence for streamlit                                                                          |
''')
# def main():
def app():
# cs_sidebar()
cs_body()
return None
|
py | 1a37289c56efa00be3daed9606f316a16f6c9df4 | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""The fsl module provides classes for interfacing with the `FSL
<http://www.fmrib.ox.ac.uk/fsl/index.html>`_ command line tools. This
was written to work with FSL version 4.1.4.
Change directory to provide relative paths for doctests
>>> import os
>>> filepath = os.path.dirname( os.path.realpath( __file__ ) )
>>> datadir = os.path.realpath(os.path.join(filepath, '../../testing/data'))
>>> os.chdir(datadir)
"""
import os, os.path as op
import warnings
import numpy as np
from nipype.interfaces.fsl.base import FSLCommand, FSLCommandInputSpec
from nipype.interfaces.base import (TraitedSpec, File, InputMultiPath,
                                    OutputMultiPath, Undefined, traits,
                                    isdefined)
from nipype.utils.filemanip import split_filename
from nibabel import load
warn = warnings.warn
warnings.filterwarnings('always', category=UserWarning)
class BETInputSpec(FSLCommandInputSpec):
# We use position args here as list indices - so a negative number
# will put something on the end
in_file = File(exists=True,
desc='input file to skull strip',
argstr='%s', position=0, mandatory=True)
out_file = File(desc='name of output skull stripped image',
argstr='%s', position=1, genfile=True, hash_files=False)
outline = traits.Bool(desc='create surface outline image',
argstr='-o')
mask = traits.Bool(desc='create binary mask image',
argstr='-m')
skull = traits.Bool(desc='create skull image',
argstr='-s')
no_output = traits.Bool(argstr='-n',
desc="Don't generate segmented output")
frac = traits.Float(desc='fractional intensity threshold',
argstr='-f %.2f')
vertical_gradient = traits.Float(argstr='-g %.2f',
desc='vertical gradient in fractional intensity ' \
'threshold (-1, 1)')
radius = traits.Int(argstr='-r %d', units='mm',
desc="head radius")
center = traits.List(traits.Int, desc='center of gravity in voxels',
argstr='-c %s', minlen=0, maxlen=3,
units='voxels')
threshold = traits.Bool(argstr='-t',
desc="apply thresholding to segmented brain image and mask")
mesh = traits.Bool(argstr='-e',
desc="generate a vtk mesh brain surface")
# the remaining 'options' are more like modes (mutually exclusive) that
# FSL actually implements in a shell script wrapper around the bet binary.
# for some combinations of them in specific order a call would not fail,
# but in general using more than one of the following is clearly not
# supported
_xor_inputs = ('functional', 'reduce_bias', 'robust', 'padding',
'remove_eyes', 'surfaces', 't2_guided')
robust = traits.Bool(desc='robust brain centre estimation ' \
'(iterates BET several times)',
argstr='-R', xor=_xor_inputs)
padding = traits.Bool(desc='improve BET if FOV is very small in Z ' \
'(by temporarily padding end slices)',
argstr='-Z', xor=_xor_inputs)
remove_eyes = traits.Bool(desc='eye & optic nerve cleanup (can be ' \
'useful in SIENA)',
argstr='-S', xor=_xor_inputs)
surfaces = traits.Bool(desc='run bet2 and then betsurf to get additional ' \
'skull and scalp surfaces (includes ' \
'registrations)',
argstr='-A', xor=_xor_inputs)
t2_guided = File(desc='as with creating surfaces, when also feeding in ' \
'non-brain-extracted T2 (includes registrations)',
argstr='-A2 %s', xor=_xor_inputs)
functional = traits.Bool(argstr='-F', xor=_xor_inputs,
desc="apply to 4D fMRI data")
reduce_bias = traits.Bool(argstr='-B', xor=_xor_inputs,
desc="bias field and neck cleanup")
class BETOutputSpec(TraitedSpec):
out_file = File(desc="path/name of skullstripped file")
mask_file = File(
desc="path/name of binary brain mask (if generated)")
outline_file = File(
desc="path/name of outline file (if generated)")
meshfile = File(
desc="path/name of vtk mesh file (if generated)")
class BET(FSLCommand):
"""Use FSL BET command for skull stripping.
For complete details, see the `BET Documentation.
<http://www.fmrib.ox.ac.uk/fsl/bet2/index.html>`_
Examples
--------
>>> from nipype.interfaces import fsl
>>> from nipype.testing import example_data
>>> btr = fsl.BET()
>>> btr.inputs.in_file = example_data('structural.nii')
>>> btr.inputs.frac = 0.7
>>> res = btr.run() # doctest: +SKIP
"""
_cmd = 'bet'
input_spec = BETInputSpec
output_spec = BETOutputSpec
def _run_interface(self, runtime):
# The returncode is meaningless in BET. So check the output
# in stderr and if it's set, then update the returncode
# accordingly.
runtime = super(BET, self)._run_interface(runtime)
if runtime.stderr:
self.raise_exception(runtime)
return runtime
def _gen_outfilename(self):
out_file = self.inputs.out_file
if not isdefined(out_file) and isdefined(self.inputs.in_file):
out_file = self._gen_fname(self.inputs.in_file,
suffix='_brain')
return os.path.abspath(out_file)
def _list_outputs(self):
outputs = self.output_spec().get()
outputs['out_file'] = self._gen_outfilename()
if isdefined(self.inputs.mesh) and self.inputs.mesh:
outputs['meshfile'] = self._gen_fname(outputs['out_file'],
suffix='_mesh.vtk',
change_ext=False)
if (isdefined(self.inputs.mask) and self.inputs.mask) or \
(isdefined(self.inputs.reduce_bias) and \
self.inputs.reduce_bias):
outputs['mask_file'] = self._gen_fname(outputs['out_file'],
suffix='_mask')
if isdefined(self.inputs.outline) and self.inputs.outline:
outputs['outline_file'] = self._gen_fname(outputs['out_file'],
suffix='_overlay')
if isdefined(self.inputs.no_output) and self.inputs.no_output:
outputs['out_file'] = Undefined
return outputs
def _gen_filename(self, name):
if name == 'out_file':
return self._gen_outfilename()
return None
class FASTInputSpec(FSLCommandInputSpec):
""" Defines inputs (trait classes) for FAST """
in_files = InputMultiPath(File(exists=True), copyfile=False,
desc='image, or multi-channel set of images, ' \
'to be segmented',
argstr='%s', position=-1, mandatory=True)
out_basename = File(desc='base name of output files',
argstr='-o %s') # uses in_file name as basename if none given
number_classes = traits.Range(low=1, high=10, argstr='-n %d',
desc='number of tissue-type classes')
output_biasfield = traits.Bool(desc='output estimated bias field',
argstr='-b')
output_biascorrected = traits.Bool(desc='output restored image ' \
'(bias-corrected image)',
argstr='-B')
img_type = traits.Enum((1, 2, 3), desc='int specifying type of image: ' \
'(1 = T1, 2 = T2, 3 = PD)',
argstr='-t %d')
bias_iters = traits.Range(low=1, high=10, argstr='-I %d',
desc='number of main-loop iterations during ' \
'bias-field removal')
bias_lowpass = traits.Range(low=4, high=40,
desc='bias field smoothing extent (FWHM) ' \
'in mm',
argstr='-l %d', units='mm')
init_seg_smooth = traits.Range(low=0.0001, high=0.1,
desc='initial segmentation spatial ' \
'smoothness (during bias field ' \
'estimation)',
argstr='-f %.3f')
segments = traits.Bool(desc='outputs a separate binary image for each ' \
'tissue type',
argstr='-g')
init_transform = File(exists=True, desc='<standard2input.mat> initialise'\
' using priors',
argstr='-a %s')
other_priors = InputMultiPath(File(exists=True), desc='alternative prior images',
argstr='-A %s', minlen=3, maxlen=3)
no_pve = traits.Bool(desc='turn off PVE (partial volume estimation)',
argstr='--nopve')
no_bias = traits.Bool(desc='do not remove bias field',
argstr='-N')
use_priors = traits.Bool(desc='use priors throughout',
argstr='-P') # must also set -a!,
# mutually inclusive??
# No, conditional
# mandatory... need to
# figure out how to
# handle with traits.
segment_iters = traits.Range(low=1, high=50,
desc='number of segmentation-initialisation'\
' iterations',
argstr='-W %d')
mixel_smooth = traits.Range(low=0.0, high=1.0,
desc='spatial smoothness for mixeltype',
argstr='-R %.2f')
iters_afterbias = traits.Range(low=1, high=20,
desc='number of main-loop iterations ' \
'after bias-field removal',
argstr='-O %d')
hyper = traits.Range(low=0.0, high=1.0,
desc='segmentation spatial smoothness',
argstr='-H %.2f')
verbose = traits.Bool(desc='switch on diagnostic messages',
argstr='-v')
manual_seg = File(exists=True, desc='Filename containing intensities',
argstr='-s %s')
probability_maps = traits.Bool(desc='outputs individual probability maps',
argstr='-p')
class FASTOutputSpec(TraitedSpec):
"""Specify possible outputs from FAST"""
tissue_class_map = File(exists=True,
desc='path/name of binary segmented volume file' \
' one val for each class _seg')
tissue_class_files = OutputMultiPath(File(desc='path/name of binary segmented volumes ' \
'one file for each class _seg_x'))
restored_image = OutputMultiPath(File(desc='restored images (one for each input image) ' \
'named according to the input images _restore'))
mixeltype = File(desc="path/name of mixeltype volume file _mixeltype")
partial_volume_map = File(desc="path/name of partial volume file _pveseg")
partial_volume_files = OutputMultiPath(File(desc='path/name of partial volumes files ' \
'one for each class, _pve_x'))
bias_field = OutputMultiPath(File(desc='Estimated bias field _bias'))
probability_maps = OutputMultiPath(File(desc='filenames, one for each class, for each ' \
'input, prob_x'))
class FAST(FSLCommand):
""" Use FSL FAST for segmenting and bias correction.
For complete details, see the `FAST Documentation.
<http://www.fmrib.ox.ac.uk/fsl/fast4/index.html>`_
Examples
--------
>>> from nipype.interfaces import fsl
>>> from nipype.testing import example_data
Assign options through the ``inputs`` attribute:
>>> fastr = fsl.FAST()
>>> fastr.inputs.in_files = example_data('structural.nii')
>>> out = fastr.run() #doctest: +SKIP
"""
_cmd = 'fast'
input_spec = FASTInputSpec
output_spec = FASTOutputSpec
def _format_arg(self, name, spec, value):
# first do what should be done in general
formated = super(FAST, self)._format_arg(name, spec, value)
if name == 'in_files':
# FAST needs the -S parameter value to correspond to the number
# of input images, otherwise it will ignore all but the first
formated = "-S %d %s" % (len(value), formated)
return formated
def _list_outputs(self):
outputs = self.output_spec().get()
if not isdefined(self.inputs.number_classes):
nclasses = 3
else:
nclasses = self.inputs.number_classes
# when using multichannel, results basename is based on last
# input filename
if isdefined(self.inputs.out_basename):
basefile = self.inputs.out_basename
else:
basefile = self.inputs.in_files[-1]
outputs['tissue_class_map'] = self._gen_fname(basefile,
suffix='_seg')
if self.inputs.segments:
outputs['tissue_class_files'] = []
for i in range(nclasses):
outputs['tissue_class_files'].append(
self._gen_fname(basefile, suffix='_seg_%d' % i))
if isdefined(self.inputs.output_biascorrected):
outputs['restored_image'] = []
if len(self.inputs.in_files) > 1:
# for multi-image segmentation there is one corrected image
# per input
for val, f in enumerate(self.inputs.in_files):
# image numbering is 1-based
outputs['restored_image'].append(
self._gen_fname(basefile, suffix='_restore_%d' % (val + 1)))
else:
# single image segmentation has unnumbered output image
outputs['restored_image'].append(
self._gen_fname(basefile, suffix='_restore'))
outputs['mixeltype'] = self._gen_fname(basefile, suffix='_mixeltype')
if not self.inputs.no_pve:
outputs['partial_volume_map'] = self._gen_fname(basefile, suffix='_pveseg')
outputs['partial_volume_files'] = []
for i in range(nclasses):
outputs['partial_volume_files'].append(self._gen_fname(basefile,
suffix='_pve_%d' % i))
if self.inputs.output_biasfield:
outputs['bias_field'] = []
if len(self.inputs.in_files) > 1:
# for multi-image segmentation there is one bias field image
# per input
for val, f in enumerate(self.inputs.in_files):
# image numbering is 1-based
outputs['bias_field'].append(
self._gen_fname(basefile, suffix='_bias_%d' % (val + 1)))
else:
# single image segmentation has unnumbered output image
outputs['bias_field'].append(
self._gen_fname(basefile, suffix='_bias'))
if self.inputs.probability_maps:
outputs['probability_maps'] = []
for i in range(nclasses):
outputs['probability_maps'].append(
self._gen_fname(basefile, suffix='_prob_%d' % i))
return outputs
class FLIRTInputSpec(FSLCommandInputSpec):
in_file = File(exists=True, argstr='-in %s', mandatory=True,
position=0, desc='input file')
# XXX Not clear if position is required for mandatory flirt inputs
# since they are prefixed with argstrs. But doing it to follow
# our previous convention and so we can test the generated command
# line.
reference = File(exists=True, argstr='-ref %s', mandatory=True,
position=1, desc='reference file')
out_file = File(argstr='-out %s', desc='registered output file',
genfile=True, position=2, hash_files=False)
out_matrix_file = File(argstr='-omat %s',
                       desc='output affine matrix in 4x4 ascii format',
genfile=True, position=3, hash_files=False)
in_matrix_file = File(argstr='-init %s', desc='input 4x4 affine matrix')
apply_xfm = traits.Bool(argstr='-applyxfm', requires=['in_matrix_file'],
desc='apply transformation supplied by in_matrix_file')
datatype = traits.Enum('char', 'short', 'int', 'float', 'double',
argstr='-datatype %s',
desc='force output data type')
cost = traits.Enum('mutualinfo', 'corratio', 'normcorr', 'normmi',
'leastsq', 'labeldiff',
argstr='-cost %s',
desc='cost function')
# XXX What is the difference between 'cost' and 'searchcost'? Are
# these both necessary or do they map to the same variable.
cost_func = traits.Enum('mutualinfo', 'corratio', 'normcorr', 'normmi',
'leastsq', 'labeldiff',
argstr='-searchcost %s',
desc='cost function')
uses_qform = traits.Bool(argstr='-usesqform',
desc='initialize using sform or qform')
display_init = traits.Bool(argstr='-displayinit',
desc='display initial matrix')
angle_rep = traits.Enum('quaternion', 'euler',
argstr='-anglerep %s',
desc='representation of rotation angles')
interp = traits.Enum('trilinear', 'nearestneighbour', 'sinc','spline',
argstr='-interp %s',
desc='final interpolation method used in reslicing')
sinc_width = traits.Int(argstr='-sincwidth %d', units='voxels',
desc='full-width in voxels')
sinc_window = traits.Enum('rectangular', 'hanning', 'blackman',
argstr='-sincwindow %s',
desc='sinc window') # XXX better doc
bins = traits.Int(argstr='-bins %d', desc='number of histogram bins')
dof = traits.Int(argstr='-dof %d',
desc='number of transform degrees of freedom')
no_resample = traits.Bool(argstr='-noresample',
desc='do not change input sampling')
force_scaling = traits.Bool(argstr='-forcescaling',
desc='force rescaling even for low-res images')
min_sampling = traits.Float(argstr='-minsampling %f', units='mm',
desc='set minimum voxel dimension for sampling')
padding_size = traits.Int(argstr='-paddingsize %d', units='voxels',
desc='for applyxfm: interpolates outside image '\
'by size')
searchr_x = traits.List(traits.Int, minlen=2, maxlen=2, units='degrees',
argstr='-searchrx %s',
desc='search angles along x-axis, in degrees')
searchr_y = traits.List(traits.Int, minlen=2, maxlen=2, units='degrees',
argstr='-searchry %s',
desc='search angles along y-axis, in degrees')
searchr_z = traits.List(traits.Int, minlen=2, maxlen=2, units='degrees',
argstr='-searchrz %s',
desc='search angles along z-axis, in degrees')
no_search = traits.Bool(argstr='-nosearch',
desc='set all angular searches to ranges 0 to 0')
coarse_search = traits.Int(argstr='-coarsesearch %d', units='degrees',
desc='coarse search delta angle')
fine_search = traits.Int(argstr='-finesearch %d', units='degrees',
desc='fine search delta angle')
schedule = File(exists=True, argstr='-schedule %s',
desc='replaces default schedule')
ref_weight = File(exists=True, argstr='-refweight %s',
desc='File for reference weighting volume')
in_weight = File(exists=True, argstr='-inweight %s',
desc='File for input weighting volume')
no_clamp = traits.Bool(argstr='-noclamp',
desc='do not use intensity clamping')
no_resample_blur = traits.Bool(argstr='-noresampblur',
desc='do not use blurring on downsampling')
rigid2D = traits.Bool(argstr='-2D',
desc='use 2D rigid body mode - ignores dof')
verbose = traits.Int(argstr='-verbose %d',
desc='verbose mode, 0 is least')
class FLIRTOutputSpec(TraitedSpec):
out_file = File(exists=True,
desc='path/name of registered file (if generated)')
out_matrix_file = File(exists=True,
desc='path/name of calculated affine transform ' \
'(if generated)')
class FLIRT(FSLCommand):
"""Use FSL FLIRT for coregistration.
For complete details, see the `FLIRT Documentation.
<http://www.fmrib.ox.ac.uk/fsl/flirt/index.html>`_
To print out the command line help, use:
fsl.FLIRT().inputs_help()
Examples
--------
>>> from nipype.interfaces import fsl
>>> from nipype.testing import example_data
>>> flt = fsl.FLIRT(bins=640, cost_func='mutualinfo')
>>> flt.inputs.in_file = example_data('structural.nii')
>>> flt.inputs.reference = example_data('mni.nii')
>>> flt.inputs.out_file = 'moved_subject.nii'
>>> flt.inputs.out_matrix_file = 'subject_to_template.mat'
>>> res = flt.run() #doctest: +SKIP
"""
_cmd = 'flirt'
input_spec = FLIRTInputSpec
output_spec = FLIRTOutputSpec
def _list_outputs(self):
outputs = self.output_spec().get()
outputs['out_file'] = self.inputs.out_file
# Generate an out_file if one is not provided
if not isdefined(outputs['out_file']):
outputs['out_file'] = self._gen_fname(self.inputs.in_file,
suffix='_flirt')
outputs['out_file'] = os.path.abspath(outputs['out_file'])
outputs['out_matrix_file'] = self.inputs.out_matrix_file
# Generate an out_matrix file if one is not provided
if not isdefined(outputs['out_matrix_file']):
outputs['out_matrix_file'] = self._gen_fname(self.inputs.in_file,
suffix='_flirt.mat',
change_ext=False)
outputs['out_matrix_file'] = os.path.abspath(outputs['out_matrix_file'])
return outputs
def _gen_filename(self, name):
if name in ('out_file', 'out_matrix_file'):
return self._list_outputs()[name]
else:
return None
class ApplyXfm(FLIRT):
"""Currently just a light wrapper around FLIRT,
with no modifications
ApplyXfm is used to apply an existing transform to an image
Examples
--------
>>> import nipype.interfaces.fsl as fsl
>>> from nipype.testing import example_data
>>> applyxfm = fsl.ApplyXfm()
>>> applyxfm.inputs.in_file = example_data('structural.nii')
>>> applyxfm.inputs.in_matrix_file = example_data('trans.mat')
>>> applyxfm.inputs.out_file = 'newfile.nii'
>>> applyxfm.inputs.reference = example_data('mni.nii')
>>> applyxfm.inputs.apply_xfm = True
>>> result = applyxfm.run() # doctest: +SKIP
"""
pass
class MCFLIRTInputSpec(FSLCommandInputSpec):
in_file = File(exists=True, position=0, argstr="-in %s", mandatory=True,
desc="timeseries to motion-correct")
out_file = File(argstr='-out %s', genfile=True,
desc="file to write", hash_files=False)
cost = traits.Enum('mutualinfo', 'woods', 'corratio', 'normcorr', 'normmi', 'leastsquares',
argstr='-cost %s', desc="cost function to optimize")
bins = traits.Int(argstr='-bins %d', desc="number of histogram bins")
dof = traits.Int(argstr='-dof %d', desc="degrees of freedom for the transformation")
ref_vol = traits.Int(argstr='-refvol %d', desc="volume to align frames to")
scaling = traits.Float(argstr='-scaling %.2f', desc="scaling factor to use")
smooth = traits.Float(argstr='-smooth %.2f', desc="smoothing factor for the cost function")
rotation = traits.Int(argstr='-rotation %d', desc="scaling factor for rotation tolerances")
stages = traits.Int(argstr='-stages %d',
desc="stages (if 4, perform final search with sinc interpolation")
init = File(exists=True, argstr='-init %s', desc="initial transformation matrix")
interpolation = traits.Enum("spline", "nn", "sinc", argstr="-%s_final",
desc="interpolation method for transformation")
use_gradient = traits.Bool(argstr='-gdt', desc="run search on gradient images")
use_contour = traits.Bool(argstr='-edge', desc="run search on contour images")
mean_vol = traits.Bool(argstr='-meanvol', desc="register to mean volume")
stats_imgs = traits.Bool(argstr='-stats', desc="produce variance and std. dev. images")
save_mats = traits.Bool(argstr='-mats', desc="save transformation matrices")
save_plots = traits.Bool(argstr='-plots', desc="save transformation parameters")
save_rms = traits.Bool(argstr='-rmsabs -rmsrel', desc="save rms displacement parameters")
ref_file = File(exists=True, argstr='-reffile %s', desc="target image for motion correction")
class MCFLIRTOutputSpec(TraitedSpec):
out_file = File(exists=True, desc="motion-corrected timeseries")
variance_img = File(exists=True, desc="variance image")
std_img = File(exists=True, desc="standard deviation image")
mean_img = File(exists=True, desc="mean timeseries image")
par_file = File(exists=True, desc="text-file with motion parameters")
mat_file = OutputMultiPath(File(exists=True), desc="transformation matrices")
rms_files = OutputMultiPath(File(exists=True),
desc="absolute and relative displacement parameters")
class MCFLIRT(FSLCommand):
"""Use FSL MCFLIRT to do within-modality motion correction.
For complete details, see the `MCFLIRT Documentation.
<http://www.fmrib.ox.ac.uk/fsl/mcflirt/index.html>`_
Examples
--------
>>> from nipype.interfaces import fsl
>>> from nipype.testing import example_data
>>> mcflt = fsl.MCFLIRT(in_file=example_data('functional.nii'), cost='mutualinfo')
>>> res = mcflt.run() # doctest: +SKIP
"""
_cmd = 'mcflirt'
input_spec = MCFLIRTInputSpec
output_spec = MCFLIRTOutputSpec
def _format_arg(self, name, spec, value):
if name == "interpolation":
if value == "trilinear":
return ""
else:
return spec.argstr % value
return super(MCFLIRT, self)._format_arg(name, spec, value)
def _list_outputs(self):
cwd = os.getcwd()
outputs = self._outputs().get()
outputs['out_file'] = self._gen_outfilename()
if isdefined(self.inputs.stats_imgs) and self.inputs.stats_imgs:
outputs['variance_img'] = self._gen_fname(outputs['out_file'] + \
'_variance.ext', cwd=cwd)
outputs['std_img'] = self._gen_fname(outputs['out_file'] + \
'_sigma.ext', cwd=cwd)
outputs['mean_img'] = self._gen_fname(outputs['out_file'] + \
'_meanvol.ext', cwd=cwd)
if isdefined(self.inputs.save_mats) and self.inputs.save_mats:
_, filename = os.path.split(outputs['out_file'])
matpathname = os.path.join(cwd, filename + '.mat')
_, _, _, timepoints = load(self.inputs.in_file).get_shape()
outputs['mat_file'] = []
for t in range(timepoints):
outputs['mat_file'].append(os.path.join(matpathname,
'MAT_%04d' % t))
if isdefined(self.inputs.save_plots) and self.inputs.save_plots:
# Note - if e.g. out_file has .nii.gz, you get .nii.gz.par,
# which is what mcflirt does!
outputs['par_file'] = outputs['out_file'] + '.par'
if isdefined(self.inputs.save_rms) and self.inputs.save_rms:
outfile = outputs['out_file']
outputs['rms_files'] = [outfile + '_abs.rms', outfile + '_rel.rms']
return outputs
def _gen_filename(self, name):
if name == 'out_file':
return self._gen_outfilename()
return None
def _gen_outfilename(self):
out_file = self.inputs.out_file
if isdefined(out_file):
out_file = os.path.realpath(out_file)
if not isdefined(out_file) and isdefined(self.inputs.in_file):
out_file = self._gen_fname(self.inputs.in_file,
suffix='_mcf')
return os.path.abspath(out_file)
class FNIRTInputSpec(FSLCommandInputSpec):
ref_file = File(exists=True, argstr='--ref=%s', mandatory=True,
desc='name of reference image')
in_file = File(exists=True, argstr='--in=%s', mandatory=True,
desc='name of input image')
affine_file = File(exists=True, argstr='--aff=%s',
desc='name of file containing affine transform')
inwarp_file = File(exists=True, argstr='--inwarp=%s',
desc='name of file containing initial non-linear warps')
in_intensitymap_file = File(exists=True, argstr='--intin=%s',
                            desc='name of file/files containing initial intensity mapping '\
                            'usually generated by previous fnirt run')
fieldcoeff_file = traits.Either(traits.Bool, File, argstr='--cout=%s',
desc='name of output file with field coefficients or true')
warped_file = File(argstr='--iout=%s',
desc='name of output image', genfile=True, hash_files=False)
field_file = traits.Either(traits.Bool, File,
argstr='--fout=%s',
desc='name of output file with field or true', hash_files=False)
jacobian_file = traits.Either(traits.Bool, File,
argstr='--jout=%s',
                              desc='name of file for writing out the Jacobian '\
'of the field (for diagnostic or VBM purposes)', hash_files=False)
modulatedref_file = traits.Either(traits.Bool, File,
argstr='--refout=%s',
                              desc='name of file for writing out intensity modulated '\
'--ref (for diagnostic purposes)', hash_files=False)
out_intensitymap_file = traits.Either(traits.Bool, File,
argstr='--intout=%s',
desc='name of files for writing information pertaining '\
'to intensity mapping', hash_files=False)
log_file = File(argstr='--logout=%s',
desc='Name of log-file', genfile=True, hash_files=False)
config_file = File(exists=True, argstr='--config=%s',
desc='Name of config file specifying command line arguments')
refmask_file = File(exists=True, argstr='--refmask=%s',
desc='name of file with mask in reference space')
inmask_file = File(exists=True, argstr='--inmask=%s',
desc='name of file with mask in input image space')
skip_refmask = traits.Bool(argstr='--applyrefmask=0', xor=['apply_refmask'],
desc='Skip specified refmask if set, default false')
skip_inmask = traits.Bool(argstr='--applyinmask=0', xor=['apply_inmask'],
desc='skip specified inmask if set, default false')
apply_refmask = traits.List(traits.Enum(0, 1), argstr='--applyrefmask=%s', xor=['skip_refmask'],
desc='list of iterations to use reference mask on (1 to use, 0 to skip)', sep=",")
apply_inmask = traits.List(traits.Enum(0, 1), argstr='--applyinmask=%s', xor=['skip_inmask'],
desc='list of iterations to use input mask on (1 to use, 0 to skip)', sep=",")
skip_implicit_ref_masking = traits.Bool(argstr='--imprefm 0',
                            desc='skip implicit masking based on value '\
'in --ref image. Default = 0')
skip_implicit_in_masking = traits.Bool(argstr='--impinm 0',
                            desc='skip implicit masking based on value '\
'in --in image. Default = 0')
refmask_val = traits.Float(argstr='--imprefval=%f',
desc='Value to mask out in --ref image. Default =0.0')
inmask_val = traits.Float(argstr='--impinval=%f',
desc='Value to mask out in --in image. Default =0.0')
max_nonlin_iter = traits.List(traits.Int,
argstr='--miter=%s',
desc='Max # of non-linear iterations list, default [5, 5, 5, 5]', sep=",")
subsampling_scheme = traits.List(traits.Int,
argstr='--subsamp=%s',
desc='sub-sampling scheme, list, default [4, 2, 1, 1]',
sep=",")
warp_resolution = traits.Tuple(traits.Int, traits.Int, traits.Int,
argstr='--warpres=%d,%d,%d',
desc='(approximate) resolution (in mm) of warp basis '\
'in x-, y- and z-direction, default 10, 10, 10')
spline_order = traits.Int(argstr='--splineorder=%d',
                          desc='Order of spline, 2->Quadratic spline, 3->Cubic spline. Default=3')
in_fwhm = traits.List(traits.Int, argstr='--infwhm=%s',
desc='FWHM (in mm) of gaussian smoothing kernel for input volume, default [6, 4, 2, 2]', sep=",")
ref_fwhm = traits.List(traits.Int, argstr='--reffwhm=%s',
desc='FWHM (in mm) of gaussian smoothing kernel for ref volume, default [4, 2, 0, 0]', sep=",")
regularization_model = traits.Enum('membrane_energy', 'bending_energy',
argstr='--regmod=%s',
desc='Model for regularisation of warp-field [membrane_energy bending_energy], default bending_energy')
regularization_lambda = traits.List(traits.Float, argstr='--lambda=%s',
desc='Weight of regularisation, default depending on --ssqlambda and --regmod '\
                            'switches. See user documentation.', sep=",")
skip_lambda_ssq = traits.Bool(argstr='--ssqlambda 0',
desc='If true, lambda is not weighted by current ssq, default false')
jacobian_range = traits.Tuple(traits.Float, traits.Float,
argstr='--jacrange=%f,%f',
desc='Allowed range of Jacobian determinants, default 0.01, 100.0')
derive_from_ref = traits.Bool(argstr='--refderiv',
desc='If true, ref image is used to calculate derivatives. Default false')
intensity_mapping_model = traits.Enum('none', 'global_linear', 'global_non_linear',
'local_linear', 'global_non_linear_with_bias',
'local_non_linear', argstr='--intmod=%s',
desc='Model for intensity-mapping')
intensity_mapping_order = traits.Int(argstr='--intorder=%d',
                                     desc='Order of polynomial for mapping intensities, default 5')
biasfield_resolution = traits.Tuple(traits.Int, traits.Int, traits.Int,
argstr='--biasres=%d,%d,%d',
desc='Resolution (in mm) of bias-field modelling '\
'local intensities, default 50, 50, 50')
bias_regularization_lambda = traits.Float(argstr='--biaslambda=%f',
desc='Weight of regularisation for bias-field, default 10000')
skip_intensity_mapping = traits.Bool(argstr='--estint=0', xor=['apply_intensity_mapping'],
desc='Skip estimate intensity-mapping default false')
apply_intensity_mapping = traits.List(traits.Enum(0, 1), argstr='--estint=%s', xor=['skip_intensity_mapping'],
desc='List of subsampling levels to apply intensity mapping for (0 to skip, 1 to apply)', sep=",")
hessian_precision = traits.Enum('double', 'float', argstr='--numprec=%s',
desc='Precision for representing Hessian, double or float. Default double')
class FNIRTOutputSpec(TraitedSpec):
fieldcoeff_file = File(exists=True, desc='file with field coefficients')
warped_file = File(exists=True, desc='warped image')
field_file = File(desc='file with warp field')
jacobian_file = File(desc='file containing Jacobian of the field')
modulatedref_file = File(desc='file containing intensity modulated --ref')
out_intensitymap_file = File(\
desc='file containing info pertaining to intensity mapping')
log_file = File(desc='Name of log-file')
class FNIRT(FSLCommand):
"""Use FSL FNIRT for non-linear registration.
Examples
--------
>>> from nipype.interfaces import fsl
>>> from nipype.testing import example_data
>>> fnt = fsl.FNIRT(affine_file=example_data('trans.mat'))
>>> res = fnt.run(ref_file=example_data('mni.nii'), in_file=example_data('structural.nii')) #doctest: +SKIP
T1 -> Mni153
>>> from nipype.interfaces import fsl
>>> fnirt_mprage = fsl.FNIRT()
>>> fnirt_mprage.inputs.in_fwhm = [8, 4, 2, 2]
>>> fnirt_mprage.inputs.subsampling_scheme = [4, 2, 1, 1]
Specify the resolution of the warps
>>> fnirt_mprage.inputs.warp_resolution = (6, 6, 6)
>>> res = fnirt_mprage.run(in_file='structural.nii', ref_file='mni.nii', warped_file='warped.nii', fieldcoeff_file='fieldcoeff.nii')#doctest: +SKIP
We can check the command line and confirm that it's what we expect.
>>> fnirt_mprage.cmdline #doctest: +SKIP
'fnirt --cout=fieldcoeff.nii --in=structural.nii --infwhm=8,4,2,2 --ref=mni.nii --subsamp=4,2,1,1 --warpres=6,6,6 --iout=warped.nii'
"""
_cmd = 'fnirt'
input_spec = FNIRTInputSpec
output_spec = FNIRTOutputSpec
filemap = {'warped_file': 'warped',
'field_file': 'field',
'jacobian_file': 'field_jacobian',
'modulatedref_file': 'modulated',
'out_intensitymap_file': 'intmap',
'log_file': 'log.txt',
'fieldcoeff_file': 'fieldwarp'}
def _list_outputs(self):
outputs = self.output_spec().get()
for key, suffix in self.filemap.items():
inval = getattr(self.inputs, key)
change_ext = True
if key in ['warped_file', 'log_file']:
if suffix.endswith('.txt'):
change_ext = False
if isdefined(inval):
outputs[key] = inval
else:
outputs[key] = self._gen_fname(self.inputs.in_file,
suffix='_' + suffix,
change_ext=change_ext)
elif isdefined(inval):
if isinstance(inval, bool):
if inval:
outputs[key] = self._gen_fname(self.inputs.in_file,
suffix='_' + suffix,
change_ext=change_ext)
else:
outputs[key] = os.path.abspath(inval)
return outputs
def _format_arg(self, name, spec, value):
if name in self.filemap.keys():
return spec.argstr % self._list_outputs()[name]
return super(FNIRT, self)._format_arg(name, spec, value)
def _gen_filename(self, name):
if name in ['warped_file', 'log_file']:
return self._list_outputs()[name]
return None
def write_config(self, configfile):
"""Writes out currently set options to specified config file
XX TODO : need to figure out how the config file is written
Parameters
----------
configfile : /path/to/configfile
"""
try:
fid = open(configfile, 'w+')
except IOError:
print('unable to create config_file %s' % configfile)
return
for item in self.inputs.get().items():
fid.write('%s\n' % (item))
fid.close()
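# Illustrative use of write_config (the filename is hypothetical):
#     fnt = FNIRT()
#     fnt.inputs.in_fwhm = [8, 4, 2, 2]
#     fnt.write_config('my_fnirt_options.cnf')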
class ApplyWarpInputSpec(FSLCommandInputSpec):
in_file = File(exists=True, argstr='-i %s',
mandatory=True,position=-4,
desc='image to be warped')
out_file = File(argstr='-o %s', genfile=True,
desc='output filename', position=-3, hash_files=False)
ref_file = File(exists=True, argstr='-r %s',position=-2,
mandatory=True,
desc='reference image')
field_file = File(exists=True, argstr='-w %s', position=-1,
desc='file containing warp field')
abswarp = traits.Bool(argstr='--abs', xor=['relwarp'],
desc="treat warp field as absolute: x' = w(x)")
relwarp = traits.Bool(argstr='--rel', xor=['abswarp'],
desc="treat warp field as relative: x' = x + w(x)")
datatype = traits.Enum('char', 'short', 'int', 'float', 'double',
argstr='--datatype %s',
desc='Force output data type [char short int float double].')
supersample = traits.Bool(argstr='--super',
desc='intermediary supersampling of output, default is off')
superlevel = traits.Either(traits.Enum('a'), traits.Int,
argstr='--superlevel %s',
desc="level of intermediary supersampling, a for 'automatic' or integer level. Default = 2")
premat = File(exists=True, argstr='--premat %s',
desc='filename for pre-transform (affine matrix)')
postmat = File(exists=True, argstr='--postmat %s',
desc='filename for post-transform (affine matrix)')
mask_file = File(exists=True, argstr='--mask %s',
desc='filename for mask image (in reference space)')
interp = traits.Enum('nn', 'trilinear', 'sinc', 'spline', argstr='--interp %s',
desc='interpolation method')
class ApplyWarpOutputSpec(TraitedSpec):
out_file = File(exists=True, desc='Warped output file')
class ApplyWarp(FSLCommand):
"""Use FSL's applywarp to apply the results of a FNIRT registration
Examples
--------
>>> from nipype.interfaces import fsl
>>> from nipype.testing import example_data
>>> aw = fsl.ApplyWarp()
>>> aw.inputs.in_file = example_data('structural.nii')
>>> aw.inputs.ref_file = example_data('mni.nii')
>>> aw.inputs.field_file = 'my_coefficients_filed.nii' #doctest: +SKIP
>>> res = aw.run() #doctest: +SKIP
"""
_cmd = 'applywarp'
input_spec = ApplyWarpInputSpec
output_spec = ApplyWarpOutputSpec
def _format_arg(self, name, spec, value):
if name == 'superlevel':
return spec.argstr % str(value)
return super(ApplyWarp, self)._format_arg(name, spec, value)
def _list_outputs(self):
outputs = self._outputs().get()
if not isdefined(self.inputs.out_file):
outputs['out_file'] = self._gen_fname(self.inputs.in_file,
suffix='_warp')
else:
outputs['out_file'] = os.path.abspath(self.inputs.out_file)
return outputs
def _gen_filename(self, name):
if name == 'out_file':
return self._list_outputs()[name]
return None
class SliceTimerInputSpec(FSLCommandInputSpec):
in_file = File(exists=True, argstr='--in=%s',
mandatory=True, position=0,
desc='filename of input timeseries')
out_file = File(argstr='--out=%s', genfile=True,
desc='filename of output timeseries', hash_files=False)
index_dir = traits.Bool(argstr='--down',
desc='slice indexing from top to bottom')
time_repetition = traits.Float(argstr='--repeat=%f',
desc='Specify TR of data - default is 3s')
slice_direction = traits.Enum(1, 2, 3, argstr='--direction=%d',
desc='direction of slice acquisition (x=1, y=2, z=3) - default is z')
interleaved = traits.Bool(argstr='--odd',
desc='use interleaved acquisition')
custom_timings = File(exists=True, argstr='--tcustom=%s',
desc='slice timings, in fractions of TR, range 0:1 (default is 0.5 = no shift)')
global_shift = traits.Float(argstr='--tglobal',
desc='shift in fraction of TR, range 0:1 (default is 0.5 = no shift)')
custom_order = File(exists=True, argstr='--ocustom=%s',
desc='filename of single-column custom interleave order file (first slice is referred to as 1 not 0)')
class SliceTimerOutputSpec(TraitedSpec):
slice_time_corrected_file = File(exists=True, desc='slice time corrected file')
class SliceTimer(FSLCommand):
""" use FSL slicetimer to perform slice timing correction.
Examples
--------
>>> from nipype.interfaces import fsl
>>> from nipype.testing import example_data
>>> st = fsl.SliceTimer()
>>> st.inputs.in_file = example_data('functional.nii')
>>> st.inputs.interleaved = True
>>> result = st.run() #doctest: +SKIP
"""
_cmd = 'slicetimer'
input_spec = SliceTimerInputSpec
output_spec = SliceTimerOutputSpec
def _list_outputs(self):
outputs = self._outputs().get()
out_file = self.inputs.out_file
if not isdefined(out_file):
out_file = self._gen_fname(self.inputs.in_file,
suffix='_st')
outputs['slice_time_corrected_file'] = os.path.abspath(out_file)
return outputs
def _gen_filename(self, name):
if name == 'out_file':
return self._list_outputs()['slice_time_corrected_file']
return None
class SUSANInputSpec(FSLCommandInputSpec):
in_file = File(exists=True, argstr='%s',
mandatory=True, position=1,
desc='filename of input timeseries')
brightness_threshold = traits.Float(argstr='%.10f',
position=2, mandatory=True,
desc='brightness threshold and should be greater than '
'noise level and less than contrast of edges to '
'be preserved.')
fwhm = traits.Float(argstr='%.10f',
position=3, mandatory=True,
desc='fwhm of smoothing, in mm, gets converted using sqrt(8*log(2))')
dimension = traits.Enum(3, 2, argstr='%d', position=4, usedefault=True,
desc='within-plane (2) or fully 3D (3)')
use_median = traits.Enum(1, 0, argstr='%d', position=5, usedefault=True,
desc='whether to use a local median filter in the cases where single-point noise is detected')
usans = traits.List(traits.Tuple(File(exists=True), traits.Float), maxlen=2,
argstr='', position=6, default=[], usedefault=True,
desc='determines whether the smoothing area (USAN) is to be '
'found from secondary images (0, 1 or 2). A negative '
'value for any brightness threshold will auto-set the '
'threshold at 10% of the robust range')
out_file = File(argstr='%s', position=-1, genfile=True,
desc='output file name', hash_files=False)
class SUSANOutputSpec(TraitedSpec):
smoothed_file = File(exists=True, desc='smoothed output file')
class SUSAN(FSLCommand):
""" use FSL SUSAN to perform smoothing
Examples
--------
>>> from nipype.interfaces import fsl
>>> from nipype.testing import example_data
>>> print anatfile #doctest: +SKIP
anatomical.nii #doctest: +SKIP
>>> sus = fsl.SUSAN()
>>> sus.inputs.in_file = example_data('structural.nii')
>>> sus.inputs.brightness_threshold = 2000.0
>>> sus.inputs.fwhm = 8.0
>>> result = sus.run() #doctest: +SKIP
"""
_cmd = 'susan'
input_spec = SUSANInputSpec
output_spec = SUSANOutputSpec
def _format_arg(self, name, spec, value):
if name == 'fwhm':
return spec.argstr % (float(value) / np.sqrt(8 * np.log(2)))
if name == 'usans':
if not value:
return '0'
arglist = [str(len(value))]
for filename, thresh in value:
arglist.extend([filename, '%.10f' % thresh])
return ' '.join(arglist)
return super(SUSAN, self)._format_arg(name, spec, value)
def _list_outputs(self):
outputs = self._outputs().get()
out_file = self.inputs.out_file
if not isdefined(out_file):
out_file = self._gen_fname(self.inputs.in_file,
suffix='_smooth')
outputs['smoothed_file'] = os.path.abspath(out_file)
return outputs
def _gen_filename(self, name):
if name == 'out_file':
return self._list_outputs()['smoothed_file']
return None
class FUGUEInputSpec(FSLCommandInputSpec):
in_file = File(exists=True, argstr='--in=%s',
desc='filename of input volume')
unwarped_file = File(argstr='--unwarp=%s', genfile=True,
desc='apply unwarping and save as filename', hash_files=False)
phasemap_file = File(exists=True, argstr='--phasemap=%s',
desc='filename for input phase image')
dwell_to_asym_ratio = traits.Float(argstr='--dwelltoasym=%.10f',
desc='set the dwell to asym time ratio')
dwell_time = traits.Float(argstr='--dwell=%.10f',
desc='set the EPI dwell time per phase-encode line - same as echo spacing - (sec)')
asym_se_time = traits.Float(argstr='--asym=%.10f',
desc='set the fieldmap asymmetric spin echo time (sec)')
fmap_out_file = File(argstr='--savefmap=%s',
desc='filename for saving fieldmap (rad/s)', hash_files=False)
fmap_in_file = File(exists=True, argstr='--loadfmap=%s',
desc='filename for loading fieldmap (rad/s)')
shift_out_file = File(argstr='--saveshift=%s',
desc='filename for saving pixel shift volume', hash_files=False)
shift_in_file = File(exists=True, argstr='--loadshift=%s',
desc='filename for reading pixel shift volume')
median_2dfilter = traits.Bool(argstr='--median',
desc='apply 2D median filtering')
despike_2dfilter = traits.Bool(argstr='--despike',
desc='apply a 2D de-spiking filter')
no_gap_fill = traits.Bool(argstr='--nofill',
desc='do not apply gap-filling measure to the fieldmap')
no_extend = traits.Bool(argstr='--noextend',
desc='do not apply rigid-body extrapolation to the fieldmap')
smooth2d = traits.Float(argstr='--smooth2=%.2f',
desc='apply 2D Gaussian smoothing of sigma N (in mm)')
smooth3d = traits.Float(argstr='--smooth3=%.2f',
desc='apply 3D Gaussian smoothing of sigma N (in mm)')
poly_order = traits.Int(argstr='--poly=%d',
desc='apply polynomial fitting of order N')
fourier_order = traits.Int(argstr='--fourier=%d',
desc='apply Fourier (sinusoidal) fitting of order N')
pava = traits.Bool(argstr='--pava',
desc='apply monotonic enforcement via PAVA')
despike_theshold = traits.Float(argstr='--despikethreshold=%s',
desc='specify the threshold for de-spiking (default=3.0)')
unwarp_direction = traits.Enum('x', 'y', 'z', 'x-', 'y-', 'z-',
argstr='--unwarpdir=%s',
desc='specifies direction of warping (default y)')
phase_conjugate = traits.Bool(argstr='--phaseconj',
desc='apply phase conjugate method of unwarping')
icorr = traits.Bool(argstr='--icorr', requires=['shift_in_file'],
desc='apply intensity correction to unwarping (pixel shift method only)')
icorr_only = traits.Bool(argstr='--icorronly', requires=['unwarped_file'],
desc='apply intensity correction only')
mask_file = File(exists=True, argstr='--mask=%s',
desc='filename for loading valid mask')
save_unmasked_fmap = traits.Either(traits.Bool,
traits.File,
argstr='--unmaskfmap=%s',
requires=['fmap_out_file'],
desc='saves the unmasked fieldmap when using --savefmap', hash_files=False)
save_unmasked_shift = traits.Either(traits.Bool,
traits.File,
argstr='--unmaskshift=%s',
requires=['shift_out_file'],
desc='saves the unmasked shiftmap when using --saveshift', hash_files=False)
nokspace = traits.Bool(argstr='--nokspace', desc='do not use k-space forward warping')
class FUGUEOutputSpec(TraitedSpec):
unwarped_file = File(exists=True, desc='unwarped file')
class FUGUE(FSLCommand):
"""Use FSL FUGUE to unwarp epi's with fieldmaps
Examples
--------
Please insert examples for use of this command
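Until proper examples are added, a minimal illustrative sketch, assuming a
pixel-shift volume computed earlier (filenames below are placeholders, not
shipped test data):
>>> from nipype.interfaces import fsl
>>> fugue = fsl.FUGUE()
>>> fugue.inputs.in_file = 'epi.nii'
>>> fugue.inputs.shift_in_file = 'vsm.nii'
>>> fugue.inputs.unwarp_direction = 'y'
>>> res = fugue.run() #doctest: +SKIP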
"""
_cmd = 'fugue'
input_spec = FUGUEInputSpec
output_spec = FUGUEOutputSpec
def __init__(self, **kwargs):
super(FUGUE, self).__init__(**kwargs)
warn('This interface has not been fully tested. Please report any failures.')
def _list_outputs(self):
outputs = self._outputs().get()
out_file = self.inputs.unwarped_file
if not isdefined(out_file):
out_file = self._gen_fname(self.inputs.in_file,
suffix='_unwarped')
outputs['unwarped_file'] = os.path.abspath(out_file)
return outputs
def _gen_filename(self, name):
if name == 'unwarped_file':
return self._list_outputs()['unwarped_file']
return None
class PRELUDEInputSpec(FSLCommandInputSpec):
complex_phase_file = File(exists=True, argstr='--complex=%s',
mandatory=True, xor=['magnitude_file', 'phase_file'],
desc='complex phase input volume')
magnitude_file = File(exists=True, argstr='--abs=%s',
mandatory=True,
xor=['complex_phase_file'],
desc='file containing magnitude image')
phase_file = File(exists=True, argstr='--phase=%s',
mandatory=True,
xor=['complex_phase_file'],
desc='raw phase file')
unwrapped_phase_file = File(genfile=True,
argstr='--unwrap=%s',
                                desc='file containing unwrapped phase', hash_files=False)
num_partitions = traits.Int(argstr='--numphasesplit=%d',
desc='number of phase partitions to use')
labelprocess2d = traits.Bool(argstr='--labelslices',
desc='does label processing in 2D (slice at a time)')
process2d = traits.Bool(argstr='--slices',
xor=['labelprocess2d'],
desc='does all processing in 2D (slice at a time)')
process3d = traits.Bool(argstr='--force3D',
xor=['labelprocess2d', 'process2d'],
desc='forces all processing to be full 3D')
threshold = traits.Float(argstr='--thresh=%.10f',
desc='intensity threshold for masking')
mask_file = File(exists=True, argstr='--mask=%s',
desc='filename of mask input volume')
start = traits.Int(argstr='--start=%d',
desc='first image number to process (default 0)')
end = traits.Int(argstr='--end=%d',
desc='final image number to process (default Inf)')
savemask_file = File(argstr='--savemask=%s',
desc='saving the mask volume', hash_files=False)
rawphase_file = File(argstr='--rawphase=%s',
desc='saving the raw phase output', hash_files=False)
label_file = File(argstr='--labels=%s',
desc='saving the area labels output', hash_files=False)
removeramps = traits.Bool(argstr='--removeramps',
desc='remove phase ramps during unwrapping')
class PRELUDEOutputSpec(TraitedSpec):
unwrapped_phase_file = File(exists=True,
desc='unwrapped phase file')
class PRELUDE(FSLCommand):
"""Use FSL prelude to do phase unwrapping
Examples
--------
    A minimal, untested sketch of typical usage (the filenames are hypothetical placeholders):
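    >>> from nipype.interfaces import fsl
    >>> prelude = fsl.PRELUDE()
    >>> prelude.inputs.phase_file = 'phase.nii'
    >>> prelude.inputs.magnitude_file = 'magnitude.nii'
    >>> res = prelude.run() #doctest: +SKIP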
"""
input_spec = PRELUDEInputSpec
output_spec = PRELUDEOutputSpec
_cmd = 'prelude'
def __init__(self, **kwargs):
super(PRELUDE, self).__init__(**kwargs)
warn('This has not been fully tested. Please report any failures.')
def _list_outputs(self):
outputs = self._outputs().get()
out_file = self.inputs.unwrapped_phase_file
if not isdefined(out_file):
if isdefined(self.inputs.phase_file):
out_file = self._gen_fname(self.inputs.phase_file,
suffix='_unwrapped')
elif isdefined(self.inputs.complex_phase_file):
out_file = self._gen_fname(self.inputs.complex_phase_file,
suffix='_phase_unwrapped')
outputs['unwrapped_phase_file'] = os.path.abspath(out_file)
return outputs
def _gen_filename(self, name):
if name == 'unwrapped_phase_file':
return self._list_outputs()['unwrapped_phase_file']
return None
class FIRSTInputSpec(FSLCommandInputSpec):
in_file = File(exists=True, mandatory=True, position=-2,
argstr='-i %s',
desc='input data file')
out_file = File('segmented', usedefault=True, mandatory=True, position=-1,
argstr='-o %s',
desc='output data file', hash_files=False)
verbose = traits.Bool(argstr='-v', position=1,
desc="Use verbose logging.")
brain_extracted = traits.Bool(argstr='-b', position=2,
desc="Input structural image is already brain-extracted")
no_cleanup = traits.Bool(argstr='-d', position=3,
desc="Input structural image is already brain-extracted")
method = traits.Enum('auto', 'fast', 'none',
xor=['method_as_numerical_threshold'],
argstr='-m', position=4,
desc=("Method must be one of auto, fast, none, or it can be entered "
"using the 'method_as_numerical_threshold' input"))
method_as_numerical_threshold = traits.Float(argstr='-m', position=4,
desc=("Specify a numerical threshold value or use the 'method' input "
"to choose auto, fast, or none"))
list_of_specific_structures = traits.List(traits.Str, argstr='-s %s',
sep=',', position=5, minlen=1,
                              desc='Runs only on the specified structures (e.g. L_Hipp, R_Hipp, '
                                   'L_Accu, R_Accu, L_Amyg, R_Amyg, '
                                   'L_Caud, R_Caud, L_Pall, R_Pall, '
                                   'L_Puta, R_Puta, L_Thal, R_Thal, BrStem)')
affine_file = File(exists=True, position=6,
argstr='-a %s',
desc=('Affine matrix to use (e.g. img2std.mat) (does not '
're-run registration)'))
class FIRSTOutputSpec(TraitedSpec):
vtk_surfaces = OutputMultiPath(File(exists=True),
desc='VTK format meshes for each subcortical region')
bvars = OutputMultiPath(File(exists=True),
desc='bvars for each subcortical region')
original_segmentations = File(exists=True,
desc=('3D image file containing the segmented regions as integer '
'values. Uses CMA labelling'))
segmentation_file = File(exists=True,
desc='4D image file containing a single volume per segmented region')
class FIRST(FSLCommand):
"""Use FSL's run_first_all command to segment subcortical volumes
http://www.fmrib.ox.ac.uk/fsl/first/index.html
Examples
--------
>>> from nipype.interfaces import fsl
>>> first = fsl.FIRST()
>>> first.inputs.in_file = 'structural.nii'
>>> first.inputs.out_file = 'segmented.nii'
>>> res = first.run() #doctest: +SKIP
"""
_cmd = 'run_first_all'
input_spec = FIRSTInputSpec
output_spec = FIRSTOutputSpec
def _list_outputs(self):
outputs = self.output_spec().get()
if isdefined(self.inputs.list_of_specific_structures):
structures = self.inputs.list_of_specific_structures
else:
structures = ['L_Hipp', 'R_Hipp',
'L_Accu', 'R_Accu',
'L_Amyg', 'R_Amyg',
'L_Caud', 'R_Caud',
'L_Pall', 'R_Pall',
'L_Puta', 'R_Puta',
'L_Thal', 'R_Thal',
'BrStem']
outputs['original_segmentations'] = \
self._gen_fname('original_segmentations')
outputs['segmentation_file'] = self._gen_fname('segmentation_file')
outputs['vtk_surfaces'] = self._gen_mesh_names('vtk_surfaces',
structures)
outputs['bvars'] = self._gen_mesh_names('bvars', structures)
return outputs
def _gen_fname(self, name):
path, outname, ext = split_filename(self.inputs.out_file)
if name == 'original_segmentations':
return op.abspath(outname + '_all_fast_origsegs.nii.gz')
if name == 'segmentation_file':
return op.abspath(outname + '_all_fast_firstseg.nii.gz')
return None
def _gen_mesh_names(self, name, structures):
path, prefix, ext = split_filename(self.inputs.out_file)
if name == 'vtk_surfaces':
vtks = list()
for struct in structures:
vtk = prefix + '-' + struct + '_first.vtk'
vtks.append(op.abspath(vtk))
return vtks
if name == 'bvars':
bvars = list()
for struct in structures:
bvar = prefix + '-' + struct + '_first.bvars'
bvars.append(op.abspath(bvar))
return bvars
return None
|
py | 1a372994f7c714a802305a9560372e19bdadc56d | # -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2018-2019 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""This test module contains the tests for the `aea gui` sub-commands."""
import io
import os
import shutil
import tempfile
import aea.cli_gui
def create_app():
"""Create a debug version of the flask app for testing against."""
app = aea.cli_gui.run_test()
app.debug = True
app.testing = True
return app
class DummyPID:
"""Mimics the behaviour of a process id."""
def __init__(self, return_code, stdout_str, stderr_str):
"""Initialise the class."""
self.return_code = return_code
self.stdout = io.BytesIO(stdout_str.encode(encoding='UTF-8'))
self.stderr = io.BytesIO(stderr_str.encode(encoding='UTF-8'))
def poll(self):
"""Mimic the process id poll function."""
return self.return_code
class TempCWD:
"""Create a temporary current working directory."""
def __init__(self):
"""Initialise the class."""
self.temp_dir = tempfile.mkdtemp()
self.cwd = os.getcwd()
os.chdir(self.temp_dir)
def destroy(self):
"""Destroy the cwd and restore the old one."""
os.chdir(self.cwd)
try:
shutil.rmtree(self.temp_dir)
except (OSError, IOError):
pass
|
py | 1a372a2ac2968dbb8d99494b5dd8231da8037483 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: deconv.py
# Author: Qian Ge <[email protected]>
import os
import scipy.misc
import argparse
import numpy as np
import tensorflow as tf
from tensorcv.dataflow.image import ImageFromFile
import config_path as config
import sys
sys.path.append('../')
from lib.nets.vgg import DeconvBaseVGG19, BaseVGG19
import lib.utils.viz as viz
import lib.utils.normalize as normlize
import lib.utils.image as uim
IM_SIZE = 224
def get_parse():
parser = argparse.ArgumentParser()
parser.add_argument('--imtype', type=str, default='.jpg',
help='Image type')
parser.add_argument('--feat', type=str, required=True,
help='Choose of feature map layer')
parser.add_argument('--id', type=int, default=None,
help='feature map id')
return parser.parse_args()
def im_scale(im):
return uim.im_rescale(im, [IM_SIZE, IM_SIZE])
if __name__ == '__main__':
FLAGS = get_parse()
input_im = ImageFromFile(FLAGS.imtype,
data_dir=config.im_path,
num_channel=3,
shuffle=False,
pf=im_scale,
)
input_im.set_batch_size(1)
vizmodel = DeconvBaseVGG19(config.vgg_path,
feat_key=FLAGS.feat,
pick_feat=FLAGS.id)
vizmap = vizmodel.layers['deconvim']
feat_op = vizmodel.feats
max_act_op = vizmodel.max_act
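    # receptive-field size and stride of the chosen layer (assumed to be exposed by
    # DeconvBaseVGG19); used below to map an activation coordinate back to input pixels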
act_size = vizmodel.receptive_size[FLAGS.feat]
act_scale = vizmodel.stride[FLAGS.feat]
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
max_act_list = []
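        # first pass: record the maximum activation of the chosen feature map for every image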
while input_im.epochs_completed < 1:
im = input_im.next_batch()[0]
max_act = sess.run(max_act_op, feed_dict={vizmodel.im: im})
max_act_list.append(max_act)
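        # rank the images by their strongest activation, descending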
max_list = np.argsort(max_act_list)[::-1]
im_file_list = input_im.get_data_list()[0]
feat_list = []
im_list = []
for i in range(0, 10):
im = input_im.next_batch()[0]
file_path = os.path.join(config.im_path, im_file_list[max_list[i]])
im = np.array([im_scale(scipy.misc.imread(file_path, mode='RGB'))])
cur_vizmap, feat_map, max_act = sess.run(
[vizmap, feat_op, max_act_op], feed_dict={vizmodel.im: im})
act_ind = np.nonzero((feat_map))
print('Location of max activation {}'.format(act_ind))
# get only the first nonzero element
act_c = (act_ind[1][0], act_ind[2][0])
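            # crop the input image and the deconvolution map to the receptive field around
            # the max activation, clipping at the image borders and padding back to act_size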
min_x = max(0, int(act_c[0] * act_scale - act_size / 2))
max_x = min(IM_SIZE, int(act_c[0] * act_scale + act_size / 2))
min_y = max(0, int(act_c[1] * act_scale - act_size / 2))
max_y = min(IM_SIZE, int(act_c[1] * act_scale + act_size / 2))
im_crop = im[0, min_x:max_x, min_y:max_y, :]
act_crop = cur_vizmap[0, min_x:max_x, min_y:max_y, :]
pad_size = (act_size - im_crop.shape[0], act_size - im_crop.shape[1])
im_crop = np.pad(im_crop,
((0, pad_size[0]), (0, pad_size[1]), (0, 0)),
'constant',
constant_values=0)
act_crop = np.pad(act_crop,
((0, pad_size[0]),(0, pad_size[1]), (0, 0)),
'constant',
constant_values=0)
feat_list.append(act_crop)
im_list.append(im_crop)
viz.viz_filters(np.transpose(feat_list, (1, 2, 3, 0)),
[3, 3],
os.path.join(config.save_path, '{}_feat.png'.format(FLAGS.feat)),
gap=2,
gap_color=0,
nf=normlize.indentity,
shuffle=False)
viz.viz_filters(np.transpose(im_list, (1, 2, 3, 0)),
[3, 3],
os.path.join(config.save_path, '{}_im.png'.format(FLAGS.feat)),
gap=2,
gap_color=0,
nf=normlize.indentity,
shuffle=False)
|
py | 1a372b46d03b320a5acb8bdd0eb9155b76e12ff2 | import subprocess
import typer
from typer.testing import CliRunner
from docs_src.options.name import tutorial005 as mod
runner = CliRunner()
app = typer.Typer()
app.command()(mod.main)
def test_option_help():
result = runner.invoke(app, ["--help"])
assert result.exit_code == 0
assert "-n, --name TEXT" in result.output
assert "-f, --formal" in result.output
def test_call():
result = runner.invoke(app, ["-n", "Camila"])
assert result.exit_code == 0
assert "Hello Camila" in result.output
def test_call_formal():
result = runner.invoke(app, ["-n", "Camila", "-f"])
assert result.exit_code == 0
assert "Good day Ms. Camila." in result.output
def test_call_formal_condensed():
result = runner.invoke(app, ["-fn", "Camila"])
assert result.exit_code == 0
assert "Good day Ms. Camila." in result.output
def test_call_condensed_wrong_order():
result = runner.invoke(app, ["-nf", "Camila"])
assert result.exit_code != 0
def test_script():
result = subprocess.run(
["coverage", "run", mod.__file__, "--help"],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
encoding="utf-8",
)
assert "Usage" in result.stdout
|
py | 1a372d2afc56e3ffb153b6ff2e519db66c099e2e | import csv
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from sklearn.metrics import classification_report
#from dbn import SupervisedDBNClassification
from sklearn.model_selection import cross_val_score
from sklearn.ensemble import RandomForestClassifier
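# loaddata: read a feature CSV whose last column is the integer class label; the first (header) row is skipped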
def loaddata(filename,instanceCol):
file_reader = csv.reader(open(filename,'r'),delimiter=',')
x = []
y = []
for row in file_reader:
x.append(row[0:instanceCol])
y.append(row[-1])
    return np.array(x[1:]).astype(np.float32), np.array(y[1:]).astype(int)  # skip the CSV header row
def modeldata(filename):
scores = []
print(filename)
X,Y = loaddata(filename, 99)
for i in range(3):
#print('Cross ' + str(i))
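        # note: random_state is fixed, so each of the three iterations uses the identical 70/30 split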
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.3, random_state=42)
# relu, sigmoid
classifier = RandomForestClassifier(max_depth=2, random_state=0)
classifier.fit(X_train, Y_train)
Y_pred = classifier.predict(X_test)
scores.append(accuracy_score(Y_test, Y_pred))
#print(classification_report(Y_test, Y_pred))
print('All Accuracy Scores in Cross: ' + str(scores))
print('Mean Accuracy Scores: ' + str(np.mean(scores)))
if __name__ == '__main__':
modeldata('D:\\Databases\\PDA\\CSV\\feature(MFCC-70-30-1400b).csv')
modeldata('D:\\Databases\\PDA\\CSV\\feature(FBank-70-30-1400b).csv')
modeldata('D:\\Databases\\PDA\\CSV\\feature(LogFBank-70-30-1400b).csv')
modeldata('D:\\Databases\\PDA\\CSV\\feature(Fractal-70-30-1400b).csv')
|
py | 1a372d4ad63624b40e06c604b467735cf26eac92 | num1 = int( input() )
if num1 > 5:
print('첫째')
elif num1 > 3:
print('둘째')
elif num1 > 1:
print('셋째')
|