| repo_name (string, 5-92 chars) | path (string, 4-232 chars) | copies (19 classes) | size (string, 4-7 chars) | content (string, 721-1.04M chars) | license (15 classes) | hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B) | line_mean (float64, 6.51-99.9) | line_max (int64, 15-997) | alpha_frac (float64, 0.25-0.97) | autogenerated (bool, 1 class) |
---|---|---|---|---|---|---|---|---|---|---|
HewlettPackard/oneview-ansible | library/oneview_network_set_facts.py | 1 | 4474 | #!/usr/bin/python
# -*- coding: utf-8 -*-
###
# Copyright (2016-2020) Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: oneview_network_set_facts
short_description: Retrieve facts about the OneView Network Sets
description:
- Retrieve facts about the Network Sets from OneView.
version_added: "2.4"
requirements:
- hpeOneView >= 5.4.0
author:
- Felipe Bulsoni (@fgbulsoni)
- Thiago Miotto (@tmiotto)
- Adriane Cardozo (@adriane-cardozo)
options:
name:
description:
- Network Set name.
options:
description:
- "List with options to gather facts about Network Set.
Option allowed: C(withoutEthernet).
The option C(withoutEthernet) retrieves the list of network_sets excluding Ethernet networks."
extends_documentation_fragment:
- oneview
- oneview.factsparams
'''
EXAMPLES = '''
- name: Gather facts about all Network Sets
oneview_network_set_facts:
hostname: 172.16.101.48
username: administrator
password: my_password
api_version: 1200
no_log: true
delegate_to: localhost
- debug: var=network_sets
- name: Gather paginated, filtered, and sorted facts about Network Sets
oneview_network_set_facts:
hostname: 172.16.101.48
username: administrator
password: my_password
api_version: 1200
params:
start: 0
count: 3
sort: 'name:descending'
filter: name='netset001'
no_log: true
delegate_to: localhost
- debug: var=network_sets
- name: Gather facts about all Network Sets, excluding Ethernet networks
oneview_network_set_facts:
hostname: 172.16.101.48
username: administrator
password: my_password
api_version: 1200
options:
- withoutEthernet
no_log: true
delegate_to: localhost
- debug: var=network_sets
- name: Gather facts about a Network Set by name
oneview_network_set_facts:
hostname: 172.16.101.48
username: administrator
password: my_password
api_version: 1200
name: Name of the Network Set
no_log: true
delegate_to: localhost
- debug: var=network_sets
- name: Gather facts about a Network Set by name, excluding Ethernet networks
oneview_network_set_facts:
hostname: 172.16.101.48
username: administrator
password: my_password
api_version: 1200
name: Name of the Network Set
options:
- withoutEthernet
no_log: true
delegate_to: localhost
- debug: var=network_sets
'''
RETURN = '''
network_sets:
description: Has all the OneView facts about the Network Sets.
returned: Always, but can be empty.
type: dict
'''
from ansible.module_utils.oneview import OneViewModule
class NetworkSetFactsModule(OneViewModule):
argument_spec = dict(
name=dict(type='str'),
options=dict(type='list'),
params=dict(type='dict'),
)
def __init__(self):
super(NetworkSetFactsModule, self).__init__(additional_arg_spec=self.argument_spec)
self.set_resource_object(self.oneview_client.network_sets)
def execute_module(self):
name = self.module.params.get('name')
if 'withoutEthernet' in self.options:
filter_by_name = ("\"'name'='%s'\"" % name) if name else ''
network_sets = self.resource_client.get_all_without_ethernet(filter=filter_by_name)
elif name:
network_sets = self.resource_client.get_by('name', name)
else:
network_sets = self.resource_client.get_all(**self.facts_params)
return dict(changed=False,
ansible_facts=dict(network_sets=network_sets))
def main():
NetworkSetFactsModule().run()
if __name__ == '__main__':
main()
| apache-2.0 | -7,717,356,519,994,723,000 | 25.790419 | 104 | 0.673894 | false |
anntzer/scikit-learn | sklearn/utils/_estimator_html_repr.py | 1 | 9497 | from contextlib import closing
from contextlib import suppress
from io import StringIO
from string import Template
import uuid
import html
from sklearn import config_context
class _VisualBlock:
"""HTML Representation of Estimator
Parameters
----------
kind : {'serial', 'parallel', 'single'}
kind of HTML block
estimators : list of estimators or `_VisualBlock`s or a single estimator
If kind != 'single', then `estimators` is a list of
estimators.
If kind == 'single', then `estimators` is a single estimator.
names : list of str, default=None
If kind != 'single', then `names` corresponds to estimators.
If kind == 'single', then `names` is a single string corresponding to
the single estimator.
name_details : list of str, str, or None, default=None
If kind != 'single', then `name_details` corresponds to `names`.
If kind == 'single', then `name_details` is a single string
corresponding to the single estimator.
dash_wrapped : bool, default=True
If true, wrapped HTML element will be wrapped with a dashed border.
Only active when kind != 'single'.
"""
def __init__(self, kind, estimators, *, names=None, name_details=None,
dash_wrapped=True):
self.kind = kind
self.estimators = estimators
self.dash_wrapped = dash_wrapped
if self.kind in ('parallel', 'serial'):
if names is None:
names = (None, ) * len(estimators)
if name_details is None:
name_details = (None, ) * len(estimators)
self.names = names
self.name_details = name_details
def _sk_visual_block_(self):
return self
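# --- Illustrative sketch (not part of scikit-learn's public API) ---
# Shows how a composite display could be described by hand: a 'parallel'
# block whose children are two 'single' blocks. The estimator names used
# here are placeholders chosen only for the example.
def _demo_visual_block():
    lr_block = _VisualBlock('single', 'LogisticRegression()',
                            names='LogisticRegression',
                            name_details='LogisticRegression(C=1.0)')
    tree_block = _VisualBlock('single', 'DecisionTreeClassifier()',
                              names='DecisionTreeClassifier',
                              name_details='DecisionTreeClassifier()')
    return _VisualBlock('parallel', [lr_block, tree_block],
                        names=['lr', 'tree'])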
def _write_label_html(out, name, name_details,
outer_class="sk-label-container",
inner_class="sk-label",
checked=False):
"""Write labeled html with or without a dropdown with named details"""
out.write(f'<div class="{outer_class}">'
f'<div class="{inner_class} sk-toggleable">')
name = html.escape(name)
if name_details is not None:
checked_str = 'checked' if checked else ''
est_id = uuid.uuid4()
out.write(f'<input class="sk-toggleable__control sk-hidden--visually" '
f'id="{est_id}" type="checkbox" {checked_str}>'
f'<label class="sk-toggleable__label" for="{est_id}">'
f'{name}</label>'
f'<div class="sk-toggleable__content"><pre>{name_details}'
f'</pre></div>')
else:
out.write(f'<label>{name}</label>')
out.write('</div></div>') # outer_class inner_class
def _get_visual_block(estimator):
"""Generate information about how to display an estimator.
"""
with suppress(AttributeError):
return estimator._sk_visual_block_()
if isinstance(estimator, str):
return _VisualBlock('single', estimator,
names=estimator, name_details=estimator)
elif estimator is None:
return _VisualBlock('single', estimator,
names='None', name_details='None')
# check if estimator looks like a meta estimator wraps estimators
if hasattr(estimator, 'get_params'):
estimators = []
for key, value in estimator.get_params().items():
# Only look at the estimators in the first layer
if '__' not in key and hasattr(value, 'get_params'):
estimators.append(value)
if len(estimators):
return _VisualBlock('parallel', estimators, names=None)
return _VisualBlock('single', estimator,
names=estimator.__class__.__name__,
name_details=str(estimator))
def _write_estimator_html(out, estimator, estimator_label,
estimator_label_details, first_call=False):
"""Write estimator to html in serial, parallel, or by itself (single).
"""
if first_call:
est_block = _get_visual_block(estimator)
else:
with config_context(print_changed_only=True):
est_block = _get_visual_block(estimator)
if est_block.kind in ('serial', 'parallel'):
dashed_wrapped = first_call or est_block.dash_wrapped
dash_cls = " sk-dashed-wrapped" if dashed_wrapped else ""
out.write(f'<div class="sk-item{dash_cls}">')
if estimator_label:
_write_label_html(out, estimator_label, estimator_label_details)
kind = est_block.kind
out.write(f'<div class="sk-{kind}">')
est_infos = zip(est_block.estimators, est_block.names,
est_block.name_details)
for est, name, name_details in est_infos:
if kind == 'serial':
_write_estimator_html(out, est, name, name_details)
else: # parallel
out.write('<div class="sk-parallel-item">')
# wrap element in a serial visualblock
serial_block = _VisualBlock('serial', [est],
dash_wrapped=False)
_write_estimator_html(out, serial_block, name, name_details)
out.write('</div>') # sk-parallel-item
out.write('</div></div>')
elif est_block.kind == 'single':
_write_label_html(out, est_block.names, est_block.name_details,
outer_class="sk-item", inner_class="sk-estimator",
checked=first_call)
_STYLE = """
#$id {
color: black;
background-color: white;
}
#$id pre{
padding: 0;
}
#$id div.sk-toggleable {
background-color: white;
}
#$id label.sk-toggleable__label {
cursor: pointer;
display: block;
width: 100%;
margin-bottom: 0;
padding: 0.2em 0.3em;
box-sizing: border-box;
text-align: center;
}
#$id div.sk-toggleable__content {
max-height: 0;
max-width: 0;
overflow: hidden;
text-align: left;
background-color: #f0f8ff;
}
#$id div.sk-toggleable__content pre {
margin: 0.2em;
color: black;
border-radius: 0.25em;
background-color: #f0f8ff;
}
#$id input.sk-toggleable__control:checked~div.sk-toggleable__content {
max-height: 200px;
max-width: 100%;
overflow: auto;
}
#$id div.sk-estimator input.sk-toggleable__control:checked~label.sk-toggleable__label {
background-color: #d4ebff;
}
#$id div.sk-label input.sk-toggleable__control:checked~label.sk-toggleable__label {
background-color: #d4ebff;
}
#$id input.sk-hidden--visually {
border: 0;
clip: rect(1px 1px 1px 1px);
clip: rect(1px, 1px, 1px, 1px);
height: 1px;
margin: -1px;
overflow: hidden;
padding: 0;
position: absolute;
width: 1px;
}
#$id div.sk-estimator {
font-family: monospace;
background-color: #f0f8ff;
margin: 0.25em 0.25em;
border: 1px dotted black;
border-radius: 0.25em;
box-sizing: border-box;
}
#$id div.sk-estimator:hover {
background-color: #d4ebff;
}
#$id div.sk-parallel-item::after {
content: "";
width: 100%;
border-bottom: 1px solid gray;
flex-grow: 1;
}
#$id div.sk-label:hover label.sk-toggleable__label {
background-color: #d4ebff;
}
#$id div.sk-serial::before {
content: "";
position: absolute;
border-left: 1px solid gray;
box-sizing: border-box;
top: 2em;
bottom: 0;
left: 50%;
}
#$id div.sk-serial {
display: flex;
flex-direction: column;
align-items: center;
background-color: white;
}
#$id div.sk-item {
z-index: 1;
}
#$id div.sk-parallel {
display: flex;
align-items: stretch;
justify-content: center;
background-color: white;
}
#$id div.sk-parallel-item {
display: flex;
flex-direction: column;
position: relative;
background-color: white;
}
#$id div.sk-parallel-item:first-child::after {
align-self: flex-end;
width: 50%;
}
#$id div.sk-parallel-item:last-child::after {
align-self: flex-start;
width: 50%;
}
#$id div.sk-parallel-item:only-child::after {
width: 0;
}
#$id div.sk-dashed-wrapped {
border: 1px dashed gray;
margin: 0.2em;
box-sizing: border-box;
padding-bottom: 0.1em;
background-color: white;
position: relative;
}
#$id div.sk-label label {
font-family: monospace;
font-weight: bold;
background-color: white;
display: inline-block;
line-height: 1.2em;
}
#$id div.sk-label-container {
position: relative;
z-index: 2;
text-align: center;
}
#$id div.sk-container {
display: inline-block;
position: relative;
}
""".replace(' ', '').replace('\n', '') # noqa
def estimator_html_repr(estimator):
"""Build a HTML representation of an estimator.
Read more in the :ref:`User Guide <visualizing_composite_estimators>`.
Parameters
----------
estimator : estimator object
The estimator to visualize.
Returns
-------
html: str
HTML representation of estimator.
"""
with closing(StringIO()) as out:
container_id = "sk-" + str(uuid.uuid4())
style_template = Template(_STYLE)
style_with_id = style_template.substitute(id=container_id)
out.write(f'<style>{style_with_id}</style>'
f'<div id="{container_id}" class="sk-top-container">'
'<div class="sk-container">')
_write_estimator_html(out, estimator, estimator.__class__.__name__,
str(estimator), first_call=True)
out.write('</div></div>')
html_output = out.getvalue()
return html_output
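# --- Illustrative usage sketch (not part of the original module) ---
# Renders a hypothetical two-step pipeline with estimator_html_repr() defined
# above and writes the resulting HTML fragment to disk for inspection.
def _demo_estimator_html_repr(path="estimator_repr.html"):
    from sklearn.linear_model import LogisticRegression
    from sklearn.pipeline import Pipeline
    from sklearn.preprocessing import StandardScaler

    pipe = Pipeline([("scale", StandardScaler()), ("clf", LogisticRegression())])
    html_output = estimator_html_repr(pipe)
    with open(path, "w") as f:
        f.write(html_output)
    return html_output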
| bsd-3-clause | -6,215,935,959,145,116,000 | 28.77116 | 87 | 0.605349 | false |
zack3241/incubator-airflow | airflow/operators/docker_operator.py | 1 | 9407 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from airflow.hooks.docker_hook import DockerHook
from airflow.exceptions import AirflowException
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
from airflow.utils.file import TemporaryDirectory
from docker import Client, tls
import ast
class DockerOperator(BaseOperator):
"""
Execute a command inside a docker container.
A temporary directory is created on the host and mounted into a container to allow storing files
that together exceed the default disk size of 10GB in a container. The path to the mounted
directory can be accessed via the environment variable ``AIRFLOW_TMP_DIR``.
If a login to a private registry is required prior to pulling the image, a
Docker connection needs to be configured in Airflow and the connection ID
provided with the parameter ``docker_conn_id``.
:param image: Docker image from which to create the container.
:type image: str
:param api_version: Remote API version. Set to ``auto`` to automatically
detect the server's version.
:type api_version: str
:param command: Command to be run in the container.
:type command: str or list
:param cpus: Number of CPUs to assign to the container.
This value gets multiplied by 1024. See
https://docs.docker.com/engine/reference/run/#cpu-share-constraint
:type cpus: float
:param docker_url: URL of the host running the docker daemon.
Default is unix://var/run/docker.sock
:type docker_url: str
:param environment: Environment variables to set in the container.
:type environment: dict
:param force_pull: Pull the docker image on every run. Default is false.
:type force_pull: bool
:param mem_limit: Maximum amount of memory the container can use. Either a float value, which
represents the limit in bytes, or a string like ``128m`` or ``1g``.
:type mem_limit: float or str
:param network_mode: Network mode for the container.
:type network_mode: str
:param tls_ca_cert: Path to a PEM-encoded certificate authority to secure the docker connection.
:type tls_ca_cert: str
:param tls_client_cert: Path to the PEM-encoded certificate used to authenticate docker client.
:type tls_client_cert: str
:param tls_client_key: Path to the PEM-encoded key used to authenticate docker client.
:type tls_client_key: str
:param tls_hostname: Hostname to match against the docker server certificate or False to
disable the check.
:type tls_hostname: str or bool
:param tls_ssl_version: Version of SSL to use when communicating with docker daemon.
:type tls_ssl_version: str
:param tmp_dir: Mount point inside the container to a temporary directory created on the host by
the operator. The path is also made available via the environment variable
``AIRFLOW_TMP_DIR`` inside the container.
:type tmp_dir: str
:param user: Default user inside the docker container.
:type user: int or str
:param volumes: List of volumes to mount into the container, e.g.
``['/host/path:/container/path', '/host/path2:/container/path2:ro']``.
:param working_dir: Working directory to set on the container (equivalent to the -w switch
of the docker client)
:type working_dir: str
:param xcom_push: Whether the stdout will be pushed to the next step using XCom.
The default is False.
:type xcom_push: bool
:param xcom_all: Push all the stdout or just the last line. The default is False (last line).
:type xcom_all: bool
:param docker_conn_id: ID of the Airflow connection to use
:type docker_conn_id: str
"""
template_fields = ('command', 'environment',)
template_ext = ('.sh', '.bash',)
@apply_defaults
def __init__(
self,
image,
api_version=None,
command=None,
cpus=1.0,
docker_url='unix://var/run/docker.sock',
environment=None,
force_pull=False,
mem_limit=None,
network_mode=None,
tls_ca_cert=None,
tls_client_cert=None,
tls_client_key=None,
tls_hostname=None,
tls_ssl_version=None,
tmp_dir='/tmp/airflow',
user=None,
volumes=None,
working_dir=None,
xcom_push=False,
xcom_all=False,
docker_conn_id=None,
*args,
**kwargs):
super(DockerOperator, self).__init__(*args, **kwargs)
self.api_version = api_version
self.command = command
self.cpus = cpus
self.docker_url = docker_url
self.environment = environment or {}
self.force_pull = force_pull
self.image = image
self.mem_limit = mem_limit
self.network_mode = network_mode
self.tls_ca_cert = tls_ca_cert
self.tls_client_cert = tls_client_cert
self.tls_client_key = tls_client_key
self.tls_hostname = tls_hostname
self.tls_ssl_version = tls_ssl_version
self.tmp_dir = tmp_dir
self.user = user
self.volumes = volumes or []
self.working_dir = working_dir
self.xcom_push_flag = xcom_push
self.xcom_all = xcom_all
self.docker_conn_id = docker_conn_id
self.cli = None
self.container = None
def get_hook(self):
return DockerHook(
docker_conn_id=self.docker_conn_id,
base_url=self.docker_url,
version=self.api_version,
tls=self.__get_tls_config()
)
def execute(self, context):
self.log.info('Starting docker container from image %s', self.image)
tls_config = self.__get_tls_config()
if self.docker_conn_id:
self.cli = self.get_hook().get_conn()
else:
self.cli = Client(
base_url=self.docker_url,
version=self.api_version,
tls=tls_config
)
if ':' not in self.image:
image = self.image + ':latest'
else:
image = self.image
if self.force_pull or len(self.cli.images(name=image)) == 0:
self.log.info('Pulling docker image %s', image)
for l in self.cli.pull(image, stream=True):
output = json.loads(l.decode('utf-8'))
self.log.info("%s", output['status'])
cpu_shares = int(round(self.cpus * 1024))
with TemporaryDirectory(prefix='airflowtmp') as host_tmp_dir:
self.environment['AIRFLOW_TMP_DIR'] = self.tmp_dir
self.volumes.append('{0}:{1}'.format(host_tmp_dir, self.tmp_dir))
self.container = self.cli.create_container(
command=self.get_command(),
cpu_shares=cpu_shares,
environment=self.environment,
host_config=self.cli.create_host_config(binds=self.volumes,
network_mode=self.network_mode),
image=image,
mem_limit=self.mem_limit,
user=self.user,
working_dir=self.working_dir
)
self.cli.start(self.container['Id'])
line = ''
for line in self.cli.logs(container=self.container['Id'], stream=True):
line = line.strip()
if hasattr(line, 'decode'):
line = line.decode('utf-8')
self.log.info(line)
exit_code = self.cli.wait(self.container['Id'])
if exit_code != 0:
raise AirflowException('docker container failed')
if self.xcom_push_flag:
return self.cli.logs(container=self.container['Id']) if self.xcom_all else str(line)
def get_command(self):
if self.command is not None and self.command.strip().find('[') == 0:
commands = ast.literal_eval(self.command)
else:
commands = self.command
return commands
def on_kill(self):
if self.cli is not None:
self.log.info('Stopping docker container')
self.cli.stop(self.container['Id'])
def __get_tls_config(self):
tls_config = None
if self.tls_ca_cert and self.tls_client_cert and self.tls_client_key:
tls_config = tls.TLSConfig(
ca_cert=self.tls_ca_cert,
client_cert=(self.tls_client_cert, self.tls_client_key),
verify=True,
ssl_version=self.tls_ssl_version,
assert_hostname=self.tls_hostname
)
self.docker_url = self.docker_url.replace('tcp://', 'https://')
return tls_config
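# --- Illustrative usage sketch (not part of this module) ---
# Shows how the parameters documented above fit together; the dag object,
# task_id, image and command are hypothetical.
def _example_docker_task(dag):
    return DockerOperator(
        task_id='print_utc_date',
        image='python:3.6-slim',
        command='python -c "import datetime; print(datetime.datetime.utcnow())"',
        docker_url='unix://var/run/docker.sock',
        network_mode='bridge',
        environment={'EXAMPLE_VAR': 'example_value'},
        xcom_push=True,
        dag=dag,
    )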
| apache-2.0 | -7,895,282,612,694,790,000 | 38.860169 | 100 | 0.613586 | false |
openstack/yaql | yaql/language/conventions.py | 1 | 1410 | # Copyright (c) 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import re
class Convention(metaclass=abc.ABCMeta):
@abc.abstractmethod
def convert_function_name(self, name):
pass
@abc.abstractmethod
def convert_parameter_name(self, name):
pass
class PythonConvention(Convention):
def convert_function_name(self, name):
return name
def convert_parameter_name(self, name):
return name
class CamelCaseConvention(Convention):
def __init__(self):
self.regex = re.compile(r'(?!^)_(\w)', flags=re.UNICODE)
def convert_function_name(self, name):
return self._to_camel_case(name)
def convert_parameter_name(self, name):
return self._to_camel_case(name)
def _to_camel_case(self, name):
return self.regex.sub(lambda m: m.group(1).upper(), name)
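# Illustrative sketch (not part of yaql): CamelCaseConvention exposes
# snake_case Python names to YAQL callers in camelCase form.
def _demo_camel_case_convention():
    convention = CamelCaseConvention()
    assert convention.convert_function_name('to_lower_case') == 'toLowerCase'
    assert convention.convert_parameter_name('default_value') == 'defaultValue'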
| apache-2.0 | -6,692,259,790,340,050,000 | 28.375 | 78 | 0.680851 | false |
henry0312/LightGBM | python-package/lightgbm/basic.py | 1 | 150238 | # coding: utf-8
"""Wrapper for C API of LightGBM."""
import ctypes
import json
import os
import warnings
from collections import OrderedDict
from copy import deepcopy
from functools import wraps
from logging import Logger
from tempfile import NamedTemporaryFile
from typing import Any, Dict, List, Set, Union
import numpy as np
import scipy.sparse
from .compat import PANDAS_INSTALLED, concat, dt_DataTable, is_dtype_sparse, pd_DataFrame, pd_Series
from .libpath import find_lib_path
class _DummyLogger:
def info(self, msg):
print(msg)
def warning(self, msg):
warnings.warn(msg, stacklevel=3)
_LOGGER = _DummyLogger()
def register_logger(logger):
"""Register custom logger.
Parameters
----------
logger : logging.Logger
Custom logger.
"""
if not isinstance(logger, Logger):
raise TypeError("Logger should inherit logging.Logger class")
global _LOGGER
_LOGGER = logger
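# Illustrative sketch (not part of the library): route LightGBM's messages
# through Python's standard logging module instead of the default
# print()/warnings behaviour of _DummyLogger.
def _demo_register_logger():
    import logging
    logging.basicConfig(level=logging.INFO)
    register_logger(logging.getLogger("lightgbm"))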
def _normalize_native_string(func):
"""Join log messages from native library which come by chunks."""
msg_normalized = []
@wraps(func)
def wrapper(msg):
nonlocal msg_normalized
if msg.strip() == '':
msg = ''.join(msg_normalized)
msg_normalized = []
return func(msg)
else:
msg_normalized.append(msg)
return wrapper
def _log_info(msg):
_LOGGER.info(msg)
def _log_warning(msg):
_LOGGER.warning(msg)
@_normalize_native_string
def _log_native(msg):
_LOGGER.info(msg)
def _log_callback(msg):
"""Redirect logs from native library into Python."""
_log_native(str(msg.decode('utf-8')))
def _load_lib():
"""Load LightGBM library."""
lib_path = find_lib_path()
if len(lib_path) == 0:
return None
lib = ctypes.cdll.LoadLibrary(lib_path[0])
lib.LGBM_GetLastError.restype = ctypes.c_char_p
callback = ctypes.CFUNCTYPE(None, ctypes.c_char_p)
lib.callback = callback(_log_callback)
if lib.LGBM_RegisterLogCallback(lib.callback) != 0:
raise LightGBMError(lib.LGBM_GetLastError().decode('utf-8'))
return lib
_LIB = _load_lib()
NUMERIC_TYPES = (int, float, bool)
def _safe_call(ret):
"""Check the return value from C API call.
Parameters
----------
ret : int
The return value from C API calls.
"""
if ret != 0:
raise LightGBMError(_LIB.LGBM_GetLastError().decode('utf-8'))
def is_numeric(obj):
"""Check whether object is a number or not, include numpy number, etc."""
try:
float(obj)
return True
except (TypeError, ValueError):
# TypeError: obj is not a string or a number
# ValueError: invalid literal
return False
def is_numpy_1d_array(data):
"""Check whether data is a numpy 1-D array."""
return isinstance(data, np.ndarray) and len(data.shape) == 1
def is_numpy_column_array(data):
"""Check whether data is a column numpy array."""
if not isinstance(data, np.ndarray):
return False
shape = data.shape
return len(shape) == 2 and shape[1] == 1
def cast_numpy_1d_array_to_dtype(array, dtype):
"""Cast numpy 1d array to given dtype."""
if array.dtype == dtype:
return array
return array.astype(dtype=dtype, copy=False)
def is_1d_list(data):
"""Check whether data is a 1-D list."""
return isinstance(data, list) and (not data or is_numeric(data[0]))
def list_to_1d_numpy(data, dtype=np.float32, name='list'):
"""Convert data to numpy 1-D array."""
if is_numpy_1d_array(data):
return cast_numpy_1d_array_to_dtype(data, dtype)
elif is_numpy_column_array(data):
_log_warning('Converting column-vector to 1d array')
array = data.ravel()
return cast_numpy_1d_array_to_dtype(array, dtype)
elif is_1d_list(data):
return np.array(data, dtype=dtype, copy=False)
elif isinstance(data, pd_Series):
if _get_bad_pandas_dtypes([data.dtypes]):
raise ValueError('Series.dtypes must be int, float or bool')
return np.array(data, dtype=dtype, copy=False) # SparseArray should be supported as well
else:
raise TypeError(f"Wrong type({type(data).__name__}) for {name}.\n"
"It should be list, numpy 1-D array or pandas Series")
def cfloat32_array_to_numpy(cptr, length):
"""Convert a ctypes float pointer array to a numpy array."""
if isinstance(cptr, ctypes.POINTER(ctypes.c_float)):
return np.ctypeslib.as_array(cptr, shape=(length,)).copy()
else:
raise RuntimeError('Expected float pointer')
def cfloat64_array_to_numpy(cptr, length):
"""Convert a ctypes double pointer array to a numpy array."""
if isinstance(cptr, ctypes.POINTER(ctypes.c_double)):
return np.ctypeslib.as_array(cptr, shape=(length,)).copy()
else:
raise RuntimeError('Expected double pointer')
def cint32_array_to_numpy(cptr, length):
"""Convert a ctypes int pointer array to a numpy array."""
if isinstance(cptr, ctypes.POINTER(ctypes.c_int32)):
return np.ctypeslib.as_array(cptr, shape=(length,)).copy()
else:
raise RuntimeError('Expected int32 pointer')
def cint64_array_to_numpy(cptr, length):
"""Convert a ctypes int pointer array to a numpy array."""
if isinstance(cptr, ctypes.POINTER(ctypes.c_int64)):
return np.ctypeslib.as_array(cptr, shape=(length,)).copy()
else:
raise RuntimeError('Expected int64 pointer')
def c_str(string):
"""Convert a Python string to C string."""
return ctypes.c_char_p(string.encode('utf-8'))
def c_array(ctype, values):
"""Convert a Python array to C array."""
return (ctype * len(values))(*values)
def json_default_with_numpy(obj):
"""Convert numpy classes to JSON serializable objects."""
if isinstance(obj, (np.integer, np.floating, np.bool_)):
return obj.item()
elif isinstance(obj, np.ndarray):
return obj.tolist()
else:
return obj
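# Illustrative sketch (hypothetical values): used as the `default` hook of
# json.dumps so that numpy scalars and arrays survive serialization.
def _demo_json_default_with_numpy():
    payload = {"best_iteration": np.int64(42), "importance": np.array([1.5, 0.0])}
    assert (json.dumps(payload, default=json_default_with_numpy)
            == '{"best_iteration": 42, "importance": [1.5, 0.0]}')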
def param_dict_to_str(data):
"""Convert Python dictionary to string, which is passed to C API."""
if data is None or not data:
return ""
pairs = []
for key, val in data.items():
if isinstance(val, (list, tuple, set)) or is_numpy_1d_array(val):
def to_string(x):
if isinstance(x, list):
return f"[{','.join(map(str, x))}]"
else:
return str(x)
pairs.append(f"{key}={','.join(map(to_string, val))}")
elif isinstance(val, (str, NUMERIC_TYPES)) or is_numeric(val):
pairs.append(f"{key}={val}")
elif val is not None:
raise TypeError(f'Unknown type of parameter:{key}, got:{type(val).__name__}')
return ' '.join(pairs)
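# Illustrative sketch (hypothetical parameter values): scalar values become
# key=value pairs and list values are flattened with commas before all pairs
# are joined with spaces for the C API.
def _demo_param_dict_to_str():
    params = {'objective': 'binary', 'metric': ['auc', 'binary_logloss'], 'num_leaves': 31}
    assert param_dict_to_str(params) == 'objective=binary metric=auc,binary_logloss num_leaves=31'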
class _TempFile:
def __enter__(self):
with NamedTemporaryFile(prefix="lightgbm_tmp_", delete=True) as f:
self.name = f.name
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if os.path.isfile(self.name):
os.remove(self.name)
def readlines(self):
with open(self.name, "r+") as f:
ret = f.readlines()
return ret
def writelines(self, lines):
with open(self.name, "w+") as f:
f.writelines(lines)
class LightGBMError(Exception):
"""Error thrown by LightGBM."""
pass
# DeprecationWarning is not shown by default, so let's create our own with higher level
class LGBMDeprecationWarning(UserWarning):
"""Custom deprecation warning."""
pass
class _ConfigAliases:
aliases = {"bin_construct_sample_cnt": {"bin_construct_sample_cnt",
"subsample_for_bin"},
"boosting": {"boosting",
"boosting_type",
"boost"},
"categorical_feature": {"categorical_feature",
"cat_feature",
"categorical_column",
"cat_column"},
"data_random_seed": {"data_random_seed",
"data_seed"},
"early_stopping_round": {"early_stopping_round",
"early_stopping_rounds",
"early_stopping",
"n_iter_no_change"},
"enable_bundle": {"enable_bundle",
"is_enable_bundle",
"bundle"},
"eval_at": {"eval_at",
"ndcg_eval_at",
"ndcg_at",
"map_eval_at",
"map_at"},
"group_column": {"group_column",
"group",
"group_id",
"query_column",
"query",
"query_id"},
"header": {"header",
"has_header"},
"ignore_column": {"ignore_column",
"ignore_feature",
"blacklist"},
"is_enable_sparse": {"is_enable_sparse",
"is_sparse",
"enable_sparse",
"sparse"},
"label_column": {"label_column",
"label"},
"local_listen_port": {"local_listen_port",
"local_port",
"port"},
"machines": {"machines",
"workers",
"nodes"},
"metric": {"metric",
"metrics",
"metric_types"},
"num_class": {"num_class",
"num_classes"},
"num_iterations": {"num_iterations",
"num_iteration",
"n_iter",
"num_tree",
"num_trees",
"num_round",
"num_rounds",
"num_boost_round",
"n_estimators"},
"num_machines": {"num_machines",
"num_machine"},
"num_threads": {"num_threads",
"num_thread",
"nthread",
"nthreads",
"n_jobs"},
"objective": {"objective",
"objective_type",
"app",
"application"},
"pre_partition": {"pre_partition",
"is_pre_partition"},
"tree_learner": {"tree_learner",
"tree",
"tree_type",
"tree_learner_type"},
"two_round": {"two_round",
"two_round_loading",
"use_two_round_loading"},
"verbosity": {"verbosity",
"verbose"},
"weight_column": {"weight_column",
"weight"}}
@classmethod
def get(cls, *args):
ret = set()
for i in args:
ret |= cls.aliases.get(i, {i})
return ret
def _choose_param_value(main_param_name: str, params: Dict[str, Any], default_value: Any) -> Dict[str, Any]:
"""Get a single parameter value, accounting for aliases.
Parameters
----------
main_param_name : str
Name of the main parameter to get a value for. One of the keys of ``_ConfigAliases``.
params : dict
Dictionary of LightGBM parameters.
default_value : Any
Default value to use for the parameter, if none is found in ``params``.
Returns
-------
params : dict
A ``params`` dict with exactly one value for ``main_param_name``, and all aliases ``main_param_name`` removed.
If both ``main_param_name`` and one or more aliases for it are found, the value of ``main_param_name`` will be preferred.
"""
# avoid side effects on passed-in parameters
params = deepcopy(params)
# find a value, and remove other aliases with .pop()
# prefer the value of 'main_param_name' if it exists, otherwise search the aliases
found_value = None
if main_param_name in params.keys():
found_value = params[main_param_name]
for param in _ConfigAliases.get(main_param_name):
val = params.pop(param, None)
if found_value is None and val is not None:
found_value = val
if found_value is not None:
params[main_param_name] = found_value
else:
params[main_param_name] = default_value
return params
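# Illustrative sketch (not part of the public API): alias resolution keeps
# exactly one canonical key and falls back to the default only when neither
# the main name nor any of its aliases was supplied.
def _demo_choose_param_value():
    assert _choose_param_value("num_iterations", {"num_boost_round": 50}, 100) == {"num_iterations": 50}
    assert _choose_param_value("num_iterations", {}, 100) == {"num_iterations": 100}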
MAX_INT32 = (1 << 31) - 1
"""Macro definition of data type in C API of LightGBM"""
C_API_DTYPE_FLOAT32 = 0
C_API_DTYPE_FLOAT64 = 1
C_API_DTYPE_INT32 = 2
C_API_DTYPE_INT64 = 3
"""Matrix is row major in Python"""
C_API_IS_ROW_MAJOR = 1
"""Macro definition of prediction type in C API of LightGBM"""
C_API_PREDICT_NORMAL = 0
C_API_PREDICT_RAW_SCORE = 1
C_API_PREDICT_LEAF_INDEX = 2
C_API_PREDICT_CONTRIB = 3
"""Macro definition of sparse matrix type"""
C_API_MATRIX_TYPE_CSR = 0
C_API_MATRIX_TYPE_CSC = 1
"""Macro definition of feature importance type"""
C_API_FEATURE_IMPORTANCE_SPLIT = 0
C_API_FEATURE_IMPORTANCE_GAIN = 1
"""Data type of data field"""
FIELD_TYPE_MAPPER = {"label": C_API_DTYPE_FLOAT32,
"weight": C_API_DTYPE_FLOAT32,
"init_score": C_API_DTYPE_FLOAT64,
"group": C_API_DTYPE_INT32}
"""String name to int feature importance type mapper"""
FEATURE_IMPORTANCE_TYPE_MAPPER = {"split": C_API_FEATURE_IMPORTANCE_SPLIT,
"gain": C_API_FEATURE_IMPORTANCE_GAIN}
def convert_from_sliced_object(data):
"""Fix the memory of multi-dimensional sliced object."""
if isinstance(data, np.ndarray) and isinstance(data.base, np.ndarray):
if not data.flags.c_contiguous:
_log_warning("Usage of np.ndarray subset (sliced data) is not recommended "
"due to it will double the peak memory cost in LightGBM.")
return np.copy(data)
return data
def c_float_array(data):
"""Get pointer of float numpy array / list."""
if is_1d_list(data):
data = np.array(data, copy=False)
if is_numpy_1d_array(data):
data = convert_from_sliced_object(data)
assert data.flags.c_contiguous
if data.dtype == np.float32:
ptr_data = data.ctypes.data_as(ctypes.POINTER(ctypes.c_float))
type_data = C_API_DTYPE_FLOAT32
elif data.dtype == np.float64:
ptr_data = data.ctypes.data_as(ctypes.POINTER(ctypes.c_double))
type_data = C_API_DTYPE_FLOAT64
else:
raise TypeError(f"Expected np.float32 or np.float64, met type({data.dtype})")
else:
raise TypeError(f"Unknown type({type(data).__name__})")
return (ptr_data, type_data, data) # return `data` to avoid the temporary copy is freed
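# Illustrative sketch: the pointer/type pair returned by c_float_array() is
# what the C API wrappers below pass to native LightGBM; the third element is
# kept only so the backing buffer is not garbage collected too early.
def _demo_c_float_array():
    ptr_data, type_data, data = c_float_array(np.array([1.0, 2.0], dtype=np.float32))
    assert type_data == C_API_DTYPE_FLOAT32
    return ptr_data, data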
def c_int_array(data):
"""Get pointer of int numpy array / list."""
if is_1d_list(data):
data = np.array(data, copy=False)
if is_numpy_1d_array(data):
data = convert_from_sliced_object(data)
assert data.flags.c_contiguous
if data.dtype == np.int32:
ptr_data = data.ctypes.data_as(ctypes.POINTER(ctypes.c_int32))
type_data = C_API_DTYPE_INT32
elif data.dtype == np.int64:
ptr_data = data.ctypes.data_as(ctypes.POINTER(ctypes.c_int64))
type_data = C_API_DTYPE_INT64
else:
raise TypeError(f"Expected np.int32 or np.int64, met type({data.dtype})")
else:
raise TypeError(f"Unknown type({type(data).__name__})")
return (ptr_data, type_data, data) # return `data` to avoid the temporary copy is freed
def _get_bad_pandas_dtypes(dtypes):
pandas_dtype_mapper = {'int8': 'int', 'int16': 'int', 'int32': 'int',
'int64': 'int', 'uint8': 'int', 'uint16': 'int',
'uint32': 'int', 'uint64': 'int', 'bool': 'int',
'float16': 'float', 'float32': 'float', 'float64': 'float'}
bad_indices = [i for i, dtype in enumerate(dtypes) if (dtype.name not in pandas_dtype_mapper
and (not is_dtype_sparse(dtype)
or dtype.subtype.name not in pandas_dtype_mapper))]
return bad_indices
def _data_from_pandas(data, feature_name, categorical_feature, pandas_categorical):
if isinstance(data, pd_DataFrame):
if len(data.shape) != 2 or data.shape[0] < 1:
raise ValueError('Input data must be 2 dimensional and non empty.')
if feature_name == 'auto' or feature_name is None:
data = data.rename(columns=str)
cat_cols = list(data.select_dtypes(include=['category']).columns)
cat_cols_not_ordered = [col for col in cat_cols if not data[col].cat.ordered]
if pandas_categorical is None: # train dataset
pandas_categorical = [list(data[col].cat.categories) for col in cat_cols]
else:
if len(cat_cols) != len(pandas_categorical):
raise ValueError('train and valid dataset categorical_feature do not match.')
for col, category in zip(cat_cols, pandas_categorical):
if list(data[col].cat.categories) != list(category):
data[col] = data[col].cat.set_categories(category)
if len(cat_cols): # cat_cols is list
data = data.copy() # not alter origin DataFrame
data[cat_cols] = data[cat_cols].apply(lambda x: x.cat.codes).replace({-1: np.nan})
if categorical_feature is not None:
if feature_name is None:
feature_name = list(data.columns)
if categorical_feature == 'auto': # use cat cols from DataFrame
categorical_feature = cat_cols_not_ordered
else: # use cat cols specified by user
categorical_feature = list(categorical_feature)
if feature_name == 'auto':
feature_name = list(data.columns)
bad_indices = _get_bad_pandas_dtypes(data.dtypes)
if bad_indices:
bad_index_cols_str = ', '.join(data.columns[bad_indices])
raise ValueError("DataFrame.dtypes for data must be int, float or bool.\n"
"Did not expect the data types in the following fields: "
f"{bad_index_cols_str}")
data = data.values
if data.dtype != np.float32 and data.dtype != np.float64:
data = data.astype(np.float32)
else:
if feature_name == 'auto':
feature_name = None
if categorical_feature == 'auto':
categorical_feature = None
return data, feature_name, categorical_feature, pandas_categorical
def _label_from_pandas(label):
if isinstance(label, pd_DataFrame):
if len(label.columns) > 1:
raise ValueError('DataFrame for label cannot have multiple columns')
if _get_bad_pandas_dtypes(label.dtypes):
raise ValueError('DataFrame.dtypes for label must be int, float or bool')
label = np.ravel(label.values.astype(np.float32, copy=False))
return label
def _dump_pandas_categorical(pandas_categorical, file_name=None):
categorical_json = json.dumps(pandas_categorical, default=json_default_with_numpy)
pandas_str = f'\npandas_categorical:{categorical_json}\n'
if file_name is not None:
with open(file_name, 'a') as f:
f.write(pandas_str)
return pandas_str
def _load_pandas_categorical(file_name=None, model_str=None):
pandas_key = 'pandas_categorical:'
offset = -len(pandas_key)
if file_name is not None:
max_offset = -os.path.getsize(file_name)
with open(file_name, 'rb') as f:
while True:
if offset < max_offset:
offset = max_offset
f.seek(offset, os.SEEK_END)
lines = f.readlines()
if len(lines) >= 2:
break
offset *= 2
last_line = lines[-1].decode('utf-8').strip()
if not last_line.startswith(pandas_key):
last_line = lines[-2].decode('utf-8').strip()
elif model_str is not None:
idx = model_str.rfind('\n', 0, offset)
last_line = model_str[idx:].strip()
if last_line.startswith(pandas_key):
return json.loads(last_line[len(pandas_key):])
else:
return None
class _InnerPredictor:
"""_InnerPredictor of LightGBM.
Not exposed to user.
Used only for prediction, usually used for continued training.
.. note::
Can be converted from Booster, but cannot be converted to Booster.
"""
def __init__(self, model_file=None, booster_handle=None, pred_parameter=None):
"""Initialize the _InnerPredictor.
Parameters
----------
model_file : string or None, optional (default=None)
Path to the model file.
booster_handle : object or None, optional (default=None)
Handle of Booster.
pred_parameter: dict or None, optional (default=None)
Other parameters for the prediction.
"""
self.handle = ctypes.c_void_p()
self.__is_manage_handle = True
if model_file is not None:
"""Prediction task"""
out_num_iterations = ctypes.c_int(0)
_safe_call(_LIB.LGBM_BoosterCreateFromModelfile(
c_str(model_file),
ctypes.byref(out_num_iterations),
ctypes.byref(self.handle)))
out_num_class = ctypes.c_int(0)
_safe_call(_LIB.LGBM_BoosterGetNumClasses(
self.handle,
ctypes.byref(out_num_class)))
self.num_class = out_num_class.value
self.num_total_iteration = out_num_iterations.value
self.pandas_categorical = _load_pandas_categorical(file_name=model_file)
elif booster_handle is not None:
self.__is_manage_handle = False
self.handle = booster_handle
out_num_class = ctypes.c_int(0)
_safe_call(_LIB.LGBM_BoosterGetNumClasses(
self.handle,
ctypes.byref(out_num_class)))
self.num_class = out_num_class.value
self.num_total_iteration = self.current_iteration()
self.pandas_categorical = None
else:
raise TypeError('Need model_file or booster_handle to create a predictor')
pred_parameter = {} if pred_parameter is None else pred_parameter
self.pred_parameter = param_dict_to_str(pred_parameter)
def __del__(self):
try:
if self.__is_manage_handle:
_safe_call(_LIB.LGBM_BoosterFree(self.handle))
except AttributeError:
pass
def __getstate__(self):
this = self.__dict__.copy()
this.pop('handle', None)
return this
def predict(self, data, start_iteration=0, num_iteration=-1,
raw_score=False, pred_leaf=False, pred_contrib=False, data_has_header=False,
is_reshape=True):
"""Predict logic.
Parameters
----------
data : string, numpy array, pandas DataFrame, H2O DataTable's Frame or scipy.sparse
Data source for prediction.
When data type is string, it represents the path of txt file.
start_iteration : int, optional (default=0)
Start index of the iteration to predict.
num_iteration : int, optional (default=-1)
Iteration used for prediction.
raw_score : bool, optional (default=False)
Whether to predict raw scores.
pred_leaf : bool, optional (default=False)
Whether to predict leaf index.
pred_contrib : bool, optional (default=False)
Whether to predict feature contributions.
data_has_header : bool, optional (default=False)
Whether data has header.
Used only for txt data.
is_reshape : bool, optional (default=True)
Whether to reshape to (nrow, ncol).
Returns
-------
result : numpy array, scipy.sparse or list of scipy.sparse
Prediction result.
Can be sparse or a list of sparse objects (each element represents predictions for one class) for feature contributions (when ``pred_contrib=True``).
"""
if isinstance(data, Dataset):
raise TypeError("Cannot use Dataset instance for prediction, please use raw data instead")
data = _data_from_pandas(data, None, None, self.pandas_categorical)[0]
predict_type = C_API_PREDICT_NORMAL
if raw_score:
predict_type = C_API_PREDICT_RAW_SCORE
if pred_leaf:
predict_type = C_API_PREDICT_LEAF_INDEX
if pred_contrib:
predict_type = C_API_PREDICT_CONTRIB
int_data_has_header = 1 if data_has_header else 0
if isinstance(data, str):
with _TempFile() as f:
_safe_call(_LIB.LGBM_BoosterPredictForFile(
self.handle,
c_str(data),
ctypes.c_int(int_data_has_header),
ctypes.c_int(predict_type),
ctypes.c_int(start_iteration),
ctypes.c_int(num_iteration),
c_str(self.pred_parameter),
c_str(f.name)))
lines = f.readlines()
nrow = len(lines)
preds = [float(token) for line in lines for token in line.split('\t')]
preds = np.array(preds, dtype=np.float64, copy=False)
elif isinstance(data, scipy.sparse.csr_matrix):
preds, nrow = self.__pred_for_csr(data, start_iteration, num_iteration, predict_type)
elif isinstance(data, scipy.sparse.csc_matrix):
preds, nrow = self.__pred_for_csc(data, start_iteration, num_iteration, predict_type)
elif isinstance(data, np.ndarray):
preds, nrow = self.__pred_for_np2d(data, start_iteration, num_iteration, predict_type)
elif isinstance(data, list):
try:
data = np.array(data)
except BaseException:
raise ValueError('Cannot convert data list to numpy array.')
preds, nrow = self.__pred_for_np2d(data, start_iteration, num_iteration, predict_type)
elif isinstance(data, dt_DataTable):
preds, nrow = self.__pred_for_np2d(data.to_numpy(), start_iteration, num_iteration, predict_type)
else:
try:
_log_warning('Converting data to scipy sparse matrix.')
csr = scipy.sparse.csr_matrix(data)
except BaseException:
raise TypeError(f'Cannot predict data for type {type(data).__name__}')
preds, nrow = self.__pred_for_csr(csr, start_iteration, num_iteration, predict_type)
if pred_leaf:
preds = preds.astype(np.int32)
is_sparse = scipy.sparse.issparse(preds) or isinstance(preds, list)
if is_reshape and not is_sparse and preds.size != nrow:
if preds.size % nrow == 0:
preds = preds.reshape(nrow, -1)
else:
raise ValueError(f'Length of predict result ({preds.size}) cannot be divided by nrow ({nrow})')
return preds
def __get_num_preds(self, start_iteration, num_iteration, nrow, predict_type):
"""Get size of prediction result."""
if nrow > MAX_INT32:
raise LightGBMError('LightGBM cannot perform prediction for data'
f' with number of rows greater than MAX_INT32 ({MAX_INT32}).\n'
'You can split your data into chunks'
' and then concatenate predictions for them')
n_preds = ctypes.c_int64(0)
_safe_call(_LIB.LGBM_BoosterCalcNumPredict(
self.handle,
ctypes.c_int(nrow),
ctypes.c_int(predict_type),
ctypes.c_int(start_iteration),
ctypes.c_int(num_iteration),
ctypes.byref(n_preds)))
return n_preds.value
def __pred_for_np2d(self, mat, start_iteration, num_iteration, predict_type):
"""Predict for a 2-D numpy matrix."""
if len(mat.shape) != 2:
raise ValueError('Input numpy.ndarray or list must be 2 dimensional')
def inner_predict(mat, start_iteration, num_iteration, predict_type, preds=None):
if mat.dtype == np.float32 or mat.dtype == np.float64:
data = np.array(mat.reshape(mat.size), dtype=mat.dtype, copy=False)
else: # change non-float data to float data, need to copy
data = np.array(mat.reshape(mat.size), dtype=np.float32)
ptr_data, type_ptr_data, _ = c_float_array(data)
n_preds = self.__get_num_preds(start_iteration, num_iteration, mat.shape[0], predict_type)
if preds is None:
preds = np.zeros(n_preds, dtype=np.float64)
elif len(preds.shape) != 1 or len(preds) != n_preds:
raise ValueError("Wrong length of pre-allocated predict array")
out_num_preds = ctypes.c_int64(0)
_safe_call(_LIB.LGBM_BoosterPredictForMat(
self.handle,
ptr_data,
ctypes.c_int(type_ptr_data),
ctypes.c_int32(mat.shape[0]),
ctypes.c_int32(mat.shape[1]),
ctypes.c_int(C_API_IS_ROW_MAJOR),
ctypes.c_int(predict_type),
ctypes.c_int(start_iteration),
ctypes.c_int(num_iteration),
c_str(self.pred_parameter),
ctypes.byref(out_num_preds),
preds.ctypes.data_as(ctypes.POINTER(ctypes.c_double))))
if n_preds != out_num_preds.value:
raise ValueError("Wrong length for predict results")
return preds, mat.shape[0]
nrow = mat.shape[0]
if nrow > MAX_INT32:
sections = np.arange(start=MAX_INT32, stop=nrow, step=MAX_INT32)
# __get_num_preds() cannot work with nrow > MAX_INT32, so calculate overall number of predictions piecemeal
n_preds = [self.__get_num_preds(start_iteration, num_iteration, i, predict_type) for i in np.diff([0] + list(sections) + [nrow])]
n_preds_sections = np.array([0] + n_preds, dtype=np.intp).cumsum()
preds = np.zeros(sum(n_preds), dtype=np.float64)
for chunk, (start_idx_pred, end_idx_pred) in zip(np.array_split(mat, sections),
zip(n_preds_sections, n_preds_sections[1:])):
# avoid memory consumption by arrays concatenation operations
inner_predict(chunk, start_iteration, num_iteration, predict_type, preds[start_idx_pred:end_idx_pred])
return preds, nrow
else:
return inner_predict(mat, start_iteration, num_iteration, predict_type)
def __create_sparse_native(self, cs, out_shape, out_ptr_indptr, out_ptr_indices, out_ptr_data,
indptr_type, data_type, is_csr=True):
# create numpy array from output arrays
data_indices_len = out_shape[0]
indptr_len = out_shape[1]
if indptr_type == C_API_DTYPE_INT32:
out_indptr = cint32_array_to_numpy(out_ptr_indptr, indptr_len)
elif indptr_type == C_API_DTYPE_INT64:
out_indptr = cint64_array_to_numpy(out_ptr_indptr, indptr_len)
else:
raise TypeError("Expected int32 or int64 type for indptr")
if data_type == C_API_DTYPE_FLOAT32:
out_data = cfloat32_array_to_numpy(out_ptr_data, data_indices_len)
elif data_type == C_API_DTYPE_FLOAT64:
out_data = cfloat64_array_to_numpy(out_ptr_data, data_indices_len)
else:
raise TypeError("Expected float32 or float64 type for data")
out_indices = cint32_array_to_numpy(out_ptr_indices, data_indices_len)
# break up indptr based on number of rows (note more than one matrix in multiclass case)
per_class_indptr_shape = cs.indptr.shape[0]
# for CSC there is extra column added
if not is_csr:
per_class_indptr_shape += 1
out_indptr_arrays = np.split(out_indptr, out_indptr.shape[0] / per_class_indptr_shape)
# reformat output into a csr or csc matrix or list of csr or csc matrices
cs_output_matrices = []
offset = 0
for cs_indptr in out_indptr_arrays:
matrix_indptr_len = cs_indptr[cs_indptr.shape[0] - 1]
cs_indices = out_indices[offset + cs_indptr[0]:offset + matrix_indptr_len]
cs_data = out_data[offset + cs_indptr[0]:offset + matrix_indptr_len]
offset += matrix_indptr_len
# same shape as input csr or csc matrix except extra column for expected value
cs_shape = [cs.shape[0], cs.shape[1] + 1]
# note: make sure we copy data as it will be deallocated next
if is_csr:
cs_output_matrices.append(scipy.sparse.csr_matrix((cs_data, cs_indices, cs_indptr), cs_shape))
else:
cs_output_matrices.append(scipy.sparse.csc_matrix((cs_data, cs_indices, cs_indptr), cs_shape))
# free the temporary native indptr, indices, and data
_safe_call(_LIB.LGBM_BoosterFreePredictSparse(out_ptr_indptr, out_ptr_indices, out_ptr_data,
ctypes.c_int(indptr_type), ctypes.c_int(data_type)))
if len(cs_output_matrices) == 1:
return cs_output_matrices[0]
return cs_output_matrices
def __pred_for_csr(self, csr, start_iteration, num_iteration, predict_type):
"""Predict for a CSR data."""
def inner_predict(csr, start_iteration, num_iteration, predict_type, preds=None):
nrow = len(csr.indptr) - 1
n_preds = self.__get_num_preds(start_iteration, num_iteration, nrow, predict_type)
if preds is None:
preds = np.zeros(n_preds, dtype=np.float64)
elif len(preds.shape) != 1 or len(preds) != n_preds:
raise ValueError("Wrong length of pre-allocated predict array")
out_num_preds = ctypes.c_int64(0)
ptr_indptr, type_ptr_indptr, __ = c_int_array(csr.indptr)
ptr_data, type_ptr_data, _ = c_float_array(csr.data)
assert csr.shape[1] <= MAX_INT32
csr_indices = csr.indices.astype(np.int32, copy=False)
_safe_call(_LIB.LGBM_BoosterPredictForCSR(
self.handle,
ptr_indptr,
ctypes.c_int(type_ptr_indptr),
csr_indices.ctypes.data_as(ctypes.POINTER(ctypes.c_int32)),
ptr_data,
ctypes.c_int(type_ptr_data),
ctypes.c_int64(len(csr.indptr)),
ctypes.c_int64(len(csr.data)),
ctypes.c_int64(csr.shape[1]),
ctypes.c_int(predict_type),
ctypes.c_int(start_iteration),
ctypes.c_int(num_iteration),
c_str(self.pred_parameter),
ctypes.byref(out_num_preds),
preds.ctypes.data_as(ctypes.POINTER(ctypes.c_double))))
if n_preds != out_num_preds.value:
raise ValueError("Wrong length for predict results")
return preds, nrow
def inner_predict_sparse(csr, start_iteration, num_iteration, predict_type):
ptr_indptr, type_ptr_indptr, __ = c_int_array(csr.indptr)
ptr_data, type_ptr_data, _ = c_float_array(csr.data)
csr_indices = csr.indices.astype(np.int32, copy=False)
matrix_type = C_API_MATRIX_TYPE_CSR
if type_ptr_indptr == C_API_DTYPE_INT32:
out_ptr_indptr = ctypes.POINTER(ctypes.c_int32)()
else:
out_ptr_indptr = ctypes.POINTER(ctypes.c_int64)()
out_ptr_indices = ctypes.POINTER(ctypes.c_int32)()
if type_ptr_data == C_API_DTYPE_FLOAT32:
out_ptr_data = ctypes.POINTER(ctypes.c_float)()
else:
out_ptr_data = ctypes.POINTER(ctypes.c_double)()
out_shape = np.zeros(2, dtype=np.int64)
_safe_call(_LIB.LGBM_BoosterPredictSparseOutput(
self.handle,
ptr_indptr,
ctypes.c_int(type_ptr_indptr),
csr_indices.ctypes.data_as(ctypes.POINTER(ctypes.c_int32)),
ptr_data,
ctypes.c_int(type_ptr_data),
ctypes.c_int64(len(csr.indptr)),
ctypes.c_int64(len(csr.data)),
ctypes.c_int64(csr.shape[1]),
ctypes.c_int(predict_type),
ctypes.c_int(start_iteration),
ctypes.c_int(num_iteration),
c_str(self.pred_parameter),
ctypes.c_int(matrix_type),
out_shape.ctypes.data_as(ctypes.POINTER(ctypes.c_int64)),
ctypes.byref(out_ptr_indptr),
ctypes.byref(out_ptr_indices),
ctypes.byref(out_ptr_data)))
matrices = self.__create_sparse_native(csr, out_shape, out_ptr_indptr, out_ptr_indices, out_ptr_data,
type_ptr_indptr, type_ptr_data, is_csr=True)
nrow = len(csr.indptr) - 1
return matrices, nrow
if predict_type == C_API_PREDICT_CONTRIB:
return inner_predict_sparse(csr, start_iteration, num_iteration, predict_type)
nrow = len(csr.indptr) - 1
if nrow > MAX_INT32:
sections = [0] + list(np.arange(start=MAX_INT32, stop=nrow, step=MAX_INT32)) + [nrow]
# __get_num_preds() cannot work with nrow > MAX_INT32, so calculate overall number of predictions piecemeal
n_preds = [self.__get_num_preds(start_iteration, num_iteration, i, predict_type) for i in np.diff(sections)]
n_preds_sections = np.array([0] + n_preds, dtype=np.intp).cumsum()
preds = np.zeros(sum(n_preds), dtype=np.float64)
for (start_idx, end_idx), (start_idx_pred, end_idx_pred) in zip(zip(sections, sections[1:]),
zip(n_preds_sections, n_preds_sections[1:])):
# avoid memory consumption by arrays concatenation operations
inner_predict(csr[start_idx:end_idx], start_iteration, num_iteration, predict_type, preds[start_idx_pred:end_idx_pred])
return preds, nrow
else:
return inner_predict(csr, start_iteration, num_iteration, predict_type)
def __pred_for_csc(self, csc, start_iteration, num_iteration, predict_type):
"""Predict for a CSC data."""
def inner_predict_sparse(csc, start_iteration, num_iteration, predict_type):
ptr_indptr, type_ptr_indptr, __ = c_int_array(csc.indptr)
ptr_data, type_ptr_data, _ = c_float_array(csc.data)
csc_indices = csc.indices.astype(np.int32, copy=False)
matrix_type = C_API_MATRIX_TYPE_CSC
if type_ptr_indptr == C_API_DTYPE_INT32:
out_ptr_indptr = ctypes.POINTER(ctypes.c_int32)()
else:
out_ptr_indptr = ctypes.POINTER(ctypes.c_int64)()
out_ptr_indices = ctypes.POINTER(ctypes.c_int32)()
if type_ptr_data == C_API_DTYPE_FLOAT32:
out_ptr_data = ctypes.POINTER(ctypes.c_float)()
else:
out_ptr_data = ctypes.POINTER(ctypes.c_double)()
out_shape = np.zeros(2, dtype=np.int64)
_safe_call(_LIB.LGBM_BoosterPredictSparseOutput(
self.handle,
ptr_indptr,
ctypes.c_int(type_ptr_indptr),
csc_indices.ctypes.data_as(ctypes.POINTER(ctypes.c_int32)),
ptr_data,
ctypes.c_int(type_ptr_data),
ctypes.c_int64(len(csc.indptr)),
ctypes.c_int64(len(csc.data)),
ctypes.c_int64(csc.shape[0]),
ctypes.c_int(predict_type),
ctypes.c_int(start_iteration),
ctypes.c_int(num_iteration),
c_str(self.pred_parameter),
ctypes.c_int(matrix_type),
out_shape.ctypes.data_as(ctypes.POINTER(ctypes.c_int64)),
ctypes.byref(out_ptr_indptr),
ctypes.byref(out_ptr_indices),
ctypes.byref(out_ptr_data)))
matrices = self.__create_sparse_native(csc, out_shape, out_ptr_indptr, out_ptr_indices, out_ptr_data,
type_ptr_indptr, type_ptr_data, is_csr=False)
nrow = csc.shape[0]
return matrices, nrow
nrow = csc.shape[0]
if nrow > MAX_INT32:
return self.__pred_for_csr(csc.tocsr(), start_iteration, num_iteration, predict_type)
if predict_type == C_API_PREDICT_CONTRIB:
return inner_predict_sparse(csc, start_iteration, num_iteration, predict_type)
n_preds = self.__get_num_preds(start_iteration, num_iteration, nrow, predict_type)
preds = np.zeros(n_preds, dtype=np.float64)
out_num_preds = ctypes.c_int64(0)
ptr_indptr, type_ptr_indptr, __ = c_int_array(csc.indptr)
ptr_data, type_ptr_data, _ = c_float_array(csc.data)
assert csc.shape[0] <= MAX_INT32
csc_indices = csc.indices.astype(np.int32, copy=False)
_safe_call(_LIB.LGBM_BoosterPredictForCSC(
self.handle,
ptr_indptr,
ctypes.c_int(type_ptr_indptr),
csc_indices.ctypes.data_as(ctypes.POINTER(ctypes.c_int32)),
ptr_data,
ctypes.c_int(type_ptr_data),
ctypes.c_int64(len(csc.indptr)),
ctypes.c_int64(len(csc.data)),
ctypes.c_int64(csc.shape[0]),
ctypes.c_int(predict_type),
ctypes.c_int(start_iteration),
ctypes.c_int(num_iteration),
c_str(self.pred_parameter),
ctypes.byref(out_num_preds),
preds.ctypes.data_as(ctypes.POINTER(ctypes.c_double))))
if n_preds != out_num_preds.value:
raise ValueError("Wrong length for predict results")
return preds, nrow
def current_iteration(self):
"""Get the index of the current iteration.
Returns
-------
cur_iter : int
The index of the current iteration.
"""
out_cur_iter = ctypes.c_int(0)
_safe_call(_LIB.LGBM_BoosterGetCurrentIteration(
self.handle,
ctypes.byref(out_cur_iter)))
return out_cur_iter.value
class Dataset:
"""Dataset in LightGBM."""
def __init__(self, data, label=None, reference=None,
weight=None, group=None, init_score=None, silent=False,
feature_name='auto', categorical_feature='auto', params=None,
free_raw_data=True):
"""Initialize Dataset.
Parameters
----------
data : string, numpy array, pandas DataFrame, H2O DataTable's Frame, scipy.sparse or list of numpy arrays
Data source of Dataset.
If string, it represents the path to txt file.
label : list, numpy 1-D array, pandas Series / one-column DataFrame or None, optional (default=None)
Label of the data.
reference : Dataset or None, optional (default=None)
If this is Dataset for validation, training data should be used as reference.
weight : list, numpy 1-D array, pandas Series or None, optional (default=None)
Weight for each instance.
group : list, numpy 1-D array, pandas Series or None, optional (default=None)
Group/query data.
Only used in the learning-to-rank task.
sum(group) = n_samples.
For example, if you have a 100-document dataset with ``group = [10, 20, 40, 10, 10, 10]``, that means that you have 6 groups,
where the first 10 records are in the first group, records 11-30 are in the second group, records 31-70 are in the third group, etc.
init_score : list, numpy 1-D array, pandas Series or None, optional (default=None)
Init score for Dataset.
silent : bool, optional (default=False)
Whether to print messages during construction.
feature_name : list of strings or 'auto', optional (default="auto")
Feature names.
If 'auto' and data is pandas DataFrame, data columns names are used.
categorical_feature : list of strings or int, or 'auto', optional (default="auto")
Categorical features.
If list of int, interpreted as indices.
If list of strings, interpreted as feature names (need to specify ``feature_name`` as well).
If 'auto' and data is pandas DataFrame, pandas unordered categorical columns are used.
All values in categorical features should be less than int32 max value (2147483647).
Large values could be memory consuming. Consider using consecutive integers starting from zero.
All negative values in categorical features will be treated as missing values.
The output cannot be monotonically constrained with respect to a categorical feature.
params : dict or None, optional (default=None)
Other parameters for Dataset.
free_raw_data : bool, optional (default=True)
If True, raw data is freed after constructing inner Dataset.
"""
self.handle = None
self.data = data
self.label = label
self.reference = reference
self.weight = weight
self.group = group
self.init_score = init_score
self.silent = silent
self.feature_name = feature_name
self.categorical_feature = categorical_feature
self.params = deepcopy(params)
self.free_raw_data = free_raw_data
self.used_indices = None
self.need_slice = True
self._predictor = None
self.pandas_categorical = None
self.params_back_up = None
self.feature_penalty = None
self.monotone_constraints = None
self.version = 0
def __del__(self):
try:
self._free_handle()
except AttributeError:
pass
def get_params(self):
"""Get the used parameters in the Dataset.
Returns
-------
params : dict or None
The used parameters in this Dataset object.
"""
if self.params is not None:
# no min_data, nthreads and verbose in this function
dataset_params = _ConfigAliases.get("bin_construct_sample_cnt",
"categorical_feature",
"data_random_seed",
"enable_bundle",
"feature_pre_filter",
"forcedbins_filename",
"group_column",
"header",
"ignore_column",
"is_enable_sparse",
"label_column",
"linear_tree",
"max_bin",
"max_bin_by_feature",
"min_data_in_bin",
"pre_partition",
"two_round",
"use_missing",
"weight_column",
"zero_as_missing")
return {k: v for k, v in self.params.items() if k in dataset_params}
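# Illustrative sketch (not part of the library): ``get_params`` returns only the
# Dataset-related parameters listed above, so unrelated Booster parameters are filtered
# out. Assuming the package is imported as ``lgb`` and ``X``/``y`` are hypothetical
# user-provided numpy arrays:
#
#     ds = lgb.Dataset(X, label=y, params={"max_bin": 63, "learning_rate": 0.1})
#     ds.get_params()  # -> {'max_bin': 63}; 'learning_rate' is not a Dataset parameter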
def _free_handle(self):
if self.handle is not None:
_safe_call(_LIB.LGBM_DatasetFree(self.handle))
self.handle = None
self.need_slice = True
if self.used_indices is not None:
self.data = None
return self
def _set_init_score_by_predictor(self, predictor, data, used_indices=None):
data_has_header = False
if isinstance(data, str):
# check whether data has a header
data_has_header = any(self.params.get(alias, False) for alias in _ConfigAliases.get("header"))
num_data = self.num_data()
if predictor is not None:
init_score = predictor.predict(data,
raw_score=True,
data_has_header=data_has_header,
is_reshape=False)
if used_indices is not None:
assert not self.need_slice
if isinstance(data, str):
sub_init_score = np.zeros(num_data * predictor.num_class, dtype=np.float32)
assert num_data == len(used_indices)
for i in range(len(used_indices)):
for j in range(predictor.num_class):
sub_init_score[i * predictor.num_class + j] = init_score[used_indices[i] * predictor.num_class + j]
init_score = sub_init_score
if predictor.num_class > 1:
# need to regroup init_score
new_init_score = np.zeros(init_score.size, dtype=np.float32)
for i in range(num_data):
for j in range(predictor.num_class):
new_init_score[j * num_data + i] = init_score[i * predictor.num_class + j]
init_score = new_init_score
elif self.init_score is not None:
init_score = np.zeros(self.init_score.shape, dtype=np.float32)
else:
return self
self.set_init_score(init_score)
def _lazy_init(self, data, label=None, reference=None,
weight=None, group=None, init_score=None, predictor=None,
silent=False, feature_name='auto',
categorical_feature='auto', params=None):
if data is None:
self.handle = None
return self
if reference is not None:
self.pandas_categorical = reference.pandas_categorical
categorical_feature = reference.categorical_feature
data, feature_name, categorical_feature, self.pandas_categorical = _data_from_pandas(data,
feature_name,
categorical_feature,
self.pandas_categorical)
label = _label_from_pandas(label)
# process for args
params = {} if params is None else params
args_names = (getattr(self.__class__, '_lazy_init')
.__code__
.co_varnames[:getattr(self.__class__, '_lazy_init').__code__.co_argcount])
for key, _ in params.items():
if key in args_names:
_log_warning(f'{key} keyword has been found in `params` and will be ignored.\n'
f'Please use {key} argument of the Dataset constructor to pass this parameter.')
# the user can set verbosity via params; it takes higher priority
if not any(verbose_alias in params for verbose_alias in _ConfigAliases.get("verbosity")) and silent:
params["verbose"] = -1
# get categorical features
if categorical_feature is not None:
categorical_indices = set()
feature_dict = {}
if feature_name is not None:
feature_dict = {name: i for i, name in enumerate(feature_name)}
for name in categorical_feature:
if isinstance(name, str) and name in feature_dict:
categorical_indices.add(feature_dict[name])
elif isinstance(name, int):
categorical_indices.add(name)
else:
raise TypeError(f"Wrong type({type(name).__name__}) or unknown name({name}) in categorical_feature")
if categorical_indices:
for cat_alias in _ConfigAliases.get("categorical_feature"):
if cat_alias in params:
_log_warning(f'{cat_alias} in param dict is overridden.')
params.pop(cat_alias, None)
params['categorical_column'] = sorted(categorical_indices)
params_str = param_dict_to_str(params)
self.params = params
# process for reference dataset
ref_dataset = None
if isinstance(reference, Dataset):
ref_dataset = reference.construct().handle
elif reference is not None:
raise TypeError('Reference dataset should be None or dataset instance')
# start construct data
if isinstance(data, str):
self.handle = ctypes.c_void_p()
_safe_call(_LIB.LGBM_DatasetCreateFromFile(
c_str(data),
c_str(params_str),
ref_dataset,
ctypes.byref(self.handle)))
elif isinstance(data, scipy.sparse.csr_matrix):
self.__init_from_csr(data, params_str, ref_dataset)
elif isinstance(data, scipy.sparse.csc_matrix):
self.__init_from_csc(data, params_str, ref_dataset)
elif isinstance(data, np.ndarray):
self.__init_from_np2d(data, params_str, ref_dataset)
elif isinstance(data, list) and len(data) > 0 and all(isinstance(x, np.ndarray) for x in data):
self.__init_from_list_np2d(data, params_str, ref_dataset)
elif isinstance(data, dt_DataTable):
self.__init_from_np2d(data.to_numpy(), params_str, ref_dataset)
else:
try:
csr = scipy.sparse.csr_matrix(data)
self.__init_from_csr(csr, params_str, ref_dataset)
except BaseException:
raise TypeError(f'Cannot initialize Dataset from {type(data).__name__}')
if label is not None:
self.set_label(label)
if self.get_label() is None:
raise ValueError("Label should not be None")
if weight is not None:
self.set_weight(weight)
if group is not None:
self.set_group(group)
if isinstance(predictor, _InnerPredictor):
if self._predictor is None and init_score is not None:
_log_warning("The init_score will be overridden by the prediction of init_model.")
self._set_init_score_by_predictor(predictor, data)
elif init_score is not None:
self.set_init_score(init_score)
elif predictor is not None:
raise TypeError(f'Wrong predictor type {type(predictor).__name__}')
# set feature names
return self.set_feature_name(feature_name)
def __init_from_np2d(self, mat, params_str, ref_dataset):
"""Initialize data from a 2-D numpy matrix."""
if len(mat.shape) != 2:
raise ValueError('Input numpy.ndarray must be 2 dimensional')
self.handle = ctypes.c_void_p()
if mat.dtype == np.float32 or mat.dtype == np.float64:
data = np.array(mat.reshape(mat.size), dtype=mat.dtype, copy=False)
else: # non-float data must be converted to float, which requires a copy
data = np.array(mat.reshape(mat.size), dtype=np.float32)
ptr_data, type_ptr_data, _ = c_float_array(data)
_safe_call(_LIB.LGBM_DatasetCreateFromMat(
ptr_data,
ctypes.c_int(type_ptr_data),
ctypes.c_int32(mat.shape[0]),
ctypes.c_int32(mat.shape[1]),
ctypes.c_int(C_API_IS_ROW_MAJOR),
c_str(params_str),
ref_dataset,
ctypes.byref(self.handle)))
return self
def __init_from_list_np2d(self, mats, params_str, ref_dataset):
"""Initialize data from a list of 2-D numpy matrices."""
ncol = mats[0].shape[1]
nrow = np.zeros((len(mats),), np.int32)
if mats[0].dtype == np.float64:
ptr_data = (ctypes.POINTER(ctypes.c_double) * len(mats))()
else:
ptr_data = (ctypes.POINTER(ctypes.c_float) * len(mats))()
holders = []
type_ptr_data = None
for i, mat in enumerate(mats):
if len(mat.shape) != 2:
raise ValueError('Input numpy.ndarray must be 2 dimensional')
if mat.shape[1] != ncol:
raise ValueError('Input arrays must have same number of columns')
nrow[i] = mat.shape[0]
if mat.dtype == np.float32 or mat.dtype == np.float64:
mats[i] = np.array(mat.reshape(mat.size), dtype=mat.dtype, copy=False)
else: # non-float data must be converted to float, which requires a copy
mats[i] = np.array(mat.reshape(mat.size), dtype=np.float32)
chunk_ptr_data, chunk_type_ptr_data, holder = c_float_array(mats[i])
if type_ptr_data is not None and chunk_type_ptr_data != type_ptr_data:
raise ValueError('Input chunks must have same type')
ptr_data[i] = chunk_ptr_data
type_ptr_data = chunk_type_ptr_data
holders.append(holder)
self.handle = ctypes.c_void_p()
_safe_call(_LIB.LGBM_DatasetCreateFromMats(
ctypes.c_int32(len(mats)),
ctypes.cast(ptr_data, ctypes.POINTER(ctypes.POINTER(ctypes.c_double))),
ctypes.c_int(type_ptr_data),
nrow.ctypes.data_as(ctypes.POINTER(ctypes.c_int32)),
ctypes.c_int32(ncol),
ctypes.c_int(C_API_IS_ROW_MAJOR),
c_str(params_str),
ref_dataset,
ctypes.byref(self.handle)))
return self
def __init_from_csr(self, csr, params_str, ref_dataset):
"""Initialize data from a CSR matrix."""
if len(csr.indices) != len(csr.data):
raise ValueError(f'Length mismatch: {len(csr.indices)} vs {len(csr.data)}')
self.handle = ctypes.c_void_p()
ptr_indptr, type_ptr_indptr, __ = c_int_array(csr.indptr)
ptr_data, type_ptr_data, _ = c_float_array(csr.data)
assert csr.shape[1] <= MAX_INT32
csr_indices = csr.indices.astype(np.int32, copy=False)
_safe_call(_LIB.LGBM_DatasetCreateFromCSR(
ptr_indptr,
ctypes.c_int(type_ptr_indptr),
csr_indices.ctypes.data_as(ctypes.POINTER(ctypes.c_int32)),
ptr_data,
ctypes.c_int(type_ptr_data),
ctypes.c_int64(len(csr.indptr)),
ctypes.c_int64(len(csr.data)),
ctypes.c_int64(csr.shape[1]),
c_str(params_str),
ref_dataset,
ctypes.byref(self.handle)))
return self
def __init_from_csc(self, csc, params_str, ref_dataset):
"""Initialize data from a CSC matrix."""
if len(csc.indices) != len(csc.data):
raise ValueError(f'Length mismatch: {len(csc.indices)} vs {len(csc.data)}')
self.handle = ctypes.c_void_p()
ptr_indptr, type_ptr_indptr, __ = c_int_array(csc.indptr)
ptr_data, type_ptr_data, _ = c_float_array(csc.data)
assert csc.shape[0] <= MAX_INT32
csc_indices = csc.indices.astype(np.int32, copy=False)
_safe_call(_LIB.LGBM_DatasetCreateFromCSC(
ptr_indptr,
ctypes.c_int(type_ptr_indptr),
csc_indices.ctypes.data_as(ctypes.POINTER(ctypes.c_int32)),
ptr_data,
ctypes.c_int(type_ptr_data),
ctypes.c_int64(len(csc.indptr)),
ctypes.c_int64(len(csc.data)),
ctypes.c_int64(csc.shape[0]),
c_str(params_str),
ref_dataset,
ctypes.byref(self.handle)))
return self
def construct(self):
"""Lazy init.
Returns
-------
self : Dataset
Constructed Dataset object.
"""
if self.handle is None:
if self.reference is not None:
reference_params = self.reference.get_params()
if self.get_params() != reference_params:
_log_warning('Overriding the parameters from Reference Dataset.')
self._update_params(reference_params)
if self.used_indices is None:
# create valid
self._lazy_init(self.data, label=self.label, reference=self.reference,
weight=self.weight, group=self.group,
init_score=self.init_score, predictor=self._predictor,
silent=self.silent, feature_name=self.feature_name, params=self.params)
else:
# construct subset
used_indices = list_to_1d_numpy(self.used_indices, np.int32, name='used_indices')
assert used_indices.flags.c_contiguous
if self.reference.group is not None:
group_info = np.array(self.reference.group).astype(np.int32, copy=False)
_, self.group = np.unique(np.repeat(range(len(group_info)), repeats=group_info)[self.used_indices],
return_counts=True)
self.handle = ctypes.c_void_p()
params_str = param_dict_to_str(self.params)
_safe_call(_LIB.LGBM_DatasetGetSubset(
self.reference.construct().handle,
used_indices.ctypes.data_as(ctypes.POINTER(ctypes.c_int32)),
ctypes.c_int32(used_indices.shape[0]),
c_str(params_str),
ctypes.byref(self.handle)))
if not self.free_raw_data:
self.get_data()
if self.group is not None:
self.set_group(self.group)
if self.get_label() is None:
raise ValueError("Label should not be None.")
if isinstance(self._predictor, _InnerPredictor) and self._predictor is not self.reference._predictor:
self.get_data()
self._set_init_score_by_predictor(self._predictor, self.data, used_indices)
else:
# create train
self._lazy_init(self.data, label=self.label,
weight=self.weight, group=self.group,
init_score=self.init_score, predictor=self._predictor,
silent=self.silent, feature_name=self.feature_name,
categorical_feature=self.categorical_feature, params=self.params)
if self.free_raw_data:
self.data = None
return self
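# Illustrative sketch (not part of the library): ``construct`` is called lazily, so the
# underlying native Dataset is only built when it is first needed (e.g. by the Booster
# constructor). Assuming ``lgb`` is the package alias and ``X``/``y`` are hypothetical
# numpy arrays:
#
#     ds = lgb.Dataset(X, label=y)      # nothing is built yet, handle is None
#     ds.construct()                    # binning and the native Dataset are created here
#     ds.num_data(), ds.num_feature()   # now valid; would raise before construct()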
def create_valid(self, data, label=None, weight=None, group=None,
init_score=None, silent=False, params=None):
"""Create validation data align with current Dataset.
Parameters
----------
data : string, numpy array, pandas DataFrame, H2O DataTable's Frame, scipy.sparse or list of numpy arrays
Data source of Dataset.
If string, it represents the path to txt file.
label : list, numpy 1-D array, pandas Series / one-column DataFrame or None, optional (default=None)
Label of the data.
weight : list, numpy 1-D array, pandas Series or None, optional (default=None)
Weight for each instance.
group : list, numpy 1-D array, pandas Series or None, optional (default=None)
Group/query data.
Only used in the learning-to-rank task.
sum(group) = n_samples.
For example, if you have a 100-document dataset with ``group = [10, 20, 40, 10, 10, 10]``, that means that you have 6 groups,
where the first 10 records are in the first group, records 11-30 are in the second group, records 31-70 are in the third group, etc.
init_score : list, numpy 1-D array, pandas Series or None, optional (default=None)
Init score for Dataset.
silent : bool, optional (default=False)
Whether to print messages during construction.
params : dict or None, optional (default=None)
Other parameters for validation Dataset.
Returns
-------
valid : Dataset
Validation Dataset with reference to self.
"""
ret = Dataset(data, label=label, reference=self,
weight=weight, group=group, init_score=init_score,
silent=silent, params=params, free_raw_data=self.free_raw_data)
ret._predictor = self._predictor
ret.pandas_categorical = self.pandas_categorical
return ret
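# Illustrative sketch (not part of the library): validation data must share the bin
# mappers of the training data, which is what ``reference=self`` achieves. Assuming
# hypothetical arrays ``X_train``, ``y_train``, ``X_valid``, ``y_valid``:
#
#     train_ds = lgb.Dataset(X_train, label=y_train)
#     valid_ds = train_ds.create_valid(X_valid, label=y_valid)
#     # roughly equivalent to lgb.Dataset(X_valid, label=y_valid, reference=train_ds)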
def subset(self, used_indices, params=None):
"""Get subset of current Dataset.
Parameters
----------
used_indices : list of int
Indices used to create the subset.
params : dict or None, optional (default=None)
These parameters will be passed to Dataset constructor.
Returns
-------
subset : Dataset
Subset of the current Dataset.
"""
if params is None:
params = self.params
ret = Dataset(None, reference=self, feature_name=self.feature_name,
categorical_feature=self.categorical_feature, params=params,
free_raw_data=self.free_raw_data)
ret._predictor = self._predictor
ret.pandas_categorical = self.pandas_categorical
ret.used_indices = sorted(used_indices)
return ret
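# Illustrative sketch (not part of the library): ``subset`` does not copy raw data; it
# records the indices and slices the constructed reference Dataset on demand (this is
# how cross-validation folds are built). Assuming a constructed ``train_ds``:
#
#     fold = train_ds.subset([0, 2, 5, 7]).construct()
#     fold.num_data()  # -> 4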
def save_binary(self, filename):
"""Save Dataset to a binary file.
.. note::
Please note that `init_score` is not saved in binary file.
If you need it, please set it again after loading Dataset.
Parameters
----------
filename : string
Name of the output file.
Returns
-------
self : Dataset
Returns self.
"""
_safe_call(_LIB.LGBM_DatasetSaveBinary(
self.construct().handle,
c_str(filename)))
return self
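# Illustrative sketch (not part of the library): a binary Dataset file can be reloaded
# by passing its path as ``data``, which skips re-binning on subsequent runs. Note that
# init_score is not stored in the binary file. ``"train.bin"`` is a hypothetical path:
#
#     lgb.Dataset(X, label=y).save_binary("train.bin")
#     ds = lgb.Dataset("train.bin")   # fast reload from the binary file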
def _update_params(self, params):
if not params:
return self
params = deepcopy(params)
def update():
if not self.params:
self.params = params
else:
self.params_back_up = deepcopy(self.params)
self.params.update(params)
if self.handle is None:
update()
elif params is not None:
ret = _LIB.LGBM_DatasetUpdateParamChecking(
c_str(param_dict_to_str(self.params)),
c_str(param_dict_to_str(params)))
if ret != 0:
# could be updated if data is not freed
if self.data is not None:
update()
self._free_handle()
else:
raise LightGBMError(_LIB.LGBM_GetLastError().decode('utf-8'))
return self
def _reverse_update_params(self):
if self.handle is None:
self.params = deepcopy(self.params_back_up)
self.params_back_up = None
return self
def set_field(self, field_name, data):
"""Set property into the Dataset.
Parameters
----------
field_name : string
The field name of the information.
data : list, numpy 1-D array, pandas Series or None
The array of data to be set.
Returns
-------
self : Dataset
Dataset with set property.
"""
if self.handle is None:
raise Exception(f"Cannot set {field_name} before construct dataset")
if data is None:
# set to None
_safe_call(_LIB.LGBM_DatasetSetField(
self.handle,
c_str(field_name),
None,
ctypes.c_int(0),
ctypes.c_int(FIELD_TYPE_MAPPER[field_name])))
return self
dtype = np.float32
if field_name == 'group':
dtype = np.int32
elif field_name == 'init_score':
dtype = np.float64
data = list_to_1d_numpy(data, dtype, name=field_name)
if data.dtype == np.float32 or data.dtype == np.float64:
ptr_data, type_data, _ = c_float_array(data)
elif data.dtype == np.int32:
ptr_data, type_data, _ = c_int_array(data)
else:
raise TypeError(f"Expected np.float32/64 or np.int32, met type({data.dtype})")
if type_data != FIELD_TYPE_MAPPER[field_name]:
raise TypeError("Input type error for set_field")
_safe_call(_LIB.LGBM_DatasetSetField(
self.handle,
c_str(field_name),
ptr_data,
ctypes.c_int(len(data)),
ctypes.c_int(type_data)))
self.version += 1
return self
def get_field(self, field_name):
"""Get property from the Dataset.
Parameters
----------
field_name : string
The field name of the information.
Returns
-------
info : numpy array
A numpy array with information from the Dataset.
"""
if self.handle is None:
raise Exception(f"Cannot get {field_name} before construct Dataset")
tmp_out_len = ctypes.c_int(0)
out_type = ctypes.c_int(0)
ret = ctypes.POINTER(ctypes.c_void_p)()
_safe_call(_LIB.LGBM_DatasetGetField(
self.handle,
c_str(field_name),
ctypes.byref(tmp_out_len),
ctypes.byref(ret),
ctypes.byref(out_type)))
if out_type.value != FIELD_TYPE_MAPPER[field_name]:
raise TypeError("Return type error for get_field")
if tmp_out_len.value == 0:
return None
if out_type.value == C_API_DTYPE_INT32:
return cint32_array_to_numpy(ctypes.cast(ret, ctypes.POINTER(ctypes.c_int32)), tmp_out_len.value)
elif out_type.value == C_API_DTYPE_FLOAT32:
return cfloat32_array_to_numpy(ctypes.cast(ret, ctypes.POINTER(ctypes.c_float)), tmp_out_len.value)
elif out_type.value == C_API_DTYPE_FLOAT64:
return cfloat64_array_to_numpy(ctypes.cast(ret, ctypes.POINTER(ctypes.c_double)), tmp_out_len.value)
else:
raise TypeError("Unknown type")
def set_categorical_feature(self, categorical_feature):
"""Set categorical features.
Parameters
----------
categorical_feature : list of int or strings
Names or indices of categorical features.
Returns
-------
self : Dataset
Dataset with set categorical features.
"""
if self.categorical_feature == categorical_feature:
return self
if self.data is not None:
if self.categorical_feature is None:
self.categorical_feature = categorical_feature
return self._free_handle()
elif categorical_feature == 'auto':
_log_warning('Using categorical_feature in Dataset.')
return self
else:
_log_warning('categorical_feature in Dataset is overridden.\n'
f'New categorical_feature is {sorted(list(categorical_feature))}')
self.categorical_feature = categorical_feature
return self._free_handle()
else:
raise LightGBMError("Cannot set categorical feature after freed raw data, "
"set free_raw_data=False when construct Dataset to avoid this.")
def _set_predictor(self, predictor):
"""Set predictor for continued training.
It is not recommended for the user to call this function.
Please use init_model argument in engine.train() or engine.cv() instead.
"""
if predictor is self._predictor and (predictor is None or predictor.current_iteration() == self._predictor.current_iteration()):
return self
if self.handle is None:
self._predictor = predictor
elif self.data is not None:
self._predictor = predictor
self._set_init_score_by_predictor(self._predictor, self.data)
elif self.used_indices is not None and self.reference is not None and self.reference.data is not None:
self._predictor = predictor
self._set_init_score_by_predictor(self._predictor, self.reference.data, self.used_indices)
else:
raise LightGBMError("Cannot set predictor after freed raw data, "
"set free_raw_data=False when construct Dataset to avoid this.")
return self
def set_reference(self, reference):
"""Set reference Dataset.
Parameters
----------
reference : Dataset
Reference that is used as a template to construct the current Dataset.
Returns
-------
self : Dataset
Dataset with set reference.
"""
self.set_categorical_feature(reference.categorical_feature) \
.set_feature_name(reference.feature_name) \
._set_predictor(reference._predictor)
# we're done if self and reference share a common upstream reference
if self.get_ref_chain().intersection(reference.get_ref_chain()):
return self
if self.data is not None:
self.reference = reference
return self._free_handle()
else:
raise LightGBMError("Cannot set reference after freed raw data, "
"set free_raw_data=False when construct Dataset to avoid this.")
def set_feature_name(self, feature_name):
"""Set feature name.
Parameters
----------
feature_name : list of strings
Feature names.
Returns
-------
self : Dataset
Dataset with set feature name.
"""
if feature_name != 'auto':
self.feature_name = feature_name
if self.handle is not None and feature_name is not None and feature_name != 'auto':
if len(feature_name) != self.num_feature():
raise ValueError(f"Length of feature_name({len(feature_name)}) and num_feature({self.num_feature()}) don't match")
c_feature_name = [c_str(name) for name in feature_name]
_safe_call(_LIB.LGBM_DatasetSetFeatureNames(
self.handle,
c_array(ctypes.c_char_p, c_feature_name),
ctypes.c_int(len(feature_name))))
return self
def set_label(self, label):
"""Set label of Dataset.
Parameters
----------
label : list, numpy 1-D array, pandas Series / one-column DataFrame or None
The label information to be set into Dataset.
Returns
-------
self : Dataset
Dataset with set label.
"""
self.label = label
if self.handle is not None:
label = list_to_1d_numpy(_label_from_pandas(label), name='label')
self.set_field('label', label)
self.label = self.get_field('label') # original values can be modified at cpp side
return self
def set_weight(self, weight):
"""Set weight of each instance.
Parameters
----------
weight : list, numpy 1-D array, pandas Series or None
Weight to be set for each data point.
Returns
-------
self : Dataset
Dataset with set weight.
"""
if weight is not None and np.all(weight == 1):
weight = None
self.weight = weight
if self.handle is not None and weight is not None:
weight = list_to_1d_numpy(weight, name='weight')
self.set_field('weight', weight)
self.weight = self.get_field('weight') # original values can be modified at cpp side
return self
def set_init_score(self, init_score):
"""Set init score of Booster to start from.
Parameters
----------
init_score : list, numpy 1-D array, pandas Series or None
Init score for Booster.
Returns
-------
self : Dataset
Dataset with set init score.
"""
self.init_score = init_score
if self.handle is not None and init_score is not None:
init_score = list_to_1d_numpy(init_score, np.float64, name='init_score')
self.set_field('init_score', init_score)
self.init_score = self.get_field('init_score') # original values can be modified at cpp side
return self
def set_group(self, group):
"""Set group size of Dataset (used for ranking).
Parameters
----------
group : list, numpy 1-D array, pandas Series or None
Group/query data.
Only used in the learning-to-rank task.
sum(group) = n_samples.
For example, if you have a 100-document dataset with ``group = [10, 20, 40, 10, 10, 10]``, that means that you have 6 groups,
where the first 10 records are in the first group, records 11-30 are in the second group, records 31-70 are in the third group, etc.
Returns
-------
self : Dataset
Dataset with set group.
"""
self.group = group
if self.handle is not None and group is not None:
group = list_to_1d_numpy(group, np.int32, name='group')
self.set_field('group', group)
return self
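# Illustrative sketch (not part of the library): for ranking tasks, ``group`` holds the
# query sizes and must sum to the number of rows. With a hypothetical 7-row dataset
# split into queries of 3, 2 and 2 documents:
#
#     ds = lgb.Dataset(X, label=y, group=[3, 2, 2])
#     # internally LightGBM stores the boundaries [0, 3, 5, 7]; get_group() converts
#     # them back to sizes via np.diff, returning [3, 2, 2]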
def get_feature_name(self):
"""Get the names of columns (features) in the Dataset.
Returns
-------
feature_names : list
The names of columns (features) in the Dataset.
"""
if self.handle is None:
raise LightGBMError("Cannot get feature_name before construct dataset")
num_feature = self.num_feature()
tmp_out_len = ctypes.c_int(0)
reserved_string_buffer_size = 255
required_string_buffer_size = ctypes.c_size_t(0)
string_buffers = [ctypes.create_string_buffer(reserved_string_buffer_size) for _ in range(num_feature)]
ptr_string_buffers = (ctypes.c_char_p * num_feature)(*map(ctypes.addressof, string_buffers))
_safe_call(_LIB.LGBM_DatasetGetFeatureNames(
self.handle,
ctypes.c_int(num_feature),
ctypes.byref(tmp_out_len),
ctypes.c_size_t(reserved_string_buffer_size),
ctypes.byref(required_string_buffer_size),
ptr_string_buffers))
if num_feature != tmp_out_len.value:
raise ValueError("Length of feature names doesn't equal with num_feature")
actual_string_buffer_size = required_string_buffer_size.value
# if buffer length is not long enough, reallocate buffers
if reserved_string_buffer_size < actual_string_buffer_size:
string_buffers = [ctypes.create_string_buffer(actual_string_buffer_size) for _ in range(num_feature)]
ptr_string_buffers = (ctypes.c_char_p * num_feature)(*map(ctypes.addressof, string_buffers))
_safe_call(_LIB.LGBM_DatasetGetFeatureNames(
self.handle,
ctypes.c_int(num_feature),
ctypes.byref(tmp_out_len),
ctypes.c_size_t(actual_string_buffer_size),
ctypes.byref(required_string_buffer_size),
ptr_string_buffers))
return [string_buffers[i].value.decode('utf-8') for i in range(num_feature)]
def get_label(self):
"""Get the label of the Dataset.
Returns
-------
label : numpy array or None
The label information from the Dataset.
"""
if self.label is None:
self.label = self.get_field('label')
return self.label
def get_weight(self):
"""Get the weight of the Dataset.
Returns
-------
weight : numpy array or None
Weight for each data point from the Dataset.
"""
if self.weight is None:
self.weight = self.get_field('weight')
return self.weight
def get_init_score(self):
"""Get the initial score of the Dataset.
Returns
-------
init_score : numpy array or None
Init score of Booster.
"""
if self.init_score is None:
self.init_score = self.get_field('init_score')
return self.init_score
def get_data(self):
"""Get the raw data of the Dataset.
Returns
-------
data : string, numpy array, pandas DataFrame, H2O DataTable's Frame, scipy.sparse, list of numpy arrays or None
Raw data used in the Dataset construction.
"""
if self.handle is None:
raise Exception("Cannot get data before construct Dataset")
if self.need_slice and self.used_indices is not None and self.reference is not None:
self.data = self.reference.data
if self.data is not None:
if isinstance(self.data, np.ndarray) or scipy.sparse.issparse(self.data):
self.data = self.data[self.used_indices, :]
elif isinstance(self.data, pd_DataFrame):
self.data = self.data.iloc[self.used_indices].copy()
elif isinstance(self.data, dt_DataTable):
self.data = self.data[self.used_indices, :]
else:
_log_warning(f"Cannot subset {type(self.data).__name__} type of raw data.\n"
"Returning original raw data")
self.need_slice = False
if self.data is None:
raise LightGBMError("Cannot call `get_data` after freed raw data, "
"set free_raw_data=False when construct Dataset to avoid this.")
return self.data
def get_group(self):
"""Get the group of the Dataset.
Returns
-------
group : numpy array or None
Group/query data.
Only used in the learning-to-rank task.
sum(group) = n_samples.
For example, if you have a 100-document dataset with ``group = [10, 20, 40, 10, 10, 10]``, that means that you have 6 groups,
where the first 10 records are in the first group, records 11-30 are in the second group, records 31-70 are in the third group, etc.
"""
if self.group is None:
self.group = self.get_field('group')
if self.group is not None:
# group data from LightGBM are boundary indices; convert them to group sizes
self.group = np.diff(self.group)
return self.group
def num_data(self):
"""Get the number of rows in the Dataset.
Returns
-------
number_of_rows : int
The number of rows in the Dataset.
"""
if self.handle is not None:
ret = ctypes.c_int(0)
_safe_call(_LIB.LGBM_DatasetGetNumData(self.handle,
ctypes.byref(ret)))
return ret.value
else:
raise LightGBMError("Cannot get num_data before construct dataset")
def num_feature(self):
"""Get the number of columns (features) in the Dataset.
Returns
-------
number_of_columns : int
The number of columns (features) in the Dataset.
"""
if self.handle is not None:
ret = ctypes.c_int(0)
_safe_call(_LIB.LGBM_DatasetGetNumFeature(self.handle,
ctypes.byref(ret)))
return ret.value
else:
raise LightGBMError("Cannot get num_feature before construct dataset")
def get_ref_chain(self, ref_limit=100):
"""Get a chain of Dataset objects.
Starts with this Dataset ``r``, then goes to ``r.reference`` (if it exists),
then to ``r.reference.reference``, etc.,
until we hit ``ref_limit`` or a reference loop.
Parameters
----------
ref_limit : int, optional (default=100)
The limit number of references.
Returns
-------
ref_chain : set of Dataset
Chain of references of the Datasets.
"""
head = self
ref_chain = set()
while len(ref_chain) < ref_limit:
if isinstance(head, Dataset):
ref_chain.add(head)
if (head.reference is not None) and (head.reference not in ref_chain):
head = head.reference
else:
break
else:
break
return ref_chain
def add_features_from(self, other):
"""Add features from other Dataset to the current Dataset.
Both Datasets must be constructed before calling this method.
Parameters
----------
other : Dataset
The Dataset to take features from.
Returns
-------
self : Dataset
Dataset with the new features added.
"""
if self.handle is None or other.handle is None:
raise ValueError('Both source and target Datasets must be constructed before adding features')
_safe_call(_LIB.LGBM_DatasetAddFeaturesFrom(self.handle, other.handle))
was_none = self.data is None
old_self_data_type = type(self.data).__name__
if other.data is None:
self.data = None
elif self.data is not None:
if isinstance(self.data, np.ndarray):
if isinstance(other.data, np.ndarray):
self.data = np.hstack((self.data, other.data))
elif scipy.sparse.issparse(other.data):
self.data = np.hstack((self.data, other.data.toarray()))
elif isinstance(other.data, pd_DataFrame):
self.data = np.hstack((self.data, other.data.values))
elif isinstance(other.data, dt_DataTable):
self.data = np.hstack((self.data, other.data.to_numpy()))
else:
self.data = None
elif scipy.sparse.issparse(self.data):
sparse_format = self.data.getformat()
if isinstance(other.data, np.ndarray) or scipy.sparse.issparse(other.data):
self.data = scipy.sparse.hstack((self.data, other.data), format=sparse_format)
elif isinstance(other.data, pd_DataFrame):
self.data = scipy.sparse.hstack((self.data, other.data.values), format=sparse_format)
elif isinstance(other.data, dt_DataTable):
self.data = scipy.sparse.hstack((self.data, other.data.to_numpy()), format=sparse_format)
else:
self.data = None
elif isinstance(self.data, pd_DataFrame):
if not PANDAS_INSTALLED:
raise LightGBMError("Cannot add features to DataFrame type of raw data "
"without pandas installed. "
"Install pandas and restart your session.")
if isinstance(other.data, np.ndarray):
self.data = concat((self.data, pd_DataFrame(other.data)),
axis=1, ignore_index=True)
elif scipy.sparse.issparse(other.data):
self.data = concat((self.data, pd_DataFrame(other.data.toarray())),
axis=1, ignore_index=True)
elif isinstance(other.data, pd_DataFrame):
self.data = concat((self.data, other.data),
axis=1, ignore_index=True)
elif isinstance(other.data, dt_DataTable):
self.data = concat((self.data, pd_DataFrame(other.data.to_numpy())),
axis=1, ignore_index=True)
else:
self.data = None
elif isinstance(self.data, dt_DataTable):
if isinstance(other.data, np.ndarray):
self.data = dt_DataTable(np.hstack((self.data.to_numpy(), other.data)))
elif scipy.sparse.issparse(other.data):
self.data = dt_DataTable(np.hstack((self.data.to_numpy(), other.data.toarray())))
elif isinstance(other.data, pd_DataFrame):
self.data = dt_DataTable(np.hstack((self.data.to_numpy(), other.data.values)))
elif isinstance(other.data, dt_DataTable):
self.data = dt_DataTable(np.hstack((self.data.to_numpy(), other.data.to_numpy())))
else:
self.data = None
else:
self.data = None
if self.data is None:
err_msg = (f"Cannot add features from {type(other.data).__name__} type of raw data to "
f"{old_self_data_type} type of raw data.\n")
err_msg += ("Set free_raw_data=False when construct Dataset to avoid this"
if was_none else "Freeing raw data")
_log_warning(err_msg)
self.feature_name = self.get_feature_name()
_log_warning("Reseting categorical features.\n"
"You can set new categorical features via ``set_categorical_feature`` method")
self.categorical_feature = "auto"
self.pandas_categorical = None
return self
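# Illustrative sketch (not part of the library): both Datasets must be constructed and
# have the same number of rows; the native Datasets are merged column-wise and, when
# possible, the raw data is horizontally stacked as well. ``X_a``/``X_b`` are
# hypothetical arrays:
#
#     ds_a = lgb.Dataset(X_a, label=y).construct()
#     ds_b = lgb.Dataset(X_b).construct()
#     ds_a.add_features_from(ds_b)   # ds_a now contains the columns of both datasets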
def _dump_text(self, filename):
"""Save Dataset to a text file.
This format cannot be loaded back in by LightGBM, but is useful for debugging purposes.
Parameters
----------
filename : string
Name of the output file.
Returns
-------
self : Dataset
Returns self.
"""
_safe_call(_LIB.LGBM_DatasetDumpText(
self.construct().handle,
c_str(filename)))
return self
class Booster:
"""Booster in LightGBM."""
def __init__(self, params=None, train_set=None, model_file=None, model_str=None, silent=False):
"""Initialize the Booster.
Parameters
----------
params : dict or None, optional (default=None)
Parameters for Booster.
train_set : Dataset or None, optional (default=None)
Training dataset.
model_file : string or None, optional (default=None)
Path to the model file.
model_str : string or None, optional (default=None)
Model will be loaded from this string.
silent : bool, optional (default=False)
Whether to print messages during construction.
"""
self.handle = None
self.network = False
self.__need_reload_eval_info = True
self._train_data_name = "training"
self.__attr = {}
self.__set_objective_to_none = False
self.best_iteration = -1
self.best_score = {}
params = {} if params is None else deepcopy(params)
# the user can set verbosity via params; it takes higher priority
if not any(verbose_alias in params for verbose_alias in _ConfigAliases.get("verbosity")) and silent:
params["verbose"] = -1
if train_set is not None:
# Training task
if not isinstance(train_set, Dataset):
raise TypeError(f'Training data should be Dataset instance, met {type(train_set).__name__}')
params = _choose_param_value(
main_param_name="machines",
params=params,
default_value=None
)
# if "machines" is given, assume user wants to do distributed learning, and set up network
if params["machines"] is None:
params.pop("machines", None)
else:
machines = params["machines"]
if isinstance(machines, str):
num_machines_from_machine_list = len(machines.split(','))
elif isinstance(machines, (list, set)):
num_machines_from_machine_list = len(machines)
machines = ','.join(machines)
else:
raise ValueError("Invalid machines in params.")
params = _choose_param_value(
main_param_name="num_machines",
params=params,
default_value=num_machines_from_machine_list
)
params = _choose_param_value(
main_param_name="local_listen_port",
params=params,
default_value=12400
)
self.set_network(
machines=machines,
local_listen_port=params["local_listen_port"],
listen_time_out=params.get("time_out", 120),
num_machines=params["num_machines"]
)
# construct booster object
train_set.construct()
# copy the parameters from train_set
params.update(train_set.get_params())
params_str = param_dict_to_str(params)
self.handle = ctypes.c_void_p()
_safe_call(_LIB.LGBM_BoosterCreate(
train_set.handle,
c_str(params_str),
ctypes.byref(self.handle)))
# save reference to data
self.train_set = train_set
self.valid_sets = []
self.name_valid_sets = []
self.__num_dataset = 1
self.__init_predictor = train_set._predictor
if self.__init_predictor is not None:
_safe_call(_LIB.LGBM_BoosterMerge(
self.handle,
self.__init_predictor.handle))
out_num_class = ctypes.c_int(0)
_safe_call(_LIB.LGBM_BoosterGetNumClasses(
self.handle,
ctypes.byref(out_num_class)))
self.__num_class = out_num_class.value
# buffer for inner predict
self.__inner_predict_buffer = [None]
self.__is_predicted_cur_iter = [False]
self.__get_eval_info()
self.pandas_categorical = train_set.pandas_categorical
self.train_set_version = train_set.version
elif model_file is not None:
# Prediction task
out_num_iterations = ctypes.c_int(0)
self.handle = ctypes.c_void_p()
_safe_call(_LIB.LGBM_BoosterCreateFromModelfile(
c_str(model_file),
ctypes.byref(out_num_iterations),
ctypes.byref(self.handle)))
out_num_class = ctypes.c_int(0)
_safe_call(_LIB.LGBM_BoosterGetNumClasses(
self.handle,
ctypes.byref(out_num_class)))
self.__num_class = out_num_class.value
self.pandas_categorical = _load_pandas_categorical(file_name=model_file)
elif model_str is not None:
self.model_from_string(model_str, not silent)
else:
raise TypeError('Need at least one training dataset or model file or model string '
'to create Booster instance')
self.params = params
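# Illustrative sketch (not part of the library): a Booster is created either from a
# training Dataset (training task) or from a saved model (prediction task). In typical
# use lgb.train() constructs it for you; direct construction looks like the following,
# where "model.txt" is a hypothetical path:
#
#     bst = lgb.Booster(params={"objective": "binary"}, train_set=train_ds)
#     bst = lgb.Booster(model_file="model.txt")   # prediction-only Booster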
def __del__(self):
try:
if self.network:
self.free_network()
except AttributeError:
pass
try:
if self.handle is not None:
_safe_call(_LIB.LGBM_BoosterFree(self.handle))
except AttributeError:
pass
def __copy__(self):
return self.__deepcopy__(None)
def __deepcopy__(self, _):
model_str = self.model_to_string(num_iteration=-1)
booster = Booster(model_str=model_str)
return booster
def __getstate__(self):
this = self.__dict__.copy()
handle = this['handle']
this.pop('train_set', None)
this.pop('valid_sets', None)
if handle is not None:
this["handle"] = self.model_to_string(num_iteration=-1)
return this
def __setstate__(self, state):
model_str = state.get('handle', None)
if model_str is not None:
handle = ctypes.c_void_p()
out_num_iterations = ctypes.c_int(0)
_safe_call(_LIB.LGBM_BoosterLoadModelFromString(
c_str(model_str),
ctypes.byref(out_num_iterations),
ctypes.byref(handle)))
state['handle'] = handle
self.__dict__.update(state)
def free_dataset(self):
"""Free Booster's Datasets.
Returns
-------
self : Booster
Booster without Datasets.
"""
self.__dict__.pop('train_set', None)
self.__dict__.pop('valid_sets', None)
self.__num_dataset = 0
return self
def _free_buffer(self):
self.__inner_predict_buffer = []
self.__is_predicted_cur_iter = []
return self
def set_network(
self,
machines: Union[List[str], Set[str], str],
local_listen_port: int = 12400,
listen_time_out: int = 120,
num_machines: int = 1
) -> "Booster":
"""Set the network configuration.
Parameters
----------
machines : list, set or string
Names of machines.
local_listen_port : int, optional (default=12400)
TCP listen port for local machines.
listen_time_out : int, optional (default=120)
Socket time-out in minutes.
num_machines : int, optional (default=1)
The number of machines for distributed learning application.
Returns
-------
self : Booster
Booster with set network.
"""
if isinstance(machines, (list, set)):
machines = ','.join(machines)
_safe_call(_LIB.LGBM_NetworkInit(c_str(machines),
ctypes.c_int(local_listen_port),
ctypes.c_int(listen_time_out),
ctypes.c_int(num_machines)))
self.network = True
return self
def free_network(self):
"""Free Booster's network.
Returns
-------
self : Booster
Booster with freed network.
"""
_safe_call(_LIB.LGBM_NetworkFree())
self.network = False
return self
def trees_to_dataframe(self):
"""Parse the fitted model and return in an easy-to-read pandas DataFrame.
The returned DataFrame has the following columns.
- ``tree_index`` : int64, which tree a node belongs to. 0-based, so a value of ``6``, for example, means "this node is in the 7th tree".
- ``node_depth`` : int64, how far a node is from the root of the tree. The root node has a value of ``1``, its direct children are ``2``, etc.
- ``node_index`` : string, unique identifier for a node.
- ``left_child`` : string, ``node_index`` of the child node to the left of a split. ``None`` for leaf nodes.
- ``right_child`` : string, ``node_index`` of the child node to the right of a split. ``None`` for leaf nodes.
- ``parent_index`` : string, ``node_index`` of this node's parent. ``None`` for the root node.
- ``split_feature`` : string, name of the feature used for splitting. ``None`` for leaf nodes.
- ``split_gain`` : float64, gain from adding this split to the tree. ``NaN`` for leaf nodes.
- ``threshold`` : float64, value of the feature used to decide which side of the split a record will go down. ``NaN`` for leaf nodes.
- ``decision_type`` : string, logical operator describing how to compare a value to ``threshold``.
For example, ``split_feature = "Column_10", threshold = 15, decision_type = "<="`` means that
records where ``Column_10 <= 15`` follow the left side of the split; otherwise they follow the right side. ``None`` for leaf nodes.
- ``missing_direction`` : string, split direction that missing values should go to. ``None`` for leaf nodes.
- ``missing_type`` : string, describes what types of values are treated as missing.
- ``value`` : float64, predicted value for this leaf node, multiplied by the learning rate.
- ``weight`` : float64 or int64, sum of hessian (second-order derivative of objective), summed over observations that fall in this node.
- ``count`` : int64, number of records in the training data that fall into this node.
Returns
-------
result : pandas DataFrame
Returns a pandas DataFrame of the parsed model.
"""
if not PANDAS_INSTALLED:
raise LightGBMError('This method cannot be run without pandas installed. '
'You must install pandas and restart your session to use this method.')
if self.num_trees() == 0:
raise LightGBMError('There are no trees in this Booster and thus nothing to parse')
def _is_split_node(tree):
return 'split_index' in tree.keys()
def create_node_record(tree, node_depth=1, tree_index=None,
feature_names=None, parent_node=None):
def _get_node_index(tree, tree_index):
tree_num = f'{tree_index}-' if tree_index is not None else ''
is_split = _is_split_node(tree)
node_type = 'S' if is_split else 'L'
# a single-node tree won't have `leaf_index`, so default to 0
node_num = tree.get('split_index' if is_split else 'leaf_index', 0)
return f"{tree_num}{node_type}{node_num}"
def _get_split_feature(tree, feature_names):
if _is_split_node(tree):
if feature_names is not None:
feature_name = feature_names[tree['split_feature']]
else:
feature_name = tree['split_feature']
else:
feature_name = None
return feature_name
def _is_single_node_tree(tree):
return set(tree.keys()) == {'leaf_value'}
# Create the node record, and populate universal data members
node = OrderedDict()
node['tree_index'] = tree_index
node['node_depth'] = node_depth
node['node_index'] = _get_node_index(tree, tree_index)
node['left_child'] = None
node['right_child'] = None
node['parent_index'] = parent_node
node['split_feature'] = _get_split_feature(tree, feature_names)
node['split_gain'] = None
node['threshold'] = None
node['decision_type'] = None
node['missing_direction'] = None
node['missing_type'] = None
node['value'] = None
node['weight'] = None
node['count'] = None
# Update values to reflect node type (leaf or split)
if _is_split_node(tree):
node['left_child'] = _get_node_index(tree['left_child'], tree_index)
node['right_child'] = _get_node_index(tree['right_child'], tree_index)
node['split_gain'] = tree['split_gain']
node['threshold'] = tree['threshold']
node['decision_type'] = tree['decision_type']
node['missing_direction'] = 'left' if tree['default_left'] else 'right'
node['missing_type'] = tree['missing_type']
node['value'] = tree['internal_value']
node['weight'] = tree['internal_weight']
node['count'] = tree['internal_count']
else:
node['value'] = tree['leaf_value']
if not _is_single_node_tree(tree):
node['weight'] = tree['leaf_weight']
node['count'] = tree['leaf_count']
return node
def tree_dict_to_node_list(tree, node_depth=1, tree_index=None,
feature_names=None, parent_node=None):
node = create_node_record(tree,
node_depth=node_depth,
tree_index=tree_index,
feature_names=feature_names,
parent_node=parent_node)
res = [node]
if _is_split_node(tree):
# traverse the next level of the tree
children = ['left_child', 'right_child']
for child in children:
subtree_list = tree_dict_to_node_list(
tree[child],
node_depth=node_depth + 1,
tree_index=tree_index,
feature_names=feature_names,
parent_node=node['node_index'])
# "subtree_list" is the list of node records (dicts) for this child's subtree;
# extend the result with these records.
res.extend(subtree_list)
return res
model_dict = self.dump_model()
feature_names = model_dict['feature_names']
model_list = []
for tree in model_dict['tree_info']:
model_list.extend(tree_dict_to_node_list(tree['tree_structure'],
tree_index=tree['tree_index'],
feature_names=feature_names))
return pd_DataFrame(model_list, columns=model_list[0].keys())
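# Illustrative sketch (not part of the library): the result has one row per node across
# all trees, which is convenient for inspecting splits after training:
#
#     df = bst.trees_to_dataframe()
#     df[df.tree_index == 0][["node_index", "split_feature", "threshold", "count"]]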
def set_train_data_name(self, name):
"""Set the name to the training Dataset.
Parameters
----------
name : string
Name for the training Dataset.
Returns
-------
self : Booster
Booster with set training Dataset name.
"""
self._train_data_name = name
return self
def add_valid(self, data, name):
"""Add validation data.
Parameters
----------
data : Dataset
Validation data.
name : string
Name of validation data.
Returns
-------
self : Booster
Booster with set validation data.
"""
if not isinstance(data, Dataset):
raise TypeError(f'Validation data should be Dataset instance, met {type(data).__name__}')
if data._predictor is not self.__init_predictor:
raise LightGBMError("Add validation data failed, "
"you should use same predictor for these data")
_safe_call(_LIB.LGBM_BoosterAddValidData(
self.handle,
data.construct().handle))
self.valid_sets.append(data)
self.name_valid_sets.append(name)
self.__num_dataset += 1
self.__inner_predict_buffer.append(None)
self.__is_predicted_cur_iter.append(False)
return self
def reset_parameter(self, params):
"""Reset parameters of Booster.
Parameters
----------
params : dict
New parameters for Booster.
Returns
-------
self : Booster
Booster with new parameters.
"""
params_str = param_dict_to_str(params)
if params_str:
_safe_call(_LIB.LGBM_BoosterResetParameter(
self.handle,
c_str(params_str)))
self.params.update(params)
return self
def update(self, train_set=None, fobj=None):
"""Update Booster for one iteration.
Parameters
----------
train_set : Dataset or None, optional (default=None)
Training data.
If None, last training data is used.
fobj : callable or None, optional (default=None)
Customized objective function.
Should accept two parameters: preds, train_data,
and return (grad, hess).
preds : list or numpy 1-D array
The predicted values.
Predicted values are returned before any transformation,
e.g. they are raw margin instead of probability of positive class for binary task.
train_data : Dataset
The training dataset.
grad : list or numpy 1-D array
The value of the first order derivative (gradient) of the loss
with respect to the elements of preds for each sample point.
hess : list or numpy 1-D array
The value of the second order derivative (Hessian) of the loss
with respect to the elements of preds for each sample point.
For multi-class task, preds are grouped by class_id first, then by row_id.
If you want to get the i-th row preds in the j-th class, the access way is score[j * num_data + i],
and you should group grad and hess in this way as well.
Returns
-------
is_finished : bool
Whether the update was successfully finished.
"""
# need to reset training data
if train_set is None and self.train_set_version != self.train_set.version:
train_set = self.train_set
is_the_same_train_set = False
else:
is_the_same_train_set = train_set is self.train_set and self.train_set_version == train_set.version
if train_set is not None and not is_the_same_train_set:
if not isinstance(train_set, Dataset):
raise TypeError(f'Training data should be Dataset instance, met {type(train_set).__name__}')
if train_set._predictor is not self.__init_predictor:
raise LightGBMError("Replace training data failed, "
"you should use same predictor for these data")
self.train_set = train_set
_safe_call(_LIB.LGBM_BoosterResetTrainingData(
self.handle,
self.train_set.construct().handle))
self.__inner_predict_buffer[0] = None
self.train_set_version = self.train_set.version
is_finished = ctypes.c_int(0)
if fobj is None:
if self.__set_objective_to_none:
raise LightGBMError('Cannot update due to null objective function.')
_safe_call(_LIB.LGBM_BoosterUpdateOneIter(
self.handle,
ctypes.byref(is_finished)))
self.__is_predicted_cur_iter = [False for _ in range(self.__num_dataset)]
return is_finished.value == 1
else:
if not self.__set_objective_to_none:
self.reset_parameter({"objective": "none"}).__set_objective_to_none = True
grad, hess = fobj(self.__inner_predict(0), self.train_set)
return self.__boost(grad, hess)
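# Illustrative sketch (not part of the library): a custom objective passed via ``fobj``
# must return per-sample gradients and hessians of the raw scores. A squared-error
# objective for a single-class task, assuming ``train_ds`` carries the true labels:
#
#     def l2_obj(preds, train_data):
#         grad = preds - train_data.get_label()   # first-order derivative
#         hess = np.ones_like(preds)              # second-order derivative
#         return grad, hess
#
#     bst.update(fobj=l2_obj)   # one boosting round with the custom objective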
def __boost(self, grad, hess):
"""Boost Booster for one iteration with customized gradient statistics.
.. note::
Score is returned before any transformation,
e.g. it is raw margin instead of probability of positive class for binary task.
For multi-class task, the score is grouped by class_id first, then by row_id.
If you want to get the i-th row score in the j-th class, the access way is score[j * num_data + i],
and you should group grad and hess in this way as well.
Parameters
----------
grad : list or numpy 1-D array
The value of the first order derivative (gradient) of the loss
with respect to the elements of score for each sample point.
hess : list or numpy 1-D array
The value of the second order derivative (Hessian) of the loss
with respect to the elements of score for each sample point.
Returns
-------
is_finished : bool
Whether the boost was successfully finished.
"""
grad = list_to_1d_numpy(grad, name='gradient')
hess = list_to_1d_numpy(hess, name='hessian')
assert grad.flags.c_contiguous
assert hess.flags.c_contiguous
if len(grad) != len(hess):
raise ValueError(f"Lengths of gradient({len(grad)}) and hessian({len(hess)}) don't match")
is_finished = ctypes.c_int(0)
_safe_call(_LIB.LGBM_BoosterUpdateOneIterCustom(
self.handle,
grad.ctypes.data_as(ctypes.POINTER(ctypes.c_float)),
hess.ctypes.data_as(ctypes.POINTER(ctypes.c_float)),
ctypes.byref(is_finished)))
self.__is_predicted_cur_iter = [False for _ in range(self.__num_dataset)]
return is_finished.value == 1
def rollback_one_iter(self):
"""Rollback one iteration.
Returns
-------
self : Booster
Booster with rolled back one iteration.
"""
_safe_call(_LIB.LGBM_BoosterRollbackOneIter(
self.handle))
self.__is_predicted_cur_iter = [False for _ in range(self.__num_dataset)]
return self
def current_iteration(self):
"""Get the index of the current iteration.
Returns
-------
cur_iter : int
The index of the current iteration.
"""
out_cur_iter = ctypes.c_int(0)
_safe_call(_LIB.LGBM_BoosterGetCurrentIteration(
self.handle,
ctypes.byref(out_cur_iter)))
return out_cur_iter.value
def num_model_per_iteration(self):
"""Get number of models per iteration.
Returns
-------
model_per_iter : int
The number of models per iteration.
"""
model_per_iter = ctypes.c_int(0)
_safe_call(_LIB.LGBM_BoosterNumModelPerIteration(
self.handle,
ctypes.byref(model_per_iter)))
return model_per_iter.value
def num_trees(self):
"""Get number of weak sub-models.
Returns
-------
num_trees : int
The number of weak sub-models.
"""
num_trees = ctypes.c_int(0)
_safe_call(_LIB.LGBM_BoosterNumberOfTotalModel(
self.handle,
ctypes.byref(num_trees)))
return num_trees.value
def upper_bound(self):
"""Get upper bound value of a model.
Returns
-------
upper_bound : double
Upper bound value of the model.
"""
ret = ctypes.c_double(0)
_safe_call(_LIB.LGBM_BoosterGetUpperBoundValue(
self.handle,
ctypes.byref(ret)))
return ret.value
def lower_bound(self):
"""Get lower bound value of a model.
Returns
-------
lower_bound : double
Lower bound value of the model.
"""
ret = ctypes.c_double(0)
_safe_call(_LIB.LGBM_BoosterGetLowerBoundValue(
self.handle,
ctypes.byref(ret)))
return ret.value
def eval(self, data, name, feval=None):
"""Evaluate for data.
Parameters
----------
data : Dataset
Data for the evaluating.
name : string
Name of the data.
feval : callable or None, optional (default=None)
Customized evaluation function.
Should accept two parameters: preds, eval_data,
and return (eval_name, eval_result, is_higher_better) or list of such tuples.
preds : list or numpy 1-D array
The predicted values.
If ``fobj`` is specified, predicted values are returned before any transformation,
e.g. they are raw margin instead of probability of positive class for binary task in this case.
eval_data : Dataset
The evaluation dataset.
eval_name : string
The name of evaluation function (without whitespace).
eval_result : float
The eval result.
is_higher_better : bool
Whether a higher eval result is better, e.g. AUC is ``is_higher_better``.
For multi-class task, preds are grouped by class_id first, then by row_id.
If you want to get the i-th row preds in the j-th class, the access way is preds[j * num_data + i].
Returns
-------
result : list
List with evaluation results.
"""
if not isinstance(data, Dataset):
raise TypeError("Can only eval for Dataset instance")
data_idx = -1
if data is self.train_set:
data_idx = 0
else:
for i in range(len(self.valid_sets)):
if data is self.valid_sets[i]:
data_idx = i + 1
break
# need to push new valid data
if data_idx == -1:
self.add_valid(data, name)
data_idx = self.__num_dataset - 1
return self.__inner_eval(name, data_idx, feval)
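# Illustrative sketch (not part of the library): a custom metric passed via ``feval``
# returns (eval_name, eval_result, is_higher_better). For example, a hypothetical mean
# absolute error metric evaluated on a validation Dataset ``valid_ds``:
#
#     def mae(preds, eval_data):
#         return "mae", np.mean(np.abs(preds - eval_data.get_label())), False
#
#     bst.eval(valid_ds, "valid_0", feval=mae)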
def eval_train(self, feval=None):
"""Evaluate for training data.
Parameters
----------
feval : callable or None, optional (default=None)
Customized evaluation function.
Should accept two parameters: preds, train_data,
and return (eval_name, eval_result, is_higher_better) or list of such tuples.
preds : list or numpy 1-D array
The predicted values.
If ``fobj`` is specified, predicted values are returned before any transformation,
e.g. they are raw margin instead of probability of positive class for binary task in this case.
train_data : Dataset
The training dataset.
eval_name : string
The name of evaluation function (without whitespace).
eval_result : float
The eval result.
is_higher_better : bool
Whether a higher eval result is better, e.g. AUC is ``is_higher_better``.
For multi-class task, preds are grouped by class_id first, then by row_id.
If you want to get the i-th row preds in the j-th class, the access way is preds[j * num_data + i].
Returns
-------
result : list
List with evaluation results.
"""
return self.__inner_eval(self._train_data_name, 0, feval)
def eval_valid(self, feval=None):
"""Evaluate for validation data.
Parameters
----------
feval : callable or None, optional (default=None)
Customized evaluation function.
Should accept two parameters: preds, valid_data,
and return (eval_name, eval_result, is_higher_better) or list of such tuples.
preds : list or numpy 1-D array
The predicted values.
If ``fobj`` is specified, predicted values are returned before any transformation,
e.g. they are raw margin instead of probability of positive class for binary task in this case.
valid_data : Dataset
The validation dataset.
eval_name : string
The name of evaluation function (without whitespace).
eval_result : float
The eval result.
is_higher_better : bool
Whether a higher eval result is better, e.g. AUC is ``is_higher_better``.
For multi-class task, preds are grouped by class_id first, then by row_id.
If you want to get the i-th row preds in the j-th class, the access way is preds[j * num_data + i].
Returns
-------
result : list
List with evaluation results.
"""
return [item for i in range(1, self.__num_dataset)
for item in self.__inner_eval(self.name_valid_sets[i - 1], i, feval)]
def save_model(self, filename, num_iteration=None, start_iteration=0, importance_type='split'):
"""Save Booster to file.
Parameters
----------
filename : string
Filename to save Booster.
num_iteration : int or None, optional (default=None)
Index of the iteration that should be saved.
If None, the best iteration is saved if it exists; otherwise, all iterations are saved.
If <= 0, all iterations are saved.
start_iteration : int, optional (default=0)
Start index of the iteration that should be saved.
importance_type : string, optional (default="split")
What type of feature importance should be saved.
If "split", result contains numbers of times the feature is used in a model.
If "gain", result contains total gains of splits which use the feature.
Returns
-------
self : Booster
Returns self.
"""
if num_iteration is None:
num_iteration = self.best_iteration
importance_type_int = FEATURE_IMPORTANCE_TYPE_MAPPER[importance_type]
_safe_call(_LIB.LGBM_BoosterSaveModel(
self.handle,
ctypes.c_int(start_iteration),
ctypes.c_int(num_iteration),
ctypes.c_int(importance_type_int),
c_str(filename)))
_dump_pandas_categorical(self.pandas_categorical, filename)
return self
def shuffle_models(self, start_iteration=0, end_iteration=-1):
"""Shuffle models.
Parameters
----------
start_iteration : int, optional (default=0)
The first iteration that will be shuffled.
end_iteration : int, optional (default=-1)
The last iteration that will be shuffled.
If <= 0, means the last available iteration.
Returns
-------
self : Booster
Booster with shuffled models.
"""
_safe_call(_LIB.LGBM_BoosterShuffleModels(
self.handle,
ctypes.c_int(start_iteration),
ctypes.c_int(end_iteration)))
return self
def model_from_string(self, model_str, verbose=True):
"""Load Booster from a string.
Parameters
----------
model_str : string
Model will be loaded from this string.
verbose : bool, optional (default=True)
Whether to print messages while loading model.
Returns
-------
self : Booster
Loaded Booster object.
"""
if self.handle is not None:
_safe_call(_LIB.LGBM_BoosterFree(self.handle))
self._free_buffer()
self.handle = ctypes.c_void_p()
out_num_iterations = ctypes.c_int(0)
_safe_call(_LIB.LGBM_BoosterLoadModelFromString(
c_str(model_str),
ctypes.byref(out_num_iterations),
ctypes.byref(self.handle)))
out_num_class = ctypes.c_int(0)
_safe_call(_LIB.LGBM_BoosterGetNumClasses(
self.handle,
ctypes.byref(out_num_class)))
if verbose:
_log_info(f'Finished loading model, total used {int(out_num_iterations.value)} iterations')
self.__num_class = out_num_class.value
self.pandas_categorical = _load_pandas_categorical(model_str=model_str)
return self
def model_to_string(self, num_iteration=None, start_iteration=0, importance_type='split'):
"""Save Booster to string.
Parameters
----------
num_iteration : int or None, optional (default=None)
Index of the iteration that should be saved.
If None, if the best iteration exists, it is saved; otherwise, all iterations are saved.
If <= 0, all iterations are saved.
start_iteration : int, optional (default=0)
Start index of the iteration that should be saved.
importance_type : string, optional (default="split")
What type of feature importance should be saved.
If "split", result contains numbers of times the feature is used in a model.
If "gain", result contains total gains of splits which use the feature.
Returns
-------
str_repr : string
String representation of Booster.
"""
if num_iteration is None:
num_iteration = self.best_iteration
importance_type_int = FEATURE_IMPORTANCE_TYPE_MAPPER[importance_type]
buffer_len = 1 << 20
tmp_out_len = ctypes.c_int64(0)
string_buffer = ctypes.create_string_buffer(buffer_len)
ptr_string_buffer = ctypes.c_char_p(*[ctypes.addressof(string_buffer)])
_safe_call(_LIB.LGBM_BoosterSaveModelToString(
self.handle,
ctypes.c_int(start_iteration),
ctypes.c_int(num_iteration),
ctypes.c_int(importance_type_int),
ctypes.c_int64(buffer_len),
ctypes.byref(tmp_out_len),
ptr_string_buffer))
actual_len = tmp_out_len.value
# if buffer length is not long enough, re-allocate a buffer
if actual_len > buffer_len:
string_buffer = ctypes.create_string_buffer(actual_len)
ptr_string_buffer = ctypes.c_char_p(*[ctypes.addressof(string_buffer)])
_safe_call(_LIB.LGBM_BoosterSaveModelToString(
self.handle,
ctypes.c_int(start_iteration),
ctypes.c_int(num_iteration),
ctypes.c_int(importance_type_int),
ctypes.c_int64(actual_len),
ctypes.byref(tmp_out_len),
ptr_string_buffer))
ret = string_buffer.value.decode('utf-8')
ret += _dump_pandas_categorical(self.pandas_categorical)
return ret
def dump_model(self, num_iteration=None, start_iteration=0, importance_type='split'):
"""Dump Booster to JSON format.
Parameters
----------
num_iteration : int or None, optional (default=None)
Index of the iteration that should be dumped.
If None, if the best iteration exists, it is dumped; otherwise, all iterations are dumped.
If <= 0, all iterations are dumped.
start_iteration : int, optional (default=0)
Start index of the iteration that should be dumped.
importance_type : string, optional (default="split")
What type of feature importance should be dumped.
If "split", result contains numbers of times the feature is used in a model.
If "gain", result contains total gains of splits which use the feature.
Returns
-------
json_repr : dict
JSON format of Booster.
"""
if num_iteration is None:
num_iteration = self.best_iteration
importance_type_int = FEATURE_IMPORTANCE_TYPE_MAPPER[importance_type]
buffer_len = 1 << 20
tmp_out_len = ctypes.c_int64(0)
string_buffer = ctypes.create_string_buffer(buffer_len)
ptr_string_buffer = ctypes.c_char_p(*[ctypes.addressof(string_buffer)])
_safe_call(_LIB.LGBM_BoosterDumpModel(
self.handle,
ctypes.c_int(start_iteration),
ctypes.c_int(num_iteration),
ctypes.c_int(importance_type_int),
ctypes.c_int64(buffer_len),
ctypes.byref(tmp_out_len),
ptr_string_buffer))
actual_len = tmp_out_len.value
# if buffer length is not long enough, reallocate a buffer
if actual_len > buffer_len:
string_buffer = ctypes.create_string_buffer(actual_len)
ptr_string_buffer = ctypes.c_char_p(*[ctypes.addressof(string_buffer)])
_safe_call(_LIB.LGBM_BoosterDumpModel(
self.handle,
ctypes.c_int(start_iteration),
ctypes.c_int(num_iteration),
ctypes.c_int(importance_type_int),
ctypes.c_int64(actual_len),
ctypes.byref(tmp_out_len),
ptr_string_buffer))
ret = json.loads(string_buffer.value.decode('utf-8'))
ret['pandas_categorical'] = json.loads(json.dumps(self.pandas_categorical,
default=json_default_with_numpy))
return ret
def predict(self, data, start_iteration=0, num_iteration=None,
raw_score=False, pred_leaf=False, pred_contrib=False,
data_has_header=False, is_reshape=True, **kwargs):
"""Make a prediction.
Parameters
----------
data : string, numpy array, pandas DataFrame, H2O DataTable's Frame or scipy.sparse
Data source for prediction.
If string, it represents the path to txt file.
start_iteration : int, optional (default=0)
Start index of the iteration to predict.
If <= 0, starts from the first iteration.
num_iteration : int or None, optional (default=None)
Total number of iterations used in the prediction.
If None, if the best iteration exists and start_iteration <= 0, the best iteration is used;
otherwise, all iterations from ``start_iteration`` are used (no limits).
If <= 0, all iterations from ``start_iteration`` are used (no limits).
raw_score : bool, optional (default=False)
Whether to predict raw scores.
pred_leaf : bool, optional (default=False)
Whether to predict leaf index.
pred_contrib : bool, optional (default=False)
Whether to predict feature contributions.
.. note::
If you want to get more explanations for your model's predictions using SHAP values,
like SHAP interaction values,
you can install the shap package (https://github.com/slundberg/shap).
Note that unlike the shap package, with ``pred_contrib`` we return a matrix with an extra
column, where the last column is the expected value.
data_has_header : bool, optional (default=False)
Whether the data has header.
Used only if data is string.
is_reshape : bool, optional (default=True)
If True, result is reshaped to [nrow, ncol].
**kwargs
Other parameters for the prediction.
Returns
-------
result : numpy array, scipy.sparse or list of scipy.sparse
Prediction result.
Can be sparse or a list of sparse objects (each element represents predictions for one class) for feature contributions (when ``pred_contrib=True``).
"""
predictor = self._to_predictor(deepcopy(kwargs))
if num_iteration is None:
if start_iteration <= 0:
num_iteration = self.best_iteration
else:
num_iteration = -1
return predictor.predict(data, start_iteration, num_iteration,
raw_score, pred_leaf, pred_contrib,
data_has_header, is_reshape)
def refit(self, data, label, decay_rate=0.9, **kwargs):
"""Refit the existing Booster by new data.
Parameters
----------
data : string, numpy array, pandas DataFrame, H2O DataTable's Frame or scipy.sparse
Data source for refit.
If string, it represents the path to txt file.
label : list, numpy 1-D array or pandas Series / one-column DataFrame
Label for refit.
decay_rate : float, optional (default=0.9)
Decay rate of refit,
will use ``leaf_output = decay_rate * old_leaf_output + (1.0 - decay_rate) * new_leaf_output`` to refit trees.
**kwargs
Other parameters for refit.
These parameters will be passed to ``predict`` method.
Returns
-------
result : Booster
Refitted Booster.
"""
if self.__set_objective_to_none:
raise LightGBMError('Cannot refit due to null objective function.')
predictor = self._to_predictor(deepcopy(kwargs))
leaf_preds = predictor.predict(data, -1, pred_leaf=True)
nrow, ncol = leaf_preds.shape
out_is_linear = ctypes.c_bool(False)
_safe_call(_LIB.LGBM_BoosterGetLinear(
self.handle,
ctypes.byref(out_is_linear)))
new_params = deepcopy(self.params)
new_params["linear_tree"] = out_is_linear.value
train_set = Dataset(data, label, silent=True, params=new_params)
new_params['refit_decay_rate'] = decay_rate
new_booster = Booster(new_params, train_set)
# Copy models
_safe_call(_LIB.LGBM_BoosterMerge(
new_booster.handle,
predictor.handle))
leaf_preds = leaf_preds.reshape(-1)
ptr_data, _, _ = c_int_array(leaf_preds)
_safe_call(_LIB.LGBM_BoosterRefit(
new_booster.handle,
ptr_data,
ctypes.c_int32(nrow),
ctypes.c_int32(ncol)))
new_booster.network = self.network
new_booster.__attr = self.__attr.copy()
return new_booster
def get_leaf_output(self, tree_id, leaf_id):
"""Get the output of a leaf.
Parameters
----------
tree_id : int
The index of the tree.
leaf_id : int
The index of the leaf in the tree.
Returns
-------
result : float
The output of the leaf.
"""
ret = ctypes.c_double(0)
_safe_call(_LIB.LGBM_BoosterGetLeafValue(
self.handle,
ctypes.c_int(tree_id),
ctypes.c_int(leaf_id),
ctypes.byref(ret)))
return ret.value
def _to_predictor(self, pred_parameter=None):
"""Convert to predictor."""
predictor = _InnerPredictor(booster_handle=self.handle, pred_parameter=pred_parameter)
predictor.pandas_categorical = self.pandas_categorical
return predictor
def num_feature(self):
"""Get number of features.
Returns
-------
num_feature : int
The number of features.
"""
out_num_feature = ctypes.c_int(0)
_safe_call(_LIB.LGBM_BoosterGetNumFeature(
self.handle,
ctypes.byref(out_num_feature)))
return out_num_feature.value
def feature_name(self):
"""Get names of features.
Returns
-------
result : list
List with names of features.
"""
num_feature = self.num_feature()
# Get name of features
tmp_out_len = ctypes.c_int(0)
reserved_string_buffer_size = 255
required_string_buffer_size = ctypes.c_size_t(0)
string_buffers = [ctypes.create_string_buffer(reserved_string_buffer_size) for _ in range(num_feature)]
ptr_string_buffers = (ctypes.c_char_p * num_feature)(*map(ctypes.addressof, string_buffers))
_safe_call(_LIB.LGBM_BoosterGetFeatureNames(
self.handle,
ctypes.c_int(num_feature),
ctypes.byref(tmp_out_len),
ctypes.c_size_t(reserved_string_buffer_size),
ctypes.byref(required_string_buffer_size),
ptr_string_buffers))
if num_feature != tmp_out_len.value:
raise ValueError("Length of feature names doesn't equal with num_feature")
actual_string_buffer_size = required_string_buffer_size.value
# if buffer length is not long enough, reallocate buffers
if reserved_string_buffer_size < actual_string_buffer_size:
string_buffers = [ctypes.create_string_buffer(actual_string_buffer_size) for _ in range(num_feature)]
ptr_string_buffers = (ctypes.c_char_p * num_feature)(*map(ctypes.addressof, string_buffers))
_safe_call(_LIB.LGBM_BoosterGetFeatureNames(
self.handle,
ctypes.c_int(num_feature),
ctypes.byref(tmp_out_len),
ctypes.c_size_t(actual_string_buffer_size),
ctypes.byref(required_string_buffer_size),
ptr_string_buffers))
return [string_buffers[i].value.decode('utf-8') for i in range(num_feature)]
def feature_importance(self, importance_type='split', iteration=None):
"""Get feature importances.
Parameters
----------
importance_type : string, optional (default="split")
How the importance is calculated.
If "split", result contains numbers of times the feature is used in a model.
If "gain", result contains total gains of splits which use the feature.
iteration : int or None, optional (default=None)
Limit number of iterations in the feature importance calculation.
If None, if the best iteration exists, it is used; otherwise, all trees are used.
If <= 0, all trees are used (no limits).
Returns
-------
result : numpy array
Array with feature importances.
"""
if iteration is None:
iteration = self.best_iteration
importance_type_int = FEATURE_IMPORTANCE_TYPE_MAPPER[importance_type]
result = np.zeros(self.num_feature(), dtype=np.float64)
_safe_call(_LIB.LGBM_BoosterFeatureImportance(
self.handle,
ctypes.c_int(iteration),
ctypes.c_int(importance_type_int),
result.ctypes.data_as(ctypes.POINTER(ctypes.c_double))))
if importance_type_int == 0:
return result.astype(np.int32)
else:
return result
def get_split_value_histogram(self, feature, bins=None, xgboost_style=False):
"""Get split value histogram for the specified feature.
Parameters
----------
feature : int or string
The feature name or index the histogram is calculated for.
If int, interpreted as index.
If string, interpreted as name.
.. warning::
Categorical features are not supported.
bins : int, string or None, optional (default=None)
The maximum number of bins.
If None, or int and > number of unique split values and ``xgboost_style=True``,
the number of bins equals number of unique split values.
If string, it should be one from the list of the supported values by ``numpy.histogram()`` function.
xgboost_style : bool, optional (default=False)
Whether the returned result should be in the same form as it is in XGBoost.
If False, the returned value is tuple of 2 numpy arrays as it is in ``numpy.histogram()`` function.
If True, the returned value is matrix, in which the first column is the right edges of non-empty bins
and the second one is the histogram values.
Returns
-------
result_tuple : tuple of 2 numpy arrays
If ``xgboost_style=False``, the values of the histogram of used splitting values for the specified feature
and the bin edges.
result_array_like : numpy array or pandas DataFrame (if pandas is installed)
If ``xgboost_style=True``, the histogram of used splitting values for the specified feature.
"""
def add(root):
"""Recursively add thresholds."""
if 'split_index' in root: # non-leaf
if feature_names is not None and isinstance(feature, str):
split_feature = feature_names[root['split_feature']]
else:
split_feature = root['split_feature']
if split_feature == feature:
if isinstance(root['threshold'], str):
raise LightGBMError('Cannot compute split value histogram for the categorical feature')
else:
values.append(root['threshold'])
add(root['left_child'])
add(root['right_child'])
model = self.dump_model()
feature_names = model.get('feature_names')
tree_infos = model['tree_info']
values = []
for tree_info in tree_infos:
add(tree_info['tree_structure'])
if bins is None or isinstance(bins, int) and xgboost_style:
n_unique = len(np.unique(values))
bins = max(min(n_unique, bins) if bins is not None else n_unique, 1)
hist, bin_edges = np.histogram(values, bins=bins)
if xgboost_style:
ret = np.column_stack((bin_edges[1:], hist))
ret = ret[ret[:, 1] > 0]
if PANDAS_INSTALLED:
return pd_DataFrame(ret, columns=['SplitValue', 'Count'])
else:
return ret
else:
return hist, bin_edges
def __inner_eval(self, data_name, data_idx, feval=None):
"""Evaluate training or validation data."""
if data_idx >= self.__num_dataset:
raise ValueError("Data_idx should be smaller than number of dataset")
self.__get_eval_info()
ret = []
if self.__num_inner_eval > 0:
result = np.zeros(self.__num_inner_eval, dtype=np.float64)
tmp_out_len = ctypes.c_int(0)
_safe_call(_LIB.LGBM_BoosterGetEval(
self.handle,
ctypes.c_int(data_idx),
ctypes.byref(tmp_out_len),
result.ctypes.data_as(ctypes.POINTER(ctypes.c_double))))
if tmp_out_len.value != self.__num_inner_eval:
raise ValueError("Wrong length of eval results")
for i in range(self.__num_inner_eval):
ret.append((data_name, self.__name_inner_eval[i],
result[i], self.__higher_better_inner_eval[i]))
if callable(feval):
feval = [feval]
if feval is not None:
if data_idx == 0:
cur_data = self.train_set
else:
cur_data = self.valid_sets[data_idx - 1]
for eval_function in feval:
if eval_function is None:
continue
feval_ret = eval_function(self.__inner_predict(data_idx), cur_data)
if isinstance(feval_ret, list):
for eval_name, val, is_higher_better in feval_ret:
ret.append((data_name, eval_name, val, is_higher_better))
else:
eval_name, val, is_higher_better = feval_ret
ret.append((data_name, eval_name, val, is_higher_better))
return ret
def __inner_predict(self, data_idx):
"""Predict for training and validation dataset."""
if data_idx >= self.__num_dataset:
raise ValueError("Data_idx should be smaller than number of dataset")
if self.__inner_predict_buffer[data_idx] is None:
if data_idx == 0:
n_preds = self.train_set.num_data() * self.__num_class
else:
n_preds = self.valid_sets[data_idx - 1].num_data() * self.__num_class
self.__inner_predict_buffer[data_idx] = np.zeros(n_preds, dtype=np.float64)
        # avoid predicting more than once per iteration
if not self.__is_predicted_cur_iter[data_idx]:
tmp_out_len = ctypes.c_int64(0)
data_ptr = self.__inner_predict_buffer[data_idx].ctypes.data_as(ctypes.POINTER(ctypes.c_double))
_safe_call(_LIB.LGBM_BoosterGetPredict(
self.handle,
ctypes.c_int(data_idx),
ctypes.byref(tmp_out_len),
data_ptr))
if tmp_out_len.value != len(self.__inner_predict_buffer[data_idx]):
raise ValueError(f"Wrong length of predict results for data {data_idx}")
self.__is_predicted_cur_iter[data_idx] = True
return self.__inner_predict_buffer[data_idx]
def __get_eval_info(self):
"""Get inner evaluation count and names."""
if self.__need_reload_eval_info:
self.__need_reload_eval_info = False
out_num_eval = ctypes.c_int(0)
# Get num of inner evals
_safe_call(_LIB.LGBM_BoosterGetEvalCounts(
self.handle,
ctypes.byref(out_num_eval)))
self.__num_inner_eval = out_num_eval.value
if self.__num_inner_eval > 0:
# Get name of eval metrics
tmp_out_len = ctypes.c_int(0)
reserved_string_buffer_size = 255
required_string_buffer_size = ctypes.c_size_t(0)
string_buffers = [
ctypes.create_string_buffer(reserved_string_buffer_size) for _ in range(self.__num_inner_eval)
]
ptr_string_buffers = (ctypes.c_char_p * self.__num_inner_eval)(*map(ctypes.addressof, string_buffers))
_safe_call(_LIB.LGBM_BoosterGetEvalNames(
self.handle,
ctypes.c_int(self.__num_inner_eval),
ctypes.byref(tmp_out_len),
ctypes.c_size_t(reserved_string_buffer_size),
ctypes.byref(required_string_buffer_size),
ptr_string_buffers))
if self.__num_inner_eval != tmp_out_len.value:
raise ValueError("Length of eval names doesn't equal with num_evals")
actual_string_buffer_size = required_string_buffer_size.value
# if buffer length is not long enough, reallocate buffers
if reserved_string_buffer_size < actual_string_buffer_size:
string_buffers = [
ctypes.create_string_buffer(actual_string_buffer_size) for _ in range(self.__num_inner_eval)
]
ptr_string_buffers = (ctypes.c_char_p * self.__num_inner_eval)(*map(ctypes.addressof, string_buffers))
_safe_call(_LIB.LGBM_BoosterGetEvalNames(
self.handle,
ctypes.c_int(self.__num_inner_eval),
ctypes.byref(tmp_out_len),
ctypes.c_size_t(actual_string_buffer_size),
ctypes.byref(required_string_buffer_size),
ptr_string_buffers))
self.__name_inner_eval = [
string_buffers[i].value.decode('utf-8') for i in range(self.__num_inner_eval)
]
self.__higher_better_inner_eval = [
name.startswith(('auc', 'ndcg@', 'map@', 'average_precision')) for name in self.__name_inner_eval
]
def attr(self, key):
"""Get attribute string from the Booster.
Parameters
----------
key : string
The name of the attribute.
Returns
-------
value : string or None
The attribute value.
Returns None if attribute does not exist.
"""
return self.__attr.get(key, None)
def set_attr(self, **kwargs):
"""Set attributes to the Booster.
Parameters
----------
**kwargs
The attributes to set.
Setting a value to None deletes an attribute.
Returns
-------
self : Booster
Booster with set attributes.
"""
for key, value in kwargs.items():
if value is not None:
if not isinstance(value, str):
raise ValueError("Only string values are accepted")
self.__attr[key] = value
else:
self.__attr.pop(key, None)
return self
| mit | 6,481,589,992,037,159,000 | 41.512168 | 161 | 0.553914 | false |
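The Booster methods documented above can be exercised end to end with a few lines. The following sketch is an editorial addition rather than part of basic.py, and it assumes a working lightgbm installation; the random data, the model.txt file name and the parameter choices are illustrative only.

import numpy as np
import lightgbm as lgb  # assumed import name; not part of the file above

X = np.random.rand(200, 5)                      # toy data, illustrative only
y = np.random.randint(0, 2, size=200)
train_set = lgb.Dataset(X, label=y)
bst = lgb.train({'objective': 'binary', 'verbose': -1}, train_set, num_boost_round=10)

print(bst.eval_train())                         # [(data_name, eval_name, value, is_higher_better), ...]
bst.save_model('model.txt')                     # wraps LGBM_BoosterSaveModel
restored = lgb.Booster(model_file='model.txt')
print(restored.predict(X[:5]))                  # probabilities for the binary objective
print(bst.feature_importance(importance_type='split'))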
llvm/llvm-zorg | zorg/buildbot/builders/AnnotatedBuilder.py | 1 | 3401 | from buildbot.process.properties import WithProperties
from buildbot.steps.shell import SetProperty
from zorg.buildbot.commands.AnnotatedCommand import AnnotatedCommand
from zorg.buildbot.process.factory import LLVMBuildFactory
def getAnnotatedBuildFactory(
script,
clean=False,
depends_on_projects=None,
env=None,
extra_args=None,
timeout=1200,
checkout_llvm_sources=True):
"""
Returns a new build factory that uses AnnotatedCommand, which
allows the build to be run by version-controlled scripts that do
not require a buildmaster restart to update.
script: script under "builders/annotated" to be run by python
clean: set to true for a clean build of llvm
depends_on_projects: which subprojects to enable
llvm must be first in the list
(default: ["llvm", "clang", "compiler-rt", "libcxx",
"libcxxabi", "libunwind", "lld"])
env: environment overrides (map; default is no overrides)
extra_args: extra arguments to pass to the script (default: [])
    timeout: specifies the builder's timeout in seconds (default: 1200)
    checkout_llvm_sources: whether to also check out the LLVM sources
      (default: True)
"""
if depends_on_projects is None:
depends_on_projects = [
"llvm",
"clang",
"compiler-rt",
"libcxx",
"libcxxabi",
"libunwind",
"lld"]
if extra_args is None:
extra_args = []
f = LLVMBuildFactory(
clean=clean,
depends_on_projects=depends_on_projects)
if clean:
f.addStep(SetProperty(property='clean', command='echo 1'))
# We normally use the clean property to indicate that we want a
# clean build, but AnnotatedCommand uses the clobber property
# instead. Therefore, set clobber if clean is set to a truthy
# value. This will cause AnnotatedCommand to set
# BUILDBOT_CLOBBER=1 in the environment, which is how we
# communicate to the script that we need a clean build.
f.addStep(SetProperty(
property='clobber',
command='echo 1',
doStepIf=lambda step: step.build.getProperty('clean', False)))
merged_env = {
'TERM': 'dumb' # Be cautious and disable color output from all tools.
}
if env is not None:
# Overwrite pre-set items with the given ones, so user can set
# anything.
merged_env.update(env)
scripts_dir = "annotated"
# Check out zorg so we can run the annotator scripts.
f.addGetSourcecodeForProject(
name='update-annotated-scripts',
project='zorg',
src_dir='llvm-zorg',
alwaysUseLatest=True)
if checkout_llvm_sources:
f.addGetSourcecodeSteps()
extra_args_with_props = [WithProperties(arg) for arg in extra_args]
# Explicitly use '/' as separator, because it works on *nix and Windows.
if script.startswith('/'):
command = [script]
else:
script_path = "../llvm-zorg/zorg/buildbot/builders/annotated/%s" % (script)
command = ["python", script_path, WithProperties("--jobs=%(jobs:-)s")]
command += extra_args_with_props
f.addStep(AnnotatedCommand(name="annotate",
description="annotate",
timeout=timeout,
haltOnFailure=True,
command=command,
env=merged_env))
return f
| apache-2.0 | 84,255,668,484,032,700 | 34.427083 | 81 | 0.628051 | false |
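A factory produced by getAnnotatedBuildFactory is meant to be attached to a builder in a buildmaster configuration. The snippet below is an editorial sketch of that wiring, not code from llvm-zorg; the script name, builder name and worker name are assumptions.

from buildbot.plugins import util
from zorg.buildbot.builders import AnnotatedBuilder

c = BuildmasterConfig = {'builders': []}        # the usual master.cfg dictionary

f = AnnotatedBuilder.getAnnotatedBuildFactory(
    script='clang-builder.py',                  # hypothetical script under builders/annotated/
    depends_on_projects=['llvm', 'clang'],
    timeout=3600)

c['builders'].append(
    util.BuilderConfig(name='clang-annotated-builder',   # made-up builder name
                       workernames=['worker-1'],         # made-up worker name
                       factory=f))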
cbitterfield/JobCard | archive/bulkvideosize.py | 1 | 3578 | #!/opt/local/bin/python
# encoding: utf-8
'''
bulkvideosize -- report size, duration and bitrate for a directory of MP4 files
bulkvideosize runs ffprobe on every *.mp4 under --source and writes one
summary line per video to --logfile.
@author: user_name
@copyright: 2017 organization_name. All rights reserved.
@license: license
@contact: user_email
@deffield updated: Updated
'''
import sys
import os
import argparse
__all__ = []
__version__ = 0.1
__date__ = '2017-10-20'
__updated__ = '2017-10-20'
DEBUG = 1
TESTRUN = 0
PROFILE = 0
program_name = os.path.basename(sys.argv[0])
# Setup argument parser
parser = argparse.ArgumentParser()
group = parser.add_mutually_exclusive_group()
group.add_argument("-v", "--verbose", action="store_true", help="Display detailed debugging information")
parser.add_argument("-l","--logfile", action="store", help="Write Logfile if ommitted write to STDOUT")
parser.add_argument("-s","--source", action="store", help="Source Directory")
# Process arguments
args = parser.parse_args()
verbose = args.verbose
path = args.source
logfile = args.logfile
if verbose > 0:
print("Verbose mode on")
## Use Get Video Size Function
def getvideosize(src):
import shlex
import os
from string import Template
import subprocess
import datetime
FFPROBE="/opt/local/bin/ffprobe"
Error = False
log_text = open(logfile, "w")
for video in os.listdir(src):
if video.endswith(".mp4"):
CMD_TEMPLATE = "$FFPROBE -v error -of flat=s=_ -select_streams v:0 -show_entries stream=height,width,bit_rate,duration '$VIDEO'"
CMD = Template(CMD_TEMPLATE).safe_substitute(FFPROBE=FFPROBE, VIDEO=src + "/" + video)
videoName = os.path.basename(video)
pathName = os.path.dirname(src + "/" + video)
#print("Get the Video Size Information for Video: " + videoName )
#print("Source Dir:" + pathName )
#print("getVideoSizeCMD:\n " )
pCMD = shlex.split(CMD)
#print("Command:" + CMD)
try:
result=subprocess.check_output(pCMD)
cWidth = result.splitlines(True)[0]
cHeight = result.splitlines(True)[1]
cDuration = result.splitlines(True)[2]
cBit_Rate = result.splitlines(True)[3]
lWidth = cWidth.split("=")[1]
lHeight = cHeight.split("=")[1]
lDuration = cDuration.split("=")[1]
lBitRate = cBit_Rate.split("=")[1]
Width = lWidth.replace('\n','')
Height = lHeight.replace('\n','')
Duration = lDuration.replace('\n','')
BitRate = lBitRate.replace('\n','')
Duration = Duration.replace('"','')
BitRate = BitRate.replace('"','')
sizeofVideo = str(Width) + "x" + str(Height)
myduration = str(datetime.timedelta(seconds=int(float(Duration))))
mybitrate = str(int(BitRate)/1000)
#print("Video Source: " + video + " Size: " + sizeofVideo + " Duration: " + myduration + " BitRate:" + mybitrate + " kbps" )
log_text.write("Video Source: " + video + " Size: " + sizeofVideo + " Duration: " + myduration + " BitRate:" + mybitrate + " kbps\n")
except:
#print("Video Source: " + video + "ERROR")
log_text.write("Video Source: " + video + "ERROR\n")
log_text.close()
return(Error, sizeofVideo, Duration, BitRate)
myError, mySize, myDuration, myBitrate = getvideosize(path)
| gpl-3.0 | -261,463,053,978,957,760 | 28.089431 | 149 | 0.58161 | false |
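The core of the script above is one ffprobe invocation per MP4 file whose flat output is then split line by line. As an editorial aside, the same probe can be reproduced directly with subprocess; the sample path below is made up and ffprobe is assumed to be on PATH (the script itself hard-codes /opt/local/bin/ffprobe).

import shlex
import subprocess

cmd = ("ffprobe -v error -of flat=s=_ -select_streams v:0 "
       "-show_entries stream=height,width,bit_rate,duration /tmp/sample.mp4")  # path is hypothetical
out = subprocess.check_output(shlex.split(cmd)).decode()
info = dict(line.split("=", 1) for line in out.splitlines())
print(info)   # keys such as streams_stream_0_width, streams_stream_0_duration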
Dapid/pywt | demo/dwt_signal_decomposition.py | 1 | 1789 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import numpy as np
import matplotlib.pyplot as plt
import pywt
ecg = np.load(os.path.join('data', 'ecg.npy'))
data1 = np.concatenate((np.arange(1, 400),
np.arange(398, 600),
np.arange(601, 1024)))
x = np.linspace(0.082, 2.128, num=1024)[::-1]
data2 = np.sin(40 * np.log(x)) * np.sign((np.log(x)))
mode = pywt.MODES.sp1
def plot_signal_decomp(data, w, title):
"""Decompose and plot a signal S.
S = An + Dn + Dn-1 + ... + D1
"""
w = pywt.Wavelet(w)
a = data
ca = []
cd = []
for i in range(5):
(a, d) = pywt.dwt(a, w, mode)
ca.append(a)
cd.append(d)
rec_a = []
rec_d = []
for i, coeff in enumerate(ca):
coeff_list = [coeff, None] + [None] * i
rec_a.append(pywt.waverec(coeff_list, w))
for i, coeff in enumerate(cd):
coeff_list = [None, coeff] + [None] * i
rec_d.append(pywt.waverec(coeff_list, w))
fig = plt.figure()
ax_main = fig.add_subplot(len(rec_a) + 1, 1, 1)
ax_main.set_title(title)
ax_main.plot(data)
ax_main.set_xlim(0, len(data) - 1)
for i, y in enumerate(rec_a):
ax = fig.add_subplot(len(rec_a) + 1, 2, 3 + i * 2)
ax.plot(y, 'r')
ax.set_xlim(0, len(y) - 1)
ax.set_ylabel("A%d" % (i + 1))
for i, y in enumerate(rec_d):
ax = fig.add_subplot(len(rec_d) + 1, 2, 4 + i * 2)
ax.plot(y, 'g')
ax.set_xlim(0, len(y) - 1)
ax.set_ylabel("D%d" % (i + 1))
plot_signal_decomp(data1, 'coif5', "DWT: Signal irregularity")
plot_signal_decomp(data2, 'sym5', "DWT: Frequency and phase change - Symmlets5")
plot_signal_decomp(ecg, 'sym5', "DWT: Ecg sample - Symmlets5")
plt.show()
| mit | 5,571,234,884,135,789,000 | 23.847222 | 80 | 0.536613 | false |
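The demo above builds the multilevel decomposition by looping over pywt.dwt and reconstructing each level separately. As an editorial note, pywt.wavedec performs the same five-level split in a single call; the snippet assumes the same pywt installation and reuses the demo's second signal.

import numpy as np
import pywt

x = np.linspace(0.082, 2.128, num=1024)[::-1]
data2 = np.sin(40 * np.log(x)) * np.sign(np.log(x))
coeffs = pywt.wavedec(data2, 'sym5', mode=pywt.MODES.sp1, level=5)  # [cA5, cD5, cD4, cD3, cD2, cD1]
print([len(c) for c in coeffs])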
danrg/RGT-tool | src/RGT/XML/SVG/Attribs/xlinkAttributes.py | 1 | 4217 | from RGT.XML.SVG.Attribs.basicSvgAttribute import BasicSvgAttribute
from types import StringType
class XlinkAttributes(BasicSvgAttribute):
ATTRIBUTE_XLINK_HREF = 'xlink:href'
ATTRIBUTE_XLINK_SHOW = 'xlink:show'
ATTRIBUTE_XLINK_ACTUATE = 'xlink:actuate'
ATTRIBUTE_XLINK_TYPE = 'xlink:type'
ATTRIBUTE_XLINK_ROLE = 'xlink:role'
ATTRIBUTE_XLINK_ARCROLE = 'xlink:arcrole'
ATTRIBUTE_XLINK_TITLE = 'xlink:title'
def __init__(self):
BasicSvgAttribute.__init__(self)
def setXlinkHref(self, data):
if data is not None:
if type(data) is not StringType:
data = str(data)
self._setNodeAttribute(self.ATTRIBUTE_XLINK_HREF, data)
def setXlinkShow(self, data):
allowedValues = ['new', 'replace', 'embed', 'other', 'none']
if data is not None:
if data not in allowedValues:
values = ''
for value in allowedValues:
values += value + ', '
values = values[0: len(values) - 2]
                raise ValueError('Value not allowed, only ' + values + ' are allowed')
else:
self._setNodeAttribute(self.ATTRIBUTE_XLINK_SHOW, data)
def setXlinkActuate(self, data):
allowedValues = ['onLoad']
if data is not None:
if data not in allowedValues:
values = ''
for value in allowedValues:
values += value + ', '
values = values[0: len(values) - 2]
                raise ValueError('Value not allowed, only ' + values + ' are allowed')
else:
self._setNodeAttribute(self.ATTRIBUTE_XLINK_ACTUATE, data)
def setXlinkType(self, data):
allowedValues = ['simple']
if data is not None:
if data not in allowedValues:
values = ''
for value in allowedValues:
values += value + ', '
values = values[0: len(values) - 2]
                raise ValueError('Value not allowed, only ' + values + ' are allowed')
else:
self._setNodeAttribute(self.ATTRIBUTE_XLINK_TYPE, data)
def setXlinkRole(self, data):
if data is not None:
if type(data) is not StringType:
data = str(data)
self._setNodeAttribute(self.ATTRIBUTE_XLINK_ROLE, data)
def setXlinkArcrole(self, data):
if data is not None:
if type(data) is not StringType:
data = str(data)
self._setNodeAttribute(self.ATTRIBUTE_XLINK_ARCROLE, data)
def setXlinkTitle(self, data):
if data is not None:
if type(data) is not StringType:
data = str(data)
self._setNodeAttribute(self.ATTRIBUTE_XLINK_TITLE, data)
def getXlinkHref(self):
node = self._getNodeAttribute(self.ATTRIBUTE_XLINK_HREF)
if node is not None:
return node.nodeValue
return None
def getXlinkShow(self):
node = self._getNodeAttribute(self.ATTRIBUTE_XLINK_SHOW)
if node is not None:
return node.nodeValue
return None
def getXlinkActuate(self):
node = self._getNodeAttribute(self.ATTRIBUTE_XLINK_ACTUATE)
if node is not None:
return node.nodeValue
return None
def getXlinkType(self):
node = self._getNodeAttribute(self.ATTRIBUTE_XLINK_TYPE)
if node is not None:
return node.nodeValue
return None
def getXlinkRole(self):
node = self._getNodeAttribute(self.ATTRIBUTE_XLINK_ROLE)
if node is not None:
return node.nodeValue
return None
def getXlinkArcrole(self):
node = self._getNodeAttribute(self.ATTRIBUTE_XLINK_ARCROLE)
if node is not None:
return node.nodeValue
return None
def getXlinkTitle(self):
node = self._getNodeAttribute(self.ATTRIBUTE_XLINK_TITLE)
if node is not None:
return node.nodeValue
return None | mit | 2,037,298,200,612,529,700 | 32.867769 | 85 | 0.564857 | false |
hsnr-gamera/gamera | gamera/gui/gaoptimizer/StopCriteriaPanel.py | 1 | 4654 | #
# Copyright (C) 2012 Tobias Bolten
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
import wx
from gamera.gui import compat_wx
from gamera.gui.gaoptimizer.ExpertSettingPanel import *
#-------------------------------------------------------------------------------
class StopCriteriaPanel(ExpertSettingPanel):
#-------------------------------------------------------------------------------
#---------------------------------------------------------------------------
def __init__(self, parent, id):
#---------------------------------------------------------------------------
ExpertSettingPanel.__init__(self, parent, id)
sizer = wx.GridBagSizer(hgap=5, vgap=5)
self.SetSizer(sizer)
# best fitness
self.bestFitness = wx.CheckBox(self, -1, "Perfect LOO-recognition reached", \
name = "bestFitnessStop")
sizer.Add(self.bestFitness, pos=(0,0), \
flag = wx.LEFT | wx.RIGHT | wx.TOP | wx.EXPAND, border=10)
self.genericWidgets.append(self.bestFitness)
# generation counter
self.maxGeneration = wx.CheckBox(self, -1, "Max. number of generations", \
name = "maxGenerations")
sizer.Add(self.maxGeneration, pos=(1,0), \
flag = wx.LEFT | wx.RIGHT | wx.EXPAND, border = 10)
self.maxGenerationCount = wx.SpinCtrl(self, -1, size=(100,-1), \
min=10, max=5000, value='100')
compat_wx.set_tool_tip(self.maxGenerationCount, "Number of generations")
self.maxGenerationCount.Disable()
sizer.Add(self.maxGenerationCount, pos=(1,1), \
flag = wx.LEFT | wx.RIGHT | wx.EXPAND, border=10)
self.genericWidgets.append(self.maxGeneration)
self.AddChildToParent(self.maxGeneration, self.maxGenerationCount)
# fitness counter
self.maxFitnessEval = wx.CheckBox(self, -1, "Max. number of fitness evals", \
name = "maxFitnessEvals")
sizer.Add(self.maxFitnessEval, pos=(2,0), \
flag = wx.LEFT | wx.RIGHT | wx.EXPAND, border=10)
self.maxFitnessEvalCount = wx.SpinCtrl(self, -1, size=(100,-1), \
min=10, max=50000, value='5000')
compat_wx.set_tool_tip(self.maxFitnessEvalCount, "Number of evaluations")
self.maxFitnessEvalCount.Disable()
sizer.Add(self.maxFitnessEvalCount, pos=(2,1), \
flag = wx.LEFT | wx.RIGHT | wx.EXPAND, border=10)
self.genericWidgets.append(self.maxFitnessEval)
self.AddChildToParent(self.maxFitnessEval, self.maxFitnessEvalCount)
# steady state continue
self.steadyContinue = wx.CheckBox(self, -1, "Steady state continue", \
name = "steadyStateStop")
self.steadyContinue.SetValue(True)
sizer.Add(self.steadyContinue, pos=(3,0), \
flag = wx.LEFT | wx.RIGHT | wx.BOTTOM | wx.EXPAND, border=10)
self.steadyContinueMin = wx.SpinCtrl(self, -1, size=(100,-1), \
min=10, max=250000, value='40')
compat_wx.set_tool_tip(self.steadyContinueMin, "Minimum generations")
sizer.Add(self.steadyContinueMin, pos=(3,1), \
flag = wx.LEFT | wx.RIGHT | wx.BOTTOM | wx.EXPAND, border=10)
self.steadyContinueNoChange = wx.SpinCtrl(self, -1, size=(100,-1), \
min=1, max=10000, value='10')
compat_wx.set_tool_tip(self.steadyContinueNoChange, "Generations without improvement")
sizer.Add(self.steadyContinueNoChange, pos=(3,2), \
flag = wx.LEFT | wx.RIGHT | wx.BOTTOM | wx.EXPAND, border=10)
self.genericWidgets.append(self.steadyContinue)
self.AddChildToParent(self.steadyContinue, self.steadyContinueMin)
self.AddChildToParent(self.steadyContinue, self.steadyContinueNoChange)
# bind the EVT_CHECKBOX to the CheckBoxes
self.BindEvent(wx.EVT_CHECKBOX, self.OnCheckBox, \
[self.bestFitness, self.maxGeneration,
self.maxFitnessEval,
self.steadyContinue])
| gpl-2.0 | -2,199,924,503,062,840,000 | 46.489796 | 94 | 0.613666 | false |
BenKaehler/q2-feature-classifier | q2_feature_classifier/tests/__init__.py | 1 | 1162 | # ----------------------------------------------------------------------------
# Copyright (c) 2016-2019, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import tempfile
import shutil
from warnings import filterwarnings
from qiime2.plugin.testing import TestPluginBase
class FeatureClassifierTestPluginBase(TestPluginBase):
def setUp(self):
try:
from q2_feature_classifier.plugin_setup import plugin
except ImportError:
self.fail("Could not import plugin object.")
self.plugin = plugin
self.temp_dir = tempfile.TemporaryDirectory(
prefix='q2-feature-classifier-test-temp-')
filterwarnings('ignore', 'The TaxonomicClassifier ', UserWarning)
def _setup_dir(self, filenames, dirfmt):
for filename in filenames:
filepath = self.get_data_path(filename)
shutil.copy(filepath, self.temp_dir.name)
return dirfmt(self.temp_dir.name, mode='r')
| bsd-3-clause | -7,658,275,673,037,822,000 | 32.2 | 78 | 0.598107 | false |
kalyan02/dayone | do/lib.py | 1 | 2300 | from BeautifulSoup import BeautifulSoup as Soupify
import urllib, re
import settings
import oauth2, urlparse, json
from do import settings
class DropboxAPI(object):
def __init__(self, user):
self.user = user
dinfo = self.user.social_auth.get(provider='dropbox')
access_token = urlparse.parse_qs( dinfo.extra_data['access_token'] )
self.user_token = oauth2.Token(key=access_token['oauth_token'][0],secret=access_token['oauth_token_secret'][0])
self.cons_token = oauth2.Consumer(key=settings.DROPBOX_APP_ID,secret=settings.DROPBOX_API_SECRET)
def request( self, api_call, extra_params=None ):
self.parameters = {
'oauth_signature_method': oauth2.SignatureMethod_PLAINTEXT.name,
'oauth_timestamp' : oauth2.generate_timestamp(),
'oauth_nonce' : oauth2.generate_nonce(),
'oauth_version' : '1.0',
}
if type(extra_params) is dict:
self.parameters.update(extra_params)
self.req = oauth2.Request( url=api_call, parameters=self.parameters )
self.req.sign_request( signature_method=oauth2.SignatureMethod_PLAINTEXT(), token=self.user_token, consumer=self.cons_token)
return self.req
def call(self,method,params):
pass
def format_json(json_string):
return json.dumps( json.loads( json_string ), indent=4 )
# def file_put_contents( fname, fcon ):
# fh = open( fname, 'w+' )
# fh.write( fcon )
# fh.close()
# def file_get_contents( fname ):
# fh = open( fname, 'r')
# return fh.read()
# dropbox_url = "https://www.dropbox.com/sh/7gcfvmk9h107ryc/F39GaH7W8C"
# con = urllib.urlopen( dropbox_url ).read()
# file_put_contents( 'fh.txt', con )
# con = file_get_contents('fh.txt')
# scon = Soupify( con )
# entries_url = scon.findAll( 'a', attrs={'href':re.compile('/entries$')} )[0]['href']
# photos_url = scon.findAll( 'a', attrs={'href':re.compile('/photos$')} )[0]['href']
# print entries_url
# print photos_url
# # entries_page = urllib.urlopen(entries_url).read()
# # file_put_contents('entries_page.txt',entries_page)
# entries_page = file_get_contents('entries_page.txt')
# econ = Soupify(entries_page)
# posts = econ.findAll( 'a', attrs={'href':re.compile('\.doentry')} )
# urls = [ each['href'] for i,each in enumerate(posts) if i % 2 == 1 ]
# mods = econ.findAll( attrs={'class':'modified-time'} ) | gpl-2.0 | -606,869,924,962,227,300 | 33.863636 | 126 | 0.677391 | false |
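DropboxAPI.request only builds and signs an oauth2 request; fetching is left to the caller. The sketch below is editorial and heavily hedged: it assumes a configured Django project where the user 'alice' has authenticated with Dropbox through social auth, and the metadata URL and parameters are illustrative only.

from django.contrib.auth.models import User
from do.lib import DropboxAPI

user = User.objects.get(username='alice')        # hypothetical user
api = DropboxAPI(user)
signed = api.request('https://api.dropbox.com/1/metadata/dropbox/',  # illustrative endpoint
                     extra_params={'list': 'true'})
print(signed.to_url())                           # signed URL, fetchable with urllib or requests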
cfobel/sconspiracy | Python/racy/plugins/qt/sconstools/qt4.py | 1 | 21143 | # ***** BEGIN LICENSE BLOCK *****
# Sconspiracy - Copyright (C) IRCAD, 2004-2010.
# Distributed under the terms of the BSD Licence as
# published by the Open Source Initiative.
# ****** END LICENSE BLOCK ******
"""SCons.Tool.qt
Tool-specific initialization for Qt.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
Tool provided by http://www.iua.upf.es/~dgarcia/Codders/sconstools.html
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "/home/scons/scons/branch.0/branch.96/baseline/src/engine/SCons/Tool/qt.py 0.96.92.D001 2006/04/10 23:13:27 knight"
import os.path
import re
import SCons.Action
import SCons.Builder
import SCons.Defaults
import SCons.Scanner
import SCons.Tool
import SCons.Util
class ToolQtWarning(SCons.Warnings.Warning):
pass
class GeneratedMocFileNotIncluded(ToolQtWarning):
pass
class QtdirNotFound(ToolQtWarning):
pass
SCons.Warnings.enableWarningClass(ToolQtWarning)
qrcinclude_re = re.compile(r'<file>([^<]*)</file>', re.M)
def transformToWinePath(path) :
return os.popen('winepath -w "%s"'%path).read().strip().replace('\\','/')
header_extensions = [".h", ".hxx", ".hpp", ".hh"]
if SCons.Util.case_sensitive_suffixes('.h', '.H'):
header_extensions.append('.H')
# TODO: The following two lines will work when integrated back to SCons
# TODO: Meanwhile the third line will do the work
#cplusplus = __import__('c++', globals(), locals(), [])
#cxx_suffixes = cplusplus.CXXSuffixes
cxx_suffixes = [".c", ".cxx", ".cpp", ".cc"]
def checkMocIncluded(target, source, env):
moc = target[0]
cpp = source[0]
# looks like cpp.includes is cleared before the build stage :-(
# not really sure about the path transformations (moc.cwd? cpp.cwd?) :-/
path = SCons.Defaults.CScan.path_function(env, moc.cwd)
includes = SCons.Defaults.CScan(cpp, env, path)
if not moc in includes:
SCons.Warnings.warn(
GeneratedMocFileNotIncluded,
"Generated moc file '%s' is not included by '%s'" %
(str(moc), str(cpp)))
def find_file(filename, paths, node_factory):
for dir in paths:
node = node_factory(filename, dir)
if node.rexists():
return node
return None
class _Automoc:
"""
Callable class, which works as an emitter for Programs, SharedLibraries and
StaticLibraries.
"""
def __init__(self, objBuilderName):
self.objBuilderName = objBuilderName
def __call__(self, target, source, env):
"""
Smart autoscan function. Gets the list of objects for the Program
or Lib. Adds objects and builders for the special qt files.
"""
try:
if int(env.subst('$QT4_AUTOSCAN')) == 0:
return target, source
except ValueError:
pass
try:
debug = int(env.subst('$QT4_DEBUG'))
except ValueError:
debug = 0
# some shortcuts used in the scanner
splitext = SCons.Util.splitext
objBuilder = getattr(env, self.objBuilderName)
# some regular expressions:
# Q_OBJECT detection
q_object_search = re.compile(r'[^A-Za-z0-9]Q_OBJECT[^A-Za-z0-9]')
# cxx and c comment 'eater'
#comment = re.compile(r'(//.*)|(/\*(([^*])|(\*[^/]))*\*/)')
# CW: something must be wrong with the regexp. See also bug #998222
# CURRENTLY THERE IS NO TEST CASE FOR THAT
# The following is kind of hacky to get builders working properly (FIXME)
objBuilderEnv = objBuilder.env
objBuilder.env = env
mocBuilderEnv = env.Moc4.env
env.Moc4.env = env
# make a deep copy for the result; MocH objects will be appended
out_sources = source[:]
for obj in source:
if isinstance(obj,basestring): # big kludge!
print "scons: qt4: '%s' MAYBE USING AN OLD SCONS VERSION AND NOT CONVERTED TO 'File'. Discarded." % str(obj)
continue
if not obj.has_builder():
# binary obj file provided
if debug:
print "scons: qt: '%s' seems to be a binary. Discarded." % str(obj)
continue
cpp = obj.sources[0]
if not splitext(str(cpp))[1] in cxx_suffixes:
if debug:
print "scons: qt: '%s' is no cxx file. Discarded." % str(cpp)
# c or fortran source
continue
#cpp_contents = comment.sub('', cpp.get_contents())
try:
cpp_contents = cpp.get_contents()
except: continue # may be an still not generated source
h=None
for h_ext in header_extensions:
# try to find the header file in the corresponding source
# directory
hname = splitext(cpp.name)[0] + h_ext
h = find_file(hname, (cpp.get_dir(),), env.File)
if h:
if debug:
print "scons: qt: Scanning '%s' (header of '%s')" % (str(h), str(cpp))
#h_contents = comment.sub('', h.get_contents())
h_contents = h.get_contents()
break
if not h and debug:
print "scons: qt: no header for '%s'." % (str(cpp))
if h and q_object_search.search(h_contents):
# h file with the Q_OBJECT macro found -> add moc_cpp
moc_cpp = env.Moc4(h)
moc_o = objBuilder(moc_cpp)
out_sources.append(moc_o)
#moc_cpp.target_scanner = SCons.Defaults.CScan
if debug:
print "scons: qt: found Q_OBJECT macro in '%s', moc'ing to '%s'" % (str(h), str(moc_cpp))
if cpp and q_object_search.search(cpp_contents):
# cpp file with Q_OBJECT macro found -> add moc
# (to be included in cpp)
moc = env.Moc4(cpp)
env.Ignore(moc, moc)
if debug:
print "scons: qt: found Q_OBJECT macro in '%s', moc'ing to '%s'" % (str(cpp), str(moc))
#moc.source_scanner = SCons.Defaults.CScan
# restore the original env attributes (FIXME)
objBuilder.env = objBuilderEnv
env.Moc4.env = mocBuilderEnv
return (target, out_sources)
AutomocShared = _Automoc('SharedObject')
AutomocStatic = _Automoc('StaticObject')
def _detect(env):
"""Not really safe, but fast method to detect the QT library"""
try: return env['QTDIR']
except KeyError: pass
try: return os.environ['QTDIR']
except KeyError: pass
moc = env.WhereIs('moc-qt4') or env.WhereIs('moc4') or env.WhereIs('moc')
if moc:
QTDIR = os.path.dirname(os.path.dirname(moc))
SCons.Warnings.warn(
QtdirNotFound,
"QTDIR variable is not defined, using moc executable as a hint (QTDIR=%s)" % QTDIR)
return QTDIR
raise SCons.Errors.StopError(
QtdirNotFound,
"Could not detect Qt 4 installation")
return None
def generate(env):
"""Add Builders and construction variables for qt to an Environment."""
def locateQt4Command(env, command, qtdir) :
suffixes = [
'-qt4',
'-qt4.exe',
'4',
'4.exe',
'',
'.exe',
]
triedPaths = []
for suffix in suffixes :
fullpath = os.path.join(qtdir,'bin',command + suffix)
if os.access(fullpath, os.X_OK) :
return fullpath
triedPaths.append(fullpath)
fullpath = env.Detect([command+'-qt4', command+'4', command])
if not (fullpath is None) : return fullpath
raise Exception("Qt4 command '" + command + "' not found. Tried: " + ', '.join(triedPaths))
CLVar = SCons.Util.CLVar
Action = SCons.Action.Action
Builder = SCons.Builder.Builder
splitext = SCons.Util.splitext
env['QTDIR'] = _detect(env)
# TODO: 'Replace' should be 'SetDefault'
# env.SetDefault(
env.Replace(
QTDIR = env['QTDIR'],
QT4_BINPATH = os.path.join('$QTDIR', 'bin'),
QT4_CPPPATH = os.path.join('$QTDIR', 'include'),
QT4_LIBPATH = os.path.join('$QTDIR', 'lib'),
        # TODO: This does not adapt to QTDIR value changes, but is needed in order to support '-qt4' variants
QT4_MOC = locateQt4Command(env,'moc', env['QTDIR']),
QT4_UIC = locateQt4Command(env,'uic', env['QTDIR']),
QT4_RCC = locateQt4Command(env,'rcc', env['QTDIR']),
QT4_LUPDATE = locateQt4Command(env,'lupdate', env['QTDIR']),
QT4_LRELEASE = locateQt4Command(env,'lrelease', env['QTDIR']),
QT4_LIB = '', # KLUDGE to avoid linking qt3 library
QT4_AUTOSCAN = 0, # Should the qt tool try to figure out, which sources are to be moc'ed?
# Some QT specific flags. I don't expect someone wants to
# manipulate those ...
QT4_UICFLAGS = CLVar(''),
QT4_MOCFROMHFLAGS = CLVar(''),
QT4_MOCFROMCXXFLAGS = CLVar('-i'),
QT4_QRCFLAGS = '',
# suffixes/prefixes for the headers / sources to generate
QT4_UISUFFIX = '.ui',
QT4_UICDECLPREFIX = 'ui_',
QT4_UICDECLSUFFIX = '.h',
QT4_MOCINCPREFIX = '-I',
QT4_MOCHPREFIX = 'moc_',
QT4_MOCHSUFFIX = '$CXXFILESUFFIX',
QT4_MOCCXXPREFIX = '',
QT4_MOCCXXSUFFIX = '.moc',
QT4_QRCSUFFIX = '.qrc',
QT4_QRCCXXSUFFIX = '$CXXFILESUFFIX',
QT4_QRCCXXPREFIX = 'qrc_',
QT4_MOCCPPPATH = [],
QT4_MOCINCFLAGS = '$( ${_concat(QT4_MOCINCPREFIX, QT4_MOCCPPPATH, INCSUFFIX, __env__, RDirs)} $)',
# Commands for the qt support ...
QT4_UICCOM = '$QT4_UIC $QT4_UICFLAGS -o $TARGET $SOURCE',
QT4_MOCFROMHCOM = '$QT4_MOC $QT4_MOCFROMHFLAGS $QT4_MOCINCFLAGS -o $TARGET $SOURCE',
QT4_MOCFROMCXXCOM = [
'$QT4_MOC $QT4_MOCFROMCXXFLAGS $QT4_MOCINCFLAGS -o $TARGET $SOURCE',
Action(checkMocIncluded,None)],
QT4_LUPDATECOM = '$QT4_LUPDATE $SOURCE -ts $TARGET',
QT4_LRELEASECOM = '$QT4_LRELEASE $SOURCE',
QT4_RCCCOM = '$QT4_RCC $QT4_QRCFLAGS $SOURCE -o $TARGET -name ${SOURCE.filebase}',
)
# Translation builder
tsbuilder = Builder(
action = SCons.Action.Action('$QT4_LUPDATECOM'), #,'$QT4_LUPDATECOMSTR'),
multi=1
)
env.Append( BUILDERS = { 'Ts': tsbuilder } )
qmbuilder = Builder(
action = SCons.Action.Action('$QT4_LRELEASECOM'),# , '$QT4_LRELEASECOMSTR'),
src_suffix = '.ts',
suffix = '.qm',
single_source = True
)
env.Append( BUILDERS = { 'Qm': qmbuilder } )
# Resource builder
def scanResources(node, env, path, arg):
        # I've been careful to provide names relative to the qrc file.
        # If that were not needed, this code could be simplified a lot.
def recursiveFiles(basepath, path) :
result = []
for item in os.listdir(os.path.join(basepath, path)) :
itemPath = os.path.join(path, item)
if os.path.isdir(os.path.join(basepath, itemPath)) :
result += recursiveFiles(basepath, itemPath)
else:
result.append(itemPath)
return result
contents = node.get_contents()
includes = qrcinclude_re.findall(contents)
qrcpath = os.path.dirname(node.path)
dirs = [included for included in includes if os.path.isdir(os.path.join(qrcpath,included))]
# dirs need to include files recursively
for dir in dirs :
includes.remove(dir)
includes+=recursiveFiles(qrcpath,dir)
return includes
qrcscanner = SCons.Scanner.Scanner(name = 'qrcfile',
function = scanResources,
argument = None,
skeys = ['.qrc'])
qrcbuilder = Builder(
action = SCons.Action.Action('$QT4_RCCCOM'), #, '$QT4_RCCCOMSTR'),
source_scanner = qrcscanner,
src_suffix = '$QT4_QRCSUFFIX',
suffix = '$QT4_QRCCXXSUFFIX',
prefix = '$QT4_QRCCXXPREFIX',
single_source = True
)
env.Append( BUILDERS = { 'Qrc': qrcbuilder } )
# Interface builder
uic4builder = Builder(
action = SCons.Action.Action('$QT4_UICCOM'), #, '$QT4_UICCOMSTR'),
src_suffix='$QT4_UISUFFIX',
suffix='$QT4_UICDECLSUFFIX',
prefix='$QT4_UICDECLPREFIX',
single_source = True
#TODO: Consider the uiscanner on new scons version
)
env['BUILDERS']['Uic4'] = uic4builder
# Metaobject builder
mocBld = Builder(action={}, prefix={}, suffix={})
for h in header_extensions:
act = SCons.Action.Action('$QT4_MOCFROMHCOM') #, '$QT4_MOCFROMHCOMSTR')
mocBld.add_action(h, act)
mocBld.prefix[h] = '$QT4_MOCHPREFIX'
mocBld.suffix[h] = '$QT4_MOCHSUFFIX'
for cxx in cxx_suffixes:
act = SCons.Action.Action('$QT4_MOCFROMCXXCOM') #, '$QT4_MOCFROMCXXCOMSTR')
mocBld.add_action(cxx, act)
mocBld.prefix[cxx] = '$QT4_MOCCXXPREFIX'
mocBld.suffix[cxx] = '$QT4_MOCCXXSUFFIX'
env['BUILDERS']['Moc4'] = mocBld
# er... no idea what that was for
static_obj, shared_obj = SCons.Tool.createObjBuilders(env)
static_obj.src_builder.append('Uic4')
shared_obj.src_builder.append('Uic4')
# We use the emitters of Program / StaticLibrary / SharedLibrary
# to scan for moc'able files
# We can't refer to the builders directly, we have to fetch them
# as Environment attributes because that sets them up to be called
# correctly later by our emitter.
env.AppendUnique(PROGEMITTER =[AutomocStatic],
SHLIBEMITTER=[AutomocShared],
LIBEMITTER =[AutomocStatic],
# Of course, we need to link against the qt libraries
# CPPPATH=["$QT4_CPPPATH"],
LIBPATH=["$QT4_LIBPATH"],
LIBS=['$QT4_LIB'])
# TODO: Does dbusxml2cpp need an adapter
env.AddMethod(enable_modules, "EnableQt4Modules")
def enable_modules(self, modules, debug=False, crosscompiling=False) :
import sys
validModules = [
'QtCore',
'QtGui',
'QtOpenGL',
'Qt3Support',
'QtAssistant',
'QtScript',
'QtDBus',
'QtSql',
# The next modules have not been tested yet so, please
# maybe they require additional work on non Linux platforms
'QtNetwork',
'QtSvg',
'QtTest',
'QtXml',
'QtXmlPatterns',
'QtUiTools',
'QtDesigner',
'QtDesignerComponents',
'QtWebKit',
'QtHelp',
'QtScript',
]
pclessModules = [
# in qt <= 4.3 designer and designerComponents are pcless, on qt4.4 they are not, so removed.
# 'QtDesigner',
# 'QtDesignerComponents',
]
staticModules = [
'QtUiTools',
]
invalidModules=[]
for module in modules:
if module not in validModules :
invalidModules.append(module)
if invalidModules :
raise Exception("Modules %s are not Qt4 modules. Valid Qt4 modules are: %s"% (
str(invalidModules),str(validModules)))
moduleDefines = {
'QtScript' : ['QT_SCRIPT_LIB'],
'QtSvg' : ['QT_SVG_LIB'],
'Qt3Support' : ['QT_QT3SUPPORT_LIB','QT3_SUPPORT'],
'QtSql' : ['QT_SQL_LIB'],
'QtXml' : ['QT_XML_LIB'],
'QtOpenGL' : ['QT_OPENGL_LIB'],
'QtGui' : ['QT_GUI_LIB'],
'QtNetwork' : ['QT_NETWORK_LIB'],
'QtCore' : ['QT_CORE_LIB'],
}
for module in modules :
try : self.AppendUnique(CPPDEFINES=moduleDefines[module])
except: pass
debugSuffix = ''
if sys.platform in ["darwin", "linux2"] and not crosscompiling :
if debug : debugSuffix = '_debug'
for module in modules :
if module not in pclessModules : continue
self.AppendUnique(LIBS=[module+debugSuffix])
self.AppendUnique(LIBPATH=[os.path.join("$QTDIR","lib")])
self.AppendUnique(CPPPATH=[os.path.join("$QTDIR","include","qt4")])
self.AppendUnique(CPPPATH=[os.path.join("$QTDIR","include","qt4",module)])
pcmodules = [module+debugSuffix for module in modules if module not in pclessModules ]
if 'QtDBus' in pcmodules:
self.AppendUnique(CPPPATH=[os.path.join("$QTDIR","include","qt4","QtDBus")])
if "QtAssistant" in pcmodules:
self.AppendUnique(CPPPATH=[os.path.join("$QTDIR","include","qt4","QtAssistant")])
pcmodules.remove("QtAssistant")
pcmodules.append("QtAssistantClient")
self.ParseConfig('pkg-config %s --libs --cflags'% ' '.join(pcmodules))
self["QT4_MOCCPPPATH"] = self["CPPPATH"]
return
if sys.platform == "win32" or crosscompiling :
if crosscompiling:
transformedQtdir = transformToWinePath(self['QTDIR'])
self['QT4_MOC'] = "QTDIR=%s %s"%( transformedQtdir, self['QT4_MOC'])
self.AppendUnique(CPPPATH=[os.path.join("$QTDIR","include")])
try: modules.remove("QtDBus")
except: pass
if debug : debugSuffix = 'd'
if "QtAssistant" in modules:
self.AppendUnique(CPPPATH=[os.path.join("$QTDIR","include","QtAssistant")])
modules.remove("QtAssistant")
modules.append("QtAssistantClient")
self.AppendUnique(LIBS=[lib+'4'+debugSuffix for lib in modules if lib not in staticModules])
self.PrependUnique(LIBS=[lib+debugSuffix for lib in modules if lib in staticModules])
if 'QtOpenGL' in modules:
self.AppendUnique(LIBS=['opengl32'])
self.AppendUnique(CPPPATH=[ '$QTDIR/include/'])
self.AppendUnique(CPPPATH=[ '$QTDIR/include/'+module for module in modules])
if crosscompiling :
self["QT4_MOCCPPPATH"] = [
path.replace('$QTDIR', transformedQtdir)
for path in self['CPPPATH'] ]
else :
self["QT4_MOCCPPPATH"] = self["CPPPATH"]
self.AppendUnique(LIBPATH=[os.path.join('$QTDIR','lib')])
return
"""
if sys.platform=="darwin" :
# TODO: Test debug version on Mac
self.AppendUnique(LIBPATH=[os.path.join('$QTDIR','lib')])
self.AppendUnique(LINKFLAGS="-F$QTDIR/lib")
self.AppendUnique(LINKFLAGS="-L$QTDIR/lib") #TODO clean!
if debug : debugSuffix = 'd'
for module in modules :
# self.AppendUnique(CPPPATH=[os.path.join("$QTDIR","include")])
# self.AppendUnique(CPPPATH=[os.path.join("$QTDIR","include",module)])
# port qt4-mac:
self.AppendUnique(CPPPATH=[os.path.join("$QTDIR","include", "qt4")])
self.AppendUnique(CPPPATH=[os.path.join("$QTDIR","include", "qt4", module)])
if module in staticModules :
self.AppendUnique(LIBS=[module+debugSuffix]) # TODO: Add the debug suffix
self.AppendUnique(LIBPATH=[os.path.join("$QTDIR","lib")])
else :
# self.Append(LINKFLAGS=['-framework', module])
# port qt4-mac:
self.Append(LIBS=module)
if 'QtOpenGL' in modules:
self.AppendUnique(LINKFLAGS="-F/System/Library/Frameworks")
self.Append(LINKFLAGS=['-framework', 'AGL']) #TODO ughly kludge to avoid quotes
self.Append(LINKFLAGS=['-framework', 'OpenGL'])
self["QT4_MOCCPPPATH"] = self["CPPPATH"]
return
# This should work for mac but doesn't
# env.AppendUnique(FRAMEWORKPATH=[os.path.join(env['QTDIR'],'lib')])
# env.AppendUnique(FRAMEWORKS=['QtCore','QtGui','QtOpenGL', 'AGL'])
"""
def exists(env):
return _detect(env)
| bsd-3-clause | 6,324,493,422,340,307,000 | 38.593633 | 130 | 0.593577 | false |
terrelln/python-zstandard | tests/test_train_dictionary.py | 1 | 2899 | import sys
import unittest
import zstandard as zstd
from . common import (
make_cffi,
)
if sys.version_info[0] >= 3:
int_type = int
else:
int_type = long
def generate_samples():
samples = []
for i in range(128):
samples.append(b'foo' * 64)
samples.append(b'bar' * 64)
samples.append(b'foobar' * 64)
samples.append(b'baz' * 64)
samples.append(b'foobaz' * 64)
samples.append(b'bazfoo' * 64)
return samples
@make_cffi
class TestTrainDictionary(unittest.TestCase):
def test_no_args(self):
with self.assertRaises(TypeError):
zstd.train_dictionary()
def test_bad_args(self):
with self.assertRaises(TypeError):
zstd.train_dictionary(8192, u'foo')
with self.assertRaises(ValueError):
zstd.train_dictionary(8192, [u'foo'])
def test_basic(self):
samples = generate_samples()
d = zstd.train_dictionary(8192, samples)
self.assertLessEqual(len(d), 8192)
dict_id = d.dict_id()
self.assertIsInstance(dict_id, int_type)
data = d.as_bytes()
self.assertEqual(data[0:4], b'\x37\xa4\x30\xec')
def test_set_dict_id(self):
samples = generate_samples()
d = zstd.train_dictionary(8192, samples, dict_id=42)
self.assertEqual(d.dict_id(), 42)
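# Illustrative follow-up (not part of the original suite): a trained dictionary can be
# handed to compressor/decompressor objects. The ZstdCompressor/ZstdDecompressor names
# follow the public zstandard bindings; write_content_size is passed explicitly because
# older releases did not embed the content size by default.
def _example_use_trained_dictionary():
    samples = generate_samples()
    d = zstd.train_dictionary(8192, samples)
    cctx = zstd.ZstdCompressor(dict_data=d, write_content_size=True)
    dctx = zstd.ZstdDecompressor(dict_data=d)
    frame = cctx.compress(samples[0])
    assert dctx.decompress(frame) == samples[0]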
@make_cffi
class TestTrainCoverDictionary(unittest.TestCase):
def test_no_args(self):
with self.assertRaises(TypeError):
zstd.train_cover_dictionary()
def test_bad_args(self):
with self.assertRaises(TypeError):
zstd.train_cover_dictionary(8192, u'foo')
with self.assertRaises(ValueError):
zstd.train_cover_dictionary(8192, [u'foo'])
def test_basic(self):
samples = []
for i in range(128):
samples.append(b'foo' * 64)
samples.append(b'foobar' * 64)
d = zstd.train_cover_dictionary(8192, samples, k=64, d=16)
self.assertIsInstance(d.dict_id(), int_type)
data = d.as_bytes()
self.assertEqual(data[0:4], b'\x37\xa4\x30\xec')
self.assertEqual(d.k, 64)
self.assertEqual(d.d, 16)
def test_set_dict_id(self):
samples = []
for i in range(128):
samples.append(b'foo' * 64)
samples.append(b'foobar' * 64)
d = zstd.train_cover_dictionary(8192, samples, k=64, d=16,
dict_id=42)
self.assertEqual(d.dict_id(), 42)
def test_optimize(self):
samples = []
for i in range(128):
samples.append(b'foo' * 64)
samples.append(b'foobar' * 64)
d = zstd.train_cover_dictionary(8192, samples, optimize=True,
threads=-1, steps=1, d=16)
self.assertEqual(d.k, 50)
self.assertEqual(d.d, 16)
| bsd-3-clause | -5,757,206,112,141,331,000 | 25.842593 | 69 | 0.574336 | false |
jpopelka/fabric8-analytics-worker | f8a_worker/solver.py | 1 | 37059 | """Classes for resolving dependencies as specified in each ecosystem."""
import anymarkup
from bs4 import BeautifulSoup
from collections import defaultdict
from functools import cmp_to_key
import logging
from lxml import etree
from operator import itemgetter
from pip._internal.req.req_file import parse_requirements
from pip._vendor.packaging.specifiers import _version_split
import re
from requests import get
from semantic_version import Version as semver_version
from subprocess import check_output
from tempfile import NamedTemporaryFile, TemporaryDirectory
from urllib.parse import urljoin, quote
from urllib.request import urlopen
import requests
from f8a_worker.enums import EcosystemBackend
from f8a_worker.models import Analysis, Ecosystem, Package, Version
from f8a_worker.utils import cwd, TimedCommand
from f8a_worker.process import Git
logger = logging.getLogger(__name__)
class SolverException(Exception):
"""Exception to be raised in Solver."""
pass
class Tokens(object):
"""Comparison token representation."""
operators = ['>=', '<=', '==', '>', '<', '=', '!=']
(GTE, LTE, EQ1, GT, LT, EQ2, NEQ) = range(len(operators))
def compare_version(a, b):
"""Compare two version strings.
:param a: str
:param b: str
:return: -1 / 0 / 1
"""
def _range(q):
"""Convert a version string to array of integers.
"1.2.3" -> [1, 2, 3]
:param q: str
:return: List[int]
"""
r = []
for n in q.replace('-', '.').split('.'):
try:
r.append(int(n))
except ValueError:
# sort rc*, alpha, beta etc. lower than their non-annotated counterparts
r.append(-1)
return r
def _append_zeros(x, num_zeros):
"""Append `num_zeros` zeros to a copy of `x` and return it.
:param x: List[int]
:param num_zeros: int
:return: List[int]
"""
nx = list(x)
for _ in range(num_zeros):
nx.append(0)
return nx
def _cardinal(x, y):
"""Make both input lists be of same cardinality.
:param x: List[int]
:param y: List[int]
:return: List[int]
"""
lx, ly = len(x), len(y)
if lx == ly:
return x, y
elif lx > ly:
return x, _append_zeros(y, lx - ly)
else:
return _append_zeros(x, ly - lx), y
left, right = _cardinal(_range(a), _range(b))
return (left > right) - (left < right)
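# Illustrative sketch (added for documentation): how compare_version() orders version
# strings, following the semantics implemented above.
def _example_compare_version():
    assert compare_version('1.2.3', '1.2.10') == -1     # numeric, not lexicographic
    assert compare_version('1.0', '1.0.0') == 0         # missing parts compare as zero
    assert compare_version('2.0.0-rc1', '2.0.0') == -1  # pre-releases sort lower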
class ReleasesFetcher(object):
"""Base class for fetching releases."""
def __init__(self, ecosystem):
"""Initialize ecosystem."""
self._ecosystem = ecosystem
@property
def ecosystem(self):
"""Get ecosystem property."""
return self._ecosystem
def fetch_releases(self, package):
"""Abstract method for getting list of releases versions."""
raise NotImplementedError
class PypiReleasesFetcher(ReleasesFetcher):
"""Releases fetcher for Pypi."""
def __init__(self, ecosystem):
"""Initialize instance."""
super(PypiReleasesFetcher, self).__init__(ecosystem)
def fetch_releases(self, package):
"""Fetch package releases versions.
        This implementation uses PyPI's JSON API
        ('<fetch_url>/<package>/json') to list the available releases.
"""
if not package:
raise ValueError("package")
package = package.lower()
pypi_package_url = urljoin(
self.ecosystem.fetch_url, '{pkg_name}/json'.format(pkg_name=package)
)
response = requests.get(pypi_package_url)
if response.status_code != 200:
logger.error('Unable to obtain a list of versions for {pkg_name}'.format(
pkg_name=package
))
return package, []
return package, list({x for x in response.json().get('releases', {})})
class NpmReleasesFetcher(ReleasesFetcher):
"""Releases fetcher for NPM."""
def __init__(self, ecosystem):
"""Initialize instance."""
super(NpmReleasesFetcher, self).__init__(ecosystem)
def fetch_releases(self, package):
"""Fetch package releases versions.
Example output from the NPM endpoint:
{
...
versions: {
"0.1.0": {},
"0.1.2": {}
...
}
}
"""
if not package:
raise ValueError("package")
# quote '/' (but not '@') in scoped package name, e.g. in '@slicemenice/item-layouter'
r = get(self.ecosystem.fetch_url + quote(package, safe='@'))
if r.status_code == 200 and r.content:
return package, list(r.json().get('versions', {}).keys())
return package, []
class RubyGemsReleasesFetcher(ReleasesFetcher):
"""Releases fetcher for Rubygems."""
def __init__(self, ecosystem):
"""Initialize instance."""
super(RubyGemsReleasesFetcher, self).__init__(ecosystem)
def _search_package_name(self, package):
"""Search package on rubygems.org."""
url = '{url}/search.json?query={pkg}'.format(url=self.ecosystem.fetch_url,
pkg=package)
r = get(url)
if r.status_code == 200:
exact_match = [p['name']
for p in r.json()
if p['name'].lower() == package.lower()]
if exact_match:
return exact_match.pop()
raise ValueError("Package {} not found".format(package))
def fetch_releases(self, package):
"""Fetch package releases versions.
Example output from the RubyGems endpoint
[
{
"number": "1.0.0",
...
},
{
"number": "2.0.0",
...
}
...
]
"""
if not package:
raise ValueError("package")
url = '{url}/versions/{pkg}.json'.format(url=self.ecosystem.fetch_url,
pkg=package)
r = get(url)
if r.status_code == 404:
return self.fetch_releases(self._search_package_name(package))
return package, [ver['number'] for ver in r.json()]
class NugetReleasesFetcher(ReleasesFetcher):
"""Releases fetcher for Nuget."""
def __init__(self, ecosystem):
"""Initialize instance."""
super(NugetReleasesFetcher, self).__init__(ecosystem)
def scrape_versions_from_nuget_org(self, package, sort_by_downloads=False):
"""Scrape 'Version History' from Nuget."""
releases = []
nuget_packages_url = 'https://www.nuget.org/packages/'
page = get(nuget_packages_url + package)
page = BeautifulSoup(page.text, 'html.parser')
version_history = page.find(class_="version-history")
for version in version_history.find_all(href=re.compile('/packages/')):
version_text = version.text.replace('(current)', '').strip()
try:
semver_version.coerce(version_text)
downloads = int(version.find_next('td').text.strip().replace(',', ''))
except ValueError:
pass
else:
releases.append((version_text, downloads))
if sort_by_downloads:
releases.sort(key=itemgetter(1))
return package, [p[0] for p in reversed(releases)]
def fetch_releases(self, package):
"""Fetch package releases versions."""
if not package:
raise ValueError("package not specified")
# There's an API interface which lists available releases at
# https://api.nuget.org/v3-flatcontainer/{package}/index.json
# But it lists also unlisted/deprecated/shouldn't-be-used versions,
# so we don't use it.
return self.scrape_versions_from_nuget_org(package)
class MavenReleasesFetcher(ReleasesFetcher):
"""Releases fetcher for Maven."""
def __init__(self, ecosystem):
"""Initialize instance."""
super().__init__(ecosystem)
def releases_from_maven_org(self, group_id, artifact_id):
"""Fetch releases versions for group_id/artifact_id."""
metadata_filenames = ['maven-metadata.xml', 'maven-metadata-local.xml']
group_id_path = group_id.replace('.', '/')
versions = set()
we_good = False
for filename in metadata_filenames:
url = urljoin(
self.ecosystem.fetch_url,
'{g}/{a}/{f}'.format(g=group_id_path, a=artifact_id, f=filename)
)
try:
metadata_xml = etree.parse(urlopen(url))
we_good = True # We successfully downloaded at least one of the metadata files
version_elements = metadata_xml.findall('.//version')
versions = versions.union({x.text for x in version_elements})
except OSError:
# Not both XML files have to exist, so don't freak out yet
pass
if not we_good:
logger.error('Unable to obtain a list of versions for {g}:{a}'.format(
g=group_id, a=artifact_id)
)
return list(versions)
def fetch_releases(self, package):
"""Fetch package releases versions."""
if not package:
raise ValueError("package not specified")
try:
group_id, artifact_id = package.split(':')
except ValueError as exc:
raise ValueError("Invalid Maven coordinates: {a}".format(a=package)) from exc
return package, self.releases_from_maven_org(group_id, artifact_id)
class GolangReleasesFetcher(ReleasesFetcher):
"""Releases fetcher for Golang."""
def __init__(self, ecosystem):
"""Initialize instance."""
super(GolangReleasesFetcher, self).__init__(ecosystem)
def fetch_releases(self, package):
"""Fetch package releases versions."""
if not package:
raise ValueError('package not specified')
parts = package.split("/")[:3]
if len(parts) == 3: # this assumes github.com/org/project like structure
host, org, proj = parts
repo_url = 'git://{host}/{org}/{proj}.git'.format(host=host, org=org, proj=proj)
elif len(parts) == 2 and parts[0] == 'gopkg.in': # specific to gopkg.in/packages
host, proj = parts
repo_url = 'https://{host}/{proj}.git'.format(host=host, proj=proj)
else:
raise ValueError("Package {} is invalid git repository".format(package))
output = Git.ls_remote(repo_url, args=['-q'], refs=['HEAD'])
version, ref = output[0].split()
if not version:
raise ValueError("Package {} does not have associated versions".format(package))
return package, [version]
class F8aReleasesFetcher(ReleasesFetcher):
"""Releases fetcher for internal database."""
def __init__(self, ecosystem, database):
"""Initialize instance."""
super(F8aReleasesFetcher, self).__init__(ecosystem)
self.database = database
def fetch_releases(self, package):
"""Fetch analysed versions for specific ecosystem + package from f8a."""
query = self.database.query(Version).\
join(Analysis).join(Package).join(Ecosystem).\
filter(Package.name == package,
Ecosystem.name == self.ecosystem.name,
Analysis.finished_at.isnot(None))
versions = {v.identifier for v in query}
return package, list(sorted(versions, key=cmp_to_key(compare_version)))
class Dependency(object):
"""A Dependency consists of (package) name and version spec."""
def __init__(self, name, spec):
"""Initialize instance."""
self._name = name
# spec is a list where each item is either 2-tuple (operator, version) or list of these
# example: [[('>=', '0.6.0'), ('<', '0.7.0')], ('>', '1.0.0')] means:
# (>=0.6.0 and <0.7.0) or >1.0.0
self._spec = spec
@property
def name(self):
"""Get name property."""
return self._name
@property
def spec(self):
"""Get version spec property."""
return self._spec
def __contains__(self, item):
"""Implement 'in' operator."""
return self.check(item)
def __repr__(self):
"""Return string representation of this instance."""
return "{} {}".format(self.name, self.spec)
def __eq__(self, other):
"""Implement '==' operator."""
return self.name == other.name and self.spec == other.spec
def check(self, version):
"""Check if `version` fits into our dependency specification.
:param version: str
:return: bool
"""
def _compare_spec(spec):
if len(spec) == 1:
spec = ('=', spec[0])
token = Tokens.operators.index(spec[0])
comparison = compare_version(version, spec[1])
if token in [Tokens.EQ1, Tokens.EQ2]:
return comparison == 0
elif token == Tokens.GT:
return comparison == 1
elif token == Tokens.LT:
return comparison == -1
elif token == Tokens.GTE:
return comparison >= 0
elif token == Tokens.LTE:
return comparison <= 0
elif token == Tokens.NEQ:
return comparison != 0
else:
raise ValueError('Invalid comparison token')
def _all(spec_):
return all(_all(s) if isinstance(s, list) else _compare_spec(s) for s in spec_)
return any(_all(s) if isinstance(s, list) else _compare_spec(s) for s in self.spec)
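# Illustrative sketch (added for documentation): Dependency.check() evaluates the nested
# spec structure described in __init__ above; inner lists are ANDed, top-level items ORed.
def _example_dependency_check():
    dep = Dependency('foo', [[('>=', '0.6.0'), ('<', '0.7.0')], ('>', '1.0.0')])
    assert dep.check('0.6.5')      # satisfies the first (ANDed) group
    assert '1.2.0' in dep          # __contains__ delegates to check()
    assert not dep.check('0.8.0')  # matches neither alternative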
class DependencyParser(object):
"""Base class for Dependency parsing."""
def parse(self, specs):
"""Abstract method for Dependency parsing."""
pass
@staticmethod
def compose_sep(deps, separator):
"""Opposite of parse().
:param deps: list of Dependency()
:param separator: when joining dependencies, use this separator
:return: dict of {name: version spec}
"""
result = {}
for dep in deps:
if dep.name not in result:
result[dep.name] = separator.join([op + ver for op, ver in dep.spec])
else:
result[dep.name] += separator + separator.join([op + ver for op, ver in dep.spec])
return result
class PypiDependencyParser(DependencyParser):
"""Pypi Dependency parsing."""
@staticmethod
def _parse_python(spec):
"""Parse PyPI specification of a single dependency.
:param spec: str, for example "Django>=1.5,<1.8"
:return: [Django [[('>=', '1.5'), ('<', '1.8')]]]
"""
def _extract_op_version(spec):
# https://www.python.org/dev/peps/pep-0440/#compatible-release
if spec.operator == '~=':
version = _version_split(spec.version)
if len(version) > 1:
# ignore pre-release, post-release or developmental release
while not version[-1].isdigit():
del version[-1]
del version[-1] # will increase the last but one in next line
version[-1] = str(int(version[-1]) + 1)
else:
raise ValueError('%r must not be used with %r' % (spec.operator, spec.version))
return [('>=', spec.version), ('<', '.'.join(version))]
# Trailing .* is permitted per
# https://www.python.org/dev/peps/pep-0440/#version-matching
elif spec.operator == '==' and spec.version.endswith('.*'):
try:
result = check_output(['/usr/bin/semver-ranger', spec.version],
universal_newlines=True).strip()
gte, lt = result.split()
return [('>=', gte.lstrip('>=')), ('<', lt.lstrip('<'))]
except ValueError:
logger.info("couldn't resolve ==%s", spec.version)
return spec.operator, spec.version
# https://www.python.org/dev/peps/pep-0440/#arbitrary-equality
# Use of this operator is heavily discouraged, so just convert it to 'Version matching'
elif spec.operator == '===':
return '==', spec.version
else:
return spec.operator, spec.version
def _get_pip_spec(requirements):
"""There's no `specs` field In Pip 8+, take info from `specifier` field."""
if hasattr(requirements, 'specs'):
return requirements.specs
elif hasattr(requirements, 'specifier'):
specs = [_extract_op_version(spec) for spec in requirements.specifier]
if len(specs) == 0:
specs = [('>=', '0.0.0')]
elif len(specs) > 1:
specs = [specs]
return specs
# create a temporary file and store the spec there since
# `parse_requirements` requires a file
with NamedTemporaryFile(mode='w+', suffix='pysolve') as f:
f.write(spec)
f.flush()
parsed = parse_requirements(f.name, session=f.name)
dependency = [Dependency(x.name, _get_pip_spec(x.req)) for x in parsed].pop()
return dependency
def parse(self, specs):
"""Parse specs."""
return [self._parse_python(s) for s in specs]
@staticmethod
def compose(deps):
"""Compose deps."""
return DependencyParser.compose_sep(deps, ',')
@staticmethod
def restrict_versions(deps):
"""Not implemented."""
return deps # TODO
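# Illustrative sketch (added for documentation): parsing a PyPI requirement string.
# Running it needs the pip machinery imported above; the requirement below is arbitrary.
def _example_parse_pypi_requirement():
    dep = PypiDependencyParser().parse(['Django>=1.5,<1.8'])[0]
    assert dep.name.lower() == 'django'
    assert dep.check('1.7.2') and not dep.check('1.8')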
class NpmDependencyParser(DependencyParser):
"""NPM Dependency parsing."""
@staticmethod
def _parse_npm_tokens(spec):
"""Parse npm tokens."""
for token in Tokens.operators:
if token in spec:
return token, spec.split(token)[1]
return spec,
def _parse_npm(self, name, spec):
"""Parse NPM specification of a single dependency.
:param name: str
:param spec: str
:return: Dependency
"""
if spec == 'latest':
specs = '*'
else:
specs = check_output(['/usr/bin/semver-ranger', spec], universal_newlines=True).strip()
if specs == 'null':
logger.info("invalid version specification for %s = %s", name, spec)
return None
ret = []
for s in specs.split('||'):
if ' ' in s:
spaced = s.split(' ')
assert len(spaced) == 2
left, right = spaced
ret.append([self._parse_npm_tokens(left), self._parse_npm_tokens(right)])
elif s == '*':
ret.append(('>=', '0.0.0'))
else:
ret.append(self._parse_npm_tokens(s))
return Dependency(name, ret)
def parse(self, specs):
"""Transform list of dependencies (strings) to list of Dependency."""
deps = []
for spec in specs:
name, ver = spec.split(' ', 1)
parsed = self._parse_npm(name, ver)
if parsed:
deps.append(parsed)
return deps
@staticmethod
def compose(deps):
"""Oposite of parse()."""
return DependencyParser.compose_sep(deps, ' ')
@staticmethod
def restrict_versions(deps):
"""From list of semver ranges select only the most restricting ones for each operator.
:param deps: list of Dependency(), example:
[node [('>=', '0.6.0')], node [('<', '1.0.0')], node [('>=', '0.8.0')]]
:return: list of Dependency() with only the most restrictive versions, example:
[node [('<', '1.0.0')], node [('>=', '0.8.0')]]
"""
# list to dict
# {
# 'node' : {
# '>=': ['0.8.0', '0.6.0'],
# '<': ['1.0.0']
# }
# }
dps_dict = defaultdict(dict)
for dp in deps:
if dp.name not in dps_dict:
dps_dict[dp.name] = defaultdict(list)
for spec in dp.spec:
if len(spec) != 2:
continue
operator, version = spec
dps_dict[dp.name][operator].append(version)
# select only the most restrictive versions
result = []
for name, version_spec_dict in dps_dict.items():
specs = []
for operator, versions in version_spec_dict.items():
if operator in ['>', '>=']: # select highest version
version = sorted(versions, key=cmp_to_key(compare_version))[-1]
elif operator in ['<', '<=']: # select lowest version
version = sorted(versions, key=cmp_to_key(compare_version))[0]
specs.append((operator, version))
# dict back to list
result.append(Dependency(name, specs))
return result
RubyGemsDependencyParser = NpmDependencyParser
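# Illustrative sketch (added for documentation): restrict_versions() keeps only the most
# restrictive bound per operator, as its docstring describes.
def _example_restrict_versions():
    deps = [Dependency('node', [('>=', '0.6.0')]),
            Dependency('node', [('<', '1.0.0')]),
            Dependency('node', [('>=', '0.8.0')])]
    restricted = NpmDependencyParser.restrict_versions(deps)
    assert len(restricted) == 1
    assert sorted(restricted[0].spec) == [('<', '1.0.0'), ('>=', '0.8.0')]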
class OSSIndexDependencyParser(NpmDependencyParser):
"""Parse OSS Index version specification."""
def _parse_npm(self, name, spec):
"""Parse OSS Index version specification. It's similar to NPM semver, with few tweaks."""
# sometimes there's '|' instead of '||', but the meaning seems to be the same
spec = spec.replace(' | ', ' || ')
# remove superfluous brackets
spec = spec.replace('(', '').replace(')', '')
return super()._parse_npm(name, spec)
class NugetDependencyParser(object):
"""Nuget version specification parsing."""
def parse(self, specs):
"""Transform list of dependencies (strings) to list of Dependency.
https://docs.microsoft.com/en-us/nuget/create-packages/dependency-versions#version-ranges
:param specs: list of dependencies (strings)
:return: list of Dependency
"""
# TODO: reduce cyclomatic complexity
deps = []
for spec in specs:
name, version_range = spec.split(' ', 1)
# 1.0 -> 1.0≤x
if re.search(r'[,()\[\]]', version_range) is None:
dep = Dependency(name, [('>=', version_range)])
# [1.0,2.0] -> 1.0≤x≤2.0
elif re.fullmatch(r'\[(.+),(.+)\]', version_range):
m = re.fullmatch(r'\[(.+),(.+)\]', version_range)
dep = Dependency(name, [[('>=', m.group(1)), ('<=', m.group(2))]])
# (1.0,2.0) -> 1.0<x<2.0
elif re.fullmatch(r'\((.+),(.+)\)', version_range):
m = re.fullmatch(r'\((.+),(.+)\)', version_range)
dep = Dependency(name, [[('>', m.group(1)), ('<', m.group(2))]])
# The following one is not in specification,
# so we can just guess what was the intention.
# Seen in NLog:5.0.0-beta08 dependencies
# [1.0, ) -> 1.0≤x
elif re.fullmatch(r'\[(.+), \)', version_range):
m = re.fullmatch(r'\[(.+), \)', version_range)
dep = Dependency(name, [('>=', m.group(1))])
# [1.0,2.0) -> 1.0≤x<2.0
elif re.fullmatch(r'\[(.+),(.+)\)', version_range):
m = re.fullmatch(r'\[(.+),(.+)\)', version_range)
dep = Dependency(name, [[('>=', m.group(1)), ('<', m.group(2))]])
# (1.0,) -> 1.0<x
elif re.fullmatch(r'\((.+),\)', version_range):
m = re.fullmatch(r'\((.+),\)', version_range)
dep = Dependency(name, [('>', m.group(1))])
# [1.0] -> x==1.0
elif re.fullmatch(r'\[(.+)\]', version_range):
m = re.fullmatch(r'\[(.+)\]', version_range)
dep = Dependency(name, [('==', m.group(1))])
# (,1.0] -> x≤1.0
elif re.fullmatch(r'\(,(.+)\]', version_range):
m = re.fullmatch(r'\(,(.+)\]', version_range)
dep = Dependency(name, [('<=', m.group(1))])
# (,1.0) -> x<1.0
elif re.fullmatch(r'\(,(.+)\)', version_range):
m = re.fullmatch(r'\(,(.+)\)', version_range)
dep = Dependency(name, [('<', m.group(1))])
            elif re.fullmatch(r'\((.+)\)', version_range):
                raise ValueError("invalid version range %r" % version_range)
            else:
                # no recognised pattern: fail loudly instead of silently reusing a
                # stale (or undefined) dependency from a previous iteration
                raise ValueError("unsupported version range %r" % version_range)
            deps.append(dep)
return deps
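# Illustrative sketch (added for documentation): how the NuGet bracket notation maps onto
# Dependency specs; '[3.6.0,4.0.0)' means 3.6.0 <= x < 4.0.0. The package name is arbitrary.
def _example_nuget_ranges():
    dep = NugetDependencyParser().parse(['NUnit [3.6.0,4.0.0)'])[0]
    assert dep.check('3.6.1')
    assert not dep.check('4.0.0')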
class NoOpDependencyParser(DependencyParser):
"""Dummy dependency parser for ecosystems that don't support version ranges."""
def parse(self, specs):
"""Transform list of dependencies (strings) to list of Dependency."""
return [Dependency(*x.split(' ')) for x in specs]
@staticmethod
def compose(deps):
"""Opposite of parse()."""
return DependencyParser.compose_sep(deps, ' ')
@staticmethod
def restrict_versions(deps):
"""Not implemented."""
return deps
class GolangDependencyParser(DependencyParser):
"""Dependency parser for Golang."""
def parse(self, specs):
"""Transform list of dependencies (strings) to list of Dependency."""
dependencies = []
for spec in specs:
spec_list = spec.split(' ')
if len(spec_list) > 1:
dependencies.append(Dependency(spec_list[0], spec_list[1]))
else:
dependencies.append(Dependency(spec_list[0], ''))
return dependencies
@staticmethod
def compose(deps):
"""Opposite of parse()."""
return DependencyParser.compose_sep(deps, ' ')
@staticmethod
def restrict_versions(deps):
"""Not implemented."""
return deps
class Solver(object):
"""Base class for resolving dependencies."""
def __init__(self, ecosystem, dep_parser=None, fetcher=None, highest_dependency_version=True):
"""Initialize instance."""
self.ecosystem = ecosystem
self._dependency_parser = dep_parser
self._release_fetcher = fetcher
self._highest_dependency_version = highest_dependency_version
@property
def dependency_parser(self):
"""Return DependencyParser instance used by this solver."""
return self._dependency_parser
@property
def release_fetcher(self):
"""Return ReleasesFetcher instance used by this solver."""
return self._release_fetcher
def solve(self, dependencies, graceful=True, all_versions=False):
"""Solve `dependencies` against upstream repository.
:param dependencies: List, List of dependencies in native format
:param graceful: bool, Print info output to stdout
:param all_versions: bool, Return all matched versions instead of the latest
:return: Dict[str, str], Matched versions
"""
solved = {}
for dep in self.dependency_parser.parse(dependencies):
logger.debug("Fetching releases for: {}".format(dep))
name, releases = self.release_fetcher.fetch_releases(dep.name)
if name in solved:
raise SolverException("Dependency: {} is listed multiple times".format(name))
if not releases:
if graceful:
logger.info("No releases found for: %s", dep.name)
else:
raise SolverException("No releases found for: {}".format(dep.name))
matching = sorted([release
for release in releases
if release in dep], key=cmp_to_key(compare_version))
logger.debug(" matching:\n {}".format(matching))
if all_versions:
solved[name] = matching
else:
if not matching:
solved[name] = None
else:
if self._highest_dependency_version:
solved[name] = matching[-1]
else:
solved[name] = matching[0]
return solved
class PypiSolver(Solver):
"""Pypi dependencies solver."""
def __init__(self, ecosystem, parser=None, fetcher=None):
"""Initialize instance."""
super(PypiSolver, self).__init__(ecosystem,
parser or PypiDependencyParser(),
fetcher or PypiReleasesFetcher(ecosystem))
class NpmSolver(Solver):
"""Npm dependencies solver."""
def __init__(self, ecosystem, parser=None, fetcher=None):
"""Initialize instance."""
super(NpmSolver, self).__init__(ecosystem,
parser or NpmDependencyParser(),
fetcher or NpmReleasesFetcher(ecosystem))
class RubyGemsSolver(Solver):
"""Rubygems dependencies solver."""
def __init__(self, ecosystem, parser=None, fetcher=None):
"""Initialize instance."""
super(RubyGemsSolver, self).__init__(ecosystem,
parser or RubyGemsDependencyParser(),
fetcher or RubyGemsReleasesFetcher(ecosystem))
class NugetSolver(Solver):
"""Nuget dependencies solver.
    Nuget is a bit specific because, by default, it resolves version specs to the lowest possible version.
https://docs.microsoft.com/en-us/nuget/release-notes/nuget-2.8#-dependencyversion-switch
"""
def __init__(self, ecosystem, parser=None, fetcher=None):
"""Initialize instance."""
super(NugetSolver, self).__init__(ecosystem,
parser or NugetDependencyParser(),
fetcher or NugetReleasesFetcher(ecosystem),
highest_dependency_version=False)
class MavenManualSolver(Solver):
"""Use this only if you need to resolve all versions or use specific DependencyParser.
Otherwise use MavenSolver (below).
"""
def __init__(self, ecosystem, parser, fetcher=None):
"""Initialize instance."""
super().__init__(ecosystem,
parser,
fetcher or MavenReleasesFetcher(ecosystem))
class GolangSolver(Solver):
"""Golang dependencies solver."""
def __init__(self, ecosystem, parser=None, fetcher=None):
"""Initialize instance."""
super(GolangSolver, self).__init__(ecosystem,
parser or GolangDependencyParser(),
fetcher or GolangReleasesFetcher(ecosystem))
def solve(self, dependencies):
"""Solve `dependencies` against upstream repository."""
result = {}
for dependency in self.dependency_parser.parse(dependencies):
if dependency.spec:
result[dependency.name] = dependency.spec
else:
version = self.release_fetcher.fetch_releases(dependency.name)[1][0]
result[dependency.name] = version
return result
class MavenSolver(object):
"""Doesn't inherit from Solver, because we don't use its solve().
We also don't need a DependencyParser nor a ReleasesFetcher for Maven.
'mvn versions:resolve-ranges' does all the dirty work for us.
Resolves only to one version, so if you need solve(all_versions=True), use MavenManualSolver
"""
@staticmethod
def _generate_pom_xml(to_solve):
"""Create pom.xml with dependencies from to_solve.
And run 'mvn versions:resolve-ranges',
which resolves the version ranges (overwrites the pom.xml).
:param to_solve: {"groupId:artifactId": "version-range"}
"""
project = etree.Element('project')
etree.SubElement(project, 'modelVersion').text = '4.0.0'
etree.SubElement(project, 'groupId').text = 'foo.bar.baz'
etree.SubElement(project, 'artifactId').text = 'testing'
etree.SubElement(project, 'version').text = '1.0.0'
dependencies = etree.SubElement(project, 'dependencies')
for name, version_range in to_solve.items():
group_id, artifact_id = name.rstrip(':').split(':')
dependency = etree.SubElement(dependencies, 'dependency')
etree.SubElement(dependency, 'groupId').text = group_id
etree.SubElement(dependency, 'artifactId').text = artifact_id
etree.SubElement(dependency, 'version').text = version_range
with open('pom.xml', 'wb') as pom:
pom.write(etree.tostring(project, xml_declaration=True, pretty_print=True))
TimedCommand.get_command_output(['mvn', 'versions:resolve-ranges'], graceful=False)
@staticmethod
def _dependencies_from_pom_xml():
"""Extract dependencies from pom.xml in current directory.
:return: {"groupId:artifactId": "version"}
"""
solved = {}
with open('pom.xml') as r:
pom_dict = anymarkup.parse(r.read())
dependencies = pom_dict.get('project', {}).get('dependencies', {}).get('dependency', [])
if not isinstance(dependencies, list):
dependencies = [dependencies]
for dependency in dependencies:
name = "{}:{}".format(dependency['groupId'], dependency['artifactId'])
solved[name] = str(dependency['version'])
return solved
@staticmethod
def _resolve_versions(to_solve):
"""Resolve version ranges in to_solve.
:param to_solve: {"groupId:artifactId": "version-range"}
:return: {"groupId:artifactId": "version"}
"""
if not to_solve:
return {}
with TemporaryDirectory() as tmpdir:
with cwd(tmpdir):
MavenSolver._generate_pom_xml(to_solve)
return MavenSolver._dependencies_from_pom_xml()
@staticmethod
def is_version_range(ver_spec):
"""Check whether ver_spec contains version range."""
# http://maven.apache.org/enforcer/enforcer-rules/versionRanges.html
return re.search(r'[,()\[\]]', ver_spec) is not None
def solve(self, dependencies):
"""Solve version ranges in dependencies."""
already_solved = {}
to_solve = {}
for dependency in dependencies:
name, ver_spec = dependency.split(' ', 1)
if not self.is_version_range(ver_spec):
already_solved[name] = ver_spec
else:
to_solve[name] = ver_spec
result = already_solved.copy()
result.update(self._resolve_versions(to_solve))
return result
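# Illustrative sketch (added for documentation): is_version_range() decides which entries
# need the mvn round-trip; solve() itself shells out to 'mvn versions:resolve-ranges',
# so it is not exercised here.
def _example_maven_version_ranges():
    assert MavenSolver.is_version_range('[1.0,2.0)')
    assert not MavenSolver.is_version_range('1.5.9.RELEASE')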
def get_ecosystem_solver(ecosystem, with_parser=None, with_fetcher=None):
"""Get Solver subclass instance for particular ecosystem.
:param ecosystem: Ecosystem
:param with_parser: DependencyParser instance
:param with_fetcher: ReleasesFetcher instance
:return: Solver
"""
if ecosystem.is_backed_by(EcosystemBackend.maven):
if with_parser is None:
return MavenSolver()
else:
return MavenManualSolver(ecosystem, with_parser, with_fetcher)
elif ecosystem.is_backed_by(EcosystemBackend.npm):
return NpmSolver(ecosystem, with_parser, with_fetcher)
elif ecosystem.is_backed_by(EcosystemBackend.pypi):
return PypiSolver(ecosystem, with_parser, with_fetcher)
elif ecosystem.is_backed_by(EcosystemBackend.rubygems):
return RubyGemsSolver(ecosystem, with_parser, with_fetcher)
elif ecosystem.is_backed_by(EcosystemBackend.nuget):
return NugetSolver(ecosystem, with_parser, with_fetcher)
elif ecosystem.is_backed_by(EcosystemBackend.go):
return GolangSolver(ecosystem, with_parser, with_fetcher)
raise ValueError('Unknown ecosystem: {}'.format(ecosystem.name))
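# Illustrative usage sketch (added for documentation): resolving an npm semver range.
# `npm_ecosystem` stands for an Ecosystem model instance; the call needs network access
# and the semver-ranger helper used by the parser, so this is only a sketch.
def _example_solve_npm_range(npm_ecosystem):
    solver = get_ecosystem_solver(npm_ecosystem)
    return solver.solve(['serve-static ^1.7.1'], all_versions=False)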
def get_ecosystem_parser(ecosystem):
"""Get DependencyParser subclass instance for particular ecosystem."""
if ecosystem.is_backed_by(EcosystemBackend.maven):
return NoOpDependencyParser()
elif ecosystem.is_backed_by(EcosystemBackend.npm):
return NpmDependencyParser()
elif ecosystem.is_backed_by(EcosystemBackend.pypi):
return PypiDependencyParser()
elif ecosystem.is_backed_by(EcosystemBackend.rubygems):
return RubyGemsDependencyParser()
elif ecosystem.is_backed_by(EcosystemBackend.nuget):
return NugetDependencyParser()
elif ecosystem.is_backed_by(EcosystemBackend.go):
return GolangDependencyParser()
raise ValueError('Unknown ecosystem: {}'.format(ecosystem.name))
| gpl-3.0 | -6,675,852,524,338,168,000 | 35.320588 | 100 | 0.562799 | false |
JohnLunzer/flexx | flexx/ui/widgets/_html.py | 1 | 3211 | """
Simple example:
.. UIExample:: 75
from flexx import app, ui
class Example(ui.Widget):
def init(self):
with ui.html.UL():
ui.html.LI(text='foo')
ui.html.LI(text='bar')
.. UIExample:: 150
from flexx import app, ui, event
class Example(ui.Widget):
def init(self):
with ui.html.UL():
ui.html.LI(text='foo')
ui.html.LI(text='bar')
with ui.html.LI():
with ui.html.I():
self.now = ui.html.Span(text='0')
self.but = ui.html.Button(text='press me')
class JS:
@event.connect('but.mouse_down')
def on_click(self, *events):
self.now.text = window.Date.now()
"""
from ... import event
from . import Widget
class Div(Widget):
"""
    This class is the base class for "HTML widgets". These provide a
lower-level way of working with HTML content that can feel more
natural to users with a background in web development.
Via the ``flexx.ui.html`` factory object, it is possible to create *any*
    type of DOM element. E.g. ``ui.html.Table()`` creates a table and
``ui.html.b(text='foo')`` creates a piece of bold text.
Since this class inherits from ``Widget``, all base widget functionality
(e.g. mouse events) work as expected. However, the specific functionality
of each element (e.g. ``src`` for img elements) must be used in the
"JavaScript way".
In contrast to regular Flexx widgets, the css class name of the node only
consists of the name(s) provided via the ``css_class`` property.
Also see :ref:`this example <classic_web_dev.py>`.
"""
class Both:
@event.prop
def text(self, v=''):
""" The inner HTML for this element.
"""
return str(v)
class JS:
def __init__(self, *args):
super().__init__(*args)
self.node.className = ''
def _init_phosphor_and_node(self):
self.phosphor = self._create_phosphor_widget(self._class_name.lower())
self.node = self.phosphor.node
@event.connect('text')
def __on_inner_html(self, *events):
self.node.innerHTML = events[-1].new_value
def _add_child(self, widget):
self.node.appendChild(widget.node)
class HTMLElementFactory:
"""
This object can be used to generate a Flexx Widget class for any
HTML element that you'd like. These Widget classes inherit from ``Div``.
"""
def __getattr__(self, name):
name = name.lower()
cache = globals()
if name.startswith('_'):
return super().__getattr__(name)
if name not in cache:
# Create new class, put it in this module so that JSModule can find it
cls = type(name, (Div,), {})
cls.__module__ = cls.__jsmodule__ = __name__
cache[name] = cls
return cache[name]
html = HTMLElementFactory()
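# Illustrative sketch (added for documentation): attribute access on ``html`` lazily
# generates and caches Div subclasses named after DOM elements; instantiating them needs
# a running Flexx app, so only the class objects are touched here.
def _example_factory_classes():
    table_cls = html.Table
    assert issubclass(table_cls, Div)
    assert html.Table is table_cls  # generated classes are cached in this module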
| bsd-2-clause | -7,527,310,105,070,645,000 | 27.415929 | 82 | 0.540953 | false |
oskyar/test-TFG | TFG/urls.py | 1 | 2144 | from django.conf import settings
from django.conf.urls import patterns, include, url
from django.contrib import admin
from django.contrib.auth.views import login, logout_then_login
from django.views.static import serve
import logging
from importlib import import_module
from django.conf import settings
from TFG.apps.handlererrors.views import Error403, Error404, Error500
from TFG.apps.user.views import Index
from vanilla import TemplateView
from django.core import exceptions
from TFG.apps.user.views import ClientViewErrors
# from registration.views import RegistrationView
admin.autodiscover()
urlpatterns = [
# Examples:
# url(r'^$', 'TFG.views.home', name='home'),
# url(r'^blog/', include('blog.urls')),
url(r'^$', Index.as_view(), name='index'),
url(r'^cookies$', TemplateView.as_view(template_name="cookies.html"), name='cookies'),
url(r'^$', login, {'template_name': 'user/login.html'},
name='login'),
url(r'^logout/$', logout_then_login, name='logout'),
# url(r'^', include('TFG.apps.handlererrors.urls')),
# url(r'^db', TFG.apps.index.views.db, name='db'),
url(r'^admin/', include(admin.site.urls)),
url(r'^chaining/', include('smart_selects.urls')),
url(r'^user/', include('TFG.apps.user.urls')),
url(r'^test/', include('TFG.apps.test.urls')),
url(r'^subject/', include('TFG.apps.subject.urls')),
url(r'^search/', include('TFG.apps.search.urls')),
url(r'^s3direct/', include('s3direct.urls')),
# url(r'^test/', include('TFG.apps.test.urls')),
url(r'^media/(?P<path>.*)$', serve,
{'document_root': settings.MEDIA_ROOT,}),
]
# import_module("TFG.apps.index.signals")
# This code is used to find all the signals added to the apps
"""logger = logging.getLogger(__name__)
signal_modules = {}
for app in settings.INSTALLED_APPS:
signals_module = '%s.signals' % app
try:
logger.debug('loading "%s" ..' % signals_module)
signal_modules[app] = import_module(signals_module)
except ImportError as e:
logger.warning(
'failed to import "%s", reason: %s' % (signals_module, str(e)))
"""
| gpl-2.0 | 830,453,397,419,805,600 | 34.7 | 90 | 0.668534 | false |
dingzg/onepanel | lib/module/user.py | 1 | 10650 | #!/usr/bin/env python2.6
#-*- coding: utf-8 -*-
# Copyright [OnePanel]
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Package for user management.
"""
import os
if __name__ == '__main__':
import sys
root_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
sys.path.insert(0, root_path)
import pexpect
import shlex
import time
import pwd
import grp
import subprocess
from utils import b2h, ftime
#---------------------------------------------------------------------------------------------------
#Function Name : main_process
#Usage :
#Parameters : None
#
#Return value :
# 1
#---------------------------------------------------------------------------------------------------
def main_process(self):
action = self.get_argument('action', '')
if action == 'listuser':
fullinfo = self.get_argument('fullinfo', 'on')
self.write({'code': 0, 'msg': u'成功获取用户列表!', 'data': listuser(fullinfo=='on')})
elif action == 'listgroup':
fullinfo = self.get_argument('fullinfo', 'on')
self.write({'code': 0, 'msg': u'成功获取用户组列表!', 'data': listgroup(fullinfo=='on')})
elif action in ('useradd', 'usermod'):
if self.config.get('runtime', 'mode') == 'demo':
self.write({'code': -1, 'msg': u'DEMO状态不允许添加和修改用户!'})
return
pw_name = self.get_argument('pw_name', '')
pw_gecos = self.get_argument('pw_gecos', '')
pw_gname = self.get_argument('pw_gname', '')
pw_dir = self.get_argument('pw_dir', '')
pw_shell = self.get_argument('pw_shell', '')
pw_passwd = self.get_argument('pw_passwd', '')
pw_passwdc = self.get_argument('pw_passwdc', '')
lock = self.get_argument('lock', '')
lock = (lock == 'on') and True or False
if pw_passwd != pw_passwdc:
self.write({'code': -1, 'msg': u'两次输入的密码不一致!'})
return
options = {
'pw_gecos': _u(pw_gecos),
'pw_gname': _u(pw_gname),
'pw_dir': _u(pw_dir),
'pw_shell': _u(pw_shell),
'lock': lock
}
if len(pw_passwd)>0: options['pw_passwd'] = _u(pw_passwd)
if action == 'useradd':
createhome = self.get_argument('createhome', '')
createhome = (createhome == 'on') and True or False
options['createhome'] = createhome
if useradd(_u(pw_name), options):
self.write({'code': 0, 'msg': u'用户添加成功!'})
else:
self.write({'code': -1, 'msg': u'用户添加失败!'})
elif action == 'usermod':
if usermod(_u(pw_name), options):
self.write({'code': 0, 'msg': u'用户修改成功!'})
else:
self.write({'code': -1, 'msg': u'用户修改失败!'})
elif action == 'userdel':
if self.config.get('runtime', 'mode') == 'demo':
self.write({'code': -1, 'msg': u'DEMO状态不允许删除用户!'})
return
pw_name = self.get_argument('pw_name', '')
if userdel(_u(pw_name)):
self.write({'code': 0, 'msg': u'用户删除成功!'})
else:
self.write({'code': -1, 'msg': u'用户删除失败!'})
elif action in ('groupadd', 'groupmod', 'groupdel'):
if self.config.get('runtime', 'mode') == 'demo':
self.write({'code': -1, 'msg': u'DEMO状态不允许操作用户组!'})
return
gr_name = self.get_argument('gr_name', '')
gr_newname = self.get_argument('gr_newname', '')
actionstr = {'groupadd': u'添加', 'groupmod': u'修改', 'groupdel': u'删除'};
if action == 'groupmod':
rt = groupmod(_u(gr_name), _u(gr_newname))
else:
rt = getattr(user, action)(_u(gr_name))
if rt:
self.write({'code': 0, 'msg': u'用户组%s成功!' % actionstr[action]})
else:
self.write({'code': -1, 'msg': u'用户组%s失败!' % actionstr[action]})
elif action in ('groupmems_add', 'groupmems_del'):
if self.config.get('runtime', 'mode') == 'demo':
self.write({'code': -1, 'msg': u'DEMO状态不允许操作用户组成员!'})
return
gr_name = self.get_argument('gr_name', '')
mem = self.get_argument('mem', '')
option = action.split('_')[1]
optionstr = {'add': u'添加', 'del': u'删除'}
if groupmems(_u(gr_name), _u(option), _u(mem)):
self.write({'code': 0, 'msg': u'用户组成员%s成功!' % optionstr[option]})
else:
            self.write({'code': -1, 'msg': u'用户组成员%s失败!' % optionstr[option]})
def listuser(fullinfo=True):
if fullinfo:
# get lock status from /etc/shadow
locks = {}
with open('/etc/shadow') as f:
for line in f:
fields = line.split(':', 2)
locks[fields[0]] = fields[1].startswith('!')
users = pwd.getpwall()
for i, user in enumerate(users):
users[i] = dict((name, getattr(user, name))
for name in dir(user)
if not name.startswith('__'))
try:
gname = grp.getgrgid(user.pw_gid).gr_name
except:
gname = ''
users[i]['pw_gname'] = gname
users[i]['lock'] = locks[user.pw_name]
else:
users = [pw.pw_name for pw in pwd.getpwall()]
return users
def passwd(username, password):
try:
cmd = shlex.split('passwd \'%s\'' % username)
except:
return False
child = pexpect.spawn(cmd[0], cmd[1:])
i = child.expect(['New password', 'Unknown user name'])
if i == 1:
if child.isalive(): child.wait()
return False
child.sendline(password)
child.expect('Retype new password')
child.sendline(password)
i = child.expect(['updated successfully', pexpect.EOF])
if child.isalive(): child.wait()
return i == 0
def useradd(username, options):
# command like: useradd -c 'New User' -g newgroup -s /bin/bash -m newuser
cmd = ['useradd']
if options.has_key('pw_gname') and options['pw_gname']:
cmd.extend(['-g', options['pw_gname']])
if options.has_key('pw_gecos'):
cmd.extend(['-c', options['pw_gecos']])
if options.has_key('pw_shell'):
cmd.extend(['-s', options['pw_shell']])
if options.has_key('createhome') and options['createhome']:
cmd.append('-m')
else:
cmd.append('-M')
cmd.append(username)
p = subprocess.Popen(cmd,
stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True)
p.stdout.read()
p.stderr.read()
if p.wait() != 0: return False
# check if need to lock/unlock the new account
if options.has_key('lock') and options['lock']:
if not usermod(username, {'lock': options['lock']}): return False
# check if need to set passwd
if options.has_key('pw_passwd'):
if not passwd(username, options['pw_passwd']): return False
return True
def usermod(username, options):
user = pwd.getpwnam(username)
# command like: usermod -c 'I am root' -g root -d /root/ -s /bin/bash -U root
cmd = ['usermod']
if options.has_key('pw_gname'):
cmd.extend(['-g', options['pw_gname']])
if options.has_key('pw_gecos') and options['pw_gecos'] != user.pw_gecos:
cmd.extend(['-c', options['pw_gecos']])
if options.has_key('pw_dir') and options['pw_dir'] != user.pw_dir:
cmd.extend(['-d', options['pw_dir']])
if options.has_key('pw_shell') and options['pw_shell'] != user.pw_shell:
cmd.extend(['-s', options['pw_shell']])
if options.has_key('lock') and options['lock']:
cmd.append('-L')
else:
cmd.append('-U')
cmd.append(username)
if len(cmd) > 2:
p = subprocess.Popen(cmd,
stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True)
p.stdout.read()
msg = p.stderr.read()
if p.wait() != 0:
if not 'no changes' in msg:
return False
# check if need to change passwd
if options.has_key('pw_passwd'):
if not passwd(username, options['pw_passwd']): return False
return True
def userdel(username):
p = subprocess.Popen(['userdel', username],
stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True)
p.stdout.read()
p.stderr.read()
return p.wait() == 0
def listgroup(fullinfo=True):
if fullinfo:
groups = grp.getgrall()
for i, group in enumerate(groups):
groups[i] = dict((name, getattr(group, name))
for name in dir(group)
if not name.startswith('__'))
else:
groups = [gr.gr_name for gr in grp.getgrall()]
return groups
def groupadd(groupname):
p = subprocess.Popen(['groupadd', groupname],
stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True)
p.stdout.read()
p.stderr.read()
return p.wait() == 0
def groupmod(groupname, newgroupname):
p = subprocess.Popen(['groupmod', '-n', newgroupname, groupname],
stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True)
p.stdout.read()
p.stderr.read()
return p.wait() == 0
def groupdel(groupname):
p = subprocess.Popen(['groupdel', groupname],
stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True)
p.stdout.read()
p.stderr.read()
return p.wait() == 0
def groupmems(groupname, option, mem):
cmd = ['groupmems', '-g', groupname]
if option == 'add':
cmd.extend(['-a', mem])
elif option == 'del':
cmd.extend(['-d', mem])
p = subprocess.Popen(cmd,
stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True)
p.stdout.read()
p.stderr.read()
return p.wait() == 0
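# Illustrative usage sketch (added for documentation): the wrappers above shell out to the
# shadow utilities, so running this requires root privileges; the account and group names
# below are hypothetical.
def _example_manage_user():
    ok = useradd('deploy', {'pw_gecos': 'Deploy bot', 'pw_shell': '/bin/bash',
                            'createhome': True, 'lock': False, 'pw_passwd': 's3cret'})
    if ok:
        groupmems('wheel', 'add', 'deploy')  # add the new user to an existing group
        userdel('deploy')                    # clean up again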
| apache-2.0 | -5,547,206,471,437,395,000 | 33.801347 | 100 | 0.543731 | false |
Fat-Zer/FreeCAD_sf_master | src/Tools/updatefromcrowdin.py | 11 | 12203 | #!/usr/bin/python
#***************************************************************************
#* *
#* Copyright (c) 2009 Yorik van Havre <[email protected]> *
#* *
#* This program is free software; you can redistribute it and/or modify *
#* it under the terms of the GNU Library General Public License (LGPL) *
#* as published by the Free Software Foundation; either version 2 of *
#* the License, or (at your option) any later version. *
#* for detail see the LICENCE text file. *
#* *
#* This program is distributed in the hope that it will be useful, *
#* but WITHOUT ANY WARRANTY; without even the implied warranty of *
#* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
#* GNU Library General Public License for more details. *
#* *
#* You should have received a copy of the GNU Library General Public *
#* License along with this program; if not, write to the Free Software *
#* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
#* USA *
#* *
#***************************************************************************
from __future__ import print_function
'''
Usage:
updatefromcrowdin.py [options] [LANGCODE] [LANGCODE LANGCODE...]
Example:
./updatefromcrowdin.py [-d <directory>] fr nl pt_BR
Options:
-h or --help : prints this help text
-d or --directory : specifies a directory containing unzipped translation folders
-z or --zipfile : specifies a path to the freecad.zip file
-m or --module : specifies a single module name to be updated, instead of all modules
If no argument is specified, the command will try to find and use a freecad.zip file
located in the current src/Tools directory (such as the one obtained by running
updatecrowdin.py download) and will extract the default languages specified below
in this file.
This command must be run from its current source tree location (/src/Tools)
so it can find the correct places to put the translation files. If run with
no arguments, the latest translations from crowdin will be downloaded, unzipped
and put to the correct locations. The necessary renaming of files and .qm generation
will be taken care of. The qrc files will also be updated when new
translations are added.
NOTE! The crowdin site only allows to download "builds" (zipped archives)
which must be built prior to downloading. This means a build might not
reflect the latest state of the translations. Better always make a build before
using this script!
You can specify a directory with the -d option if you already downloaded
and extracted the build, or you can specify a single module to update with -m.
You can also run the script without any language code, in which case all the
languages contained in the archive or directory will be added.
'''
import sys, os, shutil, tempfile, zipfile, getopt, StringIO, re
crowdinpath = "http://crowdin.net/download/project/freecad.zip"
# locations list contains Module name, relative path to translation folder and relative path to qrc file
locations = [["AddonManager","../Mod/AddonManager/Resources/translations","../Mod/AddonManager/Resources/AddonManager.qrc"],
["Arch","../Mod/Arch/Resources/translations","../Mod/Arch/Resources/Arch.qrc"],
["Assembly","../Mod/Assembly/Gui/Resources/translations","../Mod/Assembly/Gui/Resources/Assembly.qrc"],
["draft","../Mod/Draft/Resources/translations","../Mod/Draft/Resources/Draft.qrc"],
["Drawing","../Mod/Drawing/Gui/Resources/translations","../Mod/Drawing/Gui/Resources/Drawing.qrc"],
["Fem","../Mod/Fem/Gui/Resources/translations","../Mod/Fem/Gui/Resources/Fem.qrc"],
["FreeCAD","../Gui/Language","../Gui/Language/translation.qrc"],
["Image","../Mod/Image/Gui/Resources/translations","../Mod/Image/Gui/Resources/Image.qrc"],
["Mesh","../Mod/Mesh/Gui/Resources/translations","../Mod/Mesh/Gui/Resources/Mesh.qrc"],
["MeshPart","../Mod/MeshPart/Gui/Resources/translations","../Mod/MeshPart/Gui/Resources/MeshPart.qrc"],
["OpenSCAD","../Mod/OpenSCAD/Resources/translations","../Mod/OpenSCAD/Resources/OpenSCAD.qrc"],
["Part","../Mod/Part/Gui/Resources/translations","../Mod/Part/Gui/Resources/Part.qrc"],
["PartDesign","../Mod/PartDesign/Gui/Resources/translations","../Mod/PartDesign/Gui/Resources/PartDesign.qrc"],
["Points","../Mod/Points/Gui/Resources/translations","../Mod/Points/Gui/Resources/Points.qrc"],
["Raytracing","../Mod/Raytracing/Gui/Resources/translations","../Mod/Raytracing/Gui/Resources/Raytracing.qrc"],
["ReverseEngineering","../Mod/ReverseEngineering/Gui/Resources/translations","../Mod/ReverseEngineering/Gui/Resources/ReverseEngineering.qrc"],
["Robot","../Mod/Robot/Gui/Resources/translations","../Mod/Robot/Gui/Resources/Robot.qrc"],
["Sketcher","../Mod/Sketcher/Gui/Resources/translations","../Mod/Sketcher/Gui/Resources/Sketcher.qrc"],
["StartPage","../Mod/Start/Gui/Resources/translations","../Mod/Start/Gui/Resources/Start.qrc"],
["Test","../Mod/Test/Gui/Resources/translations","../Mod/Test/Gui/Resources/Test.qrc"],
["Ship","../Mod/Ship/resources/translations","../Mod/Ship/resources/Ship.qrc"],
["Plot","../Mod/Plot/resources/translations","../Mod/Plot/resources/Plot.qrc"],
["Web","../Mod/Web/Gui/Resources/translations","../Mod/Web/Gui/Resources/Web.qrc"],
["Spreadsheet","../Mod/Spreadsheet/Gui/Resources/translations","../Mod/Spreadsheet/Gui/Resources/Spreadsheet.qrc"],
["Path","../Mod/Path/Gui/Resources/translations","../Mod/Path/Gui/Resources/Path.qrc"],
["Tux","../Mod/Tux/Resources/translations","../Mod/Tux/Resources/Tux.qrc"],
["TechDraw","../Mod/TechDraw/Gui/Resources/translations","../Mod/TechDraw/Gui/Resources/TechDraw.qrc"],
]
default_languages = "af ar ca cs de el es-ES eu fi fil fr gl hr hu id it ja kab ko lt nl no pl pt-BR pt-PT ro ru sk sl sr sv-SE tr uk val-ES vi zh-CN zh-TW"
def updateqrc(qrcpath,lncode):
"updates a qrc file with the given translation entry"
print("opening " + qrcpath + "...")
# getting qrc file contents
if not os.path.exists(qrcpath):
print("ERROR: Resource file " + qrcpath + " doesn't exist")
sys.exit()
f = open(qrcpath,"ro")
resources = []
for l in f.readlines():
resources.append(l)
f.close()
# checking for existing entry
name = "_" + lncode + ".qm"
for r in resources:
if name in r:
print("language already exists in qrc file")
return
# find the latest qm line
pos = None
for i in range(len(resources)):
if ".qm" in resources[i]:
pos = i
if pos is None:
print("No existing .qm file in this resource. Appending to the end position")
for i in range(len(resources)):
if "</qresource>" in resources[i]:
pos = i-1
if pos is None:
print("ERROR: couldn't add qm files to this resource: " + qrcpath)
sys.exit()
# inserting new entry just after the last one
line = resources[pos]
if ".qm" in line:
line = re.sub("_.*\.qm","_"+lncode+".qm",line)
else:
modname = os.path.splitext(os.path.basename(qrcpath))[0]
line = " <file>translations/"+modname+"_"+lncode+".qm</file>\n"
#print "ERROR: no existing qm entry in this resource: Please add one manually " + qrcpath
#sys.exit()
print("inserting line: ",line)
resources.insert(pos+1,line)
# writing the file
f = open(qrcpath,"wb")
for r in resources:
f.write(r)
f.close()
print("successfully updated ",qrcpath)
def doFile(tsfilepath,targetpath,lncode,qrcpath):
"updates a single ts file, and creates a corresponding qm file"
basename = os.path.basename(tsfilepath)[:-3]
# special fix of the draft filename...
if basename == "draft": basename = "Draft"
newname = basename + "_" + lncode + ".ts"
newpath = targetpath + os.sep + newname
shutil.copyfile(tsfilepath, newpath)
os.system("lrelease " + newpath)
newqm = targetpath + os.sep + basename + "_" + lncode + ".qm"
if not os.path.exists(newqm):
print("ERROR: impossible to create " + newqm + ", aborting")
sys.exit()
updateqrc(qrcpath,lncode)
def doLanguage(lncode,fmodule=""):
" treats a single language"
if lncode == "en":
# never treat "english" translation... For now :)
return
mods = []
if fmodule:
for l in locations:
if l[0].upper() == fmodule.upper():
mods = [l]
else:
mods = locations
if not mods:
print("Error: Couldn't find module "+fmodule)
sys.exit()
for target in mods:
basefilepath = tempfolder + os.sep + lncode + os.sep + target[0] + ".ts"
targetpath = os.path.abspath(target[1])
qrcpath = os.path.abspath(target[2])
doFile(basefilepath,targetpath,lncode,qrcpath)
print(lncode + " done!")
if __name__ == "__main__":
inputdir = ""
inputzip = ""
fmodule = ""
args = sys.argv[1:]
if len(args) < 1:
inputzip = os.path.join(os.path.abspath(os.curdir),"freecad.zip")
if os.path.exists(inputzip):
print("Using zip file found at",inputzip)
else:
print(__doc__)
sys.exit()
else:
try:
opts, args = getopt.getopt(sys.argv[1:], "hd:z:m:", ["help", "directory=","zipfile=", "module="])
except getopt.GetoptError:
print(__doc__)
sys.exit()
# checking on the options
for o, a in opts:
if o in ("-h", "--help"):
print(__doc__)
sys.exit()
if o in ("-d", "--directory"):
inputdir = a
if o in ("-z", "--zipfile"):
inputzip = a
if o in ("-m", "--module"):
fmodule = a
currentfolder = os.getcwd()
if inputdir:
tempfolder = os.path.realpath(inputdir)
if not os.path.exists(tempfolder):
print("ERROR: " + tempfolder + " not found")
sys.exit()
elif inputzip:
tempfolder = tempfile.mkdtemp()
print("creating temp folder " + tempfolder)
inputzip=os.path.realpath(inputzip)
if not os.path.exists(inputzip):
print("ERROR: " + inputzip + " not found")
sys.exit()
shutil.copy(inputzip,tempfolder)
os.chdir(tempfolder)
zfile=zipfile.ZipFile("freecad.zip")
print("extracting freecad.zip...")
zfile.extractall()
else:
tempfolder = tempfile.mkdtemp()
print("creating temp folder " + tempfolder)
os.chdir(tempfolder)
os.system("wget "+crowdinpath)
if not os.path.exists("freecad.zip"):
print("download failed!")
sys.exit()
zfile=zipfile.ZipFile("freecad.zip")
print("extracting freecad.zip...")
zfile.extractall()
os.chdir(currentfolder)
if not args:
#args = [o for o in os.listdir(tempfolder) if o != "freecad.zip"]
# do not treat all languages in the zip file. Some are not translated enough.
args = default_languages.split()
for ln in args:
if not os.path.exists(tempfolder + os.sep + ln):
print("ERROR: language path for " + ln + " not found!")
else:
doLanguage(ln,fmodule)
| lgpl-2.1 | -2,449,099,759,645,174,300 | 44.87594 | 156 | 0.588462 | false |
tlksio/tlksio | env/lib/python3.4/site-packages/logilab/astng/mixins.py | 1 | 4348 | # copyright 2003-2013 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:[email protected]
#
# This file is part of logilab-astng.
#
# logilab-astng is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 2.1 of the License, or (at your
# option) any later version.
#
# logilab-astng is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License
# for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with logilab-astng. If not, see <http://www.gnu.org/licenses/>.
"""This module contains some mixins for the different nodes.
"""
from logilab.astng.exceptions import (ASTNGBuildingException, InferenceError,
NotFoundError)
class BlockRangeMixIn(object):
"""override block range """
def set_line_info(self, lastchild):
self.fromlineno = self.lineno
self.tolineno = lastchild.tolineno
self.blockstart_tolineno = self._blockstart_toline()
def _elsed_block_range(self, lineno, orelse, last=None):
"""handle block line numbers range for try/finally, for, if and while
statements
"""
if lineno == self.fromlineno:
return lineno, lineno
if orelse:
if lineno >= orelse[0].fromlineno:
return lineno, orelse[-1].tolineno
return lineno, orelse[0].fromlineno - 1
return lineno, last or self.tolineno
class FilterStmtsMixin(object):
"""Mixin for statement filtering and assignment type"""
def _get_filtered_stmts(self, _, node, _stmts, mystmt):
"""method used in _filter_stmts to get statemtents and trigger break"""
if self.statement() is mystmt:
# original node's statement is the assignment, only keep
# current node (gen exp, list comp)
return [node], True
return _stmts, False
def ass_type(self):
return self
class AssignTypeMixin(object):
def ass_type(self):
return self
def _get_filtered_stmts(self, lookup_node, node, _stmts, mystmt):
"""method used in filter_stmts"""
if self is mystmt:
return _stmts, True
if self.statement() is mystmt:
# original node's statement is the assignment, only keep
# current node (gen exp, list comp)
return [node], True
return _stmts, False
class ParentAssignTypeMixin(AssignTypeMixin):
def ass_type(self):
return self.parent.ass_type()
class FromImportMixIn(FilterStmtsMixin):
"""MixIn for From and Import Nodes"""
def _infer_name(self, frame, name):
return name
def do_import_module(self, modname):
"""return the ast for a module whose name is <modname> imported by <self>
"""
# handle special case where we are on a package node importing a module
# using the same name as the package, which may end in an infinite loop
# on relative imports
# XXX: no longer needed?
mymodule = self.root()
level = getattr(self, 'level', None) # Import has no level
# XXX we should investigate deeper if we really want to check
# importing itself: modname and mymodule.name may be relative or absolute
if mymodule.relative_to_absolute_name(modname, level) == mymodule.name:
# FIXME: we used to raise InferenceError here, but why ?
return mymodule
try:
return mymodule.import_module(modname, level=level)
except ASTNGBuildingException:
raise InferenceError(modname)
except SyntaxError as ex:
raise InferenceError(str(ex))
def real_name(self, asname):
"""get name from 'as' name"""
for name, _asname in self.names:
if name == '*':
return asname
if not _asname:
name = name.split('.', 1)[0]
_asname = name
if asname == _asname:
return name
raise NotFoundError(asname)
| mit | -2,813,748,875,574,029,000 | 34.639344 | 81 | 0.635005 | false |
naturali/tensorflow | tensorflow/python/kernel_tests/variable_scope_test.py | 1 | 35348 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for variable store."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy
import tensorflow as tf
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import variable_scope
class VariableScopeTest(tf.test.TestCase):
def testGetVar(self):
vs = variable_scope._get_default_variable_store()
v = vs.get_variable("v", [1])
v1 = vs.get_variable("v", [1])
assert v == v1
def testNameExists(self):
vs = variable_scope._get_default_variable_store()
# No check by default, so we can both create and get existing names.
v = vs.get_variable("v", [1])
v1 = vs.get_variable("v", [1])
assert v == v1
# When reuse is False, we fail when variables are already there.
vs.get_variable("w", [1], reuse=False) # That's ok.
with self.assertRaises(ValueError):
vs.get_variable("v", [1], reuse=False) # That fails.
# When reuse is True, we fail when variables are new.
vs.get_variable("v", [1], reuse=True) # That's ok.
with self.assertRaises(ValueError):
vs.get_variable("u", [1], reuse=True) # That fails.
def testNamelessStore(self):
vs = variable_scope._get_default_variable_store()
vs.get_variable("v1", [2])
vs.get_variable("v2", [2])
expected_names = ["%s:0" % name for name in ["v1", "v2"]]
self.assertEqual(set(expected_names),
set([v.name for v in vs._vars.values()]))
def testVarScopeInitializer(self):
with self.test_session() as sess:
init = tf.constant_initializer(0.3)
with tf.variable_scope("tower") as tower:
with tf.variable_scope("foo", initializer=init):
v = tf.get_variable("v", [])
sess.run(tf.initialize_variables([v]))
self.assertAllClose(v.eval(), 0.3)
with tf.variable_scope(tower, initializer=init):
w = tf.get_variable("w", [])
sess.run(tf.initialize_variables([w]))
self.assertAllClose(w.eval(), 0.3)
def testVarScopeDType(self):
with self.test_session():
with tf.variable_scope("tower") as tower:
with tf.variable_scope("foo", dtype=tf.float16):
v = tf.get_variable("v", [])
self.assertEqual(v.dtype, tf.float16_ref)
with tf.variable_scope(tower, dtype=tf.float16):
w = tf.get_variable("w", [])
self.assertEqual(w.dtype, tf.float16_ref)
def testInitFromNonTensorValue(self):
with self.test_session() as sess:
v = tf.get_variable("v", initializer=4, dtype=tf.int32)
sess.run(tf.initialize_variables([v]))
self.assertAllClose(v.eval(), 4)
w = tf.get_variable("w",
initializer=numpy.array([1, 2, 3]),
dtype=tf.int64)
sess.run(tf.initialize_variables([w]))
self.assertAllClose(w.eval(), [1, 2, 3])
with self.assertRaises(TypeError):
tf.get_variable("x", initializer={})
def testVarScopeCachingDevice(self):
with self.test_session():
caching_device = "/job:moo"
with tf.variable_scope("tower"):
with tf.variable_scope("caching", caching_device=caching_device):
v = tf.get_variable("v", [])
self.assertTrue(v.value().device.startswith(caching_device))
with tf.variable_scope("child"):
v2 = tf.get_variable("v", [])
self.assertTrue(v2.value().device.startswith(caching_device))
with tf.variable_scope("not_cached", caching_device=""):
v2_not_cached = tf.get_variable("v", [])
self.assertFalse(
v2_not_cached.value().device.startswith(caching_device))
with tf.variable_scope(
"not_cached_identity_device",
caching_device=lambda op: op.device):
v2_identity_device = tf.get_variable("v", [])
self.assertFalse(
v2_identity_device.value().device.startswith(caching_device))
with tf.variable_scope("we_will_do_it_live") as vs_live:
vs_live.set_caching_device("/job:live")
v_live = tf.get_variable("v", [])
self.assertTrue(v_live.value().device.startswith("/job:live"))
v_tower = tf.get_variable("v", [])
self.assertFalse(v_tower.value().device.startswith(caching_device))
def testVarScopeRegularizer(self):
with self.test_session() as sess:
init = tf.constant_initializer(0.3)
def regularizer1(v):
return tf.reduce_mean(v) + 0.1
def regularizer2(v):
return tf.reduce_mean(v) + 0.2
with tf.variable_scope("tower", regularizer=regularizer1) as tower:
with tf.variable_scope("foo", initializer=init):
v = tf.get_variable("v", [])
sess.run(tf.initialize_variables([v]))
losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
self.assertEqual(1, len(losses))
self.assertAllClose(losses[0].eval(), 0.4)
with tf.variable_scope(tower, initializer=init) as vs:
u = tf.get_variable("u", [])
vs.set_regularizer(regularizer2)
w = tf.get_variable("w", [])
# Next 3 variables are not regularized, to test disabling regularization.
x = tf.get_variable("x", [], regularizer=tf.no_regularizer)
with tf.variable_scope("baz", regularizer=tf.no_regularizer):
y = tf.get_variable("y", [])
vs.set_regularizer(tf.no_regularizer)
z = tf.get_variable("z", [])
# Check results.
losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
self.assertEqual(3, len(losses))
sess.run(tf.initialize_variables([u, w, x, y, z]))
self.assertAllClose(losses[0].eval(), 0.4)
self.assertAllClose(losses[1].eval(), 0.4)
self.assertAllClose(losses[2].eval(), 0.5)
with tf.variable_scope("foo", reuse=True):
v = tf.get_variable("v", []) # "v" is alredy there, reused
losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
self.assertEqual(3, len(losses)) # No new loss added.
def testIntializeFromValue(self):
with self.test_session() as sess:
init = tf.constant(0.1)
w = tf.get_variable("v", initializer=init)
sess.run(tf.initialize_variables([w]))
self.assertAllClose(w.eval(), 0.1)
with self.assertRaisesRegexp(ValueError, "shape"):
# We disallow explicit shape specification when initializer is constant.
tf.get_variable("u", [1], initializer=init)
with tf.variable_scope("foo", initializer=init):
# Constant initializer can be passed through scopes if needed.
v = tf.get_variable("v")
sess.run(tf.initialize_variables([v]))
self.assertAllClose(v.eval(), 0.1)
# Check that non-float32 initializer creates a non-float32 variable.
init = tf.constant(1, dtype=tf.int32)
t = tf.get_variable("t", initializer=init)
self.assertEqual(t.dtype.base_dtype, tf.int32)
# Raise error if `initializer` dtype and `dtype` are not identical.
with self.assertRaisesRegexp(ValueError, "don't match"):
tf.get_variable("s", initializer=init, dtype=tf.float64)
def testControlDeps(self):
with self.test_session() as sess:
v0 = tf.get_variable("v0", [1], initializer=tf.constant_initializer(0))
with tf.control_dependencies([v0.value()]):
v1 = tf.get_variable("v1", [1], initializer=tf.constant_initializer(1))
add = v1 + v0
# v0 should be uninitialized.
with self.assertRaisesRegexp(tf.OpError, "uninitialized"):
sess.run(v0)
# We should be able to initialize and run v1 without initializing
# v0, even if the variable was created with a control dep on v0.
sess.run(v1.initializer)
self.assertEqual(1, sess.run(v1))
# v0 should still be uninitialized.
with self.assertRaisesRegexp(tf.OpError, "uninitialized"):
sess.run(v0)
with self.assertRaisesRegexp(tf.OpError, "uninitialized"):
sess.run(add)
# If we initialize v0 we should be able to run 'add'.
sess.run(v0.initializer)
sess.run(add)
def testControlFlow(self):
with self.test_session() as sess:
v0 = tf.get_variable("v0", [], initializer=tf.constant_initializer(0))
var_dict = {}
# Call get_variable in each of the cond clauses.
def var_in_then_clause():
v1 = tf.get_variable("v1", [1], initializer=tf.constant_initializer(1))
var_dict["v1"] = v1
return v1 + v0
def var_in_else_clause():
v2 = tf.get_variable("v2", [1], initializer=tf.constant_initializer(2))
var_dict["v2"] = v2
return v2 + v0
add = control_flow_ops.cond(tf.less(v0, 10),
var_in_then_clause,
var_in_else_clause)
v1 = var_dict["v1"]
v2 = var_dict["v2"]
# We should be able to initialize and run v1 and v2 without initializing
# v0, even if the variable was created with a control dep on v0.
sess.run(v1.initializer)
self.assertEqual([1], sess.run(v1))
sess.run(v2.initializer)
self.assertEqual([2], sess.run(v2))
# v0 should still be uninitialized.
with self.assertRaisesRegexp(tf.OpError, "uninitialized"):
sess.run(v0)
# We should not be able to run 'add' yet.
with self.assertRaisesRegexp(tf.OpError, "uninitialized"):
sess.run(add)
# If we initialize v0 we should be able to run 'add'.
sess.run(v0.initializer)
sess.run(add)
def testGetVariableScope(self):
# Test the get_variable_scope() function and setting properties of result.
with self.test_session() as sess:
init = tf.constant_initializer(0.3)
with tf.variable_scope("foo"):
new_init1 = tf.get_variable_scope().initializer
self.assertEqual(new_init1, None)
# Check that we can set initializer like this.
tf.get_variable_scope().set_initializer(init)
v = tf.get_variable("v", [])
sess.run(tf.initialize_variables([v]))
self.assertAllClose(v.eval(), 0.3)
# Check that we can set reuse.
tf.get_variable_scope().reuse_variables()
with self.assertRaises(ValueError): # Fail, w does not exist yet.
tf.get_variable("w", [1])
# Check that the set initializer goes away.
new_init = tf.get_variable_scope().initializer
self.assertEqual(new_init, None)
def testVarScope(self):
with self.test_session():
with tf.variable_scope("tower") as tower:
self.assertEqual(tower.name, "tower")
with tf.name_scope("scope") as sc:
self.assertEqual(sc, "tower/scope/")
with tf.variable_scope("foo"):
with tf.variable_scope("bar") as bar:
self.assertEqual(bar.name, "foo/bar")
with tf.name_scope("scope") as sc:
self.assertEqual(sc, "foo/bar/scope/")
with tf.variable_scope("foo"):
with tf.variable_scope(tower, reuse=True) as tower_shared:
self.assertEqual(tower_shared.name, "tower")
with tf.name_scope("scope") as sc:
self.assertEqual(sc, "foo_1/tower/scope/")
def testVarScopeNameScope(self):
with self.test_session():
with tf.name_scope("scope1"):
with tf.variable_scope("tower") as tower:
with tf.name_scope("scope2") as sc2:
self.assertEqual(sc2, "scope1/tower/scope2/")
with tf.variable_scope(tower): # Re-entering acts like another "tower".
with tf.name_scope("scope2") as sc2:
self.assertEqual(sc2, "scope1/tower_1/scope2/")
with tf.variable_scope("tower"): # Re-entering by string acts the same.
with tf.name_scope("scope2") as sc2:
self.assertEqual(sc2, "scope1/tower_2/scope2/")
with tf.name_scope("scope3"):
with tf.variable_scope("tower"):
with tf.name_scope("scope2") as sc2:
self.assertEqual(sc2, "scope3/tower/scope2/")
with tf.variable_scope(tower):
with tf.name_scope("scope2") as sc2:
self.assertEqual(sc2, "scope3/tower_1/scope2/")
root_var_scope = tf.get_variable_scope()
with tf.name_scope("scope4"):
with tf.variable_scope(root_var_scope):
with tf.name_scope("scope2") as sc2:
self.assertEqual(sc2, "scope4/scope2/")
def testVarScopeOriginalNameScope(self):
with self.test_session():
with tf.name_scope("scope1"):
with tf.variable_scope("tower") as tower:
self.assertEqual(tower.original_name_scope, "scope1/tower/")
with tf.name_scope("scope2") as sc2:
self.assertEqual(sc2, "scope1/tower/scope2/")
with tf.name_scope("scope2"):
with tf.variable_scope(tower) as tower1:
# Re-entering preserves original name scope.
self.assertEqual(tower1.original_name_scope, "scope1/tower/")
with tf.name_scope("foo") as sc2:
self.assertEqual(sc2, "scope2/tower/foo/")
# Test re-entering original name scope.
with tf.name_scope(tower.original_name_scope):
with tf.name_scope("bar") as sc3:
self.assertEqual(sc3, "scope1/tower/bar/")
with tf.name_scope("scope2"):
with tf.variable_scope(tower):
with tf.name_scope(tower.original_name_scope):
with tf.name_scope("bar") as sc3:
self.assertEqual(sc3, "scope1/tower/bar_1/")
def testVarScopeObjectReuse(self):
with self.test_session():
vs = None
with tf.variable_scope("jump", reuse=True) as scope:
vs = scope
with tf.variable_scope(vs) as jump:
self.assertTrue(jump.reuse)
with tf.variable_scope(vs, reuse=True) as jump_reuse:
self.assertTrue(jump_reuse.reuse)
with tf.variable_scope(vs, reuse=False) as jump_no_reuse:
self.assertFalse(jump_no_reuse.reuse)
with tf.variable_scope("jump", reuse=False) as scope:
vs = scope
with tf.variable_scope(vs) as jump:
self.assertFalse(jump.reuse)
with tf.variable_scope(vs, reuse=True) as jump_reuse:
self.assertTrue(jump_reuse.reuse)
with tf.variable_scope(vs, reuse=False) as jump_no_reuse:
self.assertFalse(jump_no_reuse.reuse)
def testVarOpScope(self):
with self.test_session():
with tf.name_scope("scope1"):
with tf.variable_scope("tower", "default", []):
self.assertEqual(tf.get_variable("w", []).name,
"tower/w:0")
with tf.name_scope("scope2") as sc2:
self.assertEqual(sc2, "scope1/tower/scope2/")
with tf.variable_scope("tower", "default", []):
with self.assertRaises(ValueError):
tf.get_variable("w", [])
with tf.name_scope("scope2") as sc2:
self.assertEqual(sc2, "scope1/tower_1/scope2/")
with tf.name_scope("scope2"):
with tf.variable_scope(None, "default", []):
self.assertEqual(tf.get_variable("w", []).name,
"default/w:0")
with tf.name_scope("scope2") as sc2:
self.assertEqual(sc2, "scope2/default/scope2/")
with tf.variable_scope(None, "default", []):
self.assertEqual(tf.get_variable("w", []).name,
"default_1/w:0")
with tf.name_scope("scope2") as sc2:
self.assertEqual(sc2, "scope2/default_1/scope2/")
def testVarOpScopeUniqueNamesInterleavedSubstringScopes(self):
with self.test_session():
with tf.variable_scope(None, "defaultScope1"):
with tf.variable_scope(None, "layer"):
self.assertEqual(tf.get_variable("w", []).name,
"defaultScope1/layer/w:0")
with tf.variable_scope(None, "defaultScope1"):
with tf.variable_scope(None, "layer"):
self.assertEqual(tf.get_variable("w", []).name,
"defaultScope1_1/layer/w:0")
with tf.variable_scope(None, "defaultScope"):
with tf.variable_scope(None, "layer"):
self.assertEqual(tf.get_variable("w", []).name,
"defaultScope/layer/w:0")
with tf.variable_scope(None, "defaultScope1"):
with tf.variable_scope(None, "layer"):
self.assertEqual(tf.get_variable("w", []).name,
"defaultScope1_2/layer/w:0")
def testVarOpScopeReuse(self):
with self.test_session():
with tf.variable_scope("outer") as outer:
with tf.variable_scope("tower", "default", []):
self.assertEqual(tf.get_variable("w", []).name,
"outer/tower/w:0")
with tf.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer/tower/scope2/")
with tf.variable_scope(None, "default", []):
self.assertEqual(tf.get_variable("w", []).name,
"outer/default/w:0")
with tf.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer/default/scope2/")
with tf.variable_scope(outer, reuse=True) as outer:
with tf.variable_scope("tower", "default", []):
self.assertEqual(tf.get_variable("w", []).name,
"outer/tower/w:0")
with tf.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer_1/tower/scope2/")
with tf.variable_scope(None, "default", []):
self.assertEqual(tf.get_variable("w", []).name,
"outer/default/w:0")
with tf.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer_1/default/scope2/")
def testVarScopeGetVar(self):
with self.test_session():
with tf.variable_scope("root"):
with tf.variable_scope("towerA") as tower_a:
va = tf.get_variable("v", [1])
self.assertEqual(va.name, "root/towerA/v:0")
with tf.variable_scope(tower_a, reuse=True):
va2 = tf.get_variable("v", [1])
self.assertEqual(va2, va)
with tf.variable_scope("towerB"):
vb = tf.get_variable("v", [1])
self.assertEqual(vb.name, "root/towerB/v:0")
with self.assertRaises(ValueError):
with tf.variable_scope("towerA"):
va2 = tf.get_variable("v", [1])
with tf.variable_scope("towerA", reuse=True):
va2 = tf.get_variable("v", [1])
self.assertEqual(va2, va)
with tf.variable_scope("foo"):
with tf.variable_scope("bar"):
v = tf.get_variable("v", [1])
self.assertEqual(v.name, "root/foo/bar/v:0")
with tf.variable_scope(tower_a, reuse=True):
va3 = tf.get_variable("v", [1])
self.assertEqual(va, va3)
with self.assertRaises(ValueError):
with tf.variable_scope(tower_a, reuse=True):
with tf.variable_scope("baz"):
tf.get_variable("v", [1])
with self.assertRaises(ValueError) as exc:
with tf.variable_scope(tower_a, reuse=True):
tf.get_variable("v", [2]) # Different shape.
self.assertEqual("shape" in str(exc.exception), True)
with self.assertRaises(ValueError) as exc:
with tf.variable_scope(tower_a, reuse=True):
tf.get_variable("v", [1], dtype=tf.int32)
self.assertEqual("dtype" in str(exc.exception), True)
def testVarScopeOuterScope(self):
with self.test_session():
with tf.variable_scope("outer") as outer:
pass
with tf.variable_scope(outer):
self.assertEqual(tf.get_variable("w", []).name,
"outer/w:0")
with tf.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer_1/scope2/")
with tf.variable_scope("default"):
self.assertEqual(tf.get_variable("w", []).name,
"outer/default/w:0")
with tf.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer_1/default/scope2/")
with tf.variable_scope(outer, reuse=True):
self.assertEqual(tf.get_variable("w", []).name,
"outer/w:0")
with tf.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer_2/scope2/")
with tf.variable_scope("default", reuse=True):
self.assertEqual(tf.get_variable("w", []).name,
"outer/default/w:0")
with tf.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer_2/default/scope2/")
def testVarScopeNestedOuterScope(self):
with self.test_session():
with tf.variable_scope("outer") as outer:
with tf.variable_scope(outer):
self.assertEqual(tf.get_variable("w", []).name,
"outer/w:0")
with tf.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer/outer/scope2/")
with tf.variable_scope("default"):
self.assertEqual(tf.get_variable("w", []).name,
"outer/default/w:0")
with tf.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer/default/scope2/")
with tf.variable_scope(outer, reuse=True):
self.assertEqual(tf.get_variable("w", []).name,
"outer/w:0")
with tf.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer/outer_1/scope2/")
with tf.variable_scope("default", reuse=True):
self.assertEqual(tf.get_variable("w", []).name,
"outer/default/w:0")
with tf.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer/default_1/scope2/")
def testVarOpScopeReuseParam(self):
with self.test_session():
with tf.variable_scope("outer") as outer:
with tf.variable_scope("tower", "default", []):
self.assertEqual(tf.get_variable("w", []).name,
"outer/tower/w:0")
with tf.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer/tower/scope2/")
with tf.variable_scope(None, "default", []):
self.assertEqual(tf.get_variable("w", []).name,
"outer/default/w:0")
with tf.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer/default/scope2/")
with tf.variable_scope(outer) as outer:
with tf.variable_scope("tower", "default", reuse=True):
self.assertEqual(tf.get_variable("w", []).name,
"outer/tower/w:0")
with tf.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer_1/tower/scope2/")
outer.reuse_variables()
with tf.variable_scope(None, "default", []):
self.assertEqual(tf.get_variable("w", []).name,
"outer/default/w:0")
with tf.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer_1/default/scope2/")
def testVarOpScopeReuseError(self):
with self.test_session():
with self.assertRaises(ValueError):
with tf.variable_scope(None, "default", reuse=True):
self.assertEqual(tf.get_variable("w", []).name,
"outer/tower/w:0")
def testVarOpScopeOuterScope(self):
with self.test_session():
with tf.variable_scope("outer") as outer:
pass
with tf.variable_scope(outer, "default", []):
self.assertEqual(tf.get_variable("w", []).name,
"outer/w:0")
with tf.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer_1/scope2/")
with tf.variable_scope(None, "default", []):
self.assertEqual(tf.get_variable("w", []).name,
"outer/default/w:0")
with tf.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer_1/default/scope2/")
with tf.variable_scope(outer, "default", reuse=True):
self.assertEqual(tf.get_variable("w", []).name,
"outer/w:0")
with tf.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer_2/scope2/")
outer.reuse_variables()
with tf.variable_scope(None, "default", []):
self.assertEqual(tf.get_variable("w", []).name,
"outer/default/w:0")
with tf.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer_2/default/scope2/")
def testVarOpScopeNestedOuterScope(self):
with self.test_session():
with tf.variable_scope("outer") as outer:
with tf.variable_scope(outer, "default", []):
self.assertEqual(tf.get_variable("w", []).name,
"outer/w:0")
with tf.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer/outer/scope2/")
with tf.variable_scope(None, "default", []):
self.assertEqual(tf.get_variable("w", []).name,
"outer/default/w:0")
with tf.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer/default/scope2/")
with tf.variable_scope(outer, "default", reuse=True):
self.assertEqual(tf.get_variable("w", []).name,
"outer/w:0")
with tf.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer_1/scope2/")
with tf.variable_scope(None, "default", []):
self.assertEqual(tf.get_variable("w", []).name,
"outer/default/w:0")
with tf.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer_1/default/scope2/")
def testGetLocalVar(self):
with self.test_session():
# Check that local variable respects naming.
with tf.variable_scope("outer") as outer:
with tf.variable_scope(outer, "default", []):
local_var = variable_scope.get_local_variable(
"w", [], collections=["foo"])
self.assertEqual(local_var.name, "outer/w:0")
# Since the variable is local, it should be in the local variable collection
# but not in the trainable collection.
self.assertIn(local_var, tf.get_collection(tf.GraphKeys.LOCAL_VARIABLES))
self.assertIn(local_var, tf.get_collection("foo"))
self.assertNotIn(
local_var, tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES))
# Check that local variable respects `reuse`.
with tf.variable_scope(outer, "default", reuse=True):
self.assertEqual(variable_scope.get_local_variable("w", []).name,
"outer/w:0")
def axis0_into1_partitioner(shape=None, **unused_kwargs):
part = [1] * len(shape)
return part
def axis0_into2_partitioner(shape=None, **unused_kwargs):
part = [1] * len(shape)
part[0] = 2
return part
def axis0_into3_partitioner(shape=None, **unused_kwargs):
part = [1] * len(shape)
part[0] = 3
return part
class VariableScopeWithPartitioningTest(tf.test.TestCase):
def testResultNameMatchesRequested(self):
with tf.variable_scope("scope0", partitioner=axis0_into2_partitioner):
v = tf.get_variable("name0", shape=(3, 1, 1))
self.assertEqual(v.name, "scope0/name0")
v_concat = v.as_tensor()
self.assertEqual(v_concat.name, "scope0/name0:0")
variables = tf.get_collection(tf.GraphKeys.VARIABLES)
self.assertTrue("scope0/name0/part_0:0" in [x.name for x in variables])
self.assertTrue("scope0/name0/part_1:0" in [x.name for x in variables])
self.assertFalse("scope0/name0/part_2:0" in [x.name for x in variables])
def testBreaksIfPartitioningChanges(self):
with tf.variable_scope("scope0", partitioner=axis0_into2_partitioner):
tf.get_variable("name0", shape=(3, 1, 1))
with tf.variable_scope("scope0",
partitioner=axis0_into3_partitioner,
reuse=True):
with self.assertRaisesRegexp(
ValueError,
"Trying to reuse partitioned variable .* but specified partitions .* "
"and found partitions .*"):
tf.get_variable("name0", shape=(3, 1, 1))
with tf.variable_scope("scope0",
partitioner=axis0_into1_partitioner,
reuse=True):
with self.assertRaisesRegexp(
ValueError,
"Trying to reuse partitioned variable .* but specified partitions .* "
"and found partitions .*"):
tf.get_variable("name0", shape=(3, 1, 1))
def testReturnsExistingConcatenatedValueIfReuse(self):
with tf.variable_scope("scope0", partitioner=axis0_into2_partitioner):
v_concat = tf.get_variable("name0", shape=(3, 1, 1))
tf.get_variable_scope().reuse_variables()
v_concat_2 = tf.get_variable("name0", shape=(3, 1, 1))
self.assertEqual(v_concat, v_concat_2)
def testAllowsReuseWithoutPartitioner(self):
with tf.variable_scope("scope0", partitioner=axis0_into2_partitioner):
v = tf.get_variable("name0", shape=(3, 1, 1))
with tf.variable_scope("scope0", reuse=True):
v_reused = tf.get_variable("name0")
self.assertEqual(v, v_reused)
def testPropagatePartitionerOnReopening(self):
with tf.variable_scope("scope0", partitioner=axis0_into2_partitioner) as vs:
self.assertEqual(axis0_into2_partitioner, vs.partitioner)
with tf.variable_scope(vs) as vs1:
self.assertEqual(axis0_into2_partitioner, vs1.partitioner)
def testPartitionConcatenatesAlongCorrectAxis(self):
def _part_axis_0(**unused_kwargs):
return (2, 1, 1)
def _part_axis_1(**unused_kwargs):
return (1, 2, 1)
with tf.variable_scope("root"):
v0 = tf.get_variable("n0", shape=(2, 2, 2), partitioner=_part_axis_0)
v1 = tf.get_variable("n1", shape=(2, 2, 2), partitioner=_part_axis_1)
self.assertEqual(v0.get_shape(), (2, 2, 2))
self.assertEqual(v1.get_shape(), (2, 2, 2))
n0_0 = tf.get_default_graph().get_tensor_by_name("root/n0/part_0:0")
n0_1 = tf.get_default_graph().get_tensor_by_name("root/n0/part_1:0")
self.assertEqual(n0_0.get_shape(), (1, 2, 2))
self.assertEqual(n0_1.get_shape(), (1, 2, 2))
n1_0 = tf.get_default_graph().get_tensor_by_name("root/n1/part_0:0")
n1_1 = tf.get_default_graph().get_tensor_by_name("root/n1/part_1:0")
self.assertEqual(n1_0.get_shape(), (2, 1, 2))
self.assertEqual(n1_1.get_shape(), (2, 1, 2))
class VariableScopeWithCustomGetterTest(tf.test.TestCase):
def testNonCallableGetterFails(self):
with self.assertRaisesRegexp(ValueError, r"custom_getter .* not callable:"):
with tf.variable_scope("scope0", custom_getter=3):
tf.get_variable("name0")
with self.assertRaisesRegexp(ValueError, r"custom_getter .* not callable:"):
tf.get_variable("name0", custom_getter=3)
def testNoSideEffectsWithIdentityCustomGetter(self):
called = [0]
def custom_getter(getter, *args, **kwargs):
called[0] += 1
return getter(*args, **kwargs)
with tf.variable_scope("scope", custom_getter=custom_getter) as scope:
v = tf.get_variable("v", [1])
with tf.variable_scope(scope, reuse=True):
v2 = tf.get_variable("v", [1])
with tf.variable_scope("new_scope") as new_scope:
v3 = tf.get_variable("v3", [1])
with tf.variable_scope(new_scope, reuse=True, custom_getter=custom_getter):
v4 = tf.get_variable("v3", [1])
self.assertEqual(v, v2)
self.assertEqual(v3, v4)
self.assertEqual(3, called[0]) # skipped one in the first new_scope
def testGetterThatCreatesTwoVariablesAndSumsThem(self):
def custom_getter(getter, name, *args, **kwargs):
g_0 = getter("%s/0" % name, *args, **kwargs)
g_1 = getter("%s/1" % name, *args, **kwargs)
with tf.name_scope("custom_getter"):
return g_0 + g_1
with tf.variable_scope("scope", custom_getter=custom_getter):
v = tf.get_variable("v", [1, 2, 3])
self.assertEqual([1, 2, 3], v.get_shape())
true_vars = tf.trainable_variables()
self.assertEqual(2, len(true_vars))
self.assertEqual("scope/v/0:0", true_vars[0].name)
self.assertEqual("scope/v/1:0", true_vars[1].name)
self.assertEqual("custom_getter/add:0", v.name)
with self.test_session() as sess:
tf.initialize_all_variables().run()
np_vars, np_v = sess.run([true_vars, v])
self.assertAllClose(np_v, sum(np_vars))
class PartitionInfoTest(tf.test.TestCase):
def testConstructorChecks(self):
# Invalid arg types.
with self.assertRaises(TypeError):
variable_scope._PartitionInfo(full_shape=None, var_offset=[0, 1])
with self.assertRaises(TypeError):
variable_scope._PartitionInfo(full_shape=[0, 1], var_offset=None)
with self.assertRaises(TypeError):
variable_scope._PartitionInfo(full_shape="foo", var_offset=[0, 1])
with self.assertRaises(TypeError):
variable_scope._PartitionInfo(full_shape=[0, 1], var_offset="foo")
# full_shape and var_offset must have same length.
with self.assertRaises(ValueError):
variable_scope._PartitionInfo(full_shape=[0, 1], var_offset=[0])
# Offset must always be less than shape.
with self.assertRaises(ValueError):
variable_scope._PartitionInfo(full_shape=[1, 1], var_offset=[0, 1])
def testSingleOffset(self):
partition_info = variable_scope._PartitionInfo(
full_shape=[9, 3], var_offset=[4, 0])
self.assertEqual(4, partition_info.single_offset([1, 3]))
# Tests when the variable isn't partitioned at all.
partition_info = variable_scope._PartitionInfo(
full_shape=[9, 3], var_offset=[0, 0])
self.assertEqual(0, partition_info.single_offset([9, 3]))
def testSingleSliceDim(self):
partition_info = variable_scope._PartitionInfo(
full_shape=[9, 3], var_offset=[4, 0])
# Invalid shape.
with self.assertRaises(TypeError):
partition_info.single_slice_dim(None)
# Rank of shape differs from full_shape.
with self.assertRaises(ValueError):
partition_info.single_slice_dim([1, 2, 3])
# Shape is too large given var_offset (4+6 > 9).
with self.assertRaises(ValueError):
partition_info.single_slice_dim([6, 3])
# Multiple possible slice dim from shape.
with self.assertRaises(ValueError):
partition_info.single_slice_dim([1, 1])
partition_info = variable_scope._PartitionInfo(
full_shape=[9, 3], var_offset=[0, 0])
self.assertEqual(1, partition_info.single_slice_dim([9, 2]))
partition_info = variable_scope._PartitionInfo(
full_shape=[9, 3], var_offset=[4, 0])
self.assertEqual(0, partition_info.single_slice_dim([2, 3]))
if __name__ == "__main__":
tf.test.main()
| apache-2.0 | -7,995,408,519,929,527,000 | 40.931198 | 80 | 0.606286 | false |
craigderington/striker_api | tutorial/snippets/views4.py | 1 | 1130 | from snippets.models import Snippet
from snippets.serializers import SnippetSerializer
from rest_framework import mixins
from rest_framework import generics
class SnippetList(mixins.ListModelMixin,
mixins.CreateModelMixin,
generics.GenericAPIView):
queryset = Snippet.objects.all()
serializer_class = SnippetSerializer
def get(self, request, *args, **kwargs):
return self.list(request, *args, **kwargs)
def post(self, request, *args, **kwargs):
return self.create(request, *args, **kwargs)
class SnippetDetail(mixins.RetrieveModelMixin,
mixins.UpdateModelMixin,
mixins.DestroyModelMixin,
generics.GenericAPIView):
queryset = Snippet.objects.all()
serializer_class = SnippetSerializer
def get(self, request, *args, **kwargs):
return self.retrieve(request, *args, **kwargs)
def put(self, request, *args, **kwargs):
return self.update(request, *args, **kwargs)
def delete(self, request, *args, **kwargs):
return self.destroy(request, *args, **kwargs)
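# Illustrative only -- the following is not part of the original snippet.
# These class-based views are typically wired up in the app's urls.py,
# roughly like this (URL patterns and names are assumptions in the style of
# the DRF tutorial):
#
#   from django.conf.urls import url
#   from snippets import views
#
#   urlpatterns = [
#       url(r'^snippets/$', views.SnippetList.as_view()),
#       url(r'^snippets/(?P<pk>[0-9]+)/$', views.SnippetDetail.as_view()),
#   ]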
| gpl-3.0 | 2,336,629,355,937,926,000 | 30.388889 | 54 | 0.659292 | false |
bretttegart/treadmill | lib/python/treadmill/runtime/linux/image/native.py | 1 | 18468 | """A collection of native images.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import errno
import glob
import io
import logging
import os
import pwd
import shutil
import stat
from treadmill import appcfg
from treadmill import cgroups
from treadmill import fs
from treadmill import keytabs
from treadmill import runtime
from treadmill import subproc
from treadmill import supervisor
from treadmill import utils
from treadmill.fs import linux as fs_linux
from . import fs as image_fs
from . import _image_base
from . import _repository_base
from .. import _manifest
_LOGGER = logging.getLogger(__name__)
_CONTAINER_ENV_DIR = 'env'
_CONTAINER_DOCKER_ENV_DIR = os.path.join('docker', 'env')
_CONTAINER_DOCKER_ETC_DIR = os.path.join('docker', 'etc')
def create_docker_environ_dir(container_dir, root_dir, app):
"""Creates environ dir for docker"""
env_dir = os.path.join(container_dir, _CONTAINER_DOCKER_ENV_DIR)
env = {}
if app.ephemeral_ports.tcp or app.ephemeral_ports.udp:
env['LD_PRELOAD'] = os.path.join(
_manifest.TREADMILL_BIND_PATH,
'$LIB',
'treadmill_bind_preload.so'
)
supervisor.create_environ_dir(env_dir, env)
# Bind the environ directory in the container volume
fs.mkdir_safe(os.path.join(root_dir, _CONTAINER_DOCKER_ENV_DIR))
fs_linux.mount_bind(
root_dir, os.path.join(os.sep, _CONTAINER_DOCKER_ENV_DIR),
source=os.path.join(container_dir, _CONTAINER_DOCKER_ENV_DIR),
recursive=False, read_only=True
)
def create_environ_dir(container_dir, root_dir, app):
"""Creates environ dir for s6-envdir."""
env_dir = os.path.join(container_dir, _CONTAINER_ENV_DIR)
env = {
'TREADMILL_APP': app.app,
'TREADMILL_CELL': app.cell,
'TREADMILL_CPU': app.cpu,
'TREADMILL_DISK': app.disk,
'TREADMILL_HOST_IP': app.network.external_ip,
'TREADMILL_IDENTITY': app.identity,
'TREADMILL_IDENTITY_GROUP': app.identity_group,
'TREADMILL_INSTANCEID': app.task,
'TREADMILL_MEMORY': app.memory,
'TREADMILL_PROID': app.proid,
'TREADMILL_ENV': app.environment,
}
for endpoint in app.endpoints:
envname = 'TREADMILL_ENDPOINT_{0}'.format(endpoint.name.upper())
env[envname] = endpoint.real_port
env['TREADMILL_EPHEMERAL_TCP_PORTS'] = ' '.join(
[str(port) for port in app.ephemeral_ports.tcp]
)
env['TREADMILL_EPHEMERAL_UDP_PORTS'] = ' '.join(
[str(port) for port in app.ephemeral_ports.udp]
)
env['TREADMILL_CONTAINER_IP'] = app.network.vip
env['TREADMILL_GATEWAY_IP'] = app.network.gateway
if app.shared_ip:
env['TREADMILL_SERVICE_IP'] = app.network.external_ip
supervisor.create_environ_dir(env_dir, env)
# Bind the environ directory in the container volume
fs.mkdir_safe(os.path.join(root_dir, _CONTAINER_ENV_DIR))
fs_linux.mount_bind(
root_dir, os.path.join(os.sep, _CONTAINER_ENV_DIR),
source=os.path.join(container_dir, _CONTAINER_ENV_DIR),
recursive=False, read_only=True
)
if app.docker:
create_docker_environ_dir(container_dir, root_dir, app)
def create_supervision_tree(tm_env, container_dir, root_dir, app,
cgroups_path):
"""Creates s6 supervision tree."""
uniq_name = appcfg.app_unique_name(app)
ctl_uds = os.path.join(os.sep, 'run', 'tm_ctl')
tombstone_ctl_uds = os.path.join(ctl_uds, 'tombstone')
sys_dir = os.path.join(container_dir, 'sys')
sys_scandir = supervisor.create_scan_dir(
sys_dir,
finish_timeout=6000,
wait_cgroups=cgroups_path,
)
for svc_def in app.system_services:
if svc_def.restart is not None:
monitor_policy = {
'limit': svc_def.restart.limit,
'interval': svc_def.restart.interval,
'tombstone': {
'uds': False,
'path': tm_env.services_tombstone_dir,
'id': '{},{}'.format(uniq_name, svc_def.name)
}
}
else:
monitor_policy = None
supervisor.create_service(
sys_scandir,
name=svc_def.name,
app_run_script=svc_def.command,
userid='root',
environ_dir=os.path.join(container_dir, _CONTAINER_ENV_DIR),
environ={
envvar.name: envvar.value
for envvar in svc_def.environ
},
environment=app.environment,
downed=svc_def.downed,
trace=None,
monitor_policy=monitor_policy
)
sys_scandir.write()
services_dir = os.path.join(container_dir, 'services')
services_scandir = supervisor.create_scan_dir(
services_dir,
finish_timeout=5000
)
for svc_def in app.services:
if svc_def.restart is not None:
monitor_policy = {
'limit': svc_def.restart.limit,
'interval': svc_def.restart.interval,
'tombstone': {
'uds': True,
'path': tombstone_ctl_uds,
'id': '{},{}'.format(uniq_name, svc_def.name)
}
}
else:
monitor_policy = None
if svc_def.trace is not None:
trace = {
'instanceid': app.name,
'uniqueid': app.uniqueid,
'service': svc_def.name,
'path': os.path.join(ctl_uds, 'appevents')
}
else:
trace = None
logger_template = getattr(svc_def, 'logger', 's6.app-logger.run')
_LOGGER.info('Using logger: %s', logger_template)
supervisor.create_service(
services_scandir,
name=svc_def.name,
app_run_script=svc_def.command,
userid=svc_def.proid,
environ_dir='/' + _CONTAINER_ENV_DIR,
environ={
envvar.name: envvar.value
for envvar in svc_def.environ
},
environment=app.environment,
downed=False,
trace=trace if svc_def.trace else None,
log_run_script=logger_template,
monitor_policy=monitor_policy
)
services_scandir.write()
# Bind the service directory in the container volume
fs.mkdir_safe(os.path.join(root_dir, 'services'))
fs_linux.mount_bind(
root_dir, os.path.join(os.sep, 'services'),
source=os.path.join(container_dir, 'services'),
recursive=False, read_only=False
)
# Bind the ctrl directory in the container volume which has all the
# unix domain sockets to communicate outside the container to treadmill
fs.mkdir_safe(os.path.join(root_dir, 'run', 'tm_ctl'))
fs_linux.mount_bind(
root_dir, os.path.join(os.sep, 'run', 'tm_ctl'),
source=tm_env.ctl_dir,
recursive=False, read_only=False
)
def make_fsroot(root_dir, app):
"""Initializes directory structure for the container in a new root.
The container uses pretty much a blank FHS 3 layout.
- Bind directories in parent / (with exceptions - see below.)
- Skip /tmp, create /tmp in the new root with correct permissions.
- Selectively create / bind /var.
- /var/tmp (new)
- /var/log (new)
- /var/spool - create empty with dirs.
- Bind everything in /var, skipping /spool/tickets
"""
newroot_norm = fs.norm_safe(root_dir)
emptydirs = [
'/bin',
'/dev',
'/etc',
'/home',
'/lib',
'/lib64',
'/opt',
'/proc',
'/root',
'/run',
'/sbin',
'/sys',
'/tmp',
'/usr',
'/var/cache',
'/var/empty',
'/var/lib',
'/var/lock',
'/var/log',
'/var/opt',
'/var/spool',
'/var/tmp',
'/var/spool/keytabs',
'/var/spool/tickets',
'/var/spool/tokens',
# for SSS
'/var/lib/sss',
]
stickydirs = [
'/opt',
'/run',
'/tmp',
'/var/cache',
'/var/lib',
'/var/lock',
'/var/log',
'/var/opt',
'/var/tmp',
'/var/spool/keytabs',
'/var/spool/tickets',
'/var/spool/tokens',
]
# These folders are shared with the underlying host and other containers.
mounts = [
'/bin',
'/etc', # TODO: Add /etc/opt
'/lib',
'/lib64',
'/root',
'/sbin',
'/usr',
# for SSS
'/var/lib/sss',
# TODO: Remove below once PAM UDS is implemented
'/var/tmp/treadmill/env',
'/var/tmp/treadmill/spool',
]
# Add everything under /opt
mounts += glob.glob('/opt/*')
for directory in emptydirs:
fs.mkdir_safe(newroot_norm + directory)
for directory in stickydirs:
os.chmod(newroot_norm + directory, 0o777 | stat.S_ISVTX)
# /var/empty must be owned by root and not group or world-writable.
os.chmod(os.path.join(newroot_norm, 'var/empty'), 0o711)
fs_linux.mount_bind(
newroot_norm, os.path.join(os.sep, 'sys'),
source='/sys',
recursive=True, read_only=False
)
# TODO: For security, /dev/ should be minimal and separated to each
# container.
fs_linux.mount_bind(
newroot_norm, os.path.join(os.sep, 'dev'),
source='/dev',
recursive=True, read_only=False
)
# Per FHS3 /var/run should be a symlink to /run which should be tmpfs
fs.symlink_safe(
os.path.join(newroot_norm, 'var', 'run'),
'/run'
)
# We create an unbounded tmpfs mount so that runtime data can be written to
# it, counting against the memory limit of the container.
fs_linux.mount_tmpfs(newroot_norm, '/run')
# Make shared directories/files readonly to container
for mount in mounts:
if os.path.exists(mount):
fs_linux.mount_bind(
newroot_norm, mount,
recursive=True, read_only=True
)
if app.docker:
_mount_docker_tmpfs(newroot_norm)
def _mount_docker_tmpfs(newroot_norm):
"""Mount tmpfs for docker
"""
# /etc/docker is mounted as tmpfs because dockerd creates /etc/docker/key.json
fs_linux.mount_tmpfs(newroot_norm, '/etc/docker')
def create_overlay(tm_env, container_dir, root_dir, app):
"""Create overlay configuration files for the container.
"""
# ldpreloads
_prepare_ldpreload(container_dir, app)
# hosts
_prepare_hosts(container_dir, app)
# resolv.conf
_prepare_resolv_conf(tm_env, container_dir)
# sshd PAM configuration
_prepare_pam_sshd(tm_env, container_dir, app)
# constructed keytab.
_prepare_krb(tm_env, container_dir, root_dir, app)
# bind prepared inside container
_bind_overlay(container_dir, root_dir)
if app.docker:
_bind_overlay_docker(container_dir, root_dir)
def _prepare_krb(tm_env, container_dir, root_dir, app):
"""Manage kerberos environment inside container.
"""
etc_dir = os.path.join(container_dir, 'overlay', 'etc')
fs.mkdir_safe(etc_dir)
kt_dest = os.path.join(etc_dir, 'krb5.keytab')
kt_sources = glob.glob(os.path.join(tm_env.spool_dir, 'keytabs', 'host#*'))
keytabs.make_keytab(kt_dest, kt_sources)
for kt_spec in app.keytabs:
if ':' in kt_spec:
owner, princ = kt_spec.split(':', 1)
else:
owner = kt_spec
princ = kt_spec
kt_dest = os.path.join(root_dir, 'var', 'spool', 'keytabs', owner)
kt_sources = glob.glob(os.path.join(tm_env.spool_dir, 'keytabs',
'%s#*' % princ))
keytabs.make_keytab(kt_dest, kt_sources, owner)
def _prepare_ldpreload(container_dir, app):
"""Add mandatory ldpreloads to the container environment.
"""
etc_dir = os.path.join(container_dir, 'overlay', 'etc')
fs.mkdir_safe(etc_dir)
new_ldpreload = os.path.join(etc_dir, 'ld.so.preload')
try:
shutil.copyfile('/etc/ld.so.preload', new_ldpreload)
except IOError as err:
if err.errno != errno.ENOENT:
raise
_LOGGER.info('/etc/ld.so.preload not found, creating empty.')
utils.touch(new_ldpreload)
ldpreloads = []
if app.ephemeral_ports.tcp or app.ephemeral_ports.udp:
treadmill_bind_preload = subproc.resolve('treadmill_bind_preload.so')
ldpreloads.append(treadmill_bind_preload)
if not ldpreloads:
return
_LOGGER.info('Configuring /etc/ld.so.preload: %r', ldpreloads)
with io.open(new_ldpreload, 'a') as f:
f.write('\n'.join(ldpreloads) + '\n')
def _prepare_hosts(container_dir, app):
"""Create a hosts file for the container.
overlay/
/etc/
hosts # hosts file to be bind mounted in container.
/run/
/host-aliases/ # Directory to be bind mounted in container.
"""
etc_dir = os.path.join(container_dir, 'overlay', 'etc')
ha_dir = os.path.join(container_dir, 'overlay', 'run', 'host-aliases')
fs.mkdir_safe(etc_dir)
fs.mkdir_safe(ha_dir)
shutil.copyfile(
'/etc/hosts',
os.path.join(etc_dir, 'hosts')
)
pwnam = pwd.getpwnam(app.proid)
os.chown(ha_dir, pwnam.pw_uid, pwnam.pw_gid)
def _prepare_pam_sshd(tm_env, container_dir, app):
"""Override pam.d sshd stack with special sshd pam stack.
"""
pamd_dir = os.path.join(container_dir, 'overlay', 'etc', 'pam.d')
fs.mkdir_safe(pamd_dir)
new_pam_sshd = os.path.join(pamd_dir, 'sshd')
if app.shared_network:
template_pam_sshd = os.path.join(
tm_env.root, 'etc', 'pam.d', 'sshd.shared_network'
)
else:
template_pam_sshd = os.path.join(
tm_env.root, 'etc', 'pam.d', 'sshd'
)
if not os.path.exists(template_pam_sshd):
_LOGGER.warning('Falling back to local PAM sshd config.')
template_pam_sshd = '/etc/pam.d/sshd'
shutil.copyfile(
template_pam_sshd,
new_pam_sshd
)
def _prepare_resolv_conf(tm_env, container_dir):
"""Create an resolv.conf file for the container.
"""
etc_dir = os.path.join(container_dir, 'overlay', 'etc')
fs.mkdir_safe(etc_dir)
new_resolv_conf = os.path.join(etc_dir, 'resolv.conf')
# TODO(boysson): This should probably be based instead on /etc/resolv.conf
# for other resolver options
template_resolv_conf = os.path.join(tm_env.root, 'etc', 'resolv.conf')
if not os.path.exists(template_resolv_conf):
_LOGGER.warning('Falling back to local resolver config.')
template_resolv_conf = '/etc/resolv.conf'
shutil.copyfile(
template_resolv_conf,
new_resolv_conf
)
def _bind_overlay(container_dir, root_dir):
"""Create the overlay in the container.
:param ``str`` container_dir:
Base directory of container data/config.
:param ``str`` root_dir:
New root directory of the container.
"""
# Overlay overrides container configs
# - /etc/resolv.conf, so that container always uses dnscache.
# - pam.d sshd stack with special sshd pam that unshares network.
# - /etc/ld.so.preload to enforce necessary system hooks
#
overlay_dir = os.path.join(container_dir, 'overlay')
for overlay_file in ['etc/hosts',
'etc/krb5.keytab',
'etc/ld.so.preload',
'etc/pam.d/sshd',
'etc/resolv.conf']:
fs_linux.mount_bind(
root_dir, os.path.join(os.sep, overlay_file),
source=os.path.join(overlay_dir, overlay_file),
recursive=False, read_only=True)
# Mount host-aliases as read-write
fs_linux.mount_bind(
root_dir, os.path.join(os.sep, 'run', 'host-aliases'),
source=os.path.join(overlay_dir, 'run', 'host-aliases'),
recursive=False, read_only=False
)
# Also override resolv.conf in the current mount namespace so that
# system services have access to our resolver.
fs_linux.mount_bind(
'/', '/etc/resolv.conf',
source=os.path.join(overlay_dir, 'etc/resolv.conf'),
recursive=False, read_only=True
)
def _bind_overlay_docker(container_dir, root_dir):
"""Mount etc/hosts for docker container
"""
# XXX: This path is mounted as RW
# because a read-only volume in the treadmill container cannot be mounted in docker:
# 'Error response from daemon: chown /etc/hosts: read-only file system.'
overlay_dir = os.path.join(container_dir, 'overlay')
fs_linux.mount_bind(
root_dir, os.path.join(os.sep, _CONTAINER_DOCKER_ETC_DIR, 'hosts'),
source=os.path.join(overlay_dir, 'etc/hosts'),
recursive=False, read_only=False
)
def get_cgroup_path(app):
"""Gets the path of the cgroup."""
unique_name = appcfg.app_unique_name(app)
cgrp = os.path.join('treadmill', 'apps', unique_name)
return cgrp
class NativeImage(_image_base.Image):
"""Represents a native image."""
__slots__ = (
'tm_env',
)
def __init__(self, tm_env):
self.tm_env = tm_env
def unpack(self, container_dir, root_dir, app):
make_fsroot(root_dir, app)
image_fs.configure_plugins(self.tm_env, container_dir, app)
# FIXME: Lots of things are still reading this file.
# Copy updated state manifest as app.json in the
# container_dir so it is visible in chrooted env.
shutil.copy(os.path.join(container_dir, runtime.STATE_JSON),
os.path.join(root_dir, appcfg.APP_JSON))
cgrp = get_cgroup_path(app)
create_environ_dir(container_dir, root_dir, app)
create_supervision_tree(
self.tm_env, container_dir, root_dir, app,
cgroups_path=cgroups.makepath(
'memory', cgrp
)
)
create_overlay(self.tm_env, container_dir, root_dir, app)
class NativeImageRepository(_repository_base.ImageRepository):
"""A collection of native images."""
def get(self, url):
return NativeImage(self.tm_env)
| apache-2.0 | -8,521,182,695,245,798,000 | 30.038655 | 79 | 0.589398 | false |
catalpainternational/OIPA | OIPA/api/v3/resources/csv_serializer.py | 1 | 7048 | from __future__ import print_function
from future import standard_library
standard_library.install_aliases()
import csv
import io
from tastypie.serializers import Serializer
import string
class CsvSerializer(Serializer):
formats = ['json', 'xml', 'csv']
content_types = {
'json': 'application/json',
'xml': 'application/xml',
'csv': 'text/csv',
}
def to_csv(self, data, options=None):
options = options or {}
data = self.to_simple(data, options)
raw_data = io.StringIO()
first = True
try:
if "meta" in list(data.keys()): #if multiple objects are returned
objects = data.get("objects")
for value in objects:
test = self.set_data(value)
if first:
writer = csv.DictWriter(raw_data, list(test.keys()), delimiter=";", quotechar="'", quoting=csv.QUOTE_NONNUMERIC)
writer.writeheader()
writer.writerow(test)
first = False
else:
writer.writerow(test)
else:
test = {}
self.flatten("", data, test)
if first:
writer = csv.DictWriter(raw_data, list(test.keys()), delimiter=";", quotechar="'", quoting=csv.QUOTE_NONNUMERIC)
writer.writeheader()
writer.writerow(test)
first=False
else:
writer.writerow(test)
CSVContent=raw_data.getvalue()
return CSVContent
except Exception as e:
print(e)
def set_data(self, data):
# set keys
column_dict = {'activity_scope': None, 'activity_status': None, 'total_budget': None, 'collaboration_type': None, 'countries': None, 'default_aid_type': None, 'default_finance_type': None, 'default_flow_type': None, 'descriptions': None, 'titles': None, 'end_actual': None, 'end_planned': None, 'start_actual': None, 'start_planned': None, 'iati_identifier': None, 'last_updated_datetime': None, 'participating_organisations': None, 'participating_org_accountable': None, 'participating_org_extending': None, 'participating_org_funding': None, 'participating_org_implementing': None}
try:
# fill keys
for dest_key in column_dict:
if dest_key in list(data.keys()):
if isinstance(data[dest_key], list):
if dest_key == "participating_organisations":
participating_org_accountable = []
participating_org_extending = []
participating_org_funding = []
participating_org_implementing = []
for org in data[dest_key]:
if not org["name"]:
continue
if 'role_id' in org:
if org["role_id"] == "Accountable":
participating_org_accountable.append(org["name"])
if org["role_id"] == "Extending":
participating_org_extending.append(org["name"])
if org["role_id"] == "Funding":
participating_org_funding.append(org["name"])
if org["role_id"] == "Implementing":
participating_org_implementing.append(org["name"])
column_dict["participating_org_accountable"] = ", ".join(participating_org_accountable)
column_dict["participating_org_extending"] = ", ".join(participating_org_extending)
column_dict["participating_org_funding"] = ", ".join(participating_org_funding)
column_dict["participating_org_implementing"] = ", ".join(participating_org_implementing)
if dest_key == "countries":
countries = []
for country in data[dest_key]:
countries.append(country["name"])
column_dict[dest_key] = ", ".join(countries)
if dest_key == "regions":
regions = []
for region in data[dest_key]:
regions.append(region["name"])
column_dict[dest_key] = ", ".join(regions)
if dest_key == "sectors":
sectors = []
for sector in data[dest_key]:
sectors.append(sector["name"])
column_dict[dest_key] = ", ".join(sectors)
if dest_key == "titles":
titles = []
for title in data[dest_key]:
if title["title"]:
titles.append(title["title"])
column_dict[dest_key] = ", ".join(titles)
if dest_key == "descriptions":
descriptions = []
for description in data[dest_key]:
if description["description"]:
descriptions.append(description["description"])
column_dict[dest_key] = ", ".join(descriptions)
continue
if isinstance(data[dest_key], dict):
if "name" in data[dest_key]:
column_dict[dest_key] = data[dest_key]["name"]
continue
column_dict[dest_key] = data[dest_key]
del column_dict["participating_organisations"]
for dest_key in column_dict:
if column_dict[dest_key]:
column_dict[dest_key] = column_dict[dest_key].encode('utf-8', 'ignore')
except Exception as e:
print(e)
return column_dict
def flatten(self, parent_name, data, odict={}):
# if list, flatten the list
if isinstance(data, list):
for value in data:
self.flatten(parent_name, value, odict)
# if dictionary, flatten the dictionary
elif isinstance(data, dict):
for (key, value) in list(data.items()):
# if no dict or list, add to odict
if not isinstance(value, (dict, list)):
if parent_name:
key = parent_name + "_" + key
odict[key] = value
else:
self.flatten(key, value, odict)
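# Illustrative only -- not part of the original module. A minimal sketch of
# what flatten() produces for a nested payload (the input values are made up):
#
#   odict = {}
#   CsvSerializer().flatten("", {"id": 1, "titles": [{"title": "t"}]}, odict)
#   # odict == {"id": 1, "titles_title": "t"}
#
# Nested dict keys are joined to their parent name with "_", and list items
# are flattened in place into the same output dict.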
| agpl-3.0 | 7,082,719,169,658,676,000 | 37.939227 | 591 | 0.462684 | false |
ClimbsRocks/auto_ml | tests/advanced_tests/automated_tests.py | 1 | 1586 | from collections import OrderedDict
import os
import sys
sys.path = [os.path.abspath(os.path.dirname(__file__))] + sys.path
os.environ['is_test_suite'] = 'True'
import classifiers as classifier_tests
import regressors as regressor_tests
training_parameters = {
'model_names': ['DeepLearning', 'GradientBoosting', 'XGB', 'LGBM', 'CatBoost']
}
# Make this an OrderedDict so that we run the tests in a consistent order
test_names = OrderedDict([
('getting_single_predictions_multilabel_classification', classifier_tests.getting_single_predictions_multilabel_classification),
# ('getting_single_predictions_classification', classifier_tests.getting_single_predictions_classification),
('optimize_final_model_classification', classifier_tests.optimize_final_model_classification)
# ('feature_learning_getting_single_predictions_classification', classifier_tests.feature_learning_getting_single_predictions_classification),
# ('categorical_ensembling_classification', classifier_tests.categorical_ensembling_classification),
# ('feature_learning_categorical_ensembling_getting_single_predictions_classification', classifier_tests.feature_learning_categorical_ensembling_getting_single_predictions_classification)
])
def test_generator():
for model_name in training_parameters['model_names']:
for test_name, test in test_names.items():
test_model_name = model_name + 'Classifier'
# test_model_name = model_name
test.description = str(test_model_name) + '_' + test_name
yield test, test_model_name
| mit | -5,031,269,755,627,746,000 | 45.647059 | 191 | 0.747793 | false |
hlange/LogSoCR | .waf/waflib/extras/use_config.py | 1 | 5505 | #!/usr/bin/env python
# coding=utf-8
# Mathieu Courtois - EDF R&D, 2013 - http://www.code-aster.org
"""
When a project has a lot of options the 'waf configure' command line can be
very long and it becomes a cause of error.
This tool provides a convenient way to load a set of configuration parameters
from a local file or from a remote url.
The configuration parameters are stored in a Python file that is imported
the same way an extra waf tool would be.
Example:
$ waf configure --use-config-dir=http://www.anywhere.org --use-config=myconf1 ...
The file 'myconf1' will be downloaded from 'http://www.anywhere.org'
(or 'http://www.anywhere.org/wafcfg').
If the files are available locally, it could be:
$ waf configure --use-config-dir=/somewhere/myconfigurations --use-config=myconf1 ...
The configuration of 'myconf1.py' is automatically loaded by calling
its 'configure' function. In this example, it defines environment variables and
set options:
def configure(self):
self.env['CC'] = 'gcc-4.8'
self.env.append_value('LIBPATH', [...])
self.options.perlbinary = '/usr/local/bin/perl'
self.options.pyc = False
The corresponding command line should have been:
$ CC=gcc-4.8 LIBPATH=... waf configure --nopyc --with-perl-binary=/usr/local/bin/perl
This is an extra tool, not bundled with the default waf binary.
To add the use_config tool to the waf file:
$ ./waf-light --tools=use_config
When using this tool, the wscript will look like:
def options(opt):
opt.load('use_config')
def configure(conf):
conf.load('use_config')
"""
import sys
import os.path as osp
import os
local_repo = ''
"""Local repository containing additional Waf tools (plugins)"""
remote_repo = 'https://raw.githubusercontent.com/waf-project/waf/master/'
"""
Remote directory containing downloadable waf tools. The missing tools can be downloaded by using::
$ waf configure --download
"""
remote_locs = ['waflib/extras', 'waflib/Tools']
"""
Remote directories for use with :py:const:`waflib.extras.use_config.remote_repo`
"""
try:
from urllib import request
except ImportError:
from urllib import urlopen
else:
urlopen = request.urlopen
from waflib import Errors, Context, Logs, Utils, Options, Configure
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse
DEFAULT_DIR = 'wafcfg'
# add first the current wafcfg subdirectory
sys.path.append(osp.abspath(DEFAULT_DIR))
def options(self):
group = self.add_option_group('configure options')
group.add_option('--download', dest='download', default=False, action='store_true', help='try to download the tools if missing')
group.add_option('--use-config', action='store', default=None,
metavar='CFG', dest='use_config',
help='force the configuration parameters by importing '
'CFG.py. Several modules may be provided (comma '
'separated).')
group.add_option('--use-config-dir', action='store', default=DEFAULT_DIR,
metavar='CFG_DIR', dest='use_config_dir',
help='path or url where to find the configuration file')
def download_check(node):
"""
Hook to check for the tools which are downloaded. Replace with your function if necessary.
"""
pass
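# Illustrative sketch of one possible download_check override (not part of the
# original tool): it assumes you maintain a dict of trusted SHA-256 digests for
# downloadable tools; the dict contents and the function name are hypothetical.
KNOWN_TOOL_DIGESTS = {}
def download_check_sha256(node):
	"""
	Reject a downloaded tool whose SHA-256 digest is not listed in
	KNOWN_TOOL_DIGESTS. Enable it with ``download_check = download_check_sha256``
	if this policy is wanted.
	"""
	import hashlib
	digest = hashlib.sha256(node.read(flags='rb')).hexdigest()
	if KNOWN_TOOL_DIGESTS and digest not in KNOWN_TOOL_DIGESTS.values():
		raise Errors.WafError('Tool %s has unexpected digest %s' % (node.name, digest))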
def download_tool(tool, force=False, ctx=None):
"""
Download a Waf tool from the remote repository defined in :py:const:`waflib.extras.use_config.remote_repo`::
$ waf configure --download
"""
for x in Utils.to_list(remote_repo):
for sub in Utils.to_list(remote_locs):
url = '/'.join((x, sub, tool + '.py'))
try:
web = urlopen(url)
try:
if web.getcode() != 200:
continue
except AttributeError:
pass
except Exception:
# on python3 urlopen throws an exception
# python 2.3 does not have getcode and throws an exception to fail
continue
else:
tmp = ctx.root.make_node(os.sep.join((Context.waf_dir, 'waflib', 'extras', tool + '.py')))
tmp.write(web.read(), 'wb')
Logs.warn('Downloaded %s from %s', tool, url)
download_check(tmp)
try:
module = Context.load_tool(tool)
except Exception:
Logs.warn('The tool %s from %s is unusable', tool, url)
try:
tmp.delete()
except Exception:
pass
continue
return module
raise Errors.WafError('Could not load the Waf tool')
def load_tool(tool, tooldir=None, ctx=None, with_sys_path=True):
try:
module = Context.load_tool_default(tool, tooldir, ctx, with_sys_path)
except ImportError as e:
if Options.options.download:
module = download_tool(tool, ctx=ctx)
if not module:
ctx.fatal('Could not load the Waf tool %r or download a suitable replacement from the repository (sys.path %r)\n%s' % (tool, sys.path, e))
else:
ctx.fatal('Could not load the Waf tool %r from %r (try the --download option?):\n%s' % (tool, sys.path, e))
return module
Context.load_tool_default = Context.load_tool
Context.load_tool = load_tool
Configure.download_tool = download_tool
def configure(self):
opts = self.options
use_cfg = opts.use_config
if use_cfg is None:
return
url = urlparse(opts.use_config_dir)
kwargs = {}
if url.scheme:
kwargs['download'] = True
kwargs['remote_url'] = url.geturl()
# search first with the exact url, else try with +'/wafcfg'
kwargs['remote_locs'] = ['', DEFAULT_DIR]
tooldir = url.geturl() + ' ' + DEFAULT_DIR
for cfg in use_cfg.split(','):
Logs.pprint('NORMAL', "Searching configuration '%s'..." % cfg)
self.load(cfg, tooldir=tooldir, **kwargs)
self.start_msg('Checking for configuration')
self.end_msg(use_cfg)
| agpl-3.0 | 2,054,614,675,661,956,600 | 29.247253 | 142 | 0.702997 | false |
madedotcom/photon-pump | test/conversations/test_read_all_events_stream_conversation.py | 1 | 4703 | from asyncio import Queue
from uuid import uuid4
import pytest
from photonpump import messages as msg, exceptions
from photonpump import messages_pb2 as proto
from photonpump.conversations import ReadAllEvents
@pytest.mark.asyncio
async def test_read_all_request():
output = Queue()
convo = ReadAllEvents(msg.Position(10, 11))
await convo.start(output)
request = await output.get()
body = proto.ReadAllEvents()
body.ParseFromString(request.payload)
assert request.command is msg.TcpCommand.ReadAllEventsForward
assert body.commit_position == 10
assert body.prepare_position == 11
assert body.resolve_link_tos is True
assert body.require_master is False
assert body.max_count == 100
@pytest.mark.asyncio
async def test_read_all_backward():
output = Queue()
convo = ReadAllEvents(
from_position=msg.Position(10, 11),
direction=msg.StreamDirection.Backward,
max_count=20,
)
await convo.start(output)
request = await output.get()
body = proto.ReadAllEvents()
body.ParseFromString(request.payload)
assert request.command is msg.TcpCommand.ReadAllEventsBackward
assert body.commit_position == 10
assert body.prepare_position == 11
assert body.resolve_link_tos is True
assert body.require_master is False
assert body.max_count == 20
@pytest.mark.asyncio
async def test_read_all_success():
event_1_id = uuid4()
event_2_id = uuid4()
convo = ReadAllEvents()
response = proto.ReadAllEventsCompleted()
response.result = msg.ReadEventResult.Success
response.next_commit_position = 10
response.next_prepare_position = 10
response.commit_position = 9
response.prepare_position = 9
event_1 = proto.ResolvedEvent()
event_1.commit_position = 8
event_1.prepare_position = 8
event_1.event.event_stream_id = "stream-123"
event_1.event.event_number = 32
event_1.event.event_id = event_1_id.bytes_le
event_1.event.event_type = "event-type"
event_1.event.data_content_type = msg.ContentType.Json
event_1.event.metadata_content_type = msg.ContentType.Binary
event_1.event.data = """
{
'color': 'red',
'winner': true
}
""".encode(
"UTF-8"
)
event_2 = proto.ResolvedEvent()
event_2.CopyFrom(event_1)
event_2.event.event_stream_id = "stream-456"
event_2.event.event_type = "event-2-type"
event_2.event.event_id = event_2_id.bytes_le
event_2.event.event_number = 32
response.events.extend([event_1, event_2])
await convo.respond_to(
msg.InboundMessage(
uuid4(),
msg.TcpCommand.ReadAllEventsForwardCompleted,
response.SerializeToString(),
),
None,
)
result = await convo.result
assert isinstance(result, msg.AllStreamSlice)
[event_1, event_2] = result.events
assert event_1.stream == "stream-123"
assert event_1.id == event_1_id
assert event_1.type == "event-type"
assert event_1.event_number == 32
assert event_2.stream == "stream-456"
assert event_2.id == event_2_id
assert event_2.type == "event-2-type"
assert event_2.event_number == 32
@pytest.mark.asyncio
async def test_all_events_error():
convo = ReadAllEvents()
response = proto.ReadAllEventsCompleted()
response.result = msg.ReadAllResult.Error
response.next_commit_position = 10
response.next_prepare_position = 10
response.commit_position = 9
response.prepare_position = 9
response.error = "Something really weird just happened"
await convo.respond_to(
msg.InboundMessage(
uuid4(),
msg.TcpCommand.ReadAllEventsForwardCompleted,
response.SerializeToString(),
),
None,
)
with pytest.raises(exceptions.ReadError) as exn:
await convo.result
assert exn.stream == "$all"
assert exn.conversation_id == convo.conversation_id
@pytest.mark.asyncio
async def test_all_events_access_denied():
convo = ReadAllEvents()
response = proto.ReadAllEventsCompleted()
response.result = msg.ReadAllResult.AccessDenied
response.next_commit_position = 10
response.next_prepare_position = 10
response.commit_position = 9
response.prepare_position = 9
await convo.respond_to(
msg.InboundMessage(
uuid4(), msg.TcpCommand.ReadAllEventsForward, response.SerializeToString()
),
None,
)
with pytest.raises(exceptions.AccessDenied) as exn:
await convo.result
assert exn.conversation_id == convo.conversation_id
assert exn.conversation_type == "ReadAllEvents"
| mit | 23,669,084,093,015,520 | 26.828402 | 86 | 0.672549 | false |
unixunion/python-libsolace | bin/solace-list-clients.py | 1 | 4986 | #!/usr/bin/env python
"""
Show solace clients and counts, optionally pump all stats into influxdb
"""
import logging
import sys
logging.basicConfig(format='[%(module)s] %(filename)s:%(lineno)s %(asctime)s %(levelname)s %(message)s',
stream=sys.stderr)
import libsolace.settingsloader as settings
from libsolace.SolaceAPI import SolaceAPI
from libsolace.SolaceXMLBuilder import SolaceXMLBuilder
from optparse import OptionParser
import simplejson as json
import sys
import pprint
import demjson
from time import gmtime, strftime
import time
pp = pprint.PrettyPrinter(indent=4, width=20)
if __name__ == '__main__':
""" parse opts, read site.xml, start provisioning vpns. """
usage = "list all vpns in an environment"
parser = OptionParser(usage=usage)
parser.add_option("-e", "--env", "--environment", action="store", type="string", dest="env",
help="environment to run job in eg:[ dev | ci1 | si1 | qa1 | pt1 | prod ]")
parser.add_option("-d", "--debug", action="store_true", dest="debug",
default=False, help="toggles solace debug mode")
parser.add_option("--details", action="store_true", dest="details", help="Show client details", default=False)
parser.add_option("--stats", action="store_true", dest="stats", help="Show client stats", default=False)
parser.add_option("--client", action="store", type="string", dest="client", help="client filter e.g. 'dev_*'",
default="*")
parser.add_option("--influxdb", action="store_true", dest="influxdb", help="influxdb url and port", default=False)
parser.add_option("--influxdb-host", action="store", type="string", dest="influxdb_host", help="influxdb hostname", default="defiant")
parser.add_option("--influxdb-port", action="store", type="int", dest="influxdb_port", help="influxdb port", default=8086)
parser.add_option("--influxdb-user", action="store", type="string", dest="influxdb_user", help="influxdb user", default="root")
parser.add_option("--influxdb-pass", action="store", type="string", dest="influxdb_pass", help="influxdb pass", default="root")
parser.add_option("--influxdb-db", action="store", type="string", dest="influxdb_db", help="influxdb db name", default="solace-clients")
(options, args) = parser.parse_args()
if not options.env:
parser.print_help()
sys.exit()
if options.debug:
logging.getLogger().setLevel(logging.DEBUG)
if options.influxdb:
logging.info("Connecting to influxdb")
from influxdb import InfluxDBClient
try:
client = InfluxDBClient(options.influxdb_host, options.influxdb_port, options.influxdb_user,
options.influxdb_pass, options.influxdb_db)
try:
client.create_database(options.influxdb_db)
except Exception, e:
logging.warn("Unable to create database, does it already exist?")
except Exception, e:
logging.error("Unable to connect to influxdb")
sys.exit(1)
# forces read-only
options.testmode = True
settings.env = options.env.lower()
logging.info("Connecting to appliance in %s, testmode:%s" % (settings.env, options.testmode))
connection = SolaceAPI(settings.env, testmode=options.testmode)
if options.details:
connection.x = SolaceXMLBuilder("show clients details")
connection.x.show.client.name = options.client
        connection.x.show.client.details
elif options.stats:
connection.x = SolaceXMLBuilder("show clients stats")
connection.x.show.client.name = options.client
connection.x.show.client.stats
# get the clients
clients = connection.rpc(str(connection.x), primaryOnly=True)
count = 0
# print clients[0]
timeNow = strftime("%Y-%m-%dT%H:%M:%SZ", gmtime())
startTime = time.time()
for c in clients[0]['rpc-reply']['rpc']['show']['client']['primary-virtual-router']['client']:
        count += 1
        j = demjson.encode(c)
p = json.loads(j)
if options.stats:
t = {}
for k in p["stats"]:
logging.debug("Key: %s value %s" % (k, p["stats"][k]))
try:
t[k] = long(p["stats"][k])
except Exception, ve:
logging.debug("skipping")
pass
json_body = [{
"measurement": "client-stats",
"tags": {
"message-vpn": p['message-vpn'],
"name": p['name']
},
"fields": t,
"time": timeNow
}]
# print json.dumps(json_body)
# print json.dumps(json_body, sort_keys=False, indent=4, separators=(',', ': '))
client.write_points(json_body)
logging.info("Total Clients: %s" % count)
logging.info("Time Taken: %s" % (time.time()-startTime) )
| mit | -2,089,378,427,571,660,000 | 37.953125 | 140 | 0.609306 | false |
sonicyang/TIE | src/forwarder/dev-bluetooth.py | 1 | 4067 | #!/usr/bin/env python
#
# Copyright (C) 2013 AcoMo Technology.
# All rights reserved.
#
# Authored by Jyun-Yu Huang <[email protected]>
#
# This is a bluetooth forwarder.
#
# AcoMo forwarder bridge sensor and server, the protocol of client (sensor) is bluetooth RFCOMM,
# and the protocol of server is XML-RPC.
#
# TODO: Restore data before upload.
# Receiver
from bluetooth import *
import bluetooth
import urllib2
# XML-RPC
import sys
import xmlrpclib
# Acomo format check
from xmlhandle import check_acomo_xml_format, append_elem_to_root
# Information
from info import parse_argument
# Account management
from account import get_username, search_user
# Quality test
#from test_forwarder import test
# Environment variable
#import sys
import time
# Timestamp flag:
# The flag is raised ONE TIME when a user starts a test; the range of ECG data
# loaded from the DB for the NN file calculation is [latest .. flag = 1],
# i.e. a short-term test.
#flag_timestamp = 0
# User list:
# When a registered user's device is seen by the forwarder, the username is
# appended to this list and the "TIMESTAMP" tag value is set to "1" ONE TIME;
# the range of ECG data loaded from the DB for the NN file calculation is
# [latest .. TIMESTAMP = 1], i.e. a short-term test.
list_user = []
# Forwarder flag:
# While the flag is raised the forwarder keeps transferring; when lowered it stops.
flag_forwarder = 1
# Define
DEBUG = 0
CHANNEL = 6
BUFFERSIZE = 1024
ADDR_DEVICE = "00:11:67:E9:56:03"
VALUE_TIMEOUT = 10#1
# Site
SITE_BASE = 'hrv.acomotech.com'
SITE = "http://" + SITE_BASE + "/xmldata/forwarder"
SITE_CHECK = "http://" + SITE_BASE + "/xmldata/latestecg"
SITE_UPDATE_USER = "http://" + SITE_BASE + "/account/update"
# Custom tags
TAG_ACOMO_USER = 'USER'
TAG_ACOMO_TIMESTAMP = 'TIMESTAMP'
TAG_ACOMO_MAC = 'MAC'
def forwarder():
# Parsing argument
if(parse_argument() == 1):
return
# Start to forwarder:
# 1. devices query
# 2. server connect detect
# 3. data transfer
while(flag_forwarder == 1):
# Device query
print "Performing inquiry..."
nearby_devices = bluetooth.discover_devices(lookup_names = True)
print "Found %d devices" % len(nearby_devices)
# Continue to find
if 0 == len(nearby_devices):
continue
# Set proxy for server site
rpc_srv = xmlrpclib.ServerProxy(SITE, allow_none=True, verbose=False)
# Collect devices done, check if these device have registered
for addr, name in nearby_devices:
username = get_username(addr)
if(username == ""):
continue
# Device connecting
print "Name:%s, Address:%s connecting" % (name, addr)
client_socket=BluetoothSocket( RFCOMM )
client_socket.connect((addr, CHANNEL))
data = ""
# Data receiveing
try:
data = client_socket.recv(BUFFERSIZE)
except IOError:
pass
# XML RPC
if 0 == check_acomo_xml_format(data):
# Append username to data
username = get_username(addr)
data = append_elem_to_root(data, TAG_ACOMO_USER, username)
data = append_elem_to_root(data, TAG_ACOMO_MAC, addr)
# First appear ?
if(not search_user(username, list_user)):
list_user.append(username)
data = append_elem_to_root(data, TAG_ACOMO_TIMESTAMP, '1')
else:
data = append_elem_to_root(data, TAG_ACOMO_TIMESTAMP, '0')
print("data == " + data)
result = rpc_srv.raw_handle(data)
# Hand shake
try:
url_site_check = SITE_CHECK + "?user=" + username
response = urllib2.urlopen(url_site_check, timeout = VALUE_TIMEOUT)
value = response.read()
if(cmp(value, data) != 0):
print "Error: Server-side data do not match."
except urllib2.URLError, e:
print "Error: Connection timeout!"
# Update users and devices
try:
url_site_update_user = SITE_UPDATE_USER + "?user=" + username + "&" + "device=" + name
response = urllib2.urlopen(url_site_update_user)
value = response.read()
except urllib2.URLError, e:
print "Error: Connection timeout!"
client_socket.close()
forwarder()
| gpl-2.0 | 5,352,912,468,466,347,000 | 23.208333 | 96 | 0.681338 | false |
ingadhoc/sale | sale_require_ref/__manifest__.py | 1 | 1503 | ##############################################################################
#
# Copyright (C) 2015 ADHOC SA (http://www.adhoc.com.ar)
# All Rights Reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Sale Order Require Reference on Confirmation',
'version': '12.0.1.0.0',
'category': 'Projects & Services',
'sequence': 14,
'summary': '',
'author': 'ADHOC SA',
'website': 'www.adhoc.com.ar',
'license': 'AGPL-3',
'images': [
],
'depends': [
'sale_exception',
],
'data': [
'data/exception_rule_data.xml',
],
'demo': [
'demo/exception_rule_demo.xml',
'demo/sale_order_demo.xml',
],
'installable': False,
'auto_install': False,
'application': False,
}
| agpl-3.0 | 5,450,806,373,730,809,000 | 33.159091 | 78 | 0.566201 | false |
espensirnes/paneltime | paneltime/system/system_arguments.py | 1 | 8361 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#This module contains the arguments class used to handle regression arguments
import numpy as np
import functions as fu
import stat_functions as stat
class arguments:
"""Sets initial arguments and stores static properties of the arguments"""
def __init__(self,panel, args_d_old):
p,q,d,k,m=panel.pqdkm
self.args_d_old=args_d_old
self.categories=['beta','rho','lambda','gamma','psi','omega','z']
self.args_init,self.args_d_OLS, self.args_d_restricted=dict(),dict(),dict()
self.panel=panel
self.equations=[]
self.n_equations=len(panel.X)
self.n_args=[]
self.positions=dict()
self.positions_map=dict()
self.name_positions_map=dict()
arg_count=0
self.names_v=[]
self.eq_number_v=[]
if args_d_old is None:
args_d_old=[None]*self.n_equations
for i in range(self.n_equations):
e=equation(panel.X[i],panel.Y[i],panel.W,self, args_d_old[i],i,arg_count,panel.X_names[i])
self.equations.append(e)
self.n_args.append(e.n_args)
self.args_init[i]=e.args_init
self.args_d_OLS[i]=e.args_d_OLS
self.args_d_restricted[i]=e.args_d_restricted
arg_count+=e.n_args
self.args_init['rho']=np.diag(np.ones(self.n_equations))
self.args_d_OLS['rho']=np.diag(np.ones(self.n_equations))
self.args_d_restricted['rho']=np.diag(np.ones(self.n_equations))
self.n_args_eq=arg_count
self.n_args_tot=int(np.sum(self.n_args)+(self.n_equations-1)*self.n_equations/2)
		self.rho_definitions()  # was: add_rho_names(self.names_v,arg_count), which is undefined
self.eq_number_v.extend([None]*(self.n_args_tot-arg_count))
def system_conv_to_dicts(self,args):
args_d=dict()
if type(args[0])==dict:
return args
for eq in self.equations:
d=dict()
for category in self.categories:
rng=eq.positions[category]
s=self.args_init[eq.id][category].shape
d[category]=args[rng].reshape(s)
args_d[eq.id]=d
		args_d['rho']=self.rho_list_to_matrix(args[self.n_args_eq:])
return args_d
def system_conv_to_vector(self,args):
args_v=[]
if type(args[0])!=dict:
return args
n=0
for i in range(self.n_equations):
for name in self.categories:
args_v.extend(args[i][name].flatten())
n+=len(args[i][name])
		args_v.extend(self.rho_matrix_to_list(args['rho']))
args_v=np.array(args_v)
return args_v
def rho_definitions(self):
n=self.n_args_eq
self.rho_position_list=[]
r=range(n)
x=[[[min((i,j)),max((i,j))] for i in r] for j in r]
		self.rho_position_matrix=np.array([[str(x[j][i]) for i in r] for j in r])
for i in range(n):
for j in range(i,n):
self.names_v.append('System reg. rho(%s,%s)' %(i,j))
				self.rho_position_list.append(x[i][j])
def rho_list_to_matrix(self,lst):
n=len(self.rho_position_list)
m=np.zeros((n,n))
for k in range(n):
i,j=self.rho_position_list[k]
m[i,j]=lst[k]
m[j,i]=lst[k]
return m
def rho_matrix_to_list(self,m):
n=len(self.rho_position_list)
lst=np.zeros(n)
for k in range(n):
i,j=self.rho_position_list[k]
lst[k]=m[i,j]
return lst
class equation:
def __init__(self,X,Y,W,arguments,args_d_old,i,arg_count,X_names):
a=arguments
self.id=i
		p,q,d,k,m=a.panel.pqdkm
self.args_init,self.args_d_OLS, self.args_d_restricted=set_init_args(X,Y,W,args_d_old,p, d, q, m, k,a.panel)
self.names_d=get_namevector(a.panel,p, q, m, k,X_names,a,i)
self.position_defs(a,arg_count,X_names)
self.args_v=conv_to_vector(self.args_init,a.categories)
self.n_args=len(self.args_v)
self.args_rng=range(arg_count,arg_count+self.n_args)
a.eq_number_v.extend([i]*self.n_args)
def position_defs(self,system,arg_count,X_names):
"""Defines positions in vector argument in each equation for the system args_v vector"""
self.positions_map=dict()#a dictionary of indicies containing the string name and sub-position of index within the category
self.positions=dict()#a dictionary of category strings containing the index range of the category
self.beta_map=dict()
k=arg_count
for category in system.categories:
n=len(self.args_init[category])
rng=range(k,k+n)
self.positions[category]=rng#self.positions[<category>]=range(<system position start>,<system position end>)
if category in system.positions:
system.positions[category].append(rng)
else:
system.positions[category]=[rng]
for j in rng:
self.positions_map[j]=[category,j-k]#self.positions_map[<system position>]=<category>,<equation position>
system.positions_map[j]=[self.id,category,j-k]#system.positions_map[<system position>]=<equation number>,<category>,<equation position>
k+=n
for i in range(len(X_names)):
self.beta_map[X_names[i]]=self.positions['beta'][i]
def initargs(X,Y,W,args_old,p,d,q,m,k,panel):
N,T,k=X.shape
if args_old is None:
armacoefs=0
else:
armacoefs=0
args=dict()
args['beta']=np.zeros((k,1))
args['omega']=np.zeros((W.shape[2],1))
args['rho']=np.ones(p)*armacoefs
args['lambda']=np.ones(q)*armacoefs
args['psi']=np.ones(m)*armacoefs
args['gamma']=np.ones(k)*armacoefs
args['z']=np.array([])
if m>0 and N>1:
args['omega'][0][0]=0
if m>0:
args['psi'][0]=0.00001
args['z']=np.array([0.00001])
return args
def set_init_args(X,Y,W,args_old,p,d,q,m,k,panel):
args=initargs(X,Y,W,args_old,p, d, q, m, k, panel)
args_restricted=fu.copy_array_dict(args)
if panel.has_intercept:
args_restricted['beta'][0][0]=panel.mean(Y)
args_restricted['omega'][0][0]=np.log(panel.var(Y))
else:
args_restricted['omega'][0][0]=np.log(panel.var(Y,k=0,mean=0))
beta,e=stat.OLS(panel,X,Y,return_e=True)
args['beta']=beta
args['omega'][0]=np.log(np.sum(e**2*panel.included)/np.sum(panel.included))
args_OLS=fu.copy_array_dict(args)
if panel.m_zero:
args['omega'][0]=0
if not args_old is None:
args['beta']=insert_arg(args['beta'],args_old['beta'])
args['omega']=insert_arg(args['omega'],args_old['omega'])
args['rho']=insert_arg(args['rho'],args_old['rho'])
args['lambda']=insert_arg(args['lambda'],args_old['lambda'])
args['psi']=insert_arg(args['psi'],args_old['psi'])
args['gamma']=insert_arg(args['gamma'],args_old['gamma'])
args['z']=insert_arg(args['z'],args_old['z'])
return args,args_OLS, args_restricted
def conv_to_dict(args,categories,positions):
"""Converts a vector argument args to a dictionary argument. If args is a dict, it is returned unchanged"""
if type(args)==dict:
return args
else:
d=dict()
k=0
for i in categories:
n=len(positions[i])
rng=range(k,k+n)
d[i]=args[rng]
if i=='beta' or i=='omega':
d[i]=d[i].reshape((n,1))
k+=n
return d
def conv_to_vector(args,categories):
"""Converts a dict argument args to vector argument. if args is a vector, it is returned unchanged.\n
If args=None, the vector of self.args is returned"""
if type(args)==list or type(args)==np.ndarray:
return args
v=np.array([])
for category in categories:
s=args[category]
if type(s)==np.ndarray:
s=s.flatten()
v=np.concatenate((v,s))
return v
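# Illustrative sketch only (hypothetical toy values, not used by the module):
# conv_to_vector and conv_to_dict are intended to round-trip between the two
# argument representations used above.
def _example_round_trip():
	categories=['beta','omega']
	args_d={'beta':np.array([[0.5],[0.2]]),'omega':np.array([[0.1]])}
	positions={'beta':[0,1],'omega':[2]}
	args_v=conv_to_vector(args_d,categories)
	return conv_to_dict(args_v,categories,positions)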
def get_namevector(panel,p, q, m, k,X_names,system,eq_num):
"""Creates a vector of the names of all regression varaibles,
including variables, ARIMA and GARCH terms. This defines the positions
of the variables througout the estimation."""
names_d=dict()
#sequence must match definition of categories in arguments.__init__:
#self.categories=['beta','rho','lambda','gamma','psi','omega','z']
eq_prefix='%02d|' %(eq_num,)
names_v=[eq_prefix+i for i in X_names]#copy variable names
names_d['beta']=names_v
add_names(p,eq_prefix+'AR term %s (p)','rho',names_d,names_v)
add_names(q,eq_prefix+'MA term %s (q)','lambda',names_d,names_v)
add_names(m,eq_prefix+'MACH term %s (m)','psi',names_d,names_v)
add_names(k,eq_prefix+'ARCH term %s (k)','gamma',names_d,names_v)
names_d['omega']=[eq_prefix+i for i in panel.heteroscedasticity_factors]#copy variable names
names_v.extend(names_d['omega'])
if m>0:
names_d['z']=[eq_prefix+'z in h(e,z)']
names_v.extend(names_d['z'])
n=len(system.names_v)
for i in range(len(names_v)):
system.name_positions_map[names_v[i]]=n+i
system.names_v.extend(names_v)
return names_d
def add_names(T,namesstr,category,d,names):
a=[]
for i in range(T):
a.append(namesstr %(i,))
names.extend(a)
d[category]=a
def insert_arg(arg,add):
n=min((len(arg),len(add)))
arg[:n]=add[:n]
return arg
| gpl-3.0 | 7,984,326,016,556,555,000 | 28.234266 | 139 | 0.668222 | false |
praekelt/panya-event | event/models.py | 1 | 1514 | from django.db import models
from django.core.urlresolvers import reverse
from ckeditor.fields import RichTextField
from panya.models import ModelBase
PROVINCES = (
('Eastern Cape', 'Eastern Cape'),
('Free State', 'Free State'),
('Gauteng', 'Gauteng'),
('KwaZulu-Natal', 'KwaZulu-Natal'),
('Limpopo', 'Limpopo'),
('Mpumalanga', 'Mpumalanga'),
('Northern Cape', 'Northern Cape'),
('North-West', 'North-West'),
('Western Cape', 'Western Cape'),
)
class Location(models.Model):
city = models.CharField(max_length=255, help_text='Name of the city.')
province = models.CharField(
choices=PROVINCES,
max_length=255,
help_text='Name of the province.'
)
def __unicode__(self):
return "%s, %s" % (self.city, self.province)
class Venue(models.Model):
name = models.CharField(max_length=255, help_text='A short descriptive name.')
address = models.CharField(max_length=512, help_text='Physical venue address.')
location = models.ForeignKey(
Location,
blank=True,
null=True,
help_text='Location of the venue.'
)
def __unicode__(self):
return self.name
class Event(ModelBase):
venue = models.ForeignKey(
Venue,
help_text='Venue where the event will take place.'
)
content = RichTextField(help_text='Full article detailing this event.')
def get_absolute_url(self):
return reverse('event_object_detail', kwargs={'slug': self.slug})
| bsd-3-clause | 7,660,587,681,568,411,000 | 28.115385 | 83 | 0.637384 | false |
xindiguo/pythonSynapseUtils | pythonSynapseUtils/synutils.py | 1 | 4081 | #!/usr/bin/env python
import argparse
import os
import sys
import synapseclient
import hashlib
import string
script_path = os.path.dirname(__file__)
local_module_path = os.path.abspath(os.path.join(script_path,'lib'))
sys.path.append(local_module_path)
import s3
STATIC_BUCKET = "static.synapse.org"
def create_html_file(html_link):
#get a unique file name from txt/link
html_file_name = str(hashlib.md5(html_link).hexdigest()) + '.html'
f = open(html_file_name, 'w')
html_template = string.Template("""
<!DOCTYPE html>
<html>
<body>
<iframe src="$HTML_LINK" width="1500" height="1000" allowfullscreen="true" mozallowfullscreen="true" webkitallowfullscreen="true"></iframe>
</body>
</html>
""")
html_content = html_template.substitute(HTML_LINK=html_link)
f.write(html_content)
f.close()
os.chmod(html_file_name, 0755) #make the file web readable before upload
return(html_file_name)
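# Illustrative usage (the URL is hypothetical): create_html_file("https://example.org/plot/index.html")
# writes an md5-named wrapper page (e.g. "<md5>.html") containing an iframe that
# points at the link, makes it world-readable, and returns the file name so the
# s3 commands below can upload it.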
def s3manage(args):
"""
Utilities for managing S3 bukcets
"""
#establish a connection to S3
bucket = s3.bucketManager(STATIC_BUCKET, args.aws_key, args.aws_secret, rememberMe=args.rememberMe)
#if user specifies an html link
if args.html_link is not None:
html_file = create_html_file(args.html_link)
args.upload_path = html_file
if os.path.isdir(args.upload_path) is True:
url = bucket.uploadDir(args.upload_path,args.upload_prefix)
else:
url = bucket.uploadFiles(args.upload_path,args.upload_prefix)
if args.synapse_wikiID is not None:
embed_url_in_synapse_wiki(url,args.synapse_wikiID)
def embed_url_in_synapse_wiki(url, wikiID):
import synapseclient
syn = synapseclient.login()
wiki = syn.getWiki(wikiID)
markdown = wiki['markdown']
#complete hack
if len(url) > 1:
url = [url[x] for x in url if x.endswith('index.html')]
url = url[0]
else:
url = url.values()[0]
#percent encoded URL
import urllib
url = urllib.quote(url, safe='')
link_markdown = '${iframe?site=' + url + '&height=1000}'
wiki['markdown'] = link_markdown
wiki = syn.store(wiki)
syn.onweb(wikiID)
def build_parser():
"""Builds the argument parser and returns the result."""
parser = argparse.ArgumentParser(description='Synapse Python Utilities')
parser.add_argument('--debug', dest='debug', action='store_true')
subparsers = parser.add_subparsers(title='commands',
description='The following commands are available:',
help='For additional help: "synutils.py <COMMAND> -h"')
parser_s3 = subparsers.add_parser('s3',help='utilities to manage data on static.synapse.org')
parser_s3.add_argument('-k' , '--aws_key', dest='aws_key', help='AWS Key', default=None)
parser_s3.add_argument('-s' , '--aws_secret', dest='aws_secret', help='AWS secret key', default=None)
parser_s3.add_argument('-up', '--upload', dest='upload_path', type=str, default=None)
parser_s3.add_argument('-l', '--link', dest='html_link', type=str, default=None,
help = "html link to embed in a synapse wiki")
parser_s3.add_argument('-w', '--wikiID', dest='synapse_wikiID', type=str, default=None,
help = "synapse wiki id to embed the link in")
parser_s3.add_argument('-p', '--prefix', dest='upload_prefix', type=str, default='scratch/',
help = 'prefix adds the sub dir structure on S3 eg. test/ will add the file under test/ folder on s3 bucket')
parser_s3.add_argument('--rememberMe', '--remember-me', dest='rememberMe', action='store_true', default=False,
help='Cache credentials for automatic authentication for future interactions')
parser_s3.set_defaults(func=s3manage)
return parser
def perform_main(args):
if 'func' in args:
try:
args.func(args)
except Exception as ex:
raise
def main():
args = build_parser().parse_args()
perform_main(args)
if __name__ == "__main__":
main()
| apache-2.0 | -1,364,867,809,172,800,000 | 33.008333 | 144 | 0.645675 | false |
dddomodossola/remi | examples/onclose_window_app.py | 1 | 1513 | """
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import remi.gui as gui
from remi import start, App
class MyApp(App):
def main(self, name='world'):
# margin 0px auto allows to center the app to the screen
wid = gui.VBox(width=300, height=200, margin='0px auto')
lbl = gui.Label("Close or reload the page, the console thread will stop automatically.")
wid.append(lbl)
# add the following 3 lines to your app and the on_window_close method to make the console close automatically
tag = gui.Tag(_type='script')
tag.add_child("javascript", """window.onunload=function(e){remi.sendCallback('%s','%s');return "close?";};""" % (
str(id(self)), "on_window_close"))
wid.add_child("onunloadevent", tag)
# returning the root widget
return wid
def on_window_close(self):
# here you can handle the unload
print("app closing")
self.close()
if __name__ == "__main__":
start(MyApp)
| apache-2.0 | -3,865,904,934,796,648,400 | 34.186047 | 121 | 0.662921 | false |
opentripplanner/OTPQA | hreport.py | 1 | 2481 | import simplejson as json
import numpy as np
def parsetime(aa):
if aa is None:
return None
return float( aa.split()[0] )
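# parsetime assumes strings of the form "<seconds> <unit>", e.g. "1.52 s"; the
# unit text is ignored. Illustrative checks: parsetime("1.52 s") == 1.52 and
# parsetime(None) is None.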
def main(filenames):
if len(filenames)==0:
return
yield "<html>"
yield """<head><style>table, th, td {
border: 1px solid black;
border-collapse: collapse;
}
th, td {
text-align: left;
}</style></head>"""
datasets = []
shas = []
for fn in filenames:
blob = json.load( open(fn) )
shas.append( blob['git_sha1'] )
dataset = dict( [(response["id_tuple"], response) for response in blob['responses']] )
datasets.append( dataset )
id_tuples = datasets[0].keys()
yield """<table border="1">"""
dataset_total_times = dict(zip( range(len(datasets)),[[] for x in range(len(datasets))]) )
dataset_avg_times = dict(zip(range(len(datasets)),[[] for x in range(len(datasets))]) )
dataset_fails = dict(zip(range(len(datasets)), [0]*len(datasets)))
yield "<tr><td>request id</td>"
for fn,sha in zip(filenames,shas):
yield "<td>%s (commit:%s)</td>"%(fn,sha)
yield "</tr>"
for id_tuple in id_tuples:
yield """<tr><td rowspan="2"><a href="%s">%s</a></td>"""%(datasets[0][id_tuple]['url'], id_tuple)
for i, dataset in enumerate( datasets ):
response = dataset[id_tuple]
dataset_total_times[i].append( parsetime( response['total_time'] ) )
dataset_avg_times[i].append( parsetime( response['avg_time'] ) )
yield "<td>%s total, %s avg</td>"%(response['total_time'],response['avg_time'])
yield "</tr>"
		yield "<tr>"
		for i, dataset in enumerate( datasets ):
yield "<td>"
response = dataset[id_tuple]
yield "<table border=1 width=100%><tr>"
if len(response['itins']) == 0:
dataset_fails[i] += 1
yield "<td style=\"background-color:#EDA1A1\">NONE</td>"
for itin in response['itins']:
filling = itin['routes']
if filling=="{}":
color = "#EDECA1"
else:
color = "#AEEDA1"
yield "<td style=\"background-color:%s\">%s</td>"%(color,filling)
yield "</tr></table>"
yield "</td>"
yield "</tr>"
yield "<tr><td>stats</td>"
for i in range(len(datasets)):
yield "<td>fails: %s (%.2f%%). total time: median:%.2fs mean:%.2fs</td>"%(dataset_fails[i], 100*dataset_fails[i]/float(len(id_tuples)), np.median(dataset_total_times[i]),np.mean(dataset_total_times[i]))
yield "</tr>"
yield "</table>"
yield "</html>"
if __name__=='__main__':
import sys
if len(sys.argv)<2:
print "usage: cmd fn1 [fn2 [fn3 ...]]"
exit()
for line in main(sys.argv[1:]):
print line | gpl-3.0 | -7,302,979,014,916,302,000 | 24.854167 | 204 | 0.614672 | false |
gnowledge/OTM2 | opentreemap/treemap/plugin.py | 1 | 3086 | # -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from django.conf import settings
from django.db.models import Q
from django.test.signals import setting_changed
from django.dispatch import receiver
from treemap.lib import get_function_by_path
# For use in tests, as basic functions to use in override_settings
always_false = lambda *args, **kwargs: False
#
# Plugin functions allow python modules which are not part of the OTM2 core the
# the ability to override select functionality.
#
# For instance, feature_enabled is called in certain locations in OTM2 before
# showing a feature. It's default implementation always returns True to enable
# the feature, but an outside python module can override this to selectively
# disable certain unwanted features.
#
_plugin_fn_dict = {}
_plugin_setting_dict = {}
def get_plugin_function(plugin_fn_setting, default_fn):
"""
Gets a plugin function from an external python module, and wraps it
so that it can be safely overridden for testing purposes.
Implementors of plugin functions should ensure that their function's
signature matches that of the default_fn
plugin_fn_setting - A string in the Django settings specifiying the
module and function path
default_fn - The function to call if plugin_fn_setting is not set
"""
# cache the function
_plugin_fn_dict[plugin_fn_setting] =\
_resolve_plugin_function(plugin_fn_setting, default_fn)
def wrapper(*args, **kwargs):
plugin_fn = _plugin_fn_dict.get(plugin_fn_setting)
if plugin_fn is None:
plugin_fn = _resolve_plugin_function(plugin_fn_setting, default_fn)
_plugin_fn_dict[plugin_fn_setting] = plugin_fn
return plugin_fn(*args, **kwargs)
return wrapper
def _resolve_plugin_function(fn_setting, default_fn):
fn_path = getattr(settings, fn_setting, None)
if fn_path is None:
return default_fn
return get_function_by_path(fn_path)
# Needed to support use of @override_settings in unit tests
@receiver(setting_changed)
def reset(sender, setting, value, **kwargs):
if setting in _plugin_fn_dict:
_plugin_fn_dict[setting] = None
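# Illustrative override sketch (module path and function body are hypothetical):
# a deployment could add
#     FEATURE_BACKEND_FUNCTION = 'myplugins.features.feature_enabled'
# to its Django settings, and get_plugin_function would then resolve and call
#     def feature_enabled(instance, feature):
#         return feature != 'experimental_maps'
# instead of the permissive default lambda below.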
feature_enabled = get_plugin_function('FEATURE_BACKEND_FUNCTION',
lambda instance, feature: True)
# Should return True if an activation email should be sent on user creation
should_send_user_activation = get_plugin_function(
'USER_ACTIVATION_FUNCTION',
lambda request, username, email, password: True)
setup_for_ui_test = get_plugin_function('UITEST_SETUP_FUNCTION', lambda: None)
get_mobile_instances_filter = get_plugin_function('MOBILE_INSTANCES_FUNCTION',
lambda: Q())
get_viewable_instances_filter = get_plugin_function(
'VIEWABLE_INSTANCES_FUNCTION', lambda: Q())
get_tree_limit = get_plugin_function('TREE_LIMIT_FUNCTION',
lambda instance: None)
| gpl-3.0 | -6,459,698,252,821,900,000 | 30.814433 | 79 | 0.692158 | false |
pdamodaran/yellowbrick | yellowbrick/text/dispersion.py | 1 | 10916 | # yellowbrick.text.dispersion
# Implementations of lexical dispersions for text visualization.
#
# Author: Larry Gray
# Created: 2018-06-21 10:06
#
# Copyright (C) 2018 District Data Labs
# For license information, see LICENSE.txt
#
# ID: dispersion.py [] [email protected] $
"""
Implementation of lexical dispersion for text visualization
"""
##########################################################################
## Imports
##########################################################################
from collections import defaultdict
import itertools
from yellowbrick.text.base import TextVisualizer
from yellowbrick.style.colors import resolve_colors
from yellowbrick.exceptions import YellowbrickValueError
import numpy as np
##########################################################################
## Dispersion Plot Visualizer
##########################################################################
class DispersionPlot(TextVisualizer):
"""
    DispersionPlotVisualizer allows for visualization of the lexical dispersion
    of words in a corpus. Lexical dispersion is a measure of a word's
    homogeneity across the parts of a corpus. This plot notes the occurrences
    of a word and how many words from the beginning of the corpus it appears at.
Parameters
----------
target_words : list
A list of target words whose dispersion across a corpus passed at fit
will be visualized.
ax : matplotlib axes, default: None
The axes to plot the figure on.
labels : list of strings
The names of the classes in the target, used to create a legend.
Labels must match names of classes in sorted order.
colors : list or tuple of colors
Specify the colors for each individual class
colormap : string or matplotlib cmap
Qualitative colormap for discrete target
ignore_case : boolean, default: False
Specify whether input will be case-sensitive.
annotate_docs : boolean, default: False
Specify whether document boundaries will be displayed. Vertical lines
are positioned at the end of each document.
kwargs : dict
Pass any additional keyword arguments to the super class.
These parameters can be influenced later on in the visualization
process, but can and should be set as early as possible.
"""
# NOTE: cannot be np.nan
NULL_CLASS = None
def __init__(self, target_words, ax=None, colors=None, ignore_case=False,
annotate_docs=False, labels=None, colormap=None, **kwargs):
super(DispersionPlot, self).__init__(ax=ax, **kwargs)
self.labels = labels
self.colors = colors
self.colormap = colormap
self.target_words = target_words
self.ignore_case = ignore_case
self.annotate_docs = annotate_docs
def _compute_dispersion(self, text, y):
self.boundaries_ = []
offset = 0
if y is None:
y = itertools.repeat(None)
for doc, target in zip(text, y):
for word in doc:
if self.ignore_case:
word = word.lower()
# NOTE: this will find all indices if duplicate words are supplied
                # In the case that the word is not in target words, an empty list is
# returned and no data will be yielded
offset += 1
for y_coord in (self.indexed_words_ == word).nonzero()[0]:
y_coord = int(y_coord)
yield (offset, y_coord, target)
if self.annotate_docs:
self.boundaries_.append(offset)
self.boundaries_ = np.array(self.boundaries_, dtype=int)
def _check_missing_words(self, points):
for index in range(len(self.indexed_words_)):
if index in points[:,1]:
pass
else:
raise YellowbrickValueError((
"The indexed word '{}' is not found in "
"this corpus"
).format(self.indexed_words_[index]))
def fit(self, X, y=None, **kwargs):
"""
The fit method is the primary drawing input for the dispersion
visualization.
Parameters
----------
X : list or generator
Should be provided as a list of documents or a generator
that yields a list of documents that contain a list of
words in the order they appear in the document.
y : ndarray or Series of length n
An optional array or series of target or class values for
instances. If this is specified, then the points will be colored
according to their class.
kwargs : dict
Pass generic arguments to the drawing method
Returns
-------
self : instance
Returns the instance of the transformer/visualizer
"""
if y is not None:
self.classes_ = np.unique(y)
elif y is None and self.labels is not None:
self.classes_ = np.array([self.labels[0]])
else:
self.classes_ = np.array([self.NULL_CLASS])
# Create an index (e.g. the y position) for the target words
self.indexed_words_ = np.flip(self.target_words, axis=0)
if self.ignore_case:
self.indexed_words_ = np.array([w.lower() for w in self.indexed_words_])
# Stack is used to create a 2D array from the generator
try:
points_target = np.stack(self._compute_dispersion(X, y))
except ValueError:
raise YellowbrickValueError((
"No indexed words were found in the corpus"
))
points = np.stack(zip(points_target[:,0].astype(int),
points_target[:,1].astype(int)))
self.target = points_target[:,2]
self._check_missing_words(points)
self.draw(points, self.target)
return self
def draw(self, points, target=None, **kwargs):
"""
Called from the fit method, this method creates the canvas and
draws the plot on it.
Parameters
----------
kwargs: generic keyword arguments.
"""
# Resolve the labels with the classes
labels = self.labels if self.labels is not None else self.classes_
if len(labels) != len(self.classes_):
raise YellowbrickValueError((
"number of supplied labels ({}) does not "
"match the number of classes ({})"
).format(len(labels), len(self.classes_)))
# Create the color mapping for the labels.
color_values = resolve_colors(
n_colors=len(labels), colormap=self.colormap, colors=self.color)
colors = dict(zip(labels, color_values))
# Transform labels into a map of class to label
labels = dict(zip(self.classes_, labels))
# Define boundaries with a vertical line
if self.annotate_docs:
for xcoords in self.boundaries_:
self.ax.axvline(x=xcoords, color='lightgray', linestyle='dashed')
series = defaultdict(lambda: {'x':[], 'y':[]})
if target is not None:
for point, t in zip(points, target):
label = labels[t]
series[label]['x'].append(point[0])
series[label]['y'].append(point[1])
else:
label = self.classes_[0]
for x, y in points:
series[label]['x'].append(x)
series[label]['y'].append(y)
for label, points in series.items():
self.ax.scatter(points['x'], points['y'], marker='|',
c=colors[label], zorder=100, label=label)
self.ax.set_yticks(list(range(len(self.indexed_words_))))
self.ax.set_yticklabels(self.indexed_words_)
def finalize(self, **kwargs):
"""
The finalize method executes any subclass-specific axes
finalization steps. The user calls poof & poof calls finalize.
Parameters
----------
kwargs: generic keyword arguments.
"""
self.ax.set_ylim(-1, len(self.indexed_words_))
self.ax.set_title("Lexical Dispersion Plot")
self.ax.set_xlabel("Word Offset")
self.ax.grid(False)
# Add the legend outside of the figure box.
if not all(self.classes_ == np.array([self.NULL_CLASS])):
box = self.ax.get_position()
self.ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
self.ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
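# A minimal usage sketch (illustrative only; the corpus and target words are
# made-up toy values, not part of the library): documents are passed in as
# lists of tokens and the listed target words are traced across the corpus.
def _example_dispersion_usage():
    corpus = [
        ["the", "cat", "sat", "on", "the", "mat"],
        ["the", "dog", "sat", "on", "the", "log"],
    ]
    visualizer = DispersionPlot(["cat", "dog"], ignore_case=True)
    visualizer.fit(corpus)
    return visualizer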
##########################################################################
## Quick Method
##########################################################################
def dispersion(words, corpus, y=None, ax=None, colors=None, colormap=None,
labels=None, annotate_docs=False, ignore_case=False, **kwargs):
""" Displays lexical dispersion plot for words in a corpus
    This helper function is a quick wrapper to utilize the DispersionPlot
    Visualizer for one-off analysis.
Parameters
----------
words : list
A list of words whose dispersion will be examined within a corpus
y : ndarray or Series of length n
An optional array or series of target or class values for
instances. If this is specified, then the points will be colored
according to their class.
corpus : list
Should be provided as a list of documents that contain
a list of words in the order they appear in the document.
ax : matplotlib axes, default: None
The axes to plot the figure on.
labels : list of strings
The names of the classes in the target, used to create a legend.
Labels must match names of classes in sorted order.
colors : list or tuple of colors
Specify the colors for each individual class
colormap : string or matplotlib cmap
Qualitative colormap for discrete target
annotate_docs : boolean, default: False
Specify whether document boundaries will be displayed. Vertical lines
are positioned at the end of each document.
ignore_case : boolean, default: False
Specify whether input will be case-sensitive.
kwargs : dict
Pass any additional keyword arguments to the super class.
Returns
-------
ax: matplotlib axes
Returns the axes that the plot was drawn on
"""
# Instantiate the visualizer
visualizer = DispersionPlot(
words, ax=ax, colors=colors, colormap=colormap,
ignore_case=ignore_case, labels=labels,
annotate_docs=annotate_docs, **kwargs
)
# Fit and transform the visualizer (calls draw)
visualizer.fit(corpus, y, **kwargs)
# Return the axes object on the visualizer
return visualizer.ax
| apache-2.0 | -2,299,755,290,369,907,200 | 33.764331 | 84 | 0.588402 | false |
lucasberti/telegrao-py | plugins/apex.py | 1 | 2106 | import requests
import json
from api import send_message
ENDPOINT = "https://public-api.tracker.gg/apex/v1/standard/profile/5/"
PLAYERS = {
14160874: "bertoncio",
16631085: "beartz",
85867003: "R3TCH4",
52451934: "xisteaga",
10549434: "Axasdas123",
123123862: "MeroFabio",
569341881: "burnovisk",
299335806: "Springl3s",
77547673: "BeDabul"
}
def get_stats(username):
headers = {
"TRN-Api-Key": "987c5b41-5649-4b4e-9d3f-4d58cc904584"
}
return requests.get(ENDPOINT + username, headers=headers).json()
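# Illustrative usage (the player name is made up): the raw tracker.gg JSON from
# get_stats is passed straight to get_string below, e.g.
#     data = get_stats("SomePlayer")
#     send_message(chat_id, get_string(data))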
def get_string(data):
data = data["data"]
legend_on_menu = data["children"][0]["metadata"]["legend_name"]
username = data["metadata"]["platformUserHandle"]
hero_stats = ""
for legend in data["children"]:
hero_stats += f"{legend['metadata']['legend_name']}\n"
for stat in legend["stats"]:
name = stat["metadata"]["name"]
value = stat["displayValue"]
percentile = stat["percentile"] if "percentile" in stat.keys() else "desconecidi"
rank = stat["rank"] if "rank" in stat.keys() else "desconecidi"
hero_stats += f"{name}: {value} (top {percentile}% rank {rank})\n"
hero_stats += "\n"
global_stats = ""
for stat in data["stats"]:
global_stats += f"{stat['metadata']['name']}: {stat['displayValue']}\n"
return f"""{username} mt noob rs
ta c {legend_on_menu} selelessiondn
{hero_stats}
globau:
{global_stats}"""
def on_msg_received(msg, matches):
chat = msg["chat"]["id"]
user = msg["from"]["id"]
player = None
if matches.group(1):
player = matches.group(1)
else:
if user in PLAYERS:
player = PLAYERS[user]
if player is not None:
try:
data = get_stats(player)
stats = get_string(data)
print(stats)
send_message(chat, stats)
except Exception as e:
send_message(chat, f"vish deu merda..... {e}")
| mit | 2,123,689,757,657,363,200 | 25 | 93 | 0.561254 | false |
DecipherOne/Troglodyte | Trog Build Dependencies/Python26/Lib/bsddb/test/test_all.py | 1 | 16100 | """Run all test cases.
"""
import sys
import os
import unittest
try:
# For Pythons w/distutils pybsddb
import bsddb3 as bsddb
except ImportError:
# For Python 2.3
import bsddb
if sys.version_info[0] >= 3 :
charset = "iso8859-1" # Full 8 bit
class cursor_py3k(object) :
def __init__(self, db, *args, **kwargs) :
self._dbcursor = db.cursor(*args, **kwargs)
def __getattr__(self, v) :
return getattr(self._dbcursor, v)
def _fix(self, v) :
if v is None : return None
key, value = v
if isinstance(key, bytes) :
key = key.decode(charset)
return (key, value.decode(charset))
def __next__(self) :
v = getattr(self._dbcursor, "next")()
return self._fix(v)
next = __next__
def previous(self) :
v = self._dbcursor.previous()
return self._fix(v)
def last(self) :
v = self._dbcursor.last()
return self._fix(v)
def set(self, k) :
if isinstance(k, str) :
k = bytes(k, charset)
v = self._dbcursor.set(k)
return self._fix(v)
def set_recno(self, num) :
v = self._dbcursor.set_recno(num)
return self._fix(v)
def set_range(self, k, dlen=-1, doff=-1) :
if isinstance(k, str) :
k = bytes(k, charset)
v = self._dbcursor.set_range(k, dlen=dlen, doff=doff)
return self._fix(v)
def dup(self, flags=0) :
cursor = self._dbcursor.dup(flags)
return dup_cursor_py3k(cursor)
def next_dup(self) :
v = self._dbcursor.next_dup()
return self._fix(v)
def next_nodup(self) :
v = self._dbcursor.next_nodup()
return self._fix(v)
def put(self, key, value, flags=0, dlen=-1, doff=-1) :
if isinstance(key, str) :
key = bytes(key, charset)
if isinstance(value, str) :
value = bytes(value, charset)
return self._dbcursor.put(key, value, flags=flags, dlen=dlen,
doff=doff)
def current(self, flags=0, dlen=-1, doff=-1) :
v = self._dbcursor.current(flags=flags, dlen=dlen, doff=doff)
return self._fix(v)
def first(self) :
v = self._dbcursor.first()
return self._fix(v)
def pget(self, key=None, data=None, flags=0) :
# Incorrect because key can be a bare number,
# but enough to pass testsuite
if isinstance(key, int) and (data is None) and (flags == 0) :
flags = key
key = None
if isinstance(key, str) :
key = bytes(key, charset)
if isinstance(data, int) and (flags==0) :
flags = data
data = None
if isinstance(data, str) :
data = bytes(data, charset)
v=self._dbcursor.pget(key=key, data=data, flags=flags)
if v is not None :
v1, v2, v3 = v
if isinstance(v1, bytes) :
v1 = v1.decode(charset)
if isinstance(v2, bytes) :
v2 = v2.decode(charset)
v = (v1, v2, v3.decode(charset))
return v
def join_item(self) :
v = self._dbcursor.join_item()
if v is not None :
v = v.decode(charset)
return v
def get(self, *args, **kwargs) :
l = len(args)
if l == 2 :
k, f = args
if isinstance(k, str) :
k = bytes(k, "iso8859-1")
args = (k, f)
elif l == 3 :
k, d, f = args
if isinstance(k, str) :
k = bytes(k, charset)
if isinstance(d, str) :
d = bytes(d, charset)
args =(k, d, f)
v = self._dbcursor.get(*args, **kwargs)
if v is not None :
k, v = v
if isinstance(k, bytes) :
k = k.decode(charset)
v = (k, v.decode(charset))
return v
def get_both(self, key, value) :
if isinstance(key, str) :
key = bytes(key, charset)
if isinstance(value, str) :
value = bytes(value, charset)
v=self._dbcursor.get_both(key, value)
return self._fix(v)
class dup_cursor_py3k(cursor_py3k) :
def __init__(self, dbcursor) :
self._dbcursor = dbcursor
class DB_py3k(object) :
def __init__(self, *args, **kwargs) :
args2=[]
for i in args :
if isinstance(i, DBEnv_py3k) :
i = i._dbenv
args2.append(i)
args = tuple(args2)
for k, v in kwargs.items() :
if isinstance(v, DBEnv_py3k) :
kwargs[k] = v._dbenv
self._db = bsddb._db.DB_orig(*args, **kwargs)
def __contains__(self, k) :
if isinstance(k, str) :
k = bytes(k, charset)
return getattr(self._db, "has_key")(k)
def __getitem__(self, k) :
if isinstance(k, str) :
k = bytes(k, charset)
v = self._db[k]
if v is not None :
v = v.decode(charset)
return v
def __setitem__(self, k, v) :
if isinstance(k, str) :
k = bytes(k, charset)
if isinstance(v, str) :
v = bytes(v, charset)
self._db[k] = v
def __delitem__(self, k) :
if isinstance(k, str) :
k = bytes(k, charset)
del self._db[k]
def __getattr__(self, v) :
return getattr(self._db, v)
def __len__(self) :
return len(self._db)
def has_key(self, k, txn=None) :
if isinstance(k, str) :
k = bytes(k, charset)
return self._db.has_key(k, txn=txn)
def put(self, key, value, txn=None, flags=0, dlen=-1, doff=-1) :
if isinstance(key, str) :
key = bytes(key, charset)
if isinstance(value, str) :
value = bytes(value, charset)
return self._db.put(key, value, flags=flags, txn=txn, dlen=dlen,
doff=doff)
def append(self, value, txn=None) :
if isinstance(value, str) :
value = bytes(value, charset)
return self._db.append(value, txn=txn)
def get_size(self, key) :
if isinstance(key, str) :
key = bytes(key, charset)
return self._db.get_size(key)
def get(self, key, default="MagicCookie", txn=None, flags=0, dlen=-1, doff=-1) :
if isinstance(key, str) :
key = bytes(key, charset)
if default != "MagicCookie" : # Magic for 'test_get_none.py'
v=self._db.get(key, default=default, txn=txn, flags=flags,
dlen=dlen, doff=doff)
else :
v=self._db.get(key, txn=txn, flags=flags,
dlen=dlen, doff=doff)
if (v is not None) and isinstance(v, bytes) :
v = v.decode(charset)
return v
def pget(self, key, txn=None) :
if isinstance(key, str) :
key = bytes(key, charset)
v=self._db.pget(key, txn=txn)
if v is not None :
v1, v2 = v
if isinstance(v1, bytes) :
v1 = v1.decode(charset)
v = (v1, v2.decode(charset))
return v
def get_both(self, key, value, txn=None, flags=0) :
if isinstance(key, str) :
key = bytes(key, charset)
if isinstance(value, str) :
value = bytes(value, charset)
v=self._db.get_both(key, value, txn=txn, flags=flags)
if v is not None :
v = v.decode(charset)
return v
def delete(self, key, txn=None) :
if isinstance(key, str) :
key = bytes(key, charset)
return self._db.delete(key, txn=txn)
def keys(self) :
k = self._db.keys()
if len(k) and isinstance(k[0], bytes) :
return [i.decode(charset) for i in self._db.keys()]
else :
return k
def items(self) :
data = self._db.items()
if not len(data) : return data
data2 = []
for k, v in data :
if isinstance(k, bytes) :
k = k.decode(charset)
data2.append((k, v.decode(charset)))
return data2
def associate(self, secondarydb, callback, flags=0, txn=None) :
class associate_callback(object) :
def __init__(self, callback) :
self._callback = callback
def callback(self, key, data) :
if isinstance(key, str) :
key = key.decode(charset)
data = data.decode(charset)
key = self._callback(key, data)
if (key != bsddb._db.DB_DONOTINDEX) and isinstance(key,
str) :
key = bytes(key, charset)
return key
return self._db.associate(secondarydb._db,
associate_callback(callback).callback, flags=flags, txn=txn)
def cursor(self, txn=None, flags=0) :
return cursor_py3k(self._db, txn=txn, flags=flags)
def join(self, cursor_list) :
cursor_list = [i._dbcursor for i in cursor_list]
return dup_cursor_py3k(self._db.join(cursor_list))
class DBEnv_py3k(object) :
def __init__(self, *args, **kwargs) :
self._dbenv = bsddb._db.DBEnv_orig(*args, **kwargs)
def __getattr__(self, v) :
return getattr(self._dbenv, v)
class DBSequence_py3k(object) :
def __init__(self, db, *args, **kwargs) :
self._db=db
self._dbsequence = bsddb._db.DBSequence_orig(db._db, *args, **kwargs)
def __getattr__(self, v) :
return getattr(self._dbsequence, v)
def open(self, key, *args, **kwargs) :
return self._dbsequence.open(bytes(key, charset), *args, **kwargs)
def get_key(self) :
return self._dbsequence.get_key().decode(charset)
def get_dbp(self) :
return self._db
import string
string.letters=[chr(i) for i in xrange(65,91)]
bsddb._db.DBEnv_orig = bsddb._db.DBEnv
bsddb._db.DB_orig = bsddb._db.DB
bsddb._db.DBSequence_orig = bsddb._db.DBSequence
def do_proxy_db_py3k(flag) :
flag2 = do_proxy_db_py3k.flag
do_proxy_db_py3k.flag = flag
if flag :
bsddb.DBEnv = bsddb.db.DBEnv = bsddb._db.DBEnv = DBEnv_py3k
bsddb.DB = bsddb.db.DB = bsddb._db.DB = DB_py3k
bsddb._db.DBSequence = DBSequence_py3k
else :
bsddb.DBEnv = bsddb.db.DBEnv = bsddb._db.DBEnv = bsddb._db.DBEnv_orig
bsddb.DB = bsddb.db.DB = bsddb._db.DB = bsddb._db.DB_orig
bsddb._db.DBSequence = bsddb._db.DBSequence_orig
return flag2
do_proxy_db_py3k.flag = False
do_proxy_db_py3k(True)
try:
# For Pythons w/distutils pybsddb
from bsddb3 import db, dbtables, dbutils, dbshelve, \
hashopen, btopen, rnopen, dbobj
except ImportError:
# For Python 2.3
from bsddb import db, dbtables, dbutils, dbshelve, \
hashopen, btopen, rnopen, dbobj
try:
from bsddb3 import test_support
except ImportError:
if sys.version_info[0] < 3 :
from test import test_support
else :
from test import support as test_support
try:
if sys.version_info[0] < 3 :
from threading import Thread, currentThread
del Thread, currentThread
else :
from threading import Thread, current_thread
del Thread, current_thread
have_threads = True
except ImportError:
have_threads = False
verbose = 0
if 'verbose' in sys.argv:
verbose = 1
sys.argv.remove('verbose')
if 'silent' in sys.argv: # take care of old flag, just in case
verbose = 0
sys.argv.remove('silent')
def print_versions():
print
print '-=' * 38
print db.DB_VERSION_STRING
print 'bsddb.db.version(): %s' % (db.version(), )
print 'bsddb.db.__version__: %s' % db.__version__
print 'bsddb.db.cvsid: %s' % db.cvsid
print 'py module: %s' % bsddb.__file__
print 'extension module: %s' % bsddb._bsddb.__file__
print 'python version: %s' % sys.version
print 'My pid: %s' % os.getpid()
print '-=' * 38
def get_new_path(name) :
get_new_path.mutex.acquire()
try :
import os
path=os.path.join(get_new_path.prefix,
name+"_"+str(os.getpid())+"_"+str(get_new_path.num))
get_new_path.num+=1
finally :
get_new_path.mutex.release()
return path
def get_new_environment_path() :
path=get_new_path("environment")
import os
try:
os.makedirs(path,mode=0700)
except os.error:
test_support.rmtree(path)
os.makedirs(path)
return path
def get_new_database_path() :
path=get_new_path("database")
import os
if os.path.exists(path) :
os.remove(path)
return path
# This path can be overriden via "set_test_path_prefix()".
import os, os.path
get_new_path.prefix=os.path.join(os.sep,"tmp","z-Berkeley_DB")
get_new_path.num=0
def get_test_path_prefix() :
return get_new_path.prefix
def set_test_path_prefix(path) :
get_new_path.prefix=path
def remove_test_path_directory() :
test_support.rmtree(get_new_path.prefix)
if have_threads :
import threading
get_new_path.mutex=threading.Lock()
del threading
else :
class Lock(object) :
def acquire(self) :
pass
def release(self) :
pass
get_new_path.mutex=Lock()
del Lock
class PrintInfoFakeTest(unittest.TestCase):
def testPrintVersions(self):
print_versions()
# This little hack is for when this module is run as main and all the
# other modules import it so they will still be able to get the right
# verbose setting. It's confusing but it works.
if sys.version_info[0] < 3 :
import test_all
test_all.verbose = verbose
else :
import sys
print >>sys.stderr, "Work to do!"
def suite(module_prefix='', timing_check=None):
test_modules = [
'test_associate',
'test_basics',
'test_compare',
'test_compat',
'test_cursor_pget_bug',
'test_dbobj',
'test_dbshelve',
'test_dbtables',
'test_distributed_transactions',
'test_early_close',
'test_get_none',
'test_join',
'test_lock',
'test_misc',
'test_pickle',
'test_queue',
'test_recno',
'test_replication',
'test_sequence',
'test_thread',
]
alltests = unittest.TestSuite()
for name in test_modules:
#module = __import__(name)
# Do it this way so that suite may be called externally via
# python's Lib/test/test_bsddb3.
module = __import__(module_prefix+name, globals(), locals(), name)
alltests.addTest(module.test_suite())
if timing_check:
alltests.addTest(unittest.makeSuite(timing_check))
return alltests
def test_suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(PrintInfoFakeTest))
return suite
if __name__ == '__main__':
print_versions()
unittest.main(defaultTest='suite')
| mit | -3,441,069,557,960,012,300 | 29.666667 | 88 | 0.511677 | false |
vik001ind/RSAExploits | RSAExploits/exploits/hastad.py | 1 | 3524 | """ Class definition for simple hastad broadcast exploit """
from RSAExploits import util
from RSAExploits.exploits.exploit import Exploit
class Hastad(Exploit):
""" Class providing a run interface to hastad broadcast exploit"""
def run(self, rsadata_list, info_dict = None):
""" Attempts to recover plaintext using Hastad's broadcast attack
This attack works when we have a list of RSA_Data objects such
that the length of the list is greater than or equal to e,
unique and coprime moduli are used for each encryption, and the
same plaintext message is encrypted to generate all ciphertexts.
Args:
rsadata_list: A list of RSA_Data objects on which to attempt
Hastad's exploit
info_dict: Not currently used
Assumptions:
None of the RSA_Data objects in rsadata_list have the same
public exponent e, same id number, and same modulus N. This
should be prevented by calling the remove_duplicates() function
in RSA_Data.py
Side Effects:
If a message is recovered, the corresponding RSA_Data objects
will be updated with this information
Return:
True if at least one message was recovered
"""
print ("Hastad: Running Attack...")
success = False
e_id_dict = self.group_by_e_and_id(rsadata_list)
for group in e_id_dict:
msg = self.hastad_broadcast_exploit(e_id_dict[group])
if msg != None:
success = True
for rsadata in e_id_dict[group]:
rsadata.set_m(msg)
if success:
print("Hastad: Success, message found.")
else:
print("Hastad: Failure, message not found.")
return success
@staticmethod
def group_by_e_and_id(rsadata_list):
""" Group the RSA_Data objects by public exponent and id """
e_id_dict = {}
for rsadata in rsadata_list:
# Only consider entries with an ID number
if rsadata.get_id() == None:
continue
# Only consider entries with a ciphertext
if rsadata.get_c() == None:
continue
# If the (e, idnum) tuple already exists in the dictionary, just
# append the new entry to the already existing list
if (rsadata.get_e(), rsadata.get_id()) in e_id_dict:
e_id_dict[(rsadata.get_e(), rsadata.get_id())].append(rsadata)
# Otherwise, create a new list for the new tuple
else:
e_id_dict[(rsadata.get_e(), rsadata.get_id())] = [rsadata]
return e_id_dict
@staticmethod
def hastad_broadcast_exploit(rsadata_list):
""" Recover the plaintext message using chinese remainder theorem """
e = rsadata_list[0].get_e()
if len(rsadata_list) < e:
return None
ns = []
cs = []
for index in range(e):
ns.append(rsadata_list[index].get_n())
cs.append(rsadata_list[index].get_c())
s = util.crt(ns, cs)
pt = util.int_nthroot(s, e)
if pt is not None:
return pt
else:
return None
| mit | -132,503,831,933,218,540 | 33.891089 | 80 | 0.542849 | false |
aschleg/mathpy | mathpy/special/gamma.py | 1 | 1555 | # encoding=utf8
import numpy as np
def k_function(n):
r"""
Returns the K-function up to a given integer n.
Parameters
----------
n : int
        The number of terms to compute; the returned array contains :math:`K(1)` through :math:`K(n)`.
Returns
-------
array-like
numpy array of the computed integers returned by the K-function.
Notes
-----
    The K-function generalizes the hyperfactorial to complex numbers and is defined for positive
integers as:
.. math::
K(n) \equiv 1^1 2^2 3^3 \cdots (n - 1)^{n - 1}
The K-function can also be expressed as a hyperfactorial, :math:`H`:
.. math::
K(n) = H(n - 1)
The Gamma function and Barnes G-Function are also closely related by:
.. math::
K(n) = \frac{[\Gamma{n}]^{n - 1}}{G(n)}
Examples
--------
>>> k_function(3)
array([1., 1., 4.])
>>> k_function(5).astype(int)
array([ 1, 1, 4, 108, 27648])
References
----------
Sloane, N. J. A. Sequence A002109/M3706 in "The On-Line Encyclopedia of Integer Sequences."
Weisstein, Eric W. "K-Function." From MathWorld--A Wolfram Web Resource.
http://mathworld.wolfram.com/K-Function.html
Wikipedia contributors. (2015, December 5). K-function. In Wikipedia, The Free Encyclopedia.
Retrieved 13:56, March 1, 2018, from https://en.wikipedia.org/w/index.php?title=K-function&oldid=693891074
"""
kn = np.empty(n)
k = 1
for i in np.arange(0, n):
k *= (i) ** (i)
kn[i] = k
return kn
| mit | -6,329,161,334,717,358,000 | 21.867647 | 114 | 0.573633 | false |
maarteninja/ml2014 | three/minimize.py | 1 | 8935 | #This program is distributed WITHOUT ANY WARRANTY; without even the implied
#warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
#
#This file contains a Python version of Carl Rasmussen's Matlab-function
#minimize.m
#
#minimize.m is copyright (C) 1999 - 2006, Carl Edward Rasmussen.
#Python adaptation by Roland Memisevic 2008.
#
#
#The following is the original copyright notice that comes with the
#function minimize.m
#(from http://www.kyb.tuebingen.mpg.de/bs/people/carl/code/minimize/Copyright):
#
#
#"(C) Copyright 1999 - 2006, Carl Edward Rasmussen
#
#Permission is granted for anyone to copy, use, or modify these
#programs and accompanying documents for purposes of research or
#education, provided this copyright notice is retained, and note is
#made of any changes that have been made.
#
#These programs and documents are distributed without any warranty,
#express or implied. As the programs were written for research
#purposes only, they have not been tested to the degree that would be
#advisable in any important application. All use of these programs is
#entirely at the user's own risk."
"""minimize.py
This module contains a function 'minimize' that performs unconstrained
gradient based optimization using nonlinear conjugate gradients.
The function is a straightforward Python-translation of Carl Rasmussen's
Matlab-function minimize.m
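Example (a minimal sketch, assuming this file is importable as ``minimize``;
the quadratic objective and gradient below are purely illustrative):
    import numpy as np
    from minimize import minimize
    def f(x):
        return 0.5 * np.dot(x, x)
    def grad(x):
        return x
    x0 = np.array([3.0, -2.0])
    xopt, fvals, count = minimize(x0, f, grad, (), maxnumlinesearch=50,
                                  verbose=False)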
"""
from numpy import dot, isinf, isnan, any, sqrt, isreal, real, nan, inf
def minimize(X, f, grad, args, maxnumlinesearch=None, maxnumfuneval=None, red=1.0, verbose=True):
INT = 0.1;# don't reevaluate within 0.1 of the limit of the current bracket
EXT = 3.0; # extrapolate maximum 3 times the current step-size
MAX = 20; # max 20 function evaluations per line search
RATIO = 10; # maximum allowed slope ratio
SIG = 0.1;RHO = SIG/2;# SIG and RHO are the constants controlling the Wolfe-
#Powell conditions. SIG is the maximum allowed absolute ratio between
#previous and new slopes (derivatives in the search direction), thus setting
#SIG to low (positive) values forces higher precision in the line-searches.
#RHO is the minimum allowed fraction of the expected (from the slope at the
#initial point in the linesearch). Constants must satisfy 0 < RHO < SIG < 1.
#Tuning of SIG (depending on the nature of the function to be optimized) may
#speed up the minimization; it is probably not worth playing much with RHO.
SMALL = 10.**-16 #minimize.m uses matlab's realmin
if maxnumlinesearch == None:
if maxnumfuneval == None:
raise "Specify maxnumlinesearch or maxnumfuneval"
else:
S = 'Function evaluation'
            length = -maxnumfuneval  # negative length counts function evaluations
else:
if maxnumfuneval != None:
raise "Specify either maxnumlinesearch or maxnumfuneval (not both)"
else:
S = 'Linesearch'
length = maxnumlinesearch
i = 0 # zero the run length counter
ls_failed = 0 # no previous line search has failed
f0 = f(X, *args) # get function value and gradient
df0 = grad(X, *args)
fX = [f0]
i = i + (length<0) # count epochs?!
s = -df0; d0 = -dot(s,s) # initial search direction (steepest) and slope
x3 = red/(1.0-d0) # initial step is red/(|s|+1)
while i < abs(length): # while not finished
i = i + (length>0) # count iterations?!
X0 = X; F0 = f0; dF0 = df0 # make a copy of current values
if length>0:
M = MAX
else:
M = min(MAX, -length-i)
while 1: # keep extrapolating as long as necessary
x2 = 0; f2 = f0; d2 = d0; f3 = f0; df3 = df0
success = 0
while (not success) and (M > 0):
try:
M = M - 1; i = i + (length<0) # count epochs?!
f3 = f(X+x3*s, *args)
df3 = grad(X+x3*s, *args)
if isnan(f3) or isinf(f3) or any(isnan(df3)+isinf(df3)):
print "an error in minimize error"
print "f3 = ", f3
print "df3 = ", df3
return
success = 1
except: # catch any error which occured in f
x3 = (x2+x3)/2 # bisect and try again
if f3 < F0:
X0 = X+x3*s; F0 = f3; dF0 = df3 # keep best values
d3 = dot(df3,s) # new slope
if d3 > SIG*d0 or f3 > f0+x3*RHO*d0 or M == 0:
# are we done extrapolating?
break
x1 = x2; f1 = f2; d1 = d2 # move point 2 to point 1
x2 = x3; f2 = f3; d2 = d3 # move point 3 to point 2
A = 6*(f1-f2)+3*(d2+d1)*(x2-x1) # make cubic extrapolation
B = 3*(f2-f1)-(2*d1+d2)*(x2-x1)
Z = B+sqrt(complex(B*B-A*d1*(x2-x1)))
if Z != 0.0:
x3 = x1-d1*(x2-x1)**2/Z # num. error possible, ok!
else:
x3 = inf
if (not isreal(x3)) or isnan(x3) or isinf(x3) or (x3 < 0):
# num prob | wrong sign?
x3 = x2*EXT # extrapolate maximum amount
elif x3 > x2*EXT: # new point beyond extrapolation limit?
x3 = x2*EXT # extrapolate maximum amount
elif x3 < x2+INT*(x2-x1): # new point too close to previous point?
x3 = x2+INT*(x2-x1)
x3 = real(x3)
while (abs(d3) > -SIG*d0 or f3 > f0+x3*RHO*d0) and M > 0:
# keep interpolating
if (d3 > 0) or (f3 > f0+x3*RHO*d0): # choose subinterval
x4 = x3; f4 = f3; d4 = d3 # move point 3 to point 4
else:
x2 = x3; f2 = f3; d2 = d3 # move point 3 to point 2
if f4 > f0:
x3 = x2-(0.5*d2*(x4-x2)**2)/(f4-f2-d2*(x4-x2))
# quadratic interpolation
else:
A = 6*(f2-f4)/(x4-x2)+3*(d4+d2) # cubic interpolation
B = 3*(f4-f2)-(2*d2+d4)*(x4-x2)
if A != 0:
x3=x2+(sqrt(B*B-A*d2*(x4-x2)**2)-B)/A
# num. error possible, ok!
else:
x3 = inf
if isnan(x3) or isinf(x3):
x3 = (x2+x4)/2 # if we had a numerical problem then bisect
x3 = max(min(x3, x4-INT*(x4-x2)),x2+INT*(x4-x2))
# don't accept too close
f3 = f(X+x3*s, *args)
df3 = grad(X+x3*s, *args)
if f3 < F0:
X0 = X+x3*s; F0 = f3; dF0 = df3 # keep best values
M = M - 1; i = i + (length<0) # count epochs?!
d3 = dot(df3,s) # new slope
if abs(d3) < -SIG*d0 and f3 < f0+x3*RHO*d0: # if line search succeeded
X = X+x3*s; f0 = f3; fX.append(f0) # update variables
if verbose: print '%s %6i; Value %4.6e\r' % (S, i, f0)
s = (dot(df3,df3)-dot(df0,df3))/dot(df0,df0)*s - df3
# Polack-Ribiere CG direction
df0 = df3 # swap derivatives
d3 = d0; d0 = dot(df0,s)
if d0 > 0: # new slope must be negative
s = -df0; d0 = -dot(s,s) # otherwise use steepest direction
x3 = x3 * min(RATIO, d3/(d0-SMALL)) # slope ratio but max RATIO
ls_failed = 0 # this line search did not fail
else:
X = X0; f0 = F0; df0 = dF0 # restore best point so far
if ls_failed or (i>abs(length)):# line search failed twice in a row
break # or we ran out of time, so we give up
s = -df0; d0 = -dot(s,s) # try steepest
x3 = 1/(1-d0)
ls_failed = 1 # this line search failed
if verbose: print "\n"
return X, fX, i
| gpl-2.0 | -1,338,693,259,614,311,000 | 49.196629 | 97 | 0.484835 | false |
rande/python-element | element/plugins/disqus/disqus.py | 1 | 1405 | #
# Copyright 2014 Thomas Rabaix <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import element.node
class DisqusHandler(element.node.NodeHandler):
def __init__(self, account, templating):
self.account = account
self.templating = templating
def get_defaults(self, node):
return {
'template': 'element.plugins.disqus:comments.html'
}
def get_name(self):
return 'Disqus'
def execute(self, request_handler, context):
if not self.account:
return
params = {
'account': self.account,
}
self.render(request_handler, self.templating, context.settings['template'], params)
def listener(self, event):
node = element.node.Node('disqus://%s' % event.get('subject').id, {
'type': 'disqus.comments',
})
event.set('node', node) | apache-2.0 | -9,126,190,087,776,498,000 | 29.565217 | 91 | 0.659786 | false |
GiulioRossetti/ndlib | ndlib/models/epidemics/IndependentCascadesModel.py | 1 | 3628 | from ..DiffusionModel import DiffusionModel
import numpy as np
import future.utils
__author__ = 'Giulio Rossetti'
__license__ = "BSD-2-Clause"
__email__ = "[email protected]"
class IndependentCascadesModel(DiffusionModel):
"""
Edge Parameters to be specified via ModelConfig
:param threshold: The edge threshold. As default a value of 0.1 is assumed for all edges.
"""
def __init__(self, graph, seed=None):
"""
Model Constructor
:param graph: A networkx graph object
"""
super(self.__class__, self).__init__(graph, seed)
self.available_statuses = {
"Susceptible": 0,
"Infected": 1,
"Removed": 2
}
self.parameters = {
"model": {},
"nodes": {},
"edges": {
"threshold": {
"descr": "Edge threshold",
"range": [0, 1],
"optional": True,
"default": 0.1
}
},
}
self.name = "Independent Cascades"
def iteration(self, node_status=True):
"""
Execute a single model iteration
:return: Iteration_id, Incremental node status (dictionary node->status)
"""
self.clean_initial_status(self.available_statuses.values())
actual_status = {node: nstatus for node, nstatus in future.utils.iteritems(self.status)}
if self.actual_iteration == 0:
self.actual_iteration += 1
delta, node_count, status_delta = self.status_delta(actual_status)
if node_status:
return {"iteration": 0, "status": actual_status.copy(),
"node_count": node_count.copy(), "status_delta": status_delta.copy()}
else:
return {"iteration": 0, "status": {},
"node_count": node_count.copy(), "status_delta": status_delta.copy()}
for u in self.graph.nodes:
if self.status[u] != 1:
continue
neighbors = list(self.graph.neighbors(u)) # neighbors and successors (in DiGraph) produce the same result
# Standard threshold
if len(neighbors) > 0:
threshold = 1.0/len(neighbors)
for v in neighbors:
if actual_status[v] == 0:
key = (u, v)
# Individual specified thresholds
if 'threshold' in self.params['edges']:
if key in self.params['edges']['threshold']:
threshold = self.params['edges']['threshold'][key]
elif (v, u) in self.params['edges']['threshold'] and not self.graph.directed:
threshold = self.params['edges']['threshold'][(v, u)]
flip = np.random.random_sample()
if flip <= threshold:
actual_status[v] = 1
actual_status[u] = 2
delta, node_count, status_delta = self.status_delta(actual_status)
self.status = actual_status
self.actual_iteration += 1
if node_status:
return {"iteration": self.actual_iteration - 1, "status": delta.copy(),
"node_count": node_count.copy(), "status_delta": status_delta.copy()}
else:
return {"iteration": self.actual_iteration - 1, "status": {},
"node_count": node_count.copy(), "status_delta": status_delta.copy()}
| bsd-2-clause | -6,692,687,738,186,138,000 | 35.28 | 118 | 0.507993 | false |
Northeaster/TargetSentimentAnalysis | lib/rnn_cells/gru_cell.py | 1 | 1575 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from lib.rnn_cells.base_cell import BaseCell
from lib import linalg
#***************************************************************
class GRUCell(BaseCell):
""""""
#=============================================================
def __call__(self, inputs, state, scope=None):
""""""
with tf.variable_scope(scope or type(self).__name__):
with tf.variable_scope('Gates'):
linear = linalg.linear([inputs, state],
self.output_size,
add_bias=True,
n_splits=2,
moving_params=self.moving_params)
update_act, reset_act = linear
update_gate = linalg.sigmoid(update_act-self.forget_bias)
reset_gate = linalg.sigmoid(reset_act)
reset_state = reset_gate * state
with tf.variable_scope('Candidate'):
hidden_act = linalg.linear([inputs, reset_state],
self.output_size,
add_bias=False,
moving_params=self.moving_params)
hidden_tilde = self.recur_func(hidden_act)
hidden = update_gate * state + (1-update_gate) * hidden_tilde
return hidden, hidden
#=============================================================
@property
def state_size(self):
return self.output_size
| apache-2.0 | -7,597,540,596,920,359,000 | 34.795455 | 68 | 0.486984 | false |
cbenhagen/kivy | kivy/uix/textinput.py | 1 | 108858 | # -*- encoding: utf-8 -*-
'''
Text Input
==========
.. versionadded:: 1.0.4
.. image:: images/textinput-mono.jpg
.. image:: images/textinput-multi.jpg
The :class:`TextInput` widget provides a box for editable plain text.
Unicode, multiline, cursor navigation, selection and clipboard features
are supported.
The :class:`TextInput` uses two different coordinate systems:
* (x, y) - coordinates in pixels, mostly used for rendering on screen.
* (row, col) - cursor index in characters / lines, used for selection
and cursor movement.
Usage example
-------------
To create a multiline :class:`TextInput` (the 'enter' key adds a new line)::
from kivy.uix.textinput import TextInput
textinput = TextInput(text='Hello world')
To create a singleline :class:`TextInput`, set the :class:`TextInput.multiline`
property to False (the 'enter' key will defocus the TextInput and emit an
'on_text_validate' event)::
def on_enter(instance, value):
print('User pressed enter in', instance)
textinput = TextInput(text='Hello world', multiline=False)
textinput.bind(on_text_validate=on_enter)
The textinput's text is stored in its :attr:`TextInput.text` property. To run a
callback when the text changes::
def on_text(instance, value):
        print('The widget', instance, 'has:', value)
textinput = TextInput()
textinput.bind(text=on_text)
You can set the :class:`focus <kivy.uix.behaviors.FocusBehavior>` to a
Textinput, meaning that the input box will be highlighted and keyboard focus
will be requested::
textinput = TextInput(focus=True)
The textinput is defocused if the 'escape' key is pressed, or if another
widget requests the keyboard. You can bind a callback to the focus property to
get notified of focus changes::
def on_focus(instance, value):
if value:
print('User focused', instance)
else:
print('User defocused', instance)
textinput = TextInput()
textinput.bind(focus=on_focus)
See :class:`~kivy.uix.behaviors.FocusBehavior`, from which the
:class:`TextInput` inherits, for more details.
Selection
---------
The selection is automatically updated when the cursor position changes.
You can get the currently selected text from the
:attr:`TextInput.selection_text` property.
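For example, to select a range programmatically and read it back (a minimal
sketch; the call is scheduled with :class:`~kivy.clock.Clock` so that the text
has already been processed)::
    from kivy.clock import Clock
    textinput = TextInput(text='Hello world')
    def select_hello(dt):
        textinput.select_text(0, 5)
        print(textinput.selection_text)  # 'Hello'
    Clock.schedule_once(select_hello)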
Filtering
---------
You can control which text can be added to the :class:`TextInput` by
overwriting :meth:`TextInput.insert_text`. Every string that is typed, pasted
or inserted by any other means into the :class:`TextInput` is passed through
this function. By overwriting it you can reject or change unwanted characters.
For example, to write only in capitalized characters::
class CapitalInput(TextInput):
def insert_text(self, substring, from_undo=False):
s = substring.upper()
return super(CapitalInput, self).insert_text(s,\
from_undo=from_undo)
Or to only allow floats (0 - 9 and a single period)::
class FloatInput(TextInput):
pat = re.compile('[^0-9]')
def insert_text(self, substring, from_undo=False):
pat = self.pat
if '.' in self.text:
s = re.sub(pat, '', substring)
else:
s = '.'.join([re.sub(pat, '', s) for s in\
substring.split('.', 1)])
return super(FloatInput, self).insert_text(s, from_undo=from_undo)
Default shortcuts
-----------------
=============== ========================================================
Shortcuts Description
--------------- --------------------------------------------------------
Left Move cursor to left
Right Move cursor to right
Up Move cursor to up
Down Move cursor to down
Home Move cursor at the beginning of the line
End Move cursor at the end of the line
PageUp Move cursor to 3 lines before
PageDown Move cursor to 3 lines after
Backspace Delete the selection or character before the cursor
Del             Delete the selection or character after the cursor
Shift + <dir> Start a text selection. Dir can be Up, Down, Left or
Right
Control + c Copy selection
Control + x Cut selection
Control + v     Paste selection
Control + a Select all the content
Control + z undo
Control + r redo
=============== ========================================================
.. note::
To enable Emacs-style keyboard shortcuts, you can use
:class:`~kivy.uix.behaviors.emacs.EmacsBehavior`.
'''
__all__ = ('TextInput', )
import re
import sys
import string
from functools import partial
from os import environ
from weakref import ref
from kivy.animation import Animation
from kivy.base import EventLoop
from kivy.cache import Cache
from kivy.clock import Clock
from kivy.config import Config
from kivy.compat import PY2
from kivy.logger import Logger
from kivy.metrics import inch
from kivy.utils import boundary, platform
from kivy.uix.behaviors import FocusBehavior
from kivy.core.text import Label
from kivy.graphics import Color, Rectangle, PushMatrix, PopMatrix, Callback
from kivy.graphics.context_instructions import Transform
from kivy.graphics.texture import Texture
from kivy.uix.widget import Widget
from kivy.uix.bubble import Bubble
from kivy.uix.behaviors import ButtonBehavior
from kivy.uix.image import Image
from kivy.properties import StringProperty, NumericProperty, \
BooleanProperty, AliasProperty, \
ListProperty, ObjectProperty, VariableListProperty, OptionProperty
Cache_register = Cache.register
Cache_append = Cache.append
Cache_get = Cache.get
Cache_remove = Cache.remove
Cache_register('textinput.label', timeout=60.)
Cache_register('textinput.width', timeout=60.)
FL_IS_NEWLINE = 0x01
# late binding
Clipboard = None
CutBuffer = None
MarkupLabel = None
_platform = platform
# for reloading, we need to keep a list of textinput to retrigger the rendering
_textinput_list = []
# cache the result
_is_osx = sys.platform == 'darwin'
# When we are generating documentation, Config doesn't exist
_is_desktop = False
if Config:
_is_desktop = Config.getboolean('kivy', 'desktop')
# register an observer to clear the textinput cache when OpenGL will reload
if 'KIVY_DOC' not in environ:
def _textinput_clear_cache(*l):
Cache_remove('textinput.label')
Cache_remove('textinput.width')
for wr in _textinput_list[:]:
textinput = wr()
if textinput is None:
_textinput_list.remove(wr)
else:
textinput._trigger_refresh_text()
textinput._refresh_hint_text()
from kivy.graphics.context import get_context
get_context().add_reload_observer(_textinput_clear_cache, True)
class Selector(ButtonBehavior, Image):
# Internal class for managing the selection Handles.
window = ObjectProperty()
target = ObjectProperty()
matrix = ObjectProperty()
def __init__(self, **kwargs):
super(Selector, self).__init__(**kwargs)
self.window.bind(on_touch_down=self.on_window_touch_down)
self.matrix = self.target.get_window_matrix()
with self.canvas.before:
Callback(self.update_transform)
PushMatrix()
self.transform = Transform()
with self.canvas.after:
PopMatrix()
def update_transform(self, cb):
m = self.target.get_window_matrix()
if self.matrix != m:
self.matrix = m
self.transform.identity()
self.transform.transform(self.matrix)
def transform_touch(self, touch):
matrix = self.matrix.inverse()
touch.apply_transform_2d(
lambda x, y: matrix.transform_point(x, y, 0)[:2])
def on_window_touch_down(self, win, touch):
if self.parent is not win:
return
try:
touch.push()
self.transform_touch(touch)
self._touch_diff = self.top - touch.y
if self.collide_point(*touch.pos):
FocusBehavior.ignored_touch.append(touch)
return super(Selector, self).on_touch_down(touch)
finally:
touch.pop()
class TextInputCutCopyPaste(Bubble):
# Internal class used for showing the little bubble popup when
# copy/cut/paste happen.
textinput = ObjectProperty(None)
''' Holds a reference to the TextInput this Bubble belongs to.
'''
but_cut = ObjectProperty(None)
but_copy = ObjectProperty(None)
but_paste = ObjectProperty(None)
but_selectall = ObjectProperty(None)
matrix = ObjectProperty(None)
def __init__(self, **kwargs):
self.mode = 'normal'
super(TextInputCutCopyPaste, self).__init__(**kwargs)
Clock.schedule_interval(self._check_parent, .5)
self.matrix = self.textinput.get_window_matrix()
with self.canvas.before:
Callback(self.update_transform)
PushMatrix()
self.transform = Transform()
with self.canvas.after:
PopMatrix()
def update_transform(self, cb):
m = self.textinput.get_window_matrix()
if self.matrix != m:
self.matrix = m
self.transform.identity()
self.transform.transform(self.matrix)
def transform_touch(self, touch):
matrix = self.matrix.inverse()
touch.apply_transform_2d(
lambda x, y: matrix.transform_point(x, y, 0)[:2])
def on_touch_down(self, touch):
try:
touch.push()
self.transform_touch(touch)
if self.collide_point(*touch.pos):
FocusBehavior.ignored_touch.append(touch)
return super(TextInputCutCopyPaste, self).on_touch_down(touch)
finally:
touch.pop()
def on_textinput(self, instance, value):
global Clipboard
if value and not Clipboard and not _is_desktop:
value._ensure_clipboard()
def _check_parent(self, dt):
# this is a prevention to get the Bubble staying on the screen, if the
# attached textinput is not on the screen anymore.
parent = self.textinput
while parent is not None:
if parent == parent.parent:
break
parent = parent.parent
if parent is None:
Clock.unschedule(self._check_parent)
if self.textinput:
self.textinput._hide_cut_copy_paste()
def on_parent(self, instance, value):
parent = self.textinput
mode = self.mode
if parent:
self.clear_widgets()
if mode == 'paste':
# show only paste on long touch
self.but_selectall.opacity = 1
widget_list = [self.but_selectall, ]
if not parent.readonly:
widget_list.append(self.but_paste)
elif parent.readonly:
# show only copy for read only text input
widget_list = (self.but_copy, )
else:
# normal mode
widget_list = (self.but_cut, self.but_copy, self.but_paste)
for widget in widget_list:
self.add_widget(widget)
def do(self, action):
textinput = self.textinput
if action == 'cut':
textinput._cut(textinput.selection_text)
elif action == 'copy':
textinput.copy()
elif action == 'paste':
textinput.paste()
elif action == 'selectall':
textinput.select_all()
self.mode = ''
anim = Animation(opacity=0, d=.333)
anim.bind(on_complete=lambda *args:
self.on_parent(self, self.parent))
anim.start(self.but_selectall)
return
self.hide()
def hide(self):
parent = self.parent
if not parent:
return
anim = Animation(opacity=0, d=.225)
anim.bind(on_complete=lambda *args: parent.remove_widget(self))
anim.start(self)
class TextInput(FocusBehavior, Widget):
'''TextInput class. See module documentation for more information.
:Events:
`on_text_validate`
Fired only in multiline=False mode when the user hits 'enter'.
This will also unfocus the textinput.
`on_double_tap`
Fired when a double tap happens in the text input. The default
behavior selects the text around the cursor position. More info at
:meth:`on_double_tap`.
`on_triple_tap`
Fired when a triple tap happens in the text input. The default
behavior selects the line around the cursor position. More info at
:meth:`on_triple_tap`.
`on_quad_touch`
Fired when four fingers are touching the text input. The default
behavior selects the whole text. More info at
:meth:`on_quad_touch`.
.. warning::
When changing a :class:`TextInput` property that requires re-drawing,
e.g. modifying the :attr:`text`, the updates occur on the next
clock cycle and not instantly. This might cause any changes to the
:class:`TextInput` that occur between the modification and the next
cycle to be ignored, or to use previous values. For example, after
        an update to the :attr:`text`, changing the cursor in the same clock
frame will move it using the previous text and will likely end up in an
incorrect position. The solution is to schedule any updates to occur
on the next clock cycle using
:meth:`~kivy.clock.ClockBase.schedule_once`.
.. Note::
Selection is cancelled when TextInput is focused. If you need to
show selection when TextInput is focused, you should delay
(use Clock.schedule) the call to the functions for selecting
text (select_all, select_text).
.. versionchanged:: 1.9.0
:class:`TextInput` now inherits from
:class:`~kivy.uix.behaviors.FocusBehavior`.
:attr:`~kivy.uix.behaviors.FocusBehavior.keyboard_mode`,
:meth:`~kivy.uix.behaviors.FocusBehavior.show_keyboard`,
:meth:`~kivy.uix.behaviors.FocusBehavior.hide_keyboard`,
:meth:`~kivy.uix.behaviors.FocusBehavior.focus`,
and :attr:`~kivy.uix.behaviors.FocusBehavior.input_type`
have been removed since they are now inherited
from :class:`~kivy.uix.behaviors.FocusBehavior`.
.. versionchanged:: 1.7.0
`on_double_tap`, `on_triple_tap` and `on_quad_touch` events added.
'''
__events__ = ('on_text_validate', 'on_double_tap', 'on_triple_tap',
'on_quad_touch')
def __init__(self, **kwargs):
self.is_focusable = kwargs.get('is_focusable', True)
self._cursor_blink_time = Clock.get_time()
self._cursor = [0, 0]
self._selection = False
self._selection_finished = True
self._selection_touch = None
self.selection_text = u''
self._selection_from = None
self._selection_to = None
self._selection_callback = None
self._handle_left = None
self._handle_right = None
self._handle_middle = None
self._bubble = None
self._lines_flags = []
self._lines_labels = []
self._lines_rects = []
self._hint_text_flags = []
self._hint_text_labels = []
self._hint_text_rects = []
self._label_cached = None
self._line_options = None
self._keyboard_mode = Config.get('kivy', 'keyboard_mode')
self._command_mode = False
self._command = ''
self.reset_undo()
self._touch_count = 0
self._ctrl_l = False
self._ctrl_r = False
self._alt_l = False
self._alt_r = False
self.interesting_keys = {
8: 'backspace',
13: 'enter',
127: 'del',
271: 'enter',
273: 'cursor_up',
274: 'cursor_down',
275: 'cursor_right',
276: 'cursor_left',
278: 'cursor_home',
279: 'cursor_end',
280: 'cursor_pgup',
281: 'cursor_pgdown',
303: 'shift_L',
304: 'shift_R',
305: 'ctrl_L',
306: 'ctrl_R',
308: 'alt_L',
307: 'alt_R'}
super(TextInput, self).__init__(**kwargs)
fbind = self.fbind
refresh_line_options = self._trigger_refresh_line_options
update_text_options = self._update_text_options
fbind('font_size', refresh_line_options)
fbind('font_name', refresh_line_options)
def handle_readonly(instance, value):
if value and (not _is_desktop or not self.allow_copy):
self.is_focusable = False
fbind('padding', update_text_options)
fbind('tab_width', update_text_options)
fbind('font_size', update_text_options)
fbind('font_name', update_text_options)
fbind('size', update_text_options)
fbind('password', update_text_options)
fbind('password_mask', update_text_options)
fbind('pos', self._trigger_update_graphics)
fbind('readonly', handle_readonly)
fbind('focus', self._on_textinput_focused)
handle_readonly(self, self.readonly)
handles = self._trigger_position_handles = Clock.create_trigger(
self._position_handles)
self._trigger_show_handles = Clock.create_trigger(
self._show_handles, .05)
self._trigger_update_cutbuffer = Clock.create_trigger(
self._update_cutbuffer)
refresh_line_options()
self._trigger_refresh_text()
fbind('pos', handles)
fbind('size', handles)
# when the gl context is reloaded, trigger the text rendering again.
_textinput_list.append(ref(self, TextInput._reload_remove_observer))
if platform == 'linux':
self._ensure_clipboard()
def on_text_validate(self):
pass
def cursor_index(self, cursor=None):
'''Return the cursor index in the text/value.
'''
if not cursor:
cursor = self.cursor
try:
l = self._lines
if len(l) == 0:
return 0
lf = self._lines_flags
index, cr = cursor
for row in range(cr):
if row >= len(l):
continue
index += len(l[row])
if lf[row] & FL_IS_NEWLINE:
index += 1
if lf[cr] & FL_IS_NEWLINE:
index += 1
return index
except IndexError:
return 0
def cursor_offset(self):
'''Get the cursor x offset on the current line.
'''
offset = 0
row = self.cursor_row
col = self.cursor_col
_lines = self._lines
if col and row < len(_lines):
offset = self._get_text_width(
_lines[row][:col], self.tab_width,
self._label_cached)
return offset
def get_cursor_from_index(self, index):
'''Return the (row, col) of the cursor from text index.
'''
index = boundary(index, 0, len(self.text))
if index <= 0:
return 0, 0
lf = self._lines_flags
l = self._lines
i = 0
for row in range(len(l)):
ni = i + len(l[row])
if lf[row] & FL_IS_NEWLINE:
ni += 1
i += 1
if ni >= index:
return index - i, row
i = ni
return index, row
def select_text(self, start, end):
''' Select a portion of text displayed in this TextInput.
.. versionadded:: 1.4.0
:Parameters:
`start`
Index of textinput.text from where to start selection
`end`
Index of textinput.text till which the selection should be
displayed
'''
if end < start:
            raise Exception('end must be greater than or equal to start')
m = len(self.text)
self._selection_from = boundary(start, 0, m)
self._selection_to = boundary(end, 0, m)
self._selection_finished = True
self._update_selection(True)
self._update_graphics_selection()
def select_all(self):
''' Select all of the text displayed in this TextInput.
.. versionadded:: 1.4.0
'''
self.select_text(0, len(self.text))
re_indent = re.compile('^(\s*|)')
def _auto_indent(self, substring):
index = self.cursor_index()
_text = self._get_text(encode=False)
if index > 0:
line_start = _text.rfind('\n', 0, index)
if line_start > -1:
line = _text[line_start + 1:index]
indent = self.re_indent.match(line).group()
substring += indent
return substring
def insert_text(self, substring, from_undo=False):
'''Insert new text at the current cursor position. Override this
function in order to pre-process text for input validation.
'''
if self.readonly or not substring:
return
if isinstance(substring, bytes):
substring = substring.decode('utf8')
if self.replace_crlf:
substring = substring.replace(u'\r\n', u'\n')
mode = self.input_filter
if mode is not None:
chr = type(substring)
if chr is bytes:
int_pat = self._insert_int_patb
else:
int_pat = self._insert_int_patu
if mode == 'int':
substring = re.sub(int_pat, chr(''), substring)
elif mode == 'float':
if '.' in self.text:
substring = re.sub(int_pat, chr(''), substring)
else:
substring = '.'.join([re.sub(int_pat, chr(''), k) for k
in substring.split(chr('.'), 1)])
else:
substring = mode(substring, from_undo)
if not substring:
return
self._hide_handles(EventLoop.window)
if not from_undo and self.multiline and self.auto_indent \
and substring == u'\n':
substring = self._auto_indent(substring)
cc, cr = self.cursor
sci = self.cursor_index
ci = sci()
text = self._lines[cr]
len_str = len(substring)
new_text = text[:cc] + substring + text[cc:]
self._set_line_text(cr, new_text)
wrap = (self._get_text_width(
new_text,
self.tab_width,
self._label_cached) > self.width)
if len_str > 1 or substring == u'\n' or wrap:
# Avoid refreshing text on every keystroke.
# Allows for faster typing of text when the amount of text in
# TextInput gets large.
start, finish, lines,\
lineflags, len_lines = self._get_line_from_cursor(cr, new_text)
# calling trigger here could lead to wrong cursor positioning
# and repeating of text when keys are added rapidly in a automated
# fashion. From Android Keyboard for example.
self._refresh_text_from_property('insert', start, finish, lines,
lineflags, len_lines)
self.cursor = self.get_cursor_from_index(ci + len_str)
# handle undo and redo
self._set_unredo_insert(ci, ci + len_str, substring, from_undo)
def _get_line_from_cursor(self, start, new_text):
# get current paragraph from cursor position
finish = start
lines = self._lines
linesflags = self._lines_flags
if start and not linesflags[start]:
start -= 1
new_text = u''.join((lines[start], new_text))
try:
while not linesflags[finish + 1]:
new_text = u''.join((new_text, lines[finish + 1]))
finish += 1
except IndexError:
pass
lines, lineflags = self._split_smart(new_text)
len_lines = max(1, len(lines))
return start, finish, lines, lineflags, len_lines
def _set_unredo_insert(self, ci, sci, substring, from_undo):
# handle undo and redo
if from_undo:
return
self._undo.append({'undo_command': ('insert', ci, sci),
'redo_command': (ci, substring)})
# reset redo when undo is appended to
self._redo = []
def reset_undo(self):
'''Reset undo and redo lists from memory.
.. versionadded:: 1.3.0
'''
self._redo = self._undo = []
def do_redo(self):
'''Do redo operation.
.. versionadded:: 1.3.0
This action re-does any command that has been un-done by
do_undo/ctrl+z. This function is automatically called when
`ctrl+r` keys are pressed.
'''
try:
x_item = self._redo.pop()
undo_type = x_item['undo_command'][0]
_get_cusror_from_index = self.get_cursor_from_index
if undo_type == 'insert':
ci, substring = x_item['redo_command']
self.cursor = _get_cusror_from_index(ci)
self.insert_text(substring, True)
elif undo_type == 'bkspc':
self.cursor = _get_cusror_from_index(x_item['redo_command'])
self.do_backspace(from_undo=True)
elif undo_type == 'shiftln':
direction, rows, cursor = x_item['redo_command'][1:]
self._shift_lines(direction, rows, cursor, True)
else:
# delsel
ci, sci = x_item['redo_command']
self._selection_from = ci
self._selection_to = sci
self._selection = True
self.delete_selection(True)
self.cursor = _get_cusror_from_index(ci)
self._undo.append(x_item)
except IndexError:
# reached at top of undo list
pass
def do_undo(self):
'''Do undo operation.
.. versionadded:: 1.3.0
This action un-does any edits that have been made since the last
call to reset_undo().
This function is automatically called when `ctrl+z` keys are pressed.
'''
try:
x_item = self._undo.pop()
undo_type = x_item['undo_command'][0]
self.cursor = self.get_cursor_from_index(x_item['undo_command'][1])
if undo_type == 'insert':
ci, sci = x_item['undo_command'][1:]
self._selection_from = ci
self._selection_to = sci
self._selection = True
self.delete_selection(True)
elif undo_type == 'bkspc':
substring = x_item['undo_command'][2:][0]
self.insert_text(substring, True)
elif undo_type == 'shiftln':
direction, rows, cursor = x_item['undo_command'][1:]
self._shift_lines(direction, rows, cursor, True)
else:
# delsel
substring = x_item['undo_command'][2:][0]
self.insert_text(substring, True)
self._redo.append(x_item)
except IndexError:
# reached at top of undo list
pass
def do_backspace(self, from_undo=False, mode='bkspc'):
'''Do backspace operation from the current cursor position.
This action might do several things:
- removing the current selection if available.
- removing the previous char and move the cursor back.
- do nothing, if we are at the start.
'''
if self.readonly:
return
cc, cr = self.cursor
_lines = self._lines
text = _lines[cr]
cursor_index = self.cursor_index()
text_last_line = _lines[cr - 1]
if cc == 0 and cr == 0:
return
_lines_flags = self._lines_flags
start = cr
if cc == 0:
substring = u'\n' if _lines_flags[cr] else u' '
new_text = text_last_line + text
self._set_line_text(cr - 1, new_text)
self._delete_line(cr)
start = cr - 1
else:
#ch = text[cc-1]
substring = text[cc - 1]
new_text = text[:cc - 1] + text[cc:]
self._set_line_text(cr, new_text)
# refresh just the current line instead of the whole text
start, finish, lines, lineflags, len_lines =\
self._get_line_from_cursor(start, new_text)
# avoid trigger refresh, leads to issue with
# keys/text send rapidly through code.
self._refresh_text_from_property('del', start, finish, lines,
lineflags, len_lines)
self.cursor = self.get_cursor_from_index(cursor_index - 1)
# handle undo and redo
self._set_undo_redo_bkspc(
cursor_index,
cursor_index - 1,
substring, from_undo)
def _set_undo_redo_bkspc(self, ol_index, new_index, substring, from_undo):
# handle undo and redo for backspace
if from_undo:
return
self._undo.append({
'undo_command': ('bkspc', new_index, substring),
'redo_command': ol_index})
#reset redo when undo is appended to
self._redo = []
_re_whitespace = re.compile(r'\s+')
def _move_cursor_word_left(self, index=None):
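        # Compute the (col, row) used for a ctrl+left jump: walk back past any
        # whitespace to the start of the previous word, moving up across
        # wrapped lines whenever the column reaches 0.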
pos = index or self.cursor_index()
if pos == 0:
return self.cursor
lines = self._lines
col, row = self.get_cursor_from_index(pos)
if col == 0:
row -= 1
col = len(lines[row])
while True:
matches = list(self._re_whitespace.finditer(lines[row], 0, col))
if not matches:
if col == 0:
if row == 0:
return 0, 0
row -= 1
col = len(lines[row])
continue
return 0, row
match = matches[-1]
mpos = match.end()
if mpos == col:
if len(matches) > 1:
match = matches[-2]
mpos = match.end()
else:
if match.start() == 0:
if row == 0:
return 0, 0
row -= 1
col = len(lines[row])
continue
return 0, row
col = mpos
return col, row
def _move_cursor_word_right(self, index=None):
pos = index or self.cursor_index()
col, row = self.get_cursor_from_index(pos)
lines = self._lines
mrow = len(lines) - 1
if row == mrow and col == len(lines[row]):
return col, row
if col == len(lines[row]):
row += 1
col = 0
while True:
matches = list(self._re_whitespace.finditer(lines[row], col))
if not matches:
if col == len(lines[row]):
if row == mrow:
return col, row
row += 1
col = 0
continue
return len(lines[row]), row
match = matches[0]
mpos = match.start()
if mpos == col:
if len(matches) > 1:
match = matches[1]
mpos = match.start()
else:
if match.end() == len(lines[row]):
if row == mrow:
return col, row
row += 1
col = 0
continue
return len(lines[row]), row
col = mpos
return col, row
def _expand_range(self, ifrom, ito=None):
if ito is None:
ito = ifrom
rfrom = self.get_cursor_from_index(ifrom)[1]
rtcol, rto = self.get_cursor_from_index(ito)
rfrom, rto = self._expand_rows(rfrom, rto + 1 if rtcol else rto)
return (self.cursor_index((0, rfrom)),
self.cursor_index((0, rto)))
def _expand_rows(self, rfrom, rto=None):
if rto is None or rto == rfrom:
rto = rfrom + 1
lines = self._lines
flags = list(reversed(self._lines_flags))
while rfrom > 0 and not (flags[rfrom - 1] & FL_IS_NEWLINE):
rfrom -= 1
rmax = len(lines) - 1
while 0 < rto < rmax and not (flags[rto - 1] & FL_IS_NEWLINE):
rto += 1
return max(0, rfrom), min(rmax, rto)
def _shift_lines(self, direction, rows=None, old_cursor=None,
from_undo=False):
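        # Move the current line or selection one displayed block up or down
        # (alt+up / alt+down): reorder the affected slices of _lines,
        # _lines_flags, labels and rects, restore the selection afterwards and
        # push the operation onto the undo/redo stacks unless replaying one.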
if self._selection_callback:
if from_undo:
self._selection_callback.cancel()
else:
return
lines = self._lines
flags = list(reversed(self._lines_flags))
labels = self._lines_labels
rects = self._lines_rects
orig_cursor = self.cursor
sel = None
if old_cursor is not None:
self.cursor = old_cursor
if not rows:
sindex = self.selection_from
eindex = self.selection_to
if (sindex or eindex) and sindex != eindex:
sindex, eindex = tuple(sorted((sindex, eindex)))
sindex, eindex = self._expand_range(sindex, eindex)
else:
sindex, eindex = self._expand_range(self.cursor_index())
srow = self.get_cursor_from_index(sindex)[1]
erow = self.get_cursor_from_index(eindex)[1]
sel = sindex, eindex
if direction < 0 and srow > 0:
psrow, perow = self._expand_rows(srow - 1)
rows = ((srow, erow), (psrow, perow))
elif direction > 0 and erow < len(lines) - 1:
psrow, perow = self._expand_rows(erow)
rows = ((srow, erow), (psrow, perow))
if rows:
(srow, erow), (psrow, perow) = rows
if direction < 0:
m1srow, m1erow = psrow, perow
m2srow, m2erow = srow, erow
cdiff = psrow - perow
xdiff = srow - erow
else:
m1srow, m1erow = srow, erow
m2srow, m2erow = psrow, perow
cdiff = perow - psrow
xdiff = erow - srow
self._lines_flags = list(reversed(
flags[:m1srow] + flags[m2srow:m2erow] + flags[m1srow:m1erow] +
flags[m2erow:]))
self._lines = (lines[:m1srow] + lines[m2srow:m2erow] +
lines[m1srow:m1erow] + lines[m2erow:])
self._lines_labels = (labels[:m1srow] + labels[m2srow:m2erow] +
labels[m1srow:m1erow] + labels[m2erow:])
self._lines_rects = (rects[:m1srow] + rects[m2srow:m2erow] +
rects[m1srow:m1erow] + rects[m2erow:])
self._trigger_update_graphics()
csrow = srow + cdiff
cerow = erow + cdiff
sel = (self.cursor_index((0, csrow)),
self.cursor_index((0, cerow)))
self.cursor = self.cursor_col, self.cursor_row + cdiff
if not from_undo:
undo_rows = ((srow + cdiff, erow + cdiff),
(psrow - xdiff, perow - xdiff))
self._undo.append({
'undo_command': ('shiftln', direction * -1, undo_rows,
self.cursor),
'redo_command': ('shiftln', direction, rows, orig_cursor),
})
self._redo = []
if sel:
def cb(dt):
self.select_text(*sel)
self._selection_callback = None
self._selection_callback = Clock.schedule_once(cb)
def do_cursor_movement(self, action, control=False, alt=False):
        '''Move the cursor relative to its current position.
Action can be one of :
- cursor_left: move the cursor to the left
- cursor_right: move the cursor to the right
- cursor_up: move the cursor on the previous line
- cursor_down: move the cursor on the next line
- cursor_home: move the cursor at the start of the current line
- cursor_end: move the cursor at the end of current line
- cursor_pgup: move one "page" before
- cursor_pgdown: move one "page" after
In addition, the behavior of certain actions can be modified:
- control + cursor_left: move the cursor one word to the left
- control + cursor_right: move the cursor one word to the right
- control + cursor_up: scroll up one line
- control + cursor_down: scroll down one line
- control + cursor_home: go to beginning of text
- control + cursor_end: go to end of text
- alt + cursor_up: shift line(s) up
- alt + cursor_down: shift line(s) down
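        For example (a small sketch; the resulting position depends on the
        current text)::
            # jump to the very end of the text, as with ctrl+end
            textinput.do_cursor_movement('cursor_end', control=True)
            # move one word to the left, as with ctrl+left
            textinput.do_cursor_movement('cursor_left', control=True)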
.. versionchanged:: 1.9.1
'''
pgmove_speed = int(self.height /
(self.line_height + self.line_spacing) - 1)
col, row = self.cursor
if action == 'cursor_up':
if self.multiline and control:
self.scroll_y = max(0, self.scroll_y - self.line_height)
elif not self.readonly and self.multiline and alt:
self._shift_lines(-1)
return
else:
row = max(row - 1, 0)
col = min(len(self._lines[row]), col)
elif action == 'cursor_down':
if self.multiline and control:
maxy = self.minimum_height - self.height
self.scroll_y = max(0, min(maxy,
self.scroll_y + self.line_height))
elif not self.readonly and self.multiline and alt:
self._shift_lines(1)
return
else:
row = min(row + 1, len(self._lines) - 1)
col = min(len(self._lines[row]), col)
elif action == 'cursor_left':
if not self.password and control:
col, row = self._move_cursor_word_left()
else:
if col == 0:
if row:
row -= 1
col = len(self._lines[row])
else:
col, row = col - 1, row
elif action == 'cursor_right':
if not self.password and control:
col, row = self._move_cursor_word_right()
else:
if col == len(self._lines[row]):
if row < len(self._lines) - 1:
col = 0
row += 1
else:
col, row = col + 1, row
elif action == 'cursor_home':
col = 0
if control:
row = 0
elif action == 'cursor_end':
if control:
row = len(self._lines) - 1
col = len(self._lines[row])
elif action == 'cursor_pgup':
row = max(0, row - pgmove_speed)
col = min(len(self._lines[row]), col)
elif action == 'cursor_pgdown':
row = min(row + pgmove_speed, len(self._lines) - 1)
col = min(len(self._lines[row]), col)
self.cursor = (col, row)
def get_cursor_from_xy(self, x, y):
'''Return the (row, col) of the cursor from an (x, y) position.
'''
padding_left = self.padding[0]
padding_top = self.padding[1]
l = self._lines
dy = self.line_height + self.line_spacing
cx = x - self.x
scrl_y = self.scroll_y
scrl_x = self.scroll_x
scrl_y = scrl_y / dy if scrl_y > 0 else 0
cy = (self.top - padding_top + scrl_y * dy) - y
cy = int(boundary(round(cy / dy - 0.5), 0, len(l) - 1))
_get_text_width = self._get_text_width
_tab_width = self.tab_width
_label_cached = self._label_cached
for i in range(0, len(l[cy])):
if _get_text_width(l[cy][:i], _tab_width, _label_cached) + \
_get_text_width(l[cy][i], _tab_width, _label_cached)*0.6 + \
padding_left > cx + scrl_x:
cx = i
break
return cx, cy
#
# Selection control
#
def cancel_selection(self):
'''Cancel current selection (if any).
'''
self._selection_from = self._selection_to = self.cursor_index()
self._selection = False
self._selection_finished = True
self._selection_touch = None
self._trigger_update_graphics()
def delete_selection(self, from_undo=False):
'''Delete the current text selection (if any).
'''
if self.readonly:
return
self._hide_handles(EventLoop.window)
scrl_x = self.scroll_x
scrl_y = self.scroll_y
cc, cr = self.cursor
if not self._selection:
return
v = self._get_text(encode=False)
a, b = self._selection_from, self._selection_to
if a > b:
a, b = b, a
self.cursor = cursor = self.get_cursor_from_index(a)
start = cursor
finish = self.get_cursor_from_index(b)
cur_line = self._lines[start[1]][:start[0]] +\
self._lines[finish[1]][finish[0]:]
lines, lineflags = self._split_smart(cur_line)
len_lines = len(lines)
if start[1] == finish[1]:
self._set_line_text(start[1], cur_line)
else:
self._refresh_text_from_property('del', start[1], finish[1], lines,
lineflags, len_lines)
self.scroll_x = scrl_x
self.scroll_y = scrl_y
        # handle undo and redo for delete selection
self._set_unredo_delsel(a, b, v[a:b], from_undo)
self.cancel_selection()
def _set_unredo_delsel(self, a, b, substring, from_undo):
# handle undo and redo for backspace
if from_undo:
return
self._undo.append({
'undo_command': ('delsel', a, substring),
'redo_command': (a, b)})
# reset redo when undo is appended to
self._redo = []
def _update_selection(self, finished=False):
'''Update selection text and order of from/to if finished is True.
Can be called multiple times until finished is True.
'''
a, b = self._selection_from, self._selection_to
if a > b:
a, b = b, a
self._selection_finished = finished
_selection_text = self._get_text(encode=False)[a:b]
self.selection_text = ("" if not self.allow_copy else
((self.password_mask * (b - a)) if
self.password else _selection_text))
if not finished:
self._selection = True
else:
self._selection = bool(len(_selection_text))
self._selection_touch = None
if a == 0:
# update graphics only on new line
# allows smoother scrolling, noticeably
# faster when dealing with large text.
self._update_graphics_selection()
#self._trigger_update_graphics()
#
# Touch control
#
def long_touch(self, dt):
if self._selection_to == self._selection_from:
pos = self.to_local(*self._long_touch_pos, relative=True)
self._show_cut_copy_paste(
pos, EventLoop.window, mode='paste')
def on_double_tap(self):
'''This event is dispatched when a double tap happens
inside TextInput. The default behavior is to select the
word around the current cursor position. Override this to provide
different behavior. Alternatively, you can bind to this
event to provide additional functionality.
'''
ci = self.cursor_index()
cc = self.cursor_col
line = self._lines[self.cursor_row]
len_line = len(line)
start = max(0, len(line[:cc]) - line[:cc].rfind(u' ') - 1)
end = line[cc:].find(u' ')
end = end if end > - 1 else (len_line - cc)
Clock.schedule_once(lambda dt: self.select_text(ci - start, ci + end))
def on_triple_tap(self):
'''This event is dispatched when a triple tap happens
inside TextInput. The default behavior is to select the
line around current cursor position. Override this to provide
different behavior. Alternatively, you can bind to this
event to provide additional functionality.
'''
ci = self.cursor_index()
sindex, eindex = self._expand_range(ci)
Clock.schedule_once(lambda dt: self.select_text(sindex, eindex))
def on_quad_touch(self):
'''This event is dispatched when four fingers are touching
inside TextInput. The default behavior is to select all text.
Override this to provide different behavior. Alternatively,
you can bind to this event to provide additional functionality.
'''
Clock.schedule_once(lambda dt: self.select_all())
def on_touch_down(self, touch):
if self.disabled:
return
touch_pos = touch.pos
if not self.collide_point(*touch_pos):
return False
if super(TextInput, self).on_touch_down(touch):
return True
# Check for scroll wheel
if 'button' in touch.profile and touch.button.startswith('scroll'):
scroll_type = touch.button[6:]
if scroll_type == 'down':
if self.multiline:
if self.scroll_y <= 0:
return
self.scroll_y -= self.line_height
else:
if self.scroll_x <= 0:
return
self.scroll_x -= self.line_height
if scroll_type == 'up':
if self.multiline:
if (self._lines_rects[-1].pos[1] > self.y +
self.line_height):
return
self.scroll_y += self.line_height
else:
if (self.scroll_x + self.width >=
self._lines_rects[-1].texture.size[0]):
return
self.scroll_x += self.line_height
touch.grab(self)
self._touch_count += 1
if touch.is_double_tap:
self.dispatch('on_double_tap')
if touch.is_triple_tap:
self.dispatch('on_triple_tap')
if self._touch_count == 4:
self.dispatch('on_quad_touch')
self._hide_cut_copy_paste(EventLoop.window)
# schedule long touch for paste
self._long_touch_pos = touch.pos
Clock.schedule_once(self.long_touch, .5)
self.cursor = self.get_cursor_from_xy(*touch_pos)
if not self._selection_touch:
self.cancel_selection()
self._selection_touch = touch
self._selection_from = self._selection_to = self.cursor_index()
self._update_selection()
if CutBuffer and 'button' in touch.profile and touch.button == 'middle':
self.insert_text(CutBuffer.get_cutbuffer())
return True
return False
def on_touch_move(self, touch):
if touch.grab_current is not self:
return
if not self.focus:
touch.ungrab(self)
if self._selection_touch is touch:
self._selection_touch = None
return False
if self._selection_touch is touch:
self.cursor = self.get_cursor_from_xy(touch.x, touch.y)
self._selection_to = self.cursor_index()
self._update_selection()
return True
def on_touch_up(self, touch):
if touch.grab_current is not self:
return
touch.ungrab(self)
self._touch_count -= 1
# schedule long touch for paste
Clock.unschedule(self.long_touch)
if not self.focus:
return False
if self._selection_touch is touch:
self._selection_to = self.cursor_index()
self._update_selection(True)
# show Bubble
win = EventLoop.window
if self._selection_to != self._selection_from:
self._show_cut_copy_paste(touch.pos, win)
elif self.use_handles:
self._hide_handles()
handle_middle = self._handle_middle
if handle_middle is None:
self._handle_middle = handle_middle = Selector(
source=self.handle_image_middle,
window=win,
target=self,
size_hint=(None, None),
size=('45dp', '45dp'))
handle_middle.bind(on_press=self._handle_pressed,
on_touch_move=self._handle_move,
on_release=self._handle_released)
if not self._handle_middle.parent and self.text:
EventLoop.window.add_widget(handle_middle, canvas='after')
self._position_handles(mode='middle')
return True
def _handle_pressed(self, instance):
self._hide_cut_copy_paste()
sf, st = self._selection_from, self.selection_to
if sf > st:
self._selection_from, self._selection_to = st, sf
def _handle_released(self, instance):
sf, st = self._selection_from, self.selection_to
if sf == st:
return
self._update_selection()
self._show_cut_copy_paste(
(instance.right if instance is self._handle_left else instance.x,
instance.top + self.line_height),
EventLoop.window)
def _handle_move(self, instance, touch):
if touch.grab_current != instance:
return
get_cursor = self.get_cursor_from_xy
handle_right = self._handle_right
handle_left = self._handle_left
handle_middle = self._handle_middle
try:
touch.push()
touch.apply_transform_2d(self.to_widget)
x, y = touch.pos
finally:
touch.pop()
cursor = get_cursor(
x,
y + instance._touch_diff + (self.line_height / 2))
if instance != touch.grab_current:
return
if instance == handle_middle:
self.cursor = cursor
self._position_handles(mode='middle')
return
ci = self.cursor_index(cursor=cursor)
sf, st = self._selection_from, self.selection_to
if instance == handle_left:
self._selection_from = ci
elif instance == handle_right:
self._selection_to = ci
self._trigger_update_graphics()
self._trigger_position_handles()
def _position_handles(self, *args, **kwargs):
if not self.text:
return
mode = kwargs.get('mode', 'both')
lh = self.line_height
handle_middle = self._handle_middle
if handle_middle:
hp_mid = self.cursor_pos
pos = self.to_local(*hp_mid, relative=True)
handle_middle.x = pos[0] - handle_middle.width / 2
handle_middle.top = pos[1] - lh
if mode[0] == 'm':
return
group = self.canvas.get_group('selection')
if not group:
return
EventLoop.window.remove_widget(self._handle_middle)
handle_left = self._handle_left
if not handle_left:
return
hp_left = group[2].pos
handle_left.pos = self.to_local(*hp_left, relative=True)
handle_left.x -= handle_left.width
handle_left.y -= handle_left.height
handle_right = self._handle_right
last_rect = group[-1]
hp_right = last_rect.pos[0], last_rect.pos[1]
x, y = self.to_local(*hp_right, relative=True)
handle_right.x = x + last_rect.size[0]
handle_right.y = y - handle_right.height
def _hide_handles(self, win=None):
win = win or EventLoop.window
if win is None:
return
win.remove_widget(self._handle_right)
win.remove_widget(self._handle_left)
win.remove_widget(self._handle_middle)
def _show_handles(self, dt):
if not self.use_handles or not self.text:
return
win = EventLoop.window
handle_right = self._handle_right
handle_left = self._handle_left
if self._handle_left is None:
self._handle_left = handle_left = Selector(
source=self.handle_image_left,
target=self,
window=win,
size_hint=(None, None),
size=('45dp', '45dp'))
handle_left.bind(on_press=self._handle_pressed,
on_touch_move=self._handle_move,
on_release=self._handle_released)
self._handle_right = handle_right = Selector(
source=self.handle_image_right,
target=self,
window=win,
size_hint=(None, None),
size=('45dp', '45dp'))
handle_right.bind(on_press=self._handle_pressed,
on_touch_move=self._handle_move,
on_release=self._handle_released)
else:
if self._handle_left.parent:
self._position_handles()
return
if not self.parent:
return
self._trigger_position_handles()
if self.selection_from != self.selection_to:
self._handle_left.opacity = self._handle_right.opacity = 0
win.add_widget(self._handle_left, canvas='after')
win.add_widget(self._handle_right, canvas='after')
anim = Animation(opacity=1, d=.4)
anim.start(self._handle_right)
anim.start(self._handle_left)
def _show_cut_copy_paste(self, pos, win, parent_changed=False,
mode='', pos_in_window=False, *l):
# Show a bubble with cut copy and paste buttons
if not self.use_bubble:
return
bubble = self._bubble
if bubble is None:
self._bubble = bubble = TextInputCutCopyPaste(textinput=self)
self.fbind('parent', self._show_cut_copy_paste, pos, win, True)
win.bind(
size=lambda *args: self._hide_cut_copy_paste(win))
self.bind(cursor_pos=lambda *args: self._hide_cut_copy_paste(win))
else:
win.remove_widget(bubble)
if not self.parent:
return
if parent_changed:
return
# Search the position from the touch to the window
lh, ls = self.line_height, self.line_spacing
x, y = pos
t_pos = (x, y) if pos_in_window else self.to_window(x, y)
bubble_size = bubble.size
bubble_hw = bubble_size[0] / 2.
win_size = win.size
bubble_pos = (t_pos[0], t_pos[1] + inch(.25))
if (bubble_pos[0] - bubble_hw) < 0:
# bubble beyond left of window
if bubble_pos[1] > (win_size[1] - bubble_size[1]):
# bubble above window height
bubble_pos = (bubble_hw, (t_pos[1]) - (lh + ls + inch(.25)))
bubble.arrow_pos = 'top_left'
else:
bubble_pos = (bubble_hw, bubble_pos[1])
bubble.arrow_pos = 'bottom_left'
elif (bubble_pos[0] + bubble_hw) > win_size[0]:
# bubble beyond right of window
if bubble_pos[1] > (win_size[1] - bubble_size[1]):
# bubble above window height
bubble_pos = (win_size[0] - bubble_hw,
(t_pos[1]) - (lh + ls + inch(.25)))
bubble.arrow_pos = 'top_right'
else:
bubble_pos = (win_size[0] - bubble_hw, bubble_pos[1])
bubble.arrow_pos = 'bottom_right'
else:
if bubble_pos[1] > (win_size[1] - bubble_size[1]):
# bubble above window height
bubble_pos = (bubble_pos[0],
(t_pos[1]) - (lh + ls + inch(.25)))
bubble.arrow_pos = 'top_mid'
else:
bubble.arrow_pos = 'bottom_mid'
bubble_pos = self.to_widget(*bubble_pos)
bubble.center_x = bubble_pos[0]
if bubble.arrow_pos[0] == 't':
bubble.top = bubble_pos[1]
else:
bubble.y = bubble_pos[1]
bubble.mode = mode
Animation.cancel_all(bubble)
bubble.opacity = 0
win.add_widget(bubble, canvas='after')
Animation(opacity=1, d=.225).start(bubble)
def _hide_cut_copy_paste(self, win=None):
bubble = self._bubble
if not bubble:
return
bubble.hide()
#
# Private
#
@staticmethod
def _reload_remove_observer(wr):
# called when the textinput is deleted
if wr in _textinput_list:
_textinput_list.remove(wr)
def _on_textinput_focused(self, instance, value, *largs):
self.focus = value
win = EventLoop.window
self.cancel_selection()
self._hide_cut_copy_paste(win)
if value:
if (not (self.readonly or self.disabled) or _is_desktop and
self._keyboard_mode == 'system'):
Clock.schedule_interval(self._do_blink_cursor, 1 / 2.)
self._editable = True
else:
self._editable = False
else:
Clock.unschedule(self._do_blink_cursor)
self._hide_handles(win)
def _ensure_clipboard(self):
global Clipboard, CutBuffer
if not Clipboard:
from kivy.core.clipboard import Clipboard, CutBuffer
def cut(self):
''' Copy current selection to clipboard then delete it from TextInput.
.. versionadded:: 1.8.0
'''
self._cut(self.selection_text)
def _cut(self, data):
self._ensure_clipboard()
Clipboard.copy(data)
self.delete_selection()
def copy(self, data=''):
''' Copy the value provided in argument `data` into current clipboard.
If data is not of type string it will be converted to string.
    If no data is provided, the current selection, if present, is copied.
.. versionadded:: 1.8.0
'''
self._ensure_clipboard()
if data:
return Clipboard.copy(data)
if self.selection_text:
return Clipboard.copy(self.selection_text)
def paste(self):
''' Insert text from system :class:`~kivy.core.clipboard.Clipboard`
into the :class:`~kivy.uix.textinput.TextInput` at current cursor
position.
.. versionadded:: 1.8.0
'''
self._ensure_clipboard()
data = Clipboard.paste()
self.delete_selection()
self.insert_text(data)
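    # Editor's note (illustrative only, not part of the original source): cut(),
    # copy() and paste() form the public clipboard API. A minimal, hypothetical
    # usage sketch::
    #
    #     ti = TextInput(text='hello world')
    #     ti.select_all()
    #     ti.copy()     # puts 'hello world' on the system clipboard
    #     ti.paste()    # replaces the current selection with the clipboard contents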
def _update_cutbuffer(self, *args):
CutBuffer.set_cutbuffer(self.selection_text)
def _get_text_width(self, text, tab_width, _label_cached):
# Return the width of a text, according to the current line options
kw = self._get_line_options()
try:
cid = u'{}\0{}\0{}'.format(text, self.password, kw)
except UnicodeDecodeError:
cid = '{}\0{}\0{}'.format(text, self.password, kw)
width = Cache_get('textinput.width', cid)
if width:
return width
if not _label_cached:
_label_cached = self._label_cached
text = text.replace('\t', ' ' * tab_width)
if not self.password:
width = _label_cached.get_extents(text)[0]
else:
width = _label_cached.get_extents(
self.password_mask * len(text))[0]
Cache_append('textinput.width', cid, width)
return width
def _do_blink_cursor(self, dt):
# Callback called by the timer to blink the cursor, according to the
# last activity in the widget
b = (Clock.get_time() - self._cursor_blink_time)
self.cursor_blink = int(b * 2) % 2
def on_cursor(self, instance, value):
# When the cursor is moved, reset the activity timer, and update all
# the graphics.
self._cursor_blink_time = Clock.get_time()
self._trigger_update_graphics()
def _delete_line(self, idx):
# Delete current line, and fix cursor position
assert(idx < len(self._lines))
self._lines_flags.pop(idx)
self._lines_labels.pop(idx)
self._lines.pop(idx)
self.cursor = self.cursor
def _set_line_text(self, line_num, text):
# Set current line with other text than the default one.
self._lines_labels[line_num] = self._create_line_label(text)
self._lines[line_num] = text
def _trigger_refresh_line_options(self, *largs):
Clock.unschedule(self._refresh_line_options)
Clock.schedule_once(self._refresh_line_options, 0)
def _refresh_line_options(self, *largs):
self._line_options = None
self._get_line_options()
self._refresh_text_from_property()
self._refresh_hint_text()
self.cursor = self.get_cursor_from_index(len(self.text))
def _trigger_refresh_text(self, *largs):
if len(largs) and largs[0] == self:
largs = ()
Clock.unschedule(lambda dt: self._refresh_text_from_property(*largs))
Clock.schedule_once(lambda dt:
self._refresh_text_from_property(*largs))
def _update_text_options(self, *largs):
Cache_remove('textinput.width')
self._trigger_refresh_text()
def _refresh_text_from_trigger(self, dt, *largs):
self._refresh_text_from_property(*largs)
def _refresh_text_from_property(self, *largs):
self._refresh_text(self._get_text(encode=False), *largs)
def _refresh_text(self, text, *largs):
# Refresh all the lines from a new text.
# By using cache in internal functions, this method should be fast.
mode = 'all'
if len(largs) > 1:
mode, start, finish, _lines, _lines_flags, len_lines = largs
#start = max(0, start)
cursor = None
else:
cursor = self.cursor_index()
_lines, self._lines_flags = self._split_smart(text)
_lines_labels = []
_line_rects = []
_create_label = self._create_line_label
for x in _lines:
lbl = _create_label(x)
_lines_labels.append(lbl)
_line_rects.append(Rectangle(size=lbl.size))
if mode == 'all':
self._lines_labels = _lines_labels
self._lines_rects = _line_rects
self._lines = _lines
elif mode == 'del':
if finish > start:
self._insert_lines(start,
finish if start == finish else (finish + 1),
len_lines, _lines_flags,
_lines, _lines_labels, _line_rects)
elif mode == 'insert':
self._insert_lines(
start,
finish if (start == finish and not len_lines)
else (finish + 1),
len_lines, _lines_flags, _lines, _lines_labels,
_line_rects)
min_line_ht = self._label_cached.get_extents('_')[1]
# with markup texture can be of height `1`
self.line_height = max(_lines_labels[0].height, min_line_ht)
#self.line_spacing = 2
        # now, if the text changed, the cursor may no longer be at the same place as
        # before, so try to put the cursor back in the right place
row = self.cursor_row
self.cursor = self.get_cursor_from_index(self.cursor_index()
if cursor is None else cursor)
        # if we ended up on a different line, reset the scroll, otherwise the
        # effect is ugly
if self.cursor_row != row:
self.scroll_x = 0
# with the new text don't forget to update graphics again
self._trigger_update_graphics()
def _insert_lines(self, start, finish, len_lines, _lines_flags,
_lines, _lines_labels, _line_rects):
self_lines_flags = self._lines_flags
_lins_flags = []
_lins_flags.extend(self_lines_flags[:start])
if len_lines:
# if not inserting at first line then
if start:
# make sure line flags restored for first line
# _split_smart assumes first line to be not a new line
_lines_flags[0] = self_lines_flags[start]
_lins_flags.extend(_lines_flags)
_lins_flags.extend(self_lines_flags[finish:])
self._lines_flags = _lins_flags
_lins_lbls = []
_lins_lbls.extend(self._lines_labels[:start])
if len_lines:
_lins_lbls.extend(_lines_labels)
_lins_lbls.extend(self._lines_labels[finish:])
self._lines_labels = _lins_lbls
_lins_rcts = []
_lins_rcts.extend(self._lines_rects[:start])
if len_lines:
_lins_rcts.extend(_line_rects)
_lins_rcts.extend(self._lines_rects[finish:])
self._lines_rects = _lins_rcts
_lins = []
_lins.extend(self._lines[:start])
if len_lines:
_lins.extend(_lines)
_lins.extend(self._lines[finish:])
self._lines = _lins
def _trigger_update_graphics(self, *largs):
Clock.unschedule(self._update_graphics)
Clock.schedule_once(self._update_graphics, -1)
def _update_graphics(self, *largs):
# Update all the graphics according to the current internal values.
#
# This is a little bit complex, cause we have to :
# - handle scroll_x
# - handle padding
# - create rectangle for the lines matching the viewport
# - crop the texture coordinates to match the viewport
#
# This is the first step of graphics, the second is the selection.
self.canvas.clear()
add = self.canvas.add
lh = self.line_height
dy = lh + self.line_spacing
# adjust view if the cursor is going outside the bounds
sx = self.scroll_x
sy = self.scroll_y
# draw labels
if not self._lines or (
not self._lines[0] and len(self._lines) == 1):
rects = self._hint_text_rects
labels = self._hint_text_labels
lines = self._hint_text_lines
else:
rects = self._lines_rects
labels = self._lines_labels
lines = self._lines
padding_left, padding_top, padding_right, padding_bottom = self.padding
x = self.x + padding_left
y = self.top - padding_top + sy
miny = self.y + padding_bottom
maxy = self.top - padding_top
for line_num, value in enumerate(lines):
if miny <= y <= maxy + dy:
texture = labels[line_num]
size = list(texture.size)
texc = texture.tex_coords[:]
                # calculate coordinates
viewport_pos = sx, 0
vw = self.width - padding_left - padding_right
vh = self.height - padding_top - padding_bottom
tw, th = list(map(float, size))
oh, ow = tch, tcw = texc[1:3]
tcx, tcy = 0, 0
# adjust size/texcoord according to viewport
if viewport_pos:
tcx, tcy = viewport_pos
tcx = tcx / tw * (ow)
tcy = tcy / th * oh
if tw - viewport_pos[0] < vw:
tcw = tcw - tcx
size[0] = tcw * size[0]
elif vw < tw:
tcw = (vw / tw) * tcw
size[0] = vw
if vh < th:
tch = (vh / th) * tch
size[1] = vh
# cropping
mlh = lh
if y > maxy:
vh = (maxy - y + lh)
tch = (vh / float(lh)) * oh
tcy = oh - tch
size[1] = vh
if y - lh < miny:
diff = miny - (y - lh)
y += diff
vh = lh - diff
tch = (vh / float(lh)) * oh
size[1] = vh
texc = (
tcx,
tcy + tch,
tcx + tcw,
tcy + tch,
tcx + tcw,
tcy,
tcx,
tcy)
# add rectangle.
r = rects[line_num]
r.pos = int(x), int(y - mlh)
r.size = size
r.texture = texture
r.tex_coords = texc
add(r)
y -= dy
self._update_graphics_selection()
def _update_graphics_selection(self):
if not self._selection:
return
self.canvas.remove_group('selection')
dy = self.line_height + self.line_spacing
rects = self._lines_rects
padding_top = self.padding[1]
padding_bottom = self.padding[3]
_top = self.top
y = _top - padding_top + self.scroll_y
miny = self.y + padding_bottom
maxy = _top - padding_top
draw_selection = self._draw_selection
a, b = self._selection_from, self._selection_to
if a > b:
a, b = b, a
get_cursor_from_index = self.get_cursor_from_index
s1c, s1r = get_cursor_from_index(a)
s2c, s2r = get_cursor_from_index(b)
s2r += 1
# pass only the selection lines[]
# passing all the lines can get slow when dealing with a lot of text
y -= s1r * dy
_lines = self._lines
_get_text_width = self._get_text_width
tab_width = self.tab_width
_label_cached = self._label_cached
width = self.width
padding_left = self.padding[0]
padding_right = self.padding[2]
x = self.x
canvas_add = self.canvas.add
selection_color = self.selection_color
for line_num, value in enumerate(_lines[s1r:s2r], start=s1r):
if miny <= y <= maxy + dy:
r = rects[line_num]
draw_selection(r.pos, r.size, line_num, (s1c, s1r),
(s2c, s2r - 1), _lines, _get_text_width,
tab_width, _label_cached, width,
padding_left, padding_right, x,
canvas_add, selection_color)
y -= dy
self._position_handles('both')
def _draw_selection(self, *largs):
pos, size, line_num, (s1c, s1r), (s2c, s2r),\
_lines, _get_text_width, tab_width, _label_cached, width,\
padding_left, padding_right, x, canvas_add, selection_color = largs
# Draw the current selection on the widget.
if line_num < s1r or line_num > s2r:
return
x, y = pos
w, h = size
x1 = x
x2 = x + w
if line_num == s1r:
lines = _lines[line_num]
x1 -= self.scroll_x
x1 += _get_text_width(lines[:s1c], tab_width, _label_cached)
if line_num == s2r:
lines = _lines[line_num]
x2 = (x - self.scroll_x) + _get_text_width(lines[:s2c],
tab_width,
_label_cached)
width_minus_padding = width - (padding_right + padding_left)
maxx = x + width_minus_padding
if x1 > maxx:
return
x1 = max(x1, x)
x2 = min(x2, x + width_minus_padding)
canvas_add(Color(*selection_color, group='selection'))
canvas_add(Rectangle(
pos=(x1, pos[1]), size=(x2 - x1, size[1]), group='selection'))
def on_size(self, instance, value):
        # if the size changes, the current scrolling / text split may become invalid;
        # the text may need to be re-split once size_hint has been resolved.
self._trigger_refresh_text()
self._refresh_hint_text()
self.scroll_x = self.scroll_y = 0
def _get_cursor_pos(self):
# return the current cursor x/y from the row/col
dy = self.line_height + self.line_spacing
padding_left = self.padding[0]
padding_top = self.padding[1]
left = self.x + padding_left
top = self.top - padding_top
y = top + self.scroll_y
y -= self.cursor_row * dy
x, y = left + self.cursor_offset() - self.scroll_x, y
if x < left:
self.scroll_x = 0
x = left
if y > top:
y = top
self.scroll_y = 0
return x, y
def _get_line_options(self):
# Get or create line options, to be used for Label creation
if self._line_options is None:
self._line_options = kw = {
'font_size': self.font_size,
'font_name': self.font_name,
'anchor_x': 'left',
'anchor_y': 'top',
'padding_x': 0,
'padding_y': 0,
'padding': (0, 0)}
self._label_cached = Label(**kw)
return self._line_options
def _create_line_label(self, text, hint=False):
# Create a label from a text, using line options
ntext = text.replace(u'\n', u'').replace(u'\t', u' ' * self.tab_width)
if self.password and not hint: # Don't replace hint_text with *
ntext = self.password_mask * len(ntext)
kw = self._get_line_options()
cid = '%s\0%s' % (ntext, str(kw))
texture = Cache_get('textinput.label', cid)
if texture is None:
# FIXME right now, we can't render very long line...
# if we move on "VBO" version as fallback, we won't need to
            # do this. Try to find the maximum amount of text we can handle
label = None
label_len = len(ntext)
ld = None
# check for blank line
if not ntext:
texture = Texture.create(size=(1, 1))
Cache_append('textinput.label', cid, texture)
return texture
while True:
try:
label = Label(text=ntext[:label_len], **kw)
label.refresh()
if ld is not None and ld > 2:
ld = int(ld / 2)
label_len += ld
else:
break
except:
                    # an exception happens when we try to render the text;
# reduce it...
if ld is None:
ld = len(ntext)
ld = int(ld / 2)
if ld < 2 and label_len:
label_len -= 1
label_len -= ld
continue
# ok, we found it.
texture = label.texture
Cache_append('textinput.label', cid, texture)
return texture
def _tokenize(self, text):
# Tokenize a text string from some delimiters
if text is None:
return
delimiters = u' ,\'".;:\n\r\t'
oldindex = 0
for index, char in enumerate(text):
if char not in delimiters:
continue
if oldindex != index:
yield text[oldindex:index]
yield text[index:index + 1]
oldindex = index + 1
yield text[oldindex:]
def _split_smart(self, text):
# Do a "smart" split. If autowidth or autosize is set,
# we are not doing smart split, just a split on line break.
# Otherwise, we are trying to split as soon as possible, to prevent
# overflow on the widget.
        # depending on the options, split the text on lines or on words
if not self.multiline:
lines = text.split(u'\n')
lines_flags = [0] + [FL_IS_NEWLINE] * (len(lines) - 1)
return lines, lines_flags
# no autosize, do wordwrap.
x = flags = 0
line = []
lines = []
lines_flags = []
_join = u''.join
lines_append, lines_flags_append = lines.append, lines_flags.append
padding_left = self.padding[0]
padding_right = self.padding[2]
width = self.width - padding_left - padding_right
text_width = self._get_text_width
_tab_width, _label_cached = self.tab_width, self._label_cached
# try to add each word on current line.
for word in self._tokenize(text):
is_newline = (word == u'\n')
w = text_width(word, _tab_width, _label_cached)
# if we have more than the width, or if it's a newline,
# push the current line, and create a new one
if (x + w > width and line) or is_newline:
lines_append(_join(line))
lines_flags_append(flags)
flags = 0
line = []
x = 0
if is_newline:
flags |= FL_IS_NEWLINE
else:
x += w
line.append(word)
if line or flags & FL_IS_NEWLINE:
lines_append(_join(line))
lines_flags_append(flags)
return lines, lines_flags
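    # Editor's note (illustrative only, not part of the original source): for a
    # single-line widget _split_smart() only breaks on '\n', so u'ab\ncd' yields
    # ([u'ab', u'cd'], [0, FL_IS_NEWLINE]); in multiline mode words are pushed onto
    # a new line as soon as the accumulated width would exceed the usable width.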
def _key_down(self, key, repeat=False):
displayed_str, internal_str, internal_action, scale = key
if internal_action is None:
if self._selection:
self.delete_selection()
self.insert_text(displayed_str)
elif internal_action in ('shift', 'shift_L', 'shift_R'):
if not self._selection:
self._selection_from = self._selection_to = self.cursor_index()
self._selection = True
self._selection_finished = False
elif internal_action == 'ctrl_L':
self._ctrl_l = True
elif internal_action == 'ctrl_R':
self._ctrl_r = True
elif internal_action == 'alt_L':
self._alt_l = True
elif internal_action == 'alt_R':
self._alt_r = True
elif internal_action.startswith('cursor_'):
cc, cr = self.cursor
self.do_cursor_movement(internal_action,
self._ctrl_l or self._ctrl_r,
self._alt_l or self._alt_r)
if self._selection and not self._selection_finished:
self._selection_to = self.cursor_index()
self._update_selection()
else:
self.cancel_selection()
elif self._selection and internal_action in ('del', 'backspace'):
self.delete_selection()
elif internal_action == 'del':
# Move cursor one char to the right. If that was successful,
# do a backspace (effectively deleting char right of cursor)
cursor = self.cursor
self.do_cursor_movement('cursor_right')
if cursor != self.cursor:
self.do_backspace(mode='del')
elif internal_action == 'backspace':
self.do_backspace()
elif internal_action == 'enter':
if self.multiline:
self.insert_text(u'\n')
else:
self.dispatch('on_text_validate')
self.focus = False
elif internal_action == 'escape':
self.focus = False
if internal_action != 'escape':
#self._recalc_size()
pass
def _key_up(self, key, repeat=False):
displayed_str, internal_str, internal_action, scale = key
if internal_action in ('shift', 'shift_L', 'shift_R'):
if self._selection:
self._update_selection(True)
elif internal_action == 'ctrl_L':
self._ctrl_l = False
elif internal_action == 'ctrl_R':
self._ctrl_r = False
elif internal_action == 'alt_L':
self._alt_l = False
elif internal_action == 'alt_R':
self._alt_r = False
def keyboard_on_key_down(self, window, keycode, text, modifiers):
# Keycodes on OS X:
ctrl, cmd = 64, 1024
key, key_str = keycode
win = EventLoop.window
# This allows *either* ctrl *or* cmd, but not both.
is_shortcut = (modifiers == ['ctrl'] or (
_is_osx and modifiers == ['meta']))
is_interesting_key = key in (list(self.interesting_keys.keys()) + [27])
if not self.write_tab and super(TextInput,
self).keyboard_on_key_down(window, keycode, text, modifiers):
return True
if not self._editable:
# duplicated but faster testing for non-editable keys
if text and not is_interesting_key:
if is_shortcut and key == ord('c'):
self.copy()
elif key == 27:
self.focus = False
return True
if text and not is_interesting_key:
self._hide_handles(win)
self._hide_cut_copy_paste(win)
win.remove_widget(self._handle_middle)
# check for command modes
# we use \x01INFO\x02 to get info from IME on mobiles
# pygame seems to pass \x01 as the unicode for ctrl+a
# checking for modifiers ensures conflict resolution.
first_char = ord(text[0])
if not modifiers and first_char == 1:
self._command_mode = True
self._command = ''
if not modifiers and first_char == 2:
self._command_mode = False
self._command = self._command[1:]
if self._command_mode:
self._command += text
return
_command = self._command
if _command and first_char == 2:
from_undo = True
_command, data = _command.split(':')
self._command = ''
if self._selection:
self.delete_selection()
if _command == 'DEL':
count = int(data)
if not count:
self.delete_selection(from_undo=True)
end = self.cursor_index()
self._selection_from = max(end - count, 0)
self._selection_to = end
self._selection = True
self.delete_selection(from_undo=True)
return
elif _command == 'INSERT':
self.insert_text(data, from_undo)
elif _command == 'INSERTN':
from_undo = False
self.insert_text(data, from_undo)
elif _command == 'SELWORD':
self.dispatch('on_double_tap')
elif _command == 'SEL':
if data == '0':
Clock.schedule_once(lambda dt: self.cancel_selection())
elif _command == 'CURCOL':
self.cursor = int(data), self.cursor_row
return
if is_shortcut:
if key == ord('x'): # cut selection
self._cut(self.selection_text)
elif key == ord('c'): # copy selection
self.copy()
elif key == ord('v'): # paste selection
self.paste()
elif key == ord('a'): # select all
self.select_all()
elif key == ord('z'): # undo
self.do_undo()
elif key == ord('r'): # redo
self.do_redo()
else:
if EventLoop.window.__class__.__module__ == \
'kivy.core.window.window_sdl2':
return
if self._selection:
self.delete_selection()
self.insert_text(text)
#self._recalc_size()
return
if is_interesting_key:
self._hide_cut_copy_paste(win)
self._hide_handles(win)
if key == 27: # escape
self.focus = False
return True
elif key == 9: # tab
self.insert_text(u'\t')
return True
k = self.interesting_keys.get(key)
if k:
key = (None, None, k, 1)
self._key_down(key)
def keyboard_on_key_up(self, window, keycode):
key, key_str = keycode
k = self.interesting_keys.get(key)
if k:
key = (None, None, k, 1)
self._key_up(key)
def keyboard_on_textinput(self, window, text):
if self._selection:
self.delete_selection()
self.insert_text(text, False)
def on_hint_text(self, instance, value):
self._refresh_hint_text()
def _refresh_hint_text(self):
_lines, self._hint_text_flags = self._split_smart(self.hint_text)
_hint_text_labels = []
_hint_text_rects = []
_create_label = self._create_line_label
for x in _lines:
lbl = _create_label(x, hint=True)
_hint_text_labels.append(lbl)
_hint_text_rects.append(Rectangle(size=lbl.size))
self._hint_text_lines = _lines
self._hint_text_labels = _hint_text_labels
self._hint_text_rects = _hint_text_rects
# Remember to update graphics
self._trigger_update_graphics()
#
# Properties
#
_lines = ListProperty([])
_hint_text_lines = ListProperty([])
_editable = BooleanProperty(True)
_insert_int_patu = re.compile(u'[^0-9]')
_insert_int_patb = re.compile(b'[^0-9]')
readonly = BooleanProperty(False)
'''If True, the user will not be able to change the content of a textinput.
.. versionadded:: 1.3.0
:attr:`readonly` is a :class:`~kivy.properties.BooleanProperty` and
defaults to False.
'''
multiline = BooleanProperty(True)
    '''If True, the widget will be able to show multiple lines of text. If False,
the "enter" keypress will defocus the textinput instead of adding a new
line.
:attr:`multiline` is a :class:`~kivy.properties.BooleanProperty` and
defaults to True.
'''
password = BooleanProperty(False)
'''If True, the widget will display its characters as the character
set in :attr:`password_mask`.
.. versionadded:: 1.2.0
:attr:`password` is a :class:`~kivy.properties.BooleanProperty` and
defaults to False.
'''
password_mask = StringProperty('*')
'''Sets the character used to mask the text when :attr:`password` is True.
.. versionadded:: 1.9.2
:attr:`password_mask` is a :class:`~kivy.properties.StringProperty` and
defaults to `'*'`.
'''
keyboard_suggestions = BooleanProperty(True)
'''If True provides auto suggestions on top of keyboard.
This will only work if :attr:`input_type` is set to `text`.
.. versionadded:: 1.8.0
:attr:`keyboard_suggestions` is a
    :class:`~kivy.properties.BooleanProperty` and defaults to True.
'''
cursor_blink = BooleanProperty(False)
'''This property is used to blink the cursor graphic. The value of
:attr:`cursor_blink` is automatically computed. Setting a value on it will
have no impact.
:attr:`cursor_blink` is a :class:`~kivy.properties.BooleanProperty` and
defaults to False.
'''
def _get_cursor(self):
return self._cursor
def _set_cursor(self, pos):
if not self._lines:
self._trigger_refresh_text()
return
l = self._lines
cr = boundary(pos[1], 0, len(l) - 1)
cc = boundary(pos[0], 0, len(l[cr]))
cursor = cc, cr
if self._cursor == cursor:
return
self._cursor = cursor
# adjust scrollview to ensure that the cursor will be always inside our
# viewport.
padding_left = self.padding[0]
padding_right = self.padding[2]
viewport_width = self.width - padding_left - padding_right
sx = self.scroll_x
offset = self.cursor_offset()
        # if offset is outside the current bounds, readjust
if offset > viewport_width + sx:
self.scroll_x = offset - viewport_width
if offset < sx:
self.scroll_x = offset
# do the same for Y
# this algo try to center the cursor as much as possible
dy = self.line_height + self.line_spacing
offsety = cr * dy
sy = self.scroll_y
padding_top = self.padding[1]
padding_bottom = self.padding[3]
viewport_height = self.height - padding_top - padding_bottom - dy
if offsety > viewport_height + sy:
sy = offsety - viewport_height
if offsety < sy:
sy = offsety
self.scroll_y = sy
return True
cursor = AliasProperty(_get_cursor, _set_cursor)
    '''Tuple of (col, row) values indicating the current cursor position.
    You can set a new (col, row) if you want to move the cursor. The scrolling
area will be automatically updated to ensure that the cursor is
visible inside the viewport.
:attr:`cursor` is an :class:`~kivy.properties.AliasProperty`.
'''
def _get_cursor_col(self):
return self._cursor[0]
cursor_col = AliasProperty(_get_cursor_col, None, bind=('cursor', ))
'''Current column of the cursor.
:attr:`cursor_col` is an :class:`~kivy.properties.AliasProperty` to
cursor[0], read-only.
'''
def _get_cursor_row(self):
return self._cursor[1]
cursor_row = AliasProperty(_get_cursor_row, None, bind=('cursor', ))
'''Current row of the cursor.
:attr:`cursor_row` is an :class:`~kivy.properties.AliasProperty` to
cursor[1], read-only.
'''
cursor_pos = AliasProperty(_get_cursor_pos, None, bind=(
'cursor', 'padding', 'pos', 'size', 'focus',
'scroll_x', 'scroll_y'))
'''Current position of the cursor, in (x, y).
:attr:`cursor_pos` is an :class:`~kivy.properties.AliasProperty`,
read-only.
'''
cursor_color = ListProperty([1, 0, 0, 1])
'''Current color of the cursor, in (r, g, b, a) format.
.. versionadded:: 1.9.0
:attr:`cursor_color` is a :class:`~kivy.properties.ListProperty` and
defaults to [1, 0, 0, 1].
'''
line_height = NumericProperty(1)
'''Height of a line. This property is automatically computed from the
:attr:`font_name`, :attr:`font_size`. Changing the line_height will have
no impact.
.. note::
:attr:`line_height` is the height of a single line of text.
Use :attr:`minimum_height`, which also includes padding, to
get the height required to display the text properly.
:attr:`line_height` is a :class:`~kivy.properties.NumericProperty`,
read-only.
'''
tab_width = NumericProperty(4)
'''By default, each tab will be replaced by four spaces on the text
input widget. You can set a lower or higher value.
:attr:`tab_width` is a :class:`~kivy.properties.NumericProperty` and
defaults to 4.
'''
padding_x = VariableListProperty([0, 0], length=2)
'''Horizontal padding of the text: [padding_left, padding_right].
padding_x also accepts a one argument form [padding_horizontal].
:attr:`padding_x` is a :class:`~kivy.properties.VariableListProperty` and
defaults to [0, 0]. This might be changed by the current theme.
.. deprecated:: 1.7.0
Use :attr:`padding` instead.
'''
def on_padding_x(self, instance, value):
self.padding[0] = value[0]
self.padding[2] = value[1]
padding_y = VariableListProperty([0, 0], length=2)
'''Vertical padding of the text: [padding_top, padding_bottom].
padding_y also accepts a one argument form [padding_vertical].
:attr:`padding_y` is a :class:`~kivy.properties.VariableListProperty` and
defaults to [0, 0]. This might be changed by the current theme.
.. deprecated:: 1.7.0
Use :attr:`padding` instead.
'''
def on_padding_y(self, instance, value):
self.padding[1] = value[0]
self.padding[3] = value[1]
padding = VariableListProperty([6, 6, 6, 6])
'''Padding of the text: [padding_left, padding_top, padding_right,
padding_bottom].
padding also accepts a two argument form [padding_horizontal,
padding_vertical] and a one argument form [padding].
.. versionchanged:: 1.7.0
Replaced AliasProperty with VariableListProperty.
:attr:`padding` is a :class:`~kivy.properties.VariableListProperty` and
defaults to [6, 6, 6, 6].
'''
scroll_x = NumericProperty(0)
'''X scrolling value of the viewport. The scrolling is automatically
updated when the cursor is moved or text changed. If there is no
user input, the scroll_x and scroll_y properties may be changed.
:attr:`scroll_x` is a :class:`~kivy.properties.NumericProperty` and
defaults to 0.
'''
scroll_y = NumericProperty(0)
'''Y scrolling value of the viewport. See :attr:`scroll_x` for more
information.
:attr:`scroll_y` is a :class:`~kivy.properties.NumericProperty` and
defaults to 0.
'''
selection_color = ListProperty([0.1843, 0.6549, 0.8313, .5])
'''Current color of the selection, in (r, g, b, a) format.
.. warning::
The color should always have an "alpha" component less than 1
since the selection is drawn after the text.
:attr:`selection_color` is a :class:`~kivy.properties.ListProperty` and
defaults to [0.1843, 0.6549, 0.8313, .5].
'''
border = ListProperty([4, 4, 4, 4])
'''Border used for :class:`~kivy.graphics.vertex_instructions.BorderImage`
graphics instruction. Used with :attr:`background_normal` and
:attr:`background_active`. Can be used for a custom background.
.. versionadded:: 1.4.1
It must be a list of four values: (top, right, bottom, left). Read the
BorderImage instruction for more information about how to use it.
:attr:`border` is a :class:`~kivy.properties.ListProperty` and defaults
to (4, 4, 4, 4).
'''
background_normal = StringProperty(
'atlas://data/images/defaulttheme/textinput')
'''Background image of the TextInput when it's not in focus.
.. versionadded:: 1.4.1
:attr:`background_normal` is a :class:`~kivy.properties.StringProperty` and
defaults to 'atlas://data/images/defaulttheme/textinput'.
'''
background_disabled_normal = StringProperty(
'atlas://data/images/defaulttheme/textinput_disabled')
'''Background image of the TextInput when disabled.
.. versionadded:: 1.8.0
:attr:`background_disabled_normal` is a
:class:`~kivy.properties.StringProperty` and
defaults to 'atlas://data/images/defaulttheme/textinput_disabled'.
'''
background_active = StringProperty(
'atlas://data/images/defaulttheme/textinput_active')
'''Background image of the TextInput when it's in focus.
.. versionadded:: 1.4.1
:attr:`background_active` is a
:class:`~kivy.properties.StringProperty` and
defaults to 'atlas://data/images/defaulttheme/textinput_active'.
'''
background_disabled_active = StringProperty(
'atlas://data/images/defaulttheme/textinput_disabled_active')
'''Background image of the TextInput when it's in focus and disabled.
.. versionadded:: 1.8.0
:attr:`background_disabled_active` is a
:class:`~kivy.properties.StringProperty` and
defaults to 'atlas://data/images/defaulttheme/textinput_disabled_active'.
'''
background_color = ListProperty([1, 1, 1, 1])
'''Current color of the background, in (r, g, b, a) format.
.. versionadded:: 1.2.0
:attr:`background_color` is a :class:`~kivy.properties.ListProperty`
and defaults to [1, 1, 1, 1] (white).
'''
foreground_color = ListProperty([0, 0, 0, 1])
'''Current color of the foreground, in (r, g, b, a) format.
.. versionadded:: 1.2.0
:attr:`foreground_color` is a :class:`~kivy.properties.ListProperty`
and defaults to [0, 0, 0, 1] (black).
'''
disabled_foreground_color = ListProperty([0, 0, 0, .5])
'''Current color of the foreground when disabled, in (r, g, b, a) format.
.. versionadded:: 1.8.0
:attr:`disabled_foreground_color` is a
:class:`~kivy.properties.ListProperty` and
    defaults to [0, 0, 0, .5] (50% transparent black).
'''
use_bubble = BooleanProperty(not _is_desktop)
'''Indicates whether the cut/copy/paste bubble is used.
.. versionadded:: 1.7.0
:attr:`use_bubble` is a :class:`~kivy.properties.BooleanProperty`
and defaults to True on mobile OS's, False on desktop OS's.
'''
use_handles = BooleanProperty(not _is_desktop)
'''Indicates whether the selection handles are displayed.
.. versionadded:: 1.8.0
:attr:`use_handles` is a :class:`~kivy.properties.BooleanProperty`
and defaults to True on mobile OS's, False on desktop OS's.
'''
suggestion_text = StringProperty('')
    '''Shows a suggestion text/word from the current cursor position onwards,
    that can be used as a possible completion. Useful for suggesting completion
text. This can also be used by the IME to setup the current word being
edited
.. versionadded:: 1.9.0
:attr:`suggestion_text` is a :class:`~kivy.properties.StringProperty`
defaults to `''`
'''
def on_suggestion_text(self, instance, value):
global MarkupLabel
if not MarkupLabel:
from kivy.core.text.markup import MarkupLabel
cursor_pos = self.cursor_pos
txt = self._lines[self.cursor_row]
cr = self.cursor_row
kw = self._get_line_options()
rct = self._lines_rects[cr]
lbl = text = None
if value:
lbl = MarkupLabel(
text=txt + "[b]{}[/b]".format(value), **kw)
else:
lbl = Label(**kw)
text = txt
lbl.refresh()
self._lines_labels[cr] = lbl.texture
rct.size = lbl.size
self._update_graphics()
def get_sel_from(self):
return self._selection_from
selection_from = AliasProperty(get_sel_from, None)
'''If a selection is in progress or complete, this property will represent
the cursor index where the selection started.
.. versionchanged:: 1.4.0
:attr:`selection_from` is an :class:`~kivy.properties.AliasProperty`
and defaults to None, readonly.
'''
def get_sel_to(self):
return self._selection_to
selection_to = AliasProperty(get_sel_to, None)
'''If a selection is in progress or complete, this property will represent
    the cursor index where the selection ends.
.. versionchanged:: 1.4.0
:attr:`selection_to` is an :class:`~kivy.properties.AliasProperty` and
defaults to None, readonly.
'''
selection_text = StringProperty(u'')
'''Current content selection.
:attr:`selection_text` is a :class:`~kivy.properties.StringProperty`
and defaults to '', readonly.
'''
def on_selection_text(self, instance, value):
if value:
if self.use_handles:
self._trigger_show_handles()
if CutBuffer and not self.password:
self._trigger_update_cutbuffer()
def _get_text(self, encode=False):
lf = self._lines_flags
l = self._lines
len_l = len(l)
if len(lf) < len_l:
lf.append(1)
text = u''.join([(u'\n' if (lf[i] & FL_IS_NEWLINE) else u'') + l[i]
for i in range(len_l)])
if encode and not isinstance(text, bytes):
text = text.encode('utf8')
return text
def _set_text(self, text):
if isinstance(text, bytes):
text = text.decode('utf8')
if self.replace_crlf:
text = text.replace(u'\r\n', u'\n')
if self._get_text(encode=False) == text:
return
self._refresh_text(text)
self.cursor = self.get_cursor_from_index(len(text))
text = AliasProperty(_get_text, _set_text, bind=('_lines', ))
'''Text of the widget.
Creation of a simple hello world::
widget = TextInput(text='Hello world')
If you want to create the widget with an unicode string, use::
widget = TextInput(text=u'My unicode string')
:attr:`text` is an :class:`~kivy.properties.AliasProperty`.
'''
font_name = StringProperty('Roboto')
'''Filename of the font to use. The path can be absolute or relative.
Relative paths are resolved by the :func:`~kivy.resources.resource_find`
function.
.. warning::
Depending on your text provider, the font file may be ignored. However,
you can mostly use this without problems.
If the font used lacks the glyphs for the particular language/symbols
you are using, you will see '[]' blank box characters instead of the
actual glyphs. The solution is to use a font that has the glyphs you
need to display. For example, to display |unicodechar|, use a font like
freesans.ttf that has the glyph.
.. |unicodechar| image:: images/unicode-char.png
:attr:`font_name` is a :class:`~kivy.properties.StringProperty` and
defaults to 'Roboto'.
'''
font_size = NumericProperty('15sp')
'''Font size of the text in pixels.
:attr:`font_size` is a :class:`~kivy.properties.NumericProperty` and
    defaults to '15sp'.
'''
hint_text = StringProperty('')
'''Hint text of the widget.
Shown if text is '' and focus is False.
.. versionadded:: 1.6.0
:attr:`hint_text` a :class:`~kivy.properties.StringProperty` and defaults
to ''.
'''
hint_text_color = ListProperty([0.5, 0.5, 0.5, 1.0])
'''Current color of the hint_text text, in (r, g, b, a) format.
.. versionadded:: 1.6.0
:attr:`hint_text_color` is a :class:`~kivy.properties.ListProperty` and
defaults to [0.5, 0.5, 0.5, 1.0] (grey).
'''
auto_indent = BooleanProperty(False)
'''Automatically indent multiline text.
.. versionadded:: 1.7.0
:attr:`auto_indent` is a :class:`~kivy.properties.BooleanProperty` and
defaults to False.
'''
replace_crlf = BooleanProperty(True)
'''Automatically replace CRLF with LF.
.. versionadded:: 1.9.1
:attr:`replace_crlf` is a :class:`~kivy.properties.BooleanProperty` and
defaults to True.
'''
allow_copy = BooleanProperty(True)
'''Decides whether to allow copying the text.
.. versionadded:: 1.8.0
:attr:`allow_copy` is a :class:`~kivy.properties.BooleanProperty` and
defaults to True.
'''
def _get_min_height(self):
return (len(self._lines) * (self.line_height + self.line_spacing)
+ self.padding[1] + self.padding[3])
minimum_height = AliasProperty(_get_min_height, None,
bind=('_lines', 'line_spacing', 'padding',
'font_size', 'font_name', 'password',
'hint_text', 'line_height'))
'''Minimum height of the content inside the TextInput.
.. versionadded:: 1.8.0
:attr:`minimum_height` is a readonly
:class:`~kivy.properties.AliasProperty`.
.. warning::
        :attr:`minimum_height` is calculated based on :attr:`width`, therefore
code like this will lead to an infinite loop::
<FancyTextInput>:
height: self.minimum_height
width: self.height
'''
line_spacing = NumericProperty(0)
'''Space taken up between the lines.
.. versionadded:: 1.8.0
:attr:`line_spacing` is a :class:`~kivy.properties.NumericProperty` and
defaults to 0.
'''
input_filter = ObjectProperty(None, allownone=True)
''' Filters the input according to the specified mode, if not None. If
None, no filtering is applied.
.. versionadded:: 1.9.0
:attr:`input_filter` is an :class:`~kivy.properties.ObjectProperty` and
defaults to `None`. Can be one of `None`, `'int'` (string), or `'float'`
(string), or a callable. If it is `'int'`, it will only accept numbers.
If it is `'float'` it will also accept a single period. Finally, if it is
    a callable it will be called with two parameters: the string to be added
and a bool indicating whether the string is a result of undo (True). The
callable should return a new substring that will be used instead.
'''
handle_image_middle = StringProperty(
'atlas://data/images/defaulttheme/selector_middle')
'''Image used to display the middle handle on the TextInput for cursor
positioning.
.. versionadded:: 1.8.0
:attr:`handle_image_middle` is a :class:`~kivy.properties.StringProperty`
and defaults to 'atlas://data/images/defaulttheme/selector_middle'.
'''
def on_handle_image_middle(self, instance, value):
if self._handle_middle:
self._handle_middle.source = value
handle_image_left = StringProperty(
'atlas://data/images/defaulttheme/selector_left')
'''Image used to display the Left handle on the TextInput for selection.
.. versionadded:: 1.8.0
:attr:`handle_image_left` is a :class:`~kivy.properties.StringProperty` and
defaults to 'atlas://data/images/defaulttheme/selector_left'.
'''
def on_handle_image_left(self, instance, value):
if self._handle_left:
self._handle_left.source = value
handle_image_right = StringProperty(
'atlas://data/images/defaulttheme/selector_right')
'''Image used to display the Right handle on the TextInput for selection.
.. versionadded:: 1.8.0
:attr:`handle_image_right` is a
:class:`~kivy.properties.StringProperty` and defaults to
'atlas://data/images/defaulttheme/selector_right'.
'''
def on_handle_image_right(self, instance, value):
if self._handle_right:
self._handle_right.source = value
write_tab = BooleanProperty(True)
'''Whether the tab key should move focus to the next widget or if it should
enter a tab in the :class:`TextInput`. If `True` a tab will be written,
otherwise, focus will move to the next widget.
.. versionadded:: 1.9.0
:attr:`write_tab` is a :class:`~kivy.properties.BooleanProperty` and
defaults to `True`.
'''
if __name__ == '__main__':
from kivy.app import App
from kivy.uix.boxlayout import BoxLayout
from kivy.lang import Builder
class TextInputApp(App):
def build(self):
Builder.load_string('''
<TextInput>
on_text:
self.suggestion_text = ''
self.suggestion_text = 'ion_text'
''')
root = BoxLayout(orientation='vertical')
textinput = TextInput(multiline=True, use_bubble=True,
use_handles=True)
#textinput.text = __doc__
root.add_widget(textinput)
textinput2 = TextInput(multiline=False, text='monoline textinput',
size_hint=(1, None), height=30)
root.add_widget(textinput2)
return root
TextInputApp().run()
| mit | 6,712,873,367,837,512,000 | 34.263362 | 80 | 0.549193 | false |
gregpuzzles1/Sandbox | Example Programs/Ch_12_Student_Files/teststudent.py | 1 | 1833 | """
File: teststudent.py
Unit test suite for the Student class.
"""
from student import Student
import unittest
class TestStudent(unittest.TestCase):
"""Defines a unit test suite for the Student class."""
def setUp(self):
"""Sets up the test fixture. Scores are 1-5."""
self._student = Student("TEST", 5)
for index in xrange(1, 6):
score = self._student.setScore(index, index)
def tearDown(self):
"""Cleans up the test fixture after testing."""
pass
def testGetAverage(self):
"""Unit test for getAverage."""
average = self._student.getAverage()
self.assertEquals(3, average)
def testGetHighScore(self):
"""Unit test for getHighScore."""
high = self._student.getHighScore()
self.assertEquals(5, high)
def testGetName(self):
"""Test case for getName."""
self.assertEquals("TEST", self._student.getName())
def testGetScore(self):
"""Unit test for getScore."""
for index in xrange(1, 6):
score = self._student.getScore(index)
self.assertEquals(index, score)
self.assertRaises(IndexError,
self._student.getScore,
0)
self.assertRaises(IndexError,
self._student.getScore,
6)
def testSetScore(self):
"""Unit test for setScore."""
for index in xrange(1, 6):
score = self._student.setScore(index, index + 1)
for index in xrange(1, 6):
score = self._student.getScore(index)
self.assertEquals(index + 1, score)
# Creates a suite and runs the text-based test on it
suite = unittest.makeSuite(TestStudent)
unittest.TextTestRunner().run(suite)
| gpl-3.0 | 2,613,367,701,992,044,500 | 28.095238 | 61 | 0.578833 | false |
robcarver17/systematictradingexamples | plots_for_perhaps/compareoptmethods.py | 1 | 22426 | import numpy as np
import matplotlib.pyplot as plt
from matplotlib.pyplot import plot, show, xticks, xlabel, ylabel, legend, yscale, title, savefig, rcParams, figure, hist, text, bar, subplots
import Image
def file_process(filename):
fig = plt.gcf()
fig.set_size_inches(18.5,10.5)
fig.savefig("/home/rob/%s.png" % filename,dpi=300)
fig.savefig("/home/rob/%sLOWRES.png" % filename,dpi=50)
Image.open("/home/rob/%s.png" % filename).convert('L').save("/home/rob/%s.jpg" % filename)
Image.open("/home/rob/%sLOWRES.png" % filename).convert('L').save("/home/rob/%sLOWRES.jpg" % filename)
"""
compare the following weight-estimation methods:
- handcrafting
- bootstrapped
- one shot
- equal weights
- market cap weights
"""
import pandas as pd
from datetime import datetime as dt
def read_ts_csv(fname, dindex="Date"):
data=pd.read_csv(fname)
dateindex=[dt.strptime(dx, "%d/%m/%y") for dx in list(data[dindex])]
data.index=dateindex
del(data[dindex])
return data
def calc_asset_returns(rawdata, tickers):
asset_returns=pd.concat([get_monthly_tr(tickname, rawdata) for tickname in tickers], axis=1)
asset_returns.columns=tickers
return asset_returns
def get_monthly_tr(tickname, rawdata):
total_returns=rawdata[tickname+"_TR"]
return (total_returns / total_returns.shift(1)) - 1.0
def portfolio_return(asset_returns, cash_weights):
index_returns=asset_returns.cumsum().ffill().diff()
cash_align = cash_weights.reindex(asset_returns.index, method="ffill")
cash_align[np.isnan(index_returns)]=0.0
cash_align[np.isnan(cash_align)]=0.0
vols=pd.ewmstd(asset_returns, span=100, min_periods=1)
riskweights=pd.DataFrame(cash_align.values / vols.values, index=vols.index)
riskweights.columns=asset_returns.columns
riskweights[np.isnan(riskweights)]=0.0
def _rowfix(x):
if all([y==0.0 for y in x]):
return x
sumx=sum(x)
return [y/sumx for y in x]
riskweights = riskweights.apply(_rowfix, axis=1)
portfolio_returns=asset_returns*riskweights
portfolio_returns[np.isnan(portfolio_returns)]=0.0
portfolio_returns=portfolio_returns.sum(axis=1)
return portfolio_returns
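# Editor's sketch (not part of the original script): one way portfolio_return()
# might be called, using made-up data; the column names are assumptions and the
# code relies on the legacy pandas API (pd.ewmstd) used throughout this file.
def _demo_portfolio_return():
    dates = pd.date_range(dt(2000, 1, 1), periods=500)
    fake_returns = pd.DataFrame(np.random.randn(500, 2) * 0.01,
                                index=dates, columns=["SP500", "US10"])
    # a single row of cash weights, forward-filled over the whole sample
    fixed_weights = pd.DataFrame([[0.6, 0.4]], index=[dates[0]],
                                 columns=fake_returns.columns)
    return portfolio_return(fake_returns, fixed_weights)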
import matplotlib.pyplot as plt
from scipy import stats
import pandas as pd
import numpy as np
from datetime import datetime as dt
import datetime
from scipy.optimize import minimize
from copy import copy
import random
def correlation_matrix(returns):
"""
Calcs a correlation matrix using weekly returns from a pandas time series
We use weekly returns because otherwise end of day effects, especially over time zones, give
unrealistically low correlations
"""
asset_index=returns.cumsum().ffill()
asset_index=asset_index.resample('1W') ## Only want index, fill method is irrelevant
asset_index = asset_index - asset_index.shift(1)
return asset_index.corr().values
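# Editor's sketch (not part of the original script): correlation_matrix() expects a
# DataFrame of daily returns and reports the correlation of weekly returns; this
# assumes the older pandas resample() behaviour the rest of the file relies on.
def _demo_correlation_matrix():
    dates = pd.date_range(dt(2000, 1, 1), periods=260)
    daily_returns = pd.DataFrame(np.random.randn(260, 3) * 0.01,
                                 index=dates, columns=["A", "B", "C"])
    return correlation_matrix(daily_returns)  # 3x3 numpy array, ones on the diagonal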
def create_dull_pd_matrix(dullvalue=0.0, dullname="A", startdate=pd.datetime(1970,1,1).date(), enddate=datetime.datetime.now().date(), index=None):
"""
create a single valued pd matrix
"""
if index is None:
index=pd.date_range(startdate, enddate)
dullvalue=np.array([dullvalue]*len(index))
ans=pd.DataFrame(dullvalue, index, columns=[dullname])
return ans
def addem(weights):
## Used for constraints
return 1.0 - sum(weights)
def variance(weights, sigma):
## returns the variance (NOT standard deviation) given weights and sigma
return (np.matrix(weights)*sigma*np.matrix(weights).transpose())[0,0]
def neg_SR(weights, sigma, mus):
## Returns minus the Sharpe Ratio (as we're minimising)
"""
estreturn=250.0*((np.matrix(x)*mus)[0,0])
variance=(variance(x,sigma)**.5)*16.0
"""
estreturn=(np.matrix(weights)*mus)[0,0]
std_dev=(variance(weights,sigma)**.5)
return -estreturn/std_dev
def sigma_from_corr(std, corr):
sigma=std*corr*std
return sigma
def basic_opt(std,corr,mus):
number_assets=mus.shape[0]
sigma=sigma_from_corr(std, corr)
start_weights=[1.0/number_assets]*number_assets
## Constraints - positive weights, adding to 1.0
bounds=[(0.0,1.0)]*number_assets
cdict=[{'type':'eq', 'fun':addem}]
return minimize(neg_SR_riskfree, start_weights, (sigma, mus), method='SLSQP', bounds=bounds, constraints=cdict, tol=0.00001)
def neg_SR_riskfree(weights, sigma, mus, riskfree=0.005):
## Returns minus the Sharpe Ratio (as we're minimising)
"""
estreturn=250.0*((np.matrix(x)*mus)[0,0])
variance=(variance(x,sigma)**.5)*16.0
"""
estreturn=(np.matrix(weights)*mus)[0,0] - riskfree
std_dev=(variance(weights,sigma)**.5)
return -estreturn/std_dev
def equalise_vols(returns, default_vol):
"""
    Normalises returns so that they have the in-sample vol of default_vol (annualised)
Assumes daily returns
"""
factors=(default_vol/16.0)/returns.std(axis=0)
facmat=create_dull_pd_matrix(dullvalue=factors, dullname=returns.columns, index=returns.index)
norm_returns=returns*facmat
norm_returns.columns=returns.columns
return norm_returns
def offdiag_matrix(offvalue, nlength):
identity=np.diag([1.0]*nlength)
for x in range(nlength):
for y in range(nlength):
if x!=y:
identity[x][y]=offvalue
return identity
def get_avg_corr(sigma):
new_sigma=copy(sigma)
np.fill_diagonal(new_sigma,np.nan)
return np.nanmean(new_sigma)
def nearest_to_listvals(x, lvalues=[0.0, 0.25, 0.5, 0.75, 0.9]):
## return x rounded to nearest of lvalues
if len(lvalues)==1:
return lvalues[0]
d1=abs(x - lvalues[0])
d2=abs(x - lvalues[1])
if d1<d2:
return lvalues[0]
newlvalues=lvalues[1:]
return nearest_to_listvals(x, newlvalues)
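# Editor's note (worked example, not part of the original script): with the default
# grid [0.0, 0.25, 0.5, 0.75, 0.9] the function snaps a correlation to its nearest
# grid point, e.g.:
def _demo_nearest_to_listvals():
    assert nearest_to_listvals(0.6) == 0.5
    assert nearest_to_listvals(-0.2) == 0.0
    assert nearest_to_listvals(0.95) == 0.9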
def handcrafted(returns, equalisevols=True, default_vol=0.2):
"""
Handcrafted optimiser
"""
count_assets=len(returns.columns)
try:
assert equalisevols is True
assert count_assets<=3
except:
raise Exception("Handcrafting only works with equalised vols and 3 or fewer assets")
if count_assets<3:
## Equal weights
return [1.0/count_assets]*count_assets
est_corr=returns.corr().values
c1=nearest_to_listvals(est_corr[0][1])
c2=nearest_to_listvals(est_corr[0][2])
c3=nearest_to_listvals(est_corr[1][2])
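    ## Editor's note: HANDCRAFTED_WTS is a lookup table of weights keyed by the three
    ## rounded correlations (c1, c2, c3); it is assumed to be defined elsewhere in the
    ## full script, outside this excerpt.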
wts_to_use=HANDCRAFTED_WTS[(HANDCRAFTED_WTS.c1==c1) & (HANDCRAFTED_WTS.c2==c2) & (HANDCRAFTED_WTS.c3==c3)].irow(0)
return [wts_to_use.w1, wts_to_use.w2, wts_to_use.w3]
def opt_shrinkage(returns, shrinkage_factors, equalisevols=True, default_vol=0.2):
"""
Returns the optimal portfolio for the dataframe returns using shrinkage
shrinkage_factors is a tuple, shrinkage of mean and correlation
If equalisevols=True then normalises returns to have same standard deviation; the weights returned
will be 'risk weightings'
"""
if equalisevols:
use_returns=equalise_vols(returns, default_vol)
else:
use_returns=returns
(shrinkage_mean, shrinkage_corr)=shrinkage_factors
## Sigma matrix
## Use correlation and then convert back to variance
est_corr=use_returns.corr().values
avg_corr=get_avg_corr(est_corr)
prior_corr=offdiag_matrix(avg_corr, est_corr.shape[0])
sigma_corr=shrinkage_corr*prior_corr+(1-shrinkage_corr)*est_corr
cov_vector=use_returns.std().values
sigma=cov_vector*sigma_corr*cov_vector
## mus vector
avg_return=np.mean(use_returns.mean())
est_mus=np.array([use_returns[asset_name].mean() for asset_name in use_returns.columns], ndmin=2).transpose()
prior_mus=np.array([avg_return for asset_name in use_returns.columns], ndmin=2).transpose()
mus=shrinkage_mean*prior_mus+(1-shrinkage_mean)*est_mus
## Starting weights
number_assets=use_returns.shape[1]
start_weights=[1.0/number_assets]*number_assets
## Constraints - positive weights, adding to 1.0
bounds=[(0.0,1.0)]*number_assets
cdict=[{'type':'eq', 'fun':addem}]
ans=minimize(neg_SR, start_weights, (sigma, mus), method='SLSQP', bounds=bounds, constraints=cdict, tol=0.00001)
return ans['x']
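# Editor's sketch (not part of the original script): calling the shrinkage optimiser
# with a 50/50 shrinkage of means and correlations towards their cross-sectional
# averages; the asset names are made up and the legacy pandas API is assumed.
def _demo_opt_shrinkage():
    dates = pd.date_range(dt(2000, 1, 1), periods=500)
    rets = pd.DataFrame(np.random.randn(500, 3) * 0.01,
                        index=dates, columns=["bonds", "equities", "gold"])
    return opt_shrinkage(rets, shrinkage_factors=(0.5, 0.5))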
def handcraft_equal(returns):
"""
    dynamic handcrafting, equal weights only
    (note: grouping_tree and tree_to_weights below are unfinished placeholders)
"""
## RETURNS Correlation matrix
use_returns=equalise_vols(returns, default_vol=16.0)
## Sigma matrix = correlations
sigma=use_returns.cov()
sigma[sigma<0.0]=0.0
ungroupedreturns=dict([(x,returns[x]) for x in returns.columns])
tree_data=hc_sigma(sigma, ungroupedreturns)
tree_data=grouping_tree(tree_data)
weights=tree_to_weights(tree_data)
return weights
def hc_sigma(ungrouped_sigma, ungroupedreturns, groupdata=None):
"""
handcraft weights from sigma matrix
Algo:
- Find pair of assets with highest correlation
- Form them into a new group with equal weights
- The group becomes like a new asset
    - Repeat on the grouped assets until only one group remains, then stop.
"""
if len(ungroupedreturns)==1:
return groupdata[1]
if groupdata is None:
## first run
## groupdata stores grouping information
## To begin with each group just consists of one asset
groupdata=[[],list(ungrouped_sigma.columns)]
groupedreturns=dict()
## iteration
while len(ungroupedreturns)>0:
## current_sigma consists of the correlation of things we currently have
if len(ungroupedreturns)==1:
idx_list=[0]
else:
idx_list=find_highest_corr(ungrouped_sigma)
name_list=tuple([ungrouped_sigma.columns[idx] for idx in idx_list])
## pair those things up
(ungrouped_sigma, ungroupedreturns, groupedreturns,
groupdata)=group_assets(ungrouped_sigma, ungroupedreturns, groupedreturns, groupdata, idx_list, name_list)
new_returns=pd.concat(groupedreturns, axis=1)
new_sigma=new_returns.corr()
## recursive
return hc_sigma(new_sigma, groupedreturns, groupdata=[[],groupdata[0]])
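# Editor's sketch (not part of the original script): hc_sigma() takes a correlation
# DataFrame plus a dict of the underlying return series, and returns the nested
# grouping it builds, pairing the most correlated assets first; data here is random.
def _demo_hc_sigma():
    dates = pd.date_range(dt(2000, 1, 1), periods=500)
    rets = pd.DataFrame(np.random.randn(500, 3) * 0.01,
                        index=dates, columns=["A", "B", "C"])
    ungrouped = dict([(x, rets[x]) for x in rets.columns])
    return hc_sigma(rets.corr(), ungrouped)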
def find_highest_corr(sigmat):
new_sigmat=copy(sigmat.values)
np.fill_diagonal(new_sigmat, -100.0)
(i,j)=np.unravel_index(new_sigmat.argmax(), new_sigmat.shape)
return (i,j)
def group_assets(ungrouped_sigma, ungroupedreturns, groupedreturns, groupdata, idx_list, name_list):
"""
Group assets
"""
todelete=[]
names=[]
grouping=[]
group_returns=[]
weights=[1.0/len(idx_list)]*len(idx_list) ## could have more complex thing here...
for (itemweight,idx, iname) in zip(weights,idx_list, name_list):
gi=groupdata[1][idx]
grouping.append(gi)
gri=ungroupedreturns.pop(iname)
group_returns.append(gri*itemweight)
names.append(gri.name)
ungrouped_sigma=ungrouped_sigma.drop(iname, axis=0)
ungrouped_sigma=ungrouped_sigma.drop(iname, axis=1)
todelete.append(idx)
groupdata[0].append(grouping)
gr_returns=pd.concat(group_returns, axis=1)
gr_returns=gr_returns.sum(axis=1)
gr_returns.name="[%s]" % "+".join(names)
print "Pairing %s" % ", ".join(names)
groupedreturns[gr_returns.name]=gr_returns
groupdata[1]=[element for eindex, element in enumerate(groupdata[1]) if eindex not in todelete]
return (ungrouped_sigma, ungroupedreturns, groupedreturns,
groupdata)
def grouping_tree(tree_data, sigma=None):
    """
    Group branches of 2 into larger groups if possible.
    Placeholder - the regrouping logic is not implemented here; the tree is
    returned unchanged so that callers (e.g. handcraft_equal) keep working.
    """
    return tree_data
def corrs_in_group(group, sigma):
    # Return the sub-matrix of correlations for the assets inside a group
    asset_list=sum(group, [])
    littlesigma=sigma.loc[asset_list, asset_list]
    return littlesigma
def corr_from_leaf(leaf, sigma):
return sigma[leaf[0]][leaf[1]]
def tree_to_weights(tree_data):
    """
    convert a tree into weights
    (placeholder - the conversion from a grouping tree to weights is not
    implemented here)
    """
    pass
def markosolver(returns, equalisemeans=False, equalisevols=True, default_vol=0.2, default_SR=1.0):
"""
Returns the optimal portfolio for the dataframe returns
    If equalisemeans=True then assumes all assets have the same return; if False, uses the asset means
If equalisevols=True then normalises returns to have same standard deviation; the weights returned
will be 'risk weightings'
    Note if equalisemeans=True and equalisevols=True this effectively assumes all assets have the same Sharpe ratio
"""
if equalisevols:
use_returns=equalise_vols(returns, default_vol)
else:
use_returns=returns
## Sigma matrix
sigma=use_returns.cov().values
## Expected mean returns
est_mus=[use_returns[asset_name].mean() for asset_name in use_returns.columns]
missingvals=[np.isnan(x) for x in est_mus]
if equalisemeans:
## Don't use the data - Set to the average Sharpe Ratio
mus=[default_vol*default_SR]*returns.shape[1]
else:
mus=est_mus
mus=np.array(mus, ndmin=2).transpose()
## Starting weights
number_assets=use_returns.shape[1]
start_weights=[1.0/number_assets]*number_assets
## Constraints - positive weights, adding to 1.0
bounds=[(0.0,1.0)]*number_assets
cdict=[{'type':'eq', 'fun':addem}]
ans=minimize(neg_SR, start_weights, (sigma, mus), method='SLSQP', bounds=bounds, constraints=cdict, tol=0.00001)
wts=ans['x']
return wts
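## Minimal usage sketch (assumes equalise_vols, neg_SR and addem are defined
## earlier in this file and 'returns' is a DataFrame of monthly returns):
##     wts = markosolver(returns, equalisemeans=True, equalisevols=True)
## With both flags True the sample means are ignored, so the optimiser is
## effectively assuming equal Sharpe ratios and the answer is driven by the
## correlation structure alone.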
def bootstrap_portfolio(returns_to_bs, monte_carlo=200, monte_length=250, equalisemeans=False, equalisevols=True, default_vol=0.2, default_SR=1.0):
"""
Given dataframe of returns; returns_to_bs, performs a bootstrap optimisation
We run monte_carlo numbers of bootstraps
Each one contains monte_length days drawn randomly, with replacement
(so *not* block bootstrapping)
The other arguments are passed to the optimisation function markosolver
    Note - doesn't deal gracefully with missing data. Will end up downweighting stuff depending on how
    much data is missing in each bootstrap. You'll need to think about how to solve this problem.
"""
weightlist=[]
for unused_index in range(monte_carlo):
bs_idx=[int(random.uniform(0,1)*len(returns_to_bs)) for i in range(monte_length)]
returns=returns_to_bs.iloc[bs_idx,:]
weight=markosolver(returns, equalisemeans=equalisemeans, equalisevols=equalisevols, default_vol=default_vol, default_SR=default_SR)
weightlist.append(weight)
    ### We can take an average here; only because our weights always add up to 1. If that isn't true
    ### then you will need to do some kind of renormalisation
theweights_mean=list(np.mean(weightlist, axis=0))
return theweights_mean
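## Minimal usage sketch (hypothetical arguments):
##     wts = bootstrap_portfolio(returns, monte_carlo=200, monte_length=250)
## Each bootstrap draws monte_length rows with replacement, runs markosolver on
## the resampled returns, and the final weights are the simple average of the
## monte_carlo weight vectors.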
def optimise_over_periods(data, date_method, fit_method, rollyears=20, equalisemeans=False, equalisevols=True,
monte_carlo=100, monte_length=None, shrinkage_factors=(0.5, 0.5),
weightdf=None):
"""
Do an optimisation
Returns data frame of weights
    Note if fitting in sample the weights will be the same for every period (somewhat boring)
Doesn't deal with eg missing data in certain subperiods
"""
if monte_length is None:
monte_length=int(len(data.index)*.1)
## Get the periods
fit_periods=generate_fitting_dates(data, date_method, rollyears=rollyears)
## Do the fitting
## Build up a list of weights, which we'll concat
weight_list=[]
for fit_tuple in fit_periods:
## Fit on the slice defined by first two parts of the tuple
period_subset_data=data[fit_tuple[0]:fit_tuple[1]]
## Can be slow, if bootstrapping, so indicate where we are
print "Fitting data for %s to %s" % (str(fit_tuple[2]), str(fit_tuple[3]))
if fit_method=="one_period":
weights=markosolver(period_subset_data, equalisemeans=equalisemeans, equalisevols=equalisevols)
elif fit_method=="bootstrap":
weights=bootstrap_portfolio(period_subset_data, equalisemeans=equalisemeans,
equalisevols=equalisevols, monte_carlo=monte_carlo,
monte_length=monte_length)
elif fit_method=="shrinkage":
weights=opt_shrinkage(period_subset_data, shrinkage_factors=shrinkage_factors, equalisevols=equalisevols)
elif fit_method=="fixed":
weights=[float(weightdf[weightdf.Country==ticker].Weight.values) for ticker in list(period_subset_data.columns)]
else:
raise Exception("Fitting method %s unknown" % fit_method)
## We adjust dates slightly to ensure no overlaps
dindex=[fit_tuple[2]+datetime.timedelta(seconds=1), fit_tuple[3]-datetime.timedelta(seconds=1)]
## create a double row to delineate start and end of test period
weight_row=pd.DataFrame([weights]*2, index=dindex, columns=data.columns)
weight_list.append(weight_row)
weight_df=pd.concat(weight_list, axis=0)
return weight_df
"""
Now we need to do this with expanding or rolling window
"""
"""
Generate the date tuples
"""
def generate_fitting_dates(data, date_method, rollyears=20):
"""
    generate a list of 4-tuples, one element for each year in the data
each tuple contains [fit_start, fit_end, period_start, period_end] datetime objects
the last period will be a 'stub' if we haven't got an exact number of years
date_method can be one of 'in_sample', 'expanding', 'rolling'
if 'rolling' then use rollyears variable
"""
start_date=data.index[0]
end_date=data.index[-1]
## generate list of dates, one year apart, including the final date
yearstarts=list(pd.date_range(start_date, end_date, freq="12M"))+[end_date]
## loop through each period
periods=[]
for tidx in range(len(yearstarts))[1:-1]:
## these are the dates we test in
period_start=yearstarts[tidx]
period_end=yearstarts[tidx+1]
## now generate the dates we use to fit
if date_method=="in_sample":
fit_start=start_date
elif date_method=="expanding":
fit_start=start_date
elif date_method=="rolling":
yearidx_to_use=max(0, tidx-rollyears)
fit_start=yearstarts[yearidx_to_use]
else:
raise Exception("don't recognise date_method %s" % date_method)
if date_method=="in_sample":
fit_end=end_date
elif date_method in ['rolling', 'expanding']:
fit_end=period_start
else:
raise Exception("don't recognise date_method %s " % date_method)
periods.append([fit_start, fit_end, period_start, period_end])
## give the user back the list of periods
return periods
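## Illustrative output shape (dates are hypothetical):
##     generate_fitting_dates(data, "rolling", rollyears=5)
##     -> [[fit_start, fit_end, period_start, period_end], ...]
## For 'expanding' fit_start stays at the first date; for 'rolling' it trails
## period_start by at most rollyears years; in both cases fit_end equals
## period_start so there is no look-ahead in the fit.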
rawdata=read_ts_csv("/home/rob/workspace/systematictradingexamples/plots_for_perhaps/MSCI_data.csv")
refdata=pd.read_csv("/home/rob/workspace/systematictradingexamples/plots_for_perhaps/MSCI_ref.csv")
tickers=list(refdata[(refdata.EmorDEV=="DEV") & (refdata.Type=="Country")].Country.values) #mom 12bp
#tickers=list(refdata[refdata.Type=="Country"].Country.values) #mom 12bp
fix_hcweights=pd.read_csv("/home/rob/workspace/systematictradingexamples/plots_for_perhaps/devhcweights.csv")
fix_capweights=pd.read_csv("/home/rob/workspace/systematictradingexamples/plots_for_perhaps/devcapweights.csv")
fix_eqweights=pd.DataFrame(dict(Country=tickers, Weight=[1.0/len(tickers)]*len(tickers)))
data=calc_asset_returns(rawdata, tickers)
### IDEA: to bootstrap the results
### Repeatedly draw from 'data' to make new pseudo series
oneperiodweights=optimise_over_periods(data, "expanding", "one_period", equalisemeans=False, equalisevols=True)
#bootstrapweights=optimise_over_periods(data, "expanding", "bootstrap", equalisemeans=True, equalisevols=True)
exposthcweights=optimise_over_periods(data, "expanding", "fixed", weightdf=fix_hcweights, equalisemeans=True, equalisevols=True)
equalweights=optimise_over_periods(data, "expanding", "fixed", weightdf=fix_eqweights, equalisemeans=True, equalisevols=True)
marketcapweights=optimise_over_periods(data, "expanding", "fixed", weightdf=fix_capweights, equalisemeans=True, equalisevols=True)
index_returns=(1.0+data).cumprod().ffill()
last_return=index_returns.irow(-1).values
last_return=pd.DataFrame(np.array([last_return]*len(data)), data.index)
last_return.columns=data.columns
index_returns = index_returns / last_return
marketcapweights = marketcapweights.reindex(index_returns.index, method="ffill")
marketcapweights=marketcapweights*index_returns
marketcapweights=marketcapweights.ffill()
## portfolio, take out missing weights
p1=portfolio_return(data, oneperiodweights)[pd.datetime(1994,1,1):]
#p2=portfolio_return(data, bootstrapweights)
p3=portfolio_return(data, exposthcweights)[pd.datetime(1994,1,1):]
p4=portfolio_return(data, equalweights)[pd.datetime(1994,1,1):]
p5=portfolio_return(data, marketcapweights)[pd.datetime(1994,1,1):]
drag1=p3 - p1
drag2=p4 - p5
def stats(x):
ann_mean=x.mean()*12
ann_std = x.std()*(12**.5)
geo_mean = ann_mean - (ann_std**2)/2.0
sharpe = geo_mean / ann_std
return (ann_mean, ann_std, geo_mean, sharpe)
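## Worked example with hypothetical monthly numbers: if x.mean()=0.005 and
## x.std()=0.03 then ann_mean=0.06, ann_std=0.03*sqrt(12)~=0.104,
## geo_mean=0.06-0.104**2/2~=0.055 and sharpe~=0.53.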
print stats(p1)
print stats(p3)
print stats(p4)
print stats(p5)
toplot=pd.concat([p1, p3, p4, p5], axis=1)
toplot.columns=["Optimised", "Handcraft", "Equal", "Market Cap"]
toplot.cumsum().plot()
show()
p1.cumsum().plot(color="black", ls="solid")
p3.cumsum().plot(color="gray", ls="solid")
p4.cumsum().plot(color="black", ls="dashed")
p5.cumsum().plot(color="gray", ls="dashed")
legend( ["Optimised", "Handcraft", "Equal", "Market Cap"], loc="upper left")
frame=plt.gca()
#frame.get_yaxis().set_visible(False)
rcParams.update({'font.size': 18})
file_process("compareoptmethods")
show()
drag1.cumsum().plot(color="gray", ls="solid")
legend( [ "Handcraft vs MktCap"], loc="upper left")
frame=plt.gca()
#frame.get_yaxis().set_visible(False)
rcParams.update({'font.size': 18})
file_process("compareoptmethodstracking")
show()
| gpl-2.0 | 4,701,163,412,939,915,000 | 30.408964 | 147 | 0.66463 | false |
anchore/anchore-engine | tests/integration/subsys/auth/stores/test_basic_store.py | 1 | 5494 | import json
import unittest
from anchore_engine.db import session_scope
from anchore_engine.subsys import identities
from anchore_engine.subsys.identities import AccountTypes, UserAccessCredentialTypes
from anchore_engine.subsys.auth.stores import basic as basic_accountstore
from yosai.core import UsernamePasswordToken, DefaultPermission
from passlib.context import CryptContext
class TestBasicStore(unittest.TestCase):
"""
Tests for the auth subsys
Identities etc.
"""
@classmethod
def setup_engine_config(cls, db_connect_str):
"""
Sets the config for the service to bootstrap a specific db.
:param db_connect_str:
:return:
"""
from anchore_engine.configuration import localconfig
localconfig.load_defaults()
localconfig.localconfig["credentials"] = {
"database": {"db_connect": db_connect_str}
}
return localconfig.localconfig
@classmethod
def init_db(cls, connect_str="sqlite:///:memory:", do_bootstrap=True):
"""
Policy-Engine specific db initialization and setup for testing.
:param connect_str: connection string, defaults to sqllite in-memory if none provided
:return:
"""
conf = cls.setup_engine_config(connect_str)
from anchore_engine.db import (
initialize,
Account,
AccountUser,
AccessCredential,
Anchore,
)
from anchore_engine.db.entities.common import do_create
from anchore_engine.version import version, db_version
initialize(
versions={"service_version": version, "db_version": db_version},
localconfig=conf,
)
do_create(
specific_tables=[
Account.__table__,
AccountUser.__table__,
AccessCredential.__table__,
Anchore.__table__,
]
)
@classmethod
def setUpClass(cls):
cls.init_db()
#
# def tearDown(self):
# for accnt in identities.list_accounts():
# print('Deleting accnt: {}'.format(accnt))
# identities.delete_account(accnt['name'])
def test_account_store(self):
cc = CryptContext(schemes=["argon2"])
with session_scope() as session:
mgr = identities.manager_factory.for_session(session)
accnt = mgr.create_account(
account_name="accountAbc",
account_type=AccountTypes.user,
email="someemail",
)
user1 = mgr.create_user(
account_name=accnt["name"], username="testuser1", password="password123"
)
print("user 1: {}".format(user1))
user2 = mgr.create_user(
account_name=accnt["name"], username="testuser2", password="password123"
)
print("user 2: {}".format(user2))
accnt2 = mgr.create_account(
account_name="admin1",
account_type=AccountTypes.admin,
email="someemail",
)
user3 = mgr.create_user(
account_name=accnt2["name"], username="admin1", password="password123"
)
print("user 3: {}".format(user3))
store = basic_accountstore.DbAccountStore()
# Authc stuff
token = UsernamePasswordToken(
username="testuser1",
password=user1["credentials"][UserAccessCredentialTypes.password]["value"],
)
print(token.credentials)
resp = store.get_authc_info(token.identifier)
print(resp)
self.assertTrue(
token.credentials
== bytes(resp["authc_info"]["password"]["credential"], "utf8")
)
# Authz stuff
authz_resp = store.get_authz_permissions(token.identifier)
print(authz_resp)
# Standard user
self.assertTrue(
DefaultPermission(
parts=json.loads(authz_resp[user1["account_name"]])[0]
).implies(
DefaultPermission(
parts={
"domain": user1["account_name"],
"action": "*",
"target": "*",
}
)
)
)
self.assertIsNone(authz_resp.get("*"))
self.assertTrue(
DefaultPermission(
parts=json.loads(authz_resp[user1["account_name"]])[0]
).implies(
DefaultPermission(
parts={
"domain": user1["account_name"],
"action": "listImages",
"target": "*",
}
)
)
)
admin_token = UsernamePasswordToken(
username="admin1",
password=user3["credentials"][UserAccessCredentialTypes.password]["value"],
)
# Authz stuff
authz_resp = store.get_authz_permissions(admin_token.identifier)
print(authz_resp)
# Admin user
self.assertIsNotNone(authz_resp.get("*"))
self.assertIsNone(authz_resp.get(user3["account_name"]))
self.assertTrue(
DefaultPermission(parts=json.loads(authz_resp["*"])[0]).implies(
DefaultPermission(parts={"domain": "*", "action": "*", "target": "*"})
)
)
| apache-2.0 | -7,519,868,430,979,338,000 | 31.508876 | 93 | 0.542956 | false |
bqbn/addons-server | src/olympia/users/tests/test_user_utils.py | 1 | 1451 | # -*- coding: utf-8 -*-
import pytest
from olympia.amo.tests import user_factory
from olympia.users.utils import (
UnsubscribeCode, system_addon_submission_allowed)
def test_email_unsubscribe_code_parse():
email = u'nobody@mozîlla.org'
token, hash_ = UnsubscribeCode.create(email)
r_email = UnsubscribeCode.parse(token, hash_)
assert email == r_email
# A bad token or hash raises ValueError
with pytest.raises(ValueError):
UnsubscribeCode.parse(token, hash_[:-5])
with pytest.raises(ValueError):
UnsubscribeCode.parse(token[5:], hash_)
system_guids = pytest.mark.parametrize('guid', [
'foø@mozilla.org', '[email protected]', '[email protected]',
'blâ[email protected]', 'foø@Mozilla.Org', '[email protected]',
'[email protected]', '[email protected]', 'blâ[email protected]',
'[email protected]', '[email protected]',
'[email protected]'
])
@system_guids
@pytest.mark.django_db
def test_system_addon_submission_allowed_mozilla_allowed(guid):
user = user_factory(email='[email protected]')
data = {'guid': guid}
assert system_addon_submission_allowed(user, data)
@system_guids
@pytest.mark.django_db
def test_system_addon_submission_allowed_not_mozilla_not_allowed(guid):
user = user_factory(email='[email protected]')
data = {'guid': guid}
assert not system_addon_submission_allowed(user, data)
| bsd-3-clause | 2,683,786,618,169,446,000 | 31.133333 | 76 | 0.707469 | false |
maxive/erp | addons/google_drive/models/google_drive.py | 9 | 11240 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import logging
import json
import re
import requests
import werkzeug.urls
from odoo import api, fields, models
from odoo.exceptions import RedirectWarning, UserError
from odoo.tools import pycompat
from odoo.tools.safe_eval import safe_eval
from odoo.tools.translate import _
from odoo.addons.google_account.models.google_service import GOOGLE_TOKEN_ENDPOINT, TIMEOUT
_logger = logging.getLogger(__name__)
class GoogleDrive(models.Model):
_name = 'google.drive.config'
_description = "Google Drive templates config"
@api.multi
def get_google_drive_url(self, res_id, template_id):
self.ensure_one()
self = self.sudo()
model = self.model_id
filter_name = self.filter_id.name if self.filter_id else False
record = self.env[model.model].browse(res_id).read()[0]
record.update({
'model': model.name,
'filter': filter_name
})
name_gdocs = self.name_template
try:
name_gdocs = name_gdocs % record
except:
raise UserError(_("At least one key cannot be found in your Google Drive name pattern"))
attachments = self.env["ir.attachment"].search([('res_model', '=', model.model), ('name', '=', name_gdocs), ('res_id', '=', res_id)])
url = False
if attachments:
url = attachments[0].url
else:
url = self.copy_doc(res_id, template_id, name_gdocs, model.model).get('url')
return url
@api.model
def get_access_token(self, scope=None):
Config = self.env['ir.config_parameter'].sudo()
google_drive_refresh_token = Config.get_param('google_drive_refresh_token')
user_is_admin = self.env['res.users'].browse(self.env.user.id)._is_admin()
if not google_drive_refresh_token:
if user_is_admin:
dummy, action_id = self.env['ir.model.data'].get_object_reference('base_setup', 'action_general_configuration')
msg = _("You haven't configured 'Authorization Code' generated from google, Please generate and configure it .")
raise RedirectWarning(msg, action_id, _('Go to the configuration panel'))
else:
raise UserError(_("Google Drive is not yet configured. Please contact your administrator."))
google_drive_client_id = Config.get_param('google_drive_client_id')
google_drive_client_secret = Config.get_param('google_drive_client_secret')
        # Get a new access token with the help of the old refresh token
data = {
'client_id': google_drive_client_id,
'refresh_token': google_drive_refresh_token,
'client_secret': google_drive_client_secret,
'grant_type': "refresh_token",
'scope': scope or 'https://www.googleapis.com/auth/drive'
}
headers = {"Content-type": "application/x-www-form-urlencoded"}
try:
req = requests.post(GOOGLE_TOKEN_ENDPOINT, data=data, headers=headers, timeout=TIMEOUT)
req.raise_for_status()
except requests.HTTPError:
if user_is_admin:
dummy, action_id = self.env['ir.model.data'].get_object_reference('base_setup', 'action_general_configuration')
msg = _("Something went wrong during the token generation. Please request again an authorization code .")
raise RedirectWarning(msg, action_id, _('Go to the configuration panel'))
else:
raise UserError(_("Google Drive is not yet configured. Please contact your administrator."))
return req.json().get('access_token')
@api.model
def copy_doc(self, res_id, template_id, name_gdocs, res_model):
google_web_base_url = self.env['ir.config_parameter'].sudo().get_param('web.base.url')
access_token = self.get_access_token()
# Copy template in to drive with help of new access token
request_url = "https://www.googleapis.com/drive/v2/files/%s?fields=parents/id&access_token=%s" % (template_id, access_token)
headers = {"Content-type": "application/x-www-form-urlencoded"}
try:
req = requests.get(request_url, headers=headers, timeout=TIMEOUT)
req.raise_for_status()
parents_dict = req.json()
except requests.HTTPError:
raise UserError(_("The Google Template cannot be found. Maybe it has been deleted."))
record_url = "Click on link to open Record in Odoo\n %s/?db=%s#id=%s&model=%s" % (google_web_base_url, self._cr.dbname, res_id, res_model)
data = {
"title": name_gdocs,
"description": record_url,
"parents": parents_dict['parents']
}
request_url = "https://www.googleapis.com/drive/v2/files/%s/copy?access_token=%s" % (template_id, access_token)
headers = {
'Content-type': 'application/json',
'Accept': 'text/plain'
}
# resp, content = Http().request(request_url, "POST", data_json, headers)
req = requests.post(request_url, data=json.dumps(data), headers=headers, timeout=TIMEOUT)
req.raise_for_status()
content = req.json()
res = {}
if content.get('alternateLink'):
res['id'] = self.env["ir.attachment"].create({
'res_model': res_model,
'name': name_gdocs,
'res_id': res_id,
'type': 'url',
'url': content['alternateLink']
}).id
# Commit in order to attach the document to the current object instance, even if the permissions has not been written.
self._cr.commit()
res['url'] = content['alternateLink']
key = self._get_key_from_url(res['url'])
request_url = "https://www.googleapis.com/drive/v2/files/%s/permissions?emailMessage=This+is+a+drive+file+created+by+Odoo&sendNotificationEmails=false&access_token=%s" % (key, access_token)
data = {'role': 'writer', 'type': 'anyone', 'value': '', 'withLink': True}
try:
req = requests.post(request_url, data=json.dumps(data), headers=headers, timeout=TIMEOUT)
req.raise_for_status()
except requests.HTTPError:
raise self.env['res.config.settings'].get_config_warning(_("The permission 'reader' for 'anyone with the link' has not been written on the document"))
if self.env.user.email:
data = {'role': 'writer', 'type': 'user', 'value': self.env.user.email}
try:
requests.post(request_url, data=json.dumps(data), headers=headers, timeout=TIMEOUT)
except requests.HTTPError:
pass
return res
@api.model
def get_google_drive_config(self, res_model, res_id):
'''
Function called by the js, when no google doc are yet associated with a record, with the aim to create one. It
will first seek for a google.docs.config associated with the model `res_model` to find out what's the template
of google doc to copy (this is usefull if you want to start with a non-empty document, a type or a name
different than the default values). If no config is associated with the `res_model`, then a blank text document
with a default name is created.
:param res_model: the object for which the google doc is created
:param ids: the list of ids of the objects for which the google doc is created. This list is supposed to have
a length of 1 element only (batch processing is not supported in the code, though nothing really prevent it)
:return: the config id and config name
'''
# TO DO in master: fix my signature and my model
if isinstance(res_model, pycompat.string_types):
res_model = self.env['ir.model'].search([('model', '=', res_model)]).id
if not res_id:
raise UserError(_("Creating google drive may only be done by one at a time."))
# check if a model is configured with a template
configs = self.search([('model_id', '=', res_model)])
config_values = []
for config in configs.sudo():
if config.filter_id:
if config.filter_id.user_id and config.filter_id.user_id.id != self.env.user.id:
#Private
continue
domain = [('id', 'in', [res_id])] + safe_eval(config.filter_id.domain)
additionnal_context = safe_eval(config.filter_id.context)
google_doc_configs = self.env[config.filter_id.model_id].with_context(**additionnal_context).search(domain)
if google_doc_configs:
config_values.append({'id': config.id, 'name': config.name})
else:
config_values.append({'id': config.id, 'name': config.name})
return config_values
name = fields.Char('Template Name', required=True)
model_id = fields.Many2one('ir.model', 'Model', ondelete='set null', required=True)
model = fields.Char('Related Model', related='model_id.model', readonly=True)
filter_id = fields.Many2one('ir.filters', 'Filter', domain="[('model_id', '=', model)]")
google_drive_template_url = fields.Char('Template URL', required=True)
google_drive_resource_id = fields.Char('Resource Id', compute='_compute_ressource_id')
google_drive_client_id = fields.Char('Google Client', compute='_compute_client_id')
name_template = fields.Char('Google Drive Name Pattern', default='Document %(name)s', help='Choose how the new google drive will be named, on google side. Eg. gdoc_%(field_name)s', required=True)
active = fields.Boolean('Active', default=True)
def _get_key_from_url(self, url):
word = re.search("(key=|/d/)([A-Za-z0-9-_]+)", url)
if word:
return word.group(2)
return None
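    # Illustrative examples of the key extraction above (hypothetical URLs):
    #   https://docs.google.com/document/d/ABC123-_x/edit -> 'ABC123-_x'
    #   https://docs.google.com/spreadsheet/ccc?key=ABC123#gid=0 -> 'ABC123'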
@api.multi
def _compute_ressource_id(self):
result = {}
for record in self:
word = self._get_key_from_url(record.google_drive_template_url)
if word:
record.google_drive_resource_id = word
else:
raise UserError(_("Please enter a valid Google Document URL."))
return result
@api.multi
def _compute_client_id(self):
google_drive_client_id = self.env['ir.config_parameter'].sudo().get_param('google_drive_client_id')
for record in self:
record.google_drive_client_id = google_drive_client_id
@api.onchange('model_id')
def _onchange_model_id(self):
if self.model_id:
self.model = self.model_id.model
else:
self.filter_id = False
self.model = False
@api.constrains('model_id', 'filter_id')
def _check_model_id(self):
if self.filter_id and self.model_id.model != self.filter_id.model_id:
return False
return True
def get_google_scope(self):
return 'https://www.googleapis.com/auth/drive https://www.googleapis.com/auth/drive.file'
| agpl-3.0 | 3,801,002,757,147,754,000 | 48.515419 | 201 | 0.611833 | false |
vst/normalazy | normalazy.py | 1 | 27657 | import copy
import datetime
from collections import OrderedDict
from decimal import Decimal
from functools import wraps
from six import add_metaclass
#: Defines the version of the `normalazy` library.
__version__ = "0.0.3"
def iffnotnull(func):
"""
Wraps a function, returns None if the first argument is None, invokes the method otherwise.
:param func: The function to be wrapped.
:return: None or the result of the function.
>>> test1 = iffnotnull(lambda x: x)
>>> test1(None)
>>> test1(1)
1
"""
@wraps(func)
def wrapper(value, *args, **kwargs):
return None if value is None else func(value, *args, **kwargs)
return wrapper
def iffnotblank(func):
"""
Wraps a function, returns None if the first argument is empty, invokes the method otherwise.
:param func: The function to be wrapped.
:return: Empty string or the result of the function.
>>> test1 = iffnotblank(lambda x: x)
>>> test1("")
''
>>> test1(1)
1
"""
@wraps(func)
def wrapper(value, *args, **kwargs):
return value if value == "" else func(value, *args, **kwargs)
return wrapper
def identity(x):
"""
Defines an identity function.
:param x: value
:return: value
>>> identity(None)
>>> identity(1)
1
"""
return x
@iffnotnull
def as_string(x):
"""
Converts the value to a trimmed string.
:param x: Value.
:return: Trimmed string value.
>>> as_string(None)
>>> as_string("")
''
>>> as_string("a")
'a'
>>> as_string(" a ")
'a'
"""
return str(x).strip()
@iffnotnull
def as_factor(x):
"""
Converts the value to a factor string.
:param x: Value.
:return: Trimmed, up-cased string value.
>>> as_factor(None)
>>> as_factor("")
''
>>> as_factor("a")
'A'
>>> as_factor(" a ")
'A'
"""
return as_string(x).upper()
@iffnotnull
@iffnotblank
def as_number(x):
"""
Converts the value to a decimal value.
:param x: The value to be converted to a decimal value.
:return: A Decimal instance.
>>> as_number(None)
>>> as_number(1)
Decimal('1')
>>> as_number("1")
Decimal('1')
>>> as_number(" 1 ")
Decimal('1')
"""
return Decimal(as_string(x))
def as_boolean(x, predicate=None):
"""
Converts the value to a boolean value.
:param x: The value to be converted to a boolean value.
:param predicate: The predicate function if required.
:return: Boolean
>>> as_boolean(None)
False
>>> as_boolean("")
False
>>> as_boolean(" ")
True
>>> as_boolean(1)
True
>>> as_boolean(0)
False
>>> as_boolean("1")
True
>>> as_boolean("0")
True
>>> as_boolean("1", predicate=lambda x: int(x) != 0)
True
>>> as_boolean("0", predicate=lambda x: int(x) != 0)
False
>>> as_boolean("1", predicate=int)
True
>>> as_boolean("0", predicate=int)
False
>>> as_boolean("1", int)
True
>>> as_boolean("0", int)
False
"""
return bool(x if predicate is None else predicate(x))
@iffnotnull
@iffnotblank
def as_datetime(x, fmt=None):
"""
Converts the value to a datetime value.
:param x: The value to be converted to a datetime value.
:param fmt: The format of the date/time string.
:return: A datetime.date instance.
>>> as_datetime(None)
>>> as_datetime("")
''
>>> as_datetime("2015-01-01 00:00:00")
datetime.datetime(2015, 1, 1, 0, 0)
>>> as_datetime("2015-01-01T00:00:00", "%Y-%m-%dT%H:%M:%S")
datetime.datetime(2015, 1, 1, 0, 0)
>>> as_datetime("2015-01-01T00:00:00", fmt="%Y-%m-%dT%H:%M:%S")
datetime.datetime(2015, 1, 1, 0, 0)
"""
return datetime.datetime.strptime(x, fmt or "%Y-%m-%d %H:%M:%S")
@iffnotnull
@iffnotblank
def as_date(x, fmt=None):
"""
Converts the value to a date value.
:param x: The value to be converted to a date value.
:param fmt: The format of the date string.
:return: A datetime.date instance.
>>> as_date(None)
>>> as_date('')
''
>>> as_date("2015-01-01")
datetime.date(2015, 1, 1)
>>> as_date("Date: 2015-01-01", "Date: %Y-%m-%d")
datetime.date(2015, 1, 1)
>>> as_date("Date: 2015-01-01", fmt="Date: %Y-%m-%d")
datetime.date(2015, 1, 1)
"""
return datetime.datetime.strptime(x, fmt or "%Y-%m-%d").date()
class Value:
"""
Defines an immutable *[sic.]* boxed value with message, status and extra data as payload if required.
>>> value = Value(value=42, message=None, status=Value.Status.Success, extras="41 + 1")
>>> value.value
42
>>> value.message
>>> value.status == Value.Status.Success
True
>>> value.extras
'41 + 1'
>>> value = Value.success(42, date="2015-01-01")
>>> value.value
42
>>> value.status == Value.Status.Success
True
>>> value.date
'2015-01-01'
>>> value = Value.warning(value="fortytwo", message="Failed to convert to integer.", date="2015-01-01")
>>> value.value
'fortytwo'
>>> value.status == Value.Status.Warning
True
>>> value.date
'2015-01-01'
>>> value.message
'Failed to convert to integer.'
>>> value = Value.error(message="Failed to compute the value.", date="2015-01-01")
>>> value.value
>>> value.status == Value.Status.Error
True
>>> value.date
'2015-01-01'
>>> value.message
'Failed to compute the value.'
"""
class Status:
"""
Defines an enumeration for value status.
"""
#: Indicates that value is mapped successfully.
Success = 1
#: Indicates that value is mapped successfully with warnings.
Warning = 2
#: Indicates that value could not be mapped successfully.
Error = 3
def __init__(self, value=None, message=None, status=None, **kwargs):
"""
Constructs an immutable Value class instance.
Note that the classmethods `success`, `warning` and `error` should be preferred over this
constructor.
:param value: The atomic value.
:param message: Any messages if required.
:param status: The value status.
:param kwargs: Extra payload for the value.
"""
self.__value = value
self.__status = status or self.Status.Success
self.__message = message
self.__payload = kwargs
@property
def value(self):
return self.__value
@property
def status(self):
return self.__status
@property
def message(self):
return self.__message
@property
def payload(self):
return self.__payload
def __getattr__(self, item):
"""
Provides access to payload through attributes.
:param item: The name of the attribute.
:return: The value for the attribute if the attribute name is in payload.
"""
## Check if the item is in the payload:
if item in self.payload:
## Yes, return it.
return self.payload.get(item)
## Nope, escalate:
return super(Value, self).__getattr__(item)
@classmethod
def success(cls, value=None, message=None, **kwargs):
"""
Provides a convenience constructor for successful Value instances.
:param value: The value of the Value instance to be constructed.
:param message: The message, if any.
:param kwargs: Extra payload for the value.
:return: A successful Value instance.
"""
return cls(value=value, message=message, status=cls.Status.Success, **kwargs)
@classmethod
def warning(cls, value=None, message=None, **kwargs):
"""
        Provides a convenience constructor for Value instances with warnings.
:param value: The value of the Value instance to be constructed.
:param message: The message, if any.
:param kwargs: Extra payload for the value.
:return: A Value instance with warnings.
"""
return cls(value=value, message=message, status=cls.Status.Warning, **kwargs)
@classmethod
def error(cls, value=None, message=None, **kwargs):
"""
Provides a convenience constructor for Values instances with errors.
:param value: The value of the Value instance to be constructed.
:param message: The message, if any.
:param kwargs: Extra payload for the value.
:return: A Value instance with errors.
"""
return cls(value=value, message=message, status=cls.Status.Error, **kwargs)
class Field(object):
"""
Provides a concrete mapper field.
>>> field = Field()
>>> field.map(None, dict()).value
>>> field.map(None, dict()).status == Value.Status.Success
True
>>> field = Field(null=False)
>>> field.map(None, dict()).value
>>> field.map(None, dict()).status == Value.Status.Error
True
>>> field = Field(func=lambda i, r: r.get("a", None))
>>> field.map(None, dict(a="")).value
''
>>> field.map(None, dict(a="")).status == Value.Status.Success
True
>>> field = Field(func=lambda i, r: r.get("a", None), blank=False)
>>> field.map(None, dict(a="")).value
''
>>> field.map(None, dict(a="")).status == Value.Status.Error
True
>>> field = Field(func=lambda i, r: r.get("a", None))
>>> field.map(None, dict()).value
>>> field.map(None, dict(a=1)).value
1
>>> field.map(None, dict(a=1)).status == Value.Status.Success
True
"""
def __init__(self, name=None, func=None, blank=True, null=True):
"""
Constructs a mapper field with the given argument.
:param name: The name of the field.
:param func: The function which is to be used to map the value.
:param blank: Boolean indicating if blank values are allowed.
:param null: Boolean indicating if null values are allowed.
"""
self.__name = name
self.__func = func
self.__blank = blank
self.__null = null
@property
def name(self):
"""
Returns the name of the field.
:return: The name of the field.
"""
return self.__name
@property
def func(self):
"""
Returns the mapping function of the field.
:return: The mapping function of the field.
"""
return self.__func
@property
def blank(self):
"""
Indicates if the value is allowed to be blank.
:return: Boolean indicating if the value is allowed to be blank.
"""
return self.__blank
@property
def null(self):
"""
Indicates if the value is allowed to be null.
:return: Boolean indicating if the value is allowed to be null.
"""
return self.__null
def rename(self, name):
"""
Renames the field.
:param name: The new name of the field.
"""
self.__name = name
def treat_value(self, value):
"""
Treats the value and return.
:param value: The value to be treated.
:return: A Value instance.
"""
## By now we have a value. If it is an instance of Value
## class, return it as is:
if isinstance(value, Value):
return value
## If the value is string and empty, but is not allowed to be so, return
## with error:
if not self.blank and isinstance(value, str) and value == "":
return Value.error(value="", message="Value is not allowed to be blank.")
## If the value is None but is not allowed to be so, return
## with error:
if not self.null and value is None:
return Value.error(message="Value is not allowed to be None.")
## OK, we have a value to be boxed and returned successfully:
return Value.success(value=value)
def map(self, instance, record):
"""
Returns the value of for field as a Value instance.
:param instance: The instance for which the value will be retrieved.
:param record: The raw record.
:return: A Value instance.
"""
## Check if we have a function:
if self.func is None:
## OK, value shall be None:
value = None
## Check if the function is a callable or the name of an attribute of the instance:
elif hasattr(self.func, "__call__"):
## The function is a callable. Call it directly on the
## instance and the record and get the raw value:
value = self.func(instance, record)
else:
## The function is not a callable. We assume that it is
## the name of a method of the instance. Apply the
## instance method on the record and get the raw value:
value = getattr(instance, self.func)(record)
## Treat the value and return:
return self.treat_value(value)
class KeyField(Field):
"""
Provides a mapper field for a given key which belongs to the
record. The record can be an object which has `__getitem__` method
or a simple object just with attribute access.
The method starts reading the source value using the key provided
checking `__getitem__` method (for iterables such as `dict` or
`list`), then checks the attribute for simple object attribute
access.
>>> field = KeyField(key="a")
>>> field.map(None, dict(a="")).value
''
>>> field.map(None, dict(a="")).status == Value.Status.Success
True
>>> field = KeyField(key="a", blank=False)
>>> field.map(None, dict(a="")).value
''
>>> field.map(None, dict(a="")).status == Value.Status.Error
True
>>> field = KeyField(key="a", func=lambda i, r, v: as_number(v))
>>> field.map(None, dict(a="12")).value
Decimal('12')
>>> field.map(None, dict(a="12")).status == Value.Status.Success
True
>>> field = KeyField(key="a", cast=as_number)
>>> field.map(None, dict(a="12")).value
Decimal('12')
>>> field.map(None, dict(a="12")).status == Value.Status.Success
True
>>> class Student:
... def __init__(self, name):
... self.name = name
>>> field = KeyField(key="name")
>>> field.map(None, Student("Sinan")).value
'Sinan'
"""
def __init__(self, key=None, cast=None, **kwargs):
"""
Constructs a mapper field with the given argument.
:param key: The key of the property of the record to be mapped.
:param cast: The function to be applied to the value.
:param **kwargs: Keyword arguments to `Field`.
"""
super(KeyField, self).__init__(**kwargs)
self.__key = key
self.__cast = cast
@property
def key(self):
"""
Returns the key of for the field mapping.
"""
return self.__key
def rename(self, name):
"""
Renames the field.
:param name: The new name of the field.
"""
## Call the super:
super(KeyField, self).rename(name)
## If the key is None, set it with joy:
if self.__key is None:
self.__key = name
def map(self, instance, record):
"""
Returns the value of for field as a Value instance.
:param instance: The instance for which the value will be retrieved.
:param record: The raw record.
:return: A Value instance.
"""
## Does the record have __getitem__ method (Indexable) and key exist?
if hasattr(record, "__getitem__") and self.key in record:
## Yes, get the value:
value = record.get(self.key)
## Nope, let's check if the record has such an attribute:
elif hasattr(record, self.key):
## Yes, get the value using attribute access:
value = getattr(record, self.key)
## We can't access such a value in the record.
else:
## OK, Value shall be None:
value = None
## Do we have a function:
if self.func is None:
## Nope, skip:
pass
## Check if the function is a callable or the name of an attribute of the instance:
elif hasattr(self.func, "__call__"):
## The function is a callable. Call it directly on the
## instance, the record and the raw value:
value = self.func(instance, record, value)
else:
## The function is not a callable. We assume that it is
## the name of a method on the instance. Apply the
## instance method on the record and the raw value:
value = getattr(instance, self.func)(record, value)
## OK, now we will cast if required:
if self.__cast is not None:
## Is it a Value instance?
if isinstance(value, Value):
value = Value(value=self.__cast(value.value), status=value.status, message=value.message)
else:
value = self.__cast(value)
## Done, treat the value and return:
return self.treat_value(value)
class ChoiceKeyField(KeyField):
"""
Defines a choice mapper for the index of the record provided.
>>> field = ChoiceKeyField(key="a", choices=dict(a=1, b=2))
>>> field.map(None, dict(a="a")).value
1
>>> field = ChoiceKeyField(key="a", choices=dict(a=1, b=2), func=lambda i, r, v: Decimal(str(v)))
>>> field.map(None, dict(a="a")).value
Decimal('1')
"""
def __init__(self, *args, **kwargs):
## Choices?
choices = kwargs.pop("choices", {})
## Get the function:
functmp = kwargs.pop("func", None)
## Compute the func
if functmp is not None:
func = lambda i, r, v: functmp(i, r, choices.get(v, None))
else:
func = lambda i, r, v: choices.get(v, None)
## Add the func back:
kwargs["func"] = func
## OK, proceed as usual:
super(ChoiceKeyField, self).__init__(*args, **kwargs)
class RecordMetaclass(type):
"""
Provides a record metaclass.
"""
def __new__(mcs, name, bases, attrs, **kwargs):
## Pop all fields:
fields = dict([(key, attrs.pop(key)) for key in list(attrs.keys()) if isinstance(attrs.get(key), Field)])
## Check fields and make sure that names are added:
for name, field in fields.items():
if field.name is None:
field.rename(name)
## Get the record class as usual:
record_cls = super(RecordMetaclass, mcs).__new__(mcs, name, bases, attrs, **kwargs)
## Attach fields to the class:
record_cls._fields = {}
## Now, process the fields:
record_cls._fields.update(fields)
## Done, return the record class:
return record_cls
@add_metaclass(RecordMetaclass)
class Record(object):
"""
Provides a record normalizer base class.
>>> class Test1Record(Record):
... a = KeyField()
>>> record1 = Test1Record(dict(a=1))
>>> record1.a
1
>>> class Test2Record(Record):
... a = KeyField()
... b = ChoiceKeyField(choices={1: "Bir", 2: "Iki"})
>>> record2 = Test2Record(dict(a=1, b=2))
>>> record2.a
1
>>> record2.b
'Iki'
We can get the dictionary representation of records:
>>> record1.as_dict()
OrderedDict([('a', 1)])
>>> record2.as_dict()
OrderedDict([('a', 1), ('b', 'Iki')])
Or detailed:
>>> record1.as_dict(detailed=True)
OrderedDict([('a', OrderedDict([('value', '1'), ('status', 1), ('message', None)]))])
>>> record2.as_dict(detailed=True)
OrderedDict([('a', OrderedDict([('value', '1'), ('status', 1), ('message', None)])), \
('b', OrderedDict([('value', 'Iki'), ('status', 1), ('message', None)]))])
We can also create a new record from an existing record or dictionary:
>>> class Test3Record(Record):
... a = KeyField()
... b = KeyField()
>>> record3 = Test3Record.new(record2)
>>> record3.a
1
>>> record3.b
'Iki'
>>> record3.a == record2.a
True
>>> record3.b == record2.b
True
With dictionary:
>>> record4 = Test3Record.new({"a": 1, "b": "Iki"})
>>> record4.a
1
>>> record4.b
'Iki'
>>> record4.a == record2.a
True
>>> record4.b == record2.b
True
Or even override some fields:
>>> record5 = Test3Record.new(record3, b="Bir")
>>> record5.a
1
>>> record5.b
'Bir'
"""
## TODO: [Improvement] Rename _fields -> __fields, _values -> __value
def __init__(self, record):
## Save the record slot:
self.__record = record
## Declare the values map:
self._values = {}
def __getattr__(self, item):
"""
Returns the value of the attribute named `item`, particularly from within the fields set or pre-calculated
field values set.
:param item: The name of the attribute, in particular the field name.
:return: The value (value attribute of the Value).
"""
return self.getval(item).value
def hasval(self, name):
"""
Indicates if we have a value slot called ``name``.
:param name: The name of the value slot.
:return: ``True`` if we have a value slot called ``name``, ``False`` otherwise.
"""
return name in self._fields
def getval(self, name):
"""
Returns the value slot identified by the ``name``.
:param name: The name of the value slot.
:return: The value slot, ie. the boxed value instance of class :class:`Value`.
"""
## Did we compute this before?
if name in self._values:
## Yes, return the value slot:
return self._values.get(name)
## Do we have such a value slot?
if not self.hasval(name):
raise AttributeError("Record does not have value slot named '{}'".format(name))
## Apparently, we have never computed the value. Let's compute the value slot and return:
return self.setval(name, self._fields.get(name).map(self, self.__record))
def setval(self, name, value, status=None, message=None, **kwargs):
"""
Sets a value to the value slot.
:param name: The name of the value slot.
:param value: The value to be set (Either a Python value or a :class:`Value` instance.)
:param status: The status of the value slot if any.
:param message: The message of the value slot if any.
:param kwargs: Additional named values as payload to value.
:return: The :class:`Value` instance set.
"""
## Do we have such a value slot?
if not self.hasval(name):
raise AttributeError("Record does not have value slot named '{}'".format(name))
## Create a value instance:
if isinstance(value, Value):
## Get a copy of payload if any:
payload = copy.deepcopy(value.payload)
## Update the payload with kwargs:
payload.update(kwargs.copy())
## Create the new value:
value = Value(value=value.value, status=status or value.status, message=message or value.message, **payload)
else:
value = Value(value=value, status=status or Value.Status.Success, message=message, **kwargs)
## Save the slot:
self._values[name] = value
## Done, return the value set:
return value
def delval(self, name):
"""
Deletes a stored value.
:param name: The name of the value.
"""
if name in self._values:
del self._values[name]
def allvals(self):
"""
Returns all the value slots.
:return: A dictionary of all computed value slots.
"""
return {field: self.getval(field) for field in self._fields}
def val_none(self, name):
"""
Indicates if the value is None.
:param name: The name of the value slot.
:return: Boolean indicating if the value is None.
"""
return self.getval(name).value is None
def val_blank(self, name):
"""
Indicates if the value is blank.
:param name: The name of the value slot.
:return: Boolean indicating if the value is blank.
"""
return self.getval(name).value == ""
def val_some(self, name):
"""
Indicates if the value is something other than None or blank.
:param name: The name of the value slot.
:return: Boolean indicating if the value is something other than None or blank.
"""
return not self.val_none(name) and not self.val_blank(name)
def val_success(self, name):
"""
Indicates if the value is success.
:param name: The name of the value slot.
:return: Boolean indicating if the value is success.
"""
return self.getval(name).status == Value.Status.Success
def val_warning(self, name):
"""
Indicates if the value is warning.
:param name: The name of the value slot.
:return: Boolean indicating if the value is warning.
"""
return self.getval(name).status == Value.Status.Warning
def val_error(self, name):
"""
Indicates if the value is error.
:param name: The name of the value slot.
:return: Boolean indicating if the value is error.
"""
return self.getval(name).status == Value.Status.Error
def as_dict(self, detailed=False):
"""
Provides a JSON representation of the record instance.
:param detailed: Indicates if we need detailed result, ie. with status and message for each field.
:return: A JSON representation of the record instance.
"""
## We have the fields and values saved in the `_fields` and `_values` attributes respectively. We will
## simply iterate over these fields and their respective values.
##
## Let's start with defining the data dictionary:
retval = OrderedDict([])
## Iterate over fields and get their values:
for key in sorted(self._fields):
## Add the field to return value:
retval[key] = getattr(self, key, None)
## If detailed, override with real Value instance:
if detailed:
## Get the value:
value = self._values.get(key, None)
## Add the value:
retval[key] = OrderedDict([("value", str(value.value)),
("status", value.status),
("message", value.message)])
## Done, return the value:
return retval
@classmethod
def new(cls, record, **kwargs):
"""
Creates a new record from the provided record or dictionary and overriding values from the provided additional
named arguments.
:param record: The record or dictionary to be copied from.
:param kwargs: Named arguments to override.
:return: New record.
"""
## First of all, get the record as value dictionary:
base = copy.deepcopy(record.as_dict() if isinstance(record, Record) else record)
## Update the dictionary:
base.update(kwargs)
## Done, create the new record and return:
return cls(base)
| bsd-2-clause | 2,155,762,320,283,130,600 | 28.674893 | 120 | 0.576599 | false |
janhahne/nest-simulator | pynest/nest/tests/test_connect_arrays.py | 1 | 3293 | # -*- coding: utf-8 -*-
#
# test_connect_arrays.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
import unittest
import warnings
import nest
import numpy as np
nest.set_verbosity('M_WARNING')
class TestConnectArrays(unittest.TestCase):
def setUp(self):
nest.ResetKernel()
def test_connect_arrays_nonunique_node_ids(self):
"""Connecting arrays with nonunique node IDs"""
nest.Create('iaf_psc_alpha', 4)
source = [1, 1, 2, 2]
target = [3, 3, 4, 4]
nest.Connect(source, target)
conns = nest.GetConnections()
st_pairs = np.array([(s, t) for s in source for t in target])
self.assertTrue(np.array_equal(st_pairs[:, 0], list(conns.sources())))
self.assertTrue(np.array_equal(st_pairs[:, 1], list(conns.targets())))
def test_connect_numpy_arrays_node_ids(self):
"""Connecting numpy arrays with nonunique node IDs"""
nest.Create('iaf_psc_alpha', 4)
source = np.array([1, 1, 2, 2])
target = np.array([3, 3, 4, 4])
nest.Connect(source, target)
conns = nest.GetConnections()
st_pairs = np.array([(s, t) for s in source for t in target])
self.assertTrue(np.array_equal(st_pairs[:, 0], list(conns.sources())))
self.assertTrue(np.array_equal(st_pairs[:, 1], list(conns.targets())))
def test_connect_arrays_unique_node_ids(self):
"""Connecting arrays with unique node IDs"""
n = nest.Create('iaf_psc_alpha', 4)
node_ids = n.tolist()
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
nest.Connect(node_ids, node_ids)
self.assertEqual(len(w), 1)
self.assertTrue(issubclass(w[-1].category, UserWarning))
self.assertTrue('unique' in str(w[-1].message))
conns = nest.GetConnections()
st_pairs = np.array([(s, t) for s in node_ids for t in node_ids])
self.assertTrue(np.array_equal(st_pairs[:, 0], list(conns.sources())))
self.assertTrue(np.array_equal(st_pairs[:, 1], list(conns.targets())))
def test_connect_array_with_nc(self):
"""Connecting one array with a NodeCollection"""
nc = nest.Create('iaf_psc_alpha', 4)
node_ids = [1, 1, 2, 2]
with self.assertRaises(TypeError):
nest.Connect(node_ids, nc)
with self.assertRaises(TypeError):
nest.Connect(nc, node_ids)
def suite():
suite = unittest.TestLoader().loadTestsFromTestCase(TestConnectArrays)
return suite
if __name__ == '__main__':
runner = unittest.TextTestRunner(verbosity=2)
runner.run(suite())
| gpl-2.0 | -6,105,020,392,207,008,000 | 36 | 78 | 0.641057 | false |
CIECODE-Madrid/tipi-engine | stats/process_stats.py | 1 | 6001 | from tipi_data.models.stats import Stats
from tipi_data.models.topic import Topic
from tipi_data.models.initiative import Initiative
class GenerateStats(object):
def __init__(self):
self.topics = Topic.objects()
self.subtopics = self.topics.distinct('tags.subtopic')
self.stats = Stats()
def generate(self):
Stats.objects().delete()
self.overall()
self.deputies_by_topics()
self.deputies_by_subtopics()
self.parliamentarygroups_by_topics()
self.parliamentarygroups_by_subtopics()
self.places_by_topics()
self.places_by_subtopics()
self.stats.save()
def overall(self):
self.stats['overall'] = {
'initiatives': Initiative.objects.count(),
'allinitiatives': Initiative.all.count(),
'topics': list(),
'subtopics': list()
}
pipeline = [
{'$match': {'topics': {'$exists': True, '$not': {'$size': 0}}}},
{'$unwind': '$topics'},
{'$group': {'_id': '$topics', 'initiatives': {'$sum': 1}}},
{'$sort': {'initiatives': -1}}
]
result = Initiative.objects().aggregate(*pipeline)
for item in result:
self.stats['overall']['topics'].append(item)
for subtopic in self.subtopics:
pipeline = [
{'$match': {'tags.subtopic': subtopic}},
{'$group': {'_id': subtopic, 'initiatives': {'$sum': 1}}}
]
result = Initiative.objects().aggregate(*pipeline)
if result._has_next():
self.stats['overall']['subtopics'].append(result.next())
self.stats['overall']['subtopics'].sort(key=lambda x: x['initiatives'], reverse=True)
def deputies_by_topics(self):
self.stats['deputiesByTopics'] = list()
for topic in self.topics:
pipeline = [
{'$match': {'topics': topic['name']}}, {'$unwind': '$author_deputies'},
{'$group': {'_id': '$author_deputies', 'initiatives': {'$sum': 1}}}, {'$sort': {'initiatives': -1}},
{'$limit': 10}
]
result = list(Initiative.objects().aggregate(*pipeline))
if len(result) > 0:
self.stats['deputiesByTopics'].append({
'_id': topic['name'],
'deputies': result
})
def parliamentarygroups_by_topics(self):
self.stats['parliamentarygroupsByTopics'] = list()
for topic in self.topics:
pipeline = [
{'$match': {'topics': topic['name']}}, {'$unwind': '$author_parliamentarygroups'},
{'$group': {'_id': '$author_parliamentarygroups', 'initiatives': {'$sum': 1}}}, {'$sort': {'initiatives': -1}}
]
result = list(Initiative.objects().aggregate(*pipeline))
if len(result) > 0:
self.stats['parliamentarygroupsByTopics'].append({
'_id': topic['name'],
'parliamentarygroups': result
})
def places_by_topics(self):
self.stats['placesByTopics'] = list()
for topic in self.topics:
pipeline = [
{'$match': {'topics': topic['name'], 'place': {'$not': {'$eq': ""}, '$exists': True}}},
{'$group': {'_id': '$place', 'initiatives': {'$sum': 1}}}, {'$sort': {'initiatives': -1}},
{'$limit': 5}
]
result = list(Initiative.objects().aggregate(*pipeline))
if len(result) > 0:
self.stats['placesByTopics'].append({
'_id': topic['name'],
'places': result
})
def deputies_by_subtopics(self):
self.stats['deputiesBySubtopics'] = list()
for subtopic in self.subtopics:
pipeline = [
{'$match': { 'tags.subtopic': subtopic } }, {'$unwind': '$author_deputies'},
{'$group': {'_id': '$author_deputies', 'initiatives': {'$sum': 1}}}, {'$sort': {'initiatives': -1}},
{'$limit': 10}
]
result = list(Initiative.objects().aggregate(*pipeline))
if len(result) > 0:
self.stats['deputiesBySubtopics'].append({
'_id': subtopic,
'deputies': result
})
def parliamentarygroups_by_subtopics(self):
self.stats['parliamentarygroupsBySubtopics'] = list()
for subtopic in self.subtopics:
pipeline = [
{'$match': { 'tags.subtopic': subtopic } }, {'$unwind': '$author_parliamentarygroups'},
{'$group': {'_id': '$author_parliamentarygroups', 'initiatives': {'$sum': 1}}}, {'$sort': {'initiatives': -1}}
]
result = list(Initiative.objects().aggregate(*pipeline))
if len(result) > 0:
self.stats['parliamentarygroupsBySubtopics'].append({
'_id': subtopic,
'parliamentarygroups': result
})
def places_by_subtopics(self):
self.stats['placesBySubtopics'] = list()
for subtopic in self.subtopics:
pipeline = [
{'$match': { 'tags.subtopic': subtopic, 'place': {'$not': {'$eq': ""}, '$exists': True}}},
{'$group': {'_id': '$place', 'initiatives': {'$sum': 1}}}, {'$sort': {'initiatives': -1}},
{'$limit': 5}
]
result = list(Initiative.objects().aggregate(*pipeline))
if len(result) > 0:
self.stats['placesBySubtopics'].append({
'_id': subtopic,
'places': result
})
if __name__ == "__main__":
GenerateStats().generate()
| gpl-3.0 | 3,043,947,720,192,462,300 | 41.864286 | 130 | 0.481753 | false |
DayGitH/Python-Challenges | DailyProgrammer/DP20120723C.py | 1 | 1700 | """
[7/23/2012] Challenge #80 [difficult] (Multi-word anagrams)
https://www.reddit.com/r/dailyprogrammer/comments/x0vj7/7232012_challenge_80_difficult_multiword_anagrams/
In [today's easy problem](http://www.reddit.com/r/dailyprogrammer/comments/x0v3e/7232012_challenge_80_easy_anagrams/),
we investigated anagrams that were single words. However, as is clear in the "I am Lord Voldemort" and "Tom Marvolo
Riddle" example, anagrams can also be several words long.
Your difficult task today is to write a program that given a word will generate all multi-word anagrams of that word.
Use [the same dictionary as in the easy
problem](http://code.google.com/p/dotnetperls-controls/downloads/detail?name=enable1.txt).
So, for instance, the word "PARLIAMENT" has (by my count) ~~6636~~ 8438 multi-word anagrams using that dictionary.
Examples include "MENIAL PRAT", "INEPT ALARM", "EAT NIL PRAM" (most of them will not make any sense) and "PARLIAMENT"
itself. Note that in this problem, if the difference between two permutation is only word order, they count as the same
anagram. So "INEPT ALARM" and "ALARM INEPT" should just count as one anagram.
Also, if there are single-word anagrams of the input, they should be counted in the total. For instance, in the 63
(again, by my count) multi-word anagrams of "MARBLES", the words "AMBLERS", "BLAMERS", "LAMBERS" and "RAMBLES" are
included, as well as "MARBLES" itself (a few examples of multi-word anagrams for "MARBLES" are "ARM BELS", "REM LABS"
and "ELM BARS").
How many multi-word anagrams are there for "CARPENTER" and "INHERITANCE"?
EDIT: Thanks to Cosmologicon for corrections!
"""
def main():
pass
if __name__ == "__main__":
main()
| mit | 8,144,650,209,013,004,000 | 55.666667 | 119 | 0.752941 | false |
mercycorps/tola-activity | htdocs/activitydb/migrations/0037_auto_20151028_1631.py | 1 | 1470 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('activitydb', '0036_auto_20151028_1519'),
]
operations = [
migrations.AlterField(
model_name='siteprofile',
name='avg_landholding_size',
field=models.DecimalField(decimal_places=14, max_digits=25, blank=True, help_text='In hectares/jeribs', null=True, verbose_name='Average Landholding Size'),
),
migrations.AlterField(
model_name='siteprofile',
name='populations_owning_land',
field=models.IntegerField(help_text='(%)', null=True, verbose_name='Households Owning Land', blank=True),
),
migrations.AlterField(
model_name='siteprofile',
name='literacy_rate',
field=models.IntegerField(help_text='%', null=True, verbose_name='Literacy Rate (%)', blank=True),
),
migrations.AlterField(
model_name='siteprofile',
name='literate_females',
field=models.IntegerField(help_text='%', null=True, verbose_name='% of Literate Females', blank=True),
),
migrations.AlterField(
model_name='siteprofile',
name='literate_males',
field=models.IntegerField(help_text='%', null=True, verbose_name='% of Literate Males', blank=True),
),
]
| gpl-2.0 | -7,011,314,465,358,522,000 | 35.75 | 168 | 0.6 | false |
williamthegrey/swift | test/probe/test_object_metadata_replication.py | 1 | 30416 | #!/usr/bin/python -u
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from io import StringIO
import unittest
import os
import uuid
from swift.common.direct_client import direct_get_suffix_hashes
from swift.common.exceptions import DiskFileDeleted
from swift.common.internal_client import UnexpectedResponse
from swift.container.backend import ContainerBroker
from swift.common import utils
from swiftclient import client
from swift.common.ring import Ring
from swift.common.utils import Timestamp, get_logger, hash_path
from swift.obj.diskfile import DiskFileManager
from swift.common.storage_policy import POLICIES
from test.probe.brain import BrainSplitter
from test.probe.common import ReplProbeTest
class Test(ReplProbeTest):
def setUp(self):
"""
Reset all environment and start all servers.
"""
super(Test, self).setUp()
self.container_name = 'container-%s' % uuid.uuid4()
self.object_name = 'object-%s' % uuid.uuid4()
self.brain = BrainSplitter(self.url, self.token, self.container_name,
self.object_name, 'object',
policy=self.policy)
self.int_client = self.make_internal_client(object_post_as_copy=False)
def tearDown(self):
super(Test, self).tearDown()
def _get_object_info(self, account, container, obj, number):
obj_conf = self.configs['object-server']
config_path = obj_conf[number]
options = utils.readconf(config_path, 'app:object-server')
swift_dir = options.get('swift_dir', '/etc/swift')
ring = POLICIES.get_object_ring(int(self.policy), swift_dir)
part, nodes = ring.get_nodes(account, container, obj)
for node in nodes:
# assumes one to one mapping
if node['port'] == int(options.get('bind_port')):
device = node['device']
break
else:
return None
mgr = DiskFileManager(options, get_logger(options))
disk_file = mgr.get_diskfile(device, part, account, container, obj,
self.policy)
info = disk_file.read_metadata()
return info
def _assert_consistent_object_metadata(self):
obj_info = []
for i in range(1, 5):
info_i = self._get_object_info(self.account, self.container_name,
self.object_name, i)
if info_i:
obj_info.append(info_i)
self.assertTrue(len(obj_info) > 1)
for other in obj_info[1:]:
self.assertDictEqual(obj_info[0], other)
def _assert_consistent_deleted_object(self):
for i in range(1, 5):
try:
info = self._get_object_info(self.account, self.container_name,
self.object_name, i)
if info is not None:
self.fail('Expected no disk file info but found %s' % info)
except DiskFileDeleted:
pass
def _get_db_info(self, account, container, number):
server_type = 'container'
obj_conf = self.configs['%s-server' % server_type]
config_path = obj_conf[number]
options = utils.readconf(config_path, 'app:container-server')
root = options.get('devices')
swift_dir = options.get('swift_dir', '/etc/swift')
ring = Ring(swift_dir, ring_name=server_type)
part, nodes = ring.get_nodes(account, container)
for node in nodes:
# assumes one to one mapping
if node['port'] == int(options.get('bind_port')):
device = node['device']
break
else:
return None
path_hash = utils.hash_path(account, container)
_dir = utils.storage_directory('%ss' % server_type, part, path_hash)
db_dir = os.path.join(root, device, _dir)
db_file = os.path.join(db_dir, '%s.db' % path_hash)
db = ContainerBroker(db_file)
return db.get_info()
def _assert_consistent_container_dbs(self):
db_info = []
for i in range(1, 5):
info_i = self._get_db_info(self.account, self.container_name, i)
if info_i:
db_info.append(info_i)
self.assertTrue(len(db_info) > 1)
for other in db_info[1:]:
self.assertEqual(db_info[0]['hash'], other['hash'],
'Container db hash mismatch: %s != %s'
% (db_info[0]['hash'], other['hash']))
def _assert_object_metadata_matches_listing(self, listing, metadata):
self.assertEqual(listing['bytes'], int(metadata['content-length']))
self.assertEqual(listing['hash'], metadata['etag'])
self.assertEqual(listing['content_type'], metadata['content-type'])
modified = Timestamp(metadata['x-timestamp']).isoformat
self.assertEqual(listing['last_modified'], modified)
def _put_object(self, headers=None, body=u'stuff'):
headers = headers or {}
self.int_client.upload_object(StringIO(body), self.account,
self.container_name,
self.object_name, headers)
def _post_object(self, headers):
self.int_client.set_object_metadata(self.account, self.container_name,
self.object_name, headers)
def _delete_object(self):
self.int_client.delete_object(self.account, self.container_name,
self.object_name)
def _get_object(self, headers=None, expect_statuses=(2,)):
return self.int_client.get_object(self.account,
self.container_name,
self.object_name,
headers,
acceptable_statuses=expect_statuses)
def _get_object_metadata(self):
return self.int_client.get_object_metadata(self.account,
self.container_name,
self.object_name)
def _assert_consistent_suffix_hashes(self):
opart, onodes = self.object_ring.get_nodes(
self.account, self.container_name, self.object_name)
name_hash = hash_path(
self.account, self.container_name, self.object_name)
results = []
for node in onodes:
results.append(
(node,
direct_get_suffix_hashes(node, opart, [name_hash[-3:]])))
for (node, hashes) in results[1:]:
self.assertEqual(results[0][1], hashes,
'Inconsistent suffix hashes found: %s' % results)
def test_object_delete_is_replicated(self):
self.brain.put_container(policy_index=int(self.policy))
# put object
self._put_object()
# put newer object with sysmeta to first server subset
self.brain.stop_primary_half()
self._put_object()
self.brain.start_primary_half()
# delete object on second server subset
self.brain.stop_handoff_half()
self._delete_object()
self.brain.start_handoff_half()
# run replicator
self.get_to_final_state()
# check object deletion has been replicated on first server set
self.brain.stop_primary_half()
self._get_object(expect_statuses=(4,))
self.brain.start_primary_half()
# check object deletion persists on second server set
self.brain.stop_handoff_half()
self._get_object(expect_statuses=(4,))
# put newer object to second server set
self._put_object()
self.brain.start_handoff_half()
# run replicator
self.get_to_final_state()
# check new object has been replicated on first server set
self.brain.stop_primary_half()
self._get_object()
self.brain.start_primary_half()
# check new object persists on second server set
self.brain.stop_handoff_half()
self._get_object()
def test_object_after_replication_with_subsequent_post(self):
self.brain.put_container(policy_index=0)
# put object
self._put_object(headers={'Content-Type': 'foo'}, body=u'older')
# put newer object to first server subset
self.brain.stop_primary_half()
self._put_object(headers={'Content-Type': 'bar'}, body=u'newer')
metadata = self._get_object_metadata()
etag = metadata['etag']
self.brain.start_primary_half()
# post some user meta to all servers
self._post_object({'x-object-meta-bar': 'meta-bar'})
# run replicator
self.get_to_final_state()
# check that newer data has been replicated to second server subset
self.brain.stop_handoff_half()
metadata = self._get_object_metadata()
self.assertEqual(etag, metadata['etag'])
self.assertEqual('bar', metadata['content-type'])
self.assertEqual('meta-bar', metadata['x-object-meta-bar'])
self.brain.start_handoff_half()
self._assert_consistent_object_metadata()
self._assert_consistent_container_dbs()
self._assert_consistent_suffix_hashes()
def test_sysmeta_after_replication_with_subsequent_put(self):
sysmeta = {'x-object-sysmeta-foo': 'older'}
sysmeta2 = {'x-object-sysmeta-foo': 'newer'}
usermeta = {'x-object-meta-bar': 'meta-bar'}
self.brain.put_container(policy_index=0)
# put object with sysmeta to first server subset
self.brain.stop_primary_half()
self._put_object(headers=sysmeta)
metadata = self._get_object_metadata()
for key in sysmeta:
self.assertTrue(key in metadata)
self.assertEqual(metadata[key], sysmeta[key])
self.brain.start_primary_half()
# put object with updated sysmeta to second server subset
self.brain.stop_handoff_half()
self._put_object(headers=sysmeta2)
metadata = self._get_object_metadata()
for key in sysmeta2:
self.assertTrue(key in metadata)
self.assertEqual(metadata[key], sysmeta2[key])
self._post_object(usermeta)
metadata = self._get_object_metadata()
for key in usermeta:
self.assertTrue(key in metadata)
self.assertEqual(metadata[key], usermeta[key])
for key in sysmeta2:
self.assertTrue(key in metadata)
self.assertEqual(metadata[key], sysmeta2[key])
self.brain.start_handoff_half()
# run replicator
self.get_to_final_state()
# check sysmeta has been replicated to first server subset
self.brain.stop_primary_half()
metadata = self._get_object_metadata()
for key in usermeta:
self.assertTrue(key in metadata)
self.assertEqual(metadata[key], usermeta[key])
for key in sysmeta2.keys():
self.assertTrue(key in metadata, key)
self.assertEqual(metadata[key], sysmeta2[key])
self.brain.start_primary_half()
# check user sysmeta ok on second server subset
self.brain.stop_handoff_half()
metadata = self._get_object_metadata()
for key in usermeta:
self.assertTrue(key in metadata)
self.assertEqual(metadata[key], usermeta[key])
for key in sysmeta2.keys():
self.assertTrue(key in metadata, key)
self.assertEqual(metadata[key], sysmeta2[key])
self.brain.start_handoff_half()
self._assert_consistent_object_metadata()
self._assert_consistent_container_dbs()
self._assert_consistent_suffix_hashes()
def test_sysmeta_after_replication_with_subsequent_post(self):
sysmeta = {'x-object-sysmeta-foo': 'sysmeta-foo'}
usermeta = {'x-object-meta-bar': 'meta-bar'}
self.brain.put_container(policy_index=int(self.policy))
# put object
self._put_object()
# put newer object with sysmeta to first server subset
self.brain.stop_primary_half()
self._put_object(headers=sysmeta)
metadata = self._get_object_metadata()
for key in sysmeta:
self.assertTrue(key in metadata)
self.assertEqual(metadata[key], sysmeta[key])
self.brain.start_primary_half()
# post some user meta to second server subset
self.brain.stop_handoff_half()
self._post_object(usermeta)
metadata = self._get_object_metadata()
for key in usermeta:
self.assertTrue(key in metadata)
self.assertEqual(metadata[key], usermeta[key])
for key in sysmeta:
self.assertFalse(key in metadata)
self.brain.start_handoff_half()
# run replicator
self.get_to_final_state()
# check user metadata has been replicated to first server subset
# and sysmeta is unchanged
self.brain.stop_primary_half()
metadata = self._get_object_metadata()
expected = dict(sysmeta)
expected.update(usermeta)
for key in expected.keys():
self.assertTrue(key in metadata, key)
self.assertEqual(metadata[key], expected[key])
self.brain.start_primary_half()
# check user metadata and sysmeta both on second server subset
self.brain.stop_handoff_half()
metadata = self._get_object_metadata()
for key in expected.keys():
self.assertTrue(key in metadata, key)
self.assertEqual(metadata[key], expected[key])
self.brain.start_handoff_half()
self._assert_consistent_object_metadata()
self._assert_consistent_container_dbs()
self._assert_consistent_suffix_hashes()
def test_sysmeta_after_replication_with_prior_post(self):
sysmeta = {'x-object-sysmeta-foo': 'sysmeta-foo'}
usermeta = {'x-object-meta-bar': 'meta-bar'}
self.brain.put_container(policy_index=int(self.policy))
# put object
self._put_object()
# put user meta to first server subset
self.brain.stop_handoff_half()
self._post_object(headers=usermeta)
metadata = self._get_object_metadata()
for key in usermeta:
self.assertTrue(key in metadata)
self.assertEqual(metadata[key], usermeta[key])
self.brain.start_handoff_half()
# put newer object with sysmeta to second server subset
self.brain.stop_primary_half()
self._put_object(headers=sysmeta)
metadata = self._get_object_metadata()
for key in sysmeta:
self.assertTrue(key in metadata)
self.assertEqual(metadata[key], sysmeta[key])
self.brain.start_primary_half()
# run replicator
self.get_to_final_state()
# check stale user metadata is not replicated to first server subset
# and sysmeta is unchanged
self.brain.stop_primary_half()
metadata = self._get_object_metadata()
for key in sysmeta:
self.assertTrue(key in metadata)
self.assertEqual(metadata[key], sysmeta[key])
for key in usermeta:
self.assertFalse(key in metadata)
self.brain.start_primary_half()
# check stale user metadata is removed from second server subset
# and sysmeta is replicated
self.brain.stop_handoff_half()
metadata = self._get_object_metadata()
for key in sysmeta:
self.assertTrue(key in metadata)
self.assertEqual(metadata[key], sysmeta[key])
for key in usermeta:
self.assertFalse(key in metadata)
self.brain.start_handoff_half()
self._assert_consistent_object_metadata()
self._assert_consistent_container_dbs()
self._assert_consistent_suffix_hashes()
def test_post_ctype_replicated_when_previous_incomplete_puts(self):
# primary half handoff half
# ------------ ------------
# t0.data: ctype = foo
# t1.data: ctype = bar
# t2.meta: ctype = baz
#
# ...run replicator and expect...
#
# t1.data:
# t2.meta: ctype = baz
self.brain.put_container(policy_index=0)
# incomplete write to primary half
self.brain.stop_handoff_half()
self._put_object(headers={'Content-Type': 'foo'})
self.brain.start_handoff_half()
# handoff write
self.brain.stop_primary_half()
self._put_object(headers={'Content-Type': 'bar'})
self.brain.start_primary_half()
# content-type update to primary half
self.brain.stop_handoff_half()
self._post_object(headers={'Content-Type': 'baz'})
self.brain.start_handoff_half()
self.get_to_final_state()
# check object metadata
metadata = client.head_object(self.url, self.token,
self.container_name,
self.object_name)
# check container listing metadata
container_metadata, objs = client.get_container(self.url, self.token,
self.container_name)
for obj in objs:
if obj['name'] == self.object_name:
break
expected = 'baz'
self.assertEqual(obj['content_type'], expected)
self._assert_object_metadata_matches_listing(obj, metadata)
self._assert_consistent_container_dbs()
self._assert_consistent_object_metadata()
self._assert_consistent_suffix_hashes()
def test_put_ctype_replicated_when_subsequent_post(self):
# primary half handoff half
# ------------ ------------
# t0.data: ctype = foo
# t1.data: ctype = bar
# t2.meta:
#
# ...run replicator and expect...
#
# t1.data: ctype = bar
# t2.meta:
self.brain.put_container(policy_index=0)
# incomplete write
self.brain.stop_handoff_half()
self._put_object(headers={'Content-Type': 'foo'})
self.brain.start_handoff_half()
# handoff write
self.brain.stop_primary_half()
self._put_object(headers={'Content-Type': 'bar'})
self.brain.start_primary_half()
# metadata update with newest data unavailable
self.brain.stop_handoff_half()
self._post_object(headers={'X-Object-Meta-Color': 'Blue'})
self.brain.start_handoff_half()
self.get_to_final_state()
# check object metadata
metadata = client.head_object(self.url, self.token,
self.container_name,
self.object_name)
# check container listing metadata
container_metadata, objs = client.get_container(self.url, self.token,
self.container_name)
for obj in objs:
if obj['name'] == self.object_name:
break
else:
self.fail('obj not found in container listing')
expected = 'bar'
self.assertEqual(obj['content_type'], expected)
self.assertEqual(metadata['x-object-meta-color'], 'Blue')
self._assert_object_metadata_matches_listing(obj, metadata)
self._assert_consistent_container_dbs()
self._assert_consistent_object_metadata()
self._assert_consistent_suffix_hashes()
def test_post_ctype_replicated_when_subsequent_post_without_ctype(self):
# primary half handoff half
# ------------ ------------
# t0.data: ctype = foo
# t1.data: ctype = bar
# t2.meta: ctype = bif
# t3.data: ctype = baz, color = 'Red'
# t4.meta: color = Blue
#
# ...run replicator and expect...
#
# t1.data:
# t4-delta.meta: ctype = baz, color = Blue
self.brain.put_container(policy_index=0)
# incomplete write
self.brain.stop_handoff_half()
self._put_object(headers={'Content-Type': 'foo',
'X-Object-Sysmeta-Test': 'older'})
self.brain.start_handoff_half()
# handoff write
self.brain.stop_primary_half()
self._put_object(headers={'Content-Type': 'bar',
'X-Object-Sysmeta-Test': 'newer'})
self.brain.start_primary_half()
# incomplete post with content type
self.brain.stop_handoff_half()
self._post_object(headers={'Content-Type': 'bif'})
self.brain.start_handoff_half()
# incomplete post to handoff with content type
self.brain.stop_primary_half()
self._post_object(headers={'Content-Type': 'baz',
'X-Object-Meta-Color': 'Red'})
self.brain.start_primary_half()
# complete post with no content type
self._post_object(headers={'X-Object-Meta-Color': 'Blue',
'X-Object-Sysmeta-Test': 'ignored'})
# 'baz' wins over 'bar' but 'Blue' wins over 'Red'
self.get_to_final_state()
# check object metadata
metadata = self._get_object_metadata()
# check container listing metadata
container_metadata, objs = client.get_container(self.url, self.token,
self.container_name)
for obj in objs:
if obj['name'] == self.object_name:
break
expected = 'baz'
self.assertEqual(obj['content_type'], expected)
self.assertEqual(metadata['x-object-meta-color'], 'Blue')
self.assertEqual(metadata['x-object-sysmeta-test'], 'newer')
self._assert_object_metadata_matches_listing(obj, metadata)
self._assert_consistent_container_dbs()
self._assert_consistent_object_metadata()
self._assert_consistent_suffix_hashes()
def test_put_ctype_replicated_when_subsequent_posts_without_ctype(self):
# primary half handoff half
# ------------ ------------
# t0.data: ctype = foo
# t1.data: ctype = bar
# t2.meta:
# t3.meta
#
# ...run replicator and expect...
#
# t1.data: ctype = bar
# t3.meta
self.brain.put_container(policy_index=0)
self._put_object(headers={'Content-Type': 'foo',
'X-Object-Sysmeta-Test': 'older'})
# incomplete write to handoff half
self.brain.stop_primary_half()
self._put_object(headers={'Content-Type': 'bar',
'X-Object-Sysmeta-Test': 'newer'})
self.brain.start_primary_half()
# incomplete post with no content type to primary half
self.brain.stop_handoff_half()
self._post_object(headers={'X-Object-Meta-Color': 'Red',
'X-Object-Sysmeta-Test': 'ignored'})
self.brain.start_handoff_half()
# incomplete post with no content type to handoff half
self.brain.stop_primary_half()
self._post_object(headers={'X-Object-Meta-Color': 'Blue'})
self.brain.start_primary_half()
self.get_to_final_state()
# check object metadata
metadata = self._get_object_metadata()
# check container listing metadata
container_metadata, objs = client.get_container(self.url, self.token,
self.container_name)
for obj in objs:
if obj['name'] == self.object_name:
break
expected = 'bar'
self.assertEqual(obj['content_type'], expected)
self._assert_object_metadata_matches_listing(obj, metadata)
self.assertEqual(metadata['x-object-meta-color'], 'Blue')
self.assertEqual(metadata['x-object-sysmeta-test'], 'newer')
self._assert_object_metadata_matches_listing(obj, metadata)
self._assert_consistent_container_dbs()
self._assert_consistent_object_metadata()
self._assert_consistent_suffix_hashes()
def test_posted_metadata_only_persists_after_prior_put(self):
# newer metadata posted to subset of nodes should persist after an
# earlier put on other nodes, but older content-type on that subset
# should not persist
self.brain.put_container(policy_index=0)
# incomplete put to handoff
self.brain.stop_primary_half()
self._put_object(headers={'Content-Type': 'oldest',
'X-Object-Sysmeta-Test': 'oldest',
'X-Object-Meta-Test': 'oldest'})
self.brain.start_primary_half()
# incomplete put to primary
self.brain.stop_handoff_half()
self._put_object(headers={'Content-Type': 'oldest',
'X-Object-Sysmeta-Test': 'oldest',
'X-Object-Meta-Test': 'oldest'})
self.brain.start_handoff_half()
# incomplete post with content-type to handoff
self.brain.stop_primary_half()
self._post_object(headers={'Content-Type': 'newer',
'X-Object-Meta-Test': 'newer'})
self.brain.start_primary_half()
# incomplete put to primary
self.brain.stop_handoff_half()
self._put_object(headers={'Content-Type': 'newest',
'X-Object-Sysmeta-Test': 'newest',
'X-Object-Meta-Test': 'newer'})
self.brain.start_handoff_half()
# incomplete post with no content-type to handoff which still has
# out of date content-type
self.brain.stop_primary_half()
self._post_object(headers={'X-Object-Meta-Test': 'newest'})
metadata = self._get_object_metadata()
self.assertEqual(metadata['x-object-meta-test'], 'newest')
self.assertEqual(metadata['content-type'], 'newer')
self.brain.start_primary_half()
self.get_to_final_state()
# check object metadata
metadata = self._get_object_metadata()
self.assertEqual(metadata['x-object-meta-test'], 'newest')
self.assertEqual(metadata['x-object-sysmeta-test'], 'newest')
self.assertEqual(metadata['content-type'], 'newest')
# check container listing metadata
container_metadata, objs = client.get_container(self.url, self.token,
self.container_name)
for obj in objs:
if obj['name'] == self.object_name:
break
self.assertEqual(obj['content_type'], 'newest')
self._assert_object_metadata_matches_listing(obj, metadata)
self._assert_consistent_container_dbs()
self._assert_consistent_object_metadata()
self._assert_consistent_suffix_hashes()
def test_post_trumped_by_prior_delete(self):
# new metadata and content-type posted to subset of nodes should not
# cause object to persist after replication of an earlier delete on
# other nodes.
self.brain.put_container(policy_index=0)
# incomplete put
self.brain.stop_primary_half()
self._put_object(headers={'Content-Type': 'oldest',
'X-Object-Sysmeta-Test': 'oldest',
'X-Object-Meta-Test': 'oldest'})
self.brain.start_primary_half()
# incomplete put then delete
self.brain.stop_handoff_half()
self._put_object(headers={'Content-Type': 'oldest',
'X-Object-Sysmeta-Test': 'oldest',
'X-Object-Meta-Test': 'oldest'})
self._delete_object()
self.brain.start_handoff_half()
# handoff post
self.brain.stop_primary_half()
self._post_object(headers={'Content-Type': 'newest',
'X-Object-Sysmeta-Test': 'ignored',
'X-Object-Meta-Test': 'newest'})
# check object metadata
metadata = self._get_object_metadata()
self.assertEqual(metadata['x-object-sysmeta-test'], 'oldest')
self.assertEqual(metadata['x-object-meta-test'], 'newest')
self.assertEqual(metadata['content-type'], 'newest')
self.brain.start_primary_half()
# delete trumps later post
self.get_to_final_state()
# check object is now deleted
self.assertRaises(UnexpectedResponse, self._get_object_metadata)
container_metadata, objs = client.get_container(self.url, self.token,
self.container_name)
self.assertEqual(0, len(objs))
self._assert_consistent_container_dbs()
self._assert_consistent_deleted_object()
self._assert_consistent_suffix_hashes()
if __name__ == "__main__":
unittest.main()
| apache-2.0 | -900,956,658,026,259,200 | 39.500666 | 79 | 0.57233 | false |
srajag/contrail-controller | src/config/device-manager/device_manager/db.py | 1 | 14129 | #
# Copyright (c) 2014 Juniper Networks, Inc. All rights reserved.
#
"""
This file contains implementation of data model for physical router
configuration manager
"""
from vnc_api.common.exceptions import NoIdError
from physical_router_config import PhysicalRouterConfig
from sandesh.dm_introspect import ttypes as sandesh
from cfgm_common.vnc_db import DBBase
import copy
class BgpRouterDM(DBBase):
_dict = {}
obj_type = 'bgp_router'
def __init__(self, uuid, obj_dict=None):
self.uuid = uuid
self.bgp_routers = {}
self.physical_router = None
self.update(obj_dict)
# end __init__
def update(self, obj=None):
if obj is None:
obj = self.read_obj(self.uuid)
self.name = obj['fq_name'][-1]
self.params = obj['bgp_router_parameters']
self.update_single_ref('physical_router', obj)
new_peers = {}
for ref in obj.get('bgp_router_refs', []):
new_peers[ref['uuid']] = ref['attr']
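        # Drop this router's back-reference from peers that are no longer
        # referenced, then (re)register it on every peer still listed.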
for peer_id in set(self.bgp_routers.keys()) - set(new_peers.keys()):
peer = BgpRouterDM.get(peer_id)
if self.uuid in peer.bgp_routers:
del peer.bgp_routers[self.uuid]
for peer_id, attrs in new_peers.items():
peer = BgpRouterDM.get(peer_id)
if peer:
peer.bgp_routers[self.uuid] = attrs
self.bgp_routers = new_peers
def sandesh_build(self):
return sandesh.BgpRouter(name=self.name, uuid=self.uuid,
peers=self.bgp_routers,
physical_router=self.physical_router)
@classmethod
def sandesh_request(cls, req):
# Return the list of BGP routers
resp = sandesh.BgpRouterListResp(bgp_routers=[])
if req.name_or_uuid is None:
for router in cls.values():
sandesh_router = router.sandesh_build()
resp.bgp_routers.extend(sandesh_router)
else:
router = cls.find_by_name_or_uuid(req.name_or_uuid)
if router:
sandesh_router = router.sandesh_build()
resp.bgp_routers.extend(sandesh_router)
resp.response(req.context())
# end class BgpRouterDM
class PhysicalRouterDM(DBBase):
_dict = {}
obj_type = 'physical_router'
def __init__(self, uuid, obj_dict=None):
self.uuid = uuid
self.virtual_networks = set()
self.bgp_router = None
self.update(obj_dict)
self.config_manager = PhysicalRouterConfig(
self.management_ip, self.user_credentials, self._logger)
# end __init__
def update(self, obj=None):
if obj is None:
obj = self.read_obj(self.uuid)
self.management_ip = obj.get('physical_router_management_ip')
self.vendor = obj.get('physical_router_vendor_name')
self.user_credentials = obj.get('physical_router_user_credentials')
self.update_single_ref('bgp_router', obj)
self.update_multiple_refs('virtual_network', obj)
self.physical_interfaces = set([pi['uuid'] for pi in
obj.get('physical_interfaces', [])])
self.logical_interfaces = set([li['uuid'] for li in
obj.get('logical_interfaces', [])])
# end update
@classmethod
def delete(cls, uuid):
if uuid not in cls._dict:
return
obj = cls._dict[uuid]
obj.config_manager.delete_bgp_config()
obj.update_single_ref('bgp_router', {})
obj.update_multiple_refs('virtual_network', {})
del cls._dict[uuid]
# end delete
def push_config(self):
self.config_manager.reset_bgp_config()
bgp_router = BgpRouterDM.get(self.bgp_router)
if bgp_router:
for peer_uuid, params in bgp_router.bgp_routers.items():
peer = BgpRouterDM.get(peer_uuid)
if peer is None:
continue
                external = (bgp_router.params['autonomous_system'] !=
                            peer.params['autonomous_system'])
self.config_manager.add_bgp_peer(peer.params['address'],
params, external)
self.config_manager.set_bgp_config(bgp_router.params)
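        # Build a map of virtual-network uuid -> logical interface names on
        # this router, walking logical interfaces both directly attached and
        # hanging off the router's physical interfaces.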
vn_dict = {}
for vn_id in self.virtual_networks:
vn_dict[vn_id] = []
li_set = self.logical_interfaces
for pi_uuid in self.physical_interfaces:
pi = PhysicalInterfaceDM.get(pi_uuid)
if pi is None:
continue
li_set |= pi.logical_interfaces
for li_uuid in li_set:
li = LogicalInterfaceDM.get(li_uuid)
if li is None:
continue
vmi_id = li.virtual_machine_interface
vmi = VirtualMachineInterfaceDM.get(vmi_id)
if vmi is None:
continue
vn_id = vmi.virtual_network
if vn_id in vn_dict:
vn_dict[vn_id].append(li.name)
else:
vn_dict[vn_id] = [li.name]
for vn_id, interfaces in vn_dict.items():
vn_obj = VirtualNetworkDM.get(vn_id)
if vn_obj is None:
continue
for ri_id in vn_obj.routing_instances:
# Find the primary RI by matching the name
ri_obj = RoutingInstanceDM.get(ri_id)
if ri_obj is None:
continue
if ri_obj.fq_name[-1] == vn_obj.fq_name[-1]:
vrf_name = ':'.join(vn_obj.fq_name)
export_set = copy.copy(ri_obj.export_targets)
import_set = copy.copy(ri_obj.import_targets)
for ri2_id in ri_obj.routing_instances:
ri2 = RoutingInstanceDM.get(ri2_id)
if ri2 is None:
continue
import_set |= ri2.export_targets
export_set |= ri2.import_targets
self.config_manager.add_routing_instance(vrf_name,
import_set,
export_set,
vn_obj.prefixes,
vn_obj.gateways,
vn_obj.router_external,
interfaces,
vn_obj.vxlan_vni)
break
self.config_manager.send_bgp_config()
# end push_config
# end PhysicalRouterDM
class PhysicalInterfaceDM(DBBase):
_dict = {}
obj_type = 'physical_interface'
def __init__(self, uuid, obj_dict=None):
self.uuid = uuid
self.update(obj_dict)
pr = PhysicalRouterDM.get(self.physical_router)
if pr:
pr.physical_interfaces.add(self.uuid)
# end __init__
def update(self, obj=None):
if obj is None:
obj = self.read_obj(self.uuid)
self.physical_router = self.get_parent_uuid(obj)
self.logical_interfaces = set([li['uuid'] for li in
obj.get('logical_interfaces', [])])
# end update
@classmethod
def delete(cls, uuid):
if uuid not in cls._dict:
return
obj = cls._dict[uuid]
pr = PhysicalRouterDM.get(obj.physical_router)
if pr:
pr.physical_interfaces.discard(obj.uuid)
del cls._dict[uuid]
# end delete
# end PhysicalInterfaceDM
class LogicalInterfaceDM(DBBase):
_dict = {}
obj_type = 'logical_interface'
def __init__(self, uuid, obj_dict=None):
self.uuid = uuid
self.virtual_machine_interface = None
self.update(obj_dict)
if self.physical_interface:
parent = PhysicalInterfaceDM.get(self.physical_interface)
elif self.physical_router:
parent = PhysicalRouterDM.get(self.physical_router)
if parent:
parent.logical_interfaces.add(self.uuid)
# end __init__
def update(self, obj=None):
if obj is None:
obj = self.read_obj(self.uuid)
if obj['parent_type'] == 'physical-router':
self.physical_router = self.get_parent_uuid(obj)
self.physical_interface = None
else:
self.physical_interface = self.get_parent_uuid(obj)
self.physical_router = None
self.update_single_ref('virtual_machine_interface', obj)
self.name = obj['fq_name'][-1]
# end update
@classmethod
def delete(cls, uuid):
if uuid not in cls._dict:
return
obj = cls._dict[uuid]
if obj.physical_interface:
parent = PhysicalInterfaceDM.get(obj.physical_interface)
elif obj.physical_router:
            parent = PhysicalRouterDM.get(obj.physical_router)
if parent:
parent.logical_interfaces.discard(obj.uuid)
obj.update_single_ref('virtual_machine_interface', {})
del cls._dict[uuid]
# end delete
# end LogicalInterfaceDM
class VirtualMachineInterfaceDM(DBBase):
_dict = {}
obj_type = 'virtual_machine_interface'
def __init__(self, uuid, obj_dict=None):
self.uuid = uuid
self.virtual_network = None
self.logical_interface = None
self.update(obj_dict)
# end __init__
def update(self, obj=None):
if obj is None:
obj = self.read_obj(self.uuid)
self.update_single_ref('logical_interface', obj)
self.update_single_ref('virtual_network', obj)
# end update
@classmethod
def delete(cls, uuid):
if uuid not in cls._dict:
return
obj = cls._dict[uuid]
obj.update_single_ref('logical_interface', {})
obj.update_single_ref('virtual_network', {})
del cls._dict[uuid]
# end delete
# end VirtualMachineInterfaceDM
class VirtualNetworkDM(DBBase):
_dict = {}
obj_type = 'virtual_network'
def __init__(self, uuid, obj_dict=None):
self.uuid = uuid
self.physical_routers = set()
self.router_external = False
self.vxlan_configured = False
self.vxlan_vni = None
self.gateways = None
self.update(obj_dict)
# end __init__
def update(self, obj=None):
if obj is None:
obj = self.read_obj(self.uuid)
self.update_multiple_refs('physical_router', obj)
self.fq_name = obj['fq_name']
try:
self.router_external = obj['router_external']
except KeyError:
self.router_external = False
try:
prop = obj['virtual_network_properties']
if prop['vxlan_network_identifier'] is not None:
self.vxlan_configured = True
self.vxlan_vni = prop['vxlan_network_identifier']
except KeyError:
self.vxlan_configured = False
self.vxlan_vni = None
self.routing_instances = set([ri['uuid'] for ri in
obj.get('routing_instances', [])])
self.virtual_machine_interfaces = set(
[vmi['uuid'] for vmi in
obj.get('virtual_machine_interface_back_refs', [])])
self.prefixes = set()
self.gateways = set()
for ipam_ref in obj.get('network_ipam_refs', []):
for subnet in ipam_ref['attr'].get('ipam_subnets', []):
self.prefixes.add('%s/%d' % (subnet['subnet']['ip_prefix'],
subnet['subnet']['ip_prefix_len'])
)
self.gateways.add(subnet['default_gateway'])
# end update
@classmethod
def delete(cls, uuid):
if uuid not in cls._dict:
return
obj = cls._dict[uuid]
obj.update_multiple_refs('physical_router', {})
del cls._dict[uuid]
# end delete
# end VirtualNetworkDM
class RoutingInstanceDM(DBBase):
_dict = {}
obj_type = 'routing_instance'
def __init__(self, uuid, obj_dict=None):
self.uuid = uuid
self.virtual_network = None
self.import_targets = set()
self.export_targets = set()
self.routing_instances = set()
self.update(obj_dict)
vn = VirtualNetworkDM.get(self.virtual_network)
if vn:
vn.routing_instances.add(self.uuid)
# end __init__
def update(self, obj=None):
if obj is None:
obj = self.read_obj(self.uuid)
self.fq_name = obj['fq_name']
self.virtual_network = self.get_parent_uuid(obj)
self.import_targets = set()
self.export_targets = set()
for rt_ref in obj.get('route_target_refs', []):
rt_name = rt_ref['to'][0]
exim = rt_ref.get('attr').get('import_export')
if exim == 'export':
self.export_targets.add(rt_name)
elif exim == 'import':
self.import_targets.add(rt_name)
else:
self.import_targets.add(rt_name)
self.export_targets.add(rt_name)
self.update_multiple_refs('routing_instance', obj)
# end update
@classmethod
def delete(cls, uuid):
if uuid not in cls._dict:
return
obj = cls._dict[uuid]
vn = VirtualNetworkDM.get(obj.virtual_network)
if vn:
vn.routing_instances.discard(obj.uuid)
del cls._dict[uuid]
# end delete
# end RoutingInstanceDM
DBBase._OBJ_TYPE_MAP = {
'bgp_router': BgpRouterDM,
'physical_router': PhysicalRouterDM,
'physical_interface': PhysicalInterfaceDM,
'logical_interface': LogicalInterfaceDM,
'virtual_machine_interface': VirtualMachineInterfaceDM,
'virtual_network': VirtualNetworkDM,
'routing_instance': RoutingInstanceDM,
}
| apache-2.0 | -7,415,153,349,394,149,000 | 33.972772 | 84 | 0.544978 | false |
sl4shme/pplloopp | images/shaddock/mistral/configparse.py | 1 | 2038 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2014 Thibaut Lapierre <[email protected]>. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ConfigParser
import os
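# Read the stock mistral.conf.sample, inject deployment-specific values taken
# from environment variables (HOST_IP, RABBIT_PASS, MISTRAL_DBPASS,
# MISTRAL_PASS), and write the result out as /mistral/etc/mistral.conf.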
configfile = '/mistral/etc/mistral.conf.sample'
config = ConfigParser.RawConfigParser()
config.read(configfile)
section = 'DEFAULT'
config.set(section, 'rpc_backend', 'rabbit')
config.set(section, 'rabbit_host', os.environ.get('HOST_IP'))
config.set(section, 'rabbit_password', os.environ.get('RABBIT_PASS'))
section = 'database'
if not set([section]).issubset(config.sections()):
config.add_section(section)
config.set(section, 'connection',
'mysql://mistral:%s@%s/mistral'
% (os.environ.get('MISTRAL_DBPASS'),
os.environ.get('HOST_IP')))
section = 'keystone_authtoken'
if not set([section]).issubset(config.sections()):
config.add_section(section)
config.set(section, 'auth_uri',
'http://%s:5000/v3' % os.environ.get('HOST_IP'))
config.set(section, 'identity_uri',
'http://%s:35357' % os.environ.get('HOST_IP'))
config.set(section, 'admin_tenant_name', 'service')
config.set(section, 'auth_version', 'v3')
config.set(section, 'admin_user', 'mistral')
config.set(section, 'admin_password', os.environ.get('MISTRAL_PASS'))
configfile = '/mistral/etc/mistral.conf'
print('Parsing of %s...' % configfile)
with open(configfile, 'w') as configfile:
config.write(configfile)
print('Done')
| apache-2.0 | 166,885,613,919,734,000 | 36.740741 | 78 | 0.677134 | false |
globocom/database-as-a-service | dbaas/drivers/tests/test_driver_pymongo.py | 1 | 9370 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
import os
from mock import patch, MagicMock
from drivers import DriverFactory
from physical.tests import factory as factory_physical
from logical.tests import factory as factory_logical
from logical.models import Database
from drivers.mongodb import MongoDB, MongoDBReplicaSet
from drivers.tests.base import (BaseMongoDriverTestCase, FakeDriverClient,
BaseSingleInstanceUpdateSizesTest,
BaseHAInstanceUpdateSizesTest)
from physical.models import Instance
@patch('drivers.mongodb.MongoDB.pymongo', new=FakeDriverClient)
@patch('physical.models.DiskOffering.size_bytes',
new=MagicMock(return_value=90))
class MongoSingleUpdateSizesTestCase(
BaseSingleInstanceUpdateSizesTest,
BaseMongoDriverTestCase):
pass
@patch('drivers.mongodb.MongoDB.pymongo', new=FakeDriverClient)
@patch('physical.models.DiskOffering.size_bytes',
new=MagicMock(return_value=90))
class MongoReplicaSetUpdateSizesTestCase(
BaseMongoDriverTestCase,
BaseHAInstanceUpdateSizesTest):
driver_class = MongoDBReplicaSet
secondary_instance_quantity = 2
secondary_instance_type = Instance.MONGODB_ARBITER
class MongoUsedAndTotalTestCase(BaseMongoDriverTestCase):
"""
    Tests MongoDB masters' total and used size reporting
"""
def test_masters_single_instance(self):
"""
        Validates the total and used sizes reported when there is a single instance
"""
self.instance.total_size_in_bytes = 105
self.instance.used_size_in_bytes = 55
self.instance.save()
self.assertEqual(self.driver.masters_total_size_in_bytes, 105)
expected_total_size_in_gb = 105 * self.GB_FACTOR
self.assertEqual(
self.driver.get_master_instance_total_size_in_gb(),
expected_total_size_in_gb
)
self.assertEqual(self.driver.masters_used_size_in_bytes, 55)
def test_masters_replicaset_instance(self):
"""
        Validates the total and used sizes reported for a replica set with multiple instances
"""
self.driver = MongoDBReplicaSet(databaseinfra=self.databaseinfra)
self.driver.check_instance_is_master = MagicMock(
side_effect=self.instance_helper.check_instance_is_master
)
self.instance_helper.create_instances_by_quant(
infra=self.databaseinfra, base_address='131',
instance_type=self.instance_type,
total_size_in_bytes=35, used_size_in_bytes=10
)
self.instance.total_size_in_bytes = 35
self.instance.used_size_in_bytes = 10
self.instance.save()
self.assertEqual(self.driver.masters_total_size_in_bytes, 35)
expected_total_size_in_gb = 35 * self.GB_FACTOR
self.assertEqual(
self.driver.get_master_instance_total_size_in_gb(),
expected_total_size_in_gb
)
self.assertEqual(self.driver.masters_used_size_in_bytes, 10)
class MongoDBEngineTestCase(BaseMongoDriverTestCase):
"""
Tests MongoDB Engine
"""
def test_mongodb_app_installed(self):
self.assertTrue(DriverFactory.is_driver_available("mongodb_single"))
self.assertTrue(
DriverFactory.is_driver_available("mongodb_replica_set")
)
# test mongo methods
def test_instantiate_mongodb_using_engine_factory(self):
self.assertEqual(MongoDB, type(self.driver))
self.assertEqual(self.databaseinfra, self.driver.databaseinfra)
def test_connection_string(self):
self.assertEqual(
"mongodb://<user>:<password>@{}".format(
self.instance_endpoint),
self.driver.get_connection()
)
def test_get_user(self):
self.assertEqual(self.databaseinfra.user, self.driver.get_user())
def test_get_password(self):
self.assertEqual(
self.databaseinfra.password, self.driver.get_password())
def test_get_default_port(self):
self.assertEqual(27017, self.driver.default_port)
@patch.object(MongoDB, 'get_replica_name')
def test_connection_string_when_in_replica_set(self, get_replica_name):
self.instance = factory_physical.InstanceFactory(
databaseinfra=self.databaseinfra, address='127.0.0.2', port=27018)
get_replica_name.return_value = 'my_repl'
expected_conn = ("mongodb://<user>:<password>"
"@{},127.0.0.2:27018"
"?replicaSet=my_repl").format(self.instance_endpoint)
self.assertEqual(expected_conn, self.driver.get_connection())
def test_connection_with_database(self):
self.database = factory_logical.DatabaseFactory(
name="my_db_url_name", databaseinfra=self.databaseinfra)
expected_conn = ("mongodb://<user>:<password>"
"@{}/my_db_url_name").format(self.instance_endpoint)
self.assertEqual(
expected_conn,
self.driver.get_connection(database=self.database)
)
@patch.object(MongoDB, 'get_replica_name')
def test_connection_with_database_and_replica(self, get_replica_name):
self.instance = factory_physical.InstanceFactory(
databaseinfra=self.databaseinfra, address='127.0.0.2', port=27018)
get_replica_name.return_value = 'my_repl'
self.database = factory_logical.DatabaseFactory(
name="my_db_url_name", databaseinfra=self.databaseinfra)
expected_conn = ("mongodb://<user>:<password>"
"@{},127.0.0.2:27018/my_db_url_name"
"?replicaSet=my_repl").format(self.instance_endpoint)
self.assertEqual(
expected_conn,
self.driver.get_connection(database=self.database)
)
class ManageDatabaseMongoDBTestCase(BaseMongoDriverTestCase):
""" Test case to managing database in mongodb engine """
def setUp(self):
super(ManageDatabaseMongoDBTestCase, self).setUp()
self.database = factory_logical.DatabaseFactory(
databaseinfra=self.databaseinfra)
self.instance.address = os.getenv('TESTS_MONGODB_HOST', '127.0.0.1')
self.instance.save()
# ensure database is dropped
self.driver_client.drop_database(self.database.name)
def tearDown(self):
if not Database.objects.filter(databaseinfra_id=self.databaseinfra.id):
self.database.delete()
super(ManageDatabaseMongoDBTestCase, self).tearDown()
def test_mongodb_create_database(self):
self.assertFalse(
self.database.name in self.driver_client.database_names())
self.driver.create_database(self.database)
self.assertTrue(
self.database.name in self.driver_client.database_names())
def test_mongodb_remove_database(self):
self.driver.create_database(self.database)
self.assertTrue(
self.database.name in self.driver_client.database_names())
self.driver.remove_database(self.database)
self.assertFalse(
self.database.name in self.driver_client.database_names())
class ManageCredentialsMongoDBTestCase(BaseMongoDriverTestCase):
""" Test case to managing credentials in mongodb engine """
def setUp(self):
super(ManageCredentialsMongoDBTestCase, self).setUp()
self.database = factory_logical.DatabaseFactory(
databaseinfra=self.databaseinfra)
self.credential = factory_logical.CredentialFactory(
database=self.database)
self.instance.address = os.getenv('TESTS_MONGODB_HOST', '127.0.0.1')
# self.instance.address = '127.0.0.1'
self.instance.save()
self.driver.create_database(self.database)
def tearDown(self):
self.driver.remove_database(self.database)
self.credential.delete()
self.database.delete()
super(ManageCredentialsMongoDBTestCase, self).tearDown()
def __find_user__(self, credential):
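        # MongoDB 2.6 moved user documents from <db>.system.users to
        # admin.system.users, so check the server version to look in the
        # right collection.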
v = self.driver_client.server_info()['version']
if v < '2.6':
return getattr(
self.driver_client,
credential.database.name
).system.users.find_one({"user": credential.user})
else:
return getattr(
self.driver_client,
"admin"
).system.users.find_one(
{"user": credential.user, "db": credential.database.name}
)
def test_mongodb_create_credential(self):
self.assertIsNone(
self.__find_user__(self.credential),
"User %s already exists. Invalid test" % self.credential
)
self.driver.create_user(self.credential)
user = self.__find_user__(self.credential)
self.assertIsNotNone(user)
self.assertEquals(self.credential.user, user['user'])
self.driver.remove_user(self.credential)
def test_mongodb_remove_credential(self):
self.driver.create_user(self.credential)
self.assertIsNotNone(
self.__find_user__(self.credential),
"Error creating user %s. Invalid test" % self.credential
)
self.driver.remove_user(self.credential)
self.assertIsNone(self.__find_user__(self.credential))
| bsd-3-clause | -8,187,081,293,294,255,000 | 36.48 | 79 | 0.649733 | false |
aliyun/aliyun-oss-python-sdk | tests/test_utils.py | 1 | 14543 | # -*- coding: utf-8 -*-
import unittest
import oss2
from oss2.exceptions import make_exception
import os
import sys
import tempfile
import requests
import datetime
import locale
import io
from functools import partial
from .common import *
import logging
try:
xrange
except NameError:
xrange = range
is_py2 = (sys.version_info[0] == 2)
is_py3 = (sys.version_info[0] == 3)
class TestUtils(OssTestCase):
def test_is_ip(self):
self.assertTrue(oss2.utils.is_ip_or_localhost('1.2.3.4'))
self.assertTrue(oss2.utils.is_ip_or_localhost('[2401:b180::dc]'))
self.assertTrue(oss2.utils.is_ip_or_localhost('localhost'))
self.assertTrue(oss2.utils.is_ip_or_localhost('1.2.3.4:80'))
self.assertTrue(oss2.utils.is_ip_or_localhost('[2401:b180::dc]:80'))
self.assertTrue(oss2.utils.is_ip_or_localhost('localhost:80'))
self.assertTrue(not oss2.utils.is_ip_or_localhost('-1.2.3.4'))
self.assertTrue(not oss2.utils.is_ip_or_localhost('1.256.1.2'))
self.assertTrue(not oss2.utils.is_ip_or_localhost('一.二.三.四'))
self.assertTrue(not oss2.utils.is_ip_or_localhost('[2401:b180::dc'))
def test_is_valid_bucket_name(self):
self.assertTrue(oss2.is_valid_bucket_name('abc'))
self.assertTrue(oss2.is_valid_bucket_name('hello-world'))
self.assertTrue(not oss2.is_valid_bucket_name('HELLO'))
self.assertTrue(not oss2.is_valid_bucket_name('hello_world'))
self.assertTrue(not oss2.is_valid_bucket_name('hello-'))
self.assertTrue(not oss2.is_valid_bucket_name('-hello'))
access_key_id = "test_access_key_id"
access_key_secret = "test_access_key_secret"
endpoint = "oss-cn-shenzhen.aliyuncs.com"
bucket_name = "hello"
bucket = oss2.Bucket(oss2.Auth(access_key_id, access_key_secret), endpoint, bucket_name)
bucket_name = "hello-"
self.assertRaises(oss2.exceptions.ClientError, oss2.Bucket, oss2.Auth(access_key_id, access_key_secret),
endpoint, bucket_name)
def test_is_valid_endpoint(self):
from oss2 import utils
self.assertTrue(utils.is_valid_endpoint('oss-cn-shenzhen.aliyuncs.com'))
self.assertTrue(utils.is_valid_endpoint('http://www.aliyun_cs.com'))
self.assertTrue(utils.is_valid_endpoint('http://www.aliyun_cs.com:80'))
self.assertTrue(utils.is_valid_endpoint('https://www.aliyuncs.com'))
self.assertTrue(utils.is_valid_endpoint('http://192.168.1.1'))
self.assertTrue(utils.is_valid_endpoint('http://192.168.1.1:3182'))
self.assertTrue(utils.is_valid_endpoint('ftp://www.aliyuncs.com:21'))
self.assertTrue(utils.is_valid_endpoint('192.168.1.1:80'))
self.assertTrue(utils.is_valid_endpoint('www.aliyun_cs.com:80'))
self.assertFalse(utils.is_valid_endpoint(None))
self.assertFalse(utils.is_valid_endpoint(''))
self.assertFalse(utils.is_valid_endpoint('www.aliyuncs.com:'))
self.assertFalse(utils.is_valid_endpoint('http://192.168.1.1:318r'))
self.assertFalse(utils.is_valid_endpoint('www.aliyuncs.com\\www.test.com'))
def test_compat(self):
# from unicode
u = u'中文'
self.assertEqual(u, oss2.to_unicode(u))
self.assertEqual(u.encode('utf-8'), oss2.to_bytes(u))
if is_py2:
self.assertEqual(u.encode('utf-8'), oss2.to_string(u))
if is_py3:
self.assertEqual(u, oss2.to_string(u))
# from bytes
b = u.encode('utf-8')
self.assertEqual(b.decode('utf-8'), oss2.to_unicode(b))
self.assertEqual(b, oss2.to_bytes(b))
if is_py2:
self.assertEqual(b, oss2.to_string(b))
if is_py3:
self.assertEqual(b.decode('utf-8'), oss2.to_string(b))
def test_makedir_p(self):
tempdir = tempfile.gettempdir()
dirpath = os.path.join(tempdir, random_string(10))
oss2.utils.makedir_p(dirpath)
os.path.isdir(dirpath)
# recreate same dir should not issue an error
oss2.utils.makedir_p(dirpath)
def __fake_response(self, status, error_body):
key = self.random_key()
self.bucket.put_object(key, oss2.to_bytes(error_body))
resp = self.bucket.get_object(key).resp
resp.status = status
return resp
def test_make_exception(self):
body = 'bad body'
e = make_exception(self.__fake_response(400, body))
self.assertTrue(isinstance(e, oss2.exceptions.ServerError))
self.assertEqual(e.status, 400)
self.assertEqual(e.body, oss2.to_bytes(body))
body = '<Error><Code>NoSuchKey</Code><Message>中文和控制字符</Message></Error>'
e = make_exception(self.__fake_response(404, body))
self.assertTrue(isinstance(e, oss2.exceptions.NoSuchKey))
self.assertEqual(e.status, 404)
self.assertEqual(e.code, 'NoSuchKey')
def test_len(self):
adapter = oss2.utils.SizedFileAdapter('ss', 2500000000)
self.assertEqual(requests.utils.super_len(adapter), 2500000000)
adapter = oss2.utils._BytesAndFileAdapter('ss', size=2500000000)
self.assertEqual(requests.utils.super_len(adapter), 2500000000)
def test_adapter_composition(self):
def progress_callback(consumed_bytes, total_bytes):
pass
crc_adapter = oss2.utils.make_crc_adapter('sss')
progress_adapter = oss2.utils.make_progress_adapter(crc_adapter, progress_callback)
self.assertEqual(progress_adapter.len, 3)
def test_default_logger_basic(self):
# verify default logger
# self.assertEqual(oss2.defaults.get_logger(), logging.getLogger())
# verify custom logger
# custom_logger = logging.getLogger('oss2')
# oss2.defaults.logger = custom_logger
# self.assertEqual(oss2.defaults.get_logger(), custom_logger)
custom_logger = logging.getLogger('oss2')
self.assertEqual(oss2.logger, custom_logger)
def test_default_logger_put(self):
custom_logger = logging.getLogger('oss2')
# oss2.defaults.logger = custom_logger
custom_logger.addHandler(logging.StreamHandler(sys.stdout))
custom_logger.setLevel(logging.DEBUG)
key = self.random_key()
self.bucket.put_object(key, 'abc')
resp = self.bucket.get_object(key).resp
self.assertEqual(b'abc', resp.read())
custom_logger.setLevel(logging.CRITICAL)
def test_http_to_unixtime_in_zh_CN_locale(self):
time_string = 'Sat, 06 Jan 2018 00:00:00 GMT'
time_val = 1515196800
saved_locale = locale.setlocale(locale.LC_TIME)
if os.name == 'nt':
locale.setlocale(locale.LC_TIME, '')
else:
locale.setlocale(locale.LC_TIME, 'zh_CN.UTF-8')
self.assertEqual(time_val, oss2.utils.http_to_unixtime(time_string))
self.assertRaises(ValueError, oss2.utils.to_unixtime, time_string, '%a, %d %b %Y %H:%M:%S GMT')
locale.setlocale(locale.LC_TIME, saved_locale)
def test_http_to_unixtime_basic(self):
case_list = [
('Sat, 06 Jan 2018 00:00:00 GMT', 1515196800),
('Fri, 09 Feb 2018 01:01:01 GMT', 1518138061),
('Sun, 11 Mar 2018 10:10:10 GMT', 1520763010),
('Mon, 23 Apr 2018 21:21:21 GMT', 1524518481),
('Thu, 31 May 2018 23:59:59 GMT', 1527811199),
('Wed, 20 Jun 2018 20:31:30 GMT', 1529526690),
('Tue, 10 Jul 2018 11:11:11 GMT', 1531221071),
('Tue, 21 Aug 1979 09:09:09 GMT', 304074549),
('Wed, 29 Sep 2100 10:21:32 GMT', 4125896492),
('Fri, 01 Oct 1999 08:00:00 GMT', 938764800),
('Wed, 11 Nov 2009 00:00:00 GMT', 1257897600),
('Wed, 12 Dec 2012 12:12:12 GMT', 1355314332)
]
for time_string, time_val in case_list:
t1 = oss2.utils.http_to_unixtime(time_string)
t2 = oss2.utils.to_unixtime(time_string, '%a, %d %b %Y %H:%M:%S GMT')
self.assertEqual(time_val, t1)
self.assertEqual(time_val, t2)
self.assertEqual(time_string, oss2.utils.http_date(time_val))
def test_http_to_unixtime_one_day(self):
now = int(time.time())
for t in xrange(now, now + 86400):
time_string = oss2.utils.http_date(t)
self.assertEqual(t, oss2.utils.http_to_unixtime(time_string))
def test_http_to_unixtime_one_year(self):
now = int(time.time())
for i in xrange(366):
t = now + i * 86400
time_string = oss2.utils.http_date(t)
self.assertEqual(t, oss2.utils.http_to_unixtime(time_string))
def test_http_to_unixtime_bad_format(self):
case_list = [
'',
'Sat',
'Sat, 06 ',
'Sat, 06 Jan',
'Sat, 06 Jan 20',
'Sat, 06 Jan 2018 ',
'Sat, 06 Jan 2018 00',
'Sat, 06 Jan 2018 00:',
'Sat, 06 Jan 2018 00:00:',
'Sat, 06 Jan 2018 00:00:00',
'Sat, 06 Jan 2018 00:00:00 G',
'Unk, 06 Jan 2018 00:00:00 GMT',
'Friday, 12 Dec 2012 12:12:12 GMT',
'We, 12 Dec 2012 12:12:12 GMT',
'Wed 12 Dec 2012 12:12:12 GMT',
'Wed, 32 Dec 2012 12:12:12 GMT',
'Wed, 31 December 2012 12:12:12 GMT',
'Wed, 31 De 2012 12:12:12 GMT',
'Wed, 31 2012 12:12:12 GMT',
'Wed, 12 Dec 12:12:12 GMT',
'Wed, 31 Dec 2012 24:12:12 GMT',
'Wed, 31 Dec 2012 23:60:12 GMT',
'Wed, 31 Dec 2012 23:10:60 GMT',
'Wed, 31 Dec 2012 2:10:60 GMT',
'Wed, 31 Dec 2012 :10:60 GMT',
'Wed, 31 Dec 2012 02:1:60 GMT',
'Wed, 31 Dec 2012 02:01:0 GMT',
'Wed, 31 Dec 2012 02:01:01 CST',
'Wed, 31 Dec 2012 02:01:01 GMTA',
'Wed, 31 Dec 2012 02:01:01 GMT ABC',
'X Wed, 31 Dec 2012 02:01:01 GMT',
' Wed, 31 Dec 2012 02:01:01 GMT'
]
for bad_string in case_list:
try:
oss2.utils.http_to_unixtime(bad_string)
except ValueError as e:
self.assertEqual(str(e), bad_string + ' is not in valid HTTP date format')
else:
self.assertTrue(False, bad_string)
def test_iso8601_to_unixtime_in_zh_CN_locale(self):
time_string = '2018-02-09T01:01:01.000Z'
time_val = 1518138061
saved_locale = locale.setlocale(locale.LC_TIME)
if os.name == 'nt':
locale.setlocale(locale.LC_TIME, '')
else:
locale.setlocale(locale.LC_TIME, 'zh_CN.UTF-8')
# iso8601 contains no locale related info, so it is OK to use to_unixtime()
self.assertEqual(time_val, oss2.utils.iso8601_to_unixtime(time_string))
self.assertEqual(time_val, oss2.utils.to_unixtime(time_string, '%Y-%m-%dT%H:%M:%S.000Z'))
locale.setlocale(locale.LC_TIME, saved_locale)
def test_iso8601_to_unixtime_basic(self):
case_list = [
('2018-01-06T00:00:00.000Z', 1515196800),
('2018-02-09T01:01:01.000Z', 1518138061),
('2018-03-11T10:10:10.000Z', 1520763010),
('2018-04-23T21:21:21.000Z', 1524518481),
('2018-05-31T23:59:59.000Z', 1527811199),
('2018-06-20T20:31:30.000Z', 1529526690),
('2018-07-10T11:11:11.000Z', 1531221071),
('1979-08-21T09:09:09.000Z', 304074549),
('2100-09-29T10:21:32.000Z', 4125896492),
('1999-10-01T08:00:00.000Z', 938764800),
('2009-11-11T00:00:00.000Z', 1257897600),
('2012-12-12T12:12:12.000Z', 1355314332)
]
for time_string, time_val in case_list:
t1 = oss2.utils.iso8601_to_unixtime(time_string)
t2 = oss2.utils.to_unixtime(time_string, '%Y-%m-%dT%H:%M:%S.000Z')
self.assertEqual(time_val, t1)
self.assertEqual(time_val, t2)
self.assertEqual(time_string, oss2.utils.date_to_iso8601(datetime.datetime.utcfromtimestamp(time_val)))
def test_iso8601_to_unixtime_one_day(self):
now = int(time.time())
for t in xrange(now, now + 86400):
time_string = oss2.utils.date_to_iso8601(datetime.datetime.utcfromtimestamp(t))
self.assertEqual(t, oss2.utils.iso8601_to_unixtime(time_string))
def test_iso8601_to_unixtime_one_year(self):
now = int(time.time())
for i in xrange(366):
t = now + i * 86400
time_string = oss2.utils.date_to_iso8601(datetime.datetime.utcfromtimestamp(t))
self.assertEqual(t, oss2.utils.iso8601_to_unixtime(time_string))
def test_iso8601_to_unixtime_bad_format(self):
case_list = [
'',
'2012',
'2012-',
'2012-12',
'2012-12-',
'2012-12-12',
'2012-12-12T',
'2012-12-12T12',
'2012-12-12T12:',
'2012-12-12T12:1',
'2012-12-12T12:12:',
'2012-12-12T12:12:12',
'2012-12-12T12:12:12.',
'2012-12-12T12:12:12.0',
'2012-12-12T12:12:12.00',
'2012-12-12T12:12:12.000',
'2012-12-12T12:12:12.000X',
'-12-12T12:12:12.000Z',
'2012-13-12T12:12:12.000Z',
'2012-12-32T12:12:12.000Z',
'2012-12-12X12:12:12.000Z',
'2012-12-12T:12:12.000Z',
'2012-12-12T0:12:12.000Z',
'2012-12-12T60:12:12.000Z',
'2012-12-12T12::12.000Z',
'2012-12-12T12:1:12.000Z',
'2012-12-12T12:60:12.000Z',
'2012-12-12T12:12:1.000Z',
'2012-12-12T12:12:60.000Z',
'2012-12-12T12:12:12.100Z',
'2012-12-12T12:12:12.010Z',
'2012-12-12T12:12:12.001Z',
'2012-12-12T12:12:12.000ZZ',
'2012-12-12T12:12:12.000Z X',
'2012-12-12T12:12:00.000Z ',
' 2012-12-12T12:12:00.000Z',
'X 2012-12-12T12:12:00.000Z',
]
for bad_string in case_list:
try:
oss2.utils.iso8601_to_unixtime(bad_string)
except ValueError as e:
self.assertEqual(str(e), bad_string + ' is not in valid ISO8601 format')
else:
self.assertTrue(False, bad_string)
if __name__ == '__main__':
unittest.main()
| mit | -59,843,685,782,884,540 | 36.318766 | 115 | 0.581112 | false |
rcomer/iris | lib/iris/tests/unit/fileformats/cf/test_CFReader.py | 1 | 14385 | # Copyright Iris contributors
#
# This file is part of Iris and is released under the LGPL license.
# See COPYING and COPYING.LESSER in the root of the repository for full
# licensing details.
"""
Unit tests for the `iris.fileformats.cf.CFReader` class.
"""
# Import iris.tests first so that some things can be initialised before
# importing anything else.
import iris.tests as tests
from unittest import mock
import numpy as np
from iris.fileformats.cf import CFReader
def netcdf_variable(
name,
dimensions,
dtype,
ancillary_variables=None,
coordinates="",
bounds=None,
climatology=None,
formula_terms=None,
grid_mapping=None,
cell_measures=None,
standard_name=None,
):
"""Return a mock NetCDF4 variable."""
ndim = 0
if dimensions is not None:
dimensions = dimensions.split()
ndim = len(dimensions)
else:
dimensions = []
ncvar = mock.Mock(
name=name,
dimensions=dimensions,
ncattrs=mock.Mock(return_value=[]),
ndim=ndim,
dtype=dtype,
ancillary_variables=ancillary_variables,
coordinates=coordinates,
bounds=bounds,
climatology=climatology,
formula_terms=formula_terms,
grid_mapping=grid_mapping,
cell_measures=cell_measures,
standard_name=standard_name,
)
return ncvar
class Test_translate__global_attributes(tests.IrisTest):
def setUp(self):
ncvar = netcdf_variable("ncvar", "height", np.float64)
ncattrs = mock.Mock(return_value=["dimensions"])
getncattr = mock.Mock(return_value="something something_else")
self.dataset = mock.Mock(
file_format="NetCDF4",
variables={"ncvar": ncvar},
ncattrs=ncattrs,
getncattr=getncattr,
)
def test_create_global_attributes(self):
with mock.patch("netCDF4.Dataset", return_value=self.dataset):
global_attrs = CFReader("dummy").cf_group.global_attributes
self.assertEqual(
global_attrs["dimensions"], "something something_else"
)
class Test_translate__formula_terms(tests.IrisTest):
def setUp(self):
self.delta = netcdf_variable(
"delta", "height", np.float64, bounds="delta_bnds"
)
self.delta_bnds = netcdf_variable(
"delta_bnds", "height bnds", np.float
)
self.sigma = netcdf_variable(
"sigma", "height", np.float64, bounds="sigma_bnds"
)
self.sigma_bnds = netcdf_variable(
"sigma_bnds", "height bnds", np.float
)
self.orography = netcdf_variable("orography", "lat lon", np.float64)
formula_terms = "a: delta b: sigma orog: orography"
standard_name = "atmosphere_hybrid_height_coordinate"
self.height = netcdf_variable(
"height",
"height",
np.float64,
formula_terms=formula_terms,
bounds="height_bnds",
standard_name=standard_name,
)
# Over-specify the formula terms on the bounds variable,
# which will be ignored by the cf loader.
formula_terms = "a: delta_bnds b: sigma_bnds orog: orography"
self.height_bnds = netcdf_variable(
"height_bnds",
"height bnds",
np.float64,
formula_terms=formula_terms,
)
self.lat = netcdf_variable("lat", "lat", np.float64)
self.lon = netcdf_variable("lon", "lon", np.float64)
        # Note that only lat and lon are explicitly associated as coordinates.
self.temp = netcdf_variable(
"temp", "height lat lon", np.float64, coordinates="lat lon"
)
self.variables = dict(
delta=self.delta,
sigma=self.sigma,
orography=self.orography,
height=self.height,
lat=self.lat,
lon=self.lon,
temp=self.temp,
delta_bnds=self.delta_bnds,
sigma_bnds=self.sigma_bnds,
height_bnds=self.height_bnds,
)
ncattrs = mock.Mock(return_value=[])
self.dataset = mock.Mock(
file_format="NetCDF4", variables=self.variables, ncattrs=ncattrs
)
# Restrict the CFReader functionality to only performing translations.
build_patch = mock.patch(
"iris.fileformats.cf.CFReader._build_cf_groups"
)
reset_patch = mock.patch("iris.fileformats.cf.CFReader._reset")
build_patch.start()
reset_patch.start()
self.addCleanup(build_patch.stop)
self.addCleanup(reset_patch.stop)
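    # Editor's note (illustrative, not part of the original test): for the CF standard name
    # 'atmosphere_hybrid_height_coordinate' used above, the formula terms encode
    #     z(k, j, i) = a(k) + b(k) * orog(j, i)
    # which is why 'delta' (a), 'sigma' (b) and 'orography' (orog) are expected as terms.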
def test_create_formula_terms(self):
with mock.patch("netCDF4.Dataset", return_value=self.dataset):
cf_group = CFReader("dummy").cf_group
self.assertEqual(len(cf_group), len(self.variables))
# Check there is a singular data variable.
group = cf_group.data_variables
self.assertEqual(len(group), 1)
self.assertEqual(list(group.keys()), ["temp"])
self.assertIs(group["temp"].cf_data, self.temp)
# Check there are three coordinates.
group = cf_group.coordinates
self.assertEqual(len(group), 3)
coordinates = ["height", "lat", "lon"]
self.assertEqual(set(group.keys()), set(coordinates))
for name in coordinates:
self.assertIs(group[name].cf_data, getattr(self, name))
# Check there are three auxiliary coordinates.
group = cf_group.auxiliary_coordinates
self.assertEqual(len(group), 3)
aux_coordinates = ["delta", "sigma", "orography"]
self.assertEqual(set(group.keys()), set(aux_coordinates))
for name in aux_coordinates:
self.assertIs(group[name].cf_data, getattr(self, name))
# Check all the auxiliary coordinates are formula terms.
formula_terms = cf_group.formula_terms
self.assertEqual(set(group.items()), set(formula_terms.items()))
# Check there are three bounds.
group = cf_group.bounds
self.assertEqual(len(group), 3)
bounds = ["height_bnds", "delta_bnds", "sigma_bnds"]
self.assertEqual(set(group.keys()), set(bounds))
for name in bounds:
self.assertEqual(group[name].cf_data, getattr(self, name))
class Test_build_cf_groups__formula_terms(tests.IrisTest):
def setUp(self):
self.delta = netcdf_variable(
"delta", "height", np.float64, bounds="delta_bnds"
)
self.delta_bnds = netcdf_variable(
"delta_bnds", "height bnds", np.float
)
self.sigma = netcdf_variable(
"sigma", "height", np.float64, bounds="sigma_bnds"
)
self.sigma_bnds = netcdf_variable(
"sigma_bnds", "height bnds", np.float
)
self.orography = netcdf_variable("orography", "lat lon", np.float64)
formula_terms = "a: delta b: sigma orog: orography"
standard_name = "atmosphere_hybrid_height_coordinate"
self.height = netcdf_variable(
"height",
"height",
np.float64,
formula_terms=formula_terms,
bounds="height_bnds",
standard_name=standard_name,
)
# Over-specify the formula terms on the bounds variable,
# which will be ignored by the cf loader.
formula_terms = "a: delta_bnds b: sigma_bnds orog: orography"
self.height_bnds = netcdf_variable(
"height_bnds",
"height bnds",
np.float64,
formula_terms=formula_terms,
)
self.lat = netcdf_variable("lat", "lat", np.float64)
self.lon = netcdf_variable("lon", "lon", np.float64)
self.x = netcdf_variable("x", "lat lon", np.float64)
self.y = netcdf_variable("y", "lat lon", np.float64)
        # Note that only lat and lon are explicitly associated as coordinates.
self.temp = netcdf_variable(
"temp", "height lat lon", np.float64, coordinates="x y"
)
self.variables = dict(
delta=self.delta,
sigma=self.sigma,
orography=self.orography,
height=self.height,
lat=self.lat,
lon=self.lon,
temp=self.temp,
delta_bnds=self.delta_bnds,
sigma_bnds=self.sigma_bnds,
height_bnds=self.height_bnds,
x=self.x,
y=self.y,
)
ncattrs = mock.Mock(return_value=[])
self.dataset = mock.Mock(
file_format="NetCDF4", variables=self.variables, ncattrs=ncattrs
)
# Restrict the CFReader functionality to only performing translations
# and building first level cf-groups for variables.
patcher = mock.patch("iris.fileformats.cf.CFReader._reset")
patcher.start()
self.addCleanup(patcher.stop)
def test_associate_formula_terms_with_data_variable(self):
with mock.patch("netCDF4.Dataset", return_value=self.dataset):
cf_group = CFReader("dummy").cf_group
self.assertEqual(len(cf_group), len(self.variables))
# Check the cf-group associated with the data variable.
temp_cf_group = cf_group["temp"].cf_group
# Check the data variable is associated with eight variables.
self.assertEqual(len(temp_cf_group), 8)
# Check there are three coordinates.
group = temp_cf_group.coordinates
self.assertEqual(len(group), 3)
coordinates = ["height", "lat", "lon"]
self.assertEqual(set(group.keys()), set(coordinates))
for name in coordinates:
self.assertIs(group[name].cf_data, getattr(self, name))
# Check the height coordinate is bounded.
group = group["height"].cf_group
self.assertEqual(len(group.bounds), 1)
self.assertIn("height_bnds", group.bounds)
self.assertIs(group["height_bnds"].cf_data, self.height_bnds)
# Check there are five auxiliary coordinates.
group = temp_cf_group.auxiliary_coordinates
self.assertEqual(len(group), 5)
aux_coordinates = ["delta", "sigma", "orography", "x", "y"]
self.assertEqual(set(group.keys()), set(aux_coordinates))
for name in aux_coordinates:
self.assertIs(group[name].cf_data, getattr(self, name))
# Check all the auxiliary coordinates are formula terms.
formula_terms = cf_group.formula_terms
self.assertTrue(
set(formula_terms.items()).issubset(list(group.items()))
)
# Check the terms by root.
for name, term in zip(aux_coordinates, ["a", "b", "orog"]):
self.assertEqual(
formula_terms[name].cf_terms_by_root, dict(height=term)
)
# Check the bounded auxiliary coordinates.
for name, name_bnds in zip(
["delta", "sigma"], ["delta_bnds", "sigma_bnds"]
):
aux_coord_group = group[name].cf_group
self.assertEqual(len(aux_coord_group.bounds), 1)
self.assertIn(name_bnds, aux_coord_group.bounds)
self.assertIs(
aux_coord_group[name_bnds].cf_data,
getattr(self, name_bnds),
)
def test_promote_reference(self):
with mock.patch("netCDF4.Dataset", return_value=self.dataset):
cf_group = CFReader("dummy").cf_group
self.assertEqual(len(cf_group), len(self.variables))
# Check the number of data variables.
self.assertEqual(len(cf_group.data_variables), 1)
self.assertEqual(list(cf_group.data_variables.keys()), ["temp"])
# Check the number of promoted variables.
self.assertEqual(len(cf_group.promoted), 1)
self.assertEqual(list(cf_group.promoted.keys()), ["orography"])
# Check the promoted variable dependencies.
group = cf_group.promoted["orography"].cf_group.coordinates
self.assertEqual(len(group), 2)
coordinates = ("lat", "lon")
self.assertEqual(set(group.keys()), set(coordinates))
for name in coordinates:
self.assertIs(group[name].cf_data, getattr(self, name))
def test_formula_terms_ignore(self):
self.orography.dimensions = ["lat", "wibble"]
with mock.patch(
"netCDF4.Dataset", return_value=self.dataset
), mock.patch("warnings.warn") as warn:
cf_group = CFReader("dummy").cf_group
group = cf_group.promoted
self.assertEqual(list(group.keys()), ["orography"])
self.assertIs(group["orography"].cf_data, self.orography)
self.assertEqual(warn.call_count, 1)
def test_auxiliary_ignore(self):
self.x.dimensions = ["lat", "wibble"]
with mock.patch(
"netCDF4.Dataset", return_value=self.dataset
), mock.patch("warnings.warn") as warn:
cf_group = CFReader("dummy").cf_group
promoted = ["x", "orography"]
group = cf_group.promoted
self.assertEqual(set(group.keys()), set(promoted))
for name in promoted:
self.assertIs(group[name].cf_data, getattr(self, name))
self.assertEqual(warn.call_count, 1)
def test_promoted_auxiliary_ignore(self):
self.wibble = netcdf_variable("wibble", "lat wibble", np.float64)
self.variables["wibble"] = self.wibble
self.orography.coordinates = "wibble"
with mock.patch(
"netCDF4.Dataset", return_value=self.dataset
), mock.patch("warnings.warn") as warn:
cf_group = CFReader("dummy").cf_group.promoted
promoted = ["wibble", "orography"]
self.assertEqual(set(cf_group.keys()), set(promoted))
for name in promoted:
self.assertIs(cf_group[name].cf_data, getattr(self, name))
self.assertEqual(warn.call_count, 2)
if __name__ == "__main__":
tests.main()
| lgpl-3.0 | 7,296,252,401,670,800,000 | 39.407303 | 79 | 0.583455 | false |
Impavidity/kim_cnn | model.py | 1 | 2614 | import torch
import torch.nn as nn
import torch.nn.functional as F
class KimCNN(nn.Module):
def __init__(self, config):
super(KimCNN, self).__init__()
output_channel = config.output_channel
target_class = config.target_class
words_num = config.words_num
words_dim = config.words_dim
embed_num = config.embed_num
embed_dim = config.embed_dim
self.mode = config.mode
        Ks = 3  # There are three conv nets here
if config.mode == 'multichannel':
input_channel = 2
else:
input_channel = 1
self.embed = nn.Embedding(words_num, words_dim)
self.static_embed = nn.Embedding(embed_num, embed_dim)
self.non_static_embed = nn.Embedding(embed_num, embed_dim)
self.static_embed.weight.requires_grad = False
self.conv1 = nn.Conv2d(input_channel, output_channel, (3, words_dim), padding=(2,0))
self.conv2 = nn.Conv2d(input_channel, output_channel, (4, words_dim), padding=(3,0))
self.conv3 = nn.Conv2d(input_channel, output_channel, (5, words_dim), padding=(4,0))
self.dropout = nn.Dropout(config.dropout)
self.fc1 = nn.Linear(Ks * output_channel, target_class)
def forward(self, x):
x = x.text
if self.mode == 'rand':
word_input = self.embed(x) # (batch, sent_len, embed_dim)
x = word_input.unsqueeze(1) # (batch, channel_input, sent_len, embed_dim)
elif self.mode == 'static':
static_input = self.static_embed(x)
x = static_input.unsqueeze(1) # (batch, channel_input, sent_len, embed_dim)
elif self.mode == 'non-static':
non_static_input = self.non_static_embed(x)
x = non_static_input.unsqueeze(1) # (batch, channel_input, sent_len, embed_dim)
elif self.mode == 'multichannel':
non_static_input = self.non_static_embed(x)
static_input = self.static_embed(x)
x = torch.stack([non_static_input, static_input], dim=1) # (batch, channel_input=2, sent_len, embed_dim)
else:
print("Unsupported Mode")
exit()
x = [F.relu(self.conv1(x)).squeeze(3), F.relu(self.conv2(x)).squeeze(3), F.relu(self.conv3(x)).squeeze(3)]
# (batch, channel_output, ~=sent_len) * Ks
x = [F.max_pool1d(i, i.size(2)).squeeze(2) for i in x] # max-over-time pooling
# (batch, channel_output) * Ks
x = torch.cat(x, 1) # (batch, channel_output * Ks)
x = self.dropout(x)
logit = self.fc1(x) # (batch, target_size)
return logit
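# Editor's usage sketch (illustrative, not part of the original module). The config values and
# the torchtext-style batch object with a `.text` attribute are assumptions for demonstration:
#
#   import types
#   config = types.SimpleNamespace(
#       output_channel=100, target_class=2, words_num=10000, words_dim=300,
#       embed_num=10000, embed_dim=300, mode='rand', dropout=0.5)
#   model = KimCNN(config)
#   batch = types.SimpleNamespace(text=torch.randint(0, 10000, (16, 50)))  # (batch, sent_len)
#   logits = model(batch)  # -> shape (16, target_class)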
| mit | -8,031,420,437,314,376,000 | 43.305085 | 116 | 0.590283 | false |
sony/nnabla | python/src/nnabla/backward_function/greater_equal_scalar.py | 1 | 1032 | # Copyright 2019,2020,2021 Sony Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import nnabla.functions as F
from .utils import no_grad
def greater_equal_scalar_backward(inputs, val=1):
"""
Args:
      inputs (list of nn.Variable): Incoming grads/inputs to/of the forward function.
kwargs (dict of arguments): Dictionary of the corresponding function arguments.
Return:
list of Variable: Return the gradients wrt inputs of the corresponding function.
"""
return [None] * len(inputs)
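# Editor's note (illustrative, not part of the original file): greater_equal_scalar is an
# element-wise comparison against a scalar, so its output is piecewise constant and carries
# no useful gradient; returning None for every input reflects that non-differentiability.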
| apache-2.0 | -4,171,969,917,226,927,600 | 34.586207 | 86 | 0.738372 | false |
Dzess/ALFIRT | alfirt.runner/src/readers/tests/TagReaderX3DUnitTests.py | 1 | 4002 | '''
Created on Jun 9, 2011
@author: Piotr
'''
import unittest
import os
from readers.TagReaderX3D import TagReaderX3D
class TagReaderX3DUnitTests(unittest.TestCase):
def setUp(self):
# Setting up the X3D string with ALFIRT namespace tags
x3dString = """<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE X3D PUBLIC "ISO//Web3D//DTD X3D 3.2//EN" "http://www.web3d.org/specifications/x3d-3.2.dtd">
<X3D profile="Interchange"
version="3.2"
xmlns:xsd="http://www.w3.org/2001/XMLSchema-instance"
xmlns:alfirt="ALFIRT"
xsd:noNamespaceSchemaLocation=" http://www.web3d.org/specifications/x3d-3.2.xsd ">
<Scene>
<Viewpoint description='Rear View' orientation='0 1 0 3.14159' position='0 0 -10'/>
<Shape alfirt:anchor_translate="0 1 2" alfirt:anchor_rotate="0.4 0.2 0.3">
<IndexedFaceSet coordIndex="0 1 2">
<Coordinate point="0 0 0 1 0 0 0.5 1 0"/>
</IndexedFaceSet>
</Shape>
</Scene>
</X3D>
"""
# Creating file
self.fileName = "test_file_name"
with open(self.fileName, 'w') as fileStream:
fileStream.write(x3dString)
fileStream.close()
def tearDown(self):
# Removing file after test
os.remove(self.fileName)
def test_reading_none_results_in_exception(self):
x3dReader = TagReaderX3D()
with self.assertRaises(ValueError):
x3dReader.readScene(None)
with self.assertRaises(ValueError):
x3dReader.readScene("some no existing file")
def test_reading_file_with_no_anchor_results_in_exception(self):
'''
The anchor is required for the polar transformations around the object.
'''
# Setting up the X3D string with ALFIRT namespace tags
x3dString = """<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE X3D PUBLIC "ISO//Web3D//DTD X3D 3.2//EN" "http://www.web3d.org/specifications/x3d-3.2.dtd">
<X3D profile="Interchange"
version="3.2"
xmlns:xsd="http://www.w3.org/2001/XMLSchema-instance"
xmlns:alfirt="ALFIRT"
xsd:noNamespaceSchemaLocation=" http://www.web3d.org/specifications/x3d-3.2.xsd ">
<Scene>
<Viewpoint description='Rear View' orientation='0 1 0 3.14159' position='0 0 -10'/>
<Shape>
<IndexedFaceSet coordIndex="0 1 2">
<Coordinate point="0 0 0 1 0 0 0.5 1 0"/>
</IndexedFaceSet>
</Shape>
</Scene>
</X3D>
"""
# Write this file into the data
fileName = "test_file_without_anchor"
with open(fileName, 'w') as fileStream:
fileStream.write(x3dString)
fileStream.close()
# Get reader
x3dReader = TagReaderX3D()
try:
x3dReader.readScene(fileName)
except RuntimeError:
return
finally:
os.remove(fileName)
self.fail("The exception should have been thrown")
def test_reading_file_with_alfirt_tags(self):
'''
Checks if the elements passed in X3D string are correct.
'''
x3dReader = TagReaderX3D()
results = x3dReader.readScene(self.fileName)
# assert the values
translateCamera = results.camera.translate
rotateCamera = results.camera.rotate
translateAnchor = results.anchor.translate
rotateAnchor = results.anchor.rotate
self.assertEqual(translateAnchor, [0.0, 1.0, 2.0], 'Translate of the anchor should be 0 1 2')
self.assertEqual(rotateAnchor , [0.4, 0.2, 0.3 ], "Rotate of the anchor should be 0.4, 0.2 0.3")
self.assertEqual(translateCamera, [0.0, -10, 0], "The position of the camera should be 0 0 -10")
self.assertEqual(rotateCamera, [1.5707963705062866, 1.7340079025429667e-13, 3.1415903568267822], "The rotation of the camera should be 0 1 0 3.14")
#===============================================================================
# Test runner
#===============================================================================
if __name__ == '__main__':
unittest.main(verbosity=2)
| mit | 5,748,462,383,747,537,000 | 32.07438 | 155 | 0.615942 | false |
cloud-fan/spark | python/pyspark/pandas/internal.py | 1 | 68330 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
An internal immutable DataFrame with some metadata to manage indexes.
"""
import re
from typing import Any, Dict, List, Optional, Sequence, Tuple, Union, TYPE_CHECKING, cast
from itertools import accumulate
import py4j
import numpy as np
import pandas as pd
from pandas.api.types import CategoricalDtype # noqa: F401
from pyspark import sql as spark
from pyspark._globals import _NoValue, _NoValueType
from pyspark.sql import functions as F, Window
from pyspark.sql.functions import pandas_udf
from pyspark.sql.types import ( # noqa: F401
BooleanType,
DataType,
IntegralType,
LongType,
StructField,
StructType,
StringType,
)
# For running doctests and reference resolution in PyCharm.
from pyspark import pandas as ps
if TYPE_CHECKING:
# This is required in old Python 3.5 to prevent circular reference.
from pyspark.pandas.series import Series # noqa: F401 (SPARK-34943)
from pyspark.pandas.spark.utils import as_nullable_spark_type, force_decimal_precision_scale
from pyspark.pandas.data_type_ops.base import DataTypeOps
from pyspark.pandas.typedef import (
Dtype,
as_spark_type,
extension_dtypes,
infer_pd_series_spark_type,
spark_type_to_pandas_dtype,
)
from pyspark.pandas.utils import (
column_labels_level,
default_session,
is_name_like_tuple,
is_testing,
lazy_property,
name_like_string,
scol_for,
spark_column_equals,
verify_temp_column_name,
)
# A function to turn given numbers to Spark columns that represent pandas-on-Spark index.
SPARK_INDEX_NAME_FORMAT = "__index_level_{}__".format
SPARK_DEFAULT_INDEX_NAME = SPARK_INDEX_NAME_FORMAT(0)
# A pattern to check if the name of a Spark column is a pandas-on-Spark index name or not.
SPARK_INDEX_NAME_PATTERN = re.compile(r"__index_level_[0-9]+__")
NATURAL_ORDER_COLUMN_NAME = "__natural_order__"
HIDDEN_COLUMNS = {NATURAL_ORDER_COLUMN_NAME}
DEFAULT_SERIES_NAME = 0
SPARK_DEFAULT_SERIES_NAME = str(DEFAULT_SERIES_NAME)
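# Editor's note (illustrative, not part of the original source): the helpers above behave as
#   SPARK_INDEX_NAME_FORMAT(1)  -> '__index_level_1__'
#   SPARK_DEFAULT_INDEX_NAME    -> '__index_level_0__'
#   SPARK_INDEX_NAME_PATTERN.match('__index_level_3__') matches; a plain column name does not.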
class InternalField:
"""
The internal field to store the dtype as well as the Spark's StructField optionally.
Parameters
----------
dtype : numpy.dtype or pandas' ExtensionDtype
The dtype for the field
struct_field : StructField, optional
The `StructField` for the field. If None, InternalFrame will properly set.
"""
def __init__(self, dtype: Dtype, struct_field: Optional[StructField] = None):
self._dtype = dtype
self._struct_field = struct_field
@staticmethod
def from_struct_field(
struct_field: StructField, *, use_extension_dtypes: bool = False
) -> "InternalField":
"""
Returns a new InternalField object created from the given StructField.
The dtype will be inferred from the data type of the given StructField.
Parameters
----------
struct_field : StructField
The StructField used to create a new InternalField object.
use_extension_dtypes : bool
If True, try to use the extension dtypes.
Returns
-------
InternalField
"""
return InternalField(
dtype=spark_type_to_pandas_dtype(
struct_field.dataType, use_extension_dtypes=use_extension_dtypes
),
struct_field=struct_field,
)
@property
def dtype(self) -> Dtype:
"""Return the dtype for the field."""
return self._dtype
@property
def struct_field(self) -> Optional[StructField]:
"""Return the StructField for the field."""
return self._struct_field
@property
def name(self) -> str:
"""Return the field name if the StructField exists."""
assert self.struct_field is not None
return self.struct_field.name
@property
def spark_type(self) -> DataType:
"""Return the spark data type for the field if the StructField exists."""
assert self.struct_field is not None
return self.struct_field.dataType
@property
def nullable(self) -> bool:
"""Return the nullability for the field if the StructField exists."""
assert self.struct_field is not None
return self.struct_field.nullable
@property
def metadata(self) -> Dict[str, Any]:
"""Return the metadata for the field if the StructField exists."""
assert self.struct_field is not None
return self.struct_field.metadata
@property
def is_extension_dtype(self) -> bool:
"""Return whether the dtype for the field is an extension type or not."""
return isinstance(self.dtype, extension_dtypes)
def normalize_spark_type(self) -> "InternalField":
"""Return a new InternalField object with normalized Spark data type."""
assert self.struct_field is not None
return self.copy(
spark_type=force_decimal_precision_scale(as_nullable_spark_type(self.spark_type)),
nullable=True,
)
def copy(
self,
*,
name: Union[str, _NoValueType] = _NoValue,
dtype: Union[Dtype, _NoValueType] = _NoValue,
spark_type: Union[DataType, _NoValueType] = _NoValue,
nullable: Union[bool, _NoValueType] = _NoValue,
metadata: Union[Optional[Dict[str, Any]], _NoValueType] = _NoValue,
) -> "InternalField":
"""Copy the InternalField object."""
if name is _NoValue:
name = self.name
if dtype is _NoValue:
dtype = self.dtype
if spark_type is _NoValue:
spark_type = self.spark_type
if nullable is _NoValue:
nullable = self.nullable
if metadata is _NoValue:
metadata = self.metadata
return InternalField(
dtype=cast(Dtype, dtype),
struct_field=StructField(
name=cast(str, name),
dataType=cast(DataType, spark_type),
nullable=cast(bool, nullable),
metadata=cast(Optional[Dict[str, Any]], metadata),
),
)
def __repr__(self) -> str:
return "InternalField(dtype={dtype},struct_field={struct_field})".format(
dtype=self.dtype, struct_field=self.struct_field
)
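# Editor's sketch (hypothetical example, not in the original file): building an InternalField
# from a Spark StructField; the column name 'price' is made up for illustration.
#
#   field = InternalField.from_struct_field(StructField("price", LongType(), nullable=False))
#   field.dtype        # -> dtype('int64')
#   field.spark_type   # -> LongType()
#   field.copy(name="cost")  # new InternalField whose StructField is renamed to 'cost'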
class InternalFrame(object):
"""
The internal immutable DataFrame which manages Spark DataFrame and column names and index
information.
    .. note:: this is an internal class. It is not supposed to be exposed to users, and users
        should not directly access it.
The internal immutable DataFrame represents the index information for a DataFrame it belongs to.
For instance, if we have a pandas-on-Spark DataFrame as below, pandas DataFrame does not
store the index as columns.
>>> psdf = ps.DataFrame({
... 'A': [1, 2, 3, 4],
... 'B': [5, 6, 7, 8],
... 'C': [9, 10, 11, 12],
... 'D': [13, 14, 15, 16],
... 'E': [17, 18, 19, 20]}, columns = ['A', 'B', 'C', 'D', 'E'])
>>> psdf # doctest: +NORMALIZE_WHITESPACE
A B C D E
0 1 5 9 13 17
1 2 6 10 14 18
2 3 7 11 15 19
3 4 8 12 16 20
However, all columns including index column are also stored in Spark DataFrame internally
as below.
>>> psdf._internal.to_internal_spark_frame.show() # doctest: +NORMALIZE_WHITESPACE
+-----------------+---+---+---+---+---+
|__index_level_0__| A| B| C| D| E|
+-----------------+---+---+---+---+---+
| 0| 1| 5| 9| 13| 17|
| 1| 2| 6| 10| 14| 18|
| 2| 3| 7| 11| 15| 19|
| 3| 4| 8| 12| 16| 20|
+-----------------+---+---+---+---+---+
In order to fill this gap, the current metadata is used by mapping Spark's internal column
to pandas-on-Spark's index. See the method below:
* `spark_frame` represents the internal Spark DataFrame
* `data_spark_column_names` represents non-indexing Spark column names
* `data_spark_columns` represents non-indexing Spark columns
* `data_fields` represents non-indexing InternalFields
* `index_spark_column_names` represents internal index Spark column names
* `index_spark_columns` represents internal index Spark columns
* `index_fields` represents index InternalFields
* `spark_column_names` represents all columns
* `index_names` represents the external index name as a label
* `to_internal_spark_frame` represents Spark DataFrame derived by the metadata. Includes index.
* `to_pandas_frame` represents pandas DataFrame derived by the metadata
>>> internal = psdf._internal
>>> internal.spark_frame.show() # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
+-----------------+---+---+---+---+---+-----------------+
|__index_level_0__| A| B| C| D| E|__natural_order__|
+-----------------+---+---+---+---+---+-----------------+
| 0| 1| 5| 9| 13| 17| ...|
| 1| 2| 6| 10| 14| 18| ...|
| 2| 3| 7| 11| 15| 19| ...|
| 3| 4| 8| 12| 16| 20| ...|
+-----------------+---+---+---+---+---+-----------------+
>>> internal.data_spark_column_names
['A', 'B', 'C', 'D', 'E']
>>> internal.index_spark_column_names
['__index_level_0__']
>>> internal.spark_column_names
['__index_level_0__', 'A', 'B', 'C', 'D', 'E']
>>> internal.index_names
[None]
>>> internal.data_fields # doctest: +NORMALIZE_WHITESPACE
[InternalField(dtype=int64,struct_field=StructField(A,LongType,false)),
InternalField(dtype=int64,struct_field=StructField(B,LongType,false)),
InternalField(dtype=int64,struct_field=StructField(C,LongType,false)),
InternalField(dtype=int64,struct_field=StructField(D,LongType,false)),
InternalField(dtype=int64,struct_field=StructField(E,LongType,false))]
>>> internal.index_fields
[InternalField(dtype=int64,struct_field=StructField(__index_level_0__,LongType,false))]
>>> internal.to_internal_spark_frame.show() # doctest: +NORMALIZE_WHITESPACE
+-----------------+---+---+---+---+---+
|__index_level_0__| A| B| C| D| E|
+-----------------+---+---+---+---+---+
| 0| 1| 5| 9| 13| 17|
| 1| 2| 6| 10| 14| 18|
| 2| 3| 7| 11| 15| 19|
| 3| 4| 8| 12| 16| 20|
+-----------------+---+---+---+---+---+
>>> internal.to_pandas_frame
A B C D E
0 1 5 9 13 17
1 2 6 10 14 18
2 3 7 11 15 19
3 4 8 12 16 20
In case that index is set to one of the existing column as below:
>>> psdf1 = psdf.set_index("A")
>>> psdf1 # doctest: +NORMALIZE_WHITESPACE
B C D E
A
1 5 9 13 17
2 6 10 14 18
3 7 11 15 19
4 8 12 16 20
>>> psdf1._internal.to_internal_spark_frame.show() # doctest: +NORMALIZE_WHITESPACE
+---+---+---+---+---+
| A| B| C| D| E|
+---+---+---+---+---+
| 1| 5| 9| 13| 17|
| 2| 6| 10| 14| 18|
| 3| 7| 11| 15| 19|
| 4| 8| 12| 16| 20|
+---+---+---+---+---+
>>> internal = psdf1._internal
>>> internal.spark_frame.show() # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
+-----------------+---+---+---+---+---+-----------------+
|__index_level_0__| A| B| C| D| E|__natural_order__|
+-----------------+---+---+---+---+---+-----------------+
| 0| 1| 5| 9| 13| 17| ...|
| 1| 2| 6| 10| 14| 18| ...|
| 2| 3| 7| 11| 15| 19| ...|
| 3| 4| 8| 12| 16| 20| ...|
+-----------------+---+---+---+---+---+-----------------+
>>> internal.data_spark_column_names
['B', 'C', 'D', 'E']
>>> internal.index_spark_column_names
['A']
>>> internal.spark_column_names
['A', 'B', 'C', 'D', 'E']
>>> internal.index_names
[('A',)]
>>> internal.data_fields
[InternalField(dtype=int64,struct_field=StructField(B,LongType,false)),
InternalField(dtype=int64,struct_field=StructField(C,LongType,false)),
InternalField(dtype=int64,struct_field=StructField(D,LongType,false)),
InternalField(dtype=int64,struct_field=StructField(E,LongType,false))]
>>> internal.index_fields
[InternalField(dtype=int64,struct_field=StructField(A,LongType,false))]
>>> internal.to_internal_spark_frame.show() # doctest: +NORMALIZE_WHITESPACE
+---+---+---+---+---+
| A| B| C| D| E|
+---+---+---+---+---+
| 1| 5| 9| 13| 17|
| 2| 6| 10| 14| 18|
| 3| 7| 11| 15| 19|
| 4| 8| 12| 16| 20|
+---+---+---+---+---+
>>> internal.to_pandas_frame # doctest: +NORMALIZE_WHITESPACE
B C D E
A
1 5 9 13 17
2 6 10 14 18
3 7 11 15 19
4 8 12 16 20
In case that index becomes a multi index as below:
>>> psdf2 = psdf.set_index("A", append=True)
>>> psdf2 # doctest: +NORMALIZE_WHITESPACE
B C D E
A
0 1 5 9 13 17
1 2 6 10 14 18
2 3 7 11 15 19
3 4 8 12 16 20
>>> psdf2._internal.to_internal_spark_frame.show() # doctest: +NORMALIZE_WHITESPACE
+-----------------+---+---+---+---+---+
|__index_level_0__| A| B| C| D| E|
+-----------------+---+---+---+---+---+
| 0| 1| 5| 9| 13| 17|
| 1| 2| 6| 10| 14| 18|
| 2| 3| 7| 11| 15| 19|
| 3| 4| 8| 12| 16| 20|
+-----------------+---+---+---+---+---+
>>> internal = psdf2._internal
>>> internal.spark_frame.show() # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
+-----------------+---+---+---+---+---+-----------------+
|__index_level_0__| A| B| C| D| E|__natural_order__|
+-----------------+---+---+---+---+---+-----------------+
| 0| 1| 5| 9| 13| 17| ...|
| 1| 2| 6| 10| 14| 18| ...|
| 2| 3| 7| 11| 15| 19| ...|
| 3| 4| 8| 12| 16| 20| ...|
+-----------------+---+---+---+---+---+-----------------+
>>> internal.data_spark_column_names
['B', 'C', 'D', 'E']
>>> internal.index_spark_column_names
['__index_level_0__', 'A']
>>> internal.spark_column_names
['__index_level_0__', 'A', 'B', 'C', 'D', 'E']
>>> internal.index_names
[None, ('A',)]
>>> internal.data_fields # doctest: +NORMALIZE_WHITESPACE
[InternalField(dtype=int64,struct_field=StructField(B,LongType,false)),
InternalField(dtype=int64,struct_field=StructField(C,LongType,false)),
InternalField(dtype=int64,struct_field=StructField(D,LongType,false)),
InternalField(dtype=int64,struct_field=StructField(E,LongType,false))]
>>> internal.index_fields # doctest: +NORMALIZE_WHITESPACE
[InternalField(dtype=int64,struct_field=StructField(__index_level_0__,LongType,false)),
InternalField(dtype=int64,struct_field=StructField(A,LongType,false))]
>>> internal.to_internal_spark_frame.show() # doctest: +NORMALIZE_WHITESPACE
+-----------------+---+---+---+---+---+
|__index_level_0__| A| B| C| D| E|
+-----------------+---+---+---+---+---+
| 0| 1| 5| 9| 13| 17|
| 1| 2| 6| 10| 14| 18|
| 2| 3| 7| 11| 15| 19|
| 3| 4| 8| 12| 16| 20|
+-----------------+---+---+---+---+---+
>>> internal.to_pandas_frame # doctest: +NORMALIZE_WHITESPACE
B C D E
A
0 1 5 9 13 17
1 2 6 10 14 18
2 3 7 11 15 19
3 4 8 12 16 20
For multi-level columns, it also holds column_labels
>>> columns = pd.MultiIndex.from_tuples([('X', 'A'), ('X', 'B'),
... ('Y', 'C'), ('Y', 'D')])
>>> psdf3 = ps.DataFrame([
... [1, 2, 3, 4],
... [5, 6, 7, 8],
... [9, 10, 11, 12],
... [13, 14, 15, 16],
... [17, 18, 19, 20]], columns = columns)
>>> psdf3 # doctest: +NORMALIZE_WHITESPACE
X Y
A B C D
0 1 2 3 4
1 5 6 7 8
2 9 10 11 12
3 13 14 15 16
4 17 18 19 20
>>> internal = psdf3._internal
>>> internal.spark_frame.show() # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
+-----------------+------+------+------+------+-----------------+
|__index_level_0__|(X, A)|(X, B)|(Y, C)|(Y, D)|__natural_order__|
+-----------------+------+------+------+------+-----------------+
| 0| 1| 2| 3| 4| ...|
| 1| 5| 6| 7| 8| ...|
| 2| 9| 10| 11| 12| ...|
| 3| 13| 14| 15| 16| ...|
| 4| 17| 18| 19| 20| ...|
+-----------------+------+------+------+------+-----------------+
>>> internal.data_spark_column_names
['(X, A)', '(X, B)', '(Y, C)', '(Y, D)']
>>> internal.column_labels
[('X', 'A'), ('X', 'B'), ('Y', 'C'), ('Y', 'D')]
For Series, it also holds scol to represent the column.
>>> psseries = psdf1.B
>>> psseries
A
1 5
2 6
3 7
4 8
Name: B, dtype: int64
>>> internal = psseries._internal
>>> internal.spark_frame.show() # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
+-----------------+---+---+---+---+---+-----------------+
|__index_level_0__| A| B| C| D| E|__natural_order__|
+-----------------+---+---+---+---+---+-----------------+
| 0| 1| 5| 9| 13| 17| ...|
| 1| 2| 6| 10| 14| 18| ...|
| 2| 3| 7| 11| 15| 19| ...|
| 3| 4| 8| 12| 16| 20| ...|
+-----------------+---+---+---+---+---+-----------------+
>>> internal.data_spark_column_names
['B']
>>> internal.index_spark_column_names
['A']
>>> internal.spark_column_names
['A', 'B']
>>> internal.index_names
[('A',)]
>>> internal.data_fields
[InternalField(dtype=int64,struct_field=StructField(B,LongType,false))]
>>> internal.index_fields
[InternalField(dtype=int64,struct_field=StructField(A,LongType,false))]
>>> internal.to_internal_spark_frame.show() # doctest: +NORMALIZE_WHITESPACE
+---+---+
| A| B|
+---+---+
| 1| 5|
| 2| 6|
| 3| 7|
| 4| 8|
+---+---+
>>> internal.to_pandas_frame # doctest: +NORMALIZE_WHITESPACE
B
A
1 5
2 6
3 7
4 8
"""
def __init__(
self,
spark_frame: spark.DataFrame,
index_spark_columns: Optional[List[spark.Column]],
index_names: Optional[List[Optional[Tuple]]] = None,
index_fields: Optional[List[InternalField]] = None,
column_labels: Optional[List[Tuple]] = None,
data_spark_columns: Optional[List[spark.Column]] = None,
data_fields: Optional[List[InternalField]] = None,
column_label_names: Optional[List[Optional[Tuple]]] = None,
):
"""
Create a new internal immutable DataFrame to manage Spark DataFrame, column fields and
index fields and names.
:param spark_frame: Spark DataFrame to be managed.
:param index_spark_columns: list of Spark Column
Spark Columns for the index.
:param index_names: list of tuples
the index names.
:param index_fields: list of InternalField
the InternalFields for the index columns
:param column_labels: list of tuples with the same length
The multi-level values in the tuples.
:param data_spark_columns: list of Spark Column
Spark Columns to appear as columns. If this is None, calculated
from spark_frame.
:param data_fields: list of InternalField
the InternalFields for the data columns
:param column_label_names: Names for each of the column index levels.
See the examples below to refer what each parameter means.
>>> column_labels = pd.MultiIndex.from_tuples(
... [('a', 'x'), ('a', 'y'), ('b', 'z')], names=["column_labels_a", "column_labels_b"])
>>> row_index = pd.MultiIndex.from_tuples(
... [('foo', 'bar'), ('foo', 'bar'), ('zoo', 'bar')],
... names=["row_index_a", "row_index_b"])
>>> psdf = ps.DataFrame(
... [[1, 2, 3], [4, 5, 6], [7, 8, 9]], index=row_index, columns=column_labels)
>>> psdf.set_index(('a', 'x'), append=True, inplace=True)
>>> psdf # doctest: +NORMALIZE_WHITESPACE
column_labels_a a b
column_labels_b y z
row_index_a row_index_b (a, x)
foo bar 1 2 3
4 5 6
zoo bar 7 8 9
>>> internal = psdf._internal
>>> internal.spark_frame.show() # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
+-----------------+-----------------+------+------+------+...
|__index_level_0__|__index_level_1__|(a, x)|(a, y)|(b, z)|...
+-----------------+-----------------+------+------+------+...
| foo| bar| 1| 2| 3|...
| foo| bar| 4| 5| 6|...
| zoo| bar| 7| 8| 9|...
+-----------------+-----------------+------+------+------+...
>>> internal.index_spark_columns # doctest: +SKIP
[Column<'__index_level_0__'>, Column<'__index_level_1__'>, Column<'(a, x)'>]
>>> internal.index_names
[('row_index_a',), ('row_index_b',), ('a', 'x')]
>>> internal.index_fields # doctest: +NORMALIZE_WHITESPACE
[InternalField(dtype=object,struct_field=StructField(__index_level_0__,StringType,false)),
InternalField(dtype=object,struct_field=StructField(__index_level_1__,StringType,false)),
InternalField(dtype=int64,struct_field=StructField((a, x),LongType,false))]
>>> internal.column_labels
[('a', 'y'), ('b', 'z')]
>>> internal.data_spark_columns # doctest: +SKIP
[Column<'(a, y)'>, Column<'(b, z)'>]
>>> internal.data_fields # doctest: +NORMALIZE_WHITESPACE
[InternalField(dtype=int64,struct_field=StructField((a, y),LongType,false)),
InternalField(dtype=int64,struct_field=StructField((b, z),LongType,false))]
>>> internal.column_label_names
[('column_labels_a',), ('column_labels_b',)]
"""
assert isinstance(spark_frame, spark.DataFrame)
assert not spark_frame.isStreaming, "pandas-on-Spark does not support Structured Streaming."
if not index_spark_columns:
if data_spark_columns is not None:
if column_labels is not None:
data_spark_columns = [
scol.alias(name_like_string(label))
for scol, label in zip(data_spark_columns, column_labels)
]
spark_frame = spark_frame.select(data_spark_columns)
assert not any(SPARK_INDEX_NAME_PATTERN.match(name) for name in spark_frame.columns), (
"Index columns should not appear in columns of the Spark DataFrame. Avoid "
"index column names [%s]." % SPARK_INDEX_NAME_PATTERN
)
# Create default index.
spark_frame, force_nullable = InternalFrame.attach_default_index(spark_frame)
index_spark_columns = [scol_for(spark_frame, SPARK_DEFAULT_INDEX_NAME)]
index_fields = [
InternalField.from_struct_field(
StructField(SPARK_DEFAULT_INDEX_NAME, LongType(), nullable=False)
)
]
if data_spark_columns is not None:
data_struct_fields = [
field
for field in spark_frame.schema.fields
if field.name != SPARK_DEFAULT_INDEX_NAME
]
data_spark_columns = [
scol_for(spark_frame, field.name) for field in data_struct_fields
]
if data_fields is not None:
data_fields = [
field.copy(
name=name_like_string(struct_field.name),
nullable=(force_nullable or field.nullable),
)
for field, struct_field in zip(data_fields, data_struct_fields)
]
if NATURAL_ORDER_COLUMN_NAME not in spark_frame.columns:
spark_frame = spark_frame.withColumn(
NATURAL_ORDER_COLUMN_NAME, F.monotonically_increasing_id()
)
self._sdf = spark_frame # type: spark.DataFrame
# index_spark_columns
assert all(
isinstance(index_scol, spark.Column) for index_scol in index_spark_columns
), index_spark_columns
self._index_spark_columns = index_spark_columns # type: List[spark.Column]
# data_spark_columns
if data_spark_columns is None:
data_spark_columns = [
scol_for(spark_frame, col)
for col in spark_frame.columns
if all(
not spark_column_equals(scol_for(spark_frame, col), index_scol)
for index_scol in index_spark_columns
)
and col not in HIDDEN_COLUMNS
]
self._data_spark_columns = data_spark_columns # type: List[spark.Column]
else:
assert all(isinstance(scol, spark.Column) for scol in data_spark_columns)
self._data_spark_columns = data_spark_columns
# fields
if index_fields is None:
index_fields = [None] * len(index_spark_columns)
if data_fields is None:
data_fields = [None] * len(data_spark_columns)
assert len(index_spark_columns) == len(index_fields), (
len(index_spark_columns),
len(index_fields),
)
assert len(data_spark_columns) == len(data_fields), (
len(data_spark_columns),
len(data_fields),
)
if any(field is None or field.struct_field is None for field in index_fields) and any(
field is None or field.struct_field is None for field in data_fields
):
schema = spark_frame.select(index_spark_columns + data_spark_columns).schema
fields = [
InternalField.from_struct_field(struct_field)
if field is None
else InternalField(field.dtype, struct_field)
if field.struct_field is None
else field
for field, struct_field in zip(index_fields + data_fields, schema.fields)
]
index_fields = fields[: len(index_spark_columns)]
data_fields = fields[len(index_spark_columns) :]
elif any(field is None or field.struct_field is None for field in index_fields):
schema = spark_frame.select(index_spark_columns).schema
index_fields = [
InternalField.from_struct_field(struct_field)
if field is None
else InternalField(field.dtype, struct_field)
if field.struct_field is None
else field
for field, struct_field in zip(index_fields, schema.fields)
]
elif any(field is None or field.struct_field is None for field in data_fields):
schema = spark_frame.select(data_spark_columns).schema
data_fields = [
InternalField.from_struct_field(struct_field)
if field is None
else InternalField(field.dtype, struct_field)
if field.struct_field is None
else field
for field, struct_field in zip(data_fields, schema.fields)
]
assert all(
isinstance(ops.dtype, Dtype.__args__) # type: ignore
and (
ops.dtype == np.dtype("object")
or as_spark_type(ops.dtype, raise_error=False) is not None
)
for ops in index_fields
), index_fields
if is_testing():
struct_fields = spark_frame.select(index_spark_columns).schema.fields
assert all(
index_field.struct_field == struct_field
for index_field, struct_field in zip(index_fields, struct_fields)
), (index_fields, struct_fields)
self._index_fields = index_fields # type: List[InternalField]
assert all(
isinstance(ops.dtype, Dtype.__args__) # type: ignore
and (
ops.dtype == np.dtype("object")
or as_spark_type(ops.dtype, raise_error=False) is not None
)
for ops in data_fields
), data_fields
if is_testing():
struct_fields = spark_frame.select(data_spark_columns).schema.fields
assert all(
data_field.struct_field == struct_field
for data_field, struct_field in zip(data_fields, struct_fields)
), (data_fields, struct_fields)
self._data_fields = data_fields # type: List[InternalField]
# index_names
if not index_names:
index_names = [None] * len(index_spark_columns)
assert len(index_spark_columns) == len(index_names), (
len(index_spark_columns),
len(index_names),
)
assert all(
is_name_like_tuple(index_name, check_type=True) for index_name in index_names
), index_names
self._index_names = index_names # type: List[Optional[Tuple]]
# column_labels
if column_labels is None:
self._column_labels = [
(col,) for col in spark_frame.select(self._data_spark_columns).columns
] # type: List[Tuple]
else:
assert len(column_labels) == len(self._data_spark_columns), (
len(column_labels),
len(self._data_spark_columns),
)
if len(column_labels) == 1:
column_label = column_labels[0]
assert is_name_like_tuple(column_label, check_type=True), column_label
else:
assert all(
is_name_like_tuple(column_label, check_type=True)
for column_label in column_labels
), column_labels
assert len(set(len(label) for label in column_labels)) <= 1, column_labels
self._column_labels = column_labels
# column_label_names
if column_label_names is None:
self._column_label_names = [None] * column_labels_level(
self._column_labels
) # type: List[Optional[Tuple]]
else:
if len(self._column_labels) > 0:
assert len(column_label_names) == column_labels_level(self._column_labels), (
len(column_label_names),
column_labels_level(self._column_labels),
)
else:
assert len(column_label_names) > 0, len(column_label_names)
assert all(
is_name_like_tuple(column_label_name, check_type=True)
for column_label_name in column_label_names
), column_label_names
self._column_label_names = column_label_names
@staticmethod
def attach_default_index(
sdf: spark.DataFrame, default_index_type: Optional[str] = None
) -> Tuple[spark.DataFrame, bool]:
"""
        This method attaches a default index to a Spark DataFrame. Spark does not have the index
        notion, so a corresponding column should be generated.
        There are several types of default index that can be configured by `compute.default_index_type`.
>>> spark_frame = ps.range(10).to_spark()
>>> spark_frame
DataFrame[id: bigint]
It adds the default index column '__index_level_0__'.
>>> spark_frame = InternalFrame.attach_default_index(spark_frame)[0]
>>> spark_frame
DataFrame[__index_level_0__: bigint, id: bigint]
It throws an exception if the given column name already exists.
>>> InternalFrame.attach_default_index(spark_frame)[0]
... # doctest: +ELLIPSIS
Traceback (most recent call last):
...
AssertionError: '__index_level_0__' already exists...
"""
index_column = SPARK_DEFAULT_INDEX_NAME
assert (
index_column not in sdf.columns
), "'%s' already exists in the Spark column names '%s'" % (index_column, sdf.columns)
if default_index_type is None:
default_index_type = ps.get_option("compute.default_index_type")
if default_index_type == "sequence":
return InternalFrame.attach_sequence_column(sdf, column_name=index_column)
elif default_index_type == "distributed-sequence":
return InternalFrame.attach_distributed_sequence_column(sdf, column_name=index_column)
elif default_index_type == "distributed":
return InternalFrame.attach_distributed_column(sdf, column_name=index_column)
else:
raise ValueError(
"'compute.default_index_type' should be one of 'sequence',"
" 'distributed-sequence' and 'distributed'"
)
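    # Editor's note (illustrative, not from the original source): the branch taken above is
    # controlled by the pandas-on-Spark option, e.g.
    #   ps.set_option("compute.default_index_type", "distributed-sequence")
    # after which attach_default_index delegates to attach_distributed_sequence_column.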
@staticmethod
def attach_sequence_column(
sdf: spark.DataFrame, column_name: str
) -> Tuple[spark.DataFrame, bool]:
scols = [scol_for(sdf, column) for column in sdf.columns]
sequential_index = (
F.row_number().over(Window.orderBy(F.monotonically_increasing_id())).cast("long") - 1
)
return sdf.select(sequential_index.alias(column_name), *scols), False
@staticmethod
def attach_distributed_column(
sdf: spark.DataFrame, column_name: str
) -> Tuple[spark.DataFrame, bool]:
scols = [scol_for(sdf, column) for column in sdf.columns]
return sdf.select(F.monotonically_increasing_id().alias(column_name), *scols), False
@staticmethod
def attach_distributed_sequence_column(
sdf: spark.DataFrame, column_name: str
) -> Tuple[spark.DataFrame, bool]:
"""
This method attaches a Spark column that has a sequence in a distributed manner.
        This is equivalent to the column assigned when the default index type is 'distributed-sequence'.
>>> sdf = ps.DataFrame(['a', 'b', 'c']).to_spark()
>>> sdf, force_nullable = (
... InternalFrame.attach_distributed_sequence_column(sdf, column_name="sequence")
... )
>>> sdf.show() # doctest: +NORMALIZE_WHITESPACE
+--------+---+
|sequence| 0|
+--------+---+
| 0| a|
| 1| b|
| 2| c|
+--------+---+
>>> force_nullable
True
"""
if len(sdf.columns) > 0:
try:
jdf = sdf._jdf.toDF() # type: ignore
sql_ctx = sdf.sql_ctx
encoders = sql_ctx._jvm.org.apache.spark.sql.Encoders # type: ignore
encoder = encoders.tuple(jdf.exprEnc(), encoders.scalaLong())
jrdd = jdf.localCheckpoint(False).rdd().zipWithIndex()
df = spark.DataFrame(
sql_ctx.sparkSession._jsparkSession.createDataset( # type: ignore
jrdd, encoder
).toDF(),
sql_ctx,
)
columns = df.columns
return (
df.selectExpr(
"`{}` as `{}`".format(columns[1], column_name), "`{}`.*".format(columns[0])
),
True,
)
except py4j.protocol.Py4JError:
if is_testing():
raise
return InternalFrame._attach_distributed_sequence_column(sdf, column_name)
else:
cnt = sdf.count()
if cnt > 0:
return default_session().range(cnt).toDF(column_name), False
else:
return (
default_session().createDataFrame(
[],
schema=StructType().add(column_name, data_type=LongType(), nullable=False),
),
False,
)
@staticmethod
def _attach_distributed_sequence_column(
sdf: spark.DataFrame, column_name: str
) -> Tuple[spark.DataFrame, bool]:
"""
>>> sdf = ps.DataFrame(['a', 'b', 'c']).to_spark()
>>> sdf, force_nullable = (
... InternalFrame._attach_distributed_sequence_column(sdf, column_name="sequence")
... )
>>> sdf.sort("sequence").show() # doctest: +NORMALIZE_WHITESPACE
+--------+---+
|sequence| 0|
+--------+---+
| 0| a|
| 1| b|
| 2| c|
+--------+---+
>>> force_nullable
False
"""
scols = [scol_for(sdf, column) for column in sdf.columns]
spark_partition_column = verify_temp_column_name(sdf, "__spark_partition_id__")
offset_column = verify_temp_column_name(sdf, "__offset__")
row_number_column = verify_temp_column_name(sdf, "__row_number__")
        # 1. Calculates row counts per partition ID. `counts` here is, for instance,
# {
# 1: 83,
# 6: 83,
# 3: 83,
# ...
# }
sdf = sdf.withColumn(spark_partition_column, F.spark_partition_id())
# Checkpoint the DataFrame to fix the partition ID.
sdf = sdf.localCheckpoint(eager=False)
counts = map(
lambda x: (x["key"], x["count"]),
sdf.groupby(sdf[spark_partition_column].alias("key")).count().collect(),
)
        # 2. Calculates the cumulative sum in order of partition id.
        # Note that it does not matter whether the partition id order is guaranteed or not.
# We just need a one-by-one sequential id.
# sort by partition key.
sorted_counts = sorted(counts, key=lambda x: x[0])
# get cumulative sum in an order of partition key.
cumulative_counts = [0] + list(accumulate(map(lambda count: count[1], sorted_counts)))
# zip it with partition key.
sums = dict(zip(map(lambda count: count[0], sorted_counts), cumulative_counts))
# 3. Attach offset for each partition.
@pandas_udf(returnType=LongType()) # type: ignore
def offset(id: pd.Series) -> pd.Series:
current_partition_offset = sums[id.iloc[0]]
return pd.Series(current_partition_offset).repeat(len(id))
sdf = sdf.withColumn(offset_column, offset(spark_partition_column))
# 4. Calculate row_number in each partition.
w = Window.partitionBy(spark_partition_column).orderBy(F.monotonically_increasing_id())
row_number = F.row_number().over(w)
sdf = sdf.withColumn(row_number_column, row_number)
# 5. Calculate the index.
return (
sdf.select(
(sdf[offset_column] + sdf[row_number_column] - 1).alias(column_name), *scols
),
False,
)
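    # Editor's worked example of the arithmetic above (illustrative only): with two partitions
    # holding 2 and 3 rows, the cumulative offsets become {0: 0, 1: 2}; each row then gets
    #   offset[partition_id] + row_number_within_partition - 1
    # so partition 0 yields indices 0-1 and partition 1 yields 2-4, a global 0..4 sequence.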
def spark_column_for(self, label: Tuple) -> spark.Column:
"""Return Spark Column for the given column label."""
column_labels_to_scol = dict(zip(self.column_labels, self.data_spark_columns))
if label in column_labels_to_scol:
return column_labels_to_scol[label]
else:
raise KeyError(name_like_string(label))
def spark_column_name_for(self, label_or_scol: Union[Tuple, spark.Column]) -> str:
"""Return the actual Spark column name for the given column label."""
if isinstance(label_or_scol, spark.Column):
return self.spark_frame.select(label_or_scol).columns[0]
else:
return self.field_for(label_or_scol).name
def spark_type_for(self, label_or_scol: Union[Tuple, spark.Column]) -> DataType:
"""Return DataType for the given column label."""
if isinstance(label_or_scol, spark.Column):
return self.spark_frame.select(label_or_scol).schema[0].dataType
else:
return self.field_for(label_or_scol).spark_type
def spark_column_nullable_for(self, label_or_scol: Union[Tuple, spark.Column]) -> bool:
"""Return nullability for the given column label."""
if isinstance(label_or_scol, spark.Column):
return self.spark_frame.select(label_or_scol).schema[0].nullable
else:
return self.field_for(label_or_scol).nullable
def field_for(self, label: Tuple) -> InternalField:
"""Return InternalField for the given column label."""
column_labels_to_fields = dict(zip(self.column_labels, self.data_fields))
if label in column_labels_to_fields:
return column_labels_to_fields[label]
else:
raise KeyError(name_like_string(label))
@property
def spark_frame(self) -> spark.DataFrame:
"""Return the managed Spark DataFrame."""
return self._sdf
@lazy_property
def data_spark_column_names(self) -> List[str]:
"""Return the managed column field names."""
return [field.name for field in self.data_fields]
@property
def data_spark_columns(self) -> List[spark.Column]:
"""Return Spark Columns for the managed data columns."""
return self._data_spark_columns
@property
def index_spark_column_names(self) -> List[str]:
"""Return the managed index field names."""
return [field.name for field in self.index_fields]
@property
def index_spark_columns(self) -> List[spark.Column]:
"""Return Spark Columns for the managed index columns."""
return self._index_spark_columns
@lazy_property
def spark_column_names(self) -> List[str]:
"""Return all the field names including index field names."""
return self.spark_frame.select(self.spark_columns).columns
@lazy_property
def spark_columns(self) -> List[spark.Column]:
"""Return Spark Columns for the managed columns including index columns."""
index_spark_columns = self.index_spark_columns
return index_spark_columns + [
spark_column
for spark_column in self.data_spark_columns
if all(not spark_column_equals(spark_column, scol) for scol in index_spark_columns)
]
@property
def index_names(self) -> List[Optional[Tuple]]:
"""Return the managed index names."""
return self._index_names
@lazy_property
def index_level(self) -> int:
"""Return the level of the index."""
return len(self._index_names)
@property
def column_labels(self) -> List[Tuple]:
"""Return the managed column index."""
return self._column_labels
@lazy_property
def column_labels_level(self) -> int:
"""Return the level of the column index."""
return len(self._column_label_names)
@property
def column_label_names(self) -> List[Optional[Tuple]]:
"""Return names of the index levels."""
return self._column_label_names
@property
def index_fields(self) -> List[InternalField]:
"""Return InternalFields for the managed index columns."""
return self._index_fields
@property
def data_fields(self) -> List[InternalField]:
"""Return InternalFields for the managed columns."""
return self._data_fields
@lazy_property
def to_internal_spark_frame(self) -> spark.DataFrame:
"""
Return as Spark DataFrame. This contains index columns as well
and should be only used for internal purposes.
"""
index_spark_columns = self.index_spark_columns
data_columns = []
for spark_column in self.data_spark_columns:
if all(not spark_column_equals(spark_column, scol) for scol in index_spark_columns):
data_columns.append(spark_column)
return self.spark_frame.select(index_spark_columns + data_columns)
@lazy_property
def to_pandas_frame(self) -> pd.DataFrame:
"""Return as pandas DataFrame."""
sdf = self.to_internal_spark_frame
pdf = sdf.toPandas()
if len(pdf) == 0 and len(sdf.schema) > 0:
pdf = pdf.astype(
{field.name: spark_type_to_pandas_dtype(field.dataType) for field in sdf.schema}
)
return InternalFrame.restore_index(pdf, **self.arguments_for_restore_index)
@lazy_property
def arguments_for_restore_index(self) -> Dict:
"""Create arguments for `restore_index`."""
column_names = []
fields = self.index_fields.copy()
ext_fields = {
col: field
for col, field in zip(self.index_spark_column_names, self.index_fields)
if isinstance(field.dtype, extension_dtypes)
}
for spark_column, column_name, field in zip(
self.data_spark_columns, self.data_spark_column_names, self.data_fields
):
for index_spark_column_name, index_spark_column in zip(
self.index_spark_column_names, self.index_spark_columns
):
if spark_column_equals(spark_column, index_spark_column):
column_names.append(index_spark_column_name)
break
else:
column_names.append(column_name)
fields.append(field)
if isinstance(field.dtype, extension_dtypes):
ext_fields[column_name] = field
return dict(
index_columns=self.index_spark_column_names,
index_names=self.index_names,
data_columns=column_names,
column_labels=self.column_labels,
column_label_names=self.column_label_names,
fields=fields,
ext_fields=ext_fields,
)
@staticmethod
def restore_index(
pdf: pd.DataFrame,
*,
index_columns: List[str],
index_names: List[Tuple],
data_columns: List[str],
column_labels: List[Tuple],
column_label_names: List[Tuple],
fields: List[InternalField] = None,
ext_fields: Dict[str, InternalField] = None,
) -> pd.DataFrame:
"""
Restore pandas DataFrame indices using the metadata.
:param pdf: the pandas DataFrame to be processed.
:param index_columns: the original column names for index columns.
:param index_names: the index names after restored.
:param data_columns: the original column names for data columns.
:param column_labels: the column labels after restored.
:param column_label_names: the column label names after restored.
:param fields: the fields after restored.
:param ext_fields: the map from the original column names to extension data fields.
:return: the restored pandas DataFrame
>>> from numpy import dtype
>>> pdf = pd.DataFrame({"index": [10, 20, 30], "a": ['a', 'b', 'c'], "b": [0, 2, 1]})
>>> InternalFrame.restore_index(
... pdf,
... index_columns=["index"],
... index_names=[("idx",)],
... data_columns=["a", "b", "index"],
... column_labels=[("x",), ("y",), ("z",)],
... column_label_names=[("lv1",)],
... fields=[
... InternalField(
... dtype=dtype('int64'),
... struct_field=StructField(name='index', dataType=LongType(), nullable=False),
... ),
... InternalField(
... dtype=dtype('object'),
... struct_field=StructField(name='a', dataType=StringType(), nullable=False),
... ),
... InternalField(
... dtype=CategoricalDtype(categories=["i", "j", "k"]),
... struct_field=StructField(name='b', dataType=LongType(), nullable=False),
... ),
... ],
... ext_fields=None,
... ) # doctest: +NORMALIZE_WHITESPACE
lv1 x y z
idx
10 a i 10
20 b k 20
30 c j 30
"""
if ext_fields is not None and len(ext_fields) > 0:
pdf = pdf.astype({col: field.dtype for col, field in ext_fields.items()}, copy=True)
for col, field in zip(pdf.columns, fields):
pdf[col] = DataTypeOps(field.dtype, field.spark_type).restore(pdf[col])
append = False
for index_field in index_columns:
drop = index_field not in data_columns
pdf = pdf.set_index(index_field, drop=drop, append=append)
append = True
pdf = pdf[data_columns]
pdf.index.names = [
name if name is None or len(name) > 1 else name[0] for name in index_names
]
names = [name if name is None or len(name) > 1 else name[0] for name in column_label_names]
if len(column_label_names) > 1:
pdf.columns = pd.MultiIndex.from_tuples(column_labels, names=names)
else:
pdf.columns = pd.Index(
[None if label is None else label[0] for label in column_labels],
name=names[0],
)
return pdf
@lazy_property
def resolved_copy(self) -> "InternalFrame":
"""Copy the immutable InternalFrame with the updates resolved."""
sdf = self.spark_frame.select(self.spark_columns + list(HIDDEN_COLUMNS))
return self.copy(
spark_frame=sdf,
index_spark_columns=[scol_for(sdf, col) for col in self.index_spark_column_names],
data_spark_columns=[scol_for(sdf, col) for col in self.data_spark_column_names],
)
def with_new_sdf(
self,
spark_frame: spark.DataFrame,
*,
index_fields: Optional[List[InternalField]] = None,
data_columns: Optional[List[str]] = None,
data_fields: Optional[List[InternalField]] = None,
) -> "InternalFrame":
"""Copy the immutable InternalFrame with the updates by the specified Spark DataFrame.
:param spark_frame: the new Spark DataFrame
:param index_fields: the new InternalFields for the index columns.
If None, the original dtypes are used.
:param data_columns: the new column names. If None, the original one is used.
:param data_fields: the new InternalFields for the data columns.
If None, the original dtypes are used.
:return: the copied InternalFrame.
"""
if index_fields is None:
index_fields = self.index_fields
else:
assert len(index_fields) == len(self.index_fields), (
len(index_fields),
len(self.index_fields),
)
if data_columns is None:
data_columns = self.data_spark_column_names
else:
assert len(data_columns) == len(self.column_labels), (
len(data_columns),
len(self.column_labels),
)
if data_fields is None:
data_fields = self.data_fields
else:
assert len(data_fields) == len(self.column_labels), (
len(data_fields),
len(self.column_labels),
)
sdf = spark_frame.drop(NATURAL_ORDER_COLUMN_NAME)
return self.copy(
spark_frame=sdf,
index_spark_columns=[scol_for(sdf, col) for col in self.index_spark_column_names],
index_fields=index_fields,
data_spark_columns=[scol_for(sdf, col) for col in data_columns],
data_fields=data_fields,
)
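# Illustrative sketch (not part of the original module): `with_new_sdf` swaps in
# a transformed Spark DataFrame while keeping the existing labels, e.g.
#   internal = internal.with_new_sdf(internal.spark_frame.repartition(8))
# Renamed data columns would be passed through the `data_columns=` keyword.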
def with_new_columns(
self,
scols_or_pssers: Sequence[Union[spark.Column, "Series"]],
*,
column_labels: Optional[List[Tuple]] = None,
data_fields: Optional[List[InternalField]] = None,
column_label_names: Union[Optional[List[Optional[Tuple]]], _NoValueType] = _NoValue,
keep_order: bool = True,
) -> "InternalFrame":
"""
Copy the immutable InternalFrame with the updates by the specified Spark Columns or Series.
:param scols_or_pssers: the new Spark Columns or Series.
:param column_labels: the new column index.
If None, the column_labels of the corresponding `scols_or_pssers` is used if it is
Series; otherwise the original one is used.
:param data_fields: the new InternalFields for the data columns.
If None, the dtypes of the corresponding `scols_or_pssers` are used if it is Series;
otherwise the dtypes will be inferred from the corresponding `scols_or_pssers`.
:param column_label_names: the new names of the column index levels.
:return: the copied InternalFrame.
"""
from pyspark.pandas.series import Series
if column_labels is None:
if all(isinstance(scol_or_psser, Series) for scol_or_psser in scols_or_pssers):
column_labels = [cast(Series, psser)._column_label for psser in scols_or_pssers]
else:
assert len(scols_or_pssers) == len(self.column_labels), (
len(scols_or_pssers),
len(self.column_labels),
)
column_labels = []
for scol_or_psser, label in zip(scols_or_pssers, self.column_labels):
if isinstance(scol_or_psser, Series):
column_labels.append(scol_or_psser._column_label)
else:
column_labels.append(label)
else:
assert len(scols_or_pssers) == len(column_labels), (
len(scols_or_pssers),
len(column_labels),
)
data_spark_columns = []
for scol_or_psser in scols_or_pssers:
if isinstance(scol_or_psser, Series):
scol = scol_or_psser.spark.column
else:
scol = scol_or_psser
data_spark_columns.append(scol)
if data_fields is None:
data_fields = []
for scol_or_psser in scols_or_pssers:
if isinstance(scol_or_psser, Series):
data_fields.append(scol_or_psser._internal.data_fields[0])
else:
data_fields.append(None)
else:
assert len(scols_or_pssers) == len(data_fields), (
len(scols_or_pssers),
len(data_fields),
)
sdf = self.spark_frame
if not keep_order:
sdf = self.spark_frame.select(self.index_spark_columns + data_spark_columns)
index_spark_columns = [scol_for(sdf, col) for col in self.index_spark_column_names]
data_spark_columns = [
scol_for(sdf, col) for col in self.spark_frame.select(data_spark_columns).columns
]
else:
index_spark_columns = self.index_spark_columns
if column_label_names is _NoValue:
column_label_names = self._column_label_names
return self.copy(
spark_frame=sdf,
index_spark_columns=index_spark_columns,
column_labels=column_labels,
data_spark_columns=data_spark_columns,
data_fields=data_fields,
column_label_names=column_label_names,
)
def with_filter(self, pred: Union[spark.Column, "Series"]) -> "InternalFrame":
"""
Copy the immutable InternalFrame with the updates by the predicate.
:param pred: the predicate to filter.
:return: the copied InternalFrame.
"""
from pyspark.pandas.series import Series
if isinstance(pred, Series):
assert isinstance(pred.spark.data_type, BooleanType), pred.spark.data_type
condition = pred.spark.column
else:
spark_type = self.spark_frame.select(pred).schema[0].dataType
assert isinstance(spark_type, BooleanType), spark_type
condition = pred
return self.with_new_sdf(self.spark_frame.filter(condition).select(self.spark_columns))
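# Illustrative sketch (not part of the original module): `pred` can be a boolean
# Spark Column taken from the frame itself, e.g.
#   filtered = internal.with_filter(scol_for(internal.spark_frame, "flag"))
# where "flag" is a hypothetical BooleanType column; a pandas-on-Spark Series of
# booleans works the same way.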
def with_new_spark_column(
self,
column_label: Tuple,
scol: spark.Column,
*,
field: Optional[InternalField] = None,
keep_order: bool = True,
) -> "InternalFrame":
"""
Copy the immutable InternalFrame with the updates by the specified Spark Column.
:param column_label: the column label to be updated.
:param scol: the new Spark Column
:param field: the new InternalField for the data column.
If not specified, the InternalField will be inferred from the Spark Column.
:return: the copied InternalFrame.
"""
assert column_label in self.column_labels, column_label
idx = self.column_labels.index(column_label)
data_spark_columns = self.data_spark_columns.copy()
data_spark_columns[idx] = scol
data_fields = self.data_fields.copy()
data_fields[idx] = field
return self.with_new_columns(
data_spark_columns, data_fields=data_fields, keep_order=keep_order
)
def select_column(self, column_label: Tuple) -> "InternalFrame":
"""
Copy the immutable InternalFrame with the specified column.
:param column_label: the column label to use.
:return: the copied InternalFrame.
"""
assert column_label in self.column_labels, column_label
return self.copy(
column_labels=[column_label],
data_spark_columns=[self.spark_column_for(column_label)],
data_fields=[self.field_for(column_label)],
column_label_names=None,
)
def copy(
self,
*,
spark_frame: Union[spark.DataFrame, _NoValueType] = _NoValue,
index_spark_columns: Union[List[spark.Column], _NoValueType] = _NoValue,
index_names: Union[Optional[List[Optional[Tuple]]], _NoValueType] = _NoValue,
index_fields: Union[Optional[List[InternalField]], _NoValueType] = _NoValue,
column_labels: Union[Optional[List[Tuple]], _NoValueType] = _NoValue,
data_spark_columns: Union[Optional[List[spark.Column]], _NoValueType] = _NoValue,
data_fields: Union[Optional[List[InternalField]], _NoValueType] = _NoValue,
column_label_names: Union[Optional[List[Optional[Tuple]]], _NoValueType] = _NoValue,
) -> "InternalFrame":
"""
Copy the immutable InternalFrame.
:param spark_frame: the new Spark DataFrame. If not specified, the original one is used.
:param index_spark_columns: the list of Spark Column.
If not specified, the original ones are used.
:param index_names: the index names. If not specified, the original ones are used.
:param index_fields: the new InternalFields for the index columns.
If not specified, the original metadata are used.
:param column_labels: the new column labels. If not specified, the original ones are used.
:param data_spark_columns: the new Spark Columns.
If not specified, the original ones are used.
:param data_fields: the new InternalFields for the data columns.
If not specified, the original metadata are used.
:param column_label_names: the new names of the column index levels.
If not specified, the original ones are used.
:return: the copied immutable InternalFrame.
"""
if spark_frame is _NoValue:
spark_frame = self.spark_frame
if index_spark_columns is _NoValue:
index_spark_columns = self.index_spark_columns
if index_names is _NoValue:
index_names = self.index_names
if index_fields is _NoValue:
index_fields = self.index_fields
if column_labels is _NoValue:
column_labels = self.column_labels
if data_spark_columns is _NoValue:
data_spark_columns = self.data_spark_columns
if data_fields is _NoValue:
data_fields = self.data_fields
if column_label_names is _NoValue:
column_label_names = self.column_label_names
return InternalFrame(
spark_frame=cast(spark.DataFrame, spark_frame),
index_spark_columns=cast(List[spark.Column], index_spark_columns),
index_names=cast(Optional[List[Optional[Tuple]]], index_names),
index_fields=cast(Optional[List[InternalField]], index_fields),
column_labels=cast(Optional[List[Tuple]], column_labels),
data_spark_columns=cast(Optional[List[spark.Column]], data_spark_columns),
data_fields=cast(Optional[List[InternalField]], data_fields),
column_label_names=cast(Optional[List[Optional[Tuple]]], column_label_names),
)
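# Illustrative sketch (not part of the original module): only the keyword
# arguments passed to `copy` are overridden; everything else is carried over, e.g.
#   relabeled = internal.copy(column_labels=[("a",), ("b",)])
# assuming the frame has exactly two data columns.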
@staticmethod
def from_pandas(pdf: pd.DataFrame) -> "InternalFrame":
"""Create an immutable DataFrame from pandas DataFrame.
:param pdf: :class:`pd.DataFrame`
:return: the created immutable DataFrame
"""
index_names = [
name if name is None or isinstance(name, tuple) else (name,) for name in pdf.index.names
]
columns = pdf.columns
if isinstance(columns, pd.MultiIndex):
column_labels = columns.tolist()
else:
column_labels = [(col,) for col in columns]
column_label_names = [
name if name is None or isinstance(name, tuple) else (name,) for name in columns.names
]
(
pdf,
index_columns,
index_fields,
data_columns,
data_fields,
) = InternalFrame.prepare_pandas_frame(pdf)
schema = StructType([field.struct_field for field in index_fields + data_fields])
sdf = default_session().createDataFrame(pdf, schema=schema)
return InternalFrame(
spark_frame=sdf,
index_spark_columns=[scol_for(sdf, col) for col in index_columns],
index_names=index_names,
index_fields=index_fields,
column_labels=column_labels,
data_spark_columns=[scol_for(sdf, col) for col in data_columns],
data_fields=data_fields,
column_label_names=column_label_names,
)
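# Illustrative sketch (hypothetical data, not from the original source):
#   internal = InternalFrame.from_pandas(pd.DataFrame({"x": [1, 2, 3]}))
# The pandas index is kept as a Spark column named __index_level_0__, as shown
# in the `prepare_pandas_frame` doctest below.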
@staticmethod
def prepare_pandas_frame(
pdf: pd.DataFrame, *, retain_index: bool = True
) -> Tuple[pd.DataFrame, List[str], List[InternalField], List[str], List[InternalField]]:
"""
Prepare pandas DataFrame for creating Spark DataFrame.
:param pdf: the pandas DataFrame to be prepared.
:param retain_index: whether the indices should be retained.
:return: the tuple of
- the prepared pandas dataFrame
- index column names for Spark DataFrame
- the InternalFields for the index columns of the given pandas DataFrame
- data column names for Spark DataFrame
- the InternalFields for the data columns of the given pandas DataFrame
>>> pdf = pd.DataFrame(
... {("x", "a"): ['a', 'b', 'c'],
... ("y", "b"): pd.Categorical(["i", "k", "j"], categories=["i", "j", "k"])},
... index=[10, 20, 30])
>>> prepared, index_columns, index_fields, data_columns, data_fields = (
... InternalFrame.prepare_pandas_frame(pdf)
... )
>>> prepared
__index_level_0__ (x, a) (y, b)
0 10 a 0
1 20 b 2
2 30 c 1
>>> index_columns
['__index_level_0__']
>>> index_fields
[InternalField(dtype=int64,struct_field=StructField(__index_level_0__,LongType,false))]
>>> data_columns
['(x, a)', '(y, b)']
>>> data_fields # doctest: +NORMALIZE_WHITESPACE
[InternalField(dtype=object,struct_field=StructField((x, a),StringType,false)),
InternalField(dtype=category,struct_field=StructField((y, b),ByteType,false))]
"""
pdf = pdf.copy()
data_columns = [name_like_string(col) for col in pdf.columns]
pdf.columns = data_columns
if retain_index:
index_nlevels = pdf.index.nlevels
index_columns = [SPARK_INDEX_NAME_FORMAT(i) for i in range(index_nlevels)]
pdf.index.names = index_columns
reset_index = pdf.reset_index()
else:
index_nlevels = 0
index_columns = []
reset_index = pdf
index_dtypes = list(reset_index.dtypes)[:index_nlevels]
data_dtypes = list(reset_index.dtypes)[index_nlevels:]
for col, dtype in zip(reset_index.columns, reset_index.dtypes):
spark_type = infer_pd_series_spark_type(reset_index[col], dtype)
reset_index[col] = DataTypeOps(dtype, spark_type).prepare(reset_index[col])
fields = [
InternalField(
dtype=dtype,
struct_field=StructField(
name=name,
dataType=infer_pd_series_spark_type(col, dtype),
nullable=bool(col.isnull().any()),
),
)
for (name, col), dtype in zip(reset_index.iteritems(), index_dtypes + data_dtypes)
]
return (
reset_index,
index_columns,
fields[:index_nlevels],
data_columns,
fields[index_nlevels:],
)
def _test() -> None:
import os
import doctest
import sys
from pyspark.sql import SparkSession
import pyspark.pandas.internal
os.chdir(os.environ["SPARK_HOME"])
globs = pyspark.pandas.internal.__dict__.copy()
globs["ps"] = pyspark.pandas
spark = (
SparkSession.builder.master("local[4]")
.appName("pyspark.pandas.internal tests")
.getOrCreate()
)
(failure_count, test_count) = doctest.testmod(
pyspark.pandas.internal,
globs=globs,
optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE,
)
spark.stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
| apache-2.0 | 5,017,516,604,919,470,000 | 39.052755 | 100 | 0.549305 | false |
Russell-IO/ansible | lib/ansible/plugins/action/gather_facts.py | 1 | 1434 | # Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from collections import MutableMapping
from ansible import constants as C
from ansible.plugins.action import ActionBase
class ActionModule(ActionBase):
def run(self, tmp=None, task_vars=None):
''' handler for package operations '''
self._supports_check_mode = True
self._supports_async = True
result = super(ActionModule, self).run(tmp, task_vars)
result['ansible_facts'] = {}
for fact_module in C.config.get_config_value('FACTS_MODULES', variables=task_vars):
mod_args = task_vars.get('ansible_facts_modules', {}).get(fact_module, {})
if isinstance(mod_args, MutableMapping):
mod_args.update(self._task.args.copy())
else:
mod_args = self._task.args.copy()
if fact_module != 'setup':
del mod_args['gather_subset']
self._display.vvvv("Running %s" % fact_module)
result.update(self._execute_module(module_name=fact_module, module_args=mod_args, task_vars=task_vars, wrap_async=self._task.async_val))
# tell executor facts were gathered
result['ansible_facts']['_ansible_facts_gathered'] = True
return result
| gpl-3.0 | -5,338,959,078,966,732,000 | 33.97561 | 148 | 0.640167 | false |
h1ds/h1ds | h1ds/h1ds/migrations/0010_auto__chg_field_nodepath_parent.py | 1 | 11299 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'NodePath.parent'
db.alter_column(u'h1ds_nodepath', 'parent_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['h1ds.NodePath'], null=True))
def backwards(self, orm):
# User chose to not deal with backwards NULL issues for 'NodePath.parent'
raise RuntimeError("Cannot reverse this migration. 'NodePath.parent' and its values cannot be restored.")
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'h1ds.device': {
'Meta': {'object_name': 'Device'},
'description': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_default': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'latest_shot': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': u"orm['h1ds.Shot']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'})
},
u'h1ds.filter': {
'Meta': {'object_name': 'Filter'},
'code': ('python_field.fields.PythonCodeField', [], {}),
'data_dim': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['h1ds.FilterDim']", 'symmetrical': 'False'}),
'data_type': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['h1ds.FilterDtype']", 'symmetrical': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'})
},
u'h1ds.filterdim': {
'Meta': {'object_name': 'FilterDim'},
'code': ('python_field.fields.PythonCodeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
u'h1ds.filterdtype': {
'Meta': {'object_name': 'FilterDtype'},
'code': ('python_field.fields.PythonCodeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
u'h1ds.h1dssignal': {
'Meta': {'object_name': 'H1DSSignal'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50'})
},
u'h1ds.h1dssignalinstance': {
'Meta': {'ordering': "('-time',)", 'object_name': 'H1DSSignalInstance'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'signal': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['h1ds.H1DSSignal']"}),
'time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'})
},
u'h1ds.node': {
'Meta': {'object_name': 'Node'},
'children': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'children_rel_+'", 'to': u"orm['h1ds.Node']"}),
'dtype': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}),
'has_data': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'n_channels': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'n_dimensions': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'subtree_hash': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40'})
},
u'h1ds.nodepath': {
'Meta': {'object_name': 'NodePath'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'node': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['h1ds.Node']", 'through': u"orm['h1ds.PathMap']", 'symmetrical': 'False'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['h1ds.NodePath']", 'null': 'True', 'blank': 'True'}),
'path': ('django.db.models.fields.CharField', [], {'max_length': '1024'})
},
u'h1ds.pagelet': {
'Meta': {'object_name': 'Pagelet'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '1024'}),
'pagelet_type': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '2048'})
},
u'h1ds.pageletcoordinates': {
'Meta': {'object_name': 'PageletCoordinates'},
'coordinates': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'pagelet': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['h1ds.Pagelet']"}),
'worksheet': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['h1ds.Worksheet']"})
},
u'h1ds.pathmap': {
'Meta': {'object_name': 'PathMap'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'node': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['h1ds.Node']"}),
'node_path': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['h1ds.NodePath']"}),
'shot': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['h1ds.Shot']"})
},
u'h1ds.shot': {
'Meta': {'object_name': 'Shot'},
'device': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['h1ds.Device']", 'on_delete': 'models.PROTECT'}),
'number': ('django.db.models.fields.PositiveIntegerField', [], {'primary_key': 'True'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {})
},
u'h1ds.usersignal': {
'Meta': {'object_name': 'UserSignal'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '1024'}),
'ordering': ('django.db.models.fields.IntegerField', [], {'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '2048'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
u'h1ds.worksheet': {
'Meta': {'object_name': 'Worksheet'},
'description': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'pagelets': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['h1ds.Pagelet']", 'through': u"orm['h1ds.PageletCoordinates']", 'symmetrical': 'False'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
}
}
complete_apps = ['h1ds'] | mit | 7,576,822,004,211,700,000 | 67.902439 | 187 | 0.543588 | false |
SCPR/firetracker | calfire_tracker/utilities.py | 1 | 3930 | from django.conf import settings
from django.db import models
from django.utils.encoding import smart_str
from django.utils import timezone
from django.template.defaultfilters import slugify
from geopy import geocoders
import pytz
import time
import datetime
import requests
import logging
logger = logging.getLogger("firetracker")
def search_assethost_for_image(kpcc_image_token, **kwargs):
''' model save function to query kpcc image api given an asset_host_id '''
if kwargs['image_id'] is not None:
url_prefix = 'https://a.scpr.org/api/assets/'
url_suffix = '.json?auth_token='
search_url = '%s%s%s%s' % (url_prefix, kwargs['image_id'], url_suffix, kpcc_image_token)
kpcc_query_api = requests.get(search_url, verify=False, headers={"From": "[email protected]","User-Agent": "Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US) AppleWebKit/525.19 (KHTML, like Gecko) Chrome/1.0.154.53 Safari/525.19"})
kpcc_image_asset = kpcc_query_api.json()
try:
kpcc_image_data = {'asset_url_link': kpcc_image_asset['urls']['full'], 'asset_photo_credit': kpcc_image_asset['owner'], 'asset_host_image_id': kwargs['image_id']}
except:
kpcc_image_data = {'asset_url_link': None, 'asset_photo_credit': None, 'asset_host_image_id': None}
else:
kpcc_image_data = {'asset_url_link': None, 'asset_photo_credit': None, 'asset_host_image_id': None}
return kpcc_image_data
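# Illustrative sketch (hypothetical token and id, not from the original file):
#   search_assethost_for_image(KPCC_IMAGE_TOKEN, image_id='12345')
# always returns the keys asset_url_link, asset_photo_credit and
# asset_host_image_id, each None when image_id is missing or the asset lookup
# does not return usable data.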
def fill_air_quality_data(location_latitude, location_longitude):
try:
air_quality_url = 'http://www.airnowapi.org/aq/observation/latLong/current/?format=application/json&latitude=%s&longitude=%s&distance=30&API_KEY=AABE5F75-6C5A-47C2-AB74-2D138C9055B2' % (location_latitude, location_longitude)
air_quality_query = requests.get(air_quality_url, headers= {"User-Agent": "Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US) AppleWebKit/525.19 (KHTML, like Gecko) Chrome/1.0.154.53 Safari/525.19"})
air_quality_json = air_quality_query.json()
if len(air_quality_json) == 0:
air_quality_rating = None
air_quality_parameter = None
elif len(air_quality_json) >= 1:
for data in air_quality_json:
if data["ParameterName"] == "PM2.5":
air_quality_rating = data["AQI"]
air_quality_parameter = "Fine particles (PM2.5)"
elif data["ParameterName"] == "O3":
air_quality_rating = data["AQI"]
air_quality_parameter = "Ozone (O3)"
else:
air_quality_rating = None
air_quality_parameter = None
else:
air_quality_rating = None
air_quality_parameter = None
except:
air_quality_rating = None
air_quality_parameter = None
print "exception for %s, %s\n" % (location_latitude, location_longitude)
return {"air_quality_rating": air_quality_rating, "air_quality_parameter": air_quality_parameter}
def fill_geocode_data(computed_location):
if computed_location is not None:
try:
g = geocoders.GoogleV3()
address = smart_str(computed_location)
computed_location, (location_latitude, location_longitude) = g.geocode(address)
geolocation_data = {
'computed_location': str(computed_location),
'location_latitude': location_latitude,
'location_longitude': location_longitude,
'location_geocode_error': False,
}
except (UnboundLocalError, ValueError,geocoders.google.GQueryError):
geolocation_data = {
'computed_location': str(computed_location),
'location_latitude': None,
'location_longitude': None,
'location_geocode_error': True,
}
return geolocation_data
| gpl-2.0 | -4,657,212,522,448,050,000 | 48.125 | 236 | 0.622901 | false |
AlexEKoren/grumpy | compiler/stmt_test.py | 1 | 14166 | # coding=utf-8
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for StatementVisitor."""
import ast
import re
import subprocess
import textwrap
import unittest
from grumpy.compiler import block
from grumpy.compiler import shard_test
from grumpy.compiler import stmt
from grumpy.compiler import util
class StatementVisitorTest(unittest.TestCase):
def testAssertNoMsg(self):
self.assertEqual((0, 'AssertionError()\n'), _GrumpRun(textwrap.dedent("""\
try:
assert False
except AssertionError as e:
print repr(e)""")))
def testAssertMsg(self):
want = (0, "AssertionError('foo',)\n")
self.assertEqual(want, _GrumpRun(textwrap.dedent("""\
try:
assert False, 'foo'
except AssertionError as e:
print repr(e)""")))
def testAssignAttribute(self):
self.assertEqual((0, '123\n'), _GrumpRun(textwrap.dedent("""\
e = Exception()
e.foo = 123
print e.foo""")))
def testAssignName(self):
self.assertEqual((0, 'bar\n'), _GrumpRun(textwrap.dedent("""\
foo = 'bar'
print foo""")))
def testAssignMultiple(self):
self.assertEqual((0, 'baz baz\n'), _GrumpRun(textwrap.dedent("""\
foo = bar = 'baz'
print foo, bar""")))
def testAssignSubscript(self):
self.assertEqual((0, "{'bar': None}\n"), _GrumpRun(textwrap.dedent("""\
foo = {}
foo['bar'] = None
print foo""")))
def testAssignTuple(self):
self.assertEqual((0, 'a b\n'), _GrumpRun(textwrap.dedent("""\
baz = ('a', 'b')
foo, bar = baz
print foo, bar""")))
def testAugAssign(self):
self.assertEqual((0, '42\n'), _GrumpRun(textwrap.dedent("""\
foo = 41
foo += 1
print foo""")))
def testAugAssignBitAnd(self):
self.assertEqual((0, '3\n'), _GrumpRun(textwrap.dedent("""\
foo = 7
foo &= 3
print foo""")))
def testAugAssignUnsupportedOp(self):
expected = 'augmented assignment op not implemented'
self.assertRaisesRegexp(util.ParseError, expected,
_ParseAndVisit, 'foo **= bar')
def testClassDef(self):
self.assertEqual((0, "<type 'type'>\n"), _GrumpRun(textwrap.dedent("""\
class Foo(object):
pass
print type(Foo)""")))
def testClassDefWithVar(self):
self.assertEqual((0, 'abc\n'), _GrumpRun(textwrap.dedent("""\
class Foo(object):
bar = 'abc'
print Foo.bar""")))
def testDeleteAttribute(self):
self.assertEqual((0, 'False\n'), _GrumpRun(textwrap.dedent("""\
class Foo(object):
bar = 42
del Foo.bar
print hasattr(Foo, 'bar')""")))
def testDeleteClassLocal(self):
self.assertEqual((0, 'False\n'), _GrumpRun(textwrap.dedent("""\
class Foo(object):
bar = 'baz'
del bar
print hasattr(Foo, 'bar')""")))
def testDeleteGlobal(self):
self.assertEqual((0, 'False\n'), _GrumpRun(textwrap.dedent("""\
foo = 42
del foo
print 'foo' in globals()""")))
def testDeleteLocal(self):
self.assertEqual((0, 'ok\n'), _GrumpRun(textwrap.dedent("""\
def foo():
bar = 123
del bar
try:
print bar
raise AssertionError
except UnboundLocalError:
print 'ok'
foo()""")))
def testDeleteNonexistentLocal(self):
self.assertRaisesRegexp(
util.ParseError, 'cannot delete nonexistent local',
_ParseAndVisit, 'def foo():\n del bar')
def testDeleteSubscript(self):
self.assertEqual((0, '{}\n'), _GrumpRun(textwrap.dedent("""\
foo = {'bar': 'baz'}
del foo['bar']
print foo""")))
def testExprCall(self):
self.assertEqual((0, 'bar\n'), _GrumpRun(textwrap.dedent("""\
def foo():
print 'bar'
foo()""")))
def testExprNameGlobal(self):
self.assertEqual((0, ''), _GrumpRun(textwrap.dedent("""\
foo = 42
foo""")))
def testExprNameLocal(self):
self.assertEqual((0, ''), _GrumpRun(textwrap.dedent("""\
foo = 42
def bar():
foo
bar()""")))
def testFor(self):
self.assertEqual((0, '1\n2\n3\n'), _GrumpRun(textwrap.dedent("""\
for i in (1, 2, 3):
print i""")))
def testForBreak(self):
self.assertEqual((0, '1\n'), _GrumpRun(textwrap.dedent("""\
for i in (1, 2, 3):
print i
break""")))
def testForContinue(self):
self.assertEqual((0, '1\n2\n3\n'), _GrumpRun(textwrap.dedent("""\
for i in (1, 2, 3):
print i
continue
raise AssertionError""")))
def testForElse(self):
self.assertEqual((0, 'foo\nbar\n'), _GrumpRun(textwrap.dedent("""\
for i in (1,):
print 'foo'
else:
print 'bar'""")))
def testFunctionDef(self):
self.assertEqual((0, 'bar baz\n'), _GrumpRun(textwrap.dedent("""\
def foo(a, b):
print a, b
foo('bar', 'baz')""")))
def testFunctionDefGenerator(self):
self.assertEqual((0, "['foo', 'bar']\n"), _GrumpRun(textwrap.dedent("""\
def gen():
yield 'foo'
yield 'bar'
print list(gen())""")))
def testFunctionDefGeneratorReturnValue(self):
self.assertRaisesRegexp(
util.ParseError, 'returning a value in a generator function',
_ParseAndVisit, 'def foo():\n yield 1\n return 2')
def testFunctionDefLocal(self):
self.assertEqual((0, 'baz\n'), _GrumpRun(textwrap.dedent("""\
def foo():
def bar():
print 'baz'
bar()
foo()""")))
def testIf(self):
self.assertEqual((0, 'foo\n'), _GrumpRun(textwrap.dedent("""\
if 123:
print 'foo'
if '':
print 'bar'""")))
def testIfElif(self):
self.assertEqual((0, 'foo\nbar\n'), _GrumpRun(textwrap.dedent("""\
if True:
print 'foo'
elif False:
print 'bar'
if False:
print 'foo'
elif True:
print 'bar'""")))
def testIfElse(self):
self.assertEqual((0, 'foo\nbar\n'), _GrumpRun(textwrap.dedent("""\
if True:
print 'foo'
else:
print 'bar'
if False:
print 'foo'
else:
print 'bar'""")))
def testImport(self):
self.assertEqual((0, "<type 'dict'>\n"), _GrumpRun(textwrap.dedent("""\
import sys
print type(sys.modules)""")))
def testImportConflictingPackage(self):
self.assertEqual((0, ''), _GrumpRun(textwrap.dedent("""\
import time
from __go__.time import Now""")))
def testImportNative(self):
self.assertEqual((0, '1 1000000000\n'), _GrumpRun(textwrap.dedent("""\
from __go__.time import Nanosecond, Second
print Nanosecond, Second""")))
def testImportGrump(self):
self.assertEqual((0, ''), _GrumpRun(textwrap.dedent("""\
from __go__.grumpy import Assert
Assert(__frame__(), True, 'bad')""")))
def testImportNativeModuleRaises(self):
regexp = r'for native imports use "from __go__\.xyz import \.\.\." syntax'
self.assertRaisesRegexp(util.ParseError, regexp, _ParseAndVisit,
'import __go__.foo')
def testImportNativeType(self):
self.assertEqual((0, "<type 'Duration'>\n"), _GrumpRun(textwrap.dedent("""\
from __go__.time import type_Duration as Duration
print Duration""")))
def testPrint(self):
self.assertEqual((0, 'abc 123\nfoo bar\n'), _GrumpRun(textwrap.dedent("""\
print 'abc',
print '123'
print 'foo', 'bar'""")))
def testRaiseExitStatus(self):
self.assertEqual(1, _GrumpRun('raise Exception')[0])
def testRaiseInstance(self):
self.assertEqual((0, 'foo\n'), _GrumpRun(textwrap.dedent("""\
try:
raise RuntimeError('foo')
print 'bad'
except RuntimeError as e:
print e""")))
def testRaiseTypeAndArg(self):
self.assertEqual((0, 'foo\n'), _GrumpRun(textwrap.dedent("""\
try:
raise KeyError('foo')
print 'bad'
except KeyError as e:
print e""")))
def testRaiseAgain(self):
self.assertEqual((0, 'foo\n'), _GrumpRun(textwrap.dedent("""\
try:
try:
raise AssertionError('foo')
except AssertionError:
raise
except Exception as e:
print e""")))
def testRaiseTraceback(self):
self.assertEqual((0, ''), _GrumpRun(textwrap.dedent("""\
import sys
try:
try:
raise Exception
except:
e, _, tb = sys.exc_info()
raise e, None, tb
except:
e2, _, tb2 = sys.exc_info()
assert e is e2
assert tb is tb2""")))
def testReturn(self):
self.assertEqual((0, 'bar\n'), _GrumpRun(textwrap.dedent("""\
def foo():
return 'bar'
print foo()""")))
def testTryBareExcept(self):
self.assertEqual((0, ''), _GrumpRun(textwrap.dedent("""\
try:
raise AssertionError
except:
pass""")))
def testTryElse(self):
self.assertEqual((0, 'foo baz\n'), _GrumpRun(textwrap.dedent("""\
try:
print 'foo',
except:
print 'bar'
else:
print 'baz'""")))
def testTryMultipleExcept(self):
self.assertEqual((0, 'bar\n'), _GrumpRun(textwrap.dedent("""\
try:
raise AssertionError
except RuntimeError:
print 'foo'
except AssertionError:
print 'bar'
except:
print 'baz'""")))
def testTryFinally(self):
result = _GrumpRun(textwrap.dedent("""\
try:
print 'foo',
finally:
print 'bar'
try:
print 'foo',
raise Exception
finally:
print 'bar'"""))
self.assertEqual(1, result[0])
# Some platforms show "exit status 1" message so don't test strict equality.
self.assertIn('foo bar\nfoo bar\nException\n', result[1])
def testWhile(self):
self.assertEqual((0, '2\n1\n'), _GrumpRun(textwrap.dedent("""\
i = 2
while i:
print i
i -= 1""")))
def testWhileElse(self):
self.assertEqual((0, 'bar\n'), _GrumpRun(textwrap.dedent("""\
while False:
print 'foo'
else:
print 'bar'""")))
def testWith(self):
self.assertEqual((0, 'enter\n1\nexit\nenter\n2\nexit\n3\n'),
_GrumpRun(textwrap.dedent("""\
class ContextManager(object):
def __enter__(self):
print "enter"
def __exit__(self, exc_type, value, traceback):
print "exit"
a = ContextManager()
with a:
print 1
try:
with a:
print 2
raise RuntimeError
except RuntimeError:
print 3
""")))
def testWithAs(self):
self.assertEqual((0, '1 2 3\n'),
_GrumpRun(textwrap.dedent("""\
class ContextManager(object):
def __enter__(self):
return (1, (2, 3))
def __exit__(self, *args):
pass
with ContextManager() as [x, (y, z)]:
print x, y, z
""")))
def testWriteExceptDispatcherBareExcept(self):
visitor = stmt.StatementVisitor(_MakeModuleBlock())
handlers = [ast.ExceptHandler(type=ast.Name(id='foo')),
ast.ExceptHandler(type=None)]
self.assertEqual(visitor._write_except_dispatcher( # pylint: disable=protected-access
'exc', 'tb', handlers), [1, 2])
expected = re.compile(r'ResolveGlobal\(.*foo.*\bIsInstance\(.*'
r'goto Label1.*goto Label2', re.DOTALL)
self.assertRegexpMatches(visitor.writer.out.getvalue(), expected)
def testWriteExceptDispatcherBareExceptionNotLast(self):
visitor = stmt.StatementVisitor(_MakeModuleBlock())
handlers = [ast.ExceptHandler(type=None),
ast.ExceptHandler(type=ast.Name(id='foo'))]
self.assertRaisesRegexp(util.ParseError, r"default 'except:' must be last",
visitor._write_except_dispatcher, # pylint: disable=protected-access
'exc', 'tb', handlers)
def testWriteExceptDispatcherMultipleExcept(self):
visitor = stmt.StatementVisitor(_MakeModuleBlock())
handlers = [ast.ExceptHandler(type=ast.Name(id='foo')),
ast.ExceptHandler(type=ast.Name(id='bar'))]
self.assertEqual(visitor._write_except_dispatcher( # pylint: disable=protected-access
'exc', 'tb', handlers), [1, 2])
expected = re.compile(
r'ResolveGlobal\(.*foo.*\bif .*\bIsInstance\(.*\{.*goto Label1.*'
r'ResolveGlobal\(.*bar.*\bif .*\bIsInstance\(.*\{.*goto Label2.*'
r'\bRaise\(exc\.ToObject\(\), nil, tb\.ToObject\(\)\)', re.DOTALL)
self.assertRegexpMatches(visitor.writer.out.getvalue(), expected)
def _MakeModuleBlock():
return block.ModuleBlock('__main__', 'grumpy', 'grumpy/lib', '<test>', [])
def _ParseAndVisit(source):
b = block.ModuleBlock('__main__', 'grumpy', 'grumpy/lib', '<test>',
source.split('\n'))
visitor = stmt.StatementVisitor(b)
visitor.visit(ast.parse(source))
return visitor
def _GrumpRun(cmd):
p = subprocess.Popen(['grumprun'], stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
out, _ = p.communicate(cmd)
return p.returncode, out
if __name__ == '__main__':
shard_test.main()
| apache-2.0 | 3,293,463,276,318,064,600 | 28.94926 | 97 | 0.567203 | false |
h2oloopan/easymerge | EasyMerge/tests/beets/ui/migrate.py | 1 | 13881 | # This file is part of beets.
# Copyright 2013, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Conversion from legacy (pre-1.1) configuration to Confit/YAML
configuration.
"""
import os
import ConfigParser
import codecs
import yaml
import logging
import time
import itertools
import re
import beets
from beets import util
from beets import ui
from beets.util import confit
CONFIG_PATH_VAR = 'BEETSCONFIG'
DEFAULT_CONFIG_FILENAME_UNIX = '.beetsconfig'
DEFAULT_CONFIG_FILENAME_WINDOWS = 'beetsconfig.ini'
DEFAULT_LIBRARY_FILENAME_UNIX = '.beetsmusic.blb'
DEFAULT_LIBRARY_FILENAME_WINDOWS = 'beetsmusic.blb'
WINDOWS_BASEDIR = os.environ.get('APPDATA') or '~'
OLD_CONFIG_SUFFIX = '.old'
PLUGIN_NAMES = {
'rdm': 'random',
'fuzzy_search': 'fuzzy',
}
AUTO_KEYS = ('automatic', 'autofetch', 'autoembed', 'autoscrub')
IMPORTFEEDS_PREFIX = 'feeds_'
CONFIG_MIGRATED_MESSAGE = u"""
You appear to be upgrading from beets 1.0 (or earlier) to 1.1. Your
configuration file has been migrated automatically to:
{newconfig}
Edit this file to configure beets. You might want to remove your
old-style ".beetsconfig" file now. See the documentation for more
details on the new configuration system:
http://beets.readthedocs.org/page/reference/config.html
""".strip()
DB_MIGRATED_MESSAGE = u'Your database file has also been copied to:\n{newdb}'
YAML_COMMENT = '# Automatically migrated from legacy .beetsconfig.\n\n'
log = logging.getLogger('beets')
# An itertools recipe.
def grouper(n, iterable):
args = [iter(iterable)] * n
return itertools.izip_longest(*args)
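# Illustrative sketch (not part of the original file): grouper collects
# fixed-size chunks and pads the tail with None, e.g.
#   list(grouper(2, 'abcde')) == [('a', 'b'), ('c', 'd'), ('e', None)]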
def _displace(fn):
"""Move a file aside using a timestamp suffix so a new file can be
put in its place.
"""
util.move(
fn,
u'{0}.old.{1}'.format(fn, int(time.time())),
True
)
def default_paths():
"""Produces the appropriate default config and library database
paths for the current system. On Unix, this is always in ~. On
Windows, tries ~ first and then $APPDATA for the config and library
files (for backwards compatibility).
"""
windows = os.path.__name__ == 'ntpath'
if windows:
windata = os.environ.get('APPDATA') or '~'
# Shorthand for joining paths.
def exp(*vals):
return os.path.expanduser(os.path.join(*vals))
config = exp('~', DEFAULT_CONFIG_FILENAME_UNIX)
if windows and not os.path.exists(config):
config = exp(windata, DEFAULT_CONFIG_FILENAME_WINDOWS)
libpath = exp('~', DEFAULT_LIBRARY_FILENAME_UNIX)
if windows and not os.path.exists(libpath):
libpath = exp(windata, DEFAULT_LIBRARY_FILENAME_WINDOWS)
return config, libpath
def get_config():
"""Using the same logic as beets 1.0, locate and read the
.beetsconfig file. Return a ConfigParser instance or None if no
config is found.
"""
default_config, default_libpath = default_paths()
if CONFIG_PATH_VAR in os.environ:
configpath = os.path.expanduser(os.environ[CONFIG_PATH_VAR])
else:
configpath = default_config
config = ConfigParser.SafeConfigParser()
if os.path.exists(util.syspath(configpath)):
with codecs.open(configpath, 'r', encoding='utf-8') as f:
config.readfp(f)
return config, configpath
else:
return None, configpath
def flatten_config(config):
"""Given a ConfigParser, flatten the values into a dict-of-dicts
representation where each section gets its own dictionary of values.
"""
out = confit.OrderedDict()
for section in config.sections():
sec_dict = out[section] = confit.OrderedDict()
for option in config.options(section):
sec_dict[option] = config.get(section, option, True)
return out
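# Illustrative sketch (hypothetical option names): a legacy config section such as
#   [beets]
#   library: ~/data/musiclibrary.blb
# flattens to {'beets': {'library': '~/data/musiclibrary.blb'}}.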
def transform_value(value):
"""Given a string read as the value of a config option, return a
massaged version of that value (possibly with a different type).
"""
# Booleans.
if value.lower() in ('false', 'no', 'off'):
return False
elif value.lower() in ('true', 'yes', 'on'):
return True
# Integers.
try:
return int(value)
except ValueError:
pass
# Floats.
try:
return float(value)
except ValueError:
pass
return value
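# Illustrative sketch (not part of the original file):
#   transform_value('yes') -> True       transform_value('off') -> False
#   transform_value('42') -> 42          transform_value('0.5') -> 0.5
#   transform_value('~/music') -> '~/music' (left as a plain string)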
def transform_data(data):
"""Given a dict-of-dicts representation of legacy config data, tweak
the data into a new form. This new form is suitable for dumping as
YAML.
"""
out = confit.OrderedDict()
for section, pairs in data.items():
if section == 'beets':
# The "main" section. In the new config system, these values
# are in the "root": no section at all.
for key, value in pairs.items():
value = transform_value(value)
if key.startswith('import_'):
# Importer config is now under an "import:" key.
if 'import' not in out:
out['import'] = confit.OrderedDict()
out['import'][key[7:]] = value
elif key == 'plugins':
# Renamed plugins.
plugins = value.split()
new_plugins = [PLUGIN_NAMES.get(p, p) for p in plugins]
out['plugins'] = ' '.join(new_plugins)
elif key == 'replace':
# YAMLy representation for character replacements.
replacements = confit.OrderedDict()
for pat, repl in grouper(2, value.split()):
if repl == '<strip>':
repl = ''
replacements[pat] = repl
out['replace'] = replacements
elif key == 'pluginpath':
# Used to be a colon-separated string. Now a list.
out['pluginpath'] = value.split(':')
else:
out[key] = value
elif pairs:
# Other sections (plugins, etc).
sec_out = out[section] = confit.OrderedDict()
for key, value in pairs.items():
# Standardized "auto" option.
if key in AUTO_KEYS:
key = 'auto'
# Unnecessary : hack in queries.
if section == 'paths':
key = key.replace('_', ':')
# Changed option names for importfeeds plugin.
if section == 'importfeeds':
if key.startswith(IMPORTFEEDS_PREFIX):
key = key[len(IMPORTFEEDS_PREFIX):]
sec_out[key] = transform_value(value)
return out
def outsideFunc(self):
return "outside"
class outsideClass:
def __init__(self):
print "outside"
class Dumper(yaml.SafeDumper):
"""A PyYAML Dumper that represents OrderedDicts as ordinary mappings
(in order, of course).
"""
# From http://pyyaml.org/attachment/ticket/161/use_ordered_dict.py
class insideClass:
def __init__(self):
print "inside"
def represent_mapping(self, tag, mapping="0,0", flow_style=None):
def insideFunc(inside):
return "Inside"
value = []
node = yaml.MappingNode(tag, value, flow_style=flow_style)
if self.alias_key is not None:
self.represented_objects[self.alias_key] = node
best_style = True
testFunc = outsideFunc()
testFunc = insideFunc()
testFunc = outsideClass()
testFunc = Dumper.insideClass()
if hasattr(mapping, 'items'):
mapping = list(mapping.items())
for item_key, item_value in mapping:
node_key = self.represent_data(item_key)
node_value = self.represent_data(item_value)
if not (isinstance(node_key, yaml.ScalarNode) and \
not node_key.style):
best_style = False
if not (isinstance(node_value, yaml.ScalarNode) and \
not node_value.style):
best_style = False
value.append((node_key, node_value))
if flow_style is None:
if self.default_flow_style is not None:
node.flow_style = self.default_flow_style
else:
node.flow_style = best_style
return node
Dumper.add_representer(confit.OrderedDict, Dumper.represent_dict)
def migrate_config(replace=False):
"""Migrate a legacy beetsconfig file to a new-style config.yaml file
in an appropriate place. If `replace` is enabled, then any existing
config.yaml will be moved aside. Otherwise, the process is aborted
when the file exists.
"""
# Load legacy configuration data, if any.
config, configpath = get_config()
if not config:
log.debug(u'no config file found at {0}'.format(
util.displayable_path(configpath)
))
return
# Get the new configuration file path and possibly move it out of
# the way.
destfn = os.path.join(beets.config.config_dir(), confit.CONFIG_FILENAME)
if os.path.exists(destfn):
if replace:
log.debug(u'moving old config aside: {0}'.format(
util.displayable_path(destfn)
))
_displace(destfn)
else:
# File exists and we won't replace it. We're done.
return
log.debug(u'migrating config file {0}'.format(
util.displayable_path(configpath)
))
# Convert the configuration to a data structure ready to be dumped
# as the new Confit file.
data = transform_data(flatten_config(config))
# Encode result as YAML.
yaml_out = yaml.dump(
data,
Dumper=Dumper,
default_flow_style=False,
indent=4,
width=1000,
)
# A ridiculous little hack to add some whitespace between "sections"
# in the YAML output. I hope this doesn't break any YAML syntax.
yaml_out = re.sub(r'(\n\w+:\n [^-\s])', '\n\\1', yaml_out)
yaml_out = YAML_COMMENT + yaml_out
# Write the data to the new config destination.
log.debug(u'writing migrated config to {0}'.format(
util.displayable_path(destfn)
))
with open(destfn, 'w') as f:
f.write(yaml_out)
return destfn
def migrate_db(replace=False):
"""Copy the beets library database file to the new location (e.g.,
from ~/.beetsmusic.blb to ~/.config/beets/library.db).
"""
_, srcfn = default_paths()
destfn = beets.config['library'].as_filename()
if not os.path.exists(srcfn) or srcfn == destfn:
# Old DB does not exist or we're configured to point to the same
# database. Do nothing.
return
if os.path.exists(destfn):
if replace:
log.debug(u'moving old database aside: {0}'.format(
util.displayable_path(destfn)
))
_displace(destfn)
else:
return
log.debug(u'copying database from {0} to {1}'.format(
util.displayable_path(srcfn), util.displayable_path(destfn)
))
util.copy(srcfn, destfn)
return destfn
def migrate_state(replace=False):
"""Copy the beets runtime state file from the old path (i.e.,
~/.beetsstate) to the new path (i.e., ~/.config/beets/state.pickle).
"""
srcfn = os.path.expanduser(os.path.join('~', '.beetsstate'))
if not os.path.exists(srcfn):
return
destfn = beets.config['statefile'].as_filename()
if os.path.exists(destfn):
if replace:
_displace(destfn)
else:
return
log.debug(u'copying state file from {0} to {1}'.format(
util.displayable_path(srcfn), util.displayable_path(destfn)
))
util.copy(srcfn, destfn)
return destfn
# Automatic migration when beets starts.
def automigrate():
"""Migrate the configuration, database, and state files. If any
migration occurs, print out a notice with some helpful next steps.
"""
config_fn = migrate_config()
db_fn = migrate_db()
migrate_state()
if config_fn:
ui.print_(ui.colorize('fuchsia', u'MIGRATED CONFIGURATION'))
ui.print_(CONFIG_MIGRATED_MESSAGE.format(
newconfig=util.displayable_path(config_fn))
)
if db_fn:
ui.print_(DB_MIGRATED_MESSAGE.format(
newdb=util.displayable_path(db_fn)
))
ui.input_(ui.colorize('fuchsia', u'Press ENTER to continue:'))
ui.print_()
# CLI command for explicit migration.
migrate_cmd = ui.Subcommand('migrate', help='convert legacy config')
def migrate_func(lib, opts, args):
"""Explicit command for migrating files. Existing files in each
destination are moved aside.
"""
config_fn = migrate_config(replace=True)
if config_fn:
log.info(u'Migrated configuration to: {0}'.format(
util.displayable_path(config_fn)
))
db_fn = migrate_db(replace=True)
if db_fn:
log.info(u'Migrated library database to: {0}'.format(
util.displayable_path(db_fn)
))
state_fn = migrate_state(replace=True)
if state_fn:
log.info(u'Migrated state file to: {0}'.format(
util.displayable_path(state_fn)
))
migrate_cmd.func = migrate_func
| mit | -8,236,018,070,992,035,000 | 32.367788 | 77 | 0.610187 | false |
tinyms/ArchiveX | tinyms/controller/setting.py | 1 | 3046 | __author__ = 'tinyms'
#coding=UTF8
from sqlalchemy import func
from tinyms.core.common import Utils
from tinyms.core.web import IAuthRequest
from tinyms.core.entity import Account
from tinyms.core.orm import SessionFactory
from tinyms.core.annotation import ObjectPool, route, setting, api
from tinyms.core.setting import UserSettingHelper, AppSettingHelper
@route("/workbench/setting")
class SettingPage(IAuthRequest):
def get(self, *args, **kwargs):
return self.render("workbench/setting.html", items=ObjectPool.setting)
@api("tinyms.core.setting")
class SettingApi():
def load(self):
usr = self.request.current_user
level_u = UserSettingHelper(usr)
level_u_ = level_u.load()
level_s = AppSettingHelper.load()
level_all = dict(level_u_, **level_s)
return level_all
def save(self):
kv = self.request.wrap_params_to_dict()
level_user = dict()
level_system = dict()
for k in kv:
if k.startswith("u_"):
level_user[k] = kv[k]
elif k.startswith("s_"):
level_system[k] = kv[k]
AppSettingHelper.set(level_system)
u = UserSettingHelper("%s" % self.request.current_user)
u.set(level_user)
#Allow users to make further data changes after the settings have been saved
items = ObjectPool.setting
for k in items.keys():
obj = items[k].cls()
if hasattr(obj, "save"):
msg = obj.save(kv, self.request)
if msg:
return msg
AppSettingHelper.reload()
return "success"
@setting("tinyms_core_setting_sys", "workbench/sys_setting_page.html", "基本", "tinyms.entity.setting.system")
class SystemSetting():
def save(self, kv, http_req):
return ""
def form_submit_javascript(self, http_req):
pass
def form_fill_javascript(self, http_req):
pass
@setting("tinyms_core_setting_user", "workbench/user_setting_page.html", "个人", "tinyms.entity.setting.user")
class SystemSetting():
def save(self, kv, http_req):
_usr_old_pwd = kv.get("_usr_old_pwd")
_usr_new_pwd = kv.get("_usr_new_pwd")
_usr_new_repwd = kv.get("_usr_new_repwd")
if _usr_old_pwd and _usr_new_pwd:
if _usr_new_pwd == _usr_new_repwd:
usr_id = http_req.current_user
sf = SessionFactory.new()
num = sf.query(func.count(Account.id)).filter(Account.id == usr_id) \
.filter(Account.login_pwd == Utils.md5(_usr_old_pwd)).scalar()
if num > 0:
a = sf.query(Account).get(usr_id)
a.login_pwd = Utils.md5(_usr_new_pwd)
sf.commit()
return ""
else:
return "PasswordError"
else:
return "PasswordNotSame"
def form_submit_javascript(self, req):
pass
def form_fill_javascript(self, req):
pass | bsd-3-clause | -2,948,846,204,067,711,000 | 30.925532 | 108 | 0.576 | false |
phobson/statsmodels | statsmodels/tools/tests/test_tools.py | 1 | 20793 | """
Test functions for models.tools
"""
from statsmodels.compat.python import lrange, range
import numpy as np
from numpy.random import standard_normal
from numpy.testing import (assert_equal, assert_array_equal,
assert_almost_equal, assert_string_equal, TestCase)
from nose.tools import (assert_true, assert_false, assert_raises)
import pandas as pd
from pandas.util.testing import assert_frame_equal, assert_series_equal
from statsmodels.datasets import longley
from statsmodels.tools import tools
from statsmodels.tools.tools import pinv_extended
from statsmodels.compat.numpy import np_matrix_rank
class TestTools(TestCase):
def test_add_constant_list(self):
x = lrange(1,5)
x = tools.add_constant(x)
y = np.asarray([[1,1,1,1],[1,2,3,4.]]).T
assert_equal(x, y)
def test_add_constant_1d(self):
x = np.arange(1,5)
x = tools.add_constant(x)
y = np.asarray([[1,1,1,1],[1,2,3,4.]]).T
assert_equal(x, y)
def test_add_constant_has_constant1d(self):
x = np.ones(5)
x = tools.add_constant(x, has_constant='skip')
assert_equal(x, np.ones((5,1)))
assert_raises(ValueError, tools.add_constant, x, has_constant='raise')
assert_equal(tools.add_constant(x, has_constant='add'),
np.ones((5, 2)))
def test_add_constant_has_constant2d(self):
x = np.asarray([[1,1,1,1],[1,2,3,4.]]).T
y = tools.add_constant(x, has_constant='skip')
assert_equal(x, y)
assert_raises(ValueError, tools.add_constant, x, has_constant='raise')
assert_equal(tools.add_constant(x, has_constant='add'),
np.column_stack((np.ones(4), x)))
def test_add_constant_recarray(self):
dt = np.dtype([('', int), ('', '<S4'), ('', np.float32), ('', np.float64)])
x = np.array([(1, 'abcd', 1.0, 2.0),
(7, 'abcd', 2.0, 4.0),
(21, 'abcd', 2.0, 8.0)], dt)
x = x.view(np.recarray)
y = tools.add_constant(x)
assert_equal(y['const'],np.array([1.0,1.0,1.0]))
for f in x.dtype.fields:
assert_true(y[f].dtype == x[f].dtype)
def test_add_constant_series(self):
s = pd.Series([1.0,2.0,3.0])
output = tools.add_constant(s)
expected = pd.Series([1.0,1.0,1.0],name='const')
assert_series_equal(expected, output['const'])
def test_add_constant_dataframe(self):
df = pd.DataFrame([[1.0, 'a', 4], [2.0, 'bc', 9], [3.0, 'def', 16]])
output = tools.add_constant(df)
expected = pd.Series([1.0, 1.0, 1.0], name='const')
assert_series_equal(expected, output['const'])
dfc = df.copy()
dfc.insert(0, 'const', np.ones(3))
assert_frame_equal(dfc, output)
def test_add_constant_zeros(self):
a = np.zeros(100)
output = tools.add_constant(a)
assert_equal(output[:,0],np.ones(100))
s = pd.Series([0.0,0.0,0.0])
output = tools.add_constant(s)
expected = pd.Series([1.0, 1.0, 1.0], name='const')
assert_series_equal(expected, output['const'])
df = pd.DataFrame([[0.0, 'a', 4], [0.0, 'bc', 9], [0.0, 'def', 16]])
output = tools.add_constant(df)
dfc = df.copy()
dfc.insert(0, 'const', np.ones(3))
assert_frame_equal(dfc, output)
df = pd.DataFrame([[1.0, 'a', 0], [0.0, 'bc', 0], [0.0, 'def', 0]])
output = tools.add_constant(df)
dfc = df.copy()
dfc.insert(0, 'const', np.ones(3))
assert_frame_equal(dfc, output)
def test_recipr(self):
X = np.array([[2,1],[-1,0]])
Y = tools.recipr(X)
assert_almost_equal(Y, np.array([[0.5,1],[0,0]]))
def test_recipr0(self):
X = np.array([[2,1],[-4,0]])
Y = tools.recipr0(X)
assert_almost_equal(Y, np.array([[0.5,1],[-0.25,0]]))
def test_rank(self):
import warnings
with warnings.catch_warnings():
warnings.simplefilter("ignore")
X = standard_normal((40,10))
self.assertEquals(tools.rank(X), np_matrix_rank(X))
X[:,0] = X[:,1] + X[:,2]
self.assertEquals(tools.rank(X), np_matrix_rank(X))
def test_extendedpinv(self):
X = standard_normal((40, 10))
np_inv = np.linalg.pinv(X)
np_sing_vals = np.linalg.svd(X, 0, 0)
sm_inv, sing_vals = pinv_extended(X)
assert_almost_equal(np_inv, sm_inv)
assert_almost_equal(np_sing_vals, sing_vals)
def test_extendedpinv_singular(self):
X = standard_normal((40, 10))
X[:, 5] = X[:, 1] + X[:, 3]
np_inv = np.linalg.pinv(X)
np_sing_vals = np.linalg.svd(X, 0, 0)
sm_inv, sing_vals = pinv_extended(X)
assert_almost_equal(np_inv, sm_inv)
assert_almost_equal(np_sing_vals, sing_vals)
def test_fullrank(self):
import warnings
with warnings.catch_warnings():
warnings.simplefilter("ignore")
X = standard_normal((40,10))
X[:,0] = X[:,1] + X[:,2]
Y = tools.fullrank(X)
self.assertEquals(Y.shape, (40,9))
self.assertEquals(tools.rank(Y), 9)
X[:,5] = X[:,3] + X[:,4]
Y = tools.fullrank(X)
self.assertEquals(Y.shape, (40,8))
warnings.simplefilter("ignore")
self.assertEquals(tools.rank(Y), 8)
def test_estimable():
rng = np.random.RandomState(20120713)
N, P = (40, 10)
X = rng.normal(size=(N, P))
C = rng.normal(size=(1, P))
isestimable = tools.isestimable
assert_true(isestimable(C, X))
assert_true(isestimable(np.eye(P), X))
for row in np.eye(P):
assert_true(isestimable(row, X))
X = np.ones((40, 2))
assert_true(isestimable([1, 1], X))
assert_false(isestimable([1, 0], X))
assert_false(isestimable([0, 1], X))
assert_false(isestimable(np.eye(2), X))
halfX = rng.normal(size=(N, 5))
X = np.hstack([halfX, halfX])
assert_false(isestimable(np.hstack([np.eye(5), np.zeros((5, 5))]), X))
assert_false(isestimable(np.hstack([np.zeros((5, 5)), np.eye(5)]), X))
assert_true(isestimable(np.hstack([np.eye(5), np.eye(5)]), X))
# Test array-like for design
XL = X.tolist()
assert_true(isestimable(np.hstack([np.eye(5), np.eye(5)]), XL))
# Test ValueError for incorrect number of columns
X = rng.normal(size=(N, 5))
for n in range(1, 4):
assert_raises(ValueError, isestimable, np.ones((n,)), X)
assert_raises(ValueError, isestimable, np.eye(4), X)
class TestCategoricalNumerical(object):
#TODO: use assert_raises to check that bad inputs are taken care of
def __init__(self):
#import string
stringabc = 'abcdefghijklmnopqrstuvwxy'
self.des = np.random.randn(25,2)
self.instr = np.floor(np.arange(10,60, step=2)/10)
x=np.zeros((25,5))
x[:5,0]=1
x[5:10,1]=1
x[10:15,2]=1
x[15:20,3]=1
x[20:25,4]=1
self.dummy = x
structdes = np.zeros((25,1),dtype=[('var1', 'f4'),('var2', 'f4'),
('instrument','f4'),('str_instr','a10')])
structdes['var1'] = self.des[:,0][:,None]
structdes['var2'] = self.des[:,1][:,None]
structdes['instrument'] = self.instr[:,None]
string_var = [stringabc[0:5], stringabc[5:10],
stringabc[10:15], stringabc[15:20],
stringabc[20:25]]
string_var *= 5
self.string_var = np.array(sorted(string_var))
structdes['str_instr'] = self.string_var[:,None]
self.structdes = structdes
self.recdes = structdes.view(np.recarray)
def test_array2d(self):
des = np.column_stack((self.des, self.instr, self.des))
des = tools.categorical(des, col=2)
assert_array_equal(des[:,-5:], self.dummy)
assert_equal(des.shape[1],10)
def test_array1d(self):
des = tools.categorical(self.instr)
assert_array_equal(des[:,-5:], self.dummy)
assert_equal(des.shape[1],6)
def test_array2d_drop(self):
des = np.column_stack((self.des, self.instr, self.des))
des = tools.categorical(des, col=2, drop=True)
assert_array_equal(des[:,-5:], self.dummy)
assert_equal(des.shape[1],9)
def test_array1d_drop(self):
des = tools.categorical(self.instr, drop=True)
assert_array_equal(des, self.dummy)
assert_equal(des.shape[1],5)
def test_recarray2d(self):
des = tools.categorical(self.recdes, col='instrument')
# better way to do this?
test_des = np.column_stack(([des[_] for _ in des.dtype.names[-5:]]))
assert_array_equal(test_des, self.dummy)
assert_equal(len(des.dtype.names), 9)
def test_recarray2dint(self):
des = tools.categorical(self.recdes, col=2)
test_des = np.column_stack(([des[_] for _ in des.dtype.names[-5:]]))
assert_array_equal(test_des, self.dummy)
assert_equal(len(des.dtype.names), 9)
def test_recarray1d(self):
instr = self.structdes['instrument'].view(np.recarray)
dum = tools.categorical(instr)
test_dum = np.column_stack(([dum[_] for _ in dum.dtype.names[-5:]]))
assert_array_equal(test_dum, self.dummy)
assert_equal(len(dum.dtype.names), 6)
def test_recarray1d_drop(self):
instr = self.structdes['instrument'].view(np.recarray)
dum = tools.categorical(instr, drop=True)
test_dum = np.column_stack(([dum[_] for _ in dum.dtype.names]))
assert_array_equal(test_dum, self.dummy)
assert_equal(len(dum.dtype.names), 5)
def test_recarray2d_drop(self):
des = tools.categorical(self.recdes, col='instrument', drop=True)
test_des = np.column_stack(([des[_] for _ in des.dtype.names[-5:]]))
assert_array_equal(test_des, self.dummy)
assert_equal(len(des.dtype.names), 8)
def test_structarray2d(self):
des = tools.categorical(self.structdes, col='instrument')
test_des = np.column_stack(([des[_] for _ in des.dtype.names[-5:]]))
assert_array_equal(test_des, self.dummy)
assert_equal(len(des.dtype.names), 9)
def test_structarray2dint(self):
des = tools.categorical(self.structdes, col=2)
test_des = np.column_stack(([des[_] for _ in des.dtype.names[-5:]]))
assert_array_equal(test_des, self.dummy)
assert_equal(len(des.dtype.names), 9)
def test_structarray1d(self):
instr = self.structdes['instrument'].view(dtype=[('var1', 'f4')])
dum = tools.categorical(instr)
test_dum = np.column_stack(([dum[_] for _ in dum.dtype.names[-5:]]))
assert_array_equal(test_dum, self.dummy)
assert_equal(len(dum.dtype.names), 6)
def test_structarray2d_drop(self):
des = tools.categorical(self.structdes, col='instrument', drop=True)
test_des = np.column_stack(([des[_] for _ in des.dtype.names[-5:]]))
assert_array_equal(test_des, self.dummy)
assert_equal(len(des.dtype.names), 8)
def test_structarray1d_drop(self):
instr = self.structdes['instrument'].view(dtype=[('var1', 'f4')])
dum = tools.categorical(instr, drop=True)
test_dum = np.column_stack(([dum[_] for _ in dum.dtype.names]))
assert_array_equal(test_dum, self.dummy)
assert_equal(len(dum.dtype.names), 5)
# def test_arraylike2d(self):
# des = tools.categorical(self.structdes.tolist(), col=2)
# test_des = des[:,-5:]
# assert_array_equal(test_des, self.dummy)
# assert_equal(des.shape[1], 9)
# def test_arraylike1d(self):
# instr = self.structdes['instrument'].tolist()
# dum = tools.categorical(instr)
# test_dum = dum[:,-5:]
# assert_array_equal(test_dum, self.dummy)
# assert_equal(dum.shape[1], 6)
# def test_arraylike2d_drop(self):
# des = tools.categorical(self.structdes.tolist(), col=2, drop=True)
# test_des = des[:,-5:]
# assert_array_equal(test_des, self.dummy)
# assert_equal(des.shape[1], 8)
# def test_arraylike1d_drop(self):
# instr = self.structdes['instrument'].tolist()
# dum = tools.categorical(instr, drop=True)
# assert_array_equal(dum, self.dummy)
# assert_equal(dum.shape[1], 5)
class TestCategoricalString(TestCategoricalNumerical):
# comment out until we have type coercion
# def test_array2d(self):
# des = np.column_stack((self.des, self.instr, self.des))
# des = tools.categorical(des, col=2)
# assert_array_equal(des[:,-5:], self.dummy)
# assert_equal(des.shape[1],10)
# def test_array1d(self):
# des = tools.categorical(self.instr)
# assert_array_equal(des[:,-5:], self.dummy)
# assert_equal(des.shape[1],6)
# def test_array2d_drop(self):
# des = np.column_stack((self.des, self.instr, self.des))
# des = tools.categorical(des, col=2, drop=True)
# assert_array_equal(des[:,-5:], self.dummy)
# assert_equal(des.shape[1],9)
def test_array1d_drop(self):
des = tools.categorical(self.string_var, drop=True)
assert_array_equal(des, self.dummy)
assert_equal(des.shape[1],5)
def test_recarray2d(self):
des = tools.categorical(self.recdes, col='str_instr')
# better way to do this?
test_des = np.column_stack(([des[_] for _ in des.dtype.names[-5:]]))
assert_array_equal(test_des, self.dummy)
assert_equal(len(des.dtype.names), 9)
def test_recarray2dint(self):
des = tools.categorical(self.recdes, col=3)
test_des = np.column_stack(([des[_] for _ in des.dtype.names[-5:]]))
assert_array_equal(test_des, self.dummy)
assert_equal(len(des.dtype.names), 9)
def test_recarray1d(self):
instr = self.structdes['str_instr'].view(np.recarray)
dum = tools.categorical(instr)
test_dum = np.column_stack(([dum[_] for _ in dum.dtype.names[-5:]]))
assert_array_equal(test_dum, self.dummy)
assert_equal(len(dum.dtype.names), 6)
def test_recarray1d_drop(self):
instr = self.structdes['str_instr'].view(np.recarray)
dum = tools.categorical(instr, drop=True)
test_dum = np.column_stack(([dum[_] for _ in dum.dtype.names]))
assert_array_equal(test_dum, self.dummy)
assert_equal(len(dum.dtype.names), 5)
def test_recarray2d_drop(self):
des = tools.categorical(self.recdes, col='str_instr', drop=True)
test_des = np.column_stack(([des[_] for _ in des.dtype.names[-5:]]))
assert_array_equal(test_des, self.dummy)
assert_equal(len(des.dtype.names), 8)
def test_structarray2d(self):
des = tools.categorical(self.structdes, col='str_instr')
test_des = np.column_stack(([des[_] for _ in des.dtype.names[-5:]]))
assert_array_equal(test_des, self.dummy)
assert_equal(len(des.dtype.names), 9)
def test_structarray2dint(self):
des = tools.categorical(self.structdes, col=3)
test_des = np.column_stack(([des[_] for _ in des.dtype.names[-5:]]))
assert_array_equal(test_des, self.dummy)
assert_equal(len(des.dtype.names), 9)
def test_structarray1d(self):
instr = self.structdes['str_instr'].view(dtype=[('var1', 'a10')])
dum = tools.categorical(instr)
test_dum = np.column_stack(([dum[_] for _ in dum.dtype.names[-5:]]))
assert_array_equal(test_dum, self.dummy)
assert_equal(len(dum.dtype.names), 6)
def test_structarray2d_drop(self):
des = tools.categorical(self.structdes, col='str_instr', drop=True)
test_des = np.column_stack(([des[_] for _ in des.dtype.names[-5:]]))
assert_array_equal(test_des, self.dummy)
assert_equal(len(des.dtype.names), 8)
def test_structarray1d_drop(self):
instr = self.structdes['str_instr'].view(dtype=[('var1', 'a10')])
dum = tools.categorical(instr, drop=True)
test_dum = np.column_stack(([dum[_] for _ in dum.dtype.names]))
assert_array_equal(test_dum, self.dummy)
assert_equal(len(dum.dtype.names), 5)
def test_arraylike2d(self):
pass
def test_arraylike1d(self):
pass
def test_arraylike2d_drop(self):
pass
def test_arraylike1d_drop(self):
pass
def test_rec_issue302():
arr = np.rec.fromrecords([[10], [11]], names='group')
actual = tools.categorical(arr)
expected = np.rec.array([(10, 1.0, 0.0), (11, 0.0, 1.0)],
dtype=[('group', int), ('group_10', float), ('group_11', float)])
assert_array_equal(actual, expected)
def test_issue302():
arr = np.rec.fromrecords([[10, 12], [11, 13]], names=['group', 'whatever'])
actual = tools.categorical(arr, col=['group'])
expected = np.rec.array([(10, 12, 1.0, 0.0), (11, 13, 0.0, 1.0)],
dtype=[('group', int), ('whatever', int), ('group_10', float),
('group_11', float)])
assert_array_equal(actual, expected)
def test_pandas_const_series():
dta = longley.load_pandas()
series = dta.exog['GNP']
series = tools.add_constant(series, prepend=False)
assert_string_equal('const', series.columns[1])
assert_equal(series.var(0)[1], 0)
def test_pandas_const_series_prepend():
dta = longley.load_pandas()
series = dta.exog['GNP']
series = tools.add_constant(series, prepend=True)
assert_string_equal('const', series.columns[0])
assert_equal(series.var(0)[0], 0)
def test_pandas_const_df():
dta = longley.load_pandas().exog
dta = tools.add_constant(dta, prepend=False)
assert_string_equal('const', dta.columns[-1])
assert_equal(dta.var(0)[-1], 0)
def test_pandas_const_df_prepend():
dta = longley.load_pandas().exog
# regression test for #1025
dta['UNEMP'] /= dta['UNEMP'].std()
dta = tools.add_constant(dta, prepend=True)
assert_string_equal('const', dta.columns[0])
assert_equal(dta.var(0)[0], 0)
def test_chain_dot():
A = np.arange(1,13).reshape(3,4)
B = np.arange(3,15).reshape(4,3)
C = np.arange(5,8).reshape(3,1)
assert_equal(tools.chain_dot(A,B,C), np.array([[1820],[4300],[6780]]))
class TestNanDot(object):
@classmethod
def setupClass(cls):
nan = np.nan
cls.mx_1 = np.array([[nan, 1.], [2., 3.]])
cls.mx_2 = np.array([[nan, nan], [2., 3.]])
cls.mx_3 = np.array([[0., 0.], [0., 0.]])
cls.mx_4 = np.array([[1., 0.], [1., 0.]])
cls.mx_5 = np.array([[0., 1.], [0., 1.]])
cls.mx_6 = np.array([[1., 2.], [3., 4.]])
def test_11(self):
test_res = tools.nan_dot(self.mx_1, self.mx_1)
expected_res = np.array([[ np.nan, np.nan], [ np.nan, 11.]])
assert_array_equal(test_res, expected_res)
def test_12(self):
nan = np.nan
test_res = tools.nan_dot(self.mx_1, self.mx_2)
expected_res = np.array([[ nan, nan], [ nan, nan]])
assert_array_equal(test_res, expected_res)
def test_13(self):
nan = np.nan
test_res = tools.nan_dot(self.mx_1, self.mx_3)
expected_res = np.array([[ 0., 0.], [ 0., 0.]])
assert_array_equal(test_res, expected_res)
def test_14(self):
nan = np.nan
test_res = tools.nan_dot(self.mx_1, self.mx_4)
expected_res = np.array([[ nan, 0.], [ 5., 0.]])
assert_array_equal(test_res, expected_res)
def test_41(self):
nan = np.nan
test_res = tools.nan_dot(self.mx_4, self.mx_1)
expected_res = np.array([[ nan, 1.], [ nan, 1.]])
assert_array_equal(test_res, expected_res)
def test_23(self):
nan = np.nan
test_res = tools.nan_dot(self.mx_2, self.mx_3)
expected_res = np.array([[ 0., 0.], [ 0., 0.]])
assert_array_equal(test_res, expected_res)
def test_32(self):
nan = np.nan
test_res = tools.nan_dot(self.mx_3, self.mx_2)
expected_res = np.array([[ 0., 0.], [ 0., 0.]])
assert_array_equal(test_res, expected_res)
def test_24(self):
nan = np.nan
test_res = tools.nan_dot(self.mx_2, self.mx_4)
expected_res = np.array([[ nan, 0.], [ 5., 0.]])
assert_array_equal(test_res, expected_res)
def test_25(self):
nan = np.nan
test_res = tools.nan_dot(self.mx_2, self.mx_5)
expected_res = np.array([[ 0., nan], [ 0., 5.]])
assert_array_equal(test_res, expected_res)
def test_66(self):
nan = np.nan
test_res = tools.nan_dot(self.mx_6, self.mx_6)
expected_res = np.array([[ 7., 10.], [ 15., 22.]])
assert_array_equal(test_res, expected_res)
| bsd-3-clause | -8,172,001,138,678,050,000 | 36.943431 | 83 | 0.581879 | false |
IonAgorria/SysWatcher | modules/smart.py | 1 | 2212 | import logging
from subprocess import run, PIPE
from module import Module
from utils import break_lines, show_message
__author__ = 'Ion Agorria'
class Smart(Module):
name = "SMART"
def __init__(self, modules):
super().__init__(modules)
self.update_notified = []
def run(self):
"""Called at interval, must return number (negative if error)"""
for device, config_code in self.get_config():
error = None
status, firmware, test, code = self.get_smart(device)
device = device.split(" ")[0]
if status is None:
continue
if status != "Enabled":
error = "Device %s doesn't have SMART enabled: %s [%s]" % (device, status, code)
elif test != "PASSED":
error = "Device %s has test result: %s [%s]" % (device, test, code)
elif code != int(config_code):
error = "Device %s returned code %s instead of %s" % (device, code, config_code)
elif firmware and device not in self.update_notified:
self.update_notified.append(device)
text = "Device %s has firmware update [%s]" % (device, code)
show_message(self.logger, logging.INFO, text)
if error is not None:
show_message(self.logger, logging.WARNING, error)
return 0
@staticmethod
def get_smart(device):
process = run("smartctl -H -i /dev/%s" % device, stdout=PIPE, shell=True)
result = str(process.stdout, "UTF-8")
code = process.returncode
result = break_lines(result)
line_status = "SMART support is:"
line_firmware = "A firmware update for this drive may be available"
line_test = "SMART overall-health self-assessment test result:"
status = None
firmware = False
test = None
for line in result:
parts = line.split(" ")
if line_status in line:
status = parts[-1]
if line_firmware in line:
firmware = True
if line_test in line:
test = parts[-1]
return status, firmware, test, code
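# For reference, the two smartctl lines that get_smart() keys on look like this
# (a trimmed, illustrative transcript rather than a captured one):
#
#   SMART support is: Enabled
#   SMART overall-health self-assessment test result: PASSED
#
# For such output get_smart() returns ("Enabled", False, "PASSED", <exit code>),
# and run() then compares that exit code against the value configured for the
# device via get_config().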
| gpl-3.0 | -7,410,101,434,767,311,000 | 35.866667 | 96 | 0.55651 | false |
jcnix/shade | social/auth.py | 1 | 2295 | from django.contrib import auth
from django.contrib.auth.models import User
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render, render_to_response
from django.template import RequestContext
import forms as myforms
def login(request):
if not request.user.is_authenticated():
form = myforms.LoginForm()
if request.method == 'POST':
form = myforms.LoginForm(request.POST)
if form.is_valid():
e = form.cleaned_data['email']
p = form.cleaned_data['password']
user = auth.authenticate(username=e, password=p)
if user is not None:
auth.login(request, user)
return HttpResponseRedirect('/dashboard/')
else:
form._errors['email'] = [u'Unable to authenticate']
return render(request, 'registration/login.html', {'form': form})
return render(request, 'registration/login.html', {'form': form})
else:
return HttpResponseRedirect('/')
def logout(request):
auth.logout(request)
return HttpResponseRedirect('/login/')
def register(request):
if not request.user.is_authenticated():
if request.method == 'POST':
form = myforms.RegisterForm(request.POST)
if form.is_valid():
e = form.cleaned_data['email']
p = form.cleaned_data['password']
fn = form.cleaned_data['first_name']
ln = form.cleaned_data['last_name']
user = User.objects.create_user(
username=e,
email=e,
password=p
)
user.first_name = fn
user.last_name= ln
user.save()
return HttpResponseRedirect('/')
else:
return render_to_response('register.html', {'form': form},
context_instance=RequestContext(request))
else:
form = myforms.RegisterForm()
return render_to_response('register.html', {'form': form},
context_instance=RequestContext(request))
else:
return HttpResponseRedirect('/')
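# Hypothetical urls.py wiring for these views (the URL patterns are assumptions;
# the dotted path social.auth matches this module):
#
# from django.conf.urls import patterns, url
#
# urlpatterns = patterns('',
#     url(r'^login/$', 'social.auth.login'),
#     url(r'^logout/$', 'social.auth.logout'),
#     url(r'^register/$', 'social.auth.register'),
# )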
| gpl-3.0 | -197,508,900,974,582,820 | 37.898305 | 85 | 0.554684 | false |
CoachCoen/games | settings.py | 1 | 2112 | from vector import Vector
class Config:
"""
Configuration - mostly the (relative) position of various screen elements
"""
# TODO: Work this out dynamically
tabletop_size = Vector(1368, 768)
scaling_factor = 1
card_size = Vector(100, 120)
card_spacing = 10
column_width = card_size.x + card_spacing
row_height = card_size.y + card_spacing
points_location = Vector(40, 10)
cost_location = Vector(20, 18)
card_decks_location = Vector(50, 120)
chip_size = 23
chip_spacing = 10
central_area_location = Vector(360, 130)
chip_stack_location = Vector(0, 100)
chip_stack_size = 25
chip_stack_spacing = 5
chip_cost_scaling = 0.47
tile_size = Vector(100, 100)
tile_spacing = 10
tiles_row_location = Vector(50, 0)
chip_font_size = 36
reward_chip_location = Vector(75, 25)
reward_chip_scaling = 0.7
player_area_size = Vector(300, 225)
player_name_location = Vector(5, 5)
player_chip_stack_location = Vector(40, 40)
player_chip_stack_scaling = 0.5
player_item_size = 30
player_reserved_location = Vector(10, 95)
player_points_location = Vector(player_area_size.x - 30, 5)
player_winner_message_location = Vector(player_area_size.x / 2 - 50,
player_area_size.y - 50)
player_winner_font_size = 36
holding_area_size = Vector(350, 200)
holding_area_location = Vector(1000, 300)
holding_area_tile_location = Vector(30, 30)
holding_area_card_location = Vector(30, 30)
holding_area_chips_location = Vector(150, 30)
holding_area_name_location = Vector(5, 5)
holding_area_chip_scaling = 1
holding_area_too_many_chips_location = Vector(30, 120)
button_text_location = Vector(5, 5)
# TODO: Dynamically determine button size
button_size = Vector(80, 30)
cancel_button_location = Vector(10, 160)
confirm_button_location = Vector(100, 160)
config = Config()
try:
from settings_local import local_config
config.__dict__.update(local_config)
except ImportError:
pass
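# A hypothetical settings_local.py illustrating the override hook above; the
# attribute names are real Config fields, the values are invented:
#
#     local_config = {
#         'scaling_factor': 1.5,
#         'holding_area_location': Vector(900, 250),
#     }
#
# Anything placed in local_config replaces the corresponding default on the
# shared `config` instance at import time.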
| mit | -4,934,462,521,332,214,000 | 24.445783 | 78 | 0.645833 | false |
pierrelux/mathnotes | mathnotes/views/auth.py | 1 | 1936 | from flask_oauthlib.client import OAuth
from mathnotes.models import db, ZoteroAuthorization
from flask import Flask, redirect, url_for, render_template, jsonify, Response, current_app, Blueprint, request, flash
from flask.ext.login import login_required, current_user
oauth = OAuth()
auth = Blueprint('auth', __name__, url_prefix='/auth')
zotero = oauth.remote_app(
'zotero',
base_url='https://api.zotero.org',
request_token_url='https://www.zotero.org/oauth/request',
access_token_url='https://www.zotero.org/oauth/access',
authorize_url='https://www.zotero.org/oauth/authorize',
app_key='ZOTERO'
)
@zotero.tokengetter
def get_zotero_token():
auth=current_user.authorizations.first()
if auth is not None:
return auth.oauth_token, auth.oauth_secret
return None
@auth.route('/oauth/zotero')
@login_required
def zotero_auth():
callback_url = url_for('auth.zotero_authorized', next=request.args.get('next'))
return zotero.authorize(callback=callback_url or request.referrer or None)
@auth.route('/oauth/zotero/authorized')
@login_required
@zotero.authorized_handler
def zotero_authorized(resp):
if resp is not None:
auth = ZoteroAuthorization(oauth_token=resp['oauth_token'],
oauth_secret=resp['oauth_token_secret'],
userID=resp['userID'],
username=resp['username'],
user_id=current_user.id)
db.session.add(auth)
db.session.commit()
else:
flash("Remote authentication to Zotero failed")
return redirect(request.args.get("next") or url_for("frontend.index"))
@auth.route('/oauth/zotero/disconnect')
@login_required
def zotero_disconnect():
auth=current_user.authorizations.first()
db.session.delete(auth)
db.session.commit()
return redirect(request.args.get("next") or url_for("frontend.index"))
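# Illustrative sketch (not part of the original module): once the flow above has
# stored a token, the `zotero` remote app signs API calls with it through the
# tokengetter. The route name and endpoint below are assumptions.
#
# @auth.route('/oauth/zotero/items')
# @login_required
# def zotero_items():
#     auth_row = current_user.authorizations.first()
#     resp = zotero.get('users/%s/items?format=json&limit=5' % auth_row.userID)
#     return Response(repr(resp.data), mimetype='text/plain')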
| bsd-3-clause | 237,288,060,501,950,180 | 32.964912 | 111 | 0.668388 | false |
malaterre/dicom-private-dicts | re/pms/dump1.py | 1 | 3032 | #!/usr/bin/env python
""" dump 1 """
import sys, json
from struct import *
array=[]
def doit(f):
chunk = f.read(0x2)
l0 = unpack('>H', chunk)
assert l0[0] == 50
chunk = f.read(l0[0])
s = unpack('>%ds' % l0[0], chunk)
chunk = f.read(0x1)
l2 = unpack('>B', chunk)
#assert l2[0] == 0
chunk = f.read(0x2)
l1 = unpack('>H', chunk)
#print l1[0] # wotsit ?
#print l0[0],s[0].decode('utf-16'),l1[0],l2[0]
#print l0[0],s[0].decode('utf-16'),l1[0]+l2[0]
#print s[0].decode('utf-16'),l1[0]
el = {}
el['name'] = s[0].decode('utf-16')
el['index'] = l1[0]+l2[0]
array.append( el )
def extract_name(i,f):
chunk = f.read(0x1)
o = unpack('>B', chunk)
assert o[0] == 1
chunk = f.read(0x1)
l0 = unpack('>B', chunk)
chunk = f.read(l0[0])
s = unpack('>%ds' % l0[0], chunk)
#print s[0]
array[i]['value']=s[0]
array[i]['len']=l0[0]
def isnull(instr):
for c in instr:
assert ord(c) == 0
def extract_dad_file(i,f):
print f.tell()
corr = 1 # old (orig file)
corr = 0 # already aligned ???
assert (f.tell() - corr) % 8 == 0 # 8bytes alignement
# read length:
chunk = f.read(0x4)
z = unpack('<I', chunk)
fl = z[0]
chunk = f.read(fl)
with open("output_%03d.dad" % i, "wb") as binfile:
binfile.write( chunk )
# trailing stuff handling:
pad = (f.tell() - corr) % 8
if pad != 0:
chunk = f.read(8 - pad)
isnull(chunk) # no digital trash, must be an in-memory representation
# the interesting stuff lies in:
# $ dd if=PmsDView.DMP of=dummy2.exe skip=104921721 count=1802240 bs=1
# as a side note we also have:
# $ dd if=PmsDView.DMP of=dummy3.exe skip=106723961 count=1802240 bs=1
# $ md5sum dummy2.exe dummy3.exe
# 6a58cd8dc039b2cfbeb4529b4fd13106 dummy2.exe
# 6a58cd8dc039b2cfbeb4529b4fd13106 dummy3.exe
if __name__ == "__main__":
filename = sys.argv[1]
with open(filename,'rb') as f:
# MZ starts at 0x640FA79
#f.seek( 104932524 ) # 0x64124ac # orig file
f.seek( 0x12F86F3 ) # new
# file type 1:
#print "start:", f.tell()
chunk = f.read(0x2)
d = unpack('>H', chunk)
assert d[0] == 120 # number of elements (x2)?
chunk = f.read(0x2)
d = unpack('>H', chunk)
print d # wotsit ?
assert d[0] == 0x0f00
for i in range(0,60):
doit(f)
chunk = f.read(0x1)
z = unpack('>B', chunk)
assert z[0] == 0
#print (f.tell() - 1) % 4
for i in range(0,60):
extract_name(i,f)
#print "end:", f.tell()
# file type dad/dotd:
chunk = f.read(5)
for i in range(0,153):
# i > 153 is junk...
extract_dad_file(i,f)
print format(f.tell(), '08x')
chunk = f.read(2000)
# Some .NET stuff (BSJB)
# The initials correspond to Brian Harry, Susan Radke-Sproull, Jason
# Zander, and Bill Evans who were part of the team in 1998 that worked on
# the CLR.
with open("general_metadata_header.bin" , "wb") as binfile:
binfile.write( chunk )
#print array
#print json.dumps(array, sort_keys=True, indent=4)
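# Possible sanity check (an assumption, not in the original script): the chunk
# written above is described as .NET metadata, so the BSJB signature should
# appear somewhere in general_metadata_header.bin.
#
# with open("general_metadata_header.bin", "rb") as g:
#     assert "BSJB" in g.read(2000)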
| bsd-3-clause | -5,247,748,825,108,255,000 | 26.315315 | 77 | 0.581464 | false |
parksandwildlife/wastd | taxonomy/migrations/0003_auto_20181022_1156.py | 1 | 3205 | # Generated by Django 2.0.8 on 2018-10-22 03:56
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('taxonomy', '0002_auto_20180926_1147'),
]
operations = [
migrations.AlterField(
model_name='hbvfamily',
name='class_name',
field=models.CharField(blank=True, help_text='', max_length=1000, null=True, verbose_name='Class'),
),
migrations.AlterField(
model_name='hbvfamily',
name='division_name',
field=models.CharField(blank=True, help_text='', max_length=1000, null=True, verbose_name='Division'),
),
migrations.AlterField(
model_name='hbvfamily',
name='family_name',
field=models.CharField(blank=True, help_text='', max_length=1000, null=True, verbose_name='Family Name'),
),
migrations.AlterField(
model_name='hbvfamily',
name='kingdom_name',
field=models.CharField(blank=True, help_text='', max_length=1000, null=True, verbose_name='Kingdom'),
),
migrations.AlterField(
model_name='hbvfamily',
name='order_name',
field=models.CharField(blank=True, help_text='', max_length=1000, null=True, verbose_name='Order Name'),
),
migrations.AlterField(
model_name='hbvfamily',
name='supra_code',
field=models.CharField(blank=True, help_text='', max_length=1000, null=True, verbose_name='HBV Suprafamily Group Code'),
),
migrations.AlterField(
model_name='hbvgroup',
name='class_id',
field=models.CharField(blank=True, help_text='', max_length=1000, null=True, verbose_name='HBV Suprafamily Group Code'),
),
migrations.AlterField(
model_name='hbvparent',
name='class_id',
field=models.CharField(blank=True, help_text='', max_length=100, null=True, verbose_name='WACensus ClassID'),
),
migrations.AlterField(
model_name='hbvspecies',
name='consv_code',
field=models.CharField(blank=True, help_text='', max_length=100, null=True, verbose_name='Conservation Code'),
),
migrations.AlterField(
model_name='hbvspecies',
name='naturalised',
field=models.CharField(blank=True, help_text='', max_length=100, null=True, verbose_name='Naturalised'),
),
migrations.AlterField(
model_name='hbvspecies',
name='ranking',
field=models.CharField(blank=True, help_text='', max_length=100, null=True, verbose_name='Ranking'),
),
migrations.AlterField(
model_name='hbvvernacular',
name='name',
field=models.CharField(blank=True, help_text='', max_length=1000, null=True, verbose_name='Name'),
),
migrations.AlterField(
model_name='hbvvernacular',
name='vernacular',
field=models.CharField(blank=True, help_text='', max_length=1000, null=True, verbose_name='Vernacular Name'),
),
]
| mit | 3,855,156,913,284,721,700 | 40.089744 | 132 | 0.582527 | false |
jamespcole/home-assistant | homeassistant/components/openuv/binary_sensor.py | 1 | 3751 | """Support for OpenUV binary sensors."""
import logging
from homeassistant.components.binary_sensor import BinarySensorDevice
from homeassistant.core import callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.util.dt import as_local, parse_datetime, utcnow
from . import (
BINARY_SENSORS, DATA_OPENUV_CLIENT, DATA_PROTECTION_WINDOW, DOMAIN,
TOPIC_UPDATE, TYPE_PROTECTION_WINDOW, OpenUvEntity)
_LOGGER = logging.getLogger(__name__)
ATTR_PROTECTION_WINDOW_ENDING_TIME = 'end_time'
ATTR_PROTECTION_WINDOW_ENDING_UV = 'end_uv'
ATTR_PROTECTION_WINDOW_STARTING_TIME = 'start_time'
ATTR_PROTECTION_WINDOW_STARTING_UV = 'start_uv'
DEPENDENCIES = ['openuv']
async def async_setup_platform(
hass, config, async_add_entities, discovery_info=None):
"""Set up an OpenUV sensor based on existing config."""
pass
async def async_setup_entry(hass, entry, async_add_entities):
"""Set up an OpenUV sensor based on a config entry."""
openuv = hass.data[DOMAIN][DATA_OPENUV_CLIENT][entry.entry_id]
binary_sensors = []
for sensor_type in openuv.binary_sensor_conditions:
name, icon = BINARY_SENSORS[sensor_type]
binary_sensors.append(
OpenUvBinarySensor(
openuv, sensor_type, name, icon, entry.entry_id))
async_add_entities(binary_sensors, True)
class OpenUvBinarySensor(OpenUvEntity, BinarySensorDevice):
"""Define a binary sensor for OpenUV."""
def __init__(self, openuv, sensor_type, name, icon, entry_id):
"""Initialize the sensor."""
super().__init__(openuv)
self._async_unsub_dispatcher_connect = None
self._entry_id = entry_id
self._icon = icon
self._latitude = openuv.client.latitude
self._longitude = openuv.client.longitude
self._name = name
self._sensor_type = sensor_type
self._state = None
@property
def icon(self):
"""Return the icon."""
return self._icon
@property
def is_on(self):
"""Return the status of the sensor."""
return self._state
@property
def should_poll(self):
"""Disable polling."""
return False
@property
def unique_id(self) -> str:
"""Return a unique, HASS-friendly identifier for this entity."""
return '{0}_{1}_{2}'.format(
self._latitude, self._longitude, self._sensor_type)
async def async_added_to_hass(self):
"""Register callbacks."""
@callback
def update():
"""Update the state."""
self.async_schedule_update_ha_state(True)
self._async_unsub_dispatcher_connect = async_dispatcher_connect(
self.hass, TOPIC_UPDATE, update)
async def async_will_remove_from_hass(self):
"""Disconnect dispatcher listener when removed."""
if self._async_unsub_dispatcher_connect:
self._async_unsub_dispatcher_connect()
async def async_update(self):
"""Update the state."""
data = self.openuv.data[DATA_PROTECTION_WINDOW]
if not data:
return
if self._sensor_type == TYPE_PROTECTION_WINDOW:
self._state = parse_datetime(
data['from_time']) <= utcnow() <= parse_datetime(
data['to_time'])
self._attrs.update({
ATTR_PROTECTION_WINDOW_ENDING_TIME:
as_local(parse_datetime(data['to_time'])),
ATTR_PROTECTION_WINDOW_ENDING_UV: data['to_uv'],
ATTR_PROTECTION_WINDOW_STARTING_UV: data['from_uv'],
ATTR_PROTECTION_WINDOW_STARTING_TIME:
as_local(parse_datetime(data['from_time'])),
})
| apache-2.0 | 5,144,571,123,839,647,000 | 32.491071 | 72 | 0.625966 | false |
gokmen/Rasta | rasta_lib/model.py | 1 | 1631 | #!/usr/bin/python
# -*- coding: utf-8 -*-
''' Rasta RST Editor
2010 - Gökmen Göksel <gokmeng:gmail.com> '''
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option)
# any later version.
from PyQt4.QtCore import Qt
from PyQt4.QtCore import QVariant
from PyQt4.QtCore import QAbstractTableModel
# i18n Support
import gettext
_ = gettext.translation('rasta', fallback=True).ugettext
class LogTableModel(QAbstractTableModel):
''' Log table model for showing the logs in a proper way '''
def __init__(self, logs, parent=None, *args):
QAbstractTableModel.__init__(self, parent, *args)
self.arraydata = logs
self.headerdata = [_('Line'), _('Message')]
def rowCount(self, parent):
''' Return number of logs '''
return len(self.arraydata)
def columnCount(self, parent):
''' It always returns 2 for now: Line and Message '''
return len(self.headerdata)
def data(self, index, role):
''' Return data for given index and role '''
if not index.isValid():
return QVariant()
elif role != Qt.DisplayRole:
return QVariant()
return QVariant(self.arraydata[index.row()][index.column()])
def headerData(self, col, orientation, role):
''' Return Header data for given column '''
if orientation == Qt.Horizontal and role == Qt.DisplayRole:
return QVariant(self.headerdata[col])
return QVariant()
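# Minimal usage sketch (not part of the original file; the sample log tuples are
# invented) showing how the model is meant to be attached to a view:
#
# from PyQt4.QtGui import QTableView
#
# logs = [(12, _('Unexpected indentation.')), (40, _('Unknown directive.'))]
# view = QTableView()
# view.setModel(LogTableModel(logs))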
| gpl-2.0 | 9,176,064,816,522,145,000 | 32.9375 | 79 | 0.654389 | false |
mdavoodi/konkourse-python | messages/views.py | 1 | 2496 | # Create your views here.
from django.http import HttpResponse
from django.template import loader
from django.template.context import Context
from django.template import RequestContext
from django.shortcuts import redirect
from django.shortcuts import render
from conversation.models import ConvoWall, ConversationPost
def messages(request):
if request.user.is_authenticated():
first_name = request.user.first_name
last_name = request.user.last_name
username = request.user.username
messageWall = request.user.get_profile().messages
messages = ConversationPost.objects.filter(wall=request.user.get_profile().messages)
variables_for_template = {
'first_name': first_name,
'last_name': last_name,
'username': username,
'messages': messages,
'messageWall': messageWall,
}
        return render(request, 'website/messages.html', variables_for_template,
                      context_instance=RequestContext(request))
    # fall back for anonymous users (the redirect target '/' is an assumption)
    return redirect('/')
def messages_compose(request):
if request.user.is_authenticated():
first_name = request.user.first_name
last_name = request.user.last_name
username = request.user.username
messageWall = request.user.get_profile().messages
messages = ConversationPost.objects.filter(wall=request.user.get_profile().messages)
variables_for_template = {
'first_name': first_name,
'last_name': last_name,
'username': username,
'messages': messages,
'messageWall': messageWall,
}
        return render(request, 'website/messages_compose.html', variables_for_template,
                      context_instance=RequestContext(request))
    # fall back for anonymous users (redirect target assumed)
    return redirect('/')
def messages_view(request):
if request.user.is_authenticated():
first_name = request.user.first_name
last_name = request.user.last_name
username = request.user.username
messageWall = request.user.get_profile().messages
messages = ConversationPost.objects.filter(wall=request.user.get_profile().messages)
variables_for_template = {
'first_name': first_name,
'last_name': last_name,
'username': username,
'messages': messages,
'messageWall': messageWall,
}
        return render(request, 'website/messages_view.html', variables_for_template,
                      context_instance=RequestContext(request))
    # fall back for anonymous users (redirect target assumed)
    return redirect('/')
| mit | -7,466,661,102,449,948,000 | 37.4 | 92 | 0.648638 | false |
jelly/calibre | src/calibre/gui2/tweak_book/ui.py | 1 | 39312 | #!/usr/bin/env python2
# vim:fileencoding=utf-8
from __future__ import (unicode_literals, division, absolute_import,
print_function)
__license__ = 'GPL v3'
__copyright__ = '2013, Kovid Goyal <kovid at kovidgoyal.net>'
import os
from functools import partial
from itertools import product
from future_builtins import map
from PyQt5.Qt import (
QDockWidget, Qt, QLabel, QIcon, QAction, QApplication, QWidget, QEvent,
QVBoxLayout, QStackedWidget, QTabWidget, QImage, QPixmap, pyqtSignal,
QMenu, QHBoxLayout, QTimer, QUrl, QSize)
from calibre import prints
from calibre.constants import __appname__, get_version, isosx, DEBUG
from calibre.gui2 import elided_text, open_url
from calibre.gui2.dbus_export.widgets import factory
from calibre.gui2.keyboard import Manager as KeyboardManager
from calibre.gui2.main_window import MainWindow
from calibre.gui2.throbber import ThrobbingButton
from calibre.gui2.tweak_book import (
current_container, tprefs, actions, capitalize, toolbar_actions, editors, update_mark_text_action)
from calibre.gui2.tweak_book.file_list import FileListWidget
from calibre.gui2.tweak_book.job import BlockingJob
from calibre.gui2.tweak_book.boss import Boss
from calibre.gui2.tweak_book.undo import CheckpointView
from calibre.gui2.tweak_book.preview import Preview
from calibre.gui2.tweak_book.plugin import create_plugin_actions
from calibre.gui2.tweak_book.search import SearchPanel
from calibre.gui2.tweak_book.check import Check
from calibre.gui2.tweak_book.check_links import CheckExternalLinks
from calibre.gui2.tweak_book.spell import SpellCheck
from calibre.gui2.tweak_book.search import SavedSearches
from calibre.gui2.tweak_book.text_search import TextSearch
from calibre.gui2.tweak_book.toc import TOCViewer
from calibre.gui2.tweak_book.char_select import CharSelect
from calibre.gui2.tweak_book.live_css import LiveCSS
from calibre.gui2.tweak_book.reports import Reports
from calibre.gui2.tweak_book.manage_fonts import ManageFonts
from calibre.gui2.tweak_book.function_replace import DebugOutput
from calibre.gui2.tweak_book.editor.widget import register_text_editor_actions
from calibre.gui2.tweak_book.editor.insert_resource import InsertImage
from calibre.utils.icu import character_name, sort_key
from calibre.utils.localization import localize_user_manual_link
def open_donate():
open_url(QUrl('https://calibre-ebook.com/donate'))
class Central(QStackedWidget): # {{{
' The central widget, hosts the editors '
current_editor_changed = pyqtSignal()
close_requested = pyqtSignal(object)
def __init__(self, parent=None):
QStackedWidget.__init__(self, parent)
self.welcome = w = QLabel('<p>'+_(
'Double click a file in the left panel to start editing'
' it.'))
self.addWidget(w)
w.setWordWrap(True)
w.setAlignment(Qt.AlignTop | Qt.AlignHCenter)
self.container = c = QWidget(self)
self.addWidget(c)
l = c.l = QVBoxLayout(c)
c.setLayout(l)
l.setContentsMargins(0, 0, 0, 0)
self.editor_tabs = t = QTabWidget(c)
l.addWidget(t)
t.setDocumentMode(True)
t.setTabsClosable(True)
t.setMovable(True)
pal = self.palette()
if pal.color(pal.WindowText).lightness() > 128:
i = QImage(I('modified.png'))
i.invertPixels()
self.modified_icon = QIcon(QPixmap.fromImage(i))
else:
self.modified_icon = QIcon(I('modified.png'))
self.editor_tabs.currentChanged.connect(self.current_editor_changed)
self.editor_tabs.tabCloseRequested.connect(self._close_requested)
self.search_panel = SearchPanel(self)
l.addWidget(self.search_panel)
self.restore_state()
self.editor_tabs.tabBar().installEventFilter(self)
def _close_requested(self, index):
editor = self.editor_tabs.widget(index)
self.close_requested.emit(editor)
def add_editor(self, name, editor):
fname = name.rpartition('/')[2]
index = self.editor_tabs.addTab(editor, fname)
self.editor_tabs.setTabToolTip(index, _('Full path:') + ' ' + name)
editor.modification_state_changed.connect(self.editor_modified)
@property
def tab_order(self):
ans = []
rmap = {v:k for k, v in editors.iteritems()}
for i in xrange(self.editor_tabs.count()):
name = rmap.get(self.editor_tabs.widget(i))
if name is not None:
ans.append(name)
return ans
def rename_editor(self, editor, name):
for i in xrange(self.editor_tabs.count()):
if self.editor_tabs.widget(i) is editor:
fname = name.rpartition('/')[2]
self.editor_tabs.setTabText(i, fname)
self.editor_tabs.setTabToolTip(i, _('Full path:') + ' ' + name)
def show_editor(self, editor):
self.setCurrentIndex(1)
self.editor_tabs.setCurrentWidget(editor)
def close_editor(self, editor):
for i in xrange(self.editor_tabs.count()):
if self.editor_tabs.widget(i) is editor:
self.editor_tabs.removeTab(i)
if self.editor_tabs.count() == 0:
self.setCurrentIndex(0)
return True
return False
def editor_modified(self, *args):
tb = self.editor_tabs.tabBar()
for i in xrange(self.editor_tabs.count()):
editor = self.editor_tabs.widget(i)
modified = getattr(editor, 'is_modified', False)
tb.setTabIcon(i, self.modified_icon if modified else QIcon())
def close_current_editor(self):
ed = self.current_editor
if ed is not None:
self.close_requested.emit(ed)
def close_all_but_current_editor(self):
self.close_all_but(self.current_editor)
def close_all_but(self, ed):
close = []
if ed is not None:
for i in xrange(self.editor_tabs.count()):
q = self.editor_tabs.widget(i)
if q is not None and q is not ed:
close.append(q)
for q in close:
self.close_requested.emit(q)
@property
def current_editor(self):
return self.editor_tabs.currentWidget()
def save_state(self):
tprefs.set('search-panel-visible', self.search_panel.isVisible())
self.search_panel.save_state()
for ed in editors.itervalues():
ed.save_state()
if self.current_editor is not None:
self.current_editor.save_state() # Ensure the current editor saves it state last
def restore_state(self):
self.search_panel.setVisible(tprefs.get('search-panel-visible', False))
self.search_panel.restore_state()
def show_find(self):
self.search_panel.show_panel()
def pre_fill_search(self, text):
self.search_panel.pre_fill(text)
def eventFilter(self, obj, event):
base = super(Central, self)
if obj is not self.editor_tabs.tabBar() or event.type() != QEvent.MouseButtonPress or event.button() not in (Qt.RightButton, Qt.MidButton):
return base.eventFilter(obj, event)
index = self.editor_tabs.tabBar().tabAt(event.pos())
if index < 0:
return base.eventFilter(obj, event)
if event.button() == Qt.MidButton:
self._close_requested(index)
ed = self.editor_tabs.widget(index)
if ed is not None:
menu = QMenu(self)
menu.addAction(actions['close-current-tab'].icon(), _('Close tab'), partial(self.close_requested.emit, ed))
menu.addSeparator()
menu.addAction(actions['close-all-but-current-tab'].icon(), _('Close other tabs'), partial(self.close_all_but, ed))
menu.exec_(self.editor_tabs.tabBar().mapToGlobal(event.pos()))
return True
# }}}
class CursorPositionWidget(QWidget): # {{{
def __init__(self, parent):
QWidget.__init__(self, parent)
self.l = QHBoxLayout(self)
self.setLayout(self.l)
self.la = QLabel('')
self.l.addWidget(self.la)
self.l.setContentsMargins(0, 0, 0, 0)
f = self.la.font()
f.setBold(False)
self.la.setFont(f)
def update_position(self, line=None, col=None, character=None):
if line is None:
self.la.setText('')
else:
try:
name = character_name(character) if character and tprefs['editor_show_char_under_cursor'] else None
except Exception:
name = None
text = _('Line: {0} : {1}').format(line, col)
if not name:
name = {'\t':'TAB'}.get(character, None)
if name and tprefs['editor_show_char_under_cursor']:
text = name + ' : ' + text
self.la.setText(text)
# }}}
class Main(MainWindow):
APP_NAME = _('Edit book')
STATE_VERSION = 0
def __init__(self, opts, notify=None):
MainWindow.__init__(self, opts, disable_automatic_gc=True)
self.setWindowTitle(self.APP_NAME)
self.boss = Boss(self, notify=notify)
self.setWindowIcon(QIcon(I('tweak.png')))
self.opts = opts
self.path_to_ebook = None
self.container = None
self.current_metadata = None
self.blocking_job = BlockingJob(self)
self.keyboard = KeyboardManager(self, config_name='shortcuts/tweak_book')
self.central = Central(self)
self.setCentralWidget(self.central)
self.check_book = Check(self)
self.spell_check = SpellCheck(parent=self)
self.toc_view = TOCViewer(self)
self.text_search = TextSearch(self)
self.saved_searches = SavedSearches(self)
self.image_browser = InsertImage(self, for_browsing=True)
self.reports = Reports(self)
self.check_external_links = CheckExternalLinks(self)
self.insert_char = CharSelect(self)
self.manage_fonts = ManageFonts(self)
self.sr_debug_output = DebugOutput(self)
self.create_actions()
self.create_toolbars()
self.create_docks()
self.create_menubar()
self.status_bar = self.statusBar()
self.status_bar.addPermanentWidget(self.boss.save_manager.status_widget)
self.cursor_position_widget = CursorPositionWidget(self)
self.status_bar.addPermanentWidget(self.cursor_position_widget)
self.status_bar_default_msg = la = QLabel(' ' + _('{0} {1} created by {2}').format(__appname__, get_version(), 'Kovid Goyal'))
la.base_template = unicode(la.text())
self.status_bar.addWidget(la)
f = self.status_bar.font()
f.setBold(True)
self.status_bar.setFont(f)
self.boss(self)
g = QApplication.instance().desktop().availableGeometry(self)
self.resize(g.width()-50, g.height()-50)
self.restore_state()
self.apply_settings()
def apply_settings(self):
self.keyboard.finalize()
self.setDockNestingEnabled(tprefs['nestable_dock_widgets'])
for v, h in product(('top', 'bottom'), ('left', 'right')):
p = 'dock_%s_%s' % (v, h)
pref = tprefs[p] or tprefs.defaults[p]
area = getattr(Qt, '%sDockWidgetArea' % capitalize({'vertical':h, 'horizontal':v}[pref]))
self.setCorner(getattr(Qt, '%s%sCorner' % tuple(map(capitalize, (v, h)))), area)
self.preview.apply_settings()
self.live_css.apply_theme()
for bar in (self.global_bar, self.tools_bar, self.plugins_bar):
bar.setIconSize(QSize(tprefs['toolbar_icon_size'], tprefs['toolbar_icon_size']))
def show_status_message(self, msg, timeout=5):
self.status_bar.showMessage(msg, int(timeout*1000))
def elided_text(self, text, width=300):
return elided_text(text, font=self.font(), width=width)
@property
def editor_tabs(self):
return self.central.editor_tabs
def create_actions(self):
group = _('Global actions')
def reg(icon, text, target, sid, keys, description, toolbar_allowed=False):
if not isinstance(icon, QIcon):
icon = QIcon(I(icon))
ac = actions[sid] = QAction(icon, text, self) if icon else QAction(text, self)
ac.setObjectName('action-' + sid)
if toolbar_allowed:
toolbar_actions[sid] = ac
if target is not None:
ac.triggered.connect(target)
if isinstance(keys, type('')):
keys = (keys,)
self.keyboard.register_shortcut(
sid, unicode(ac.text()).replace('&', ''), default_keys=keys, description=description, action=ac, group=group)
self.addAction(ac)
return ac
def treg(icon, text, target, sid, keys, description):
return reg(icon, text, target, sid, keys, description, toolbar_allowed=icon is not None)
self.action_new_file = treg('document-new.png', _('&New file (images/fonts/HTML/etc.)'), self.boss.add_file,
'new-file', (), _('Create a new file in the current book'))
self.action_import_files = treg('document-import.png', _('&Import files into book'), self.boss.add_files, 'new-files', (), _('Import files into book'))
self.action_open_book = treg('document_open.png', _('&Open book'), self.boss.open_book, 'open-book', 'Ctrl+O', _('Open a new book'))
self.action_open_book_folder = treg('mimetypes/dir.png', _('Open &folder (unzipped EPUB) as book'), partial(self.boss.open_book, open_folder=True),
'open-folder-as-book', (), _('Open a folder (unzipped EPUB) as a book'))
# Qt does not generate shortcut overrides for cmd+arrow on os x which
# means these shortcuts interfere with editing
self.action_global_undo = treg('back.png', _('&Revert to before'), self.boss.do_global_undo, 'global-undo', () if isosx else 'Ctrl+Left',
_('Revert book to before the last action (Undo)'))
self.action_global_redo = treg('forward.png', _('&Revert to after'), self.boss.do_global_redo, 'global-redo', () if isosx else 'Ctrl+Right',
_('Revert book state to after the next action (Redo)'))
self.action_save = treg('save.png', _('&Save'), self.boss.save_book, 'save-book', 'Ctrl+S', _('Save book'))
self.action_save.setEnabled(False)
        self.action_save_copy = treg('save.png', _('Save a &copy'), self.boss.save_copy, 'save-copy', 'Ctrl+Alt+S', _('Save a copy of the book'))
self.action_quit = treg('window-close.png', _('&Quit'), self.boss.quit, 'quit', 'Ctrl+Q', _('Quit'))
self.action_preferences = treg('config.png', _('&Preferences'), self.boss.preferences, 'preferences', 'Ctrl+P', _('Preferences'))
self.action_new_book = treg('plus.png', _('Create &new, empty book'), self.boss.new_book, 'new-book', (), _('Create a new, empty book'))
self.action_import_book = treg('add_book.png', _('&Import an HTML or DOCX file as a new book'),
self.boss.import_book, 'import-book', (), _('Import an HTML or DOCX file as a new book'))
self.action_quick_edit = treg('modified.png', _('&Quick open a file to edit'), self.boss.quick_open, 'quick-open', ('Ctrl+T'), _(
'Quickly open a file from the book to edit it'))
# Editor actions
group = _('Editor actions')
self.action_editor_undo = reg('edit-undo.png', _('&Undo'), self.boss.do_editor_undo, 'editor-undo', 'Ctrl+Z',
_('Undo typing'))
self.action_editor_redo = reg('edit-redo.png', _('R&edo'), self.boss.do_editor_redo, 'editor-redo', 'Ctrl+Y',
_('Redo typing'))
self.action_editor_cut = reg('edit-cut.png', _('Cut &text'), self.boss.do_editor_cut, 'editor-cut', ('Ctrl+X', 'Shift+Delete', ),
_('Cut text'))
self.action_editor_copy = reg('edit-copy.png', _('&Copy to clipboard'), self.boss.do_editor_copy, 'editor-copy', ('Ctrl+C', 'Ctrl+Insert'),
_('Copy to clipboard'))
self.action_editor_paste = reg('edit-paste.png', _('P&aste from clipboard'), self.boss.do_editor_paste, 'editor-paste', ('Ctrl+V', 'Shift+Insert', ),
_('Paste from clipboard'))
self.action_editor_cut.setEnabled(False)
self.action_editor_copy.setEnabled(False)
self.action_editor_undo.setEnabled(False)
self.action_editor_redo.setEnabled(False)
# Tool actions
group = _('Tools')
self.action_toc = treg('toc.png', _('&Edit Table of Contents'), self.boss.edit_toc, 'edit-toc', (), _('Edit Table of Contents'))
self.action_inline_toc = treg('chapters.png', _('&Insert inline Table of Contents'),
self.boss.insert_inline_toc, 'insert-inline-toc', (), _('Insert inline Table of Contents'))
self.action_fix_html_current = reg('html-fix.png', _('&Fix HTML'), partial(self.boss.fix_html, True), 'fix-html-current', (),
_('Fix HTML in the current file'))
self.action_fix_html_all = treg('html-fix.png', _('&Fix HTML - all files'), partial(self.boss.fix_html, False), 'fix-html-all', (),
_('Fix HTML in all files'))
self.action_pretty_current = reg('beautify.png', _('&Beautify current file'), partial(self.boss.pretty_print, True), 'pretty-current', (),
_('Beautify current file'))
self.action_pretty_all = treg('beautify.png', _('&Beautify all files'), partial(self.boss.pretty_print, False), 'pretty-all', (),
_('Beautify all files'))
self.action_insert_char = treg('character-set.png', _('&Insert special character'), self.boss.insert_character, 'insert-character', (),
_('Insert special character'))
self.action_rationalize_folders = treg('mimetypes/dir.png', _('&Arrange into folders'), self.boss.rationalize_folders, 'rationalize-folders', (),
_('Arrange into folders'))
self.action_set_semantics = treg('tags.png', _('Set &semantics'), self.boss.set_semantics, 'set-semantics', (),
_('Set semantics'))
self.action_filter_css = treg('filter.png', _('&Filter style information'), self.boss.filter_css, 'filter-css', (),
_('Filter style information'))
self.action_manage_fonts = treg('font.png', _('&Manage fonts'), self.boss.manage_fonts, 'manage-fonts', (), _('Manage fonts in the book'))
self.action_add_cover = treg('default_cover.png', _('Add &cover'), self.boss.add_cover, 'add-cover', (), _('Add a cover to the book'))
self.action_reports = treg(
'reports.png', _('&Reports'), self.boss.show_reports, 'show-reports', ('Ctrl+Shift+R',), _('Show a report on various aspects of the book'))
self.action_check_external_links = treg('insert-link.png', _('Check &external links'), self.boss.check_external_links, 'check-external-links', (), _(
'Check external links in the book'))
self.action_compress_images = treg('compress-image.png', _('C&ompress images losslessly'), self.boss.compress_images, 'compress-images', (), _(
'Compress images losslessly'))
self.action_transform_styles = treg('wizard.png', _('Transform &styles'), self.boss.transform_styles, 'transform-styles', (), _(
'Transform styles used in the book'))
self.action_get_ext_resources = treg('download-metadata.png', _('Download external &resources'),
self.boss.get_external_resources, 'get-external-resources', (), _(
'Download external resources in the book (images/stylesheets/etc/ that are not included in the book)'))
def ereg(icon, text, target, sid, keys, description):
return reg(icon, text, partial(self.boss.editor_action, target), sid, keys, description)
register_text_editor_actions(ereg, self.palette())
# Polish actions
group = _('Polish book')
self.action_subset_fonts = treg(
'subset-fonts.png', _('&Subset embedded fonts'), partial(
self.boss.polish, 'subset', _('Subset fonts')), 'subset-fonts', (), _('Subset embedded fonts'))
self.action_embed_fonts = treg(
'embed-fonts.png', _('&Embed referenced fonts'), partial(
self.boss.polish, 'embed', _('Embed fonts')), 'embed-fonts', (), _('Embed referenced fonts'))
self.action_smarten_punctuation = treg(
'smarten-punctuation.png', _('&Smarten punctuation (works best for English)'), partial(
self.boss.polish, 'smarten_punctuation', _('Smarten punctuation')),
'smarten-punctuation', (), _('Smarten punctuation'))
self.action_remove_unused_css = treg(
'edit-clear.png', _('Remove &unused CSS rules'), partial(
self.boss.polish, 'remove_unused_css', _('Remove unused CSS rules')), 'remove-unused-css', (), _('Remove unused CSS rules'))
# Preview actions
group = _('Preview')
self.action_auto_reload_preview = reg('auto-reload.png', _('Auto reload preview'), None, 'auto-reload-preview', (), _('Auto reload preview'))
self.action_auto_sync_preview = reg('sync-right.png', _('Sync preview position to editor position'), None, 'sync-preview-to-editor', (), _(
'Sync preview position to editor position'))
self.action_reload_preview = reg('view-refresh.png', _('Refresh preview'), None, 'reload-preview', ('F5',), _('Refresh preview'))
self.action_split_in_preview = reg('document-split.png', _('Split this file'), None, 'split-in-preview', (), _(
'Split file in the preview panel'))
self.action_find_next_preview = reg('arrow-down.png', _('Find next'), None, 'find-next-preview', (), _('Find next in preview'))
self.action_find_prev_preview = reg('arrow-up.png', _('Find previous'), None, 'find-prev-preview', (), _('Find previous in preview'))
# Search actions
group = _('Search')
self.action_find = treg('search.png', _('&Find/replace'), self.boss.show_find, 'find-replace', ('Ctrl+F',), _('Show the Find/replace panel'))
def sreg(name, text, action, overrides={}, keys=(), description=None, icon=None):
return reg(icon, text, partial(self.boss.search_action_triggered, action, overrides), name, keys, description or text.replace('&', ''))
self.action_find_next = sreg('find-next', _('Find &next'),
'find', {'direction':'down'}, ('F3', 'Ctrl+G'), _('Find next match'))
self.action_find_previous = sreg('find-previous', _('Find &previous'),
'find', {'direction':'up'}, ('Shift+F3', 'Shift+Ctrl+G'), _('Find previous match'))
self.action_replace = sreg('replace', _('&Replace'),
'replace', keys=('Ctrl+R'), description=_('Replace current match'))
self.action_replace_next = sreg('replace-next', _('&Replace and find next'),
'replace-find', {'direction':'down'}, ('Ctrl+]'), _('Replace current match and find next'))
self.action_replace_previous = sreg('replace-previous', _('R&eplace and find previous'),
'replace-find', {'direction':'up'}, ('Ctrl+['), _('Replace current match and find previous'))
self.action_replace_all = sreg('replace-all', _('Replace &all'),
'replace-all', keys=('Ctrl+A'), description=_('Replace all matches'))
self.action_count = sreg('count-matches', _('&Count all'),
'count', keys=('Ctrl+N'), description=_('Count number of matches'))
self.action_mark = reg(None, _('&Mark selected text'), self.boss.mark_selected_text, 'mark-selected-text', ('Ctrl+Shift+M',),
_('Mark selected text or unmark already marked text'))
self.action_mark.default_text = self.action_mark.text()
self.action_go_to_line = reg(None, _('Go to &line'), self.boss.go_to_line_number, 'go-to-line-number', ('Ctrl+.',), _('Go to line number'))
self.action_saved_searches = treg('folder_saved_search.png', _('Sa&ved searches'),
self.boss.saved_searches, 'saved-searches', (), _('Show the saved searches dialog'))
self.action_text_search = treg('view.png', _('&Search ignoring HTML markup'),
self.boss.show_text_search, 'text-search', (), _('Show the text search panel'))
# Check Book actions
group = _('Check book')
self.action_check_book = treg('debug.png', _('&Check book'), self.boss.check_requested, 'check-book', ('F7'), _('Check book for errors'))
self.action_spell_check_book = treg('spell-check.png', _('Check &spelling'), self.boss.spell_check_requested, 'spell-check-book', ('Alt+F7'), _(
'Check book for spelling errors'))
self.action_check_book_next = reg('forward.png', _('&Next error'), partial(
self.check_book.next_error, delta=1), 'check-book-next', ('Ctrl+F7'), _('Show next error'))
self.action_check_book_previous = reg('back.png', _('&Previous error'), partial(
self.check_book.next_error, delta=-1), 'check-book-previous', ('Ctrl+Shift+F7'), _('Show previous error'))
self.action_spell_check_next = reg('forward.png', _('&Next spelling mistake'),
self.boss.next_spell_error, 'spell-next', ('F8'), _('Go to next spelling mistake'))
# Miscellaneous actions
group = _('Miscellaneous')
self.action_create_checkpoint = treg(
'marked.png', _('&Create checkpoint'), self.boss.create_checkpoint, 'create-checkpoint', (), _(
'Create a checkpoint with the current state of the book'))
self.action_close_current_tab = reg(
'window-close.png', _('&Close current tab'), self.central.close_current_editor, 'close-current-tab', 'Ctrl+W', _(
'Close the currently open tab'))
self.action_close_all_but_current_tab = reg(
'edit-clear.png', _('&Close other tabs'), self.central.close_all_but_current_editor, 'close-all-but-current-tab', 'Ctrl+Alt+W', _(
'Close all tabs except the current tab'))
self.action_help = treg(
'help.png', _('User &Manual'), lambda : open_url(QUrl(localize_user_manual_link(
'https://manual.calibre-ebook.com/edit.html'))), 'user-manual', 'F1', _(
'Show User Manual'))
self.action_browse_images = treg(
'view-image.png', _('&Browse images in book'), self.boss.browse_images, 'browse-images', (), _(
'Browse images in the books visually'))
self.action_multiple_split = treg(
'document-split.png', _('&Split at multiple locations'), self.boss.multisplit, 'multisplit', (), _(
'Split HTML file at multiple locations'))
self.action_compare_book = treg('diff.png', _('Compare to &another book'), self.boss.compare_book, 'compare-book', (), _(
'Compare to another book'))
self.action_manage_snippets = treg(
'snippets.png', _('Manage &Snippets'), self.boss.manage_snippets, 'manage-snippets', (), _(
'Manage user created snippets'))
self.plugin_menu_actions = []
create_plugin_actions(actions, toolbar_actions, self.plugin_menu_actions)
def create_menubar(self):
if isosx:
p, q = self.create_application_menubar()
q.triggered.connect(self.action_quit.trigger)
p.triggered.connect(self.action_preferences.trigger)
f = factory(app_id='com.calibre-ebook.EditBook-%d' % os.getpid())
b = f.create_window_menubar(self)
f = b.addMenu(_('&File'))
f.addAction(self.action_new_file)
f.addAction(self.action_import_files)
f.addSeparator()
f.addAction(self.action_open_book)
f.addAction(self.action_new_book)
f.addAction(self.action_import_book)
f.addAction(self.action_open_book_folder)
self.recent_books_menu = f.addMenu(_('&Recently opened books'))
self.update_recent_books()
f.addSeparator()
f.addAction(self.action_save)
f.addAction(self.action_save_copy)
f.addSeparator()
f.addAction(self.action_compare_book)
f.addAction(self.action_quit)
e = b.addMenu(_('&Edit'))
e.addAction(self.action_global_undo)
e.addAction(self.action_global_redo)
e.addAction(self.action_create_checkpoint)
e.addSeparator()
e.addAction(self.action_editor_undo)
e.addAction(self.action_editor_redo)
e.addSeparator()
e.addAction(self.action_editor_cut)
e.addAction(self.action_editor_copy)
e.addAction(self.action_editor_paste)
e.addAction(self.action_insert_char)
e.addSeparator()
e.addAction(self.action_quick_edit)
e.addAction(self.action_preferences)
e = b.addMenu(_('&Tools'))
tm = e.addMenu(_('Table of Contents'))
tm.addAction(self.action_toc)
tm.addAction(self.action_inline_toc)
e.addAction(self.action_manage_fonts)
e.addAction(self.action_embed_fonts)
e.addAction(self.action_subset_fonts)
e.addAction(self.action_compress_images)
e.addAction(self.action_smarten_punctuation)
e.addAction(self.action_remove_unused_css)
e.addAction(self.action_transform_styles)
e.addAction(self.action_fix_html_all)
e.addAction(self.action_pretty_all)
e.addAction(self.action_rationalize_folders)
e.addAction(self.action_add_cover)
e.addAction(self.action_set_semantics)
e.addAction(self.action_filter_css)
e.addAction(self.action_spell_check_book)
er = e.addMenu(_('External &links'))
er.addAction(self.action_check_external_links)
er.addAction(self.action_get_ext_resources)
e.addAction(self.action_check_book)
e.addAction(self.action_reports)
e = b.addMenu(_('&View'))
t = e.addMenu(_('Tool&bars'))
e.addSeparator()
for name in sorted(actions, key=lambda x:sort_key(actions[x].text())):
ac = actions[name]
if name.endswith('-dock'):
e.addAction(ac)
elif name.endswith('-bar'):
t.addAction(ac)
e.addAction(self.action_browse_images)
e.addSeparator()
e.addAction(self.action_close_current_tab)
e.addAction(self.action_close_all_but_current_tab)
e = b.addMenu(_('&Search'))
a = e.addAction
a(self.action_find)
e.addSeparator()
a(self.action_find_next)
a(self.action_find_previous)
e.addSeparator()
a(self.action_replace)
a(self.action_replace_next)
a(self.action_replace_previous)
a(self.action_replace_all)
e.addSeparator()
a(self.action_count)
e.addSeparator()
a(self.action_mark)
e.addSeparator()
a(self.action_go_to_line)
e.addSeparator()
a(self.action_saved_searches)
e.aboutToShow.connect(self.search_menu_about_to_show)
e.addSeparator()
a(self.action_text_search)
if self.plugin_menu_actions:
e = b.addMenu(_('&Plugins'))
for ac in sorted(self.plugin_menu_actions, key=lambda x:sort_key(unicode(x.text()))):
e.addAction(ac)
e = b.addMenu(_('&Help'))
a = e.addAction
a(self.action_help)
a(QIcon(I('donate.png')), _('&Donate to support calibre development'), open_donate)
a(self.action_preferences)
def search_menu_about_to_show(self):
ed = self.central.current_editor
update_mark_text_action(ed)
def update_recent_books(self):
m = self.recent_books_menu
m.clear()
books = tprefs.get('recent-books', [])
for path in books:
m.addAction(self.elided_text(path, width=500), partial(self.boss.open_book, path=path))
def create_toolbars(self):
def create(text, name):
name += '-bar'
b = self.addToolBar(text)
b.setObjectName(name) # Needed for saveState
actions[name] = b.toggleViewAction()
b.setIconSize(QSize(tprefs['toolbar_icon_size'], tprefs['toolbar_icon_size']))
return b
self.global_bar = create(_('Book tool bar'), 'global')
self.tools_bar = create(_('Tools tool bar'), 'tools')
self.plugins_bar = create(_('Plugins tool bar'), 'plugins')
self.populate_toolbars(animate=True)
def populate_toolbars(self, animate=False):
self.global_bar.clear(), self.tools_bar.clear(), self.plugins_bar.clear()
def add(bar, ac):
if ac is None:
bar.addSeparator()
elif ac == 'donate':
self.donate_button = b = ThrobbingButton(self)
b.clicked.connect(open_donate)
b.setAutoRaise(True)
b.setToolTip(_('Donate to support calibre development'))
if animate:
QTimer.singleShot(10, b.start_animation)
bar.addWidget(b)
else:
try:
bar.addAction(actions[ac])
except KeyError:
if DEBUG:
prints('Unknown action for toolbar %r: %r' % (unicode(bar.objectName()), ac))
for x in tprefs['global_book_toolbar']:
add(self.global_bar, x)
for x in tprefs['global_tools_toolbar']:
add(self.tools_bar, x)
for x in tprefs['global_plugins_toolbar']:
add(self.plugins_bar, x)
self.plugins_bar.setVisible(bool(tprefs['global_plugins_toolbar']))
def create_docks(self):
def create(name, oname):
oname += '-dock'
d = QDockWidget(name, self)
d.setObjectName(oname) # Needed for saveState
ac = d.toggleViewAction()
desc = _('Toggle %s') % name.replace('&', '')
self.keyboard.register_shortcut(
oname, desc, description=desc, action=ac, group=_('Windows'))
actions[oname] = ac
setattr(self, oname.replace('-', '_'), d)
return d
d = create(_('File browser'), 'files-browser')
d.setAllowedAreas(Qt.LeftDockWidgetArea | Qt.RightDockWidgetArea)
self.file_list = FileListWidget(d)
d.setWidget(self.file_list)
self.addDockWidget(Qt.LeftDockWidgetArea, d)
d = create(_('File preview'), 'preview')
d.setAllowedAreas(Qt.LeftDockWidgetArea | Qt.RightDockWidgetArea)
self.preview = Preview(d)
d.setWidget(self.preview)
self.addDockWidget(Qt.RightDockWidgetArea, d)
d = create(_('Live CSS'), 'live-css')
d.setAllowedAreas(Qt.LeftDockWidgetArea | Qt.RightDockWidgetArea | Qt.BottomDockWidgetArea | Qt.TopDockWidgetArea)
self.live_css = LiveCSS(self.preview, parent=d)
d.setWidget(self.live_css)
self.addDockWidget(Qt.RightDockWidgetArea, d)
d.close() # Hidden by default
d = create(_('Check book'), 'check-book')
d.setAllowedAreas(Qt.LeftDockWidgetArea | Qt.RightDockWidgetArea | Qt.BottomDockWidgetArea | Qt.TopDockWidgetArea)
d.setWidget(self.check_book)
self.addDockWidget(Qt.TopDockWidgetArea, d)
d.close() # By default the check window is closed
d = create(_('Inspector'), 'inspector')
d.setAllowedAreas(Qt.LeftDockWidgetArea | Qt.RightDockWidgetArea | Qt.BottomDockWidgetArea | Qt.TopDockWidgetArea)
d.setWidget(self.preview.inspector)
self.preview.inspector.setParent(d)
self.addDockWidget(Qt.BottomDockWidgetArea, d)
d.close() # By default the inspector window is closed
d.setFeatures(d.DockWidgetClosable | d.DockWidgetMovable) # QWebInspector does not work in a floating dock
d = create(_('Table of Contents'), 'toc-viewer')
d.setAllowedAreas(Qt.LeftDockWidgetArea | Qt.RightDockWidgetArea | Qt.BottomDockWidgetArea | Qt.TopDockWidgetArea)
d.setWidget(self.toc_view)
self.addDockWidget(Qt.LeftDockWidgetArea, d)
d.close() # Hidden by default
d = create(_('Text search'), 'text-search')
d.setAllowedAreas(Qt.LeftDockWidgetArea | Qt.RightDockWidgetArea | Qt.BottomDockWidgetArea | Qt.TopDockWidgetArea)
d.setWidget(self.text_search)
self.addDockWidget(Qt.LeftDockWidgetArea, d)
d.close() # Hidden by default
d = create(_('Checkpoints'), 'checkpoints')
d.setAllowedAreas(Qt.LeftDockWidgetArea | Qt.RightDockWidgetArea | Qt.BottomDockWidgetArea | Qt.TopDockWidgetArea)
self.checkpoints = CheckpointView(self.boss.global_undo, parent=d)
d.setWidget(self.checkpoints)
self.addDockWidget(Qt.LeftDockWidgetArea, d)
d.close() # Hidden by default
d = create(_('Saved searches'), 'saved-searches')
d.setAllowedAreas(Qt.LeftDockWidgetArea | Qt.RightDockWidgetArea | Qt.BottomDockWidgetArea | Qt.TopDockWidgetArea)
d.setWidget(self.saved_searches)
self.addDockWidget(Qt.LeftDockWidgetArea, d)
d.close() # Hidden by default
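        # Illustrative sketch, not from the calibre source: an additional dock
        # would follow the same pattern as the ones above -- build it via
        # create() so its toggleViewAction() is registered under a '-dock'
        # suffixed name (the View menu in create_menubar() collects every such
        # action), give it a widget, then hide it by default:
        #
        #     d = create(_('My panel'), 'my-panel')          # hypothetical name
        #     d.setAllowedAreas(Qt.LeftDockWidgetArea | Qt.RightDockWidgetArea)
        #     d.setWidget(MyPanelWidget(d))                  # hypothetical widget
        #     self.addDockWidget(Qt.RightDockWidgetArea, d)
        #     d.close()                                      # start hidden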
def resizeEvent(self, ev):
self.blocking_job.resize(ev.size())
return super(Main, self).resizeEvent(ev)
def update_window_title(self):
fname = os.path.basename(current_container().path_to_ebook)
self.setWindowTitle(self.current_metadata.title + ' [%s] :: %s :: %s' %(current_container().book_type.upper(), fname, self.APP_NAME))
def closeEvent(self, e):
if self.boss.quit():
e.accept()
else:
e.ignore()
def save_state(self):
tprefs.set('main_window_geometry', bytearray(self.saveGeometry()))
tprefs.set('main_window_state', bytearray(self.saveState(self.STATE_VERSION)))
self.central.save_state()
self.saved_searches.save_state()
self.check_book.save_state()
self.text_search.save_state()
def restore_state(self):
geom = tprefs.get('main_window_geometry', None)
if geom is not None:
self.restoreGeometry(geom)
state = tprefs.get('main_window_state', None)
if state is not None:
self.restoreState(state, self.STATE_VERSION)
self.central.restore_state()
self.saved_searches.restore_state()
def contextMenuEvent(self, ev):
ev.ignore()
| gpl-3.0 | 1,148,555,098,009,233,500 | 49.142857 | 159 | 0.604777 | false |
andrewlewis/camera-id | test_characteristic.py | 1 | 1197 | #!/usr/bin/env python
from make_characteristic import get_noise_from_file
import cPickle
import glob
import numpy
import sys
from PIL import Image, ImageOps
TILE_OVERLAP = 8
if len(sys.argv) != 3:
print "Usage:\n\t%s noise_file_name path_with_png_files" % (sys.argv[0],)
sys.exit(0)
noise_file_name = sys.argv[1]
image_path_name = sys.argv[2]
# Load the camera noise.
camera_noise = numpy.loadtxt(noise_file_name, dtype=numpy.float)
camera_noise_average = numpy.average(camera_noise)
camera_noise -= camera_noise_average
camera_noise_norm = numpy.sqrt(numpy.sum(camera_noise * camera_noise))
file_list = glob.glob(image_path_name + '/*.png')
print "Processing %d images" % (len(file_list),)
for f in file_list:
# Get this image's noise.
image_noise = get_noise_from_file(f)[1]
image_noise_average = numpy.average(image_noise)
image_noise -= image_noise_average
image_noise_norm = numpy.sqrt(numpy.sum(image_noise * image_noise))
# Calculate the correlation between the two signals.
print "Dot product %s is: %s" % (f,
numpy.sum(camera_noise * image_noise) /
(camera_noise_norm * image_noise_norm))
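# --- Illustrative sketch, not part of the original script ---
# The loop above prints a normalized cross-correlation between the camera's
# noise fingerprint and each image's noise residual. The same metric, factored
# into a standalone helper (assuming two equally shaped numpy arrays), is:
def normalized_correlation(reference_noise, candidate_noise):
    """Return the normalized cross-correlation of two equally shaped arrays."""
    a = reference_noise - numpy.average(reference_noise)
    b = candidate_noise - numpy.average(candidate_noise)
    denominator = numpy.sqrt(numpy.sum(a * a)) * numpy.sqrt(numpy.sum(b * b))
    return numpy.sum(a * b) / denominator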
| mit | 6,780,480,121,013,508,000 | 29.692308 | 76 | 0.675856 | false |
mitodl/micromasters | dashboard/api_edx_cache_test.py | 1 | 21604 | """
Tests for the dashboard API functions that deal with the edX cached data
"""
from datetime import timedelta
from unittest.mock import patch, MagicMock, ANY
import ddt
from requests.exceptions import HTTPError
from edx_api.certificates.models import Certificate, Certificates
from edx_api.enrollments.models import Enrollment, Enrollments
from edx_api.grades.models import CurrentGrade, CurrentGrades
from backends.edxorg import EdxOrgOAuth2
from backends.exceptions import InvalidCredentialStored
from courses.factories import (
FullProgramFactory,
CourseFactory,
CourseRunFactory,
)
from dashboard import models
from dashboard.api_edx_cache import (
CachedEdxUserData,
CachedEdxDataApi,
UserCachedRunData,
)
from dashboard.factories import (
CachedEnrollmentFactory,
CachedCertificateFactory,
CachedCurrentGradeFactory,
UserCacheRefreshTimeFactory,
)
from dashboard.models import (
UserCacheRefreshTime,
CachedEnrollment,
)
from micromasters.factories import UserFactory
from micromasters.utils import (
load_json_from_file,
now_in_utc,
)
from search.base import MockedESTestCase
class CachedEdxUserDataTests(MockedESTestCase):
"""
Tests for the CachedEdxUserData class
"""
@classmethod
def setUpTestData(cls):
cls.user = UserFactory.create()
# Create Programs, Courses, CourseRuns...
cls.p1_course_run_keys = ['p1_course_run']
cls.p2_course_run_keys = ['p2_course_run_1', 'p2_course_run_2']
cls.p1_course_run = CourseRunFactory.create(edx_course_key=cls.p1_course_run_keys[0])
p2 = FullProgramFactory.create()
first_course = p2.course_set.first()
extra_course = CourseFactory.create(program=p2)
cls.p2_course_run_1 = CourseRunFactory.create(course=first_course, edx_course_key=cls.p2_course_run_keys[0])
cls.p2_course_run_2 = CourseRunFactory.create(course=extra_course, edx_course_key=cls.p2_course_run_keys[1])
all_course_runs = [cls.p1_course_run, cls.p2_course_run_1, cls.p2_course_run_2]
# Create cached edX data
cls.enrollments = [
CachedEnrollmentFactory.create(user=cls.user, course_run=course_run) for course_run in all_course_runs
]
cls.certificates = [
CachedCertificateFactory.create(user=cls.user, course_run=course_run) for course_run in all_course_runs
]
cls.current_grades = [
CachedCurrentGradeFactory.create(user=cls.user, course_run=course_run) for course_run in all_course_runs
]
def assert_edx_data_has_given_ids(self, edx_user_data, ids):
"""Asserts that all edX object course id sets match the given list of ids"""
assert sorted(edx_user_data.enrollments.get_enrolled_course_ids()) == sorted(ids)
assert sorted(edx_user_data.certificates.all_courses_verified_certs) == sorted(ids)
assert sorted(edx_user_data.current_grades.all_course_ids) == sorted(ids)
def test_edx_data_fetch_and_set(self):
"""Test that a user's edX data is properly fetched and set onto object properties"""
edx_user_data = CachedEdxUserData(self.user)
assert isinstance(edx_user_data.enrollments, Enrollments)
assert isinstance(edx_user_data.certificates, Certificates)
assert isinstance(edx_user_data.current_grades, CurrentGrades)
self.assert_edx_data_has_given_ids(edx_user_data, self.p1_course_run_keys + self.p2_course_run_keys)
def test_edx_data_with_program(self):
"""Test that a user's edX data is filtered by program when specified"""
p1_course_run_program = self.p1_course_run.course.program
edx_user_data = CachedEdxUserData(self.user, program=p1_course_run_program)
self.assert_edx_data_has_given_ids(edx_user_data, self.p1_course_run_keys)
p2_course_run_program = self.p2_course_run_1.course.program
edx_user_data = CachedEdxUserData(self.user, program=p2_course_run_program)
self.assert_edx_data_has_given_ids(edx_user_data, self.p2_course_run_keys)
def test_get_run_data(self):
"""Test for the get_run_data method"""
edx_user_data = CachedEdxUserData(self.user)
run_data = edx_user_data.get_run_data(self.p1_course_run_keys[0])
assert isinstance(run_data, UserCachedRunData)
assert isinstance(run_data.enrollment, Enrollment)
assert isinstance(run_data.certificate, Certificate)
assert isinstance(run_data.current_grade, CurrentGrade)
assert run_data.enrollment.course_id == self.p1_course_run_keys[0]
assert run_data.certificate.course_id == self.p1_course_run_keys[0]
assert run_data.current_grade.course_id == self.p1_course_run_keys[0]
@ddt.ddt
class CachedEdxDataApiTests(MockedESTestCase):
"""
Tests for the CachedEdxDataApi class
"""
@classmethod
def setUpTestData(cls):
"""
Set up data
"""
cls.user = UserFactory.create()
cls.user.social_auth.create(
provider=EdxOrgOAuth2.name,
uid="{}_edx".format(cls.user.username),
extra_data={"access_token": "fooooootoken"}
)
certificates_json = load_json_from_file('dashboard/fixtures/certificates.json')
cls.certificates = Certificates([Certificate(cert_json) for cert_json in certificates_json])
enrollments_json = load_json_from_file('dashboard/fixtures/user_enrollments.json')
cls.enrollments = Enrollments(enrollments_json)
        # all the grades need to have the same username
current_grades_json = []
for grade in load_json_from_file('dashboard/fixtures/current_grades.json'):
grade.update({'username': cls.user.username})
current_grades_json.append(grade)
cls.current_grades = CurrentGrades([CurrentGrade(grade_json) for grade_json in current_grades_json])
cls.certificates_ids = set(cls.certificates.all_courses_certs)
cls.verified_certificates_ids = set(cls.certificates.all_courses_verified_certs)
cls.enrollment_ids = set(cls.enrollments.get_enrolled_course_ids())
cls.grades_ids = set(cls.current_grades.all_course_ids)
cls.all_course_run_ids = list(
cls.certificates_ids | cls.enrollment_ids | cls.grades_ids
)
cls.all_runs = []
for course_id in cls.all_course_run_ids:
cls.all_runs.append(CourseRunFactory.create(
edx_course_key=course_id,
course__program__live=True,
))
cls.edx_client = MagicMock()
cls.edx_client.enrollments.get_student_enrollments.return_value = cls.enrollments
cls.edx_client.certificates.get_student_certificates.return_value = cls.certificates
cls.edx_client.current_grades.get_student_current_grades.return_value = cls.current_grades
def assert_cache_in_db(self, enrollment_keys=None, certificate_keys=None, grades_keys=None):
"""
Helper function to assert the course keys in the database cache
"""
enrollment_keys = enrollment_keys or []
certificate_keys = certificate_keys or []
grades_keys = grades_keys or []
enrollments = CachedEdxDataApi.get_cached_edx_data(self.user, CachedEdxDataApi.ENROLLMENT)
certificates = CachedEdxDataApi.get_cached_edx_data(self.user, CachedEdxDataApi.CERTIFICATE)
grades = CachedEdxDataApi.get_cached_edx_data(self.user, CachedEdxDataApi.CURRENT_GRADE)
assert sorted(list(enrollments.enrollments.keys())) == sorted(enrollment_keys)
assert sorted(list(certificates.certificates.keys())) == sorted(certificate_keys)
assert sorted(list(grades.current_grades.keys())) == sorted(grades_keys)
def test_constants(self):
"""Tests class constants"""
assert CachedEdxDataApi.SUPPORTED_CACHES == (
CachedEdxDataApi.ENROLLMENT,
CachedEdxDataApi.CERTIFICATE,
CachedEdxDataApi.CURRENT_GRADE,
)
assert CachedEdxDataApi.CACHED_EDX_MODELS == {
CachedEdxDataApi.ENROLLMENT: models.CachedEnrollment,
CachedEdxDataApi.CERTIFICATE: models.CachedCertificate,
CachedEdxDataApi.CURRENT_GRADE: models.CachedCurrentGrade,
}
assert CachedEdxDataApi.CACHE_EXPIRATION_DELTAS == {
CachedEdxDataApi.ENROLLMENT: timedelta(minutes=5),
CachedEdxDataApi.CERTIFICATE: timedelta(hours=6),
CachedEdxDataApi.CURRENT_GRADE: timedelta(hours=1),
}
def test_get_cached_edx_data(self):
"""
Test for get_cached_edx_data
"""
with self.assertRaises(ValueError):
CachedEdxDataApi.get_cached_edx_data(self.user, 'footype')
self.assert_cache_in_db()
for run in self.all_runs:
CachedEnrollmentFactory.create(user=self.user, course_run=run)
CachedCertificateFactory.create(user=self.user, course_run=run)
CachedCurrentGradeFactory.create(user=self.user, course_run=run)
self.assert_cache_in_db(self.all_course_run_ids, self.all_course_run_ids, self.all_course_run_ids)
def test_update_cache_last_access(self):
"""Test for update_cache_last_access"""
with self.assertRaises(ValueError):
CachedEdxDataApi.update_cache_last_access(self.user, 'footype')
assert UserCacheRefreshTime.objects.filter(user=self.user).exists() is False
CachedEdxDataApi.update_cache_last_access(self.user, CachedEdxDataApi.ENROLLMENT)
cache_time = UserCacheRefreshTime.objects.get(user=self.user)
assert cache_time.enrollment <= now_in_utc()
assert cache_time.certificate is None
assert cache_time.current_grade is None
old_timestamp = now_in_utc() - timedelta(days=1)
CachedEdxDataApi.update_cache_last_access(self.user, CachedEdxDataApi.ENROLLMENT, old_timestamp)
cache_time.refresh_from_db()
assert cache_time.enrollment == old_timestamp
def test_is_cache_fresh(self):
"""Test for is_cache_fresh"""
with self.assertRaises(ValueError):
CachedEdxDataApi.is_cache_fresh(self.user, 'footype')
# if there is no entry in the table, the cache is not fresh
assert UserCacheRefreshTime.objects.filter(user=self.user).exists() is False
for cache_type in CachedEdxDataApi.SUPPORTED_CACHES:
assert CachedEdxDataApi.is_cache_fresh(self.user, cache_type) is False
now = now_in_utc()
user_cache = UserCacheRefreshTimeFactory.create(
user=self.user,
enrollment=now,
certificate=now,
current_grade=now,
)
for cache_type in CachedEdxDataApi.SUPPORTED_CACHES:
assert CachedEdxDataApi.is_cache_fresh(self.user, cache_type) is True
# moving back the timestamp of one day, makes the cache not fresh again
yesterday = now - timedelta(days=1)
user_cache.enrollment = yesterday
user_cache.certificate = yesterday
user_cache.current_grade = yesterday
user_cache.save()
for cache_type in CachedEdxDataApi.SUPPORTED_CACHES:
assert CachedEdxDataApi.is_cache_fresh(self.user, cache_type) is False
@ddt.data('certificate', 'enrollment', 'current_grade')
def test_are_all_caches_fresh(self, cache_type):
"""Test for are_all_caches_fresh"""
assert UserCacheRefreshTime.objects.filter(user=self.user).exists() is False
assert CachedEdxDataApi.are_all_caches_fresh(self.user) is False
now = now_in_utc()
yesterday = now - timedelta(days=1)
user_cache = UserCacheRefreshTimeFactory.create(
user=self.user,
enrollment=now,
certificate=now,
current_grade=now,
)
assert CachedEdxDataApi.are_all_caches_fresh(self.user) is True
setattr(user_cache, cache_type, yesterday)
user_cache.save()
assert CachedEdxDataApi.are_all_caches_fresh(self.user) is False
setattr(user_cache, cache_type, now)
user_cache.save()
assert CachedEdxDataApi.are_all_caches_fresh(self.user) is True
@patch('search.tasks.index_users', autospec=True)
def test_update_cached_enrollment(self, mocked_index):
"""Test for update_cached_enrollment"""
course_id = list(self.enrollment_ids)[0]
enrollment = self.enrollments.get_enrollment_for_course(course_id)
self.assert_cache_in_db()
# normal update that creates also the entry
CachedEdxDataApi.update_cached_enrollment(self.user, enrollment, course_id, False)
self.assert_cache_in_db(enrollment_keys=[course_id])
cached_enr = CachedEnrollment.objects.get(user=self.user, course_run__edx_course_key=course_id)
assert cached_enr.data == enrollment.json
assert mocked_index.delay.called is False
# update of different data with indexing
enr_json = {
"course_details": {
"course_id": course_id,
},
"is_active": True,
"mode": "verified",
"user": self.user.username
}
enrollment_new = Enrollment(enr_json)
CachedEdxDataApi.update_cached_enrollment(self.user, enrollment_new, course_id, True)
self.assert_cache_in_db(enrollment_keys=[course_id])
cached_enr.refresh_from_db()
assert cached_enr.data == enr_json
mocked_index.delay.assert_any_call([self.user.id], check_if_changed=True)
@patch('search.tasks.index_users', autospec=True)
def test_update_cached_enrollments(self, mocked_index):
"""Test for update_cached_enrollments."""
self.assert_cache_in_db()
assert UserCacheRefreshTime.objects.filter(user=self.user).exists() is False
CachedEdxDataApi.update_cached_enrollments(self.user, self.edx_client)
self.assert_cache_in_db(enrollment_keys=self.enrollment_ids)
cache_time = UserCacheRefreshTime.objects.get(user=self.user)
now = now_in_utc()
assert cache_time.enrollment <= now
assert mocked_index.delay.called is True
mocked_index.reset_mock()
# add another cached element for another course that will be removed by the refresh
cached_enr = CachedEnrollmentFactory.create(user=self.user)
self.assert_cache_in_db(enrollment_keys=list(self.enrollment_ids) + [cached_enr.course_run.edx_course_key])
CachedEdxDataApi.update_cached_enrollments(self.user, self.edx_client)
self.assert_cache_in_db(enrollment_keys=self.enrollment_ids)
cache_time.refresh_from_db()
assert cache_time.enrollment >= now
mocked_index.delay.assert_called_once_with([self.user.id], check_if_changed=True)
@patch('search.tasks.index_users', autospec=True)
def test_update_cached_certificates(self, mocked_index):
"""Test for update_cached_certificates."""
assert self.verified_certificates_ids.issubset(self.certificates_ids)
self.assert_cache_in_db()
assert UserCacheRefreshTime.objects.filter(user=self.user).exists() is False
CachedEdxDataApi.update_cached_certificates(self.user, self.edx_client)
self.assert_cache_in_db(certificate_keys=self.verified_certificates_ids)
cache_time = UserCacheRefreshTime.objects.get(user=self.user)
now = now_in_utc()
assert cache_time.certificate <= now
assert mocked_index.delay.called is True
mocked_index.reset_mock()
# add another cached element for another course that will be removed by the refresh
cached_cert = CachedCertificateFactory.create(user=self.user)
self.assert_cache_in_db(
certificate_keys=list(self.verified_certificates_ids) + [cached_cert.course_run.edx_course_key])
CachedEdxDataApi.update_cached_certificates(self.user, self.edx_client)
self.assert_cache_in_db(certificate_keys=self.verified_certificates_ids)
cache_time.refresh_from_db()
assert cache_time.certificate >= now
mocked_index.delay.assert_called_once_with([self.user.id], check_if_changed=True)
@patch('search.tasks.index_users', autospec=True)
def test_update_cached_current_grades(self, mocked_index):
"""Test for update_cached_current_grades."""
self.assert_cache_in_db()
assert UserCacheRefreshTime.objects.filter(user=self.user).exists() is False
CachedEdxDataApi.update_cached_current_grades(self.user, self.edx_client)
self.assert_cache_in_db(grades_keys=self.grades_ids)
cache_time = UserCacheRefreshTime.objects.get(user=self.user)
now = now_in_utc()
assert cache_time.current_grade <= now
assert mocked_index.delay.called is True
mocked_index.reset_mock()
# add another cached element for another course that will be removed by the refresh
cached_grade = CachedCurrentGradeFactory.create(user=self.user)
self.assert_cache_in_db(grades_keys=list(self.grades_ids) + [cached_grade.course_run.edx_course_key])
CachedEdxDataApi.update_cached_current_grades(self.user, self.edx_client)
self.assert_cache_in_db(grades_keys=self.grades_ids)
cache_time.refresh_from_db()
assert cache_time.current_grade >= now
mocked_index.delay.assert_called_once_with([self.user.id], check_if_changed=True)
@patch('dashboard.api_edx_cache.CachedEdxDataApi.update_cached_current_grades')
@patch('dashboard.api_edx_cache.CachedEdxDataApi.update_cached_certificates')
@patch('dashboard.api_edx_cache.CachedEdxDataApi.update_cached_enrollments')
def test_update_cache_if_expired(self, mock_enr, mock_cert, mock_grade):
"""Test for update_cache_if_expired"""
all_mocks = (mock_enr, mock_cert, mock_grade, )
with self.assertRaises(ValueError):
CachedEdxDataApi.update_cache_if_expired(self.user, self.edx_client, 'footype')
# if there is no entry in the UserCacheRefreshTime the cache is not fresh and needs to be refreshed
for cache_type in CachedEdxDataApi.SUPPORTED_CACHES:
# the following is possible only because a mocked function is called
assert UserCacheRefreshTime.objects.filter(user=self.user).exists() is False
CachedEdxDataApi.update_cache_if_expired(self.user, self.edx_client, cache_type)
for mock_func in all_mocks:
assert mock_func.called is True
mock_func.reset_mock()
# if we create a fresh entry in the UserCacheRefreshTime, no update is called
now = now_in_utc()
user_cache = UserCacheRefreshTimeFactory.create(
user=self.user,
enrollment=now,
certificate=now,
current_grade=now,
)
for cache_type in CachedEdxDataApi.SUPPORTED_CACHES:
CachedEdxDataApi.update_cache_if_expired(self.user, self.edx_client, cache_type)
for mock_func in all_mocks:
assert mock_func.called is False
mock_func.reset_mock()
# moving back the last access time, the functions are called again
yesterday = now - timedelta(days=1)
user_cache.enrollment = yesterday
user_cache.certificate = yesterday
user_cache.current_grade = yesterday
user_cache.save()
for cache_type in CachedEdxDataApi.SUPPORTED_CACHES:
CachedEdxDataApi.update_cache_if_expired(self.user, self.edx_client, cache_type)
for mock_func in all_mocks:
assert mock_func.called is True
@patch('dashboard.api_edx_cache.CachedEdxDataApi.update_cached_enrollments')
@ddt.data(400, 401, 405,)
def test_update_cache_if_expired_http_errors(self, status_code, mock_enr):
"""
Test for update_cache_if_expired in case a backend function raises an HTTPError
"""
def raise_http_error(*args, **kwargs): # pylint: disable=unused-argument
"""Mock function to raise an exception"""
error = HTTPError()
error.response = MagicMock()
error.response.status_code = status_code
raise error
mock_enr.side_effect = raise_http_error
if status_code in (400, 401):
with self.assertRaises(InvalidCredentialStored):
CachedEdxDataApi.update_cache_if_expired(self.user, self.edx_client, CachedEdxDataApi.ENROLLMENT)
else:
with self.assertRaises(HTTPError):
CachedEdxDataApi.update_cache_if_expired(self.user, self.edx_client, CachedEdxDataApi.ENROLLMENT)
@patch('dashboard.api_edx_cache.CachedEdxDataApi.update_cached_current_grades')
@patch('dashboard.api_edx_cache.CachedEdxDataApi.update_cached_certificates')
@patch('dashboard.api_edx_cache.CachedEdxDataApi.update_cached_enrollments')
@patch('backends.utils.refresh_user_token', autospec=True)
def test_update_all_cached_grade_data(self, mock_refr, mock_enr, mock_cert, mock_grade):
"""Test for update_all_cached_grade_data"""
for mock_func in (mock_refr, mock_enr, mock_cert, mock_grade, ):
assert mock_func.called is False
CachedEdxDataApi.update_all_cached_grade_data(self.user)
assert mock_enr.called is False
mock_refr.assert_called_once_with(self.user.social_auth.get(provider=EdxOrgOAuth2.name))
for mock_func in (mock_cert, mock_grade, ):
mock_func.assert_called_once_with(self.user, ANY)
| bsd-3-clause | 3,159,953,239,633,138,000 | 47.767494 | 116 | 0.679133 | false |
unt-libraries/django-nomination | nomination/migrations/0004_auto_20190927_1904.py | 1 | 7862 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.24 on 2019-09-27 19:04
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('nomination', '0003_project_archive_url'),
]
operations = [
migrations.AlterField(
model_name='metadata',
name='name',
field=models.SlugField(help_text='Assign a name for the metadata field (letters, numbers, underscores, and hyphens are permissible).'),
),
migrations.AlterField(
model_name='metadata',
name='value_sets',
field=models.ManyToManyField(blank=True, help_text='In addition to values manually assigned, values in selected pre-defined sets will also be available to metadata fields.', to='nomination.ValueSet', verbose_name='metadata value sets'),
),
migrations.AlterField(
model_name='metadata',
name='values',
field=models.ManyToManyField(blank=True, help_text='Allowed value for metadata field.', through='nomination.Metadata_Values', to='nomination.Value', verbose_name='values'),
),
migrations.AlterField(
model_name='metadata_values',
name='value_order',
field=models.PositiveIntegerField(default=1, help_text='Change the ordering of the value fields, ordered lowest to highest'),
),
migrations.AlterField(
model_name='nominator',
name='nominator_email',
field=models.CharField(help_text='An email address for identifying your nominations in the system.', max_length=100),
),
migrations.AlterField(
model_name='nominator',
name='nominator_institution',
field=models.CharField(help_text='Your institutional affiliation.', max_length=100),
),
migrations.AlterField(
model_name='nominator',
name='nominator_name',
field=models.CharField(help_text='Your name.', max_length=100),
),
migrations.AlterField(
model_name='project',
name='admin_email',
field=models.CharField(help_text='Email address of project administrator.', max_length=80),
),
migrations.AlterField(
model_name='project',
name='admin_name',
field=models.CharField(help_text='Name of project administrator.', max_length=80),
),
migrations.AlterField(
model_name='project',
name='archive_url',
field=models.URLField(blank=True, help_text='Base URL for accessing site archives.', null=True),
),
migrations.AlterField(
model_name='project',
name='nomination_end',
field=models.DateTimeField(help_text='Date to stop accepting URL nominations.'),
),
migrations.AlterField(
model_name='project',
name='nomination_start',
field=models.DateTimeField(help_text='Date to start accepting URL nominations.'),
),
migrations.AlterField(
model_name='project',
name='project_description',
field=models.TextField(help_text='Description of project.'),
),
migrations.AlterField(
model_name='project',
name='project_end',
field=models.DateTimeField(help_text='Ending date for project.'),
),
migrations.AlterField(
model_name='project',
name='project_name',
field=models.CharField(help_text='Name given to nomination project.', max_length=250),
),
migrations.AlterField(
model_name='project',
name='project_slug',
field=models.CharField(help_text='Up to 25 character identifier for the project (used in URLS, etc.).', max_length=25, unique=True),
),
migrations.AlterField(
model_name='project',
name='project_start',
field=models.DateTimeField(help_text='Starting date for project.'),
),
migrations.AlterField(
model_name='project',
name='project_url',
field=models.CharField(help_text='Project affiliated URL.', max_length=255),
),
migrations.AlterField(
model_name='project_metadata',
name='description',
field=models.CharField(help_text='Used as a descriptive title for the metadata field on Web forms.', max_length=255),
),
migrations.AlterField(
model_name='project_metadata',
name='form_type',
field=models.CharField(choices=[('checkbox', 'checkbox'), ('date', 'date'), ('radio', 'radio button'), ('select', 'menu-select multiple values'), ('selectsingle', 'menu-select single value'), ('text', 'text input'), ('textarea', 'text area')], help_text='Type of HTML form element that should represent the field.', max_length=30),
),
migrations.AlterField(
model_name='project_metadata',
name='help',
field=models.CharField(blank=True, help_text='String used on Web forms to prompt users for accurate data.', max_length=255),
),
migrations.AlterField(
model_name='project_metadata',
name='metadata_order',
field=models.PositiveIntegerField(default=1, help_text='Change the ordering of the metadata fields, ordered lowest to highest'),
),
migrations.AlterField(
model_name='project_metadata',
name='required',
field=models.BooleanField(help_text='Are users required to submit data for this field when nominating a URL?'),
),
migrations.AlterField(
model_name='url',
name='attribute',
field=models.CharField(help_text='A property of the URL you wish to describe.', max_length=255),
),
migrations.AlterField(
model_name='url',
name='entity',
field=models.CharField(help_text='The URL to nominate for capture.', max_length=300),
),
migrations.AlterField(
model_name='url',
name='url_project',
field=models.ForeignKey(help_text='The project for which you want to add a URL.', on_delete=django.db.models.deletion.CASCADE, to='nomination.Project'),
),
migrations.AlterField(
model_name='url',
name='value',
field=models.CharField(help_text='The value of the associated attribute.', max_length=255),
),
migrations.AlterField(
model_name='value',
name='key',
field=models.CharField(help_text='Up to 35 character identifier for the metadata field.', max_length=35, unique=True),
),
migrations.AlterField(
model_name='value',
name='value',
field=models.CharField(help_text='Permitted value for associated metadata field.', max_length=255),
),
migrations.AlterField(
model_name='valueset',
name='name',
field=models.CharField(help_text='Name given to value set.', max_length=75, unique=True),
),
migrations.AlterField(
model_name='valueset',
name='values',
field=models.ManyToManyField(through='nomination.Valueset_Values', to='nomination.Value', verbose_name='values'),
),
migrations.AlterField(
model_name='valueset_values',
name='value_order',
field=models.PositiveIntegerField(default=1, help_text='Change the ordering of the value fields, ordered lowest to highest'),
),
]
| bsd-3-clause | 2,851,599,570,489,149,000 | 43.670455 | 343 | 0.598067 | false |
medunigraz/outpost | src/outpost/django/kages/api.py | 1 | 1284 | import logging
import ldap
from rest_framework import (
exceptions,
permissions,
viewsets,
)
from rest_framework.response import Response
from . import models
from .conf import settings
logger = logging.getLogger(__name__)
class TranslateViewSet(viewsets.ViewSet):
permission_classes = (
permissions.IsAuthenticated,
)
def list(self, request):
return Response()
def retrieve(self, request, pk=None):
if not pk:
return Response(False)
try:
conn = ldap.initialize(settings.AUTH_LDAP_SERVER_URI)
conn.simple_bind_s(
settings.AUTH_LDAP_BIND_DN,
settings.AUTH_LDAP_BIND_PASSWORD
)
result = conn.search_s(
settings.AUTH_LDAP_USER_SEARCH.base_dn,
settings.AUTH_LDAP_USER_SEARCH.scope,
settings.KAGES_PERS_ID_FILTER.format(id=int(pk)),
settings.KAGES_PERS_FIELDS
)
found = len(result) == 1
except Exception as e:
logger.warn(
f'LDAP query failed when matching KAGes ID: {e}'
)
found = False
logger.debug(f'Matched KAGes ID: {found}')
return Response({'exists': found})
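# Illustrative behaviour sketch, not part of the original module: registered on
# a DRF router (for example ``router.register(r'translate', TranslateViewSet,
# basename='translate')`` -- an assumption, not shown here), the detail route
# runs the LDAP search above, so ``GET .../translate/12345/`` returns
# ``{"exists": true}`` when exactly one directory entry matches the KAGes
# personnel id and ``{"exists": false}`` on no match, multiple matches, or any
# LDAP error (which is logged as a warning).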
| bsd-2-clause | -7,632,317,316,049,999,000 | 26.319149 | 65 | 0.576324 | false |
jscn/django | django/contrib/contenttypes/models.py | 1 | 7319 | from __future__ import unicode_literals
from django.apps import apps
from django.db import models
from django.db.utils import IntegrityError, OperationalError, ProgrammingError
from django.utils.encoding import force_text, python_2_unicode_compatible
from django.utils.translation import ugettext_lazy as _
class ContentTypeManager(models.Manager):
use_in_migrations = True
def __init__(self, *args, **kwargs):
super(ContentTypeManager, self).__init__(*args, **kwargs)
# Cache shared by all the get_for_* methods to speed up
# ContentType retrieval.
self._cache = {}
def get_by_natural_key(self, app_label, model):
try:
ct = self._cache[self.db][(app_label, model)]
except KeyError:
ct = self.get(app_label=app_label, model=model)
self._add_to_cache(self.db, ct)
return ct
def _get_opts(self, model, for_concrete_model):
if for_concrete_model:
model = model._meta.concrete_model
return model._meta
def _get_from_cache(self, opts):
key = (opts.app_label, opts.model_name)
return self._cache[self.db][key]
def get_for_model(self, model, for_concrete_model=True):
"""
Returns the ContentType object for a given model, creating the
ContentType if necessary. Lookups are cached so that subsequent lookups
for the same model don't hit the database.
"""
opts = self._get_opts(model, for_concrete_model)
try:
return self._get_from_cache(opts)
except KeyError:
pass
# The ContentType entry was not found in the cache, therefore we
# proceed to load or create it.
try:
try:
# We start with get() and not get_or_create() in order to use
# the db_for_read (see #20401).
ct = self.get(app_label=opts.app_label, model=opts.model_name)
except self.model.DoesNotExist:
# Not found in the database; we proceed to create it. This time we
# use get_or_create to take care of any race conditions.
ct, created = self.get_or_create(
app_label=opts.app_label,
model=opts.model_name,
)
except (OperationalError, ProgrammingError, IntegrityError):
# It's possible to migrate a single app before contenttypes,
# as it's not a required initial dependency (it's contrib!)
# Have a nice error for this.
raise RuntimeError(
"Error creating new content types. Please make sure contenttypes "
"is migrated before trying to migrate apps individually."
)
self._add_to_cache(self.db, ct)
return ct
def get_for_models(self, *models, **kwargs):
"""
Given *models, returns a dictionary mapping {model: content_type}.
"""
for_concrete_models = kwargs.pop('for_concrete_models', True)
# Final results
results = {}
# models that aren't already in the cache
needed_app_labels = set()
needed_models = set()
needed_opts = set()
for model in models:
opts = self._get_opts(model, for_concrete_models)
try:
ct = self._get_from_cache(opts)
except KeyError:
needed_app_labels.add(opts.app_label)
needed_models.add(opts.model_name)
needed_opts.add(opts)
else:
results[model] = ct
if needed_opts:
cts = self.filter(
app_label__in=needed_app_labels,
model__in=needed_models
)
for ct in cts:
model = ct.model_class()
if model._meta in needed_opts:
results[model] = ct
needed_opts.remove(model._meta)
self._add_to_cache(self.db, ct)
for opts in needed_opts:
# These weren't in the cache, or the DB, create them.
ct = self.create(
app_label=opts.app_label,
model=opts.model_name,
)
self._add_to_cache(self.db, ct)
results[ct.model_class()] = ct
return results
def get_for_id(self, id):
"""
Lookup a ContentType by ID. Uses the same shared cache as get_for_model
        (though ContentTypes are obviously not created on-the-fly by get_for_id).
"""
try:
ct = self._cache[self.db][id]
except KeyError:
# This could raise a DoesNotExist; that's correct behavior and will
# make sure that only correct ctypes get stored in the cache dict.
ct = self.get(pk=id)
self._add_to_cache(self.db, ct)
return ct
def clear_cache(self):
"""
Clear out the content-type cache. This needs to happen during database
flushes to prevent caching of "stale" content type IDs (see
django.contrib.contenttypes.management.update_contenttypes for where
this gets called).
"""
self._cache.clear()
def _add_to_cache(self, using, ct):
"""Insert a ContentType into the cache."""
# Note it's possible for ContentType objects to be stale; model_class() will return None.
# Hence, there is no reliance on model._meta.app_label here, just using the model fields instead.
key = (ct.app_label, ct.model)
self._cache.setdefault(using, {})[key] = ct
self._cache.setdefault(using, {})[ct.id] = ct
@python_2_unicode_compatible
class ContentType(models.Model):
app_label = models.CharField(max_length=100)
model = models.CharField(_('python model class name'), max_length=100)
objects = ContentTypeManager()
class Meta:
verbose_name = _('content type')
verbose_name_plural = _('content types')
db_table = 'django_content_type'
unique_together = (('app_label', 'model'),)
def __str__(self):
return self.name
@property
def name(self):
model = self.model_class()
if not model:
return self.model
return force_text(model._meta.verbose_name)
def model_class(self):
"Returns the Python model class for this type of content."
try:
return apps.get_model(self.app_label, self.model)
except LookupError:
return None
def get_object_for_this_type(self, **kwargs):
"""
Returns an object of this type for the keyword arguments given.
Basically, this is a proxy around this object_type's get_object() model
method. The ObjectNotExist exception, if thrown, will not be caught,
so code that calls this method should catch it.
"""
return self.model_class()._base_manager.using(self._state.db).get(**kwargs)
def get_all_objects_for_this_type(self, **kwargs):
"""
Returns all objects of this type for the keyword arguments given.
"""
return self.model_class()._base_manager.using(self._state.db).filter(**kwargs)
def natural_key(self):
return (self.app_label, self.model)
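# --- Illustrative usage sketch, not part of Django itself ---
# The manager above backs ``ContentType.objects``; the ``Article`` model below
# is hypothetical and only shows how the cached lookups are normally consumed.
def _example_content_type_usage():
    from myapp.models import Article  # assumption: any concrete model class
    ct = ContentType.objects.get_for_model(Article)        # one query, then cached
    same_ct = ContentType.objects.get_for_id(ct.id)        # served from the same cache
    mapping = ContentType.objects.get_for_models(Article)  # {Article: <ContentType instance>}
    obj = ct.get_object_for_this_type(pk=1)                # fetches the Article with pk=1
    return ct is same_ct, mapping, obj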
| bsd-3-clause | 8,636,261,011,933,846,000 | 37.119792 | 105 | 0.585872 | false |
Acidburn0zzz/archiso-gui | releng/root-image/usr/share/cnchi/src/misc.py | 1 | 28903 | #!/usr/bin/python
# -*- coding: UTF-8 -*-
#
# Copyright (c) 2012 Canonical Ltd.
# Copyright (c) 2013 Antergos
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
from collections import namedtuple
import contextlib
import grp
import os
import pwd
import re
import shutil
import subprocess
import syslog
import socket
import osextras
def copytree(src, dst, symlinks=False, ignore=None):
for item in os.listdir(src):
s = os.path.join(src, item)
d = os.path.join(dst, item)
if os.path.isdir(s):
shutil.copytree(s, d, symlinks, ignore)
else:
shutil.copy2(s, d)
def utf8(s, errors="strict"):
"""Decode a string as UTF-8 if it isn't already Unicode."""
if isinstance(s, str):
return s
else:
return str(s, "utf-8", errors)
def is_swap(device):
try:
with open('/proc/swaps') as fp:
for line in fp:
if line.startswith(device + ' '):
return True
except Exception:
pass
return False
_dropped_privileges = 0
def set_groups_for_uid(uid):
if uid == os.geteuid() or uid == os.getuid():
return
user = pwd.getpwuid(uid).pw_name
try:
os.setgroups([g.gr_gid for g in grp.getgrall() if user in g.gr_mem])
except OSError:
import traceback
for line in traceback.format_exc().split('\n'):
syslog.syslog(syslog.LOG_ERR, line)
def drop_all_privileges():
# gconf needs both the UID and effective UID set.
global _dropped_privileges
uid = os.environ.get('SUDO_UID')
gid = os.environ.get('SUDO_GID')
if uid is not None:
uid = int(uid)
set_groups_for_uid(uid)
if gid is not None:
gid = int(gid)
os.setregid(gid, gid)
if uid is not None:
uid = int(uid)
os.setreuid(uid, uid)
os.environ['HOME'] = pwd.getpwuid(uid).pw_dir
os.environ['LOGNAME'] = pwd.getpwuid(uid).pw_name
_dropped_privileges = None
def drop_privileges():
global _dropped_privileges
assert _dropped_privileges is not None
if _dropped_privileges == 0:
uid = os.environ.get('SUDO_UID')
gid = os.environ.get('SUDO_GID')
if uid is not None:
uid = int(uid)
set_groups_for_uid(uid)
if gid is not None:
gid = int(gid)
os.setegid(gid)
if uid is not None:
os.seteuid(uid)
_dropped_privileges += 1
def regain_privileges():
global _dropped_privileges
assert _dropped_privileges is not None
_dropped_privileges -= 1
if _dropped_privileges == 0:
os.seteuid(0)
os.setegid(0)
os.setgroups([])
def drop_privileges_save():
"""Drop the real UID/GID as well, and hide them in saved IDs."""
# At the moment, we only know how to handle this when effective
# privileges were already dropped.
assert _dropped_privileges is not None and _dropped_privileges > 0
uid = os.environ.get('SUDO_UID')
gid = os.environ.get('SUDO_GID')
if uid is not None:
uid = int(uid)
set_groups_for_uid(uid)
if gid is not None:
gid = int(gid)
os.setresgid(gid, gid, 0)
if uid is not None:
os.setresuid(uid, uid, 0)
def regain_privileges_save():
"""Recover our real UID/GID after calling drop_privileges_save."""
assert _dropped_privileges is not None and _dropped_privileges > 0
os.setresuid(0, 0, 0)
os.setresgid(0, 0, 0)
os.setgroups([])
@contextlib.contextmanager
def raised_privileges():
"""As regain_privileges/drop_privileges, but in context manager style."""
regain_privileges()
try:
yield
finally:
drop_privileges()
def raise_privileges(func):
"""As raised_privileges, but as a function decorator."""
from functools import wraps
@wraps(func)
def helper(*args, **kwargs):
with raised_privileges():
return func(*args, **kwargs)
return helper
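# --- Illustrative sketch, not part of the original module ---
# The helpers above are designed to be used together: drop_privileges() is
# called early so the process runs as the invoking user, short privileged
# sections are wrapped in ``with raised_privileges():`` and whole functions are
# decorated with @raise_privileges (as grub_options() and others below do).
# A hypothetical helper that reads a root-only file, assuming privileges were
# dropped during start-up, could look like:
def _example_read_protected_file(path='/etc/shadow'):
    """Sketch only: temporarily regain root to read a file, then drop again."""
    with raised_privileges():
        with open(path) as fp:
            return fp.read()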
@raise_privileges
def grub_options():
""" Generates a list of suitable targets for grub-installer
        @return an empty list, or a list of [device, description] pairs such as ['/dev/sda1', 'Ubuntu Hardy 8.04'] """
from ubiquity.parted_server import PartedServer
l = []
try:
oslist = {}
subp = subprocess.Popen(
['os-prober'], stdout=subprocess.PIPE, stderr=subprocess.PIPE,
universal_newlines=True)
result = subp.communicate()[0].splitlines()
for res in result:
res = res.split(':')
oslist[res[0]] = res[1]
p = PartedServer()
for disk in p.disks():
p.select_disk(disk)
with open(p.device_entry('model')) as fp:
mod = fp.readline()
with open(p.device_entry('device')) as fp:
dev = fp.readline()
with open(p.device_entry('size')) as fp:
size = fp.readline()
if dev and mod:
if size.isdigit():
size = format_size(int(size))
l.append([dev, '%s (%s)' % (mod, size)])
else:
l.append([dev, mod])
for part in p.partitions():
ostype = ''
if part[4] == 'linux-swap':
continue
if part[4] == 'free':
continue
if os.path.exists(p.part_entry(part[1], 'format')):
# Don't bother looking for an OS type.
pass
elif part[5] in oslist.keys():
ostype = oslist[part[5]]
l.append([part[5], ostype])
except:
import traceback
for line in traceback.format_exc().split('\n'):
syslog.syslog(syslog.LOG_ERR, line)
return l
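# Illustrative shape of the result, not taken from a real run: grub_options()
# returns [device, description] pairs suitable for a bootloader-target chooser,
# e.g. [['/dev/sda', 'ATA WDC WD5000 (500.1 GB)'], ['/dev/sda1', 'Windows 7 (loader)']].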
@raise_privileges
def boot_device():
from ubiquity.parted_server import PartedServer
boot = None
root = None
try:
p = PartedServer()
for disk in p.disks():
p.select_disk(disk)
for part in p.partitions():
part = part[1]
if p.has_part_entry(part, 'mountpoint'):
mp = p.readline_part_entry(part, 'mountpoint')
if mp == '/boot':
boot = disk.replace('=', '/')
elif mp == '/':
root = disk.replace('=', '/')
except Exception:
import traceback
for line in traceback.format_exc().split('\n'):
syslog.syslog(syslog.LOG_ERR, line)
if boot:
return boot
return root
def is_removable(device):
if device is None:
return None
device = os.path.realpath(device)
devpath = None
is_partition = False
removable_bus = False
subp = subprocess.Popen(['udevadm', 'info', '-q', 'property',
'-n', device],
stdout=subprocess.PIPE, universal_newlines=True)
for line in subp.communicate()[0].splitlines():
line = line.strip()
if line.startswith('DEVPATH='):
devpath = line[8:]
elif line == 'DEVTYPE=partition':
is_partition = True
elif line == 'ID_BUS=usb' or line == 'ID_BUS=ieee1394':
removable_bus = True
if devpath is not None:
if is_partition:
devpath = os.path.dirname(devpath)
is_removable = removable_bus
try:
with open('/sys%s/removable' % devpath) as removable:
if removable.readline().strip() != '0':
is_removable = True
except IOError:
pass
if is_removable:
try:
subp = subprocess.Popen(['udevadm', 'info', '-q', 'name',
'-p', devpath],
stdout=subprocess.PIPE,
universal_newlines=True)
return ('/dev/%s' %
subp.communicate()[0].splitlines()[0].strip())
except Exception:
pass
return None
def mount_info(path):
"""Return filesystem name, type, and ro/rw for a given mountpoint."""
fsname = ''
fstype = ''
writable = ''
with open('/proc/mounts') as fp:
for line in fp:
line = line.split()
if line[1] == path:
fsname = line[0]
fstype = line[2]
writable = line[3].split(',')[0]
return fsname, fstype, writable
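# Illustrative usage, not part of the original module: when the path is not a
# mountpoint the triple is ('', '', ''), so callers unpack and test it, e.g.:
def _example_is_readonly_mount(path='/cdrom'):
    fsname, fstype, writable = mount_info(path)
    return bool(fsname) and writable == 'ro'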
def udevadm_info(args):
fullargs = ['udevadm', 'info', '-q', 'property']
fullargs.extend(args)
udevadm = {}
subp = subprocess.Popen(
fullargs, stdout=subprocess.PIPE, universal_newlines=True)
for line in subp.communicate()[0].splitlines():
line = line.strip()
if '=' not in line:
continue
name, value = line.split('=', 1)
udevadm[name] = value
return udevadm
def partition_to_disk(partition):
"""Convert a partition device to its disk device, if any."""
udevadm_part = udevadm_info(['-n', partition])
if ('DEVPATH' not in udevadm_part or
udevadm_part.get('DEVTYPE') != 'partition'):
return partition
disk_syspath = '/sys%s' % udevadm_part['DEVPATH'].rsplit('/', 1)[0]
udevadm_disk = udevadm_info(['-p', disk_syspath])
return udevadm_disk.get('DEVNAME', partition)
def is_boot_device_removable(boot=None):
if boot:
return is_removable(boot)
else:
return is_removable(boot_device())
def cdrom_mount_info():
"""Return mount information for /cdrom.
This is the same as mount_info, except that the partition is converted to
its containing disk, and we don't care whether the mount point is
writable.
"""
cdsrc, cdfs, _ = mount_info('/cdrom')
cdsrc = partition_to_disk(cdsrc)
return cdsrc, cdfs
@raise_privileges
def grub_device_map():
"""Return the contents of the default GRUB device map."""
subp = subprocess.Popen(['grub-mkdevicemap', '--no-floppy', '-m', '-'],
stdout=subprocess.PIPE, universal_newlines=True)
return subp.communicate()[0].splitlines()
def grub_default(boot=None):
"""Return the default GRUB installation target."""
# Much of this is intentionally duplicated from grub-installer, so that
# we can show the user what device GRUB will be installed to before
# grub-installer is run. Pursuant to that, we intentionally run this in
# the installer root as /target might not yet be available.
bootremovable = is_boot_device_removable(boot=boot)
if bootremovable is not None:
return bootremovable
devices = grub_device_map()
target = None
if devices:
try:
target = os.path.realpath(devices[0].split('\t')[1])
except (IndexError, OSError):
pass
# last resort
if target is None:
target = '(hd0)'
cdsrc, cdfs = cdrom_mount_info()
try:
# The target is usually under /dev/disk/by-id/, so string equality
# is insufficient.
same = os.path.samefile(cdsrc, target)
except OSError:
same = False
if ((same or target == '(hd0)') and
((cdfs and cdfs != 'iso9660') or is_removable(cdsrc))):
# Installing from removable media other than a CD. Make sure that
# we don't accidentally install GRUB to it.
boot = boot_device()
try:
if boot:
target = boot
else:
# Try the next disk along (which can't also be the CD source).
target = os.path.realpath(devices[1].split('\t')[1])
target = re.sub(r'(/dev/(cciss|ida)/c[0-9]d[0-9]|/dev/[a-z]+).*',
r'\1', target)
except (IndexError, OSError):
pass
return target
_os_prober_oslist = {}
_os_prober_osvers = {}
_os_prober_called = False
def find_in_os_prober(device, with_version=False):
"""Look for the device name in the output of os-prober.
Return the friendly name of the device, or the empty string on error.
"""
try:
oslist, osvers = os_prober()
if device in oslist:
ret = oslist[device]
elif is_swap(device):
ret = 'swap'
else:
syslog.syslog('Device %s not found in os-prober output' % device)
ret = ''
ret = utf8(ret, errors='replace')
ver = utf8(osvers.get(device, ''), errors='replace')
if with_version:
return ret, ver
else:
return ret
except (KeyboardInterrupt, SystemExit):
pass
except:
import traceback
syslog.syslog(syslog.LOG_ERR, "Error in find_in_os_prober:")
for line in traceback.format_exc().split('\n'):
syslog.syslog(syslog.LOG_ERR, line)
return ''
@raise_privileges
def os_prober():
global _os_prober_oslist
global _os_prober_osvers
global _os_prober_called
if not _os_prober_called:
_os_prober_called = True
subp = subprocess.Popen(
['os-prober'], stdout=subprocess.PIPE, stderr=subprocess.PIPE,
universal_newlines=True)
result = subp.communicate()[0].splitlines()
for res in result:
res = res.split(':')
if res[2] == 'Ubuntu':
version = [v for v in re.findall('[0-9.]*', res[1]) if v][0]
# Get rid of the superfluous (development version) (11.04)
text = re.sub('\s*\(.*\).*', '', res[1])
_os_prober_oslist[res[0]] = text
_os_prober_osvers[res[0]] = version
else:
# Get rid of the bootloader indication. It's not relevant here.
_os_prober_oslist[res[0]] = res[1].replace(' (loader)', '')
return _os_prober_oslist, _os_prober_osvers
@raise_privileges
def remove_os_prober_cache():
osextras.unlink_force('/var/lib/ubiquity/os-prober-cache')
shutil.rmtree('/var/lib/ubiquity/linux-boot-prober-cache',
ignore_errors=True)
def windows_startup_folder(mount_path):
locations = [
# Windows 8
'ProgramData/Microsoft/Windows/Start Menu/Programs/StartUp',
# Windows 7
'ProgramData/Microsoft/Windows/Start Menu/Programs/Startup',
# Windows XP
'Documents and Settings/All Users/Start Menu/Programs/Startup',
# Windows NT
'Winnt/Profiles/All Users/Start Menu/Programs/Startup',
]
for location in locations:
path = os.path.join(mount_path, location)
if os.path.exists(path):
return path
return ''
ReleaseInfo = namedtuple('ReleaseInfo', 'name, version')
def get_release():
if get_release.release_info is None:
try:
with open('/cdrom/.disk/info') as fp:
line = fp.readline()
if line:
line = line.split()
if line[2] == 'LTS':
line[1] += ' LTS'
get_release.release_info = ReleaseInfo(
name=line[0], version=line[1])
except:
syslog.syslog(syslog.LOG_ERR, 'Unable to determine the release.')
if not get_release.release_info:
get_release.release_info = ReleaseInfo(name='Ubuntu', version='')
return get_release.release_info
get_release.release_info = None
def get_release_name():
import warnings
warnings.warn('get_release_name() is deprecated, '
'use get_release().name instead.',
category=DeprecationWarning)
if not get_release_name.release_name:
try:
with open('/cdrom/.disk/info') as fp:
line = fp.readline()
if line:
line = line.split()
if line[2] == 'LTS':
get_release_name.release_name = ' '.join(line[:3])
else:
get_release_name.release_name = ' '.join(line[:2])
except:
syslog.syslog(
syslog.LOG_ERR,
"Unable to determine the distribution name from "
"/cdrom/.disk/info")
if not get_release_name.release_name:
get_release_name.release_name = 'Ubuntu'
return get_release_name.release_name
get_release_name.release_name = ''
@raise_privileges
def get_install_medium():
if not get_install_medium.medium:
try:
if os.access('/cdrom', os.W_OK):
get_install_medium.medium = 'USB'
else:
get_install_medium.medium = 'CD'
except:
syslog.syslog(
syslog.LOG_ERR, "Unable to determine install medium.")
get_install_medium.medium = 'CD'
return get_install_medium.medium
get_install_medium.medium = ''
def execute(*args):
"""runs args* in shell mode. Output status is taken."""
log_args = ['log-output', '-t', 'ubiquity']
log_args.extend(args)
try:
status = subprocess.call(log_args)
except IOError as e:
syslog.syslog(syslog.LOG_ERR, ' '.join(log_args))
syslog.syslog(syslog.LOG_ERR,
"OS error(%s): %s" % (e.errno, e.strerror))
return False
else:
if status != 0:
syslog.syslog(syslog.LOG_ERR, ' '.join(log_args))
return False
syslog.syslog(' '.join(log_args))
return True
@raise_privileges
def execute_root(*args):
return execute(*args)
def format_size(size):
"""Format a partition size."""
if size < 1000:
unit = 'B'
factor = 1
elif size < 1000 * 1000:
unit = 'kB'
factor = 1000
elif size < 1000 * 1000 * 1000:
unit = 'MB'
factor = 1000 * 1000
elif size < 1000 * 1000 * 1000 * 1000:
unit = 'GB'
factor = 1000 * 1000 * 1000
else:
unit = 'TB'
factor = 1000 * 1000 * 1000 * 1000
return '%.1f %s' % (float(size) / factor, unit)
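# Illustrative examples (decimal units as implemented above):
#   format_size(512) -> '512.0 B', format_size(1536000) -> '1.5 MB',
#   format_size(2 * 1000 ** 4) -> '2.0 TB'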
def debconf_escape(text):
escaped = text.replace('\\', '\\\\').replace('\n', '\\n')
return re.sub(r'(\s)', r'\\\1', escaped)
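# Illustrative example: debconf_escape('a b') returns 'a\ b' (whitespace gains a
# backslash prefix) and a literal newline becomes the two characters backslash-n.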
def create_bool(text):
if text == 'true':
return True
elif text == 'false':
return False
else:
return text
@raise_privileges
def dmimodel():
model = ''
kwargs = {}
if os.geteuid() != 0:
# Silence annoying warnings during the test suite.
kwargs['stderr'] = open('/dev/null', 'w')
try:
proc = subprocess.Popen(
['dmidecode', '--string', 'system-manufacturer'],
stdout=subprocess.PIPE, universal_newlines=True, **kwargs)
manufacturer = proc.communicate()[0]
if not manufacturer:
return
manufacturer = manufacturer.lower()
if 'to be filled' in manufacturer:
# Don't bother with products in development.
return
if 'bochs' in manufacturer or 'vmware' in manufacturer:
model = 'virtual machine'
# VirtualBox sets an appropriate system-product-name.
else:
if 'lenovo' in manufacturer or 'ibm' in manufacturer:
key = 'system-version'
else:
key = 'system-product-name'
proc = subprocess.Popen(['dmidecode', '--string', key],
stdout=subprocess.PIPE,
universal_newlines=True)
model = proc.communicate()[0]
if 'apple' in manufacturer:
# MacBook4,1 - strip the 4,1
            model = re.sub(r'[^a-zA-Z\s]', '', model)
# Replace each gap of non-alphanumeric characters with a dash.
# Ensure the resulting string does not begin or end with a dash.
model = re.sub('[^a-zA-Z0-9]+', '-', model).rstrip('-').lstrip('-')
if model.lower() == 'not-available':
return
except Exception:
syslog.syslog(syslog.LOG_ERR, 'Unable to determine the model from DMI')
finally:
if 'stderr' in kwargs:
kwargs['stderr'].close()
return model
def set_indicator_keymaps(lang):
import xml.etree.cElementTree as ElementTree
from gi.repository import Xkl, GdkX11
# GdkX11.x11_get_default_xdisplay() segfaults if Gtk hasn't been
# imported; possibly finer-grained than this, but anything using this
# will already have imported Gtk anyway ...
from gi.repository import Gtk
from ubiquity import gsettings
# pacify pyflakes
Gtk
gsettings_key = ['org.gnome.libgnomekbd.keyboard', 'layouts']
lang = lang.split('_')[0]
variants = []
# Map inspired from that of gfxboot-theme-ubuntu that's itself
# based on console-setup's. This one has been restricted to
# language => keyboard layout not locale => keyboard layout as
# we don't actually know the exact locale
default_keymap = {
'ar': 'ara',
'bs': 'ba',
'de': 'de',
'el': 'gr',
'en': 'us',
'eo': 'epo',
'fr': 'fr_oss',
'gu': 'in_guj',
'hi': 'in',
'hr': 'hr',
'hy': 'am',
'ka': 'ge',
'kn': 'in_kan',
'lo': 'la',
'ml': 'in_mal',
'pa': 'in_guru',
'sr': 'rs',
'sv': 'se',
'ta': 'in_tam',
'te': 'in_tel',
'zh': 'cn',
}
def item_str(s):
'''Convert a zero-terminated byte array to a proper str'''
i = s.find(b'\x00')
return s[:i].decode()
def process_variant(*args):
if hasattr(args[2], 'name'):
variants.append(
'%s\t%s' % (item_str(args[1].name), item_str(args[2].name)))
else:
variants.append(item_str(args[1].name))
def restrict_list(variants):
new_variants = []
        # Start by looking for an explicit default layout in the keymap
if lang in default_keymap:
if default_keymap[lang] in variants:
variants.remove(default_keymap[lang])
new_variants.append(default_keymap[lang])
else:
tab_keymap = default_keymap[lang].replace('_', '\t')
if tab_keymap in variants:
variants.remove(tab_keymap)
new_variants.append(tab_keymap)
# Prioritize the layout matching the language (if any)
if lang in variants:
variants.remove(lang)
new_variants.append(lang)
# Uniquify our list (just in case)
variants = list(set(variants))
if len(variants) > 4:
# We have a problem, X only supports 4
            # Add as many entries as we can that are layouts without a variant
country_variants = sorted(
entry for entry in variants if '\t' not in entry)
for entry in country_variants[:4 - len(new_variants)]:
new_variants.append(entry)
variants.remove(entry)
if len(new_variants) < 4:
# We can add some more
simple_variants = sorted(
entry for entry in variants if '_' not in entry)
for entry in simple_variants[:4 - len(new_variants)]:
new_variants.append(entry)
variants.remove(entry)
if len(new_variants) < 4:
# Now just add anything left
for entry in variants[:4 - len(new_variants)]:
new_variants.append(entry)
variants.remove(entry)
else:
new_variants += list(variants)
# gsettings doesn't understand utf8
new_variants = [str(variant) for variant in new_variants]
return new_variants
def call_setxkbmap(variants):
kb_layouts = []
kb_variants = []
for entry in variants:
fields = entry.split('\t')
if len(fields) > 1:
kb_layouts.append(fields[0])
kb_variants.append(fields[1])
else:
kb_layouts.append(fields[0])
kb_variants.append("")
execute(
"setxkbmap", "-layout", ",".join(kb_layouts),
"-variant", ",".join(kb_variants))
iso_639_3 = ElementTree.parse('/usr/share/xml/iso-codes/iso_639_3.xml')
nodes = [element for element in iso_639_3.findall('iso_639_3_entry')
if element.get('part1_code') == lang]
display = GdkX11.x11_get_default_xdisplay()
engine = Xkl.Engine.get_instance(display)
if nodes:
configreg = Xkl.ConfigRegistry.get_instance(engine)
configreg.load(False)
# Apparently part2_code doesn't always work (fails with French)
for prop in ('part2_code', 'id', 'part1_code'):
code = nodes[0].get(prop)
if code is not None:
configreg.foreach_language_variant(code, process_variant, None)
if variants:
restricted_variants = restrict_list(variants)
call_setxkbmap(restricted_variants)
gsettings.set_list(
gsettings_key[0], gsettings_key[1],
restricted_variants)
break
else:
# Use the system default if no other keymaps can be determined.
gsettings.set_list(gsettings_key[0], gsettings_key[1], [])
engine.lock_group(0)
NM = 'org.freedesktop.NetworkManager'
NM_STATE_CONNECTED_GLOBAL = 70
def get_prop(obj, iface, prop):
import dbus
try:
return obj.Get(iface, prop, dbus_interface=dbus.PROPERTIES_IFACE)
except dbus.DBusException as e:
if e.get_dbus_name() == 'org.freedesktop.DBus.Error.UnknownMethod':
return None
else:
raise
def is_wireless_enabled():
import dbus
bus = dbus.SystemBus()
manager = bus.get_object(NM, '/org/freedesktop/NetworkManager')
return get_prop(manager, NM, 'WirelessEnabled')
def has_connection():
import dbus
bus = dbus.SystemBus()
manager = bus.get_object(NM, '/org/freedesktop/NetworkManager')
state = get_prop(manager, NM, 'state')
return state == NM_STATE_CONNECTED_GLOBAL
def add_connection_watch(func):
import dbus
def connection_cb(state):
func(state == NM_STATE_CONNECTED_GLOBAL)
bus = dbus.SystemBus()
bus.add_signal_receiver(connection_cb, 'StateChanged', NM, NM)
try:
func(has_connection())
except dbus.DBusException:
# We can't talk to NM, so no idea. Wild guess: we're connected
# using ssh with X forwarding, and are therefore connected. This
# allows us to proceed with a minimum of complaint.
func(True)
def install_size():
if min_install_size:
return min_install_size
    # Fall back to a 5 GB filesystem size if the size file cannot be read
    size = 5 * 1024 * 1024 * 1024
    # Cap the computed minimum install size at 8 GB
    max_size = 8 * 1024 * 1024 * 1024
try:
with open('/cdrom/casper/filesystem.size') as fp:
size = int(fp.readline())
except IOError:
pass
# TODO substitute into the template for the state box.
min_disk_size = size * 2 # fudge factor
# Set minimum size to 8GB if current minimum size is larger
# than 8GB and we still have an extra 20% of free space
if min_disk_size > max_size and size * 1.2 < max_size:
min_disk_size = max_size
return min_disk_size
min_install_size = None
# vim:ai:et:sts=4:tw=80:sw=4:
def get_network():
intip = False
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
try:
s.connect(("antergos.com",1234))
except:
return ""
myip = s.getsockname()[0]
s.close()
spip = myip.split(".")
if spip[0] == '192':
if spip[1] == '168':
intip = True
elif spip[0] == '10':
intip = True
elif spip[0] == '172':
if int(spip[1]) > 15 and int(spip[1]) < 32:
intip = True
if intip:
ipran = '.'.join(spip[:-1]) + ".0/24"
else:
ipran = '.'.join(spip)
return ipran
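# Illustrative examples: a local address such as 192.168.1.23 yields
# '192.168.1.0/24', while a public address such as 8.8.8.8 is returned as-is.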
| gpl-3.0 | -3,050,432,571,631,182,000 | 30.484749 | 79 | 0.558593 | false |
bytedance/fedlearner | test/data_join/test_data_portal_job_manager.py | 1 | 8196 | # Copyright 2021 The FedLearner Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
import os
import time
import unittest
import logging
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
from tensorflow.compat.v1 import gfile
from fnmatch import fnmatch
from google.protobuf import text_format
from fedlearner.data_join import common
from fedlearner.common import data_portal_service_pb2 as dp_pb
from fedlearner.common.db_client import DBClient
from fedlearner.data_join.data_portal_job_manager import DataPortalJobManager
class Timer:
def __init__(self, content):
self._content = content
self._start_time = 0
def __enter__(self):
self._start_time = time.time()
def __exit__(self, exc_type, exc_val, exc_tb):
logging.info("%s takes %s second", self._content,
time.time() - self._start_time)
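# Illustrative usage: wrapping a block in `with Timer("loading job"):` logs
# "loading job takes <elapsed> second" when the block exits.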
class TestDataPortalJobManager(unittest.TestCase):
def setUp(self) -> None:
logging.getLogger().setLevel(logging.DEBUG)
self._data_portal_name = 'test_data_portal_job_manager'
self._kvstore = DBClient('etcd', True)
self._portal_input_base_dir = './portal_input_dir'
self._portal_output_base_dir = './portal_output_dir'
self._raw_data_publish_dir = 'raw_data_publish_dir'
if gfile.Exists(self._portal_input_base_dir):
gfile.DeleteRecursively(self._portal_input_base_dir)
gfile.MakeDirs(self._portal_input_base_dir)
self._data_fnames = ['1001/{}.data'.format(i) for i in range(100)]
self._data_fnames_without_success = \
['1002/{}.data'.format(i) for i in range(100)]
self._csv_fnames = ['1003/{}.csv'.format(i) for i in range(100)]
self._unused_fnames = ['{}.xx'.format(100)]
self._ignored_fnames = [f'.part-{i}.crc' for i in range(10)]
self._all_fnames = self._data_fnames + \
self._data_fnames_without_success + \
self._csv_fnames + self._unused_fnames
all_fnames_with_success = ['1001/_SUCCESS'] + ['1003/_SUCCESS'] +\
self._all_fnames + self._ignored_fnames
for fname in all_fnames_with_success:
fpath = os.path.join(self._portal_input_base_dir, fname)
gfile.MakeDirs(os.path.dirname(fpath))
with gfile.Open(fpath, "w") as f:
f.write('xxx')
def tearDown(self) -> None:
gfile.DeleteRecursively(self._portal_input_base_dir)
def _list_input_dir(self, portal_options, file_wildcard,
target_fnames, max_files_per_job=8000):
portal_manifest = dp_pb.DataPortalManifest(
name=self._data_portal_name,
data_portal_type=dp_pb.DataPortalType.Streaming,
output_partition_num=4,
input_file_wildcard=file_wildcard,
input_base_dir=self._portal_input_base_dir,
output_base_dir=self._portal_output_base_dir,
raw_data_publish_dir=self._raw_data_publish_dir,
processing_job_id=-1,
next_job_id=0
)
self._kvstore.set_data(
common.portal_kvstore_base_dir(self._data_portal_name),
text_format.MessageToString(portal_manifest))
with Timer("DataPortalJobManager initialization"):
data_portal_job_manager = DataPortalJobManager(
self._kvstore, self._data_portal_name,
portal_options.long_running,
portal_options.check_success_tag,
portal_options.single_subfolder,
portal_options.files_per_job_limit,
max_files_per_job
)
portal_job = data_portal_job_manager._sync_processing_job()
target_fnames.sort()
fpaths = [os.path.join(self._portal_input_base_dir, f)
for f in target_fnames]
self.assertEqual(len(fpaths), len(portal_job.fpaths))
for index, fpath in enumerate(fpaths):
self.assertEqual(fpath, portal_job.fpaths[index])
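    # Note: the helper above stores a DataPortalManifest in the (mock) kvstore,
    # lets DataPortalJobManager allocate a processing job, and then asserts the
    # job's file list matches target_fnames exactly.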
def test_list_input_dir(self):
portal_options = dp_pb.DataPotraMasterlOptions(
use_mock_etcd=True,
long_running=False,
check_success_tag=True,
single_subfolder=False,
files_per_job_limit=None
)
self._list_input_dir(portal_options, "*.data", self._data_fnames)
def test_list_input_dir_single_folder(self):
portal_options = dp_pb.DataPotraMasterlOptions(
use_mock_etcd=True,
long_running=False,
check_success_tag=False,
single_subfolder=True,
files_per_job_limit=None,
)
self._list_input_dir(
portal_options, "*.data", self._data_fnames)
def test_list_input_dir_files_limit(self):
portal_options = dp_pb.DataPotraMasterlOptions(
use_mock_etcd=True,
long_running=False,
check_success_tag=False,
single_subfolder=False,
files_per_job_limit=1,
)
self._list_input_dir(
portal_options, "*.data", self._data_fnames)
portal_options = dp_pb.DataPotraMasterlOptions(
use_mock_etcd=True,
long_running=False,
check_success_tag=False,
single_subfolder=False,
files_per_job_limit=150,
)
self._list_input_dir(
portal_options, "*.data", self._data_fnames)
portal_options = dp_pb.DataPotraMasterlOptions(
use_mock_etcd=True,
long_running=False,
check_success_tag=False,
single_subfolder=False,
files_per_job_limit=200,
)
self._list_input_dir(
portal_options, "*.data",
self._data_fnames + self._data_fnames_without_success)
def test_list_input_dir_over_limit(self):
portal_options = dp_pb.DataPotraMasterlOptions(
use_mock_etcd=True,
long_running=False,
check_success_tag=False,
single_subfolder=False,
)
self._list_input_dir(
portal_options, "*.data", self._data_fnames, max_files_per_job=100)
self._list_input_dir(
portal_options, "*.data",
self._data_fnames + self._data_fnames_without_success,
max_files_per_job=200)
def test_list_input_dir_without_success_check(self):
portal_options = dp_pb.DataPotraMasterlOptions(
use_mock_etcd=True,
long_running=False,
check_success_tag=False,
single_subfolder=False,
files_per_job_limit=None
)
self._list_input_dir(
portal_options, "*.data",
self._data_fnames + self._data_fnames_without_success)
def test_list_input_dir_without_wildcard(self):
portal_options = dp_pb.DataPotraMasterlOptions(
use_mock_etcd=True,
long_running=False,
check_success_tag=True,
single_subfolder=False,
files_per_job_limit=None
)
self._list_input_dir(
portal_options, None,
self._data_fnames + self._csv_fnames)
def test_list_input_dir_without_wildcard_and_success_check(self):
portal_options = dp_pb.DataPotraMasterlOptions(
use_mock_etcd=True,
long_running=False,
check_success_tag=False,
single_subfolder=False,
files_per_job_limit=None
)
self._list_input_dir(portal_options, None, self._all_fnames)
if __name__ == '__main__':
unittest.main()
| apache-2.0 | 402,065,272,321,751,940 | 36.424658 | 79 | 0.599683 | false |
mosbasik/fluidspaces | src/fluidspaces/i3_commands.py | 1 | 1215 | import subprocess
class i3Commands(object):
@staticmethod
def send_to_wp(i3_name):
'''Send the currently focused window/container to the named workspace'''
subprocess.Popen(['i3-msg', 'move container to workspace', i3_name], stdout=subprocess.PIPE)
@staticmethod
def go_to_wp(i3_name):
'''Go to the named workspace'''
subprocess.Popen(['i3-msg', 'workspace', i3_name], stdout=subprocess.PIPE)
@staticmethod
def get_wps_str():
'''Query i3 for current workspaces and return stdout as a string'''
completed_proc = subprocess.run(['i3-msg', '-t', 'get_workspaces'], stdout=subprocess.PIPE)
stdout = completed_proc.stdout.decode('utf-8')
return stdout
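        # Note: this is the raw JSON array produced by `i3-msg -t get_workspaces`;
        # callers can decode it themselves, e.g. json.loads(i3Commands.get_wps_str()).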
@staticmethod
def rename_wp(old_i3_name, new_i3_name):
subprocess.run([
'i3-msg',
'rename workspace',
'"{}"'.format(old_i3_name),
'to',
'"{}"'.format(new_i3_name),
], stdout=subprocess.PIPE)
@staticmethod
def rename_wps(old_i3_names, new_i3_names):
for old_i3_name, new_i3_name in zip(old_i3_names, new_i3_names):
i3Commands.rename_wp(old_i3_name, new_i3_name)
| mit | -4,274,740,972,160,197,000 | 32.75 | 100 | 0.604938 | false |
gigglearrows/anniesbot | pajbot/models/timer.py | 1 | 6625 | import json
import logging
from pajbot.models.db import DBManager, Base
from pajbot.models.action import ActionParser
from pajbot.tbutil import find
from sqlalchemy import orm
from sqlalchemy import Column, Integer, String, Boolean
from sqlalchemy.dialects.mysql import TEXT
log = logging.getLogger('pajbot')
class Timer(Base):
__tablename__ = 'tb_timer'
id = Column(Integer, primary_key=True)
name = Column(String(256), nullable=False)
action_json = Column('action', TEXT, nullable=False)
interval_online = Column(Integer, nullable=False)
interval_offline = Column(Integer, nullable=False)
enabled = Column(Boolean, nullable=False, default=True)
def __init__(self, **options):
self.id = None
self.name = '??'
self.action_json = '{}'
self.interval_online = 5
self.interval_offline = 30
self.enabled = True
self.refresh_tts()
self.set(**options)
def set(self, **options):
self.name = options.get('name', self.name)
log.debug(options)
if 'action' in options:
log.info('new action!')
self.action_json = json.dumps(options['action'])
self.action = ActionParser.parse(self.action_json)
self.interval_online = options.get('interval_online', self.interval_online)
self.interval_offline = options.get('interval_offline', self.interval_offline)
self.enabled = options.get('enabled', self.enabled)
@orm.reconstructor
def init_on_load(self):
self.action = ActionParser.parse(self.action_json)
self.refresh_tts()
def refresh_tts(self):
self.time_to_send_online = self.interval_online
self.time_to_send_offline = self.interval_offline
def refresh_action(self):
self.action = ActionParser.parse(self.action_json)
def run(self, bot):
self.action.run(bot, source=None, message=None)
class TimerManager:
def __init__(self, bot):
self.bot = bot
self.bot.execute_every(60, self.tick)
if self.bot:
self.bot.socket_manager.add_handler('timer.update', self.on_timer_update)
self.bot.socket_manager.add_handler('timer.remove', self.on_timer_remove)
def on_timer_update(self, data, conn):
try:
timer_id = int(data['timer_id'])
except (KeyError, ValueError):
log.warn('No timer ID found in on_timer_update')
return False
updated_timer = find(lambda timer: timer.id == timer_id, self.timers)
if updated_timer:
with DBManager.create_session_scope(expire_on_commit=False) as db_session:
db_session.add(updated_timer)
db_session.refresh(updated_timer)
updated_timer.refresh_action()
db_session.expunge(updated_timer)
else:
with DBManager.create_session_scope(expire_on_commit=False) as db_session:
updated_timer = db_session.query(Timer).filter_by(id=timer_id).one_or_none()
# Add the updated timer to the timer lists if required
if updated_timer:
if updated_timer not in self.timers:
self.timers.append(updated_timer)
if updated_timer not in self.online_timers and updated_timer.interval_online > 0:
self.online_timers.append(updated_timer)
updated_timer.refresh_tts()
if updated_timer not in self.offline_timers and updated_timer.interval_offline > 0:
self.offline_timers.append(updated_timer)
updated_timer.refresh_tts()
for timer in self.online_timers:
if timer.enabled is False or timer.interval_online <= 0:
self.online_timers.remove(timer)
for timer in self.offline_timers:
if timer.enabled is False or timer.interval_offline <= 0:
self.offline_timers.remove(timer)
def on_timer_remove(self, data, conn):
try:
timer_id = int(data['timer_id'])
except (KeyError, ValueError):
log.warn('No timer ID found in on_timer_update')
return False
removed_timer = find(lambda timer: timer.id == timer_id, self.timers)
if removed_timer:
if removed_timer in self.timers:
self.timers.remove(removed_timer)
if removed_timer in self.online_timers:
self.online_timers.remove(removed_timer)
if removed_timer in self.offline_timers:
self.offline_timers.remove(removed_timer)
def tick(self):
if self.bot.is_online:
for timer in self.online_timers:
timer.time_to_send_online -= 1
timer = find(lambda timer: timer.time_to_send_online <= 0, self.online_timers)
if timer:
timer.run(self.bot)
timer.time_to_send_online = timer.interval_online
self.online_timers.remove(timer)
self.online_timers.append(timer)
else:
for timer in self.offline_timers:
timer.time_to_send_offline -= 1
timer = find(lambda timer: timer.time_to_send_offline <= 0, self.offline_timers)
if timer:
timer.run(self.bot)
timer.time_to_send_offline = timer.interval_offline
self.offline_timers.remove(timer)
self.offline_timers.append(timer)
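    # Illustrative note: redistribute_timers() below staggers the initial
    # countdowns, e.g. three offline timers sharing a 30 minute interval first
    # fire at roughly 10, 20 and 30 minutes instead of all on the same tick.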
def redistribute_timers(self):
for x in range(0, len(self.offline_timers)):
timer = self.offline_timers[x]
timer.time_to_send_offline = timer.interval_offline * ((x + 1) / len(self.offline_timers))
for x in range(0, len(self.online_timers)):
timer = self.online_timers[x]
timer.time_to_send_online = timer.interval_online * ((x + 1) / len(self.online_timers))
def load(self):
self.timers = []
with DBManager.create_session_scope(expire_on_commit=False) as db_session:
self.timers = db_session.query(Timer).order_by(Timer.interval_online, Timer.interval_offline, Timer.name).all()
db_session.expunge_all()
self.online_timers = [timer for timer in self.timers if timer.interval_online > 0 and timer.enabled]
self.offline_timers = [timer for timer in self.timers if timer.interval_offline > 0 and timer.enabled]
self.redistribute_timers()
log.info('Loaded {} timers ({} online/{} offline)'.format(len(self.timers), len(self.online_timers), len(self.offline_timers)))
return self
| mit | -3,706,695,500,516,823,600 | 38.201183 | 135 | 0.616453 | false |
white-lab/pyproteome | brainrnaseq/__init__.py | 1 | 2373 |
from . import cache, mapping, enrichments
CELL_TYPE_COLS = {
'Homo sapiens': {
'Astrocyte': [
'8yo',
'13yo', '16yo', '21yo.1', '22yo.1', '35yo', '47yo', '51yo', '53yo',
'60yo', '63yo - 1', '63yo - 2',
],
'Neuron': [
'25yo',
],
'OPC': [
'22yoGC', '63yoGC - 1',
'63yo GC - 2', '47yoO4', '63yoO4',
],
'New Oligodendrocytes': [
'22yoGC', '63yoGC - 1',
'63yo GC - 2', '47yoO4', '63yoO4',
],
'Myelinating Oligodendrocytes': [
'22yoGC', '63yoGC - 1',
'63yo GC - 2', '47yoO4', '63yoO4',
],
'Microglia': [
'45yo', '51yo.1', '63yo',
],
'Endothelia': [
'13yo.1',
'47yo.1',
],
},
'Mus musculus': {
'Astrocyte': [
# 'FACS - p69',
# 'FACS p70',
'1 month',
'4 months',
'7 months',
'9 months',
],
'Neuron': [
'Neuron 3',
'Neuron 4',
],
'OPC': [
'Oligodendrocyte precursor cell 3',
'Oligodendrocyte precursor cell 4',
],
'New Oligodendrocytes': [
'Newly formed oligodendrocyte 3',
'Newly formed oligodendrocyte 4',
],
'Myelinating Oligodendrocytes': [
'Myelinating oligodendrocyte 4',
'Myelinating oligodenrocyte 5',
],
'Microglia': [
'Microglia 1',
'Microglia 2',
],
'Endothelia': [
'Endo 1',
'Endo 2',
],
},
}
CELL_TYPES = [
'Astrocyte',
'Endothelia',
'Microglia',
'Myelinating Oligodendrocytes',
'Neuron',
'New Oligodendrocytes',
'OPC',
]
DEFAULT_CELL_TYPES = [
i
for i in CELL_TYPES
if i not in ['OPC', 'New Oligodendrocytes']
]
CELL_COLORS = colors = {
'Astrocyte': '#bfee90',
'Endothelia': '#ff9b90',
'Microglia': '#5bd3ff',
'Myelinating Oligodendrocytes': '#ff39ff',
'Neuron': '#ffc467',
'New Oligodendrocytes': 'lightpurple',
'OPC': 'darkpurple',
}
__all__ = [
'cache',
'mapping',
'enrichments',
'CELL_TYPE_COLS',
'CELL_TYPES',
'DEFAULT_CELL_TYPES',
'CELL_COLORS',
]
| bsd-2-clause | 7,507,329,908,707,769,000 | 21.6 | 79 | 0.435735 | false |
arthur-e/gass | public/views.py | 1 | 3181 | import datetime
from django.shortcuts import render_to_response
from django.http import HttpResponse
from django.core.cache import cache
from django.core.exceptions import ObjectDoesNotExist
from django.views.decorators.cache import cache_page
from public.models import News
from bering.models import Station, Ablation
def load_defaults():
'''
The base.html template requires this load routine.
'''
sites = Station.objects.all().order_by('site')
stations = []
campaigns = []
then = datetime.datetime(1900,1,1,0,0,0)
for each in sites:
try:
latest_campaign = each.campaign_set.latest()
latest_ablation = each.ablation_set.latest()
latest_ablation.operational = each.operational
except ObjectDoesNotExist:
return {
'stations': [],
'campaigns': [],
'then': then
}
# Check for site visits where height of sensor may have changed
try:
last_visit = latest_campaign.site_visits.latest()
if last_visit.ablato_adjusted:
# Subtract sensor height when last adjusted
latest_ablation.rng_cm -= last_visit.ablation_height_cm
else:
# Subtract sensor height when sensor was installed
latest_ablation.rng_cm -= latest_campaign.site.init_height_cm
except ObjectDoesNotExist:
# No visits? Subtract sensor height when sensor was installed
latest_ablation.rng_cm -= latest_campaign.site.init_height_cm
# Get the latest ablation observation for each site
stations.append(latest_ablation)
# Get a list of field campaigns and latest observational data
campaigns.append({
'region': latest_campaign.region,
'site': latest_campaign.site,
'lat': latest_ablation.lat,
'lng': latest_ablation.lng,
'datetime': latest_ablation.datetime,
'gps_valid': latest_ablation.gps_valid,
'rng_cm_valid': latest_ablation.rng_cm_valid,
'operational': each.operational
})
if latest_ablation.datetime > then:
then = latest_ablation.datetime
return {
'stations': stations,
'campaigns': campaigns,
'then': then
}
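# Note: 'stations' holds the latest ablation reading per site, 'campaigns' one
# summary dict per field campaign, and 'then' the timestamp of the most recent
# observation; the base.html template consumes all three.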
def display_index(request):
'''
localhost/gass/
'''
data_dict = load_defaults()
data_dict['news'] = News.objects.all().order_by('-timestamp')[0:5]
data_dict['now'] = datetime.datetime.now()
return render_to_response('index.html', data_dict)
def display_about(request):
'''
localhost/gass/about/
'''
return render_to_response('about.html', load_defaults())
def display_access(request):
'''
localhost/gass/access/
'''
return render_to_response('data.html', load_defaults())
def display_instruments(request):
'''
localhost/gass/hardware/
'''
return render_to_response('instruments.html', load_defaults())
def display_team(request):
'''
localhost/gass/team/
'''
return render_to_response('team.html', load_defaults())
| mit | 8,327,074,673,310,716,000 | 29.009434 | 77 | 0.61773 | false |
synthesio/infra-ovh-ansible-module | plugins/modules/dedicated_server_install.py | 1 | 2843 | #!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.module_utils.basic import AnsibleModule
DOCUMENTATION = '''
---
module: dedicated_server_install
short_description: Install a new dedicated server
description:
- Install a new dedicated server
author: Synthesio SRE Team
requirements:
- ovh >= 0.5.0
options:
service_name:
required: true
description: Ovh name of the server
hostname:
required: true
description: Name of the new dedicated server
template:
required: true
description: template to use to spawn the server
'''
EXAMPLES = '''
synthesio.ovh.dedicated_server_install:
service_name: "ns12345.ip-1-2-3.eu"
hostname: "server01.example.net"
template: "debian10_64"
delegate_to: localhost
'''
RETURN = ''' # '''
from ansible_collections.synthesio.ovh.plugins.module_utils.ovh import ovh_api_connect, ovh_argument_spec
try:
from ovh.exceptions import APIError
HAS_OVH = True
except ImportError:
HAS_OVH = False
def run_module():
module_args = ovh_argument_spec()
module_args.update(dict(
service_name=dict(required=True),
hostname=dict(required=True),
template=dict(required=True)
))
module = AnsibleModule(
argument_spec=module_args,
supports_check_mode=True
)
client = ovh_api_connect(module)
service_name = module.params['service_name']
hostname = module.params['hostname']
template = module.params['template']
if module.check_mode:
module.exit_json(msg="Installation in progress on {} as {} with template {} - (dry run mode)".format(service_name, hostname, template),
changed=True)
try:
compatible_templates = client.get(
'/dedicated/server/%s/install/compatibleTemplates' % service_name
)
if template not in compatible_templates["ovh"] and template not in compatible_templates["personal"]:
module.fail_json(msg="{} doesn't exist in compatibles templates".format(template))
except APIError as api_error:
return module.fail_json(msg="Failed to call OVH API: {0}".format(api_error))
details = {"details":
{"language": "en",
"customHostname": hostname}
}
try:
client.post(
'/dedicated/server/%s/install/start' % service_name, **details, templateName=template)
module.exit_json(msg="Installation in progress on {} as {} with template {}!".format(service_name, hostname, template), changed=True)
except APIError as api_error:
module.fail_json(msg="Failed to call OVH API: {0}".format(api_error))
def main():
run_module()
if __name__ == '__main__':
main()
| mit | 5,904,091,304,778,387,000 | 27.148515 | 143 | 0.646148 | false |
certik/sfepy | tests/test_parsing.py | 1 | 3180 | from sfepy.base.testing import TestCommon
##
# 16.07.2007, c
class Test( TestCommon ):
##
# 16.07.2007, c
def from_conf( conf, options ):
return Test( conf = conf, options = options )
from_conf = staticmethod( from_conf )
##
# c: 16.07.2007, r: 08.07.2008
def test_parse_equations( self ):
from sfepy.fem.parseEq import create_bnf
test_strs = [
"""- d_volume.i1.Omega( uc )""",
"""2 * dw_term.i1.Omega( uc ) = - 3.0 * dw_term2.i1.Omega2( uc )""",
"""d_term1.Y( fluid, u, w, Nu, dcf, mode )
+ d_term2.Omega( u, w, Nu, dcf, mode )
- d_another_term.Elsewhere( w, p, Nu, dcf, mode )
= - dw_rhs.Y3.a( u, q, Nu, dcf, mode )""",
"""no_args() = 0""",
"""+ something( a, b, c ) = + something_else( c, a, d[-1] )""",
"""term_.a.a( u )""",
"""term.i1.Omega( v, du/dt ) + term2.i2.Gamma( v, dphi/dt)"""
]
n_fail = 0
term_descs = []
for test_str in test_strs:
term_descs[:] = []
try:
bnf = create_bnf( term_descs, {} )
bnf.parseString( test_str )
except:
self.report( 'failed: %s' % test_str )
if self.options.debug:
raise
n_fail += 1
for td in term_descs:
                print(td)
self.report( '%d failure(s)' % n_fail )
if n_fail:
raise AssertionError
return True
##
# c: 16.07.2007, r: 14.07.2008
def test_parse_regions( self ):
from sfepy.fem.parseReg import create_bnf, _test_strs
test_strs = ['nodes of surface -n r.Omega',
'r.Y_2 +n copy r.Y_1',
'nodes in (y <= 0.00001) & (x < 0.11)',
'nodes in ((y <= 0.00001) & (x < 0.11))',
'nodes in (((y <= 0.00001) & (x < 0.11)))',
'nodes in (((0.00001 < y) & (x < 0.11)))',
'all -n nodes in (y == 0.00001)',
'all -n nodes of surface',
'all -e r.DOmega_100',
'r.Y_1 -n nodes of surface *e r.Z_8 *n nodes in (y > 0)',
'nodes of surface +n nodes by pokus( x, y, z )',
'elements of group 6 +e nodes by fn2_3c( x )',
"""r.Y_1 *n (r.Y_2 +e (nodes in (y > 0) *n r.Y_32))
-n nodes of surface -e r.Y_5""",
'nodes by noargs()',
'nodes by extraargs( x, y, z, abc,3 )',
'node in r.Gamma_3',
'node 10',
'elements by afun( domain )']
stack = []
bnf = create_bnf( stack )
n_fail = 0
for test_str in test_strs:
stack[:] = []
try:
out = bnf.parseString( test_str )
except:
self.report( 'failed: %s' % test_str )
n_fail += 1
self.report( '%d failures' % n_fail )
if n_fail:
raise AssertionError
return True
| bsd-3-clause | -26,389,724,490,425,080 | 33.193548 | 80 | 0.416981 | false |
robtherad/BC-Mod | tools/make.py | 1 | 56000 | #!/usr/bin/env python3
# vim: set fileencoding=utf-8 :
# make.py
# An Arma 3 addon build system
# Created by ACE 3 team, modified by BC: https://github.com/acemod/ACE3
###############################################################################
# The MIT License (MIT)
# Copyright (c) 2013-2014 Ryan Schultz
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
###############################################################################
__version__ = "0.7"
import sys
if sys.version_info[0] == 2:
print("Python 3 is required.")
sys.exit(1)
import os
import os.path
import shutil
import platform
import glob
import subprocess
import hashlib
import configparser
import json
import traceback
import time
import timeit
import re
from tempfile import mkstemp
if sys.platform == "win32":
import winreg
######## GLOBALS #########
project = "@bc"
project_version = "0.1.0"
arma3tools_path = ""
work_drive = ""
module_root = ""
make_root = ""
release_dir = ""
module_root_parent = ""
optionals_root = ""
key_name = "bc"
key = ""
dssignfile = ""
prefix = "bc"
pbo_name_prefix = "bc_"
signature_blacklist = []
importantFiles = ["mod.cpp", "README.md", "ACE_LICENSE", "logo_bc.paa", "meta.cpp"]
versionFiles = []
ciBuild = False # Used for CI builds
###############################################################################
# http://akiscode.com/articles/sha-1directoryhash.shtml
# Copyright (c) 2009 Stephen Akiki
# MIT License (Means you can do whatever you want with this)
# See http://www.opensource.org/licenses/mit-license.php
# Error Codes:
# -1 -> Directory does not exist
# -2 -> General error (see stack traceback)
def get_directory_hash(directory):
directory_hash = hashlib.sha1()
if not os.path.exists (directory):
return -1
try:
for root, dirs, files in os.walk(directory):
for names in files:
path = os.path.join(root, names)
try:
f = open(path, 'rb')
except:
# You can't open the file for some reason
f.close()
continue
while 1:
# Read file in as little chunks
buf = f.read(4096)
if not buf: break
new = hashlib.sha1(buf)
directory_hash.update(new.digest())
f.close()
except:
# Print the stack traceback
traceback.print_exc()
return -2
retVal = directory_hash.hexdigest()
#print_yellow("Hash Value for {} is {}".format(directory,retVal))
return directory_hash.hexdigest()
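# Illustrative use (assumption): hash a module folder and compare it against a
# previously cached digest to detect changes, e.g.
#   cache[module] != get_directory_hash(os.path.join(module_root, module))
# -1 / -2 signal the error conditions documented above.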
def Fract_Sec(s):
temp = float()
temp = float(s) / (60*60*24)
d = int(temp)
temp = (temp - d) * 24
h = int(temp)
temp = (temp - h) * 60
m = int(temp)
temp = (temp - m) * 60
sec = temp
return d,h,m,sec
#endef Fract_Sec
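# Illustrative example: Fract_Sec(90061.5) -> (1, 1, 1, ~1.5),
# i.e. 1 day, 1 hour, 1 minute and about 1.5 seconds.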
# Copyright (c) André Burgaud
# http://www.burgaud.com/bring-colors-to-the-windows-console-with-python/
if sys.platform == "win32":
from ctypes import windll, Structure, c_short, c_ushort, byref
SHORT = c_short
WORD = c_ushort
class COORD(Structure):
"""struct in wincon.h."""
_fields_ = [
("X", SHORT),
("Y", SHORT)]
class SMALL_RECT(Structure):
"""struct in wincon.h."""
_fields_ = [
("Left", SHORT),
("Top", SHORT),
("Right", SHORT),
("Bottom", SHORT)]
class CONSOLE_SCREEN_BUFFER_INFO(Structure):
"""struct in wincon.h."""
_fields_ = [
("dwSize", COORD),
("dwCursorPosition", COORD),
("wAttributes", WORD),
("srWindow", SMALL_RECT),
("dwMaximumWindowSize", COORD)]
# winbase.h
STD_INPUT_HANDLE = -10
STD_OUTPUT_HANDLE = -11
STD_ERROR_HANDLE = -12
# wincon.h
FOREGROUND_BLACK = 0x0000
FOREGROUND_BLUE = 0x0001
FOREGROUND_GREEN = 0x0002
FOREGROUND_CYAN = 0x0003
FOREGROUND_RED = 0x0004
FOREGROUND_MAGENTA = 0x0005
FOREGROUND_YELLOW = 0x0006
FOREGROUND_GREY = 0x0007
FOREGROUND_INTENSITY = 0x0008 # foreground color is intensified.
BACKGROUND_BLACK = 0x0000
BACKGROUND_BLUE = 0x0010
BACKGROUND_GREEN = 0x0020
BACKGROUND_CYAN = 0x0030
BACKGROUND_RED = 0x0040
BACKGROUND_MAGENTA = 0x0050
BACKGROUND_YELLOW = 0x0060
BACKGROUND_GREY = 0x0070
BACKGROUND_INTENSITY = 0x0080 # background color is intensified.
stdout_handle = windll.kernel32.GetStdHandle(STD_OUTPUT_HANDLE)
SetConsoleTextAttribute = windll.kernel32.SetConsoleTextAttribute
GetConsoleScreenBufferInfo = windll.kernel32.GetConsoleScreenBufferInfo
def get_text_attr():
"""Returns the character attributes (colors) of the console screen
buffer."""
csbi = CONSOLE_SCREEN_BUFFER_INFO()
GetConsoleScreenBufferInfo(stdout_handle, byref(csbi))
return csbi.wAttributes
def set_text_attr(color):
"""Sets the character attributes (colors) of the console screen
buffer. Color is a combination of foreground and background color,
foreground and background intensity."""
SetConsoleTextAttribute(stdout_handle, color)
###############################################################################
def find_bi_tools(work_drive):
"""Find BI tools."""
reg = winreg.ConnectRegistry(None, winreg.HKEY_CURRENT_USER)
try:
k = winreg.OpenKey(reg, r"Software\bohemia interactive\arma 3 tools")
arma3tools_path = winreg.QueryValueEx(k, "path")[0]
winreg.CloseKey(k)
except:
raise Exception("BadTools","Arma 3 Tools are not installed correctly or the P: drive needs to be created.")
addonbuilder_path = os.path.join(arma3tools_path, "AddonBuilder", "AddonBuilder.exe")
dssignfile_path = os.path.join(arma3tools_path, "DSSignFile", "DSSignFile.exe")
dscreatekey_path = os.path.join(arma3tools_path, "DSSignFile", "DSCreateKey.exe")
cfgconvert_path = os.path.join(arma3tools_path, "CfgConvert", "CfgConvert.exe")
if os.path.isfile(addonbuilder_path) and os.path.isfile(dssignfile_path) and os.path.isfile(dscreatekey_path) and os.path.isfile(cfgconvert_path):
return [addonbuilder_path, dssignfile_path, dscreatekey_path, cfgconvert_path]
else:
raise Exception("BadTools","Arma 3 Tools are not installed correctly or the P: drive needs to be created.")
def find_depbo_tools(regKey):
"""Use registry entries to find DePBO-based tools."""
stop = False
if regKey == "HKCU":
reg = winreg.ConnectRegistry(None, winreg.HKEY_CURRENT_USER)
stop = True
else:
reg = winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE)
try:
try:
k = winreg.OpenKey(reg, r"Software\Wow6432Node\Mikero\pboProject")
except FileNotFoundError:
k = winreg.OpenKey(reg, r"Software\Mikero\pboProject")
try:
pboproject_path = winreg.QueryValueEx(k, "exe")[0]
winreg.CloseKey(k)
print("Found pboproject.")
except:
print_error("ERROR: Could not find pboProject.")
try:
k = winreg.OpenKey(reg, r"Software\Wow6432Node\Mikero\rapify")
except FileNotFoundError:
k = winreg.OpenKey(reg, r"Software\Mikero\rapify")
try:
rapify_path = winreg.QueryValueEx(k, "exe")[0]
winreg.CloseKey(k)
print("Found rapify.")
except:
print_error("Could not find rapify.")
try:
k = winreg.OpenKey(reg, r"Software\Wow6432Node\Mikero\MakePbo")
except FileNotFoundError:
k = winreg.OpenKey(reg, r"Software\Mikero\MakePbo")
try:
makepbo_path = winreg.QueryValueEx(k, "exe")[0]
winreg.CloseKey(k)
print("Found makepbo.")
except:
print_error("Could not find makepbo.")
except:
if stop == True:
raise Exception("BadDePBO", "DePBO tools not installed correctly")
return -1
#Strip any quotations from the path due to a MikeRo tool bug which leaves a trailing space in some of its registry paths.
return [pboproject_path.strip('"'),rapify_path.strip('"'),makepbo_path.strip('"')]
def color(color):
"""Set the color. Works on Win32 and normal terminals."""
if sys.platform == "win32":
if color == "green":
set_text_attr(FOREGROUND_GREEN | get_text_attr() & 0x0070 | FOREGROUND_INTENSITY)
elif color == "yellow":
set_text_attr(FOREGROUND_YELLOW | get_text_attr() & 0x0070 | FOREGROUND_INTENSITY)
elif color == "red":
set_text_attr(FOREGROUND_RED | get_text_attr() & 0x0070 | FOREGROUND_INTENSITY)
elif color == "blue":
set_text_attr(FOREGROUND_BLUE | get_text_attr() & 0x0070 | FOREGROUND_INTENSITY)
elif color == "reset":
set_text_attr(FOREGROUND_GREY | get_text_attr() & 0x0070)
elif color == "grey":
set_text_attr(FOREGROUND_GREY | get_text_attr() & 0x0070)
else :
if color == "green":
sys.stdout.write('\033[92m')
elif color == "red":
sys.stdout.write('\033[91m')
elif color == "blue":
sys.stdout.write('\033[94m')
elif color == "reset":
sys.stdout.write('\033[0m')
def print_error(msg):
color("red")
print ("ERROR: {}".format(msg))
color("reset")
def print_green(msg):
color("green")
print(msg)
color("reset")
def print_blue(msg):
color("blue")
print(msg)
color("reset")
def print_yellow(msg):
color("yellow")
print(msg)
color("reset")
def copy_important_files(source_dir,destination_dir):
originalDir = os.getcwd()
# Copy importantFiles
try:
print_blue("\nSearching for important files in {}".format(source_dir))
print("Source_dir: {}".format(source_dir))
print("Destination_dir: {}".format(destination_dir))
for file in importantFiles:
filePath = os.path.join(module_root_parent, file)
# Take only file name for destination path (to put it into root of release dir)
if "\\" in file:
count = file.count("\\")
file = file.split("\\", count)[-1]
print_green("Copying file => {}".format(os.path.join(source_dir,file)))
shutil.copyfile(os.path.join(source_dir,filePath),os.path.join(destination_dir,file))
except:
print_error("COPYING IMPORTANT FILES.")
raise
#copy all extension dlls
try:
os.chdir(os.path.join(source_dir))
print_blue("\nSearching for DLLs in {}".format(os.getcwd()))
filenames = glob.glob("*.dll")
if not filenames:
print ("Empty SET")
for dll in filenames:
print_green("Copying dll => {}".format(os.path.join(source_dir,dll)))
if os.path.isfile(dll):
shutil.copyfile(os.path.join(source_dir,dll),os.path.join(destination_dir,dll))
except:
print_error("COPYING DLL FILES.")
raise
finally:
os.chdir(originalDir)
def copy_optionals_for_building(mod,pbos):
src_directories = os.listdir(optionals_root)
current_dir = os.getcwd()
print_blue("\nChecking Optionals folder...")
try:
#special server.pbo processing
files = glob.glob(os.path.join(release_dir, project, "optionals", "*.pbo"))
for file in files:
file_name = os.path.basename(file)
#print ("Adding the following file: {}".format(file_name))
pbos.append(file_name)
pbo_path = os.path.join(release_dir, project, "optionals", file_name)
sigFile_name = file_name +"."+ key_name + ".bisign"
sig_path = os.path.join(release_dir, project, "optionals", sigFile_name)
if (os.path.isfile(pbo_path)):
print("Moving {} for processing.".format(pbo_path))
shutil.move(pbo_path, os.path.join(release_dir, project, "addons", file_name))
if (os.path.isfile(sig_path)):
#print("Moving {} for processing.".format(sig_path))
shutil.move(sig_path, os.path.join(release_dir, project, "addons", sigFile_name))
except:
print_error("Error in moving")
raise
finally:
os.chdir(current_dir)
print("")
try:
for dir_name in src_directories:
mod.append(dir_name)
#userconfig requires special handling since it is not a PBO source folder.
#CfgConvert fails to build server.pbo if userconfig is not found in P:\
if (dir_name == "userconfig"):
if (os.path.exists(os.path.join(release_dir, project, "optionals", dir_name))):
shutil.rmtree(os.path.join(release_dir, project, "optionals", dir_name), True)
shutil.copytree(os.path.join(optionals_root,dir_name), os.path.join(release_dir, project, "optionals", dir_name))
destination = os.path.join(work_drive,dir_name)
else:
destination = os.path.join(module_root,dir_name)
print("Temporarily copying {} => {} for building.".format(os.path.join(optionals_root,dir_name),destination))
if (os.path.exists(destination)):
shutil.rmtree(destination, True)
shutil.copytree(os.path.join(optionals_root,dir_name), destination)
except:
print_error("Copy Optionals Failed")
raise
finally:
os.chdir(current_dir)
def cleanup_optionals(mod):
print("")
try:
for dir_name in mod:
#userconfig requires special handling since it is not a PBO source folder.
if (dir_name == "userconfig"):
destination = os.path.join(work_drive,dir_name)
else:
destination = os.path.join(module_root,dir_name)
print("Cleaning {}".format(destination))
try:
file_name = "{}{}.pbo".format(pbo_name_prefix,dir_name)
src_file_path = os.path.join(release_dir, project, "addons", file_name)
dst_file_path = os.path.join(release_dir, project, "optionals", file_name)
sigFile_name = "{}.{}.bisign".format(file_name,key_name)
src_sig_path = os.path.join(release_dir, project, "addons", sigFile_name)
dst_sig_path = os.path.join(release_dir, project, "optionals", sigFile_name)
if (os.path.isfile(src_file_path)):
#print("Preserving {}".format(file_name))
os.renames(src_file_path,dst_file_path)
if (os.path.isfile(src_sig_path)):
#print("Preserving {}".format(sigFile_name))
os.renames(src_sig_path,dst_sig_path)
except FileExistsError:
print_error("{} already exists".format(file_name))
continue
shutil.rmtree(destination)
except FileNotFoundError:
print_yellow("{} file not found".format(file_name))
except:
print_error("Cleaning Optionals Failed")
raise
def purge(dir, pattern, friendlyPattern="files"):
print_green("Deleting {} files from directory: {}".format(friendlyPattern,dir))
for f in os.listdir(dir):
if re.search(pattern, f):
os.remove(os.path.join(dir, f))
def build_signature_file(file_name):
global key
global dssignfile
global signature_blacklist
ret = 0
baseFile = os.path.basename(file_name)
#print_yellow("Sig_fileName: {}".format(baseFile))
if not (baseFile in signature_blacklist):
print("Signing with {}.".format(key))
ret = subprocess.call([dssignfile, key, file_name])
if ret == 0:
return True
else:
return False
def check_for_obsolete_pbos(addonspath, file):
module = file[len(pbo_name_prefix):-4]
if not os.path.exists(os.path.join(addonspath, module)):
return True
return False
def backup_config(module):
#backup original $PBOPREFIX$
global work_drive
global prefix
try:
configpath = os.path.join(work_drive, prefix, module, "$PBOPREFIX$")
if os.path.isfile(configpath):
shutil.copyfile(configpath, os.path.join(work_drive, prefix, module, "$PBOPREFIX$.backup"))
else:
print_error("$PBOPREFIX$ Does not exist for module: {}.".format(module))
except:
print_error("Error creating backup of $PBOPREFIX$ for module {}.".format(module))
return True
def addon_restore(modulePath):
#restore original $PBOPREFIX$
try:
if os.path.isfile(os.path.join(modulePath, "$PBOPREFIX$.backup")):
if os.path.isfile(os.path.join(modulePath, "$PBOPREFIX$")):
os.remove(os.path.join(modulePath, "$PBOPREFIX$"))
os.rename(os.path.join(modulePath, "$PBOPREFIX$.backup"), os.path.join(modulePath, "$PBOPREFIX$"))
except:
print_yellow("Some error occurred. Check your addon folder {} for integrity".format(modulePath))
return True
def get_project_version():
global project_version
versionStamp = project_version
#do the magic based on https://github.com/acemod/ACE3/issues/806#issuecomment-95639048
try:
        scriptModPath = os.path.join(work_drive, prefix, "main", "script_mod.hpp")
if os.path.isfile(scriptModPath):
f = open(scriptModPath, "r")
hpptext = f.read()
f.close()
if hpptext:
majorText = re.search(r"#define MAJOR (.*\b)", hpptext).group(1)
minorText = re.search(r"#define MINOR (.*\b)", hpptext).group(1)
patchlvlText = re.search(r"#define PATCHLVL (.*\b)", hpptext).group(1)
buildText = re.search(r"#define BUILD (.*\b)", hpptext).group(1)
if majorText:
versionStamp = "{major}.{minor}.{patchlvl}.{build}".format(major=majorText,minor=minorText,patchlvl=patchlvlText,build=buildText)
else:
print_error("A Critical file seems to be missing or inaccessible: {}".format(scriptModPath))
raise FileNotFoundError("File Not Found: {}".format(scriptModPath))
except Exception as e:
print_error("Get_project_version error: {}".format(e))
print_error("Check the integrity of the file: {}".format(scriptModPath))
versionStamp = project_version
print_error("Resetting to the default version stamp: {}".format(versionStamp))
input("Press Enter to continue...")
print("Resuming build...")
print_yellow("{} VERSION set to {}".format(project.lstrip("@").upper(),versionStamp))
project_version = versionStamp
return project_version
def replace_file(filePath, oldSubstring, newSubstring):
#Create temp file
fh, absPath = mkstemp()
with open(absPath,'w') as newFile:
with open(filePath) as oldFile:
for line in oldFile:
newFile.write(line.replace(oldSubstring, newSubstring))
newFile.close()
#Remove original file
os.remove(filePath)
#Move new file
shutil.move(absPath, filePath)
def set_version_in_files():
newVersion = project_version # MAJOR.MINOR.PATCH.BUILD
newVersionShort = newVersion[:-2] # MAJOR.MINOR.PATCH
# Regex patterns
pattern = re.compile(r"(\b[0\.-9]+\b\.[0\.-9]+\b\.[0\.-9]+\b\.[0\.-9]+)") # MAJOR.MINOR.PATCH.BUILD
patternShort = re.compile(r"(\b[0\.-9]+\b\.[0\.-9]+\b\.[0\.-9]+)") # MAJOR.MINOR.PATCH
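    # Illustrative: a stamp such as "3.1.2.46" is caught by the full pattern,
    # while "3.1.2" only matches patternShort.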
# Change versions in files containing version
for i in versionFiles:
filePath = os.path.join(module_root_parent, i)
try:
# Save the file contents to a variable if the file exists
if os.path.isfile(filePath):
f = open(filePath, "r+")
fileText = f.read()
f.close()
if fileText:
# Search and save version stamp, search short if long not found
versionFound = re.findall(pattern, fileText)
if not versionFound:
versionFound = re.findall(patternShort, fileText)
# Replace version stamp if any of the new version parts is higher than the one found
if versionFound:
# First item in the list findall returns
versionFound = versionFound[0]
# Use the same version length as the one found
if len(versionFound) == len(newVersion):
newVersionUsed = newVersion
if len(versionFound) == len(newVersionShort):
newVersionUsed = newVersionShort
# Print change and modify the file if changed
if versionFound != newVersionUsed:
print_green("Changing version {} => {} in {}".format(versionFound, newVersionUsed, filePath))
replace_file(filePath, versionFound, newVersionUsed)
except WindowsError as e:
# Temporary file is still "in use" by Python, pass this exception
pass
except Exception as e:
print_error("set_version_in_files error: {}".format(e))
raise
return True
def stash_version_files_for_building():
try:
for file in versionFiles:
filePath = os.path.join(module_root_parent, file)
# Take only file name for stash location if in subfolder (otherwise it gets removed when removing folders from release dir)
if "\\" in file:
count = file.count("\\")
file = file.split("\\", count)[-1]
stashPath = os.path.join(release_dir, file)
print("Temporarily stashing {} => {}.bak for version update".format(filePath, stashPath))
shutil.copy(filePath, "{}.bak".format(stashPath))
except:
print_error("Stashing version files failed")
raise
# Set version
set_version_in_files()
return True
def restore_version_files():
try:
for file in versionFiles:
filePath = os.path.join(module_root_parent, file)
# Take only file name for stash path if in subfolder (otherwise it gets removed when removing folders from release dir)
if "\\" in file:
count = file.count("\\")
file = file.split("\\", count)[-1]
stashPath = os.path.join(release_dir, file)
print("Restoring {}".format(filePath))
shutil.move("{}.bak".format(stashPath), filePath)
except:
print_error("Restoring version files failed")
raise
return True
def get_private_keyname(commitID,module="main"):
global pbo_name_prefix
aceVersion = get_project_version()
keyName = str("{prefix}{version}-{commit_id}".format(prefix=pbo_name_prefix,version=aceVersion,commit_id=commitID))
return keyName
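# Illustrative result: with pbo_name_prefix "bc_", a project version of
# "3.0.0.1" and commit id "1a2b3c4d", the key name is "bc_3.0.0.1-1a2b3c4d".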
def get_commit_ID():
# Get latest commit ID
global make_root
curDir = os.getcwd()
try:
gitpath = os.path.join(os.path.dirname(make_root), ".git")
assert os.path.exists(gitpath)
os.chdir(make_root)
commit_id = subprocess.check_output(["git", "rev-parse", "HEAD"])
commit_id = str(commit_id, "utf-8")[:8]
except:
print_error("FAILED TO DETERMINE COMMIT ID.")
        print_yellow(r"Verify that \GIT\BIN or \GIT\CMD is in your system path or user path.")
commit_id = "NOGIT"
raise
finally:
pass
os.chdir(curDir)
print_yellow("COMMIT ID set to {}".format(commit_id))
return commit_id
def version_stamp_pboprefix(module,commitID):
### Update pboPrefix with the correct version stamp. Use commit_id as the build number.
#This function will not handle any $PBOPREFIX$ backup or cleanup.
global work_drive
global prefix
configpath = os.path.join(work_drive, prefix, module, "$PBOPREFIX$")
try:
f = open(configpath, "r")
configtext = f.read()
f.close()
if configtext:
if re.search(r"version=(.*?)$", configtext, re.DOTALL):
if configtext:
configtext = re.sub(r"version=(.*?)$", "version={}\n".format(commitID), configtext, flags=re.DOTALL)
f = open(configpath, "w")
f.write(configtext)
f.close()
else:
os.remove(os.path.join(work_drive, prefix, module, "$PBOPREFIX$"))
os.rename(os.path.join(work_drive, prefix, module, "$PBOPREFIX$.backup"), os.path.join(work_drive, prefix, module, "$PBOPREFIX$"))
else:
if configtext:
#append version info
f = open(configpath, "a")
f.write("\nversion = {}".format(commitID))
f.close()
else:
os.remove(os.path.join(work_drive, prefix, module, "$PBOPREFIX$"))
os.rename(os.path.join(work_drive, prefix, module, "$PBOPREFIX$.backup"), os.path.join(work_drive, prefix, module, "$PBOPREFIX$"))
except Exception as e:
print_error("Failed to include build number: {}".format(e))
return False
return True
###############################################################################
def main(argv):
"""Build an Arma addon suite in a directory from rules in a make.cfg file."""
print_blue("\nmake.py for Arma, modified for Advanced Combat Environment v{}".format(__version__))
global project_version
global arma3tools_path
global work_drive
global module_root
global make_root
global release_dir
global module_root_parent
global optionals_root
global key_name
global key
global dssignfile
global prefix
global pbo_name_prefix
global ciBuild
if sys.platform != "win32":
print_error("Non-Windows platform (Cygwin?). Please re-run from cmd.")
sys.exit(1)
reg = winreg.ConnectRegistry(None, winreg.HKEY_CURRENT_USER)
try:
k = winreg.OpenKey(reg, r"Software\bohemia interactive\arma 3 tools")
arma3tools_path = winreg.QueryValueEx(k, "path")[0]
winreg.CloseKey(k)
except:
raise Exception("BadTools","Arma 3 Tools are not installed correctly or the P: drive needs to be created.")
# Default behaviors
test = False # Copy to Arma 3 directory?
arg_modules = False # Only build modules on command line?
use_pboproject = True # Default to pboProject build tool
make_target = "DEFAULT" # Which section in make.cfg to use for the build
new_key = True # Make a new key and use it to sign?
quiet = False # Suppress output from build tool?
# Parse arguments
if "help" in argv or "-h" in argv or "--help" in argv:
print ("""
make.py [help] [test] [force] [key <name>] [target <name>] [release <version>]
[module name] [module name] [...]
test -- Copy result to Arma 3.
release <version> -- Make archive with <version>.
force -- Ignore cache and build all.
checkexternal -- Check External Files
target <name> -- Use rules in make.cfg under heading [<name>] rather than
default [Make]
key <name> -- Use key in working directory with <name> to sign. If it does not
exist, create key.
quiet -- Suppress command line output from build tool.
If module names are specified, only those modules will be built.
Examples:
make.py force test
Build all modules (ignoring cache) and copy the mod folder to the Arma 3
directory.
make.py mymodule_gun
Only build the module named 'mymodule_gun'.
make.py force key MyNewKey release 1.0
Build all modules (ignoring cache), sign them with NewKey, and pack them
into a zip file for release with version 1.0.
If a file called $NOBIN$ is found in the module directory, that module will not be binarized.
See the make.cfg file for additional build options.
""")
sys.exit(0)
if "force" in argv:
argv.remove("force")
force_build = True
else:
force_build = False
if "test" in argv:
test = True
argv.remove("test")
if "release" in argv:
make_release_zip = True
release_version = argv[argv.index("release") + 1]
argv.remove(release_version)
argv.remove("release")
else:
make_release_zip = False
release_version = project_version
if "target" in argv:
make_target = argv[argv.index("target") + 1]
argv.remove("target")
argv.remove(make_target)
force_build = True
if "key" in argv:
new_key = True
key_name = argv[argv.index("key") + 1]
argv.remove("key")
argv.remove(key_name)
if "quiet" in argv:
quiet = True
argv.remove("quiet")
if "checkexternal" in argv:
argv.remove("checkexternal")
check_external = True
else:
check_external = False
if "version" in argv:
argv.remove("version")
version_update = True
else:
version_update = False
if "--ci" in argv:
argv.remove("--ci")
ciBuild = True
print_yellow("\nCheck external references is set to {}".format(str(check_external)))
# Get the directory the make script is in.
make_root = os.path.dirname(os.path.realpath(__file__))
make_root_parent = os.path.abspath(os.path.join(os.getcwd(), os.pardir))
os.chdir(make_root)
cfg = configparser.ConfigParser()
try:
cfg.read(os.path.join(make_root, "make.cfg"))
# Project name (with @ symbol)
project = cfg.get(make_target, "project", fallback="@"+os.path.basename(os.getcwd()))
# BI Tools work drive on Windows
work_drive = cfg.get(make_target, "work_drive", fallback="P:\\")
# Private key path
key = cfg.get(make_target, "key", fallback=None)
# Private key creation directory
private_key_path = cfg.get(make_target, "private_key_path", fallback=os.path.join(work_drive, "private_keys"))
# Project prefix (folder path)
prefix = cfg.get(make_target, "prefix", fallback="")
# Release archive prefix
zipPrefix = cfg.get(make_target, "zipPrefix", fallback=project.lstrip("@").lower())
# Should we autodetect modules on a complete build?
module_autodetect = cfg.getboolean(make_target, "module_autodetect", fallback=True)
# Manual list of modules to build for a complete build
modules = cfg.get(make_target, "modules", fallback=None)
# Parse it out
if modules:
modules = [x.strip() for x in modules.split(',')]
else:
modules = []
# List of directories to ignore when detecting
ignore = [x.strip() for x in cfg.get(make_target, "ignore", fallback="release").split(',')]
# Which build tool should we use?
build_tool = cfg.get(make_target, "build_tool", fallback="addonbuilder").lower()
# Release/build directory, relative to script dir
release_dir = cfg.get(make_target, "release_dir", fallback="release")
#Directory to copy the final built PBO's for a test run.
test_dir = cfg.get(make_target, "test_dir", fallback=os.path.join(os.environ["USERPROFILE"],r"documents\Arma 3"))
# Project PBO file prefix (files are renamed to prefix_name.pbo)
pbo_name_prefix = cfg.get(make_target, "pbo_name_prefix", fallback=None)
# Project module Root
module_root_parent = os.path.abspath(os.path.join(os.path.join(work_drive, prefix), os.pardir))
module_root = cfg.get(make_target, "module_root", fallback=os.path.join(make_root_parent, "addons"))
optionals_root = os.path.join(module_root_parent, "optionals")
extensions_root = os.path.join(module_root_parent, "extensions")
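# Stamp the build with the current git commit; the commit ID also determines the name of the signing key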
commit_id = get_commit_ID()
key_name = versionStamp = get_private_keyname(commit_id)
print_green ("module_root: {}".format(module_root))
if (os.path.isdir(module_root)):
os.chdir(module_root)
else:
print_error ("Directory {} does not exist.".format(module_root))
sys.exit()
if (os.path.isdir(optionals_root)):
print_green ("optionals_root: {}".format(optionals_root))
else:
print_error ("Directory {} does not exist.".format(optionals_root))
sys.exit()
print_green ("release_dir: {}".format(release_dir))
except:
print_error("Could not parse make.cfg.")
sys.exit(1)
# See if we have been given specific modules to build from command line.
if len(argv) > 1 and not make_release_zip:
arg_modules = True
modules = [a for a in argv[1:] if a[0] != "-"]
# Find the tools we need.
try:
tools = find_bi_tools(work_drive)
addonbuilder = tools[0]
dssignfile = tools[1]
dscreatekey = tools[2]
cfgconvert = tools[3]
except:
print_error("Arma 3 Tools are not installed correctly or the P: drive has not been created.")
sys.exit(1)
if build_tool == "pboproject":
try:
depbo_tools = find_depbo_tools("HKLM")
if depbo_tools == -1:
depbo_tools = find_depbo_tools("HKCU")
pboproject = depbo_tools[0]
rapifyTool = depbo_tools[1]
makepboTool = depbo_tools[2]
except:
print_error("Could not find dePBO tools. Download the needed tools from: https://dev.withsix.com/projects/mikero-pbodll/files")
sys.exit(1)
# Try to open and deserialize build cache file.
try:
cache = {}
with open(os.path.join(make_root, "make.cache"), 'r') as f:
cache_raw = f.read()
cache = json.loads(cache_raw)
except:
print ("No cache found.")
cache = {}
# Check the build version (from main) with cached version - forces a full rebuild when version changes
project_version = get_project_version()
cacheVersion = "None"
if 'cacheVersion' in cache:
cacheVersion = cache['cacheVersion']
if (project_version != cacheVersion):
cache = {}
print("Reseting Cache {0} to New Version {1}".format(cacheVersion, project_version))
cache['cacheVersion'] = project_version
if not os.path.isdir(os.path.join(release_dir, project, "addons")):
try:
os.makedirs(os.path.join(release_dir, project, "addons"))
except:
print_error("Cannot create release directory")
raise
if not os.path.isdir(os.path.join(release_dir, project, "keys")):
try:
os.makedirs(os.path.join(release_dir, project, "keys"))
except:
print_error("Cannot create release directory")
raise
# Update version stamp in all files that contain it
# Update version only for release if full update not requested (backup and restore files)
print_blue("\nChecking for obsolete version numbers...")
if not version_update:
stash_version_files_for_building()
else:
# Set version
set_version_in_files()
print("Version in files has been changed, make sure you commit and push the updates!")
amountOfBuildsFailed = 0
namesOfBuildsFailed = []
try:
# Temporarily copy optionals_root for building. They will be removed later.
optionals_modules = []
optional_files = []
copy_optionals_for_building(optionals_modules,optional_files)
# Get list of subdirs in make root.
dirs = next(os.walk(module_root))[1]
# Autodetect what directories to build.
if module_autodetect and not arg_modules:
modules = []
for path in dirs:
# Any dir that has a config.cpp in its root is an addon to build.
config_path = os.path.join(path, 'config.cpp')
if os.path.isfile(config_path) and not path in ignore:
modules.append(path)
# Make the key specified from command line if necessary.
if new_key:
if not os.path.isfile(os.path.join(private_key_path, key_name + ".biprivatekey")):
print_yellow("\nRequested key does not exist.")
try:
os.makedirs(private_key_path)
except:
pass
curDir = os.getcwd()
os.chdir(private_key_path)
ret = subprocess.call([dscreatekey, key_name]) # Created in make_root
os.chdir(curDir)
if ret == 0:
print_green("Created: {}".format(os.path.join(private_key_path, key_name + ".biprivatekey")))
print("Removing any old signature keys...")
purge(os.path.join(module_root, release_dir, project, "addons"), "^.*\.bisign$","*.bisign")
purge(os.path.join(module_root, release_dir, project, "optionals"), "^.*\.bisign$","*.bisign")
purge(os.path.join(module_root, release_dir, project, "keys"), "^.*\.bikey$","*.bikey")
else:
print_error("Failed to create key!")
else:
print_green("\nNOTE: Using key {}".format(os.path.join(private_key_path, key_name + ".biprivatekey")))
try:
print("Copying public key to release directory.")
try:
os.makedirs(os.path.join(module_root, release_dir, project, "keys"))
except:
pass
# Use biKeyNameAbrev to attempt to minimize problems from this BI Bug REFERENCE: http://feedback.arma3.com/view.php?id=22133
biKeyNameAbrev = key_name.split("-")[0]
shutil.copyfile(os.path.join(private_key_path, key_name + ".bikey"), os.path.join(module_root, release_dir, project, "keys", "{}.bikey".format(biKeyNameAbrev)))
except:
print_error("Could not copy key to release directory.")
raise
key = os.path.join(private_key_path, "{}.biprivatekey".format(key_name))
# Remove any obsolete files.
print_blue("\nChecking for obsolete files...")
obsolete_check_path = os.path.join(module_root, release_dir, project,"addons")
for file in os.listdir(obsolete_check_path):
if (file.endswith(".pbo") and os.path.isfile(os.path.join(obsolete_check_path,file))):
if check_for_obsolete_pbos(module_root, file):
fileName = os.path.splitext(file)[0]
print_yellow("Removing obsolete file => {}".format(file))
purge(obsolete_check_path, "{}\..".format(fileName), "{}.*".format(fileName))
obsolete_check_path = os.path.join(module_root, release_dir, project)
for file in os.listdir(obsolete_check_path):
if (file.endswith(".dll") and os.path.isfile(os.path.join(obsolete_check_path,file))):
if check_for_obsolete_pbos(extensions_root, file):
fileName = os.path.splitext(file)[0]
print_yellow("Removing obsolete file => {}".format(file))
try:
os.remove(os.path.join(obsolete_check_path,file))
except:
print_error("\nFailed to delete {}".format(os.path.join(obsolete_check_path,file)))
pass
# For each module, prep files and then build.
print_blue("\nBuilding...")
for module in modules:
print_green("\nMaking {}".format(module + "-"*max(1, (60-len(module)))))
missing = False
sigMissing = False
# Cache check
if module in cache:
old_sha = cache[module]
else:
old_sha = ""
# Hash the module
new_sha = get_directory_hash(os.path.join(module_root, module))
# Is the pbo or sig file missing?
missing = not os.path.isfile(os.path.join(release_dir, project, "addons", "{}{}.pbo".format(pbo_name_prefix,module)))
sigFile = "{}{}.pbo.{}.bisign".format(pbo_name_prefix,module,key_name)
sigMissing = not os.path.isfile(os.path.join(release_dir, project, "addons", sigFile))
if missing:
print_yellow("Missing PBO file {}{}.pbo. Building...".format(pbo_name_prefix,module))
# Check if it needs to be rebuilt
# print ("Hash:", new_sha)
if old_sha == new_sha and not missing:
if not force_build:
print("Module has not changed.")
if sigMissing:
if key:
print("Missing Signature key {}".format(sigFile))
build_signature_file(os.path.join(module_root, release_dir, project, "addons", "{}{}.pbo".format(pbo_name_prefix,module)))
# Skip everything else
continue
# Only do this if the project isn't stored directly on the work drive.
# Split the path at the drive name and see if they are on the same drive (usually P:)
if os.path.splitdrive(module_root)[0] != os.path.splitdrive(work_drive)[0]:
try:
# Remove old work drive version (ignore errors)
shutil.rmtree(os.path.join(work_drive, prefix, module), True)
# Copy module to the work drive
shutil.copytree(module, os.path.join(work_drive, prefix, module))
except:
print_error("ERROR: Could not copy module to work drive. Does the module exist?")
input("Press Enter to continue...")
print("Resuming build...")
continue
#else:
#print("WARNING: Module is stored on work drive ({}).".format(work_drive))
try:
# Remove the old pbo, key, and log
old = os.path.join(module_root, release_dir, project, "addons", "{}{}".format(pbo_name_prefix,module)) + "*"
files = glob.glob(old)
for f in files:
os.remove(f)
if pbo_name_prefix:
old = os.path.join(module_root, release_dir, project, "addons", "{}{}".format(pbo_name_prefix,module)) + "*"
files = glob.glob(old)
for f in files:
os.remove(f)
except:
print_error("ERROR: Could not remove old files for this module from the release directory.")
input("Press Enter to continue...")
print("Resuming build...")
continue
# Build the module into a pbo
print_blue("Building: {}".format(os.path.join(work_drive, prefix, module)))
print_blue("Destination: {}".format(os.path.join(module_root, release_dir, project, "addons")))
# Make destination folder (if needed)
try:
os.makedirs(os.path.join(module_root, release_dir, project, "addons"))
except:
pass
# Run build tool
build_successful = False
if build_tool == "pboproject":
try:
nobinFilePath = os.path.join(work_drive, prefix, module, "$NOBIN$")
backup_config(module)
version_stamp_pboprefix(module,commit_id)
if os.path.isfile(nobinFilePath):
print_green("$NOBIN$ Found. Proceeding with non-binarizing!")
cmd = [makepboTool, "-P","-A","-L","-G","-X=*.backup", os.path.join(work_drive, prefix, module),os.path.join(module_root, release_dir, project,"addons")]
else:
if check_external:
cmd = [pboproject, "-P", os.path.join(work_drive, prefix, module), "+Engine=Arma3", "-S","+Noisy", "+X", "+Clean", "+Mod="+os.path.join(module_root, release_dir, project), "-Key"]
else:
cmd = [pboproject, "-P", os.path.join(work_drive, prefix, module), "+Engine=Arma3", "-S","+Noisy", "-X", "+Clean", "+Mod="+os.path.join(module_root, release_dir, project), "-Key"]
color("grey")
if quiet:
devnull = open(os.devnull, 'w')
ret = subprocess.call(cmd, stdout=devnull)
devnull.close()
else:
ret = subprocess.call(cmd)
color("reset")
if ret == 0:
print_green("pboProject return code == {}".format(str(ret)))
# Prettyprefix rename the PBO if requested.
if pbo_name_prefix:
try:
os.rename(os.path.join(module_root, release_dir, project, "addons", "{}.pbo".format(module)), os.path.join(module_root, release_dir, project, "addons", "{}{}.pbo".format(pbo_name_prefix,module)))
except:
print_error("Could not rename built PBO with prefix.")
raise
# Sign result
if (key and not "{}{}.pbo".format(pbo_name_prefix,module) in signature_blacklist):
print("Signing with {}.".format(key))
if pbo_name_prefix:
ret = subprocess.call([dssignfile, key, os.path.join(module_root, release_dir, project, "addons", "{}{}.pbo".format(pbo_name_prefix,module))])
else:
ret = subprocess.call([dssignfile, key, os.path.join(module_root, release_dir, project, "addons", "{}.pbo".format(module))])
if ret == 0:
build_successful = True
else:
build_successful = True
if not build_successful:
print_error("pboProject return code == {}".format(str(ret)))
print_error("Module not successfully built/signed. Check your {}temp\{}_packing.log for more info.".format(work_drive,module))
print ("Resuming build...")
amountOfBuildsFailed += 1
namesOfBuildsFailed.append("{}".format(module))
continue
# Back to the root
os.chdir(module_root)
except:
print_error("Could not run pboProject.")
input("Press Enter to continue...")
print ("Resuming build...")
continue
finally:
addon_restore(os.path.join(work_drive, prefix, module))
elif build_tool== "addonbuilder":
# Detect $NOBIN$ and do not binarize if found.
if os.path.isfile(os.path.join(work_drive, prefix, module, "$NOBIN$")):
do_binarize = False
print("$NOBIN$ file found in module, packing only.")
else:
do_binarize = True
try:
# Call AddonBuilder
os.chdir("P:\\")
cmd = [addonbuilder, os.path.join(work_drive, prefix, module), os.path.join(make_root, release_dir, project, "addons"), "-clear", "-project="+work_drive]
if not do_binarize:
cmd.append("-packonly")
if quiet:
previousDirectory = os.getcwd()
os.chdir(arma3tools_path)
devnull = open(os.devnull, 'w')
ret = subprocess.call(cmd, stdout=devnull)
devnull.close()
os.chdir(previousDirectory)
else:
previousDirectory = os.getcwd()
os.chdir(arma3tools_path)
print_error("Current directory - {}".format(os.getcwd()))
ret = subprocess.call(cmd)
os.chdir(previousDirectory)
print_error("Current directory - {}".format(os.getcwd()))
color("reset")
print_green("completed")
# Prettyprefix rename the PBO if requested.
if pbo_name_prefix:
try:
os.rename(os.path.join(make_root, release_dir, project, "addons", "{}.pbo".format(module)), os.path.join(make_root, release_dir, project, "addons", "{}{}.pbo".format(pbo_name_prefix,module)))
except:
print_error("Could not rename built PBO with prefix.")
raise
if ret == 0:
# Sign result
#print_yellow("Sig_fileName: ace_{}.pbo".format(module))
if (key and not "{}{}.pbo".format(pbo_name_prefix,module) in signature_blacklist) :
print("Signing with {}.".format(key))
if pbo_name_prefix:
ret = subprocess.call([dssignfile, key, os.path.join(make_root, release_dir, project, "addons","{}{}.pbo".format(pbo_name_prefix,module))])
else:
ret = subprocess.call([dssignfile, key, os.path.join(make_root, release_dir, project, "addons", "{}.pbo".format(module))])
if ret == 0:
build_successful = True
else:
build_successful = True
if not build_successful:
print_error("Module not successfully built. Check your {}temp\{}_packing.log for more info.".format(work_drive,module))
# Back to the root
os.chdir(make_root)
except:
print_error("Could not run Addon Builder.")
input("Press Enter to continue...")
print ("Resuming build...")
continue
else:
print_error("Unknown build_tool {}!".format(build_tool))
# Update the hash for a successfully built module
if build_successful:
cache[module] = new_sha
except Exception as e:
print_yellow("Cancel or some error detected: {}".format(e))
finally:
copy_important_files(module_root_parent,os.path.join(release_dir, project))
cleanup_optionals(optionals_modules)
if not version_update:
restore_version_files()
# Done building all modules!
# Write out the cache state
cache_out = json.dumps(cache)
with open(os.path.join(make_root, "make.cache"), 'w') as f:
f.write(cache_out)
# Delete the pboproject temp files if building a release.
if make_release_zip and build_tool == "pboproject":
try:
shutil.rmtree(os.path.join(release_dir, project, "temp"), True)
except:
print_error("ERROR: Could not delete pboProject temp files.")
# Make release
if make_release_zip:
release_name = "{}_{}".format(zipPrefix, release_version)
print_blue("\nMaking release: {}.zip".format(release_name))
try:
# Delete all log files
for root, dirs, files in os.walk(os.path.join(release_dir, project, "addons")):
for currentFile in files:
if currentFile.lower().endswith("log"):
os.remove(os.path.join(root, currentFile))
# Remove all zip files from release folder to prevent zipping the zip
for file in os.listdir(release_dir):
if file.endswith(".zip"):
os.remove(os.path.join(release_dir, file))
# Create a zip with the contents of release folder in it
release_zip = shutil.make_archive("{}".format(release_name), "zip", release_dir)
# Move release zip to release folder
shutil.copy(release_zip, release_dir)
os.remove(release_zip)
except:
print_error("Could not make release.")
raise
# Copy to Arma 3 folder for testing
if test:
print_blue("\nCopying to Arma 3.")
if sys.platform == "win32":
reg = winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE)
try:
k = winreg.OpenKey(reg, r"SOFTWARE\Wow6432Node\Bohemia Interactive\Arma 3")
a3_path = winreg.EnumValue(k, 1)[1]
winreg.CloseKey(k)
except:
print_error("Could not find Arma 3's directory in the registry.")
else:
a3_path = cygwin_a3path
print_yellow("Path from the registry => {}".format(a3_path))
a3_path = test_dir
print_yellow("Copying build files to {}".format(a3_path))
if os.path.exists(a3_path):
try:
shutil.rmtree(os.path.join(a3_path, project), True)
shutil.copytree(os.path.join(module_root, release_dir, project), os.path.join(a3_path, project))
except:
print_error("Could not copy files. Is Arma 3 running?")
if amountOfBuildsFailed > 0:
print_error("Build failed. {} pbos failed.".format(amountOfBuildsFailed))
for failedModuleName in namesOfBuildsFailed:
print("- {} failed.".format(failedModuleName))
sys.exit(1)
else:
print_green("\Completed with 0 errors.")
if __name__ == "__main__":
start_time = timeit.default_timer()
main(sys.argv)
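# Convert the elapsed wall-clock time into days/hours/minutes/seconds for the summary line below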
d,h,m,s = Fract_Sec(timeit.default_timer() - start_time)
print("\nTotal Program time elapsed: {0:2}h {1:2}m {2:4.5f}s".format(h,m,s))
if ciBuild:
sys.exit(0)
input("Press Enter to continue...")
| gpl-3.0 | -626,459,769,304,963,800 | 37.780471 | 227 | 0.56901 | false |
waidyanatha/pingsam | visualize.py | 1 | 8668 | import numpy as np
import datetime as dtm
from dateutil import rrule
import pandas as pd
import csv
import matplotlib.pylab as plt
import sys, os
#lets first create the csv file
#
#change this to actual csv file name
pingfile="weeklylogs.csv"
#parameters: @plotinterval = 10 minutes
plotinterval = 10
#csv file columns
col_seq=0
col_pingtime=1
col_domain=2
col_state=3
#
########## FUNCTION TO SYNTHESIZE MISSING DATA POINTS ##########
#
def synth_data(synthdf, interval):
#create a temporary dataframe to hold the synthesized data
tmpdf = pd.DataFrame(columns=['seqnum', 'pingdatetime', 'domain', 'statenow'])
#first check we have a non-empty dataframe
if not synthdf.empty:
#pick the originating TS data point
synthdf = synthdf.sort_values(by='pingdatetime')
#check if the first timestamp starts at 00:00:00; if not, add a dummy record
startseqnum = synthdf.index[0]
startpingdt = synthdf.iloc[0]['pingdatetime']
startdomain = synthdf.iloc[0]['domain']
startstate = synthdf.iloc[0]['statenow']
#loop through each TS data point to synthetically add new TS points
#to fill the gap between two consecutive data points
for i, row in synthdf.iterrows():
#initiate the synthesized data point to the origin
nextdatapoint = 0
pingdt_plus_interval = startpingdt
#stepwise loop to add synthesized points from the relative origin to the next TS data point
while row['pingdatetime'] > pingdt_plus_interval + dtm.timedelta(minutes = interval) :
nextdatapoint += 1
pingdt_plus_interval = startpingdt + dtm.timedelta(minutes = nextdatapoint*interval)
tmpdf.loc[len(tmpdf.index)] = [startseqnum,pingdt_plus_interval,startdomain,startstate]
startseqnum = i
startpingdt = row['pingdatetime']
startstate = row['statenow']
#after looping through all the TS data points, check whether a non-empty dataframe was created
if not tmpdf.empty:
tmpdf = pd.concat([tmpdf,synthdf])
tmpdf = tmpdf.set_index('seqnum')
#whether empty or not, return a dataframe with synthesized TS data
tmpdf = tmpdf.dropna(thresh=2)
return tmpdf
#
########## PLOT HISTOGRAM TO FIGURE ##########
#
def plot_hist_to_fig(histdf, dname):
#get date range of the plot to use in suptitle
begdt = histdf['pingdatetime'].min().date()
findt = histdf['pingdatetime'].max().date()
#add a date-only column derived from the ping timestamp
histdf['pingdate'] = histdf['pingdatetime'].apply(lambda x: x.date())
downdf = pd.DataFrame(columns=['xlabel','pingdate', 'downcount'])
datelist = list(histdf.pingdate.unique())
for uniquedate in datelist:
xlabel = str('{:02d}'.format(uniquedate.month))+'-'+str('{:02d}'.format(uniquedate.day))
downcount = len(histdf[(histdf.statenow == '0') & (histdf.pingdate == uniquedate)])
totalcount = len(histdf[(histdf.pingdate == uniquedate)])
downdf.loc[len(downdf.index)] = [xlabel, uniquedate,100*downcount//totalcount]
downdf = downdf.as_matrix()
#x-axis labels and values come from the per-day summary dataframe
xl = np.array(downdf[:,0])
x = np.array(downdf[:,1])
#y-axis values are the daily failure percentages
y = np.array(downdf[:,2])
histfig, ax = plt.subplots()
ax.bar(x,y,color='red',width=0.5, align="center")
#to give enough spacing for the suptitle; otherwise overlaps with title
histfig.subplots_adjust(top=0.87)
# plt.figure(figsize=(8,6), dpi=150)
#beautify the plot and name the labels, titles
ax.set_title('Percentage of time Server Failed each Day', fontsize=14, fontweight='bold', color='gray')
histfig.suptitle(dname+'\n'+str(begdt)+' --- '+str(findt), fontsize=10, color='blue')
ax.set_xlabel('Month-Day', fontsize=12, color='gray')
ax.set_ylabel('Failure Rate (%)', fontsize=12, color='gray')
plt.yticks(fontsize=10, color='gray', rotation='horizontal')
plt.xticks(x, xl, fontsize=10, color='gray', rotation='vertical')
ax.grid(True)
return histfig
#
########## PLOT DOWN TIMES FREQUENCY TO FIGURE ##########
#
def plot_freq_to_fig(plotdf, dname):
#get date range of the plot to use in suptitle
begdt = plotdf['pingdatetime'].min().date()
findt = plotdf['pingdatetime'].max().date()
failrate = 100-(sum(100*plotdf['statenow'].astype(int))/len(plotdf))
failrate = failrate.astype(float)
#create a new x-axis index using dataframe index; starting from 1 instead of 0
plotdf['xvalues'] = range(1,len(plotdf)+1)
plotdf = plotdf.as_matrix()
#x-axis values are in the newly generated xvalues column
x = np.array(plotdf[:,3].astype(int))
#y-axis values (1 or 0) are in the dataframe statenow column
y = np.array(plotdf[:,2].astype(int))
#setup to catputure the plot into a figure
plotfig = plt.figure(num=None, figsize=(8, 6), dpi=150, facecolor='y', edgecolor='k')
ax = plotfig.add_subplot(311)
ax.fill_between(x, 0, y, color='green')
ax.plot(x,y,color='green',lw=2)
#to give enough spacing for the suptitle; otherwise overlaps with title
plotfig.subplots_adjust(top=0.87)
#beautify the plot and name the labels, titles
ax.set_title('Frequency of Server Access Failure ('+str(failrate)+'%)', fontsize=14, fontweight='bold', color='gray')
plotfig.suptitle(dname+'\n'+str(begdt)+' --- '+str(findt), fontsize=10, color='blue')
ax.set_xlabel('Attempted Machine Accesss Times', fontsize=12, color='gray')
ax.set_ylabel('Machine State', fontsize=12, color='gray')
plt.yticks(y, ['UP','DOWN'], fontsize=10, color='gray', rotation='vertical')
plt.xticks(fontsize=10, color='gray', rotation='horizontal')
plt.ylim(0,1.1)
plt.xlim(0,x.max()+10)
ax.grid(True)
return plotfig
#
############# MAIN ################################
#
print("Complile data from file the log files")
#os.system('./analytics.sh')
print("Reading data from file "+pingfile)
with open(pingfile, 'r') as f:
data = [i.split(",") for i in f.read().split()]
df = pd.DataFrame(data, columns=['seqnum', 'pingdatetime', 'domain', 'statenow'])
for index, row in df.iterrows():
row[col_pingtime] = dtm.datetime.strptime(row[col_pingtime], '%Y-%m-%d:%H:%M:%S')
#to avoid duplicate data and to reflect ping time to be on the minute
row[col_pingtime] = row[col_pingtime].replace(second = 0)
#format pingdatetime as proper datetime, set it as the indext and then order them
df['pingdatetime'] = pd.to_datetime(df['pingdatetime'])
df = df.sort_values(by='pingdatetime')
df = df.set_index('seqnum')
#begin processing for each unique domain
print(str(len(df.index))+" data rows added to the dataframe, ready for processing ...")
print ('-----------------------------------------------------')
for thedomain in df.domain.unique():
#insert synthesized data points
dompingdf = df[df['domain']==thedomain]
print("Begin data synthesis for "+thedomain+" with data rows = "+str(len(dompingdf.index)))
amenddf = synth_data(dompingdf,plotinterval)
if not amenddf.empty:
#output the synthesized dataframe to the output file
print(str(len(amenddf.index))+" data rows of synthesized data added to "+thedomain)
amenddf['pingdatetime'] = pd.to_datetime(amenddf.pingdatetime)
amenddf = amenddf.sort_values(by='pingdatetime')
amenddf.index = range(0,len(amenddf))
print('writing data to file: ./data/syndata_'+thedomain+'.csv')
amenddf.to_csv('./data/syndata_'+thedomain+'.csv')
#plot timeseries with function (need to add if conditions to check if function returns valid fig)
fig = plot_freq_to_fig(amenddf, thedomain)
fig.savefig('./plots/freqplot_'+thedomain+'.png', bbox_inches='tight')
print ('frequency plot created in file: ./plots/freqplot_'+thedomain+'.png')
fig = plot_hist_to_fig(amenddf, thedomain)
fig.savefig('./plots/histplot_'+thedomain+'.png', bbox_inches='tight')
print ('histogram plot created in file: ./plots/histplot_'+thedomain+'.png')
print ('process complete for '+thedomain)
print ('-----------------------------------------------------')
else:
print ("Warning: no syntheseized data was added to: "+thedomain)
print ('-----------------------------------------------------')
print ('End processing data for visualization !!! ')
| mit | -8,745,131,300,681,286,000 | 48.531429 | 121 | 0.639132 | false |
pbauman/libmesh | doc/statistics/libmesh_pagehits.py | 1 | 10542 | #!/usr/bin/env python
import matplotlib.pyplot as plt
import numpy as np
# Import stuff for working with dates
from datetime import datetime
from matplotlib.dates import date2num
# Hits/month, pages, and gigabytes served.
# To get the Google analytics data:
# .) Go to analytics.google.com.
# .) There should be (as of July 2017) a "Google Analytics Home" box at the top left of the dashboard.
# .) Click the "Audience Overview" link at the bottom right corner of this box.
# .) Adjust date range to previous month.
# .) Record the number of "Pageviews" in the "Hits" column below.
# The data below are from the libmesh.github.io site, which uses the
# number UA-24978333-1.
#
# Note: we do not have control over the analytics for the
# https://www.github.com/libMesh/libmesh page. If you look at the page
# source, analytics code UA-3769691-2 appears, but if I try to add
# this property in my analytics account, Google assigns me the number
# UA-24978333-{2,3,...} (where the last digit may change depending on
# how many times you tried to add/remove this property in the
# Analytics Dashboard) and there does not seem to be a straightforward
# way of inserting this code into the source. There have been some
# README.md based hacks for doing this in the past, but I don't think
# they are particularly reliable...
# Hits, pages, GB served
data = [
# 'Jan 2003', 616, 616, 0
# 'Feb 2003', 2078, 2078, 0,
# 'Mar 2003', 3157, 3157, 0,
# 'Apr 2003', 7800, 7800, 0,
# 'May 2003', 4627, 4627, 0,
# 'Jun 2003', 6156, 6156, 0,
# 'Jul 2003', 6389, 6389, 0,
# 'Aug 2003', 10136, 10136, 0,
# 'Sep 2003', 8871, 8871, 0,
# 'Oct 2003', 9703, 9703, 0,
# 'Nov 2003', 9802, 9802, 0,
# 'Dec 2003', 9123, 9123, 0,
# 'Jan 2004', 13599, 13599, 0,
# 'Feb 2004', 11018, 11018, 0,
# 'Mar 2004', 11713, 11713, 0,
# 'Apr 2004', 14995, 14995, 0,
# 'May 2004', 11285, 11285, 0,
# 'Jun 2004', 12974, 12974, 0,
# 'Jul 2004', 12939, 12939, 0,
# 'Aug 2004', 9708, 9708, 0,
# 'Sep 2004', 7994, 7994, 0,
# 'Oct 2004', 6920, 6920, 0,
# 'Nov 2004', 10261, 10261, 0,
# 'Dec 2004', 7483, 7483, 0,
# 'Jan 2005', 3184, 3184, 0,
# 'Feb 2005', 37733, 14077, .4373,
# 'Mar 2005', 43927, 16408, .5637,
# 'Apr 2005', 29792, 8518, .2890,
# 'May 2005', 51288, 17629, .5689,
# 'Jun 2005', 40617, 16599, .5379,
# 'Jul 2005', 29944, 10006, .3363,
# 'Aug 2005', 39592, 14556, .4577,
# 'Sep 2005', 57638, 14666, .4881,
# 'Oct 2005', 48336, 17976, .5749,
# 'Nov 2005', 49563, 15308, .5810,
# 'Dec 2005', 90863, 40736, .9415,
# 'Jan 2006', 46723, 13487, .5662,
# 'Feb 2006', 62285, 26567, .8229,
# 'Mar 2006', 47446, 14711, .6534,
# 'Apr 2006', 90314, 29635, .9762,
# 'May 2006', 68209, 20998, .7949,
# 'Jun 2006', 50495, 17128, .6881,
# 'Jul 2006', 42387, 10958, .6016,
# 'Aug 2006', 55658, 11793, .6174,
# 'Sep 2006', 54919, 20591, .9056,
# 'Oct 2006', 52916, 17944, .9015,
# 'Nov 2006', 55382, 19833, .9439,
# 'Dec 2006', 54265, 22688, .9162,
# 'Jan 2007', 53813, 19881, 1.0 ,
# 'Feb 2007', 52434, 17920, .9472,
# 'Mar 2007', 61530, 21172, 1.2,
# 'Apr 2007', 125578, 77539, 1.3,
# 'May 2007', 182764, 129596, 1.6,
# 'Jun 2007', 115730, 38571, 1.7,
# 'Jul 2007', 121054, 42757, 1.8,
# 'Aug 2007', 81192, 28187, 1.3,
# 'Sep 2007', 143553, 39734, 2.3,
# 'Oct 2007', 110449, 42111, 2.4,
# 'Nov 2007', 128307, 57851, 2.3,
# 'Dec 2007', 80584, 42631, 2.0,
# 'Jan 2008', 69623, 34155, 2.0,
# 'Feb 2008', 144881, 111751, 2.5,
# 'Mar 2008', 69801, 29211, 1.9,
# 'Apr 2008', 74023, 31149, 2.0,
# 'May 2008', 63123, 23277, 1.8,
# 'Jun 2008', 66055, 25418, 2.1,
# 'Jul 2008', 60046, 22082, 2.0,
# 'Aug 2008', 60206, 24543, 2.0,
# 'Sep 2008', 53057, 18635, 1.6,
# 'Oct 2008', 64828, 27042, 2.1,
# 'Nov 2008', 72406, 29767, 2.3,
# 'Dec 2008', 76248, 31690, 2.3,
# 'Jan 2009', 73002, 29744, 2.0,
# 'Feb 2009', 70801, 29156, 2.1,
# 'Mar 2009', 78200, 31139, 2.1,
# 'Apr 2009', 70888, 26182, 1.7,
# 'May 2009', 67263, 26210, 1.8,
# 'Jun 2009', 73146, 31328, 2.6,
# 'Jul 2009', 77828, 33711, 2.4,
# 'Aug 2009', 64378, 28542, 1.9,
# 'Sep 2009', 76167, 33484, 2.2,
# 'Oct 2009', 95727, 41062, 2.8,
# 'Nov 2009', 88042, 38869, 2.5,
# 'Dec 2009', 76148, 37609, 2.3,
# 'Jan 2010', 268856, 45983, 3.2,
# 'Feb 2010', 208210, 42680, 3.0,
# 'Mar 2010', 116263, 42660, 2.6,
# 'Apr 2010', 102493, 32942, 2.4,
# 'May 2010', 117023, 37107, 2.5,
# 'Jun 2010', 128589, 38019, 2.5,
# 'Jul 2010', 87183, 34026, 2.2,
# 'Aug 2010', 99161, 33199, 2.5,
# 'Sep 2010', 81657, 32305, 2.5,
# 'Oct 2010', 98236, 42091, 3.4,
# 'Nov 2010', 115603, 48695, 3.4,
# 'Dec 2010', 105030, 45570, 3.4,
# 'Jan 2011', 133476, 43549, 3.1,
# 'Feb 2011', 34483, 15002, 1.1,
# 'Mar 2011', 0, 0, 0.0,
# 'Apr 2011', 0, 0, 0.0,
# 'May 2011', 0, 0, 0.0,
# 'Jun 2011', 0, 0, 0.0,
# 'Jul 2011', 0, 0, 0.0,
'Aug 2011', 10185, 0, 0.0, # New "Pageviews" data from google analytics, does not seem comparable to sf.net pagehits data
'Sep 2011', 10305, 0, 0.0,
'Oct 2011', 14081, 0, 0.0,
'Nov 2011', 13397, 0, 0.0,
'Dec 2011', 13729, 0, 0.0,
'Jan 2012', 11050, 0, 0.0,
'Feb 2012', 12779, 0, 0.0,
'Mar 2012', 12970, 0, 0.0,
'Apr 2012', 13051, 0, 0.0,
'May 2012', 11857, 0, 0.0,
'Jun 2012', 12584, 0, 0.0,
'Jul 2012', 12995, 0, 0.0,
'Aug 2012', 13204, 0, 0.0,
'Sep 2012', 13170, 0, 0.0,
'Oct 2012', 13335, 0, 0.0,
'Nov 2012', 11337, 0, 0.0,
'Dec 2012', 10108, 0, 0.0, # libmesh switched to github on December 10, 2012
'Jan 2013', 13029, 0, 0.0,
'Feb 2013', 10420, 0, 0.0,
'Mar 2013', 13400, 0, 0.0,
'Apr 2013', 14416, 0, 0.0,
'May 2013', 13875, 0, 0.0,
'Jun 2013', 13747, 0, 0.0,
'Jul 2013', 14019, 0, 0.0,
'Aug 2013', 10828, 0, 0.0,
'Sep 2013', 9969, 0, 0.0,
'Oct 2013', 13083, 0, 0.0,
'Nov 2013', 12938, 0, 0.0,
'Dec 2013', 9079, 0, 0.0,
'Jan 2014', 9736, 0, 0.0,
'Feb 2014', 11824, 0, 0.0,
'Mar 2014', 10861, 0, 0.0,
'Apr 2014', 12711, 0, 0.0,
'May 2014', 11177, 0, 0.0,
'Jun 2014', 10738, 0, 0.0,
'Jul 2014', 10349, 0, 0.0,
'Aug 2014', 8877, 0, 0.0,
'Sep 2014', 9226, 0, 0.0,
'Oct 2014', 8052, 0, 0.0, # Google analytics number moved over to libmesh.github.io in Oct 2014
'Nov 2014', 9243, 0, 0.0,
'Dec 2014', 10714, 0, 0.0,
'Jan 2015', 11508, 0, 0.0,
'Feb 2015', 11278, 0, 0.0,
'Mar 2015', 13305, 0, 0.0,
'Apr 2015', 12347, 0, 0.0,
'May 2015', 11368, 0, 0.0,
'Jun 2015', 11203, 0, 0.0,
'Jul 2015', 10419, 0, 0.0,
'Aug 2015', 11282, 0, 0.0,
'Sep 2015', 13535, 0, 0.0,
'Oct 2015', 12912, 0, 0.0,
'Nov 2015', 13894, 0, 0.0,
'Dec 2015', 11694, 0, 0.0,
'Jan 2016', 11837, 0, 0.0,
'Feb 2016', 14102, 0, 0.0,
'Mar 2016', 13212, 0, 0.0,
'Apr 2016', 13355, 0, 0.0,
'May 2016', 12486, 0, 0.0,
'Jun 2016', 13973, 0, 0.0,
'Jul 2016', 10688, 0, 0.0,
'Aug 2016', 10048, 0, 0.0,
'Sep 2016', 10847, 0, 0.0,
'Oct 2016', 10984, 0, 0.0,
'Nov 2016', 12233, 0, 0.0,
'Dec 2016', 11430, 0, 0.0,
'Jan 2017', 10327, 0, 0.0,
'Feb 2017', 11039, 0, 0.0,
'Mar 2017', 12986, 0, 0.0,
'Apr 2017', 9773, 0, 0.0,
'May 2017', 10880, 0, 0.0,
'Jun 2017', 9179, 0, 0.0,
'Jul 2017', 8344, 0, 0.0,
'Aug 2017', 8617, 0, 0.0,
'Sep 2017', 8576, 0, 0.0,
'Oct 2017', 11255, 0, 0.0,
'Nov 2017', 10362, 0, 0.0,
'Dec 2017', 7948, 0, 0.0,
'Jan 2018', 9376, 0, 0.0,
'Feb 2018', 8864, 0, 0.0,
'Mar 2018', 10339, 0, 0.0,
'Apr 2018', 10958, 0, 0.0,
'May 2018', 10151, 0, 0.0,
'Jun 2018', 8981, 0, 0.0,
'Jul 2018', 8619, 0, 0.0,
'Aug 2018', 9226, 0, 0.0,
'Sep 2018', 8507, 0, 0.0,
'Oct 2018', 9150, 0, 0.0,
'Nov 2018', 8135, 0, 0.0,
'Dec 2018', 7522, 0, 0.0,
'Jan 2019', 8643, 0, 0.0,
'Feb 2019', 8729, 0, 0.0,
'Mar 2019', 7916, 0, 0.0,
]
# Extract number of hits/month
n_hits_month = data[1::4]
# Divide by 1000 for plotting...
n_hits_month = np.divide(n_hits_month, 1000.)
# Extract list of date strings
date_strings = data[0::4]
# Convert date strings into numbers
date_nums = []
for d in date_strings:
date_nums.append(date2num(datetime.strptime(d, '%b %Y')))
# Get a reference to the figure
fig = plt.figure()
# 111 is equivalent to Matlab's subplot(1,1,1) command
ax = fig.add_subplot(111)
# Plot the hits/month as a line with markers (one data point per month).
# The color used comes from sns.color_palette("muted").as_hex() They
# are the "same basic order of hues as the default matplotlib color
# cycle but more attractive colors."
ax.plot(date_nums, n_hits_month, marker='o', linewidth=2, color=u'#4878cf')
# Create title
fig.suptitle('libmesh.github.io Hits/Month (in Thousands)')
# Set up x-tick locations -- August of each year
ticks_names = ['2012', '2013', '2014', '2015', '2016', '2017', '2018', '2019']
# Get numerical values for the names
tick_nums = []
for x in ticks_names:
tick_nums.append(date2num(datetime.strptime('Jan ' + x, '%b %Y')))
# Set tick labels and positions
ax.set_xticks(tick_nums)
ax.set_xticklabels(ticks_names)
# Set x limits for the plot
plt.xlim(date_nums[0], date_nums[-1]+30);
# Make x-axis ticks point outward
ax.get_xaxis().set_tick_params(direction='out')
# Save as PDF
plt.savefig('libmesh_pagehits.pdf')
# Local Variables:
# python-indent: 2
# End:
| lgpl-2.1 | -5,319,884,989,136,103,000 | 36.119718 | 131 | 0.525232 | false |
not-na/peng3d | peng3d/gui/layout.py | 1 | 8877 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# layout.py
#
# Copyright 2020 notna <[email protected]>
#
# This file is part of peng3d.
#
# peng3d is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# peng3d is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with peng3d. If not, see <http://www.gnu.org/licenses/>.
#
__all__ = [
"Layout", "GridLayout",
"LayoutCell",
]
import peng3d
from peng3d import util
from peng3d.util import WatchingList
try:
import pyglet
from pyglet.gl import *
except ImportError:
pass # Headless mode
class Layout(util.ActionDispatcher):
"""
Base Layout class.
This class does not serve any purpose directly other than to be a common base class
for all layouts.
Note that layouts can be nested, e.g. usually the first layouts parent is a SubMenu
and sub-layouts get a LayoutCell of their parent layout as their parent.
"""
def __init__(self, peng, parent):
self.peng = peng
self.parent = parent
@property
def pos(self):
return self.parent.pos
@property
def size(self):
return self.parent.size
class GridLayout(Layout):
"""
Grid-based layout helper class.
This class provides a grid-like layout to its sub-widgets. A border between widgets
can be defined. Additionally, all widgets using this layout should automatically scale
with screen size.
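Rough usage sketch (the surrounding SubMenu/widget setup is assumed and not shown)::
grid = GridLayout(peng, submenu, res=[8, 6], border=[4, 4])
cell = grid.get_cell([1, 1], [3, 2]) # a 3x2-cell area, anchored at its bottom-left corner
# pass ``cell`` as the ``pos`` argument of any widget; the widget's size is then managed by the cell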
"""
def __init__(self, peng, parent, res, border):
super().__init__(peng, parent)
self.res = res
self.bordersize = border
@property
def cell_size(self):
"""
Helper property defining the current size of cells in both x and y axis.
:return: 2-tuple of float
"""
return self.size[0]/self.res[0], self.size[1]/self.res[1]
def get_cell(self, pos, size, anchor_x="left", anchor_y="bottom", border=1):
"""
Returns a grid cell suitable for use as the ``pos`` parameter of any widget.
The ``size`` parameter of the widget will automatically be overwritten.
:param pos: Grid position, in cell
:param size: Size, in cells
:param anchor_x: either ``left``\\ , ``center`` or ``right``
:param anchor_y: either ``bottom``\\ , ``center`` or ``top``
:return: LayoutCell subclass
"""
return _GridCell(self.peng, self, pos, size, anchor_x, anchor_y, border)
class LayoutCell(object):
"""
Base Layout Cell.
Not to be used directly. Usually subclasses of this class are returned by layouts.
Instances can be passed to Widgets as the ``pos`` argument. The ``size`` argument will
be automatically overridden.
Note that manually setting ``size`` will override the size set by the layout cell,
though the position will be kept.
"""
@property
def pos(self):
"""
Property accessing the position of the cell.
This usually refers to the bottom-left corner, but may change depending on arguments
passed during creation.
Note that results can be floats.
:return: 2-tuple of ``(x,y)``
"""
raise NotImplementedError("pos property has to be overridden")
@property
def size(self):
"""
Property accessing the size of the cell.
Note that results can be floats.
:return: 2-tuple of ``(width, height)``
"""
raise NotImplementedError("size property has to be overridden")
class DumbLayoutCell(LayoutCell):
"""
Dumb layout cell that behaves like a widget.
Note that this class is not actually widget and should only be used as the ``pos``
argument to a widget or the ``parent`` to another Layout.
It can be used to create, for example, a :py:class:`GridLayout()` over only a portion
of the screen.
Even though setting the :py:attr:`pos` and :py:attr:`size` attributes is possible,
sometimes a redraw cannot be triggered correctly if e.g. the parent is not submenu.
"""
def __init__(self, parent, pos, size):
self.parent = parent
self._pos = pos
self._size = size
@property
def pos(self):
"""
Property that will always be a 2-tuple representing the position of the widget.
Note that this method may call the method given as ``pos`` in the initializer.
The returned object will actually be an instance of a helper class to allow for setting only the x/y coordinate.
This property also respects any :py:class:`Container` set as its parent, any offset will be added automatically.
Note that setting this property will override any callable set permanently.
"""
if isinstance(self._pos, list) or isinstance(self._pos, tuple):
r = self._pos
elif callable(self._pos):
w, h = self.parent.size[:]
r = self._pos(w, h, *self.size)
elif isinstance(self._pos, LayoutCell):
r = self._pos.pos
else:
raise TypeError("Invalid position type")
ox, oy = self.parent.pos
r = r[0] + ox, r[1] + oy
# if isinstance(self.submenu,ScrollableContainer) and not self._is_scrollbar:# and self.name != "__scrollbar_%s"%self.submenu.name: # Widget inside scrollable container and not the scrollbar
# r = r[0],r[1]+self.submenu.offset_y
return WatchingList(r, self._wlredraw_pos)
@pos.setter
def pos(self, value):
self._pos = value
if hasattr(self.parent, "redraw"):
self.parent.redraw()
@property
def size(self):
"""
Similar to :py:attr:`pos` but for the size instead.
"""
if isinstance(getattr(self, "_pos", None), LayoutCell):
s = self._pos.size
elif isinstance(self._size, list) or isinstance(self._size, tuple):
s = self._size
elif callable(self._size):
w, h = self.parent.size[:]
s = self._size(w, h)
else:
raise TypeError("Invalid size type")
s = s[:]
if s[0] == -1 or s[1] == -1:
raise ValueError("Cannot set size to -1 in DumbLayoutCell")
# Prevents crashes with negative size
s = [max(s[0], 0), max(s[1], 0)]
return WatchingList(s, self._wlredraw_size)
@size.setter
def size(self, value):
self._size = value
if hasattr(self.parent, "redraw"):
self.parent.redraw()
def _wlredraw_pos(self,wl):
self._pos = wl[:]
if hasattr(self.parent, "redraw"):
self.parent.redraw()
def _wlredraw_size(self,wl):
self._size = wl[:]
if hasattr(self.parent, "redraw"):
self.parent.redraw()
class _GridCell(LayoutCell):
def __init__(self, peng, parent, offset, size, anchor_x, anchor_y, border=1):
self.peng = peng
self.parent = parent
self.offset = offset
self._size = size
self.anchor_x = anchor_x
self.anchor_y = anchor_y
self.border = border
@property
def pos(self):
dx, dy = self.parent.bordersize
dx *= self.border
dy *= self.border
px, py = self.parent.pos # Parent position in px
oxc, oyc = self.offset # Offset in cells
csx, csy = self.parent.cell_size # Cell size in px
ox, oy = oxc*csx, oyc*csy # Offset in px
sxc, sxy = self._size # Size in cells
sx, sy = sxc*csx, sxy*csy # Size in px
if self.anchor_x == "left":
x = px+ox+dx/2
elif self.anchor_x == "center":
x = px+ox+sx/2
elif self.anchor_x == "right":
x = px+ox+sx-dx/2
else:
raise ValueError(f"Invalid anchor_x of {self.anchor_x}")
if self.anchor_y == "bottom":
y = py+oy+dy/2
elif self.anchor_y == "center":
y = py+oy+sy/2
elif self.anchor_y == "top":
y = py+oy+sy-dy/2
else:
raise ValueError(f"Invalid anchor_y of {self.anchor_y}")
return x, y
@property
def size(self):
dx, dy = self.parent.bordersize
csx, csy = self.parent.cell_size # Cell size in px
sxc, sxy = self._size # Size in cells
sx, sy = sxc * csx-dx*self.border, sxy * csy-dy*self.border
return sx, sy
| gpl-2.0 | -6,435,422,355,169,676,000 | 30.038462 | 198 | 0.600315 | false |