repo_name (string, 5 to 92 chars) | path (string, 4 to 232 chars) | copies (string, 19 classes) | size (string, 4 to 7 chars) | content (string, 721 to 1.04M chars) | license (string, 15 classes) | hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B) | line_mean (float64, 6.51 to 99.9) | line_max (int64, 15 to 997) | alpha_frac (float64, 0.25 to 0.97) | autogenerated (bool, 1 class)
---|---|---|---|---|---|---|---|---|---|---|
Diviyan-Kalainathan/causal-humans | Clustering/performance_evaluation.py | 1 | 3279 | '''
Computing the misclassification error distance between two k-means clusterings
according to Marina Meila, "The Uniqueness of a Good Optimum for K-Means", ICML 2006
Author : Diviyan Kalainathan
Date : 20/06/2016
'''
import csv,numpy,itertools
from sklearn import metrics
def Clustering_performance_evaluation(mode, folder_name, run1, run2, num_clusters, num_init):
"""
:param mode: selects which metric is to be used
:param folder_name: Folder of the runs (String)
:param run1: Number of the run 1 (int)
:param run2: Number of the run 2 (int)
    :param num_clusters: Number of clusters (int)
    :param num_init: Number of k-means initialisations (int)
    :return: distance value (float), or 0 if the data is invalid
"""
numpy.set_printoptions(threshold='nan')
print('-'+str(num_clusters)+'---performance evaluation between runs : ' + str(run1) + ' ,' + str(run2))
valid_data= True
#Checking if the data is valid by loading & testing the shape of it
try:
data_1=numpy.loadtxt('output/'+folder_name+'/cluster_predictions_c'+ str(num_clusters)
+ '_n'+ str(num_init) +'_r'+ str(run1)+'.csv',delimiter=';')
data_2=numpy.loadtxt('output/'+folder_name+'/cluster_predictions_c'+ str(num_clusters)
+ '_n'+ str(num_init) +'_r'+ str(run2)+'.csv',delimiter=';')
if data_1.shape != data_2.shape:
valid_data=False
except IOError as e:
print "I/O error({0}): {1}".format(e.errno, e.strerror)
valid_data=False
if valid_data:
n_samples=data_1.shape[0]
data_1 = numpy.asarray(sorted(data_1, key=lambda x: x[1]))
data_2 = numpy.asarray(sorted(data_2, key=lambda x: x[1]))
if mode==1:
#Distance defined by Marina Meila : k! complexity
clustering_1=numpy.zeros((n_samples,num_clusters))
clustering_2=numpy.zeros((n_samples,num_clusters))
for x in range(0,n_samples):
                clustering_1[x, int(data_1[x, 0])] += 1
                clustering_2[x, int(data_2[x, 0])] += 1
'''for y in range(0,num_clusters):
try:
clustering_1[:,y]*=1/numpy.sqrt(numpy.sum(clustering_1[:,y]))
except ZeroDivisionError:
clustering_1[:,y]=0
try:
clustering_2[:,y]*=1/numpy.sqrt(numpy.sum(clustering_2[:,y]))
except ZeroDivisionError:
clustering_2[:,y]=0
''' # No normalisation needed
confusion_matrix=numpy.dot(numpy.transpose(clustering_1),clustering_2)
max_confusion=0
result = []
for perm in itertools.permutations(range(num_clusters)):
confusion=0
for i in range(0, num_clusters):
confusion += confusion_matrix[i, perm[i]]
if max_confusion<confusion:
max_confusion=confusion
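            # Note (illustrative alternative, not part of the original script): the
            # exhaustive permutation search above is O(k!). The same optimum can be
            # found in polynomial time with the Hungarian algorithm, assuming SciPy
            # is available:
            #   from scipy.optimize import linear_sum_assignment
            #   rows, cols = linear_sum_assignment(-confusion_matrix)
            #   max_confusion = confusion_matrix[rows, cols].sum()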
distance=(max_confusion/n_samples)
return distance
elif mode==2:
            # Adjusted Rand index
distance=metrics.adjusted_rand_score(data_1[:,0],data_2[:,0])
return distance
elif mode==3:
            # V-measure
distance=metrics.v_measure_score(data_1[:,0],data_2[:,0])
return distance
return 0 | mit | 552,402,320,679,528,060 | 33.526316 | 107 | 0.568771 | false |
willkg/pyrax-cmd | setup.py | 1 | 1027 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
from setuptools import setup
if sys.argv[-1] == 'publish':
os.system('python setup.py sdist upload')
sys.exit()
readme = open('README.rst').read()
history = open('HISTORY.rst').read().replace('.. :changelog:', '')
setup(
name='pyrax-cmd',
version='0.1.0',
description='Eats bugs (WILLCAGE FURY EDITION)',
long_description=readme + '\n\n' + history,
author='Will Kahn-Greene',
author_email='[email protected]',
url='https://github.com/willkg/pyrax-cmd',
include_package_data=True,
install_requires=[
'pyrax',
],
license="BSD",
zip_safe=True,
keywords='',
scripts=['pyrax-cmd'],
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
"Programming Language :: Python :: 2",
'Programming Language :: Python :: 2.7',
],
)
| bsd-3-clause | -4,639,629,143,681,459,000 | 23.452381 | 66 | 0.596884 | false |
genialis/resolwe | resolwe/flow/executors/docker/run.py | 1 | 19322 | """.. Ignore pydocstyle D400.
.. autoclass:: resolwe.flow.executors.docker.run.FlowExecutor
:members:
"""
# pylint: disable=logging-format-interpolation
import asyncio
import copy
import functools
import json
import logging
import os
import random
import string
import tempfile
import time
from contextlib import suppress
from pathlib import Path
from typing import Any, Dict, Iterable, Tuple, Type
import docker
from .. import constants
from ..connectors import connectors
from ..connectors.baseconnector import BaseStorageConnector
from ..global_settings import LOCATION_SUBPATH, PROCESS_META, SETTINGS
from ..local.run import FlowExecutor as LocalFlowExecutor
from ..protocol import ExecutorFiles
from .seccomp import SECCOMP_POLICY
# Limits of containers' access to memory. We set the limit to ensure
# processes are stable and do not get killed by OOM signal.
DOCKER_MEMORY_HARD_LIMIT_BUFFER = 100
DOCKER_MEMORY_SWAP_RATIO = 2
DOCKER_MEMORY_SWAPPINESS = 1
logger = logging.getLogger(__name__)
def _random_string(size: int = 5, chars=string.ascii_lowercase + string.digits):
"""Generate and return random string."""
return "".join(random.choice(chars) for x in range(size))
def retry(
max_retries: int = 3,
retry_exceptions: Tuple[Type[Exception], ...] = (
docker.errors.ImageNotFound,
docker.errors.APIError,
),
min_sleep: int = 1,
max_sleep: int = 10,
):
"""Try to call decorated method max_retries times before giving up.
The calls are retried when function raises exception in retry_exceptions.
:param max_retries: maximal number of calls before giving up.
:param retry_exceptions: retry call if one of these exceptions is raised.
:param min_sleep: minimal sleep between calls (in seconds).
:param max_sleep: maximal sleep between calls (in seconds).
:returns: return value of the called method.
    :raises: the last exception raised by the method call if none of the
        retries were successful.
"""
def decorator_retry(func):
@functools.wraps(func)
def wrapper_retry(*args, **kwargs):
last_error: Exception = Exception("Retry failed")
sleep: int = 0
for retry in range(max_retries):
try:
time.sleep(sleep)
return func(*args, **kwargs)
except retry_exceptions as err:
sleep = min(max_sleep, min_sleep * (2 ** retry))
last_error = err
raise last_error
return wrapper_retry
return decorator_retry
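# Illustrative use of the retry decorator (the decorated function below is
# hypothetical and not part of this module):
#
#     @retry(max_retries=5, min_sleep=2)
#     def pull_image(client, image_name):
#         client.images.pull(image_name)
#
# Each failed attempt sleeps min(max_sleep, min_sleep * 2 ** attempt) seconds
# before retrying, and the last exception is re-raised once all attempts fail.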
class FlowExecutor(LocalFlowExecutor):
"""Docker executor."""
name = "docker"
def __init__(self, *args, **kwargs):
"""Initialize attributes."""
super().__init__(*args, **kwargs)
container_name_prefix = SETTINGS.get("FLOW_EXECUTOR", {}).get(
"CONTAINER_NAME_PREFIX", "resolwe"
)
self.container_name = self._generate_container_name(container_name_prefix)
self.tools_volumes = []
self.command = SETTINGS.get("FLOW_DOCKER_COMMAND", "docker")
self.tmpdir = tempfile.TemporaryDirectory()
# Setup Docker volumes.
def _new_volume(
self, config: Dict[str, Any], mount_path: Path, read_only: bool = True
) -> Tuple[str, Dict[str, str]]:
"""Generate a new volume entry.
:param config: must include 'path' and may include 'selinux_label'.
        :param mount_path: mount point for the volume.
"""
options = set()
if "selinux_label" in config:
options.add(config["selinux_label"])
options.add("ro" if read_only else "rw")
return (
os.fspath(config["path"]),
{"bind": os.fspath(mount_path), "mode": ",".join(options)},
)
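    # Illustrative result of _new_volume (paths are made up): calling it with
    # config {"path": "/srv/data"} and mount_path Path("/data") returns
    # ("/srv/data", {"bind": "/data", "mode": "ro"}), i.e. the (host path,
    # options) pair expected by docker-py's `volumes` mapping.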
def _get_upload_dir(self) -> str:
"""Get upload path.
        :returns: the path of the first mountable connector for storage
'upload'.
:raises RuntimeError: if no applicable connector is found.
"""
for connector in connectors.for_storage("upload"):
if connector.mountable:
return f"/upload_{connector.name}"
raise RuntimeError("No mountable upload connector is defined.")
def _get_mountable_connectors(self) -> Iterable[Tuple[str, BaseStorageConnector]]:
"""Iterate through all the storages and find mountable connectors.
:returns: list of tuples (storage_name, connector).
"""
return (
(storage_name, connector)
for storage_name in SETTINGS["FLOW_STORAGE"]
for connector in connectors.for_storage(storage_name)
if connector.mountable
)
def _get_volumes(self, subpaths=False) -> Dict[str, Tuple[Dict, Path]]:
"""Get writeable volumes from settings.
        :attr subpaths: when True the location subpath is added to the volume
path.
:returns: mapping between volume name and tuple (config, mount_point).
"""
results = dict()
volume_mountpoint = {
constants.PROCESSING_VOLUME_NAME: constants.PROCESSING_VOLUME,
constants.INPUTS_VOLUME_NAME: constants.INPUTS_VOLUME,
constants.SECRETS_VOLUME_NAME: constants.SECRETS_VOLUME,
constants.SOCKETS_VOLUME_NAME: constants.SOCKETS_VOLUME,
}
for volume_name, volume in SETTINGS["FLOW_VOLUMES"].items():
if "read_only" not in volume["config"]:
if volume["type"] == "host_path":
config = copy.deepcopy(volume["config"])
if subpaths:
config["path"] = Path(config["path"]) / LOCATION_SUBPATH
results[volume_name] = (config, volume_mountpoint[volume_name])
elif volume["type"] == "temporary_directory":
config = copy.deepcopy(volume["config"])
volume_path = Path(self.tmpdir.name) / volume_name
mode = config.get("mode", 0o700)
volume_path.mkdir(exist_ok=True, mode=mode)
config["path"] = volume_path
results[volume_name] = (config, volume_mountpoint[volume_name])
else:
raise RuntimeError(
"Only 'host_type' and 'temporary_directory' volumes are "
" supported by Docker executor,"
f"requested '{volume['config']['type']}' for {volume_name}."
)
assert (
constants.PROCESSING_VOLUME_NAME in results
), "Processing volume must be defined."
return results
def _init_volumes(self) -> Dict:
"""Prepare volumes for init container."""
mount_points = [
(config, mount_point, False)
for config, mount_point in self._get_volumes().values()
]
mount_points += [
(connector.config, Path("/") / f"{storage_name}_{connector.name}", False)
for storage_name, connector in self._get_mountable_connectors()
]
return dict([self._new_volume(*mount_point) for mount_point in mount_points])
def _communicator_volumes(self) -> Dict[str, Dict]:
"""Prepare volumes for communicator container."""
mount_points = [
(connector.config, Path("/") / f"{storage_name}_{connector.name}", False)
for storage_name, connector in self._get_mountable_connectors()
]
volumes = self._get_volumes()
mount_points += [
(*volumes[constants.SECRETS_VOLUME_NAME], False),
(*volumes[constants.SOCKETS_VOLUME_NAME], False),
]
return dict([self._new_volume(*mount_point) for mount_point in mount_points])
def _processing_volumes(self) -> Dict:
"""Prepare volumes for processing container."""
# Expose processing and (possibly) input volume RW.
mount_points = [
(config, mount_point, False)
for config, mount_point in self._get_volumes(True).values()
]
        # Expose mountable connectors ('upload' RW, others RO).
mount_points += [
(
connector.config,
Path("/") / f"{storage_name}_{connector.name}",
storage_name != "upload",
)
for storage_name, connector in self._get_mountable_connectors()
]
mount_points += [
(
{"path": self.runtime_dir / "executors" / ExecutorFiles.SOCKET_UTILS},
Path("/socket_utils.py"),
False,
),
(
{
"path": self.runtime_dir
/ "executors"
/ ExecutorFiles.STARTUP_PROCESSING_SCRIPT
},
Path("/start.py"),
False,
),
(
{"path": self.runtime_dir / "executors" / ExecutorFiles.CONSTANTS},
Path("/constants.py"),
True,
),
]
# Generate dummy passwd and create mappings for it. This is required because some tools
# inside the container may try to lookup the given UID/GID and will crash if they don't
# exist. So we create minimal user/group files.
temporary_directory = Path(self.tmpdir.name)
passwd_path = temporary_directory / "passwd"
group_path = temporary_directory / "group"
with passwd_path.open("wt") as passwd_file:
passwd_file.write(
"root:x:0:0:root:/root:/bin/bash\n"
+ f"user:x:{os.getuid()}:{os.getgid()}:user:{os.fspath(constants.PROCESSING_VOLUME)}:/bin/bash\n"
)
with group_path.open("wt") as group_file:
group_file.write("root:x:0:\n" + f"user:x:{os.getgid()}:user\n")
mount_points += [
({"path": passwd_path}, Path("/etc/passwd"), True),
({"path": group_path}, Path("/etc/group"), True),
]
# Create mount points for tools.
mount_points += [
({"path": Path(tool)}, Path("/usr/local/bin/resolwe") / str(index), True)
for index, tool in enumerate(self.get_tools_paths())
]
# Create mount_points for runtime (all read-only).
mount_points += [
({"path": self.runtime_dir / src}, dst, True)
for src, dst in SETTINGS.get("RUNTIME_VOLUME_MAPS", {}).items()
]
return dict([self._new_volume(*mount_point) for mount_point in mount_points])
async def start(self):
"""Start process execution."""
memory = (
self.process["resource_limits"]["memory"] + DOCKER_MEMORY_HARD_LIMIT_BUFFER
)
memory_swap = int(memory * DOCKER_MEMORY_SWAP_RATIO)
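        # Worked example (illustrative): a process limit of 4096 MB gives
        # memory = 4096 + 100 = 4196 MB and memory_swap = 4196 * 2 = 8392 MB.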
network = "bridge"
if "network" in self.resources:
# Configure Docker network mode for the container (if specified).
# By default, current Docker versions use the 'bridge' mode which
# creates a network stack on the default Docker bridge.
network = SETTINGS.get("FLOW_EXECUTOR", {}).get("NETWORK", "")
security_options = []
if not SETTINGS.get("FLOW_DOCKER_DISABLE_SECCOMP", False):
security_options.append(f"seccomp={json.dumps(SECCOMP_POLICY)}")
processing_image = self.requirements.get(
"image",
SETTINGS.get(
"FLOW_DOCKER_DEFAULT_PROCESSING_CONTAINER_IMAGE",
"public.ecr.aws/s4q6j6e8/resolwe/base:ubuntu-20.04",
),
)
communicator_image = SETTINGS.get(
"FLOW_DOCKER_COMMUNICATOR_IMAGE",
"public.ecr.aws/s4q6j6e8/resolwe/com:latest",
)
ulimits = []
if (
self.process["scheduling_class"]
== PROCESS_META["SCHEDULING_CLASS_INTERACTIVE"]
):
# TODO: This is not very good as each child gets the same limit.
# Note: Ulimit does not work as expected on multithreaded processes
# Limit is increased by factor 1.2 for processes with 2-8 threads.
# TODO: This should be changed for processes with over 8 threads.
cpu_time_interactive = SETTINGS.get(
"FLOW_PROCESS_RESOURCE_DEFAULTS", {}
).get("cpu_time_interactive", 30)
cpu_limit = int(cpu_time_interactive * 1.2)
ulimits.append(
docker.types.Ulimit(name="cpu", soft=cpu_limit, hard=cpu_limit)
)
environment = {
"LISTENER_SERVICE_HOST": self.listener_connection[0],
"LISTENER_SERVICE_PORT": self.listener_connection[1],
"LISTENER_PROTOCOL": self.listener_connection[2],
"DATA_ID": self.data_id,
"RUNNING_IN_CONTAINER": 1,
"RUNNING_IN_DOCKER": 1,
"GENIALIS_UID": os.getuid(),
"GENIALIS_GID": os.getgid(),
"FLOW_MANAGER_KEEP_DATA": SETTINGS.get("FLOW_MANAGER_KEEP_DATA", False),
"DESCRIPTOR_CHUNK_SIZE": 100,
"MOUNTED_CONNECTORS": ",".join(
connector.name
for connector in connectors.values()
if connector.mountable
),
}
with suppress(RuntimeError):
environment["UPLOAD_DIR"] = self._get_upload_dir()
autoremove = SETTINGS.get("FLOW_DOCKER_AUTOREMOVE", False)
# Add random string between container name and init. Since check for
# existing stdout file has been moved inside init container we should
        # use different container names in case one init container is still
# running when another one is fired (or when containers are not purged
# automatically): otherwise executor will fail to start the init
# container due to name clash.
init_container_name = f"{self.container_name}-{_random_string()}-init"
init_arguments = {
"auto_remove": autoremove,
"volumes": self._init_volumes(),
"command": ["/usr/local/bin/python3", "-m", "executors.init_container"],
"image": communicator_image,
"name": init_container_name,
"detach": True,
"cpu_quota": 1000000,
"mem_limit": "4000m",
"mem_reservation": "200m",
"network_mode": network,
"user": f"{os.getuid()}:{os.getgid()}",
"environment": environment,
}
communication_arguments = {
"auto_remove": autoremove,
"volumes": self._communicator_volumes(),
"command": ["/usr/local/bin/python", "/startup.py"],
"image": communicator_image,
"name": f"{self.container_name}-communicator",
"detach": True,
"cpu_quota": 100000,
"mem_limit": "4000m",
"mem_reservation": "200m",
"network_mode": network,
"cap_drop": ["all"],
"security_opt": security_options,
"user": f"{os.getuid()}:{os.getgid()}",
"environment": environment,
}
processing_arguments = {
"auto_remove": autoremove,
"volumes": self._processing_volumes(),
"command": ["python3", "/start.py"],
"image": processing_image,
"network_mode": f"container:{self.container_name}-communicator",
"working_dir": os.fspath(constants.PROCESSING_VOLUME),
"detach": True,
"cpu_quota": self.process["resource_limits"]["cores"] * (10 ** 6),
"mem_limit": f"{memory}m",
"mem_reservation": f"{self.process['resource_limits']['memory']}m",
"mem_swappiness": DOCKER_MEMORY_SWAPPINESS,
"memswap_limit": f"{memory_swap}m",
"name": self.container_name,
"cap_drop": ["all"],
"security_opt": security_options,
"user": f"{os.getuid()}:{os.getgid()}",
"ulimits": ulimits,
"environment": environment,
}
@retry(max_retries=5)
def transfer_image(client, image_name):
"""Transfer missing image, retry 5 times."""
client.images.pull(image_name)
client = docker.from_env()
# Pull all the images.
try:
try:
logger.debug("Pulling processing image %s.", processing_image)
client.images.get(processing_image)
except docker.errors.ImageNotFound:
transfer_image(client, processing_image)
try:
logger.debug("Pulling communicator image %s.", communicator_image)
client.images.get(communicator_image)
except docker.errors.ImageNotFound:
transfer_image(client, communicator_image)
except docker.errors.APIError:
logger.exception("Docker API error")
raise RuntimeError("Docker API error")
loop = asyncio.get_event_loop()
start_time = time.time()
try:
init_container = client.containers.run(**init_arguments)
except docker.errors.APIError as error:
await self.communicator.finish(
{"error": f"Error starting init container: {error}"}
)
raise
init_container_status = await loop.run_in_executor(None, init_container.wait)
# Return code is as follows:
        # - 0: no error occurred, continue processing.
# - 1: error running init container, abort processing and log error.
# - 2: data exists in the processing volume, abort processing.
init_rc = init_container_status["StatusCode"]
if init_rc != 0:
logger.error("Init container returned %s instead of 0.", init_rc)
# Do not set error on data objects where previous data exists.
if init_rc == 1:
await self.communicator.finish(
{"error": f"Init container returned {init_rc} instead of 0."}
)
return
try:
communication_container = client.containers.run(**communication_arguments)
except docker.errors.APIError as error:
await self.communicator.finish(
{"error": f"Error starting communication container: {error}"}
)
raise
try:
processing_container = client.containers.run(**processing_arguments)
except docker.errors.APIError as e:
await self.communicator.finish(
{"error": f"Error starting processing container: {e}"}
)
with suppress(docker.errors.APIError):
communication_container.stop(timeout=1)
raise
end_time = time.time()
logger.info(
"It took {:.2f}s for Docker containers to start".format(
end_time - start_time
)
)
with suppress(docker.errors.NotFound):
await loop.run_in_executor(None, communication_container.wait)
with suppress(docker.errors.NotFound):
await loop.run_in_executor(None, processing_container.wait)
| apache-2.0 | 6,463,769,656,858,724,000 | 38.594262 | 113 | 0.572663 | false |
Digital-Preservation-Finland/dpres-ipt | ipt/scripts/check_xml_schematron_features.py | 1 | 1714 | #!/usr/bin/python
# -*- encoding:utf-8 -*-
# vim:ft=python
"""Validate XML file using Schematron."""
from __future__ import print_function, unicode_literals
import os
import sys
import optparse
from file_scraper.schematron.schematron_scraper import SchematronScraper
from ipt.utils import concat
from ipt.six_utils import ensure_text
def main(arguments=None):
"""Main loop"""
usage = "usage: %prog [options] xml-file-path"
parser = optparse.OptionParser(usage=usage)
parser.add_option("-s", "--schemapath", dest="schemapath",
help="Path to schematron schemas",
metavar="PATH")
(options, args) = parser.parse_args(arguments)
if len(args) != 1:
parser.error("Must give a path to an XML file as argument")
if options.schemapath is None:
parser.error("The -s switch is required")
filename = args[0]
if os.path.isdir(filename):
filename = os.path.join(filename, 'mets.xml')
scraper = SchematronScraper(
filename, mimetype="text/xml",
params={"schematron": options.schemapath})
scraper.scrape_file()
message_string = ensure_text(concat(scraper.messages()).strip())
error_string = ensure_text(concat(scraper.errors()).strip())
if message_string:
print(message_string)
if error_string:
print(error_string, file=sys.stderr)
if error_string or not scraper.well_formed:
return 117
return 0
# pylint: disable=duplicate-code
# Main function can be similar in different scripts
if __name__ == '__main__':
# If run from the command line, take out the program name from sys.argv
RETVAL = main(sys.argv[1:])
sys.exit(RETVAL)
| lgpl-3.0 | 9,059,798,237,095,664,000 | 26.206349 | 75 | 0.65811 | false |
makemob/UkiModbusManager | UkiArticulation.py | 1 | 18114 | #!/usr/bin/env python
# -*- coding: utf_8 -*-
"""
Uki Articulation
Chris Mock, 2017
Uki Articulation UI
- Controls UkiModbusManager
- Plays CSV scripts/sequences to control speed/accel
Licensed under GNU General Public License v3.0, https://www.gnu.org/licenses/gpl-3.0.txt
"""
import threading
import time
import logging
import UkiModbusManager as uki
import UkiLogger
from UkiGUI import *
import yaml
import csv
from ModbusMap import MB_MAP
INITIAL_LOG_LEVEL = logging.INFO
LOG_LEVEL_MAP = {'ERROR': logging.ERROR,
'WARNING': logging.WARN,
'INFO': logging.INFO,
'DEBUG': logging.DEBUG}
RESET_LOOPS = 3 # Number of times to reset boards prior to running script
THREAD_DELAY = 0.1 # Seconds to delay when avoiding thread spinning. Sets GUI update rate
# Still to do:
# - build windows executable
# - error handling: queues full, invalid config input
# - thread interlocking, one exception should quit all
# - move player piano into separate file
# - small memory leak somewhere..
class ThreadManager:
def __init__(self, master):
self.testing = 0
self.master = master
# Thread control vars (boolean reads are atomic)
self.running = True
self.playing = False
self.fault_active = False
self.gui_config = {} # Shadow GUI config, updated by Uki MM thread
self.gui_config_lock = threading.Lock() # Lock for self.gui_config
# Setup threads and queues
self.gui_queue = queue.Queue() # Log messages to GUI
self.uki_mm_thread_queue = queue.Queue() # Control messages from GUI to UkiModbusManager thread
self.uki_mm_thread = threading.Thread(target=self.uki_mm_control)
self.uki_mm_comms_queue = queue.Queue() # Data to send out to boards from Player Piano thread
self.uki_mm_comms_thread = threading.Thread(target=self.uki_player_piano)
self.uki_mm_responses_queue = queue.Queue() # Queue to hold responses (reads) coming in from boards
self.gui = UkiGUI(master, self.gui_queue, self.uki_mm_thread_queue)
self.periodic_gui_update()
self.log_level = INITIAL_LOG_LEVEL
self.logger = UkiLogger.get_logger(log_level=self.log_level, queue=self.gui_queue)
self.uki_mm_thread.start()
self.uki_mm_comms_thread.start()
def start_uki_modbus_manager(self, config):
left_port = config['left_comm_port'] if config['left_comm_disabled'] == 0 else None
right_port = config['right_comm_port'] if config['right_comm_disabled'] == 0 else None
return uki.UkiModbusManager(left_serial_port=left_port, right_serial_port=right_port,
config_filename=config['config_file'],
output_ip=config['output_ip'],
logger=self.logger,
incoming_queue=self.uki_mm_comms_queue,
outgoing_queue=self.uki_mm_responses_queue)
def uki_mm_control(self):
# UkiMM checks queue for quit signal, input settings, script triggers etc.
uki_mm_started = False
uki_manager = None
estopped_boards = {}
while self.running:
if uki_mm_started:
uki_manager.main_poll_loop()
# Check for messages from GUI to wrapper
while self.uki_mm_thread_queue.qsize():
try:
queue_obj = self.uki_mm_thread_queue.get(0)
gui_config = queue_obj['config']
with self.gui_config_lock:
self.gui_config = gui_config # Export for other threads to use
msg = queue_obj['message']
if LOG_LEVEL_MAP[gui_config['log_level']] != self.log_level:
self.log_level = LOG_LEVEL_MAP[gui_config['log_level']]
self.logger.warning("Log level changed to " + gui_config['log_level'])
self.logger.setLevel(LOG_LEVEL_MAP[gui_config['log_level']])
if not uki_mm_started:
# Only start Uki MM once we have config data from the GUI
uki_manager = self.start_uki_modbus_manager(gui_config)
uki_mm_started = True
if msg == 'QUIT':
self.logger.warning('Quitting...')
uki_manager.estop_all_boards()
self.running = False
elif msg == 'RESTART':
self.logger.warning('Restarting UkiModbusManager')
udpEnabled = uki_manager.udp_input_enabled
uki_manager.cleanup()
time.sleep(5) # Short delay to allow comms drivers to become available on Windows systems
uki_manager = self.start_uki_modbus_manager(gui_config)
uki_manager.udp_input(udpEnabled)
elif msg == 'UDP':
uki_manager.udp_input(True)
uki_manager.set_accel_config(True) # Allow config file to set accel
elif msg in ('CSV', 'None'):
uki_manager.udp_input(False)
uki_manager.set_accel_config(False) # Script will set accel values
elif msg == 'STOP':
uki_manager.estop_all_boards()
self.playing = False
elif msg == 'RESET':
uki_manager.reset_all_boards()
elif msg == 'PLAY':
self.playing = True
elif msg == 'FORCE_CALIBRATE':
uki_manager.force_calibrate()
except queue.Empty:
pass
# Check for messages coming in from boards eg. EStop state
last_estopped_boards = estopped_boards.copy()
while self.uki_mm_responses_queue.qsize():
try:
response = self.uki_mm_responses_queue.get(0)
# First byte is address
address = response[0]
# Offset/value pairs follow
for response_index in range(1, len(response), 2):
offset = response[response_index]
value = response[response_index + 1]
if offset == MB_MAP['MB_ESTOP_STATE']:
estopped_boards[address] = value
except queue.Empty:
pass
# Check whether any boards have estopped
estop_active = False
for board_address, board_estop_state in estopped_boards.items():
if board_estop_state:
estop_active = True
self.fault_active = estop_active # Transfer only once value determined, can't glitch through False
if estop_active and last_estopped_boards != estopped_boards:
self.logger.warning("EStop detected: " + ','.join([str(key) for key in estopped_boards]))
uki_manager.cleanup()
def uki_send_comms(self, address, offset, value):
if self.playing:
self.logger.debug('Address ' + str(address) + ', offset ' + str(offset) + '=' + str(value))
self.uki_mm_comms_queue.put((address, offset, value))
def uki_player_piano(self):
# Reads from CSV script containing speed (%), accel (%) or position (mm) targets
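        # Illustrative script layout (actuator names are hypothetical); the first
        # row holds "<ActuatorName>_Speed", "_Accel" or "_Position" headers and
        # each following row is one frame, where blank cells keep the last value:
        #
        #   LeftRearHip_Speed,LeftRearHip_Accel,RightKnee_Position
        #   50,100,
        #   ,,250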
piano_states = {'IDLE': 0, 'INIT': 1, 'RESET_ESTOP': 2, 'ROLLING': 3, 'STOPPING': 4}
piano_state = piano_states['IDLE']
while self.running:
if piano_state == piano_states['IDLE']:
if self.playing:
piano_state = piano_states['INIT']
else:
time.sleep(THREAD_DELAY) # Slow poll loop until we start again
elif piano_state == piano_states['INIT']:
# Fetch gui config (locked)
with self.gui_config_lock:
csv_filename = self.gui_config['script_file']
loops = self.gui_config['script_loops']
frame_period = self.gui_config['script_rate']
config_file = self.gui_config['config_file']
# Read YAML config file, map names to addresses
board_names = []
board_mapping = {}
current_speed = {}
current_accel = {}
current_position = {}
position_control_boards = []
try:
board_config = yaml.safe_load(open(config_file, 'r', encoding='utf8'))
for cfg in board_config['actuators']:
if cfg['enabled']:
board_names.append(cfg['name'])
board_mapping[cfg['name']] = cfg['address']
current_speed[cfg['name']] = 0
current_accel[cfg['name']] = 100
current_position[cfg['name']] = None
except FileNotFoundError:
self.logger.error('Config file not found: ' + config_file)
self.playing = False
except yaml.parser.ParserError as exc:
self.logger.error('Failed to parse yaml config file ' + config_file + ': ' + str(exc))
self.playing = False
if self.playing:
piano_state = piano_states['RESET_ESTOP']
else:
piano_state = piano_states['IDLE']
elif piano_state == piano_states['RESET_ESTOP']:
# Reset estop a few times just in case of downstream errors
for reset_loops in range(0, RESET_LOOPS):
self.logger.warning("Resetting all boards: " + str(reset_loops + 1) + " of " + str(RESET_LOOPS) + " attempts")
for board in board_names:
self.uki_send_comms(address=board_mapping[board],
offset=MB_MAP['MB_RESET_ESTOP'],
value=0x5050)
time.sleep(2) # Wait for reset messages to go out
if self.playing:
loop_count = 0
piano_state = piano_states['ROLLING']
else:
piano_state = piano_states['IDLE']
elif piano_state == piano_states['ROLLING']:
self.logger.warning('Starting loop ' + str(loop_count + 1) + ' of ' + str(loops) + ': ' + csv_filename)
if not os.path.isfile(csv_filename):
self.logger.error('Script file not found:' + csv_filename)
self.playing = False
else:
with open(csv_filename, newline='') as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',', quotechar='"')
# Read header row
header = next(csv_reader)
try:
actuator_names_by_col = [cell.split('_')[0] for cell in header]
speed_accel_position = [cell.split('_')[1] for cell in header]
except IndexError:
self.logger.error('Each CSV column heading must have one underscore eg. LeftRearHip_Speed')
self.playing = False
# Determine which boards are set up for goto position control
for cell_index in range(0, len(header)):
if speed_accel_position[cell_index] == 'Position':
position_control_boards.append(actuator_names_by_col[cell_index])
if position_control_boards:
self.logger.info('The following boards have position control enabled: ' + str(position_control_boards))
# Loop over each remaining row in CSV
frame_number = 1
for row in csv_reader:
# Any estopped board will stop the script
if self.fault_active:
self.logger.warning("Stopping script, estop detected")
self.playing = False
if not self.playing:
break
self.logger.debug('Frame ' + str(frame_number) + ' (row ' + str(frame_number + 1) + ')')
frame_number += 1
# Check for invalid row, too long/short
# Loop over each cell in the row, process non-blank entries
for cell_index in range(0, len(row)):
if row[cell_index] != '':
if speed_accel_position[cell_index] == 'Speed':
self.logger.info(actuator_names_by_col[cell_index] + ' speed set to ' + row[cell_index])
current_speed[actuator_names_by_col[cell_index]] = int(row[cell_index])
elif speed_accel_position[cell_index] == 'Accel':
self.logger.info(actuator_names_by_col[cell_index] + ' accel set to ' + row[cell_index])
current_accel[actuator_names_by_col[cell_index]] = int(row[cell_index])
elif speed_accel_position[cell_index] == 'Position':
self.logger.info(actuator_names_by_col[cell_index] + ' position set to ' + row[cell_index])
current_position[actuator_names_by_col[cell_index]] = int(row[cell_index])
else:
self.logger.warning('Invalid column name, does not contain "_Speed", "_Accel" or "_Position"' +
header[cell_index])
# Don't exit, need to fall thru to force stop
# Send out commands to boards
for board in board_names:
# Range check inputs, warn?
if board in position_control_boards:
# Send goto position @ speed commands for this board
if current_position[board] is not None:
# Only update position once per row to avoid hunting
self.uki_send_comms(address=board_mapping[board],
offset=MB_MAP['MB_GOTO_POSITION'],
value=current_position[board] * 10) # Convert mm to mm/10
current_position[board] = None
# The speed column for this board is now a goto speed
self.uki_send_comms(address=board_mapping[board],
offset=MB_MAP['MB_GOTO_SPEED_SETPOINT'],
value=current_speed[board])
# Acceleration can be left out if not wanted, but send anyway
self.uki_send_comms(address=board_mapping[board],
offset=MB_MAP['MB_MOTOR_ACCEL'],
value=current_accel[board])
else:
# Normal speed/accel mode for this board
# Just for now always update every board, every frame
self.uki_send_comms(address=board_mapping[board],
offset=MB_MAP['MB_MOTOR_SETPOINT'],
value=current_speed[board])
self.uki_send_comms(address=board_mapping[board],
offset=MB_MAP['MB_MOTOR_ACCEL'],
value=current_accel[board])
time.sleep(frame_period)
if loop_count < (loops - 1):
loop_count += 1
else:
self.playing = False
if not self.playing:
piano_state = piano_states['STOPPING']
elif piano_state == piano_states['STOPPING']:
# Send final stop command to finish script
for board in board_names:
self.uki_send_comms(address=board_mapping[board],
offset=MB_MAP['MB_MOTOR_SETPOINT'],
value=0)
self.uki_send_comms(address=board_mapping[board],
offset=MB_MAP['MB_MOTOR_ACCEL'],
value=100)
self.logger.info("Script finished")
piano_state = piano_states['IDLE']
def periodic_gui_update(self):
self.gui.process_queue()
self.master.after(int(THREAD_DELAY * 1000), self.periodic_gui_update) # Update GUI with wrapper info every few ms
if __name__ == "__main__":
root = Tk()
gui = ThreadManager(root)
root.mainloop() | gpl-3.0 | -7,555,098,107,644,283,000 | 48.766484 | 135 | 0.485978 | false |
steelcowboy/pyCourseManager | friday.py | 1 | 3765 | from datetime import datetime, timedelta
import course_manager
# from login_manager import LoginManager, login_manager, db
import coursedb_manager
from usage_resource import UsageResource
from secret import sqlalchemy_url
from login import (
PinResource,
SignUpResource,
AuthorizeResource,
LogoutResource,
UserManagementResource,
db,
)
from flask import Flask
from flask_restful import Api
from flask_sqlalchemy import SQLAlchemy
from pymongo import MongoClient
## Setup
app = Flask(__name__)
api = Api(app)
### UNCOMMENT TO ENABLE CORS ###
### IF NEEDED ###
from flask_cors import CORS
CORS(app, supports_credentials=True)
################################
# login_manager.init_app(app)
app.config.update(
SQLALCHEMY_DATABASE_URI = sqlalchemy_url,
SQLALCHEMY_TRACK_MODIFICATIONS = True,
SECRET_KEY = 'secret_xxx',
)
db.init_app(app)
mongo = MongoClient()
## API stuff
# CourseDB resources
api.add_resource(coursedb_manager.FullCatalogResource,
'/api/<string:school>/catalog',
resource_class_kwargs={'client': mongo}
)
api.add_resource(coursedb_manager.FullDeptResource,
'/api/<string:school>/catalog/<string:dept>',
resource_class_kwargs={'client': mongo}
)
api.add_resource(coursedb_manager.DepartmentResource,
'/api/<string:school>/courses',
resource_class_kwargs={'client': mongo}
)
api.add_resource(coursedb_manager.DepartmentListingResource,
'/api/<string:school>/courses/<string:dept>',
resource_class_kwargs={'client': mongo}
)
api.add_resource(coursedb_manager.CatalogCourseResource,
'/api/<string:school>/courses/<string:dept>/<int:num>',
resource_class_kwargs={'client': mongo}
)
# Login resources
api.add_resource(AuthorizeResource,
'/api/<string:school>/authorize',
resource_class_kwargs={'client': mongo}
)
api.add_resource(PinResource,
'/api/<string:school>/getpin',
resource_class_kwargs={'client': mongo}
)
api.add_resource(SignUpResource,
'/api/<string:school>/signup',
resource_class_kwargs={'client': mongo}
)
api.add_resource(UserManagementResource, '/api/<string:school>/users/<string:user>')
api.add_resource(LogoutResource, '/api/<string:school>/users/<string:user>/logout')
# How to use my lovely program
api.add_resource(UsageResource, '/api')
api.add_resource(course_manager.ListStockYears,
'/api/<string:school>/stock_charts',
resource_class_kwargs={'client': mongo}
)
api.add_resource(course_manager.ListStockCharts,
'/api/<string:school>/stock_charts/<string:year>',
resource_class_kwargs={'client': mongo}
)
api.add_resource(course_manager.GetStockChart,
'/api/<string:school>/stock_charts/<string:year>/<string:major>',
resource_class_kwargs={'client': mongo}
)
api.add_resource(course_manager.UserConfig,
'/api/<string:school>/users/<string:user>/config',
resource_class_kwargs={'client': mongo}
)
api.add_resource(course_manager.NewChartResource,
'/api/<string:school>/users/<string:user>/import',
resource_class_kwargs={'client': mongo}
)
api.add_resource(course_manager.ListUserCharts,
'/api/<string:school>/users/<string:user>/charts',
resource_class_kwargs={'client': mongo}
)
api.add_resource(course_manager.ChartResource,
'/api/<string:school>/users/<string:user>/charts/<string:chart>',
resource_class_kwargs={'client': mongo}
)
api.add_resource(course_manager.CourseResource,
'/api/<string:school>/users/<string:user>/charts/<string:chart>/<string:c_id>',
resource_class_kwargs={'client': mongo}
)
@app.before_first_request
def create_database():
db.create_all()
if __name__ == '__main__':
app.run(host='0.0.0.0', port=4500)
| apache-2.0 | 1,852,214,547,275,188,000 | 26.888889 | 87 | 0.694821 | false |
tago-io/tago-python | tago/account/notifications.py | 1 | 2226 | import requests # Used to make HTTP requests
import json # Used to parse JSON
import os # Used to infer environment variables
API_TAGO = os.environ.get('TAGO_API') or 'https://api.tago.io'
REALTIME = os.environ.get('TAGO_REALTIME') or 'https://realtime.tago.io'
class Notifications:
def __init__(self, acc_token):
self.token = acc_token
self.default_headers = {
'content-type': 'application/json', 'Account-Token': acc_token}
return
def list(self, params):
return requests.get('{api_endpoint}/notification'.format(api_endpoint=API_TAGO), headers=self.default_headers, params=params).json()
def markAsRead(self, notifications):
        if not isinstance(notifications, list):
            try:
                notifications = list(notifications)
            except TypeError:
                raise ValueError('Parameter should be iterable')
        # Build the payload in every case so it is defined regardless of the
        # branch taken above.
        data = {'notification_ids': notifications}
        return requests.put('{api_endpoint}/notification/read'.format(api_endpoint=API_TAGO), headers=self.default_headers, json=data).json()
def accept(self, notification_id):
return requests.post('{api_endpoint}/notification/accept/{notification_id}'.format(api_endpoint=API_TAGO, notification_id=notification_id), headers=self.default_headers).json()
def refuse(self, notification_id):
return requests.post('{api_endpoint}/notification/refuse/{notification_id}'.format(api_endpoint=API_TAGO, notification_id=notification_id), headers=self.default_headers).json()
def remove(self, notification_id):
return requests.delete('{api_endpoint}/notification/{notification_id}'.format(api_endpoint=API_TAGO, notification_id=notification_id), headers=self.default_headers).json()
def registerDevice(self, device_token, platform):
data = {
'device_token': device_token,
'platform': platform,
}
return requests.post('{api_endpoint}/notification/push/register'.format(api_endpoint=API_TAGO), headers=self.default_headers, json=data).json()
def unRegisterDevice(self, device_token):
data = {
'device_token': device_token,
}
return requests.post('{api_endpoint}/notification/push/unregister'.format(api_endpoint=API_TAGO), headers=self.default_headers, json=data).json()
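# Illustrative usage (the token and notification ids below are placeholders;
# the shape of the API response is not assumed here):
#
#     notifications = Notifications('my-account-token')
#     result = notifications.list({})
#     notifications.markAsRead(['notification-id-1', 'notification-id-2'])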
| mit | 1,116,200,540,419,885,400 | 44.428571 | 180 | 0.721923 | false |
mtnsat/ics-release-dids | porta/account/account.py | 1 | 1435 | from ..with_request import WithRequest
class Account(WithRequest):
def __init__(self, url, session_id):
self.base_url = url
self.session_id=session_id
def account_get_by_id(self, i_account):
"""Get account by id"""
endpoint = "{0}".format('/rest/Account/get_account_info')
auth = '{{ "session_id": "{0}" }}'.format(self.session_id)
params = '{{ "i_account": {0} }}'.format(i_account)
payload = {
u'auth_info': auth,
u'params': params
}
return self.post_it(endpoint, payload, {})
def account_get_by_pin(self, pin_number):
"""Get account by pin number"""
endpoint = "{0}".format('/rest/Account/get_account_info')
auth = '{{ "session_id": "{0}" }}'.format(self.session_id)
params = '{{ "id": "{0}" }}'.format(pin_number)
payload = {
u'auth_info': auth,
u'params': params
}
return self.post_it(endpoint, payload, {})
def account_terminate_by_id(self, i_account):
"""Terminate account by id"""
endpoint = "{0}".format('/rest/Account/terminate_account')
auth = '{{ "session_id": "{0}" }}'.format(self.session_id)
params = '{{ "i_account": {0} }}'.format(i_account)
payload = {
u'auth_info': auth,
u'params': params
}
return self.post_it(endpoint, payload, {})
| mit | -4,234,994,092,088,408,000 | 31.613636 | 66 | 0.52892 | false |
moyaproject/moya | moya/db.py | 1 | 10394 | from __future__ import unicode_literals
from __future__ import print_function
from __future__ import absolute_import
from sqlalchemy import create_engine, MetaData
from sqlalchemy.orm import sessionmaker
from sqlalchemy.orm import scoped_session
from sqlalchemy.exc import (
DatabaseError,
IntegrityError,
OperationalError,
StatementError,
)
from . import namespaces
from .elements.utils import attr_bool
from .compat import text_type, implements_to_string, itervalues
from . import logic
from .console import Cell
import weakref
import logging
startup_log = logging.getLogger("moya.startup")
db_log = logging.getLogger("moya.db")
def dbobject(obj):
return getattr(obj, "__moyadbobject__", lambda: obj)()
@implements_to_string
class DBEngine(object):
def __init__(self, name, engine_name, engine, default=False):
self.name = name
self.engine_name = engine_name
self.engine = engine
self.default = default
# self.Session = sessionmaker(bind=engine) # expire_on_commit
self.session_factory = sessionmaker(bind=engine)
self.metadata = MetaData()
self.table_names = set()
def get_session(self):
return DBSession(self.session_factory, self.engine)
def __str__(self):
return "<dbengine %s>" % self.engine_name
def __repr__(self):
return '<dbengine "%s">' % self.name
def _get_db_error(e):
"""Extract information from sqlalchemy error"""
message = getattr(e, "message", text_type(e))
info = {"sql": e.statement, "params": e.params}
if hasattr(e, "orig"):
try:
code, message = e.orig.args
except:
pass
else:
info["code"] = code
message = message
return message, info
def wrap_db_errors(f):
"""Turn DB errors in to moya errors"""
def deco(self, *args, **kwargs):
try:
ret = f(self, *args, **kwargs)
except IntegrityError as e:
message, info = _get_db_error(e)
raise logic.MoyaException("db.integrity-error", message, info=info)
except OperationalError as e:
message, info = _get_db_error(e)
raise logic.MoyaException("db.operational-error", message, info=info)
except DatabaseError as e:
message, info = _get_db_error(e)
raise logic.MoyaException("db.error", message, info=info)
except StatementError as e:
message, info = _get_db_error(e)
raise logic.MoyaException(
"db.statement-error",
message,
info=info,
diagnosis="This error can occur if the models haven't been created in the database.\n\nDo you need to run **moya db sync**?",
)
except Exception as e:
raise
else:
return ret
return deco
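# Illustrative use of wrap_db_errors (the class and method below are hypothetical):
#
#     class SomeDBWrapper(object):
#         @wrap_db_errors
#         def commit(self):
#             self.session.commit()
#
# SQLAlchemy IntegrityError / OperationalError / DatabaseError / StatementError
# raised inside the wrapped call are re-raised as logic.MoyaException with the
# offending SQL statement and parameters attached to the info dict.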
class _SessionContextManager(object):
def __init__(self, session, element):
self._session = session
self._element = element
def __enter__(self):
self._session.enter_transaction()
def __exit__(self, exc_type, exc_val, exc_tb):
self._session.exit_transaction(
element=self._element, exc_type=exc_type, exc_val=exc_val
)
class DBSession(object):
def __init__(self, session_factory, engine=None):
self.session_factory = session_factory
self._engine = weakref.ref(engine) if engine is not None else None
self._session = None
self._transaction_level = 0
@property
def engine(self):
return self._engine() if self._engine is not None else None
@property
def session(self):
if self._session is None:
self._session = self.session_factory()
return self._session
def close(self):
if self._session:
self.session.close()
self._session = None
def __moyacontext__(self, context):
return self._session
def manage(self, element):
self.session
return _SessionContextManager(self, element)
def rollback(self):
self.session.rollback()
def __repr__(self):
if self.session is not None:
return "<dbsession %s>" % self.engine
return "<dbsession>"
def enter_transaction(self):
self._transaction_level += 1
@wrap_db_errors
def exit_transaction(self, element=None, exc_type=None, exc_val=None):
self._transaction_level -= 1
if exc_type is None:
if self._transaction_level == 0:
try:
self.session.commit()
except:
self.session.rollback()
raise
else:
self.session.rollback()
self._transaction_level = 0
def __getattr__(self, key):
return getattr(self.session, key)
def add_engine(archive, name, section):
engine_name = section["engine"]
echo = attr_bool(section.get("echo", "n"))
default = attr_bool(section.get("default", "n"))
connect_args = {}
if engine_name.startswith("sqlite:"):
connect_args["check_same_thread"] = False
sqla_engine = create_engine(
engine_name, echo=echo, pool_recycle=3600, connect_args=connect_args
)
# if engine_name.startswith('sqlite:'):
# @event.listens_for(sqla_engine, "connect")
# def do_connect(dbapi_connection, connection_record):
# # disable pysqlite's emitting of the BEGIN statement entirely.
# # also stops it from emitting COMMIT before any DDL.
# dbapi_connection.isolation_level = None
# @event.listens_for(sqla_engine, "begin")
# def do_begin(conn):
# # emit our own BEGIN
# conn.execute("BEGIN EXCLUSIVE")
engine = DBEngine(name, engine_name, sqla_engine, default)
if default or not archive.database_engines:
archive.default_db_engine = name
archive.database_engines[name] = engine
startup_log.debug("%r created", engine)
def get_session_map(archive):
"""Get a dictionary that maps db names on to session objects"""
session_map = {
db: engine.get_session() for db, engine in archive.database_engines.items()
}
if archive.default_db_engine is not None:
session_map["_default"] = session_map[archive.default_db_engine]
return session_map
def commit_sessions(context, close=True):
count = 0
for dbsession in context["._dbsessions"].values():
if dbsession.session:
try:
# db_log.debug('committing %s', dbsession)
dbsession.session.commit()
except:
db_log.exception("error committing session")
raise
else:
count += 1
if close:
try:
dbsession.close()
except:
db_log.exception("error closing session")
return count
def rollback_sessions(context, close=True):
count = 0
for dbsession in context["._dbsessions"].values():
if dbsession.session:
try:
# db_log.debug('rolling back %s', dbsession)
dbsession.session.rollback()
except:
db_log.exception("error rolling back session")
else:
count += 1
if close:
try:
dbsession.close()
except:
db_log.exception("error closing session")
return count
def close_sessions(context):
"""Close db sessions."""
for dbsession in context["._dbsessions"].values():
if dbsession.session:
try:
dbsession.close()
except:
db_log.exception("error closing session")
def sync_all(archive, console, summary=True):
if validate_all(archive, console) != 0:
return -1
engines = archive.database_engines
if not engines:
return 0
for engine in itervalues(engines):
if engine.default:
default_engine = engine
break
else:
default_engine = None
apps = archive.apps.values()
synced = []
try:
with console.progress("syncing", num_steps=len(apps), width=24) as progress:
progress.update(None, "building models...")
for app in apps:
for model in app.lib.get_elements_by_type((namespaces.db, "model")):
model._build_model(app)
for app in apps:
progress.update(None, "syncing {!r}".format(app))
count = 0
for model in app.lib.get_elements_by_type((namespaces.db, "model")):
engine_name = model.dbname
if engine_name is None:
engine = default_engine
else:
engine = engines[engine_name]
model.create_all(archive, engine, app)
count += 1
progress.step()
synced.append((app, count))
progress.update(None, "db sync complete")
finally:
if summary:
table = []
for app, count in synced:
table.append(
(
Cell(text_type(app), fg="magenta", bold=True),
Cell("{}".format(count) if count else "", bold=True),
)
)
console.table(table, header_row=["app", "synced"], dividers=True, grid=True)
return 0
def validate_all(archive, console=None):
"""Validates models and returns the number of fails"""
if not archive.database_engines:
return 0
from .tags.db import DBModel
fails = DBModel.validate_all(archive)
if console is None:
return not len(fails)
for model, app, element, error in fails:
if element:
console.document_error(
text_type(error),
element._location,
element._code,
element.source_line,
None,
)
else:
console.error(text_type(error))
if hasattr(error, "diagnosis"):
console.table([(error.diagnosis,)])
return len(fails)
| mit | 8,660,788,423,971,836,000 | 29.040462 | 141 | 0.566577 | false |
langurmonkey/gaiasky | assets/scripts/showcases/camera-constant-turn.py | 1 | 2066 | # This Gaia Sky script showcases a constant camera turn
# Created by Toni Sagrista
from py4j.clientserver import ClientServer, JavaParameters, PythonParameters
import time
class CameraUpdateRunnable(object):
def __init__(self, gs, rotation_rate):
self.gs = gs
self.rotation_rate = rotation_rate
        self.direction = [0.0, 0.0, 1.0]
        self.up = [0.0, 1.0, 0.0]
        # run() uses time.time(), so initialize prev_time with the same clock
        self.prev_time = time.time()
# Set the direction and up
self.gs.setCameraDirection(self.direction)
self.gs.setCameraUp(self.up)
def run(self):
self.time = time.time()
# This is the number of seconds since the last frame
dt = self.time - self.prev_time
# Actual degrees to rotate this frame
rot_deg = dt * self.rotation_rate
# Rotate the direction angle around up by rot_deg degrees
self.direction = self.gs.rotate3([self.direction[0], self.direction[1], self.direction[2]], [0.0, 1.0, 0.0], rot_deg)
# Set it
self.gs.setCameraDirection(self.direction)
# We do not need to set the up vector, since it never changes
# Store prev_time for use in next frame
self.prev_time = self.time
    def toString(self):
        return "camera-update-runnable"
class Java:
implements = ["java.lang.Runnable"]
gateway = ClientServer(java_parameters=JavaParameters(auto_convert=True),
python_parameters=PythonParameters())
gs = gateway.entry_point
gs.cameraStop()
gs.setCameraFree()
gs.stopSimulationTime()
gs.setVisibility("element.orbits", True)
gs.setCameraLock(True)
gs.setCameraOrientationLock(False)
gs.setFov(49)
# Rotation rate in deg/s
rotation_rate = 15.0
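# e.g. at 15 deg/s the camera completes a full 360-degree turn every 24 seconds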
# park the camera updater
gs.parkRunnable("cam-updater", CameraUpdateRunnable(gs, rotation_rate))
gs.sleep(20)
# clean up and finish
print("Cleaning up and ending")
gs.unparkRunnable("cam-updater")
gs.cameraStop()
gs.maximizeInterfaceWindow()
gs.enableInput()
# close connection
gateway.shutdown()
| mpl-2.0 | -5,602,507,680,493,509,000 | 25.487179 | 125 | 0.673282 | false |
beluganos/beluganos | src/fabricflow/fibc/ofc/ofdpa2.py | 1 | 3614 | # -*- coding: utf-8 -*-
# Copyright (C) 2017 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
FIBC mod factory
"""
from goryu.ofproto import ofdpa_match
from goryu.ofproto import ofdpa_action
from fabricflow.fibc.ofc import generic
ofdpa_match.init()
ofdpa_action.init()
def setup_flow(dpath, mod, ofctl):
"""
Setup flows
"""
return generic.setup_flow(dpath, mod, ofctl, False)
def vlan_flow(dpath, mod, ofctl):
"""
VLAN flow table.
"""
return generic.vlan_flow(dpath, mod, ofctl, False)
def termination_mac_flow(dpath, mod, ofctl):
"""
Termination MAC flow table.
"""
return generic.termination_mac_flow(dpath, mod, ofctl)
def mpls1_flow(dpath, mod, ofctl):
"""
MPLS1 flow table.
"""
return generic.mpls1_flow(dpath, mod, ofctl, False)
def unicast_routing_flow(dpath, mod, ofctl):
"""
    Create flow_mod for Unicast Routing flow table.
"""
return generic.unicast_routing_flow(dpath, mod, ofctl, False)
def bridging_flow(dpath, mod, ofctl):
"""
Bridging flow table.
"""
return generic.bridging_flow(dpath, mod, ofctl)
def policy_acl_flow(dpath, mod, ofctl):
"""
Policy ACL flow table.
"""
return generic.policy_acl_flow(dpath, mod, ofctl, False)
def setup_group(dpath, mod, ofctl):
"""
Setup Group.
"""
return generic.setup_group(dpath, mod, ofctl)
def l2_interface_group(dpath, mod, ofctl):
"""
L2 Interface Group
"""
return generic.l2_interface_group(dpath, mod, ofctl)
def l3_unicast_group(dpath, mod, ofctl):
"""
L3 Unicast Group
"""
return generic.l3_unicast_group(dpath, mod, ofctl)
def l3_ecmp_group(dpath, mod, ofctl):
"""
ECMP Group
"""
return generic.l3_ecmp_group(dpath, mod, ofctl)
def mpls_interface_group(dpath, mod, ofctl):
"""
MPLS Interface group
"""
return generic.mpls_interface_group(dpath, mod, ofctl)
def mpls_l3_vpn_group(dpath, mod, ofctl):
"""
MPLS L3 VPN Group
"""
return generic.mpls_l3_vpn_group(dpath, mod, ofctl)
def mpls_tun1_group(dpath, mod, ofctl):
"""
MPLS Tunnel1 Label Group
"""
return generic.mpls_tun1_group(dpath, mod, ofctl)
def mpls_swap_group(dpath, mod, ofctl):
"""
MPLS Swap Label Group
"""
return generic.mpls_swap_group(dpath, mod, ofctl)
def mpls_ecmp_group(dpath, mod, ofctl):
"""
MPLS ECMP Group
"""
return generic.mpls_ecmp_group(dpath, mod, ofctl)
def l2_unfiltered_interface_group(dpath, mod, ofctl):
"""
L2 Unfiltered Interface Group.
"""
return generic.l2_unfiltered_interface_group(dpath, mod, ofctl)
def pkt_out(dpath, port_id, strip_vlan, data):
"""
PacketOut
"""
return generic.pkt_out(dpath, port_id, strip_vlan, data)
def get_port_stats(dpath, waiters, port_id, ofctl):
"""
get port stats
"""
return generic.get_port_stats(dpath, waiters, port_id, ofctl)
def port_mod(dpath, mod, ofctl):
"""
PotMod
"""
return generic.port_mod(dpath, mod, ofctl)
| apache-2.0 | 5,274,588,383,089,223,000 | 20.771084 | 69 | 0.65523 | false |
pappasam/latexbuild | tests/test_utils.py | 1 | 4066 | import unittest
import os
from latexbuild.utils import (
random_str_uuid,
random_name_filepath,
list_filepathes_with_predicate,
read_file,
recursive_apply,
)
PATH_FILE = os.path.abspath(__file__)
PATH_TEST = os.path.dirname(PATH_FILE)
class TestRandomStrUuid(unittest.TestCase):
'''Test class for random_str_uuid'''
def test_correct_length(self):
l1, l2 = 4, 7
val1, val2 = random_str_uuid(l1), random_str_uuid(l2)
len1, len2 = len(val1), len(val2)
self.assertEqual(l1, len1)
self.assertEqual(l2, len2)
def test_random(self):
l = 7
v1, v2 = random_str_uuid(l), random_str_uuid(l)
self.assertNotEqual(v1, v2)
def test_below_1(self):
self.assertEqual(1, len(random_str_uuid(1)))
self.assertRaises(ValueError, random_str_uuid, 0)
def test_above_32(self):
self.assertEqual(32, len(random_str_uuid(32)))
self.assertRaises(ValueError, random_str_uuid, 33)
class TestRandomNameFilepath(unittest.TestCase):
'''Test class for random_name_filepath'''
PATH = "/hello/world/test.txt"
def test_correct_length(self):
len_random = 5
path_finish = random_name_filepath(self.PATH, len_random)
self.assertEqual(len(self.PATH) + len_random, len(path_finish))
def test_extension_still_there(self):
path_finish = random_name_filepath(self.PATH, 7)
ext_path_start = os.path.splitext(self.PATH)[-1]
ext_path_finish = os.path.splitext(path_finish)[-1]
self.assertEqual(ext_path_start, ext_path_finish)
def test_beginning_still_there(self):
len_random = 5
path_finish = random_name_filepath(self.PATH, len_random)
beg_start = os.path.splitext(self.PATH)[0]
beg_finish = os.path.splitext(path_finish)[0]
beg_finish_same = beg_finish[:-len_random]
self.assertEqual(beg_start, beg_finish_same)
def test_middle_is_random(self):
len_random = 5
path_1 = random_name_filepath(self.PATH, len_random)
path_2 = random_name_filepath(self.PATH, len_random)
beg_1 = os.path.splitext(path_1)[0][-len_random:]
beg_2 = os.path.splitext(path_2)[0][-len_random:]
self.assertNotEqual(beg_1, beg_2)
class TestListFilepathesWithPredicate(unittest.TestCase):
'''Test class for list_filepathes_with_predicate'''
def test_this_file(self):
most_of_this_file = PATH_FILE[:-2]
files = list_filepathes_with_predicate(PATH_TEST, most_of_this_file)
self.assertEqual(files, [PATH_FILE])
def test_not_a_match(self):
impossible_prefix = "no root therefore impossible"
files = list_filepathes_with_predicate(PATH_TEST, impossible_prefix)
self.assertEqual(files, [])
def test_invalid_directory(self):
self.assertRaises(ValueError, list_filepathes_with_predicate,
"notadirectory", "anything")
class TestReadFile(unittest.TestCase):
'''Test class for read_file'''
'''This function is too simple to warrant testing at this time'''
pass
class TestRecursiveApply(unittest.TestCase):
'''Test class for recursive_apply'''
def test_nested_objects(self):
inval = {
'hello': {'man': 'woman', 'dog': 'cat'},
'world': 'smartiepants',
'brownie': [
'flower',
{'sugar': 'bad'},
'chocolate',
]
}
expected_outval = {
'hello': {'man': 'womanTEST', 'dog': 'catTEST'},
'world': 'smartiepantsTEST',
'brownie': [
'flowerTEST',
{'sugar': 'badTEST'},
'chocolateTEST',
]
}
        func = lambda s: s + 'TEST'
        actual_outval = recursive_apply(inval, func)
self.assertEqual(actual_outval, expected_outval)
if __name__ == '__main__':
unittest.main()
| mit | 9,070,330,015,316,110,000 | 34.356522 | 76 | 0.590999 | false |
spoqa/dodotable | dodotable/schema.py | 1 | 12038 | # -*- coding: utf-8 -*-
""":mod:`dodotable.schema` --- table schema
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
from __future__ import absolute_import
import collections
try:
from collections.abc import MutableSequence
except ImportError:
from collections import MutableSequence
import math
from sqlalchemy.orm import Query
from .environment.flask import FlaskEnvironment
from .util import render, string_literal, _get_data
__all__ = (
'Cell', 'Column', 'LinkedColumn', 'ObjectColumn', 'ENVIRONMENT',
'Queryable', 'Renderable', 'Row', 'Table', 'Pager', 'Schema',
)
ENVIRONMENT = FlaskEnvironment()
class Schema(object):
"""
:param environment:
:type environment: :class:`~.environment.Environment`
"""
environment = ENVIRONMENT
def render(self, template_name, **kwargs):
return render(template_name,
extra_environments=self.environment.__dict__(),
**kwargs)
class Renderable(object):
"""jinja에서 바로 렌더링되는 클래스의 상위 클래스
jinja에서는 ``__html__`` 를 호출하여 렌더링을 하므로
:class:`~Renderable` 을 상속받아 :meth:`~Renderable.__html__` 을
구현하는 경우 바로 렌더링 할 수 있습니다.
.. code-block:: python
class SomeElem(Renderable):
def __html__(self):
return "<h1>Hello World</h1>"
.. code-block:: jinja
{{ SomeElem() }} <!-- == <h1>Hello World</h1> -->
"""
def __html__(self):
""":mod:`jinja` 내부 호출용 함수
.. note::
요즘은 :func:`__html__` 을 구현하는게 HTML 뱉는 객체의 de facto 라고하더군요.
"""
raise NotImplementedError('__html__ not implemented yet.')
class Queryable(object):
""":class:`~sqlalchemy.orm.query.Query` 로 변환 가능한 객체
쿼리를 내뱉는 모든 필더들은 :class:`~Queryable` 을 상속받고
:meth:`~Queryable.__query__` 를 구현하여 sqlalchemy 쿼리로 사용할 수 있도록
변환해야합니다.
"""
def __query__(self):
"""모든 :class:`~dodotable.Queryable` 객체가 구현해야하는 메소드."""
raise NotImplementedError('__query__ not implemented yet.')
class Cell(Schema, Renderable):
"""테이블의 셀을 나타내는 클래스
:param int col: column 위치
:param int row: row 위치
:param data: 셀에 채워질 데이터
"""
def __init__(self, col, row, data, _repr=string_literal, classes=()):
self.col = col
self.row = row
self.data = data
self.repr = _repr
self.classes = classes
def __html__(self):
return self.render('cell.html', cell=self)
class LinkedCell(Cell):
"""컨텐츠에 링크가 걸린 Cell
:param int col: column 위치
:param int row: row 위치
:param data: 셀에 채워질 데이터
:param endpoint: 데이터를 누르면 이동할 url
"""
def __init__(self, col, row, data, endpoint):
self.col = col
self.row = row
self.data = data
self.url = endpoint
def __html__(self):
return self.render('linkedcell.html', cell=self)
class Column(Schema, Renderable):
"""테이블의 열을 나타내는 클래스
:param str label: 컬럼 레이블
:param str attr: 가져올 attribute 이름
:param list order_by: 정렬 기준
:param list filters: 정렬 기준
:param function _repr: 보여질 형식
:param bool sortable: 정렬 가능 여부
:param bool visible: 테이블에 해당 칼럼이 보일지 말지의 여부.
해당 값이 False여도
:class:`~dodotable.condition.IlikeSet`의 필터에는
보이므로 검색에는 사용할 수 있습니다.
"""
def __init__(self, label, attr, order_by=(), filters=None,
_repr=string_literal, sortable=True, visible=True,
classes=()):
from .condition import Order
if filters is None:
filters = []
self.label = label
self.attr = attr
self.filters = filters
self.order_by = Order.of_column(attr, order_by)
self._repr = _repr
self.sortable = sortable
self.visible = visible
self.classes = classes
def add_filter(self, filter):
self.filters.append(filter)
def __cell__(self, col, row, data, attribute_name, default=None):
"""해당 열의 데이터를 :class:`~dodotable.Cell`로 변환합니다.
:param col:
:param row:
:param data:
:param attribute_name:
:param default:
:return:
"""
return Cell(col=col, row=row,
data=_get_data(data, attribute_name, default),
_repr=self._repr,
classes=self.classes)
def __html__(self):
return self.render('column.html', column=self)
class LinkedColumn(Column):
"""링크가 걸려야 하는 열 나타내는 클래스
:param str label: 컬럼 레이블
:param str attr: 가져올 attribute 이름
:param str or function endpoint: 걸릴 링크 형식
:param list order_by: 정렬 기준
"""
def __init__(self, *args, **kwargs):
self.endpoint = kwargs.pop('endpoint')
super(LinkedColumn, self).__init__(*args, **kwargs)
def __cell__(self, col, row, data, attribute_name, default=None):
endpoint = self.endpoint(data) if callable(
self.endpoint) else self.endpoint
return LinkedCell(col=col, row=row,
data=_get_data(data, attribute_name, default),
endpoint=endpoint)
class ObjectColumn(Column):
"""Get __cell_.data as result instead of attribute."""
def __cell__(self, col, row, data, attribute_name, default=None):
return Cell(col=col, row=row,
data=data if data else default,
_repr=self._repr,
classes=self.classes)
class HiddenColumn(Column):
"""보이지 않는 열"""
def __init__(self, *args, **kwargs):
super(HiddenColumn, self).__init__(*args, **kwargs)
self.visible = False
class Row(Schema, MutableSequence, Renderable):
"""테이블에 행을 나타내는 클래스 """
def __init__(self):
self._row = []
def __delitem__(self, key):
del self._row[key]
def __getitem__(self, item):
return self._row[item]
def __setitem__(self, key, value):
self._row[key] = value
def __len__(self):
return len(self._row)
def insert(self, index, object_):
self._row.insert(index, object_)
def append(self, cell):
"""행에 cell을 붙입니다. """
assert isinstance(cell, Cell)
super(Row, self).append(cell)
def __html__(self):
return self.render('row.html', row=self)
class Pager(Schema, Renderable):
DEFAULT_LIMIT = 10
DEFAULT_OFFSET = 0
Page = collections.namedtuple('Page',
['selected', 'number', 'limit', 'offset'])
def __init__(self, limit, offset, count, padding=10):
try:
self.limit = int(limit)
self.offset = int(offset)
self.count = int(count)
self.padding = int(padding)
except ValueError:
self.limit = 10
self.offset = 0
self.count = 0
self.padding = 10
def from_page_number(self, number):
return self.Page(limit=self.limit, offset=(number - 1) * self.limit,
selected=False, number=number)
@property
def pages(self):
page_count = int(math.ceil(self.count / float(self.limit)))
current_page_count = (self.offset // self.limit) + 1
pages = []
s = (current_page_count - 1) // self.padding
start = s * 10 + 1
for page in self.range(start,
start + self.padding - 1,
max_=page_count):
selected = False
if page == current_page_count:
selected = True
p = self.Page(selected=selected, number=page, limit=self.limit,
offset=self.limit * (page - 1))
pages.append(p)
return pages
def range(self, start, end, max_, min_=1):
i = start
yield min_
while i <= end and i <= max_:
if i > min_:
yield i
i += 1
if i < max_:
yield max_
def __html__(self):
return self.render('pager.html', pager=self)
class Table(Schema, Queryable, Renderable):
"""데이터를 나타내는 테이블의 틀
:param cls:
:param label:
:param columns:
:param sqlalchemy_session:
"""
def __init__(self, cls, label, unit_label="row",
columns=None,
sqlalchemy_session=None):
self.cls = cls
self.label = label
self.unit_label = unit_label
self._filters = []
self.rows = []
if columns is None:
self._columns = []
else:
self._columns = columns
self._count = None
self.session = sqlalchemy_session
try:
if sqlalchemy_session is None:
self.session = self.environment.get_session()
finally:
if not self.session:
raise ValueError("{0.__class__.__name__}.session "
"can't be None".format(self))
self.pager = Pager(limit=1, offset=0, count=0)
self.pager.environment = self.environment
def select(self, offset=Pager.DEFAULT_OFFSET, limit=Pager.DEFAULT_LIMIT):
self.rows = []
q = self.query.offset(offset).limit(limit)
for i, row in enumerate(q):
_row = Row()
for j, col in enumerate(self.columns):
_row.append(
col.__cell__(col=j, row=i, data=row,
attribute_name=col.attr)
)
self.rows.append(_row)
self.pager = Pager(limit=limit, offset=offset,
count=self.count)
self.pager.environment = self.environment
return self
def add_filter(self, filter):
self._filters.append(filter)
@property
def _order_queries(self):
"""쿼리의 정렬 조건을 가져옵니다."""
from .condition import Order
order = []
for column in self.columns:
if column.order_by:
o = Order(self.cls, column.attr, column.order_by)
order.append(o.__query__())
if not order:
k = self.columns[0].attr
o = Order(self.cls, k)
self.columns[0].order_by = o.order
order.append(o.__query__())
return order
@property
def _filter_queries(self):
for filter in self._filters:
if filter:
yield filter.__query__()
@property
def count(self):
return self.build_base_query().count()
def build_base_query(self):
if isinstance(self.cls, Query):
query = self.cls
else:
query = self.session.query(self.cls)
for filter in self._filter_queries:
if filter is not None:
query = query.filter(filter)
return query
@property
def query(self):
"""쿼리를 만듭니다.
:return:
"""
query = self.build_base_query().order_by(*self._order_queries)
return query
@property
def columns(self):
return [column for column in self._columns if column.visible]
def __html__(self):
return self.render('table.html', table=self)
def __query__(self):
return self.query
| mit | 7,330,063,789,316,485,000 | 26.038462 | 77 | 0.540185 | false |
ccxt/ccxt | examples/py/bypass-cloudflare.py | 1 | 1389 | # -*- coding: utf-8 -*-
import cfscrape
import os
import sys
root = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
sys.path.append(root + '/python')
import ccxt # noqa: E402
def print_supported_exchanges():
print('Supported exchanges:')
print(', '.join(ccxt.exchanges))
try:
id = sys.argv[1] # get exchange id from command line arguments
# check if the exchange is supported by ccxt
exchange_found = id in ccxt.exchanges
if exchange_found:
print('Instantiating ' + id + ' exchange')
# instantiate the exchange by id
exchange = getattr(ccxt, id)({
'timeout': 20000,
'session': cfscrape.create_scraper(),
})
try:
# load all markets from the exchange
markets = exchange.load_markets()
# output a list of all market symbols
print(id + ' has ' + str(len(exchange.symbols)) + ' symbols: ' + ', '.join(exchange.symbols))
print('Succeeded.')
except ccxt.BaseError as e:
print(type(e).__name__, str(e))
print('Failed.')
else:
print('Exchange ' + id + ' not found')
print_supported_exchanges()
except Exception as e:
print('[' + type(e).__name__ + ']', str(e))
print('Usage: python ' + sys.argv[0] + ' id')
print_supported_exchanges()
| mit | -4,424,267,704,340,370,000 | 23.368421 | 105 | 0.577394 | false |
ojengwa/django-multitenants | tenant_schemas/utils.py | 1 | 2758 | from contextlib import contextmanager
from django.conf import settings
from django.db import connection
try:
from django.apps import apps
get_model = apps.get_model
except ImportError:
from django.db.models.loading import get_model
from django.core import mail
@contextmanager
def schema_context(schema_name):
previous_tenant = connection.tenant
try:
connection.set_schema(schema_name)
yield
finally:
if previous_tenant is None:
connection.set_schema_to_public()
else:
connection.set_tenant(previous_tenant)
@contextmanager
def tenant_context(tenant):
previous_tenant = connection.tenant
try:
connection.set_tenant(tenant)
yield
finally:
if previous_tenant is None:
connection.set_schema_to_public()
else:
connection.set_tenant(previous_tenant)
def get_tenant_model():
return get_model(*settings.TENANT_MODEL.split("."))
def get_public_schema_name():
return getattr(settings, 'PUBLIC_SCHEMA_NAME', 'public')
def get_limit_set_calls():
return getattr(settings, 'TENANT_LIMIT_SET_CALLS', False)
def clean_tenant_url(url_string):
"""
Removes the TENANT_TOKEN from a particular string
"""
if hasattr(settings, 'PUBLIC_SCHEMA_URLCONF'):
if (settings.PUBLIC_SCHEMA_URLCONF and url_string
.startswith(settings.PUBLIC_SCHEMA_URLCONF)):
url_string = url_string[len(settings.PUBLIC_SCHEMA_URLCONF):]
return url_string
def remove_www_and_dev(hostname):
"""
Legacy function - just in case someone is still using the old name
"""
return remove_www(hostname)
def remove_www(hostname):
"""
Removes www. from the beginning of the address. Only for
routing purposes. www.test.com/login/ and test.com/login/ should
find the same tenant.
"""
if hostname.startswith("www."):
return hostname[4:]
return hostname
def django_is_in_test_mode():
"""
I know this is very ugly! I'm looking for more elegant solutions.
See: http://stackoverflow.com/questions/6957016/detect-django-testing-mode
"""
return hasattr(mail, 'outbox')
def schema_exists(schema_name):
cursor = connection.cursor()
# check if this schema already exists in the db
sql = 'SELECT EXISTS(SELECT 1 FROM pg_catalog.pg_namespace WHERE LOWER(nspname) = LOWER(%s))'
cursor.execute(sql, (schema_name, ))
row = cursor.fetchone()
if row:
exists = row[0]
else:
exists = False
cursor.close()
return exists
def app_labels(apps_list):
"""
Returns a list of app labels of the given apps_list
"""
return [app.split('.')[-1] for app in apps_list]
| mit | -1,970,988,568,842,079,000 | 24.072727 | 97 | 0.660986 | false |
chukysoria/pyspotify-connect | tests/test_error.py | 1 | 4528 | from __future__ import unicode_literals
import unittest
import spotifyconnect
from spotifyconnect import utils
class ErrorTest(unittest.TestCase):
def test_error_is_an_exception(self):
error = spotifyconnect.Error(0)
self.assertIsInstance(error, Exception)
def test_maybe_raise(self):
with self.assertRaises(spotifyconnect.LibError):
spotifyconnect.Error.maybe_raise(
spotifyconnect.ErrorType.WrongAPIVersion)
def test_maybe_raise_does_not_raise_if_ok(self):
spotifyconnect.Error.maybe_raise(spotifyconnect.ErrorType.Ok)
def test_maybe_raise_does_not_raise_if_error_is_ignored(self):
spotifyconnect.Error.maybe_raise(
spotifyconnect.ErrorType.WrongAPIVersion,
ignores=[spotifyconnect.ErrorType.WrongAPIVersion])
def test_maybe_raise_works_with_any_iterable(self):
spotifyconnect.Error.maybe_raise(
spotifyconnect.ErrorType.WrongAPIVersion,
ignores=(spotifyconnect.ErrorType.WrongAPIVersion,))
class LibErrorTest(unittest.TestCase):
def test_is_an_error(self):
error = spotifyconnect.LibError(0)
self.assertIsInstance(error, spotifyconnect.Error)
def test_has_error_type(self):
error = spotifyconnect.LibError(0)
self.assertEqual(error.error_type, 0)
error = spotifyconnect.LibError(1)
self.assertEqual(error.error_type, 1)
def test_is_equal_if_same_error_type(self):
self.assertEqual(
spotifyconnect.LibError(0),
spotifyconnect.LibError(0))
def test_is_not_equal_if_different_error_type(self):
self.assertNotEqual(
spotifyconnect.LibError(0),
spotifyconnect.LibError(1))
def test_error_has_useful_repr(self):
error = spotifyconnect.LibError(0)
self.assertIn('Ok', repr(error))
def test_error_has_useful_string_representation(self):
error = spotifyconnect.LibError(0)
self.assertEqual('%s' % error, 'Ok')
self.assertIsInstance('%s' % error, utils.text_type)
error = spotifyconnect.LibError(3)
self.assertEqual('%s' % error, 'WrongAPIVersion')
def test_has_error_constants(self):
self.assertEqual(
spotifyconnect.LibError.Ok,
spotifyconnect.LibError(
spotifyconnect.ErrorType.Ok))
self.assertEqual(
spotifyconnect.LibError.WrongAPIVersion,
spotifyconnect.LibError(spotifyconnect.ErrorType.WrongAPIVersion))
class ErrorTypeTest(unittest.TestCase):
def test_has_error_type_constants(self):
self.assertEqual(spotifyconnect.ErrorType.Ok, 0)
self.assertEqual(spotifyconnect.ErrorType.Failed, 1)
self.assertEqual(spotifyconnect.ErrorType.InitFailed, 2)
self.assertEqual(spotifyconnect.ErrorType.WrongAPIVersion, 3)
self.assertEqual(spotifyconnect.ErrorType.NullArgument, 4)
self.assertEqual(spotifyconnect.ErrorType.InvalidArgument, 5)
self.assertEqual(spotifyconnect.ErrorType.Uninitialized, 6)
self.assertEqual(spotifyconnect.ErrorType.AlreadyInitialized, 7)
self.assertEqual(spotifyconnect.ErrorType.LoginBadCredentials, 8)
self.assertEqual(spotifyconnect.ErrorType.NeedsPremium, 9)
self.assertEqual(spotifyconnect.ErrorType.TravelRestriction, 10)
self.assertEqual(spotifyconnect.ErrorType.ApplicationBanned, 11)
self.assertEqual(spotifyconnect.ErrorType.GeneralLoginError, 12)
self.assertEqual(spotifyconnect.ErrorType.Unsupported, 13)
self.assertEqual(spotifyconnect.ErrorType.NotActiveDevice, 14)
self.assertEqual(spotifyconnect.ErrorType.PlaybackErrorStart, 1000)
self.assertEqual(spotifyconnect.ErrorType.GeneralPlaybackError, 1001)
self.assertEqual(spotifyconnect.ErrorType.PlaybackRateLimited, 1002)
self.assertEqual(spotifyconnect.ErrorType.Unknown, 1003)
class TimeoutTest(unittest.TestCase):
def test_is_an_error(self):
error = spotifyconnect.Timeout(0.5)
self.assertIsInstance(error, spotifyconnect.Error)
def test_has_useful_repr(self):
error = spotifyconnect.Timeout(0.5)
self.assertIn('Operation did not complete in 0.500s', repr(error))
def test_has_useful_string_representation(self):
error = spotifyconnect.Timeout(0.5)
self.assertEqual('%s' % error, 'Operation did not complete in 0.500s')
self.assertIsInstance('%s' % error, utils.text_type)
| apache-2.0 | 884,721,389,033,791,600 | 38.034483 | 78 | 0.703843 | false |
vfuse/nixstatsagent | nixstatsagent/nixstatsagent.py | 2 | 24810 | #!/usr/bin/env python
# -*- coding: utf-8; tab-width: 4; indent-tabs: nil; -*-
# by Al Nikolov <[email protected]>
from __future__ import print_function
import bz2
import sys
if sys.version_info >= (3,):
try:
from past.builtins import basestring
except ImportError:
basestring = str
import configparser
import http.client
from queue import Queue, Empty
import io
else:
import ConfigParser
import httplib
import StringIO
from Queue import Queue, Empty
import glob
import imp
try:
import json
except ImportError:
import simplejson as json
import logging
import os
import pickle
import signal
import socket
import subprocess
import threading
import time
import types
import urllib
try:
from urllib.parse import urlparse, urlencode
from urllib.request import urlopen, Request
from urllib.error import HTTPError
except ImportError:
from urlparse import urlparse
from urllib import urlencode
from urllib2 import urlopen, Request, HTTPError
__version__ = '1.2.12'
__FILEABSDIRNAME__ = os.path.dirname(os.path.abspath(__file__))
ini_files = (
os.path.join('/etc', 'nixstats.ini'),
os.path.join('/etc', 'nixstats-token.ini'),
os.path.join(os.path.dirname(__FILEABSDIRNAME__), 'nixstats.ini'),
os.path.join(os.path.dirname(__FILEABSDIRNAME__), 'nixstats-token.ini'),
os.path.abspath('nixstats.ini'),
os.path.abspath('nixstats-token.ini'),
)
if sys.platform == 'win32':
ini_files = (
os.path.join(__FILEABSDIRNAME__, 'nixstats.ini'),
os.path.join(__FILEABSDIRNAME__, 'nixstats-token.ini'),
)
def info():
'''
Return string with info about nixstatsagent:
- version
- plugins enabled
- absolute path to plugin directory
- server id from configuration file
'''
agent = Agent(dry_instance=True)
plugins_path = agent.config.get('agent', 'plugins')
plugins_enabled = agent._get_plugins(state='enabled')
return '\n'.join((
'Version: %s' % __version__,
'Plugins enabled: %s' % ', '.join(plugins_enabled),
'Plugins directory: %s' % plugins_path,
'Server: %s' % agent.config.get('agent', 'server'),
))
def hello(proto='https'):
user_id = sys.argv[1]
if len(sys.argv) > 2:
token_filename = sys.argv[2]
else:
token_filename = os.path.join(__FILEABSDIRNAME__, 'nixstats-token.ini')
if len(sys.argv) > 3:
unique_id = sys.argv[3]
else:
unique_id = ''
if '_' in user_id:
server_id = user_id.split('_')[1]
user_id = user_id.split('_')[0]
elif os.path.isfile('/etc/nixstats/token'):
oldconfigfile = open('/etc/nixstats/token','r')
server_id = oldconfigfile.readline()
print('Upgrading from old monitoring agent')
print('Remove the old agent from the crontab (crontab -e -u nixstats)')
elif os.path.isfile('/opt/nixstats/nixstats.cfg'):
oldconfigfile = open('/opt/nixstats/nixstats.cfg')
lines=oldconfigfile.readlines()
server_id = lines[1].replace('server=', '').strip()
print('Upgrading from old python client.')
print('Run :\nchkconfig --del nixstats \nor \nupdate-rc.d -f nixstats remove \nto remove the old service.')
else:
try:
hostname = os.uname()[1]
except AttributeError:
hostname = socket.getfqdn()
server_id = urlopen(
proto + '://api.nixstats.com/hello.php',
data=urlencode({
'user': user_id,
'hostname': hostname,
'unique_id': unique_id
}).encode("utf-8")
).read().decode()
print('Got server_id: %s' % server_id)
open(token_filename, 'w').\
write('[DEFAULT]\nuser=%s\nserver=%s\n' % (user_id, server_id))
# def run_agent():
# Agent().run()
def _plugin_name(plugin):
if isinstance(plugin, basestring):
basename = os.path.basename(plugin)
return os.path.splitext(basename)[0]
else:
return plugin.__name__
def test_plugins(plugins=[]):
'''
Test specified plugins and print their data output after single check.
If plugins list is empty test all enabled plugins.
'''
agent = Agent(dry_instance=True)
plugins_path = agent.config.get('agent', 'plugins')
if plugins_path not in sys.path:
sys.path.insert(0, plugins_path)
if not plugins:
plugins = agent._get_plugins(state='enabled')
print('Check all enabled plugins: %s' % ', '.join(plugins))
for plugin_name in plugins:
print('%s:' % plugin_name)
try:
fp, pathname, description = imp.find_module(plugin_name)
except Exception as e:
print('Find error:', e)
continue
try:
module = imp.load_module(plugin_name, fp, pathname, description)
except Exception as e:
print('Load error:', e)
continue
finally:
# Since we may exit via an exception, close fp explicitly.
if fp:
fp.close()
try:
payload = module.Plugin().run(agent.config)
print(json.dumps(payload, indent=4, sort_keys=True))
except Exception as e:
print('Execution error:', e)
class Agent:
execute = Queue()
metrics = Queue()
data = Queue()
cemetery = Queue()
shutdown = False
def __init__(self, dry_instance=False):
'''
Initialize internal strictures
'''
self._config_init()
# Cache for plugins so they can store values related to previous checks
self.plugins_cache = {}
if dry_instance:
return
self._logging_init()
self._plugins_init()
self._data_worker_init()
self._dump_config()
def _config_init(self):
'''
Initialize configuration object
'''
defaults = {
'max_data_span': 60,
'max_data_age': 60 * 10,
'logging_level': logging.INFO,
'threads': 100,
'ttl': 60,
'interval': 60,
'plugins': os.path.join(__FILEABSDIRNAME__, 'plugins'),
'enabled': 'no',
'subprocess': 'no',
'user': '',
'server': '',
'api_host': 'api.nixstats.com',
'api_path': '/v2/server/poll',
'log_file': '/var/log/nixstatsagent.log',
'log_file_mode': 'a',
'max_cached_collections': 10,
}
sections = [
'agent',
'execution',
'data',
]
if sys.version_info >= (3,):
config = configparser.RawConfigParser(defaults)
else:
config = ConfigParser.RawConfigParser(defaults)
config.read(ini_files)
self.config = config
for section in sections:
self._config_section_create(section)
if section is 'data':
self.config.set(section, 'interval', 1)
if section is 'agent':
self.config.set(section, 'interval', .5)
def _config_section_create(self, section):
'''
Create an addition section in the configuration object
if it's not exists
'''
if not self.config.has_section(section):
self.config.add_section(section)
def _logging_init(self):
'''
Initialize logging faculty
'''
level = self.config.getint('agent', 'logging_level')
log_file = self.config.get('agent', 'log_file')
log_file_mode = self.config.get('agent', 'log_file_mode')
if log_file_mode in ('w', 'a'):
pass
elif log_file_mode == 'truncate':
log_file_mode = 'w'
elif log_file_mode == 'append':
log_file_mode = 'a'
else:
log_file_mode = 'a'
if log_file == '-':
logging.basicConfig(level=level) # Log to sys.stderr by default
else:
try:
logging.basicConfig(filename=log_file, filemode=log_file_mode, level=level, format="%(asctime)-15s %(levelname)s %(message)s")
except IOError as e:
logging.basicConfig(level=level)
logging.info('IOError: %s', e)
logging.info('Drop logging to stderr')
logging.info('Agent logging_level %i', level)
def _plugins_init(self):
'''
Discover the plugins
'''
logging.info('_plugins_init')
plugins_path = self.config.get('agent', 'plugins')
filenames = glob.glob(os.path.join(plugins_path, '*.py'))
if plugins_path not in sys.path:
sys.path.insert(0, plugins_path)
self.schedule = {}
for filename in filenames:
name = _plugin_name(filename)
if name == 'plugins':
continue
self._config_section_create(name)
if self.config.getboolean(name, 'enabled'):
if self.config.getboolean(name, 'subprocess'):
self.schedule[filename] = 0
else:
fp, pathname, description = imp.find_module(name)
try:
module = imp.load_module(name, fp, pathname, description)
except Exception:
module = None
logging.error('import_plugin_exception:%s', str(sys.exc_info()[0]))
finally:
# Since we may exit via an exception, close fp explicitly.
if fp:
fp.close()
if module:
self.schedule[module] = 0
else:
logging.error('import_plugin:%s', name)
def _subprocess_execution(self, task):
'''
Execute /task/ in a subprocess
'''
process = subprocess.Popen((sys.executable, task),
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
universal_newlines=True)
logging.debug('%s:process:%i', threading.currentThread(), process.pid)
interval = self.config.getint('execution', 'interval')
name = _plugin_name(task)
ttl = self.config.getint(name, 'ttl')
ticks = ttl / interval or 1
process.poll()
while process.returncode is None and ticks > 0:
logging.debug('%s:tick:%i', threading.currentThread(), ticks)
time.sleep(interval)
ticks -= 1
process.poll()
if process.returncode is None:
logging.error('%s:kill:%i', threading.currentThread(), process.pid)
os.kill(process.pid, signal.SIGTERM)
stdout, stderr = process.communicate()
if process.returncode != 0 or stderr:
logging.error('%s:%s:%s:%s', threading.currentThread(),
task, process.returncode, stderr)
if stdout:
ret = pickle.loads(stdout)
else:
ret = None
return ret
def _execution(self):
'''
Take queued execution requests, execute plugins and queue the results
'''
while True:
if self.shutdown:
logging.info('%s:shutdown', threading.currentThread())
break
logging.debug('%s:exec_queue:%i', threading.currentThread(), self.execute.qsize())
try:
task = self.execute.get_nowait()
except Empty:
break
logging.debug('%s:task:%s', threading.currentThread(), task)
name = _plugin_name(task)
try:
interval = self.config.get(name, 'interval')
except:
interval = 60
ts = time.time()
if isinstance(task, basestring):
payload = self._subprocess_execution(task)
else:
try:
# Setup cache for plugin instance
# if name not in self.plugins_cache.iterkeys():
# self.plugins_cache[name] = []
self.plugins_cache.update({
name: self.plugins_cache.get(name, [])
})
plugin = task.Plugin(agent_cache=self.plugins_cache[name])
payload = plugin.run(self.config)
except Exception:
logging.exception('plugin_exception')
payload = {'exception': str(sys.exc_info()[0])}
self.metrics.put({
'ts': ts,
'task': task,
'name': name,
'interval': interval,
'payload': payload,
})
self.cemetery.put(threading.currentThread())
self.hire.release()
def _data(self):
'''
Take and collect data, send and clean if needed
'''
logging.info('%s', threading.currentThread())
api_host = self.config.get('data', 'api_host')
api_path = self.config.get('data', 'api_path')
max_age = self.config.getint('agent', 'max_data_age')
max_span = self.config.getint('agent', 'max_data_span')
server = self.config.get('agent', 'server')
user = self.config.get('agent', 'user')
interval = self.config.getint('data', 'interval')
max_cached_collections = self.config.get('agent', 'max_cached_collections')
cached_collections = []
collection = []
while True:
loop_ts = time.time()
if self.shutdown:
logging.info('%s:shutdown', threading.currentThread())
break
logging.debug('%s:data_queue:%i:collection:%i',
threading.currentThread(), self.data.qsize(), len(collection))
while self.data.qsize():
try:
collection.append(self.data.get_nowait())
except Exception as e:
logging.error('Data queue error: %s' % e)
if collection:
first_ts = min((e['ts'] for e in collection))
last_ts = max((e['ts'] for e in collection))
now = time.time()
send = False
if last_ts - first_ts >= max_span:
logging.debug('Max data span')
send = True
clean = False
elif now - first_ts >= max_age:
logging.warning('Max data age')
send = True
clean = True
if send:
headers = {
"Content-type": "application/json",
"Authorization": "ApiKey %s:%s" % (user, server),
}
logging.debug('collection: %s',
json.dumps(collection, indent=2, sort_keys=True))
if not (server and user):
logging.warning('Empty server or user, nowhere to send.')
clean = True
else:
try:
if sys.version_info >= (3,):
connection = http.client.HTTPSConnection(api_host, timeout=15)
else:
connection = httplib.HTTPSConnection(api_host, timeout=15)
# Trying to send cached collections if any
if cached_collections:
logging.info('Sending cached collections: %i', len(cached_collections))
while cached_collections:
connection.request('PUT', '%s?version=%s' % (api_path, __version__),
cached_collections[0],
headers=headers)
response = connection.getresponse()
response.read()
if response.status == 200:
del cached_collections[0] # Remove just sent collection
logging.debug('Successful response: %s', response.status)
else:
raise ValueError('Unsuccessful response: %s' % response.status)
logging.info('All cached collections sent')
# Send recent collection (reuse existing connection)
connection.request('PUT', '%s?version=%s' % (api_path, __version__),
bz2.compress(str(json.dumps(collection)+"\n").encode()),
headers=headers)
response = connection.getresponse()
response.read()
if response.status == 200:
logging.debug('Successful response: %s', response.status)
clean = True
else:
raise ValueError('Unsuccessful response: %s' % response.status)
except Exception as e:
logging.error('Failed to submit collection: %s' % e)
# Store recent collection in cached_collections if send failed
if max_cached_collections > 0:
if len(cached_collections) >= max_cached_collections:
del cached_collections[0] # Remove oldest collection
logging.info('Reach max_cached_collections (%s): oldest cached collection dropped',
max_cached_collections)
logging.info('Cache current collection to resend next time')
cached_collections.append(bz2.compress(str(json.dumps(collection)+"\n").encode()))
collection = []
finally:
connection.close()
if clean:
collection = []
sleep_interval = interval - (time.time() - loop_ts)
if sleep_interval > 0:
time.sleep(sleep_interval)
def _data_worker_init(self):
'''
Initialize data worker thread
'''
logging.info('_data_worker_init')
threading.Thread(target=self._data).start()
def _dump_config(self):
'''
Dumps configuration object
'''
if sys.version_info >= (3,):
buf = io.StringIO()
else:
buf = StringIO.StringIO()
self.config.write(buf)
logging.info('Config: %s', buf.getvalue())
def _get_plugins(self, state='enabled'):
'''
Return list with plugins names
'''
plugins_path = self.config.get('agent', 'plugins')
plugins = []
for filename in glob.glob(os.path.join(plugins_path, '*.py')):
plugin_name = _plugin_name(filename)
if plugin_name == 'plugins':
continue
self._config_section_create(plugin_name)
if state == 'enabled':
if self.config.getboolean(plugin_name, 'enabled'):
plugins.append(plugin_name)
elif state == 'disabled':
if not self.config.getboolean(plugin_name, 'enabled'):
plugins.append(plugin_name)
return plugins
def _rip(self):
'''
Join with dead workers
Workaround for https://bugs.python.org/issue37788
'''
logging.debug('cemetery:%i', self.cemetery.qsize())
while True:
try:
thread = self.cemetery.get_nowait()
except Empty:
break
logging.debug('joining:%s', thread)
thread.join()
def run(self):
'''
Start all the worker threads
'''
logging.info('Agent main loop')
interval = self.config.getfloat('agent', 'interval')
self.hire = threading.Semaphore(
self.config.getint('execution', 'threads'))
try:
while True:
self._rip()
now = time.time()
logging.debug('%i threads', threading.activeCount())
while self.metrics.qsize():
metrics = self.metrics.get_nowait()
name = metrics['name']
logging.debug('metrics:%s', name)
plugin = metrics.get('task')
if plugin:
self.schedule[plugin] = \
int(now) + self.config.getint(name, 'interval')
if isinstance(plugin, types.ModuleType):
metrics['task'] = plugin.__file__
self.data.put(metrics)
execute = [
what
for what, when in self.schedule.items()
if when <= now
]
for name in execute:
logging.debug('scheduling:%s', name)
del self.schedule[name]
self.execute.put(name)
if self.hire.acquire(False):
try:
thread = threading.Thread(target=self._execution)
thread.start()
logging.debug('new_execution_worker_thread:%s', thread)
except Exception as e:
logging.warning('Can not start new thread: %s', e)
else:
logging.warning('threads_capped')
self.metrics.put({
'ts': now,
'name': 'agent_internal',
'payload': {
'threads_capping':
self.config.getint('execution', 'threads')}
})
sleep_interval = .5-(time.time()-now)
if sleep_interval > 0:
time.sleep(sleep_interval)
else:
logging.warning('not enough time to start worker threads')
time.sleep(.1)
except KeyboardInterrupt:
logging.warning(sys.exc_info()[0])
logging.info('Shutting down')
self._rip()
wait_for = True
while wait_for:
all_threads = threading.enumerate()
logging.info('Remaining threads: %s', all_threads)
wait_for = [
thread for thread in all_threads
if not thread.isDaemon() and
not isinstance(thread, threading._MainThread)
]
if not wait_for:
logging.info('Bye!')
sys.exit(0)
self.shutdown = True
logging.info('Waiting for %i threads to exit', len(wait_for))
for thread in wait_for:
logging.info('Joining with %s/%f', thread, interval)
thread.join(interval)
except Exception as e:
logging.error('Worker error: %s' % e)
def main():
if len(sys.argv) > 1:
if sys.argv[1].startswith('--'):
sys.argv[1] = sys.argv[1][2:]
if sys.argv[1] == 'help':
print('\n'.join((
'Run without options to run agent.',
'Acceptable options (leading -- is optional):',
' help, info, version, hello, insecure-hello, test',
)))
sys.exit()
elif sys.argv[1] == 'info':
print(info())
sys.exit()
elif sys.argv[1] == 'version':
print(__version__)
sys.exit()
elif sys.argv[1] == 'hello':
del sys.argv[1]
sys.exit(hello())
elif sys.argv[1] == 'insecure-hello':
del sys.argv[1]
sys.exit(hello(proto='http'))
elif sys.argv[1] == 'test':
sys.exit(test_plugins(sys.argv[2:]))
else:
print('Invalid option:', sys.argv[1], file=sys.stderr)
sys.exit(1)
else:
Agent().run()
if __name__ == '__main__':
main()
| bsd-3-clause | 4,973,914,409,906,243,000 | 35.864785 | 146 | 0.49742 | false |
popgengui/negui | agestrucne/pgvalidationdefs.py | 1 | 5774 | '''
Description
'''
from __future__ import print_function
__filename__ = "pgvalidationdefs.py"
__date__ = "20170326"
__author__ = "Ted Cosart<[email protected]>"
VERBOSE=False
VERY_VERBOSE=False
def validateNbAdjustment( s_adjustment, i_highest_cycle_number=1e20 ):
'''
2017_03_08. This def is created to handle the PGInputSimuPop
parameter nbadjustment, which requires a more elaborate
validation than do the others that can be validated using
a simple boolean statement.
We test the user's entry into the list of strings that give
the cycle range and adjustment rate by creating an
NbAdjustmentRangeAndRate object, solely to test it using
that objects validation code. See the class description
and code for NbAdjustmentRangeAndRate in module pgutilityclasses.
2017_03_26. This def was originally in the pgguisimupop.py class,
but, as called there from KeyValueFrame objects via the ValueValidator
object instances, a recursive set of validations sometimes occurred.
This module is imported by the pgutilityclasses.ValueValidator class,
so that def names can be passed as simple strings to the ValueValidator,
and the latter object can test the string to see if it is defined and
callable. This means that the i_highest_cycle_number parameter is
currently not passed, since the value validator can only pass a single
value to its validation defs. This is a difficult restriction to overcome,
since we are trying to automate validation defs in the resources/*param.names
entries, and need non-parametric calls and expressions.
Note, too that the validation message "s_msg" below, that I am reatining,
and that used to be raised when this was a def member of PGGuiSimuPop objects,
is currently not used. This is again due to the restricted return values
required by ValueValidator's minimalist interface.
'''
b_return_val=True
i_lowest_cycle_number=2
s_msg="In mod pgvalidationdefs.py, def validateNbAdjustment, " \
+ "there was an error in the range " \
+ "and rate entry. The entry was: " \
+ s_adjustment + ". " \
+ "\n\nThe correct format is min-max:rate, where " \
+ "min is a starting cycle number, " \
+ "max is an ending cycle number, " \
+ "and rate is the proportion by which to " \
+ "multiply Nb and the age/class individual counts " \
+ "to be applied for each cycle in the range."
if VERY_VERBOSE:
print( "--------------------" )
print( "in pgguisimupop, validateNbAdjustment with value: " + s_adjustment )
#end very verbose
if i_highest_cycle_number < i_lowest_cycle_number:
if VERY_VERBOSE:
print( "returning false on cycle number test" )
#end if very verbose
s_msg="In mod pgvalidationdefs.py, def validateNbAdjustment, " \
"cannot validate cycle range: current setting for " \
+ "total generations is less than the allowed minimum for " \
+ "adjustment (cycle " + str( i_lowest_cycle_number ) + ")."
b_return_val = False
else:
ls_adj_vals=s_adjustment.split( ":" )
if len( ls_adj_vals ) != 2:
b_return_val = False
else:
s_cycle_range=ls_adj_vals[ 0 ]
ls_min_max=s_cycle_range.split( "-" )
if len( ls_min_max ) != 2:
b_return_val = False
else:
try:
i_min=int( ls_min_max[ 0 ] )
i_max=int( ls_min_max[ 1 ] )
f_rate=float( ls_adj_vals[ 1 ] )
if i_min < i_lowest_cycle_number \
or i_max > i_highest_cycle_number \
or i_min > i_max:
b_return_val = False
#end if min-max invalid range
except ValueError as ove:
b_return_val = False
#end try except
#end if min-max list not 2 items
#end if entry not colon-splittable into 2 items
#end if no input, else if current input.gens < 1, else test entry
return b_return_val
#end validateNbAdjustment
def validateStartSave( i_value, i_max_cycle_number=None ):
b_return_val=True
if type( i_value ) != int:
b_return_val=False
elif i_value < 1 or \
( i_max_cycle_number is not None and i_value > i_max_cycle_number ):
b_return_val=False
#end if non-int or out of range, return false
return b_return_val
#end validateStartSave
def validateHetFilter( s_het_filter ):
b_return_value=False
DELIM=","
NUM_FIELDS=3
FIELD_TYPES=[ float, float, int ]
IDX_MIN=0
IDX_MAX=1
IDX_TOTAL=2
ls_problems=[]
ls_values=s_het_filter.split( "," )
lv_typed_values=[]
if len( ls_values ) != NUM_FIELDS:
ls_problems.append( "The filter should have " \
+ "3, comma-separated fields." )
else:
for idx in range( NUM_FIELDS ):
try:
lv_typed_values.append( FIELD_TYPES[idx]( ls_values[ idx ] ) )
except ( TypeError, ValueError ) as oet:
ls_problems.append( "value for item " \
+ str( idx + 1 ) \
+ ", \"" + ls_values[ idx ] \
+ "\", should be of type " \
+ str( FIELD_TYPES[ idx ] ) \
+ "." )
#end for each value
if len( ls_problems ) == 0:
if lv_typed_values[IDX_MIN] > lv_typed_values[IDX_MAX]:
ls_problems.append( \
"Value for minimum, " + ls_values[ IDX_MIN ] \
+ ", should be less than or equal to " \
+ " the value for the maximum, " \
+ str( ls_values[ IDX_MAX ] ) + "." )
elif lv_typed_values[ IDX_TOTAL ] < 0:
ls_problems.append( "The value for total pops to save, " \
+ str( lv_typed_values[ IDX_TOTAL ] \
+ " should be greater than or equal to " \
+ "zero." ) )
#end if min > max, elif total lt zero
#end if we have no problems so far.
#end if we have the proper number of values
if len( ls_problems ) > 0:
b_return_value = False
else:
b_return_value = True
#end if no problems return None
return b_return_value
#end validateHetFilter
if __name__ == "__main__":
pass
#end if main
| agpl-3.0 | 3,903,065,343,897,432,600 | 28.917098 | 80 | 0.664011 | false |
mpaf/pywinauto-64bit | pywinauto/handleprops.py | 1 | 12146 | # GUI Application automation and testing library
# Copyright (C) 2006 Mark Mc Mahon
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public License
# as published by the Free Software Foundation; either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc.,
# 59 Temple Place,
# Suite 330,
# Boston, MA 02111-1307 USA
"""Functions to retrieve properties from a window handle
These are implemented in a procedural way so as to to be
useful to other modules with the least conceptual overhead
"""
__revision__ = "$Revision: 727 $"
import ctypes
import win32functions
import win32defines
import win32structures
import findwindows # for children
#=========================================================================
def text(handle):
"Return the text of the window"
length = ctypes.c_long()
win32functions.SendMessageTimeout(
handle,
win32defines.WM_GETTEXTLENGTH,
0,
0,
win32defines.SMTO_ABORTIFHUNG,
100, # .1 of a second
ctypes.byref(length))
length = length.value
textval = ''
if length:
length += 1
buffer_ = ctypes.create_unicode_buffer(length)
ret = win32functions.SendMessage(
handle, win32defines.WM_GETTEXT, length, ctypes.byref(buffer_))
if ret:
textval = buffer_.value
return textval
#=========================================================================
def classname(handle):
"Return the class name of the window"
class_name = (ctypes.c_wchar * 257)()
win32functions.GetClassName (handle, ctypes.byref(class_name), 256)
return class_name.value
#=========================================================================
def parent(handle):
"Return the handle of the parent of the window"
return win32functions.GetParent(handle)
#=========================================================================
def style(handle):
"Return the style of the window"
return win32functions.GetWindowLong (handle, win32defines.GWL_STYLE)
#=========================================================================
def exstyle(handle):
"Return the extended style of the window"
return win32functions.GetWindowLong (handle, win32defines.GWL_EXSTYLE)
#=========================================================================
def controlid(handle):
"Return the ID of the control"
return win32functions.GetWindowLong (handle, win32defines.GWL_ID)
#=========================================================================
def userdata(handle):
"Return the value of any userdata associated with the window"
return win32functions.GetWindowLong (handle, win32defines.GWL_USERDATA)
#=========================================================================
def contexthelpid(handle):
"Return the context help id of the window"
return win32functions.GetWindowContextHelpId (handle)
#=========================================================================
def iswindow(handle):
"Return True if the handle is a window"
return bool(win32functions.IsWindow(handle))
#=========================================================================
def isvisible(handle):
"Return True if the window is visible"
return bool(win32functions.IsWindowVisible(handle))
#=========================================================================
def isunicode(handle):
"Teturn True if the window is a unicode window"
return bool(win32functions.IsWindowUnicode(handle))
#=========================================================================
def isenabled(handle):
"Return True if the window is enabled"
return bool(win32functions.IsWindowEnabled(handle))
#=========================================================================
def clientrect(handle):
"Return the client rectangle of the control"
client_rect = win32structures.RECT()
win32functions.GetClientRect(handle, ctypes.byref(client_rect))
return client_rect
#=========================================================================
def rectangle(handle):
"Return the rectangle of the window"
rect = win32structures.RECT()
win32functions.GetWindowRect(handle, ctypes.byref(rect))
return rect
#=========================================================================
def font(handle):
"Return the font as a LOGFONTW of the window"
# get the font handle
font_handle = win32functions.SendMessage(
handle, win32defines.WM_GETFONT, 0, 0)
# if the fondUsed is 0 then the control is using the
# system font (well probably not - even though that is what the docs say)
# instead we switch to the default GUI font - which is more likely correct.
if not font_handle:
# So just get the default system font
font_handle = win32functions.GetStockObject(win32defines.DEFAULT_GUI_FONT)
# if we still don't have a font!
# ----- ie, we're on an antiquated OS, like NT 3.51
if not font_handle:
# ----- On Asian platforms, ANSI font won't show.
if win32functions.GetSystemMetrics(win32defines.SM_DBCSENABLED):
# ----- was...(SYSTEM_FONT)
font_handle = win32functions.GetStockObject(
win32defines.SYSTEM_FONT)
else:
# ----- was...(SYSTEM_FONT)
font_handle = win32functions.GetStockObject(
win32defines.ANSI_VAR_FONT)
else:
fontval = win32structures.LOGFONTW()
ret = win32functions.GetObject(
font_handle, ctypes.sizeof(fontval), ctypes.byref(fontval))
# Get the Logfont structure of the font of the control
fontval = win32structures.LOGFONTW()
ret = win32functions.GetObject(
font_handle, ctypes.sizeof(fontval), ctypes.byref(fontval))
# The function could not get the font - this is probably
# because the control does not have associated Font/Text
# So we should make sure the elements of the font are zeroed.
if not ret:
fontval = win32structures.LOGFONTW()
# if it is a main window
if is_toplevel_window(handle):
if "MS Shell Dlg" in fontval.lfFaceName or \
fontval.lfFaceName == "System":
# these are not usually the fonts actaully used in for
# title bars so we need to get the default title bar font
# get the title font based on the system metrics rather
# than the font of the control itself
ncms = win32structures.NONCLIENTMETRICSW()
ncms.cbSize = ctypes.sizeof(ncms)
win32functions.SystemParametersInfo(
win32defines.SPI_GETNONCLIENTMETRICS,
ctypes.sizeof(ncms),
ctypes.byref(ncms),
0)
# with either of the following 2 flags set the font of the
# dialog isthe small one (but there is normally no difference!
if has_style(handle, win32defines.WS_EX_TOOLWINDOW) or \
has_style(handle, win32defines.WS_EX_PALETTEWINDOW):
fontval = ncms.lfSmCaptionFont
else:
fontval = ncms.lfCaptionFont
return fontval
#=========================================================================
def processid(handle):
"Retrun the ID of process that controls this window"
process_id = ctypes.c_int()
win32functions.GetWindowThreadProcessId(handle, ctypes.byref(process_id))
return process_id.value
#=========================================================================
def children(handle):
"Return a list of handles to the children of this window"
return findwindows.enum_child_windows(handle)
#=========================================================================
def has_style(handle, tocheck):
"Return True if the control has style tocheck"
hwnd_style = style(handle)
return tocheck & hwnd_style == tocheck
#=========================================================================
def has_exstyle(handle, tocheck):
"Return True if the control has extended style tocheck"
hwnd_exstyle = exstyle(handle)
return tocheck & hwnd_exstyle == tocheck
#=========================================================================
def is_toplevel_window(handle):
"Return whether the window is a top level window or not"
# only request the style once - this is an optimization over calling
# (handle, style) for each style I wan to check!
style_ = style(handle)
if (style_ & win32defines.WS_OVERLAPPED == win32defines.WS_OVERLAPPED or \
style_ & win32defines.WS_CAPTION == win32defines.WS_CAPTION) and \
not (style_ & win32defines.WS_CHILD == win32defines.WS_CHILD):
return True
else:
return False
#=========================================================================
#def get_button_friendlyclassname(handle):
# "Return the friendly class name of a button control"
#
# # get the least significant bit
# style_lsb = style(handle) & 0xF
#
# # default to "Button"
# f_classname = "Button"
#
# if style_lsb == win32defines.BS_3STATE or \
# style_lsb == win32defines.BS_AUTO3STATE or \
# style_lsb == win32defines.BS_AUTOCHECKBOX or \
# style_lsb == win32defines.BS_CHECKBOX:
# f_classname = "CheckBox"
#
# elif style_lsb == win32defines.BS_RADIOBUTTON or \
# style_lsb == win32defines.BS_AUTORADIOBUTTON:
# f_classname = "RadioButton"
#
# elif style_lsb == win32defines.BS_GROUPBOX:
# f_classname = "GroupBox"
#
# if style(handle) & win32defines.BS_PUSHLIKE:
# f_classname = "Button"
#
# return f_classname
#def friendlyclassname(handle):
# """Return the friendly class name of the window
#
# The friendly class name might be subjective, but it
# tries to be what a normal user would call a window
# rather then the windows class name for the window.
# """
#
# import warnings
# warnings.warn("handleprops.friendlyclassname() is deprecated. Please use"
# "FriendlyClassMethod() of HwndWrapper",
# DeprecationWarning)
#
# # if it's a dialog then return that
# if is_toplevel_window(handle) and classname(handle) == "#32770":
# return "Dialog"
#
# # otherwise ask the wrapper class for the friendly class name
# class_name = classname(handle)
#
# from controls import wraphandle
# info = wraphandle._find_wrapper(class_name)
#
# if info:
# return info.friendlyclassname
#
# else:
# return class_name
#
#
# # Check if the class name is in the known classes
# for cls_name, f_cls_name in _class_names.items():
#
# # OK we found it
# if re.match(cls_name, classname(handle)):
# # If it is a string then just return it
# if isinstance(f_cls_name, basestring):
# return f_cls_name
# # otherwise it is a function so call it
# else:
# return f_cls_name(handle)
#
# # unknown class - just return it's classname
# return classname(handle)
#=========================================================================
def dumpwindow(handle):
"Dump a window to a set of properties"
props = {}
for func in (
text,
classname,
rectangle,
clientrect,
style,
exstyle,
contexthelpid,
controlid,
userdata,
font,
parent,
processid,
isenabled,
isunicode,
isvisible,
children,
):
props[func.__name__] = func(handle)
return props
| lgpl-2.1 | -6,504,744,661,773,950,000 | 32.185792 | 82 | 0.571464 | false |
puiterwijk/product-definition-center | pdc/apps/compose/tests.py | 1 | 99430 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2015 Red Hat
# Licensed under The MIT License (MIT)
# http://opensource.org/licenses/MIT
#
import json
import mock
from StringIO import StringIO
from django.core.urlresolvers import reverse
from django.test import TestCase
from django.test.client import Client
from rest_framework.test import APITestCase
from rest_framework import status
from pdc.apps.bindings import models as binding_models
from pdc.apps.common.test_utils import create_user, TestCaseWithChangeSetMixin
from pdc.apps.release.models import Release, ProductVersion
from pdc.apps.component.models import (ReleaseComponent,
BugzillaComponent)
import pdc.apps.release.models as release_models
import pdc.apps.common.models as common_models
from . import models
class ComposeModelTestCase(TestCase):
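    """Unit tests for Compose model helpers (get_rpms, get_arch_testing_status)."""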
fixtures = [
"pdc/apps/common/fixtures/test/sigkey.json",
"pdc/apps/package/fixtures/test/rpm.json",
"pdc/apps/release/fixtures/tests/release.json",
"pdc/apps/compose/fixtures/tests/variant.json",
"pdc/apps/compose/fixtures/tests/variant_arch.json",
"pdc/apps/compose/fixtures/tests/compose_overriderpm.json",
"pdc/apps/compose/fixtures/tests/compose.json",
"pdc/apps/compose/fixtures/tests/compose_composerpm.json",
]
def setUp(self):
self.compose = models.Compose.objects.get(id=1)
def test_get_rpms_existing(self):
self.assertEqual(unicode(self.compose.get_rpms('bash')),
'[<RPM: bash-0:1.2.3-4.b1.x86_64.rpm>]')
def test_get_rpms_nonexisting(self):
self.assertEqual(list(self.compose.get_rpms('foo')), [])
def test_get_arch_testing_status(self):
self.assertDictEqual(self.compose.get_arch_testing_status(),
{'Server': {'x86_64': 'untested'}, 'Server2': {'x86_64': 'untested'}})
class VersionFinderTestCase(APITestCase):
# TODO: This test case could be removed after removing endpoint 'compose/package'
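    """Tests for the findcomposewitholderpackage endpoint: lookups by rpm_name
    against a compose, release or product version, with optional filters."""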
fixtures = [
"pdc/apps/common/fixtures/test/sigkey.json",
"pdc/apps/package/fixtures/test/rpm.json",
"pdc/apps/release/fixtures/tests/product.json",
"pdc/apps/release/fixtures/tests/product_version.json",
"pdc/apps/release/fixtures/tests/release.json",
"pdc/apps/compose/fixtures/tests/variant.json",
"pdc/apps/compose/fixtures/tests/variant_arch.json",
"pdc/apps/compose/fixtures/tests/compose_overriderpm.json",
"pdc/apps/compose/fixtures/tests/compose.json",
"pdc/apps/compose/fixtures/tests/compose_composerpm.json",
"pdc/apps/compose/fixtures/tests/more_composes.json",
]
def setUp(self):
self.url = reverse('findcomposewitholderpackage-list')
def test_bad_args_missing_rpm_name(self):
response = self.client.get(self.url, {'compose': 'compose-1'})
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertIn('rpm_name', response.data.get('detail'))
def test_bad_args_missing_release_and_compose(self):
response = self.client.get(self.url, {'rpm_name': 'bash'})
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertIn('release', response.data.get('detail'))
self.assertIn('compose', response.data.get('detail'))
def test_missing_previous_compose(self):
response = self.client.get(self.url, {'compose': 'compose-1', 'rpm_name': 'bash'})
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
def test_previous_compose_has_same_version(self):
response = self.client.get(self.url, {'compose': 'compose-2', 'rpm_name': 'bash'})
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
def test_previous_compose_has_older_rpm(self):
response = self.client.get(self.url, {'compose': 'compose-3', 'rpm_name': 'bash'})
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data.get('compose'), "compose-2")
self.assertEqual(response.data.get('packages'), ["bash-0:1.2.3-4.b1.x86_64.rpm"])
def test_same_version_different_arch(self):
"""There is a previous compose with same version of package, but with different RPM.arch."""
models.ComposeRPM.objects.filter(pk=1).update(rpm=3)
response = self.client.get(self.url, {'compose': 'compose-2', 'rpm_name': 'bash'})
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
def test_get_for_release(self):
response = self.client.get(self.url, {'rpm_name': 'bash', 'release': 'release-1.0'})
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data,
[{'compose': 'compose-1', 'packages': ['bash-0:1.2.3-4.b1.x86_64.rpm']},
{'compose': 'compose-2', 'packages': ['bash-0:1.2.3-4.b1.x86_64.rpm']},
{'compose': 'compose-3', 'packages': ['bash-0:5.6.7-8.x86_64.rpm']}])
def test_get_for_release_with_latest(self):
response = self.client.get(self.url, {'rpm_name': 'bash', 'release': 'release-1.0', 'latest': 'True'})
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data,
[{'compose': 'compose-3', 'packages': ['bash-0:5.6.7-8.x86_64.rpm']}])
def test_get_for_release_to_dict(self):
response = self.client.get(self.url, {'rpm_name': 'bash', 'release': 'release-1.0', 'to_dict': True})
self.assertEqual(response.status_code, status.HTTP_200_OK)
expected = [
{'compose': u'compose-1', 'packages': [
{'name': u'bash', 'version': u'1.2.3', 'epoch': 0, 'release': u'4.b1',
'arch': u'x86_64', 'srpm_name': u'bash', 'srpm_nevra': u'bash-0:1.2.3-4.b1.src',
'filename': 'bash-1.2.3-4.b1.x86_64.rpm', 'id': 1,
'linked_composes': [u'compose-1', u'compose-2'], 'linked_releases': []}]},
{'compose': u'compose-2', 'packages': [
{'name': u'bash', 'version': u'1.2.3', 'epoch': 0, 'release': u'4.b1',
'arch': u'x86_64', 'srpm_name': u'bash', 'srpm_nevra': u'bash-0:1.2.3-4.b1.src',
'filename': 'bash-1.2.3-4.b1.x86_64.rpm', 'id': 1,
'linked_composes': [u'compose-1', u'compose-2'], 'linked_releases': []}]},
{'compose': u'compose-3', 'packages': [
{'name': u'bash', 'version': u'5.6.7', 'epoch': 0, 'release': u'8',
'arch': u'x86_64', 'srpm_name': u'bash', 'srpm_nevra': None,
'filename': 'bash-5.6.7-8.x86_64.rpm', 'id': 2,
'linked_composes': [u'compose-3'], 'linked_releases': []}]}
]
self.assertEqual(response.data, expected)
def test_get_for_product_version(self):
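        # Attach the release to a product version so the lookup by product_version can match it.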
product_version = ProductVersion.objects.get(short='product', version='1')
release = Release.objects.get(release_id='release-1.0')
release.product_version = product_version
release.save()
response = self.client.get(self.url, {'rpm_name': 'bash', 'product_version': 'product-1'})
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data,
[{'compose': 'compose-1', 'packages': ['bash-0:1.2.3-4.b1.x86_64.rpm']},
{'compose': 'compose-2', 'packages': ['bash-0:1.2.3-4.b1.x86_64.rpm']},
{'compose': 'compose-3', 'packages': ['bash-0:5.6.7-8.x86_64.rpm']}])
def test_get_for_product_version_with_latest(self):
product_version = ProductVersion.objects.get(short='product', version='1')
release = Release.objects.get(release_id='release-1.0')
release.product_version = product_version
release.save()
response = self.client.get(self.url, {'rpm_name': 'bash', 'product_version': 'product-1',
'latest': 'True'})
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data,
[{'compose': 'compose-3', 'packages': ['bash-0:5.6.7-8.x86_64.rpm']}])
def test_get_for_included_compose_type(self):
response = self.client.get(self.url, {'rpm_name': 'bash', 'release': 'release-1.0',
'included_compose_type': 'production'})
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data,
[{'compose': 'compose-1', 'packages': ['bash-0:1.2.3-4.b1.x86_64.rpm']},
{'compose': 'compose-2', 'packages': ['bash-0:1.2.3-4.b1.x86_64.rpm']}])
def test_get_for_excluded_compose_type(self):
response = self.client.get(self.url, {'rpm_name': 'bash', 'release': 'release-1.0',
'excluded_compose_type': 'production'})
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data,
[{'compose': 'compose-3', 'packages': ['bash-0:5.6.7-8.x86_64.rpm']}])
class FindComposeByReleaseRPMTestCase(APITestCase):
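    """Tests for listing composes of a release that contain a given RPM, addressed via URL kwargs."""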
fixtures = [
"pdc/apps/common/fixtures/test/sigkey.json",
"pdc/apps/package/fixtures/test/rpm.json",
"pdc/apps/release/fixtures/tests/release.json",
"pdc/apps/compose/fixtures/tests/variant.json",
"pdc/apps/compose/fixtures/tests/variant_arch.json",
"pdc/apps/compose/fixtures/tests/compose_overriderpm.json",
"pdc/apps/compose/fixtures/tests/compose.json",
"pdc/apps/compose/fixtures/tests/compose_composerpm.json",
"pdc/apps/compose/fixtures/tests/more_composes.json",
]
def test_get_for_release(self):
url = reverse('findcomposebyrr-list', kwargs={'rpm_name': 'bash', 'release_id': 'release-1.0'})
response = self.client.get(url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data,
[{'compose': 'compose-1', 'packages': ['bash-0:1.2.3-4.b1.x86_64.rpm']},
{'compose': 'compose-2', 'packages': ['bash-0:1.2.3-4.b1.x86_64.rpm']},
{'compose': 'compose-3', 'packages': ['bash-0:5.6.7-8.x86_64.rpm']}])
def test_get_for_release_with_latest(self):
url = reverse('findcomposebyrr-list', kwargs={'rpm_name': 'bash', 'release_id': 'release-1.0'})
response = self.client.get(url, {'latest': 'True'})
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data,
[{'compose': 'compose-3', 'packages': ['bash-0:5.6.7-8.x86_64.rpm']}])
def test_get_for_release_to_dict(self):
url = reverse('findcomposebyrr-list', kwargs={'rpm_name': 'bash', 'release_id': 'release-1.0'})
response = self.client.get(url, {'to_dict': True})
self.assertEqual(response.status_code, status.HTTP_200_OK)
expected = [
{'compose': u'compose-1', 'packages': [
{'name': u'bash', 'version': u'1.2.3', 'epoch': 0, 'release': u'4.b1',
'arch': u'x86_64', 'srpm_name': u'bash', 'srpm_nevra': u'bash-0:1.2.3-4.b1.src',
'filename': 'bash-1.2.3-4.b1.x86_64.rpm', 'id': 1,
'linked_composes': ['compose-1', 'compose-2'], 'linked_releases': []}]},
{'compose': u'compose-2', 'packages': [
{'name': u'bash', 'version': u'1.2.3', 'epoch': 0, 'release': u'4.b1',
'arch': u'x86_64', 'srpm_name': u'bash', 'srpm_nevra': u'bash-0:1.2.3-4.b1.src',
'filename': 'bash-1.2.3-4.b1.x86_64.rpm', 'id': 1,
'linked_composes': ['compose-1', 'compose-2'], 'linked_releases': []}]},
{'compose': u'compose-3', 'packages': [
{'name': u'bash', 'version': u'5.6.7', 'epoch': 0, 'release': u'8',
'arch': u'x86_64', 'srpm_name': u'bash', 'srpm_nevra': None,
'filename': 'bash-5.6.7-8.x86_64.rpm', 'id': 2,
'linked_composes': ['compose-3'], 'linked_releases': []}]}
]
self.assertEqual(response.data, expected)
def test_get_for_excluded_compose_type(self):
url = reverse('findcomposebyrr-list', kwargs={'rpm_name': 'bash', 'release_id': 'release-1.0'})
response = self.client.get(url, {'excluded_compose_type': 'production'})
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data,
[{'compose': 'compose-3', 'packages': ['bash-0:5.6.7-8.x86_64.rpm']}])
def test_get_for_included_compose_type(self):
url = reverse('findcomposebyrr-list', kwargs={'rpm_name': 'bash', 'release_id': 'release-1.0'})
response = self.client.get(url, {'included_compose_type': 'production'})
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data,
[{'compose': 'compose-1', 'packages': ['bash-0:1.2.3-4.b1.x86_64.rpm']},
{'compose': 'compose-2', 'packages': ['bash-0:1.2.3-4.b1.x86_64.rpm']}])
class FindOlderComposeByComposeRPMTestCase(APITestCase):
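    """Tests for finding an older compose that contains an older version of a given RPM."""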
fixtures = [
"pdc/apps/common/fixtures/test/sigkey.json",
"pdc/apps/package/fixtures/test/rpm.json",
"pdc/apps/release/fixtures/tests/release.json",
"pdc/apps/compose/fixtures/tests/variant.json",
"pdc/apps/compose/fixtures/tests/variant_arch.json",
"pdc/apps/compose/fixtures/tests/compose_overriderpm.json",
"pdc/apps/compose/fixtures/tests/compose.json",
"pdc/apps/compose/fixtures/tests/compose_composerpm.json",
"pdc/apps/compose/fixtures/tests/more_composes.json",
]
def test_missing_previous_compose(self):
url = reverse('findoldercomposebycr-list', kwargs={'compose_id': 'compose-1', 'rpm_name': 'bash'})
response = self.client.get(url)
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
def test_previous_compose_has_same_version(self):
url = reverse('findoldercomposebycr-list', kwargs={'compose_id': 'compose-2', 'rpm_name': 'bash'})
response = self.client.get(url)
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
def test_previous_compose_has_older_rpm(self):
url = reverse('findoldercomposebycr-list', kwargs={'compose_id': 'compose-3', 'rpm_name': 'bash'})
response = self.client.get(url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data.get('compose'), "compose-2")
self.assertEqual(response.data.get('packages'), ["bash-0:1.2.3-4.b1.x86_64.rpm"])
def test_previous_compose_has_older_rpm_with_to_dict(self):
url = reverse('findoldercomposebycr-list', kwargs={'compose_id': 'compose-3', 'rpm_name': 'bash'})
response = self.client.get(url, {'to_dict': True})
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data.get('compose'), "compose-2")
packages = response.data.get('packages')
self.assertEqual(len(packages), 1)
self.assertItemsEqual(packages[0].pop('linked_composes'), ['compose-1', 'compose-2'])
self.assertEqual(packages[0].pop('linked_releases'), [])
packages[0].pop('id')
self.assertDictEqual(
dict(packages[0]),
{'name': 'bash', 'version': '1.2.3', 'epoch': 0, 'release': '4.b1',
'arch': 'x86_64', 'srpm_name': 'bash', 'srpm_nevra': 'bash-0:1.2.3-4.b1.src',
'filename': 'bash-1.2.3-4.b1.x86_64.rpm'})
def test_same_version_different_arch(self):
"""There is a previous compose with same version of package, but with different RPM.arch."""
models.ComposeRPM.objects.filter(pk=1).update(rpm=3)
url = reverse('findoldercomposebycr-list', kwargs={'compose_id': 'compose-2', 'rpm_name': 'bash'})
response = self.client.get(url)
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
def test_get_compose_from_previous_release(self):
r = release_models.Release.objects.create(release_type_id=1, short='release',
name='Test Release', version='0.5')
for cid in ('compose-1', 'compose-2'):
c = models.Compose.objects.get(compose_id=cid)
c.release = r
c.save()
url = reverse('findoldercomposebycr-list', kwargs={'compose_id': 'compose-3', 'rpm_name': 'bash'})
response = self.client.get(url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data.get('compose'), 'compose-2')
def test_can_not_get_compose_from_previous_updates_release(self):
r = release_models.Release.objects.create(release_type_id=2, short='release',
name='Test Release', version='0.5')
for cid in ('compose-1', 'compose-2'):
c = models.Compose.objects.get(compose_id=cid)
c.release = r
c.save()
url = reverse('findoldercomposebycr-list', kwargs={'compose_id': 'compose-3', 'rpm_name': 'bash'})
response = self.client.get(url)
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
class FindComposeByProductVersionRPMTestCase(APITestCase):
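    """Tests for listing composes under a product version that contain a given RPM."""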
fixtures = [
"pdc/apps/common/fixtures/test/sigkey.json",
"pdc/apps/package/fixtures/test/rpm.json",
"pdc/apps/release/fixtures/tests/product.json",
"pdc/apps/release/fixtures/tests/product_version.json",
"pdc/apps/release/fixtures/tests/release.json",
"pdc/apps/compose/fixtures/tests/variant.json",
"pdc/apps/compose/fixtures/tests/variant_arch.json",
"pdc/apps/compose/fixtures/tests/compose.json",
"pdc/apps/compose/fixtures/tests/compose_composerpm.json",
"pdc/apps/compose/fixtures/tests/more_composes.json",
]
def setUp(self):
product_version = ProductVersion.objects.get(short='product', version='1')
release = Release.objects.get(release_id='release-1.0')
release.product_version = product_version
release.save()
self.url = reverse('findcomposesbypvr-list', kwargs={'rpm_name': 'bash', 'product_version': 'product-1'})
def test_get_for_product_version(self):
response = self.client.get(self.url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data,
[{'compose': 'compose-1', 'packages': ['bash-0:1.2.3-4.b1.x86_64.rpm']},
{'compose': 'compose-2', 'packages': ['bash-0:1.2.3-4.b1.x86_64.rpm']},
{'compose': 'compose-3', 'packages': ['bash-0:5.6.7-8.x86_64.rpm']}])
def test_get_for_product_version_with_latest(self):
product_version = ProductVersion.objects.get(short='product', version='1')
release = Release.objects.get(release_id='release-1.0')
release.product_version = product_version
release.save()
response = self.client.get(self.url, {'latest': 'True'})
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data,
[{'compose': 'compose-3', 'packages': ['bash-0:5.6.7-8.x86_64.rpm']}])
def test_get_for_included_compose_type(self):
response = self.client.get(self.url, {'included_compose_type': 'production'})
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data,
[{'compose': 'compose-1', 'packages': ['bash-0:1.2.3-4.b1.x86_64.rpm']},
{'compose': 'compose-2', 'packages': ['bash-0:1.2.3-4.b1.x86_64.rpm']}])
def test_get_for_excluded_compose_type(self):
response = self.client.get(self.url, {'excluded_compose_type': 'production'})
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data,
[{'compose': 'compose-3', 'packages': ['bash-0:5.6.7-8.x86_64.rpm']}])
class ComposeAPITestCase(TestCaseWithChangeSetMixin, APITestCase):
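    """Tests for retrieving and filtering composes through the compose REST API."""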
fixtures = [
"pdc/apps/common/fixtures/test/sigkey.json",
"pdc/apps/package/fixtures/test/rpm.json",
"pdc/apps/release/fixtures/tests/release.json",
"pdc/apps/compose/fixtures/tests/variant.json",
"pdc/apps/compose/fixtures/tests/variant_arch.json",
"pdc/apps/compose/fixtures/tests/compose_overriderpm.json",
"pdc/apps/compose/fixtures/tests/compose.json",
"pdc/apps/compose/fixtures/tests/compose_composerpm.json",
]
def test_get_existing(self):
response = self.client.get(reverse('compose-detail', args=["compose-1"]))
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['sigkeys'], ['ABCDEF'])
self.assertEqual(response.data['rpm_mapping_template'],
'http://testserver/rest_api/v1/composes/compose-1/rpm-mapping/{{package}}/')
def test_compose_with_unsigned_package(self):
crpm = models.ComposeRPM.objects.all()[0]
crpm.sigkey = None
crpm.save()
response = self.client.get(reverse('compose-detail', args=["compose-1"]))
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertItemsEqual(response.data['sigkeys'], ['ABCDEF', None])
def test_get_nonexisting(self):
response = self.client.get(reverse('compose-detail', args=["does-not-exist"]))
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
def test_list(self):
response = self.client.get(reverse('compose-list'), {})
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['count'], 1)
def test_query_composeid(self):
response = self.client.get(reverse('compose-list'), {"compose_id": "compose-1"})
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['count'], 1)
def test_query_composeid_nonexisting(self):
response = self.client.get(reverse('compose-list'), {"compose_id": "does-not-exist"})
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['count'], 0)
def test_query_compose_rpmname(self):
response = self.client.get(reverse('compose-list'), {"rpm_name": "bash"})
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['count'], 1)
def test_query_compose_rpmname_nonexisting(self):
response = self.client.get(reverse('compose-list'), {"rpm_name": "does-not-exist"})
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['count'], 0)
def test_query_compose_srpmname(self):
response = self.client.get(reverse('compose-list'), {"srpm_name": "bash"})
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['count'], 1)
def test_query_compose_srpmname_nonexisting(self):
response = self.client.get(reverse('compose-list'), {"srpm_name": "does-not-exist"})
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['count'], 0)
def test_query_compose_rpmversion(self):
response = self.client.get(reverse('compose-list'), {"rpm_version": "1.2.3"})
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['count'], 1)
def test_query_compose_rpmversion_nonexisting(self):
response = self.client.get(reverse('compose-list'), {"rpm_version": "does-not-exist"})
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['count'], 0)
def test_query_compose_rpmrelease(self):
response = self.client.get(reverse('compose-list'), {"rpm_release": "4.b1"})
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['count'], 1)
def test_query_compose_rpmrelease_nonexisting(self):
response = self.client.get(reverse('compose-list'), {"rpm_release": "does-not-exist"})
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['count'], 0)
def test_query_compose_rpmarch(self):
response = self.client.get(reverse('compose-list'), {"rpm_arch": "x86_64"})
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['count'], 1)
def test_query_compose_rpmarch_nonexisting(self):
response = self.client.get(reverse('compose-list'), {"rpm_arch": "does-not-exist"})
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['count'], 0)
def test_query_compose_rpmnvr(self):
response = self.client.get(reverse('compose-list'), {"rpm_nvr": "bash-1.2.3-4.b1"})
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['count'], 1)
def test_query_compose_rpmnvr_nonexisting(self):
response = self.client.get(reverse('compose-list'), {"rpm_nvr": "does-not-exist"})
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['count'], 0)
def test_query_compose_rpmnvr_invalid(self):
response = self.client.get(reverse('compose-list'), {"rpm_nvr": "invalid"})
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_query_compose_rpmnvra(self):
response = self.client.get(reverse('compose-list'), {"rpm_nvra": "bash-1.2.3-4.b1.x86_64"})
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['count'], 1)
def test_query_compose_rpmnvra_nonexisting(self):
response = self.client.get(reverse('compose-list'), {"rpm_nvra": "does-not-exist.arch"})
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['count'], 0)
def test_query_compose_rpmnvra_invalid(self):
response = self.client.get(reverse('compose-list'), {"rpm_nvra": "invalid"})
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_query_compose_acceptance_testing(self):
response = self.client.get(reverse('compose-list'), {"acceptance_testing": "untested"})
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['count'], 1)
def test_query_compose_acceptance_testing_nonexisting(self):
response = self.client.get(reverse('compose-list'), {"acceptance_testing": "broken"})
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['count'], 0)
class ComposeApiOrderingTestCase(APITestCase):
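    """Tests that compose listings and a release's compose_set are returned in sorted order."""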
fixtures = [
"pdc/apps/common/fixtures/test/sigkey.json",
"pdc/apps/package/fixtures/test/rpm.json",
"pdc/apps/release/fixtures/tests/product.json",
"pdc/apps/release/fixtures/tests/product_version.json",
"pdc/apps/release/fixtures/tests/release.json",
"pdc/apps/compose/fixtures/tests/variant.json",
"pdc/apps/compose/fixtures/tests/variant_arch.json",
"pdc/apps/compose/fixtures/tests/compose_overriderpm.json",
"pdc/apps/compose/fixtures/tests/compose.json",
"pdc/apps/compose/fixtures/tests/compose_composerpm.json",
"pdc/apps/compose/fixtures/tests/more_composes.json",
]
def test_compose_list_is_ordered(self):
response = self.client.get(reverse('compose-list'))
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(
[x['compose_id'] for x in response.data.get('results', [])],
['compose-1', 'compose-2', 'compose-3']
)
def test_compose_in_release_are_ordered(self):
response = self.client.get(reverse('release-detail', args=['release-1.0']))
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data.get('compose_set', []),
['compose-1', 'compose-2', 'compose-3'])
class ComposeUpdateTestCase(TestCaseWithChangeSetMixin, APITestCase):
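    """Tests for partial updates of a compose: acceptance testing state, linked releases and RTT testing status."""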
fixtures = [
"pdc/apps/release/fixtures/tests/release.json",
"pdc/apps/compose/fixtures/tests/variant.json",
"pdc/apps/compose/fixtures/tests/variant_arch.json",
"pdc/apps/compose/fixtures/tests/compose.json",
"pdc/apps/compose/fixtures/tests/more_releases.json",
]
def test_can_not_perform_full_update(self):
response = self.client.put(reverse('compose-detail', args=['compose-1']), {})
self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
def test_can_update_acceptance_testing_state(self):
response = self.client.patch(reverse('compose-detail', args=['compose-1']),
{'acceptance_testing': 'passed'})
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data.get('acceptance_testing'), 'passed')
self.assertNumChanges([1])
def test_can_not_update_compose_label(self):
response = self.client.patch(reverse('compose-detail', args=['compose-1']),
{'compose_label': 'i am a label'})
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_update_linked_releases(self):
response = self.client.patch(reverse('compose-detail', args=['compose-1']),
{'linked_releases': ['release-1.0-updates']},
format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data.get('linked_releases'),
['release-1.0-updates'])
self.assertNumChanges([1])
def test_update_both_linked_release_and_acceptance(self):
response = self.client.patch(reverse('compose-detail', args=['compose-1']),
{'linked_releases': ['release-1.0-updates'],
'acceptance_testing': 'passed'},
format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data.get('linked_releases'),
['release-1.0-updates'])
self.assertEqual(response.data.get('acceptance_testing'), 'passed')
self.assertNumChanges([2])
def test_update_acceptance_preserves_links(self):
self.client.patch(reverse('compose-detail', args=['compose-1']),
{'linked_releases': ['release-1.0-updates']},
format='json')
response = self.client.patch(reverse('compose-detail', args=['compose-1']),
{'acceptance_testing': 'passed'},
format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data.get('linked_releases'),
['release-1.0-updates'])
self.assertNumChanges([1, 1])
def test_update_can_not_link_to_same_release(self):
response = self.client.patch(reverse('compose-detail', args=['compose-1']),
{'linked_releases': ['release-1.0']},
format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertIn('detail', response.data)
def test_update_can_not_link_to_same_release_twice(self):
response = self.client.patch(reverse('compose-detail', args=['compose-1']),
{'linked_releases': ['release-1.0-updates', 'release-1.0-updates']},
format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data.get('linked_releases'), ['release-1.0-updates'])
def test_partial_update_empty(self):
response = self.client.patch(reverse('compose-detail', args=['compose-1']),
{}, format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_patch_linked_releases_not_a_list(self):
response = self.client.patch(reverse('compose-detail', args=['compose-1']),
{'linked_releases': 'release-1.0-updates'},
format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(response.data, {'linked_releases': ['Expected a list.']})
self.assertNumChanges([])
def test_patch_linked_releases_null(self):
response = self.client.patch(reverse('compose-detail', args=['compose-1']),
{'linked_releases': None},
format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(response.data, {'linked_releases': ['This field may not be null.']})
self.assertNumChanges([])
def test_patch_linked_releases_list_with_null(self):
response = self.client.patch(reverse('compose-detail', args=['compose-1']),
{'linked_releases': [None]},
format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(response.data,
{'linked_releases': ['Expected a string instead of <None>.']})
self.assertNumChanges([])
def test_bulk_update_put(self):
response = self.client.put(reverse('compose-list'),
{'compose-1': {'linked_releases': []}},
format='json')
self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
self.assertNumChanges([])
def test_bulk_update_patch(self):
response = self.client.patch(reverse('compose-list'),
{'compose-1': {'linked_releases': ['release-1.0-updates']}},
format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertNumChanges([1])
self.assertEqual(response.data.keys(), ['compose-1'])
self.assertEqual(response.data['compose-1'].get('linked_releases'),
['release-1.0-updates'])
def test_partial_update_extra_field(self):
response = self.client.patch(reverse('compose-detail', args=['compose-1']),
{'foo': 'bar'}, format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_update_testing_status_on_arch(self):
data = {'Server': {'x86_64': 'passed'}, 'Server2': {'x86_64': 'untested'}}
response = self.client.patch(reverse('compose-detail', args=['compose-1']),
{'rtt_tested_architectures': data},
format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data.get('rtt_tested_architectures', {}), data)
vararch = models.VariantArch.objects.get(arch__name='x86_64',
variant__variant_uid='Server',
variant__compose__compose_id='compose-1')
self.assertEqual(vararch.rtt_testing_status.name, 'passed')
self.assertNumChanges([1])
def test_update_testing_status_on_non_existing_tree(self):
inputs = [
({'Foo': {'x86_64': 'passed'}}, 'Foo.x86_64 not in compose compose-1.'),
({'Server': {'foo': 'passed'}}, 'Server.foo not in compose compose-1.'),
]
for data, err in inputs:
response = self.client.patch(reverse('compose-detail', args=['compose-1']),
{'rtt_tested_architectures': data},
format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(response.data.get('rtt_tested_architectures', ''), err)
self.assertNumChanges([])
def test_update_testing_status_to_non_existing_status(self):
data = {'Server': {'x86_64': 'awesome'}}
response = self.client.patch(reverse('compose-detail', args=['compose-1']),
{'rtt_tested_architectures': data},
format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(response.data.get('rtt_tested_architectures', ''),
'"awesome" is not a known testing status for Server.x86_64.')
def test_update_testing_status_with_malformed_data(self):
inputs = [
({'Server': 'passed'}, 'Server: "passed" is not a dict'),
('passed', 'rtt_tested_architectures: "passed" is not a dict'),
]
for data, err in inputs:
response = self.client.patch(reverse('compose-detail', args=['compose-1']),
{'rtt_tested_architectures': data},
format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(response.data.get('detail', []), [err])
self.assertNumChanges([])
class OverridesRPMAPITestCase(TestCaseWithChangeSetMixin, APITestCase):
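    """Tests for the OverrideRPM API: querying, creating, deleting and bulk-clearing overrides."""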
fixtures = [
'pdc/apps/release/fixtures/tests/release.json',
'pdc/apps/compose/fixtures/tests/compose_overriderpm.json',
]
def setUp(self):
self.release = release_models.Release.objects.get(release_id='release-1.0')
self.override_rpm = {'id': 1, 'release': 'release-1.0', 'variant': 'Server', 'arch': 'x86_64',
'srpm_name': 'bash', 'rpm_name': 'bash-doc', 'rpm_arch': 'x86_64',
'include': False, 'comment': '', 'do_not_delete': False}
self.do_not_delete_orpm = {'release': 'release-1.0', 'variant': 'Server', 'arch': 'x86_64',
'srpm_name': 'bash', 'rpm_name': 'bash-doc', 'rpm_arch': 'src',
'include': True, 'comment': '', 'do_not_delete': True}
def test_query_existing(self):
response = self.client.get(reverse('overridesrpm-list'), {'release': 'release-1.0'})
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['count'], 1)
self.assertEqual(response.data['results'][0], self.override_rpm)
def test_query_nonexisting(self):
response = self.client.get(reverse('overridesrpm-list'), {'release': 'release-1.1'})
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['count'], 0)
def test_delete_existing(self):
response = self.client.delete(reverse('overridesrpm-detail', args=[1]))
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
self.assertEqual(models.OverrideRPM.objects.count(), 0)
self.assertNumChanges([1])
def test_delete_non_existing(self):
        response = self.client.delete(reverse('overridesrpm-detail', args=[42]))
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
self.assertEqual(models.OverrideRPM.objects.count(), 1)
self.assertNumChanges([])
def test_create_duplicit(self):
response = self.client.post(reverse('overridesrpm-list'), self.override_rpm)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(models.OverrideRPM.objects.count(), 1)
def test_create_correct(self):
self.override_rpm["rpm_name"] = "bash-debuginfo"
del self.override_rpm["id"]
response = self.client.post(reverse('overridesrpm-list'), self.override_rpm)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(models.OverrideRPM.objects.count(), 2)
def test_create_extra_field(self):
self.override_rpm["rpm_name"] = "bash-debuginfo"
self.override_rpm["foo"] = "bar"
response = self.client.post(reverse('overridesrpm-list'), self.override_rpm)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_clear(self):
response = self.client.delete(reverse('overridesrpm-list'), {'release': 'release-1.0'})
self.assertEqual(models.OverrideRPM.objects.count(), 0)
self.assertItemsEqual(response.data, [self.override_rpm])
def test_clear_with_no_matched_record(self):
response = self.client.delete(reverse('overridesrpm-list'), {'release': 'no_such_release'})
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
def test_clear_preserve_do_not_delete(self):
models.OverrideRPM.objects.create(release=self.release, variant="Server", arch="x86_64",
rpm_name="bash-doc", rpm_arch="src", include=True,
do_not_delete=True, srpm_name="bash")
response = self.client.delete(reverse('overridesrpm-list'), {'release': 'release-1.0'})
self.assertEqual(models.OverrideRPM.objects.count(), 1)
self.assertItemsEqual(response.data, [self.override_rpm])
def test_delete_with_extra_param(self):
models.OverrideRPM.objects.create(release=self.release, variant="Server", arch="x86_64",
rpm_name="bash-doc", rpm_arch="src", include=True,
do_not_delete=True, srpm_name="bash")
response = self.client.delete(reverse('overridesrpm-list'), {'release': 'release-1.0', 'variant': "Server",
'arch': 'x86_64', 'rpm_name': 'bash-doc',
'rpm_arch': 'src', 'srpm_name': 'bash'})
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_clear_with_extra_param(self):
models.OverrideRPM.objects.create(release=self.release, variant="Server", arch="x86_64",
rpm_name="bash-doc", rpm_arch="src", include=True,
do_not_delete=True, srpm_name="bash")
response = self.client.delete(reverse('overridesrpm-list'), {'release': 'release-1.0', 'srpm_name': 'bash'})
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_clear_force(self):
override = models.OverrideRPM.objects.create(release=self.release, variant="Server",
arch="x86_64", rpm_name="bash-doc",
rpm_arch="src", include=True,
do_not_delete=True, srpm_name="bash")
self.do_not_delete_orpm['id'] = override.pk
response = self.client.delete(reverse('overridesrpm-list'), {'release': 'release-1.0', 'force': True})
self.assertEqual(models.OverrideRPM.objects.count(), 0)
self.assertItemsEqual(response.data, [self.override_rpm, self.do_not_delete_orpm])
def test_delete_two_by_id(self):
override = models.OverrideRPM.objects.create(release=self.release, variant="Server",
arch="x86_64", rpm_name="bash-doc",
rpm_arch="src", include=True,
do_not_delete=True, srpm_name="bash")
response = self.client.delete(reverse('overridesrpm-list'),
[1, override.pk],
format='json')
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
self.assertNumChanges([2])
self.assertEqual(models.OverrideRPM.objects.count(), 0)
class ComposeRPMViewAPITestCase(TestCaseWithChangeSetMixin, APITestCase):
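    """Tests for importing an RPM manifest for a compose and retrieving it back."""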
fixtures = [
"pdc/apps/common/fixtures/test/sigkey.json",
]
def setUp(self):
with open('pdc/apps/release/fixtures/tests/composeinfo.json', 'r') as f:
self.compose_info = json.loads(f.read())
with open('pdc/apps/compose/fixtures/tests/rpm-manifest.json', 'r') as f:
self.manifest = json.loads(f.read())
self.client.post(reverse('releaseimportcomposeinfo-list'),
self.compose_info, format='json')
# Caching ids makes it faster, but the cache needs to be cleared for each test.
models.Path.CACHE = {}
common_models.SigKey.CACHE = {}
def test_import_inconsistent_data(self):
self.manifest['payload']['compose']['id'] = 'TP-1.0-20150315.0'
response = self.client.post(reverse('composerpm-list'),
{'rpm_manifest': self.manifest,
'release_id': 'tp-1.0',
'composeinfo': self.compose_info},
format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_import_and_retrieve_manifest(self):
response = self.client.post(reverse('composerpm-list'),
{'rpm_manifest': self.manifest,
'release_id': 'tp-1.0',
'composeinfo': self.compose_info},
format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertNumChanges([11, 5])
self.assertEqual(models.ComposeRPM.objects.count(), 6)
response = self.client.get(reverse('composerpm-detail', args=['TP-1.0-20150310.0']))
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertDictEqual(dict(response.data),
self.manifest)
class ComposeImageAPITestCase(TestCaseWithChangeSetMixin, APITestCase):
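    """Tests for importing an image manifest for a compose and retrieving it back."""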
def setUp(self):
with open('pdc/apps/release/fixtures/tests/composeinfo.json', 'r') as f:
self.compose_info = json.loads(f.read())
with open('pdc/apps/compose/fixtures/tests/image-manifest.json', 'r') as f:
self.manifest = json.loads(f.read())
self.client.post(reverse('releaseimportcomposeinfo-list'),
self.compose_info, format='json')
# Caching ids makes it faster, but the cache needs to be cleared for each test.
models.Path.CACHE = {}
def test_import_images_by_deprecated_api(self):
# TODO: remove this test after next release
response = self.client.post(reverse('composeimportimages-list'),
{'image_manifest': self.manifest,
'release_id': 'tp-1.0',
'composeinfo': self.compose_info},
format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertNumChanges([11, 5])
self.assertEqual(models.ComposeImage.objects.count(), 4)
response = self.client.get(reverse('image-list'), {'compose': 'TP-1.0-20150310.0'})
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data.get('count'), 4)
def test_import_images(self):
response = self.client.post(reverse('composeimage-list'),
{'image_manifest': self.manifest,
'release_id': 'tp-1.0',
'composeinfo': self.compose_info},
format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertNumChanges([11, 5])
self.assertEqual(models.ComposeImage.objects.count(), 4)
response = self.client.get(reverse('image-list'), {'compose': 'TP-1.0-20150310.0'})
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data.get('count'), 4)
def test_import_inconsistent_data(self):
self.manifest['payload']['compose']['id'] = 'TP-1.0-20150315.0'
response = self.client.post(reverse('composeimage-list'),
{'image_manifest': self.manifest,
'release_id': 'tp-1.0',
'composeinfo': self.compose_info},
format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_import_and_retrieve_images(self):
response = self.client.post(reverse('composeimage-list'),
{'image_manifest': self.manifest,
'release_id': 'tp-1.0',
'composeinfo': self.compose_info},
format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
response = self.client.get(reverse('composeimage-detail', args=['TP-1.0-20150310.0']))
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertDictEqual(dict(response.data), self.manifest)
class RPMMappingAPITestCase(APITestCase):
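    """Tests for the per-compose RPM mapping endpoint, including handling of overrides."""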
fixtures = [
"pdc/apps/common/fixtures/test/sigkey.json",
"pdc/apps/package/fixtures/test/rpm.json",
"pdc/apps/release/fixtures/tests/release.json",
"pdc/apps/release/fixtures/tests/variant.json",
"pdc/apps/release/fixtures/tests/variant_arch.json",
"pdc/apps/compose/fixtures/tests/variant.json",
"pdc/apps/compose/fixtures/tests/variant_arch.json",
"pdc/apps/compose/fixtures/tests/compose_overriderpm.json",
"pdc/apps/compose/fixtures/tests/compose.json",
"pdc/apps/compose/fixtures/tests/compose_composerpm.json",
]
def setUp(self):
self.release = release_models.Release.objects.latest('id')
self.compose = models.Compose.objects.get(compose_id='compose-1')
self.url = reverse('composerpmmapping-detail', args=[self.compose.compose_id, 'bash'])
def test_get_rpm_mapping(self):
response = self.client.get(self.url, {}, format='json')
expected_data = {
'Server': {
'x86_64': {
'bash': ['x86_64'],
}
}
}
self.assertEqual(response.data, expected_data)
def test_get_rpm_mapping_for_nonexisting_compose(self):
url = reverse('composerpmmapping-detail', args=['foo-bar', 'bash'])
response = self.client.get(url, {}, format='json')
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
def test_rpm_mapping_includes_overrides(self):
models.OverrideRPM.objects.create(variant='Server', arch='x86_64', srpm_name='bash', rpm_name='bash',
rpm_arch='src', include=True, release=self.release)
response = self.client.get(self.url, {}, format='json')
expected_data = {
'Server': {
'x86_64': {
'bash': ['src', 'x86_64'],
}
}
}
self.assertEqual(response.data, expected_data)
def test_rpm_mapping_can_exclude_overrides(self):
models.OverrideRPM.objects.create(variant='Server', arch='x86_64', srpm_name='bash', rpm_name='bash',
rpm_arch='src', include=True, release=self.release)
self.url += '?disable_overrides=1'
response = self.client.get(self.url, {}, format='json')
expected_data = {
'Server': {
'x86_64': {
'bash': ['x86_64'],
'bash-doc': ['x86_64'],
}
}
}
self.assertEqual(response.data, expected_data)
def test_does_not_return_empty_container(self):
models.OverrideRPM.objects.create(variant='Server', arch='x86_64', srpm_name='bash', rpm_name='bash',
rpm_arch='x86_64', include=False, release=self.release)
response = self.client.get(self.url, {}, format='json')
self.assertEqual(response.data, {})
def test_partial_update(self):
self.client.force_authenticate(create_user("user", perms=[]))
self.client.patch(self.url, [{"action": "create", "srpm_name": "bash", "rpm_name": "bash-magic",
"rpm_arch": "src", "variant": "Client", "arch": "x86_64",
"do_not_delete": False, "comment": "", "include": True}],
format='json')
orpm = models.OverrideRPM.objects.get(srpm_name="bash", rpm_name="bash-magic", rpm_arch="src",
variant="Client", arch="x86_64", include=True,
do_not_delete=False, comment="")
self.assertIsNotNone(orpm)
def test_update(self):
self.client.force_authenticate(create_user("user", perms=[]))
new_mapping = {'Server': {'x86_64': {'bash': ['x86_64', 'i386']}}}
response = self.client.put(self.url, new_mapping, format='json')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.data, [{'action': 'create', 'srpm_name': 'bash', 'rpm_name': 'bash',
'rpm_arch': 'i386', 'variant': 'Server', 'arch': 'x86_64',
'include': True, 'release_id': 'release-1.0'}])
self.assertEqual(0, models.OverrideRPM.objects.filter(rpm_arch='i386').count())
def test_update_with_perform(self):
self.client.force_authenticate(create_user("user", perms=[]))
new_mapping = {'Server': {'x86_64': {'bash': ['x86_64', 'i386']}}}
response = self.client.put(self.url + '?perform=1', new_mapping, format='json')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.data, [{'action': 'create', 'srpm_name': 'bash', 'rpm_name': 'bash',
'rpm_arch': 'i386', 'variant': 'Server', 'arch': 'x86_64',
'include': True, 'release_id': 'release-1.0'}])
self.assertEqual(1, models.OverrideRPM.objects.filter(rpm_arch='i386').count())
class FilterBugzillaProductsAndComponentsTestCase(APITestCase):
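    """Tests for resolving an RPM NVR to Bugzilla components via release components."""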
fixtures = [
"pdc/apps/package/fixtures/test/rpm.json",
"pdc/apps/release/fixtures/tests/release.json",
"pdc/apps/compose/fixtures/tests/variant.json",
"pdc/apps/compose/fixtures/tests/compose.json",
"pdc/apps/component/fixtures/tests/release_component.json",
"pdc/apps/component/fixtures/tests/upstream.json",
"pdc/apps/component/fixtures/tests/global_component.json"
]
def setUp(self):
# Construct a new release and release component
self.release = Release.objects.create(
release_id='release-2.0',
short='release',
version='2.0',
name='Awesome Release',
release_type_id=1,
)
self.bugzilla_component = BugzillaComponent.objects.create(name='kernel')
filesystems = BugzillaComponent.objects.create(name='filesystems', parent_component=self.bugzilla_component)
BugzillaComponent.objects.create(name='ext4', parent_component=filesystems)
pyth = BugzillaComponent.objects.create(name='python', parent_component=self.bugzilla_component)
BugzillaComponent.objects.create(name='bin', parent_component=pyth)
ReleaseComponent.objects.create(
release=self.release,
global_component_id=1,
name='kernel',
bugzilla_component=self.bugzilla_component
)
def test_filter_bugzilla_products_components_with_rpm_nvr(self):
url = reverse('bugzilla-list')
response = self.client.get(url + '?nvr=bash-1.2.3-4.b1', format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_filter_with_invalid_nvr(self):
url = reverse('bugzilla-list')
response = self.client.get(url + '?nvr=xxx', format='json')
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
def test_filter_with_nvr_without_rpms(self):
url = reverse('bugzilla-list')
response = self.client.get(url + '?nvr=GConf2-3.2.6-8.el71', format='json')
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
def test_filter_without_nvr(self):
url = reverse('bugzilla-list')
response = self.client.get(url, format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
@mock.patch('pdc.apps.compose.models.Compose.objects.filter')
def test_filter_without_srpm_component_name_mapping(self, mock_filter):
release_component, _ = ReleaseComponent.objects.get_or_create(
global_component_id=1,
release=self.release,
bugzilla_component=self.bugzilla_component,
name='bash')
mock_filter.return_value = mock.Mock()
mock_filter.return_value.distinct.return_value = [mock.Mock()]
mock_filter.return_value.distinct.return_value[0].release = self.release.release_id
url = reverse('bugzilla-list')
response = self.client.get(url + '?nvr=bash-1.2.3-4.b1', format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertIn('kernel', response.content)
@mock.patch('pdc.apps.compose.models.Compose.objects.filter')
def test_filter_with_srpm_component_name_mapping(self, mock_filter):
release_component, _ = ReleaseComponent.objects.get_or_create(
global_component_id=1,
release=self.release,
name='kernel')
binding_models.ReleaseComponentSRPMNameMapping.objects.create(
srpm_name='bash',
release_component=release_component)
mock_filter.return_value = mock.Mock()
mock_filter.return_value.distinct.return_value = [mock.Mock()]
mock_filter.return_value.distinct.return_value[0].release = self.release.release_id
url = reverse('bugzilla-list')
response = self.client.get(url + '?nvr=bash-1.2.3-4.b1', format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertIn('kernel', response.content)
class RPMMappingTestCase(TestCase):
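    """Tests for computing override changes between the current and an edited RPM mapping."""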
fixtures = [
"pdc/apps/common/fixtures/test/sigkey.json",
"pdc/apps/package/fixtures/test/rpm.json",
"pdc/apps/release/fixtures/tests/release.json",
"pdc/apps/release/fixtures/tests/variant.json",
"pdc/apps/release/fixtures/tests/variant_arch.json",
"pdc/apps/compose/fixtures/tests/variant.json",
"pdc/apps/compose/fixtures/tests/variant_arch.json",
"pdc/apps/compose/fixtures/tests/compose_overriderpm.json",
"pdc/apps/compose/fixtures/tests/compose.json",
"pdc/apps/compose/fixtures/tests/compose_composerpm.json",
]
def setUp(self):
self.compose = models.Compose.objects.get(compose_id='compose-1')
self.mapping, _ = self.compose.get_rpm_mapping('bash')
def test_compute_diff_add_new(self):
new_mapping = models.ComposeRPMMapping(data={'Server': {'x86_64': {'bash': ['src', 'x86_64']}}})
changes = self.mapping.compute_changes(new_mapping)
self.assertEqual(len(changes), 1)
self.assertEqual(changes[0], {'action': 'create', 'variant': 'Server', 'arch': 'x86_64',
'include': True, 'release_id': 'release-1.0', 'rpm_name': 'bash',
'srpm_name': 'bash', 'rpm_arch': 'src'})
def test_compute_diff_add_excluded(self):
new_mapping = models.ComposeRPMMapping(data={'Server': {'x86_64': {'bash': ['x86_64'],
'bash-doc': ['x86_64']}}})
changes = self.mapping.compute_changes(new_mapping)
self.assertEqual(len(changes), 1)
self.assertEqual(changes[0], {'action': 'delete', 'variant': 'Server', 'arch': 'x86_64',
'include': False, 'release_id': 'release-1.0', 'rpm_name': 'bash-doc',
'srpm_name': 'bash', 'rpm_arch': 'x86_64'})
def test_compute_diff_remove_existing(self):
new_mapping = models.ComposeRPMMapping(data={})
changes = self.mapping.compute_changes(new_mapping)
self.assertEqual(len(changes), 1)
self.assertEqual(changes[0], {'action': 'create', 'variant': 'Server', 'arch': 'x86_64',
'include': False, 'release_id': 'release-1.0', 'rpm_name': 'bash',
'srpm_name': 'bash', 'rpm_arch': 'x86_64'})
class OverrideManagementTestCase(TestCase):
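    """Tests for the first step of the override management form: submitting changes and getting a preview."""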
fixtures = [
"pdc/apps/common/fixtures/test/sigkey.json",
"pdc/apps/package/fixtures/test/rpm.json",
"pdc/apps/release/fixtures/tests/release.json",
"pdc/apps/release/fixtures/tests/variant.json",
"pdc/apps/release/fixtures/tests/variant_arch.json",
"pdc/apps/compose/fixtures/tests/variant.json",
"pdc/apps/compose/fixtures/tests/variant_arch.json",
"pdc/apps/compose/fixtures/tests/compose_overriderpm.json",
"pdc/apps/compose/fixtures/tests/compose.json",
"pdc/apps/compose/fixtures/tests/compose_composerpm.json",
]
def setUp(self):
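        # Formset data for the override management view: 'checks' covers
        # packages already in the compose (one checkbox per RPM), 'news' adds
        # overrides on existing variant/arch pairs, 'vararch' defines new
        # variant/arch pairs and 'for_new_vararch' adds overrides on them.
        # The *-TOTAL_FORMS/INITIAL_FORMS/MAX_NUM_FORMS keys are Django's
        # standard formset management fields.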
self.initial_form_data = {
'checks-0-included': 'on',
'checks-0-variant': 'Server',
'checks-0-arch': 'x86_64',
'checks-0-rpm_name': 'bash',
'checks-0-rpm_arch': 'x86_64',
'checks-1-variant': 'Server',
'checks-1-arch': 'x86_64',
'checks-1-rpm_name': 'bash-doc',
'checks-1-rpm_arch': 'x86_64',
'checks-MAX_NUM_FORMS': '1000',
'checks-INITIAL_FORMS': 2,
'checks-TOTAL_FORMS': 2,
'news-MAX_NUM_FORMS': '1000',
'news-INITIAL_FORMS': 1,
'news-TOTAL_FORMS': 0,
'vararch-MAX_NUM_FORMS': '1000',
'vararch-INITIAL_FORMS': 1,
'vararch-TOTAL_FORMS': 0,
'for_new_vararch-MAX_NUM_FORMS': '1000',
'for_new_vararch-INITIAL_FORMS': 0,
'for_new_vararch-TOTAL_FORMS': 0,
}
def test_can_access_management_form(self):
client = Client()
response = client.get('/override/manage/release-1.0/', {'package': 'bash'})
self.assertEqual(response.status_code, 200)
# There is one package in fixtures
self.assertEqual(len(response.context['forms']), 1)
def test_submit_no_changes(self):
client = Client()
response = client.post('/override/manage/release-1.0/?package=bash', self.initial_form_data)
self.assertEqual(response.status_code, 200)
self.assertIn('compressed', response.context)
data = json.loads(response.context['compressed'])
self.assertEqual(len(data), 0)
def test_submit_disable(self):
client = Client()
del self.initial_form_data['checks-0-included']
response = client.post('/override/manage/release-1.0/?package=bash', self.initial_form_data)
self.assertEqual(response.status_code, 200)
self.assertIn('compressed', response.context)
data = json.loads(response.context['compressed'])
self.assertEqual(len(data), 1)
self.assertEqual({'variant': 'Server', 'arch': 'x86_64', 'rpm_name': 'bash', 'rpm_arch': 'x86_64',
'include': False, 'action': 'create', 'srpm_name': 'bash', 'release_id': 'release-1.0'},
data[0])
def test_submit_enable(self):
client = Client()
self.initial_form_data['checks-1-included'] = 'on'
response = client.post('/override/manage/release-1.0/?package=bash', self.initial_form_data)
self.assertEqual(response.status_code, 200)
self.assertIn('compressed', response.context)
data = json.loads(response.context['compressed'])
self.assertEqual(len(data), 1)
self.assertEqual({'variant': 'Server', 'arch': 'x86_64', 'rpm_name': 'bash-doc', 'rpm_arch': 'x86_64',
'include': False, 'action': 'delete', 'srpm_name': 'bash', 'release_id': 'release-1.0',
'comment': '', 'do_not_delete': False},
data[0])
def test_submit_new_override(self):
client = Client()
self.initial_form_data.update({
'news-0-variant': 'Server',
'news-0-arch': 'x86_64',
'news-0-rpm_name': 'bash-completion',
'news-0-rpm_arch': 'x86_64',
'news-TOTAL_FORMS': 1,
})
response = client.post('/override/manage/release-1.0/?package=bash', self.initial_form_data)
self.assertEqual(response.status_code, 200)
self.assertIn('compressed', response.context)
data = json.loads(response.context['compressed'])
self.assertEqual(len(data), 1)
self.assertEqual({'action': 'create', 'release_id': 'release-1.0', 'srpm_name': 'bash', 'variant': 'Server',
'arch': 'x86_64', 'rpm_name': 'bash-completion', 'rpm_arch': 'x86_64', 'include': True},
data[0])
def test_submit_new_override_on_new_variant(self):
client = Client()
self.initial_form_data.update({
'vararch-0-variant': 'Server-optional',
'vararch-0-arch': 'x86_64',
'for_new_vararch-0-new_variant': 0,
'for_new_vararch-0-rpm_name': 'bash-completion',
'for_new_vararch-0-rpm_arch': 'x86_64',
'vararch-TOTAL_FORMS': 1,
'for_new_vararch-TOTAL_FORMS': 1,
})
response = client.post('/override/manage/release-1.0/?package=bash', self.initial_form_data)
self.assertEqual(response.status_code, 200)
self.assertIn('compressed', response.context)
data = json.loads(response.context['compressed'])
self.assertEqual(len(data), 1)
self.assertEqual({'action': 'create', 'release_id': 'release-1.0', 'srpm_name': 'bash', 'variant': 'Server-optional',
'arch': 'x86_64', 'rpm_name': 'bash-completion', 'rpm_arch': 'x86_64', 'include': True},
data[0])
def test_submit_more_different_changes(self):
client = Client()
del self.initial_form_data['checks-0-included']
self.initial_form_data.update({
'news-0-variant': 'Server',
'news-0-arch': 'x86_64',
'news-0-rpm_name': 'bash-completion',
'news-0-rpm_arch': 'x86_64',
'vararch-0-variant': 'Server-optional',
'vararch-0-arch': 'x86_64',
'for_new_vararch-0-new_variant': 0,
'for_new_vararch-0-rpm_name': 'bash-completion',
'for_new_vararch-0-rpm_arch': 'x86_64',
'news-TOTAL_FORMS': 1,
'vararch-TOTAL_FORMS': 1,
'for_new_vararch-TOTAL_FORMS': 1,
})
response = client.post('/override/manage/release-1.0/?package=bash', self.initial_form_data)
self.assertEqual(response.status_code, 200)
self.assertIn('compressed', response.context)
data = json.loads(response.context['compressed'])
self.assertEqual(len(data), 3)
self.assertIn({'action': 'create', 'release_id': 'release-1.0', 'srpm_name': 'bash', 'variant': 'Server',
'arch': 'x86_64', 'rpm_name': 'bash-completion', 'rpm_arch': 'x86_64', 'include': True},
data)
self.assertIn({'action': 'create', 'release_id': 'release-1.0', 'srpm_name': 'bash', 'variant': 'Server-optional',
'arch': 'x86_64', 'rpm_name': 'bash-completion', 'rpm_arch': 'x86_64', 'include': True},
data)
self.assertIn({'action': 'create', 'release_id': 'release-1.0', 'srpm_name': 'bash', 'variant': 'Server',
'arch': 'x86_64', 'rpm_name': 'bash', 'rpm_arch': 'x86_64', 'include': False},
data)
def test_submit_more_same_changes(self):
client = Client()
self.initial_form_data.update({
'news-0-variant': 'Server',
'news-0-arch': 'x86_64',
'news-0-rpm_name': 'bash-completion',
'news-0-rpm_arch': 'x86_64',
'news-1-variant': 'Server',
'news-1-arch': 'x86_64',
'news-1-rpm_name': 'bash-magic',
'news-1-rpm_arch': 'src',
'news-TOTAL_FORMS': 2,
})
response = client.post('/override/manage/release-1.0/?package=bash', self.initial_form_data)
self.assertEqual(response.status_code, 200)
self.assertIn('compressed', response.context)
data = json.loads(response.context['compressed'])
self.assertEqual(len(data), 2)
self.assertIn({'action': 'create', 'release_id': 'release-1.0', 'srpm_name': 'bash', 'variant': 'Server',
'arch': 'x86_64', 'rpm_name': 'bash-completion', 'rpm_arch': 'x86_64', 'include': True},
data)
self.assertIn({'action': 'create', 'release_id': 'release-1.0', 'srpm_name': 'bash', 'variant': 'Server',
'arch': 'x86_64', 'rpm_name': 'bash-magic', 'rpm_arch': 'src', 'include': True},
data)
def test_submit_enable_and_disable(self):
client = Client()
del self.initial_form_data['checks-0-included']
self.initial_form_data['checks-1-included'] = 'on'
response = client.post('/override/manage/release-1.0/?package=bash', self.initial_form_data)
self.assertEqual(response.status_code, 200)
self.assertIn('compressed', response.context)
data = json.loads(response.context['compressed'])
self.assertEqual(len(data), 2)
self.assertIn({'variant': 'Server', 'arch': 'x86_64', 'rpm_name': 'bash-doc', 'rpm_arch': 'x86_64',
'include': False, 'action': 'delete', 'srpm_name': 'bash', 'release_id': 'release-1.0',
'comment': '', 'do_not_delete': False},
data)
self.assertIn({'variant': 'Server', 'arch': 'x86_64', 'rpm_name': 'bash', 'rpm_arch': 'x86_64',
'include': False, 'action': 'create', 'srpm_name': 'bash', 'release_id': 'release-1.0'},
data)
def test_submit_incorrect_new_override_missing_rpm_arch(self):
client = Client()
self.initial_form_data.update({
'news-0-variant': 'Server',
'news-0-arch': 'x86_64',
'news-0-rpm_name': 'bash-completion',
'news-0-rpm_arch': '',
'news-TOTAL_FORMS': 1,
})
response = client.post('/override/manage/release-1.0/?package=bash', self.initial_form_data)
self.assertEqual(response.status_code, 200)
self.assertFormsetError(response, 'override_forms', 0, None, 'Both RPM name and arch must be filled in.')
self.assertContains(response, 'There are errors in the form.')
def test_submit_incorrect_new_override_missing_rpm_name(self):
client = Client()
self.initial_form_data.update({
'news-0-variant': 'Server',
'news-0-arch': 'x86_64',
'news-0-rpm_name': '',
'news-0-rpm_arch': 'src',
'news-TOTAL_FORMS': 1,
})
response = client.post('/override/manage/release-1.0/?package=bash', self.initial_form_data)
self.assertEqual(response.status_code, 200)
self.assertFormsetError(response, 'override_forms', 0, None, 'Both RPM name and arch must be filled in.')
self.assertContains(response, 'There are errors in the form.')
def test_submit_incorrect_new_override_for_new_variant_missing_rpm_arch(self):
client = Client()
self.initial_form_data.update({
'vararch-0-variant': 'Server-optional',
'vararch-0-arch': 'x86_64',
'for_new_vararch-0-rpm_name': 'bash-completion',
'for_new_vararch-0-rpm_arch': '',
'for_new_vararch-0-new_variant': 0,
'vararch-TOTAL_FORMS': 1,
'for_new_vararch-TOTAL_FORMS': 1,
})
response = client.post('/override/manage/release-1.0/?package=bash', self.initial_form_data)
self.assertEqual(response.status_code, 200)
self.assertFormsetError(response, 'override_v_forms', 0, None, 'Both RPM name and arch must be filled in.')
self.assertContains(response, 'There are errors in the form.')
def test_submit_incorrect_new_override_for_new_variant_missing_rpm_name(self):
client = Client()
self.initial_form_data.update({
'vararch-0-variant': 'Server-optional',
'vararch-0-arch': 'x86_64',
'for_new_vararch-0-rpm_name': '',
'for_new_vararch-0-rpm_arch': 'src',
'for_new_vararch-0-new_variant': 0,
'vararch-TOTAL_FORMS': 1,
'for_new_vararch-TOTAL_FORMS': 1,
})
response = client.post('/override/manage/release-1.0/?package=bash', self.initial_form_data)
self.assertEqual(response.status_code, 200)
self.assertFormsetError(response, 'override_v_forms', 0, None, 'Both RPM name and arch must be filled in.')
self.assertContains(response, 'There are errors in the form.')
def test_submit_incorrect_new_override_for_new_variant_missing_variant_name(self):
client = Client()
self.initial_form_data.update({
'vararch-0-variant': '',
'vararch-0-arch': 'x86_64',
'for_new_vararch-0-rpm_name': 'bash-magic',
'for_new_vararch-0-rpm_arch': 'src',
'for_new_vararch-0-new_variant': 0,
'vararch-TOTAL_FORMS': 1,
'for_new_vararch-TOTAL_FORMS': 1,
})
response = client.post('/override/manage/release-1.0/?package=bash', self.initial_form_data)
self.assertEqual(response.status_code, 200)
self.assertFormsetError(response, 'variant_forms', 0, None, 'Both variant and arch must be filled in.')
self.assertContains(response, 'There are errors in the form.')
def test_submit_incorrect_new_override_for_new_variant_missing_variant_arch(self):
client = Client()
self.initial_form_data.update({
'vararch-0-variant': 'Server-optional',
'vararch-0-arch': '',
'for_new_vararch-0-rpm_name': 'bash-magic',
'for_new_vararch-0-rpm_arch': 'src',
'for_new_vararch-0-new_variant': 0,
'vararch-TOTAL_FORMS': 1,
'for_new_vararch-TOTAL_FORMS': 1,
})
response = client.post('/override/manage/release-1.0/?package=bash', self.initial_form_data)
self.assertEqual(response.status_code, 200)
self.assertFormsetError(response, 'variant_forms', 0, None, 'Both variant and arch must be filled in.')
self.assertContains(response, 'There are errors in the form.')
def test_submit_incorrect_new_override_for_new_variant_and_old_variant(self):
client = Client()
self.initial_form_data.update({
'vararch-0-variant': 'Server-optional',
'vararch-0-arch': 'x86_64',
'for_new_vararch-0-rpm_name': 'bash-magic',
'for_new_vararch-0-rpm_arch': 'src',
'for_new_vararch-0-new_variant': 0,
'for_new_vararch-0-variant': 'Server',
'for_new_vararch-0-arch': 'i686',
'vararch-TOTAL_FORMS': 1,
'for_new_vararch-TOTAL_FORMS': 1,
})
response = client.post('/override/manage/release-1.0/?package=bash', self.initial_form_data)
self.assertEqual(response.status_code, 200)
self.assertFormsetError(response, 'override_v_forms', 0, None, 'Can not reference both old and new variant.arch.')
self.assertContains(response, 'There are errors in the form.')
def test_submit_preview_no_change(self):
client = Client()
response = client.post('/override/manage/release-1.0/?package=bash', self.initial_form_data)
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'No changes')
class OverridePreviewTestCase(TestCase):
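    """Tests for the preview/confirmation step of the override management form, which saves the overrides."""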
fixtures = [
"pdc/apps/common/fixtures/test/sigkey.json",
"pdc/apps/package/fixtures/test/rpm.json",
"pdc/apps/release/fixtures/tests/release.json",
"pdc/apps/release/fixtures/tests/variant.json",
"pdc/apps/release/fixtures/tests/variant_arch.json",
"pdc/apps/compose/fixtures/tests/variant.json",
"pdc/apps/compose/fixtures/tests/variant_arch.json",
"pdc/apps/compose/fixtures/tests/compose_overriderpm.json",
"pdc/apps/compose/fixtures/tests/compose.json",
"pdc/apps/compose/fixtures/tests/compose_composerpm.json",
]
def setUp(self):
self.form_data = {
'checks-0-included': 'on',
'checks-0-variant': 'Server',
'checks-0-arch': 'x86_64',
'checks-0-rpm_name': 'bash',
'checks-0-rpm_arch': 'x86_64',
'checks-1-variant': 'Server',
'checks-1-arch': 'x86_64',
'checks-1-rpm_name': 'bash-doc',
'checks-1-rpm_arch': 'x86_64',
'checks-MAX_NUM_FORMS': '1000',
'checks-INITIAL_FORMS': 2,
'checks-TOTAL_FORMS': 2,
'news-MAX_NUM_FORMS': '1000',
'news-INITIAL_FORMS': 1,
'news-TOTAL_FORMS': 0,
'vararch-MAX_NUM_FORMS': '1000',
'vararch-INITIAL_FORMS': 1,
'vararch-TOTAL_FORMS': 0,
'for_new_vararch-MAX_NUM_FORMS': '1000',
'for_new_vararch-INITIAL_FORMS': 0,
'for_new_vararch-TOTAL_FORMS': 0,
}
self.preview_form_data = {
'preview_submit': True,
'form-TOTAL_FORMS': 0,
'form-INITIAL_FORMS': 0,
'form-MAX_NUM_FORMS': 1000,
}
def _populate_preview_form(self, response):
"""Parse response and prepare form data for preview submission."""
def set_val(dict, key, val):
            if isinstance(val, bool):
                # checked boxes post 'on'; unchecked ones are omitted entirely
                if val:
                    dict[key] = 'on'
            else:
                dict[key] = val
for (i, action) in enumerate(json.loads(response.context['compressed'])):
for k in action:
set_val(self.preview_form_data, 'form-%d-%s' % (i, k), action[k])
self.preview_form_data['form-TOTAL_FORMS'] += 1
self.preview_form_data['initial_data'] = response.context['compressed']
def test_submit_with_comment_and_missing_do_not_delete(self):
client = Client()
del self.form_data['checks-0-included']
response = client.post('/override/manage/release-1.0/?package=bash', self.form_data)
self._populate_preview_form(response)
self.preview_form_data['form-0-comment'] = 'do not delete me'
response = client.post('/override/manage/release-1.0/?package=bash', self.preview_form_data)
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'There are errors in the form.')
self.assertFormsetError(response, 'forms', 0, None, 'Comment needs do_not_delete checked.')
def test_submit_ok_no_comment(self):
client = Client()
create_user("user", perms=["pdc.overrides"])
client.login(username="user", password="user")
del self.form_data['checks-0-included']
response = client.post('/override/manage/release-1.0/?package=bash', self.form_data)
self._populate_preview_form(response)
response = client.post('/override/manage/release-1.0/?package=bash', self.preview_form_data)
self.assertEqual(response.status_code, 302)
self.assertEqual(models.OverrideRPM.objects.count(), 2)
orpm = models.OverrideRPM.objects.latest('id')
self.assertEqual(orpm.include, False)
self.assertEqual(orpm.do_not_delete, False)
self.assertEqual(orpm.comment, '')
def test_submit_ok_with_comment(self):
client = Client()
create_user("user", perms=["pdc.overrides"])
client.login(username="user", password="user")
del self.form_data['checks-0-included']
response = client.post('/override/manage/release-1.0/?package=bash', self.form_data)
self._populate_preview_form(response)
self.preview_form_data.update({
'form-0-do_not_delete': 'on',
'form-0-comment': 'do not delete me',
})
response = client.post('/override/manage/release-1.0/?package=bash', self.preview_form_data)
self.assertEqual(response.status_code, 302)
self.assertEqual(models.OverrideRPM.objects.count(), 2)
orpm = models.OverrideRPM.objects.latest('id')
self.assertEqual(orpm.include, False)
self.assertEqual(orpm.do_not_delete, True)
self.assertEqual(orpm.comment, 'do not delete me')
def test_submit_ok_should_delete(self):
client = Client()
create_user("user", perms=["pdc.overrides"])
client.login(username="user", password="user")
self.form_data['checks-1-included'] = 'on'
response = client.post('/override/manage/release-1.0/?package=bash', self.form_data)
self._populate_preview_form(response)
del self.preview_form_data['form-0-do_not_delete']
response = client.post('/override/manage/release-1.0/?package=bash', self.preview_form_data)
self.assertEqual(response.status_code, 302)
self.assertEqual(models.OverrideRPM.objects.count(), 0)
def test_submit_ok_should_set_do_not_delete(self):
client = Client()
create_user("user", perms=["pdc.overrides"])
client.login(username="user", password="user")
self.form_data['checks-1-included'] = 'on'
response = client.post('/override/manage/release-1.0/?package=bash', self.form_data)
self._populate_preview_form(response)
self.preview_form_data.update({
'form-0-comment': 'comment',
'form-0-do_not_delete': 'on',
})
response = client.post('/override/manage/release-1.0/?package=bash', self.preview_form_data)
self.assertEqual(response.status_code, 302)
self.assertEqual(models.OverrideRPM.objects.count(), 1)
orpm = models.OverrideRPM.objects.latest('id')
self.assertEqual(orpm.do_not_delete, True)
self.assertEqual(orpm.comment, 'comment')
self.assertEqual(orpm.include, True)
def test_submit_ok_should_remove_do_not_delete_and_delete(self):
orpm = models.OverrideRPM.objects.latest('id')
orpm.do_not_delete = True
orpm.save()
client = Client()
create_user("user", perms=["pdc.overrides"])
client.login(username="user", password="user")
self.form_data['checks-1-included'] = 'on'
response = client.post('/override/manage/release-1.0/?package=bash', self.form_data)
self._populate_preview_form(response)
del self.preview_form_data['form-0-do_not_delete']
response = client.post('/override/manage/release-1.0/?package=bash', self.preview_form_data)
self.assertEqual(response.status_code, 302)
self.assertEqual(models.OverrideRPM.objects.count(), 0)
def test_submit_ok_disable_override_without_compose_rpm__should_delete(self):
orpm = models.OverrideRPM.objects.latest('id')
orpm.rpm_name = 'bash-magic'
orpm.include = True
orpm.save()
client = Client()
create_user("user", perms=["pdc.overrides"])
client.login(username="user", password="user")
self.form_data.update({
'checks-1-included': 'on',
'checks-2-variant': 'Server',
'checks-2-arch': 'x86_64',
'checks-2-rpm_name': 'bash-magic',
'checks-2-rpm_arch': 'x86_64',
'checks-TOTAL_FORMS': 3,
})
response = client.post('/override/manage/release-1.0/?package=bash', self.form_data)
self.assertEqual(len(response.context['forms']), 1)
self._populate_preview_form(response)
response = client.post('/override/manage/release-1.0/?package=bash', self.preview_form_data)
self.assertEqual(response.status_code, 302)
self.assertEqual(models.OverrideRPM.objects.count(), 0)
class OverridePreviewBulkTestCase(TestCase):
fixtures = [
"pdc/apps/common/fixtures/test/sigkey.json",
"pdc/apps/package/fixtures/test/rpm.json",
"pdc/apps/release/fixtures/tests/release.json",
"pdc/apps/release/fixtures/tests/variant.json",
"pdc/apps/release/fixtures/tests/variant_arch.json",
"pdc/apps/compose/fixtures/tests/variant.json",
"pdc/apps/compose/fixtures/tests/variant_arch.json",
"pdc/apps/compose/fixtures/tests/compose_overriderpm.json",
"pdc/apps/compose/fixtures/tests/compose.json",
"pdc/apps/compose/fixtures/tests/compose_composerpm.json",
"pdc/apps/compose/fixtures/tests/compose_composerpm_more.json",
]
def setUp(self):
self.initial_form_data = {
'checks-0-variant': 'Server',
'checks-0-arch': 'x86_64',
'checks-0-rpm_name': 'bash',
'checks-0-rpm_arch': 'x86_64',
'checks-1-variant': 'Server',
'checks-1-arch': 'x86_64',
'checks-1-rpm_name': 'bash-completion',
'checks-1-rpm_arch': 'x86_64',
'checks-2-included': 'on',
'checks-2-variant': 'Server',
'checks-2-arch': 'x86_64',
'checks-2-rpm_name': 'bash-debuginfo',
'checks-2-rpm_arch': 'x86_64',
'checks-3-included': 'on',
'checks-3-variant': 'Server',
'checks-3-arch': 'x86_64',
'checks-3-rpm_name': 'bash-doc',
'checks-3-rpm_arch': 'x86_64',
'checks-4-variant': 'Server',
'checks-4-arch': 'x86_64',
'checks-4-rpm_name': 'bash-magic',
'checks-4-rpm_arch': 'x86_64',
'checks-MAX_NUM_FORMS': '1000',
'checks-INITIAL_FORMS': 5,
'checks-TOTAL_FORMS': 5,
'news-MAX_NUM_FORMS': '1000',
'news-INITIAL_FORMS': 1,
'news-TOTAL_FORMS': 0,
'vararch-MAX_NUM_FORMS': '1000',
'vararch-INITIAL_FORMS': 1,
'vararch-TOTAL_FORMS': 0,
'for_new_vararch-MAX_NUM_FORMS': '1000',
'for_new_vararch-INITIAL_FORMS': 0,
'for_new_vararch-TOTAL_FORMS': 0,
}
self.preview_form_data = {
'preview_submit': True,
'form-INITIAL_FORMS': 0,
'form-MAX_NUM_FORMS': 1000,
}
def test_more_changes_at_the_same_time(self):
client = Client()
create_user("user", perms=["pdc.overrides"])
client.login(username="user", password="user")
response = client.post('/override/manage/release-1.0/?package=bash', self.initial_form_data)
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.context['forms']), 5)
self.preview_form_data.update({
'initial_data': response.context['compressed'],
'form-TOTAL_FORMS': 5,
'form-0-action': 'create',
'form-0-variant': 'Server',
'form-0-arch': 'x86_64',
'form-0-rpm_name': 'bash',
'form-0-rpm_arch': 'x86_64',
'form-0-include': 'False',
'form-1-action': 'create',
'form-1-variant': 'Server',
'form-1-arch': 'x86_64',
            'form-1-rpm_name': 'bash-completion',
'form-1-rpm_arch': 'x86_64',
'form-1-include': 'False',
'form-2-action': 'delete',
'form-2-variant': 'Server',
'form-2-arch': 'x86_64',
'form-2-rpm_name': 'bash-debuginfo',
'form-2-rpm_arch': 'x86_64',
'form-2-include': 'False',
'form-3-action': 'delete',
'form-3-variant': 'Server',
'form-3-arch': 'x86_64',
'form-3-rpm_name': 'bash-doc',
'form-3-rpm_arch': 'x86_64',
'form-3-include': 'False',
'form-4-action': 'delete',
'form-4-variant': 'Server',
'form-4-arch': 'x86_64',
'form-4-rpm_name': 'bash-magic',
'form-4-rpm_arch': 'x86_64',
'form-4-include': 'False',
})
response = client.post('/override/manage/release-1.0/?package=bash', self.preview_form_data)
self.assertEqual(response.status_code, 302)
self.assertItemsEqual(
[o.export() for o in models.OverrideRPM.objects.all()],
[{"release_id": 'release-1.0', "variant": 'Server', "arch": 'x86_64',
"srpm_name": 'bash', "rpm_name": 'bash', "rpm_arch": 'x86_64',
"include": False, "comment": '', "do_not_delete": False},
{"release_id": 'release-1.0', "variant": 'Server', "arch": 'x86_64',
"srpm_name": 'bash', "rpm_name": 'bash-completion', "rpm_arch": 'x86_64',
"include": False, "comment": '', "do_not_delete": False}]
)
class UselessOverrideTestCase(TestCase):
fixtures = [
"pdc/apps/common/fixtures/test/sigkey.json",
"pdc/apps/package/fixtures/test/rpm.json",
"pdc/apps/release/fixtures/tests/release.json",
"pdc/apps/release/fixtures/tests/variant.json",
"pdc/apps/release/fixtures/tests/variant_arch.json",
"pdc/apps/compose/fixtures/tests/variant.json",
"pdc/apps/compose/fixtures/tests/variant_arch.json",
"pdc/apps/compose/fixtures/tests/compose_overriderpm.json",
"pdc/apps/compose/fixtures/tests/compose.json",
"pdc/apps/compose/fixtures/tests/compose_composerpm.json",
]
def setUp(self):
self.release = release_models.Release.objects.latest('id')
def test_delete_unused_include_override(self):
orpm = models.OverrideRPM.objects.create(release=self.release,
variant='Server',
arch='x86_64',
srpm_name='bash',
rpm_name='bash',
rpm_arch='x86_64',
include=True)
client = Client()
with mock.patch('sys.stdout', new_callable=StringIO) as out:
response = client.get('/override/manage/release-1.0/', {'package': 'bash'})
self.assertEqual(response.context['useless_overrides'], [])
self.assertIn('NOTICE', out.getvalue())
self.assertIn(str(orpm), out.getvalue())
self.assertEqual(models.OverrideRPM.objects.count(), 1)
def test_delete_unused_exclude_override(self):
orpm = models.OverrideRPM.objects.create(release=self.release,
variant='Server',
arch='x86_64',
srpm_name='bash',
rpm_name='bash-missing',
rpm_arch='x86_64',
include=False)
client = Client()
with mock.patch('sys.stdout', new_callable=StringIO) as out:
response = client.get('/override/manage/release-1.0/', {'package': 'bash'})
self.assertEqual(response.context['useless_overrides'], [])
self.assertIn('NOTICE', out.getvalue())
self.assertIn(str(orpm), out.getvalue())
self.assertEqual(models.OverrideRPM.objects.count(), 1)
def test_delete_unused_exclude_override_on_new_variant_arch(self):
orpm = models.OverrideRPM.objects.create(release=self.release,
variant='Server',
arch='x86_64',
srpm_name='bash',
rpm_name='bash',
rpm_arch='rpm_arch',
include=False)
client = Client()
with mock.patch('sys.stdout', new_callable=StringIO) as out:
response = client.get('/override/manage/release-1.0/', {'package': 'bash'})
self.assertEqual(response.context['useless_overrides'], [])
self.assertIn('NOTICE', out.getvalue())
self.assertIn(str(orpm), out.getvalue())
self.assertEqual(models.OverrideRPM.objects.count(), 1)
def test_do_not_delete_unused_include_override(self):
orpm = models.OverrideRPM.objects.create(release=self.release,
variant='Server',
arch='x86_64',
srpm_name='bash',
rpm_name='bash',
rpm_arch='x86_64',
include=True,
do_not_delete=True)
client = Client()
response = client.get('/override/manage/release-1.0/', {'package': 'bash'})
self.assertEqual(response.context['useless_overrides'], [orpm])
self.assertEqual(models.OverrideRPM.objects.count(), 2)
def test_do_not_delete_unused_exclude_override(self):
orpm = models.OverrideRPM.objects.create(release=self.release,
variant='Server',
arch='x86_64',
srpm_name='bash',
rpm_name='bash-missing',
rpm_arch='x86_64',
include=False,
do_not_delete=True)
client = Client()
response = client.get('/override/manage/release-1.0/', {'package': 'bash'})
self.assertEqual(response.context['useless_overrides'], [orpm])
self.assertEqual(models.OverrideRPM.objects.count(), 2)
def test_do_not_delete_unused_exclude_override_on_new_variant_arch(self):
orpm = models.OverrideRPM.objects.create(release=self.release,
variant='Server',
arch='x86_64',
srpm_name='bash',
rpm_name='bash',
rpm_arch='rpm_arch',
include=False,
do_not_delete=True)
client = Client()
response = client.get('/override/manage/release-1.0/', {'package': 'bash'})
self.assertEqual(response.context['useless_overrides'], [orpm])
self.assertEqual(models.OverrideRPM.objects.count(), 2)
def test_update_unused_override_when_creating_conflict(self):
orpm = models.OverrideRPM.objects.create(release=self.release,
variant='Server',
arch='x86_64',
srpm_name='bash',
rpm_name='bash',
rpm_arch='x86_64',
include=True,
do_not_delete=True)
client = Client()
create_user("user", perms=["pdc.overrides"])
client.login(username="user", password="user")
response = client.get('/override/manage/release-1.0/', {'package': 'bash'})
self.assertEqual(response.context['useless_overrides'], [orpm])
form_data = {
'checks-0-variant': 'Server',
'checks-0-arch': 'x86_64',
'checks-0-rpm_name': 'bash',
'checks-0-rpm_arch': 'x86_64',
'checks-MAX_NUM_FORMS': '1000',
'checks-INITIAL_FORMS': 1,
'checks-TOTAL_FORMS': 1,
'news-MAX_NUM_FORMS': '1000',
'news-INITIAL_FORMS': 1,
'news-TOTAL_FORMS': 0,
'vararch-MAX_NUM_FORMS': '1000',
'vararch-INITIAL_FORMS': 1,
'vararch-TOTAL_FORMS': 0,
'for_new_vararch-MAX_NUM_FORMS': '1000',
'for_new_vararch-INITIAL_FORMS': 0,
'for_new_vararch-TOTAL_FORMS': 0,
}
response = client.post('/override/manage/release-1.0/?package=bash', form_data)
self.assertContains(response, 'warning')
self.assertContains(response, 'Will modify override with do_not_delete set.')
preview_data = {
'preview_submit': True,
'form-INITIAL_FORMS': 0,
'form-MAX_NUM_FORMS': 1000,
'form-TOTAL_FORMS': 1,
'initial_data': response.context['compressed'],
'form-0-action': 'create',
'form-0-variant': 'Server',
'form-0-arch': 'x86_64',
'form-0-rpm_name': 'bash',
'form-0-rpm_arch': 'x86_64',
'form-0-include': 'False',
}
response = client.post('/override/manage/release-1.0/?package=bash', preview_data)
self.assertEqual(response.status_code, 302)
orpm = models.OverrideRPM.objects.latest('id')
self.assertFalse(orpm.include)
| mit | 1,441,402,328,042,708,700 | 49.9375 | 125 | 0.578668 | false |
was4444/chromium.src | third_party/WebKit/Tools/Scripts/webkitpy/tool/commands/flakytests_unittest.py | 1 | 3178 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import flakytests
from webkitpy.common.checkout.scm.scm_mock import MockSCM
from webkitpy.layout_tests.layout_package import bot_test_expectations
from webkitpy.layout_tests.port import builders
from webkitpy.tool.commands.commandtest import CommandsTest
from webkitpy.tool.mocktool import MockTool, MockOptions
class FakeBotTestExpectations(object):
def expectation_lines(self, only_ignore_very_flaky=False):
return []
class FakeBotTestExpectationsFactory(object):
FAILURE_MAP = {"A": "AUDIO", "C": "CRASH", "F": "TEXT", "I": "IMAGE", "O": "MISSING",
"N": "NO DATA", "P": "PASS", "T": "TIMEOUT", "Y": "NOTRUN", "X": "SKIP",
"Z": "IMAGE+TEXT", "K": "LEAK"}
def _expectations_from_test_data(self, builder, test_data):
test_data[bot_test_expectations.ResultsJSON.FAILURE_MAP_KEY] = self.FAILURE_MAP
json_dict = {
builder: test_data,
}
results = bot_test_expectations.ResultsJSON(builder, json_dict)
return bot_test_expectations.BotTestExpectations(results, builders._exact_matches[builder]["specifiers"])
def expectations_for_builder(self, builder):
if builder == 'foo-builder':
return self._expectations_from_test_data(builder, {
'tests': {
'pass.html': {'results': [[2, 'FFFP']], 'expected': 'PASS'},
}
})
if builder == 'bar-builder':
return self._expectations_from_test_data(builder, {
'tests': {
'pass.html': {'results': [[2, 'TTTP']], 'expected': 'PASS'},
}
})
return FakeBotTestExpectations()
class FlakyTestsTest(CommandsTest):
def test_merge_lines(self):
command = flakytests.FlakyTests()
factory = FakeBotTestExpectationsFactory()
old_builders = builders._exact_matches
builders._exact_matches = {
"foo-builder": {"port_name": "dummy-port", "specifiers": ['Linux', 'Release']},
"bar-builder": {"port_name": "dummy-port", "specifiers": ['Mac', 'Debug']},
}
try:
lines = command._collect_expectation_lines(['foo-builder', 'bar-builder'], factory)
self.assertEqual(len(lines), 1)
self.assertEqual(lines[0].expectations, ['TEXT', 'TIMEOUT', 'PASS'])
self.assertEqual(lines[0].specifiers, ['Mac', 'Linux'])
finally:
builders._exact_matches = old_builders
def test_integration(self):
command = flakytests.FlakyTests()
tool = MockTool()
command.expectations_factory = FakeBotTestExpectationsFactory
options = MockOptions(upload=True)
expected_stdout = flakytests.FlakyTests.OUTPUT % (
flakytests.FlakyTests.HEADER,
'',
flakytests.FlakyTests.FLAKINESS_DASHBOARD_URL % '') + '\n'
self.assert_execute_outputs(command, options=options, tool=tool, expected_stdout=expected_stdout)
| bsd-3-clause | 5,665,240,591,855,856,000 | 38.234568 | 113 | 0.614223 | false |
openstack/neutron-lib | neutron_lib/agent/extension.py | 1 | 1809 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
class AgentExtension(object, metaclass=abc.ABCMeta):
"""Define stable abstract interface for agent extensions.
An agent extension extends the agent core functionality.
"""
@abc.abstractmethod
def initialize(self, connection, driver_type):
"""Perform agent core resource extension initialization.
:param connection: RPC connection that can be reused by the extension
to define its RPC endpoints
:param driver_type: String that defines the agent type to the
extension. Can be used to choose the right backend
implementation.
Called after all extensions have been loaded.
No resource (port, policy, router, etc.) handling will be called before
this method.
"""
def consume_api(self, agent_api):
"""Consume the AgentAPI instance from the AgentExtensionsManager.
Allows an extension to gain access to resources internal to the
neutron agent and otherwise unavailable to the extension. Examples of
such resources include bridges, ports, and routers.
:param agent_api: An instance of an agent-specific API.
"""
| apache-2.0 | 1,100,415,168,997,778,700 | 39.2 | 79 | 0.679381 | false |
Daniel15/PathPicker | src/test.py | 1 | 7076 | # Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
#
# @nolint
import unittest
import os
import format
import parse
fileTestCases = [{
'input': 'html/js/hotness.js',
'match': True,
'file': 'html/js/hotness.js',
'num': 0
}, {
'input': '/absolute/path/to/something.txt',
'match': True,
'file': '/absolute/path/to/something.txt',
'num': 0
}, {
'input': '/html/js/hotness.js42',
'match': True,
'file': '/html/js/hotness.js42',
'num': 0
}, {
'input': '/html/js/hotness.js',
'match': True,
'file': '/html/js/hotness.js',
'num': 0
}, {
'input': './asd.txt:83',
'match': True,
'file': './asd.txt',
'num': 83
}, {
'input': 'flib/asd/ent/berkeley/two.py-22',
'match': True,
'file': 'flib/asd/ent/berkeley/two.py',
'num': 22
}, {
'input': 'flib/foo/bar',
'match': True,
'file': 'flib/foo/bar',
'num': 0
}, {
'input': 'flib/foo/bar ', # note space
'match': True,
'file': 'flib/foo/bar',
'num': 0
}, {
'input': 'foo/b ',
'match': True,
'file': 'foo/b',
'num': 0
}, {
'input': 'foo/bar/baz/',
'match': False
}, {
'input': 'flib/ads/ads.thrift',
'match': True,
'file': 'flib/ads/ads.thrift',
'num': 0
}, {
'input': 'banana hanana Wilde/ads/story.m',
'match': True,
'file': 'Wilde/ads/story.m',
'num': 0
}, {
'input': 'flib/asd/asd.py two/three/four.py',
'match': True,
'file': 'flib/asd/asd.py',
'num': 0
}, {
'input': 'asd/asd/asd/ 23',
'match': False
}, {
'input': 'foo/bar/TARGETS:23',
'match': True,
'num': 23,
'file': 'foo/bar/TARGETS'
}, {
'input': 'foo/bar/TARGETS-24',
'match': True,
'num': 24,
'file': 'foo/bar/TARGETS'
}, {
'input':
'fbcode/search/places/scorer/PageScorer.cpp:27:46:#include "search/places/scorer/linear_scores/MinutiaeVerbScorer.h',
'match': True,
'num': 27,
'file': 'fbcode/search/places/scorer/PageScorer.cpp'
}, {
# Pretty intense case
'input':
'fbcode/search/places/scorer/TARGETS:590:28: srcs = ["linear_scores/MinutiaeVerbScorer.cpp"]',
'match': True,
'num': 590,
'file': 'fbcode/search/places/scorer/TARGETS'
}, {
'input':
'fbcode/search/places/scorer/TARGETS:1083:27: "linear_scores/test/MinutiaeVerbScorerTest.cpp"',
'match': True,
'num': 1083,
'file': 'fbcode/search/places/scorer/TARGETS'
}, {
'input': '~/foo/bar/something.py',
'match': True,
'num': 0,
'file': '~/foo/bar/something.py'
}, {
'input': '~/foo/bar/inHomeDir.py:22',
'match': True,
'num': 22,
'file': '~/foo/bar/inHomeDir.py'
}, {
'input': 'blarge assets/retina/[email protected]',
'match': True,
'num': 0,
'file': 'assets/retina/[email protected]'
}, {
'input': '~/assets/retina/[email protected]',
'match': True,
'num': 0,
'file': '~/assets/retina/[email protected]'
}, {
'input': 'So.many.periods.txt',
'match': True,
'num': 0,
'file': 'So.many.periods.txt'
}, {
'input': 'SO.MANY.PERIODS.TXT',
'match': True,
'num': 0,
'file': 'SO.MANY.PERIODS.TXT'
}, {
'input': 'blarg blah So.MANY.PERIODS.TXT:22',
'match': True,
'num': 0, # we ignore the number here
'file': 'So.MANY.PERIODS.TXT'
}, {
'input': 'SO.MANY&&PERIODSTXT',
'match': False
}]
prependDirTestCases = [
{
'in': 'home/absolute/path.py',
'out': '/home/absolute/path.py'
}, {
'in': '~/www/asd.py',
'out': '~/www/asd.py'
}, {
'in': 'www/asd.py',
'out': '~/www/asd.py'
}, {
'in': 'foo/bar/baz/asd.py',
'out': parse.PREPEND_PATH + 'foo/bar/baz/asd.py'
}, {
'in': 'a/foo/bar/baz/asd.py',
'out': parse.PREPEND_PATH + 'foo/bar/baz/asd.py'
}, {
'in': 'b/foo/bar/baz/asd.py',
'out': parse.PREPEND_PATH + 'foo/bar/baz/asd.py'
}, {
'in': '',
'out': ''
}]
class TestParseFunction(unittest.TestCase):
def testPrependDir(self):
for testCase in prependDirTestCases:
inFile = testCase['in']
result = parse.prependDir(inFile)
expected = testCase['out']
if inFile[0:2] == '~/':
expected = os.path.expanduser(expected)
self.assertEqual(expected, result)
print 'Tested %d dir cases.' % len(prependDirTestCases)
def testFileFuzz(self):
befores = ['M ', 'Modified: ', 'Changed: ', '+++ ',
'Banana asdasdoj pjo ']
afters = [' * Adapts AdsErrorCodestore to something',
':0:7: var AdsErrorCodeStore', ' jkk asdad']
for testCase in fileTestCases:
for before in befores:
for after in afters:
testInput = '%s%s%s' % (before, testCase['input'], after)
thisCase = testCase.copy()
thisCase['input'] = testInput
self.checkFileResult(thisCase)
print 'Tested %d cases for file fuzz.' % len(fileTestCases)
def testUnresolvable(self):
fileLine = ".../something/foo.py"
result = parse.matchLine(fileLine)
lineObj = format.LineMatch(fileLine, result, 0)
self.assertTrue(
not lineObj.isResolvable(),
'"%s" should not be resolvable' % fileLine
)
print 'Tested unresolvable case.'
def testResolvable(self):
toCheck = [case for case in fileTestCases if case['match']]
for testCase in toCheck:
result = parse.matchLine(testCase['input'])
lineObj = format.LineMatch(testCase['input'], result, 0)
self.assertTrue(
lineObj.isResolvable(),
'Line "%s" was not resolvable' % testCase['input']
)
print 'Tested %d resolvable cases.' % len(toCheck)
def testFileMatch(self):
for testCase in fileTestCases:
self.checkFileResult(testCase)
print 'Tested %d cases.' % len(fileTestCases)
def checkFileResult(self, testCase):
result = parse.matchLine(testCase['input'])
if not result:
self.assertFalse(testCase['match'],
'Line "%s" did not match any regex' %
testCase['input'])
return
file, num, match = result
self.assertTrue(testCase['match'], 'Line "%s" did match' %
testCase['input'])
self.assertEqual(testCase['file'], file, 'files not equal |%s| |%s|' %
(testCase['file'], file))
self.assertEqual(testCase['num'], num, 'num matches not equal %d %d for %s'
% (testCase['num'], num, testCase.get('input')))
if __name__ == '__main__':
unittest.main()
| bsd-3-clause | 9,048,890,270,372,936,000 | 27.304 | 121 | 0.539994 | false |
dontnod/weblate | weblate/fonts/validators.py | 1 | 1311 | # -*- coding: utf-8 -*-
#
# Copyright © 2012 - 2019 Michal Čihař <[email protected]>
#
# This file is part of Weblate <https://weblate.org/>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
import os
from django.core.exceptions import ValidationError
from django.utils.translation import ugettext as _
from weblate.fonts.utils import get_font_name
def validate_font(value):
"""Simple extension based validation for uploads."""
ext = os.path.splitext(value.name)[1]
if ext.lower() not in (".ttf", ".otf"):
raise ValidationError(_("Unsupported file format."))
try:
get_font_name(value)
except OSError:
raise ValidationError(_("Unsupported file format."))
return value
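# Hypothetical usage sketch (the field name is illustrative): the validator
# is intended to be attached to an upload field, e.g.
#
#     font = models.FileField(validators=[validate_font])
#
# Uploads that are not .ttf/.otf, or that get_font_name() cannot parse,
# are rejected with a ValidationError.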
| gpl-3.0 | 6,577,466,489,244,068,000 | 33.421053 | 72 | 0.720948 | false |
jmartinm/invenio-records | invenio_records/config.py | 1 | 1717 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2014, 2015 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Records configuration."""
from __future__ import unicode_literals
from .models import RecordMetadata as RecordMetadataModel
RECORDS_BREADCRUMB_TITLE_KEY = 'title.title'
"""Key used to extract the breadcrumb title from the record."""
RECORD_DOCUMENT_NAME_GENERATOR = ('invenio.modules.records.utils:'
'default_name_generator')
RECORD_DOCUMENT_VIEWRESTR_POLICY = 'ANY'
"""When a document belongs to more than one record, and this policy is set to
`ALL` the user must be authorized to view all the records to continue checking
the access rights of the document. If the policy is set to `ANY` (default),
then the user needs to be authorized to view at least one record in order to
continue checking the document specific access rights."""
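# Illustrative example (hypothetical records): for a document attached to
# records A and B where the user may only view A, 'ANY' lets the check
# proceed to the document-specific access rights, while 'ALL' denies access.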
RECORD_KEY_ALIASSES = {
'recid': 'control_number',
'980': 'collections',
'980__a': 'collections.primary',
'980__b': 'collections.secondary',
}
| gpl-2.0 | 383,431,581,862,431,300 | 37.155556 | 78 | 0.728596 | false |
darencard/ContigAnnotator/ensembl_orthologs.py | 1 | 2122 | #!/usr/bin/env python
import optparse
usage_line = """
A script to extract orthologous Ensembl IDs from a genome-of-interest using a list \
of Ensembl IDs already in hand. Input is a list of Ensembl IDs and a "database" file \
downloaded from Ensembl that has the query Ensembl IDs in one column and the target/subject \
Ensembl IDs in another column. The user can also specify which column contains the \
query and the target Ensembl IDs and an output file name (tab-delimited text file). \
python ensembl_orthologs.py --query <query_list> --database <ensembl_database> \
-q <query_column> -s <subject_column> --output <output.txt>
"""
usage = usage_line
parser = optparse.OptionParser(usage=usage)
parser.add_option("--query", action= "store", type= "string", dest="query", help="""The query list of Ensembl IDs to find orthologs for""")
parser.add_option("--database", action="store", type= "string", dest="database", help="""A tab-delimited file with query IDs and subject IDs obtained from BioMart""")
parser.add_option("-q", action = "store", type = "string", dest = "q", help = """Column number where query IDs are located in "database" file (1, 2, ..., N)""")
parser.add_option("-s", action = "store", type = "string", dest = "s", help = """Column number where subject IDs are located in "database" file (1, 2, ..., N)""")
parser.add_option("--output", action = "store", type = "string", dest = "output" , help = """Output file to write results""", default = "output.txt")
options, args = parser.parse_args()
if __name__ == '__main__':
db_dict = {}
for line in open(options.database, "r"):
if not line.strip().startswith("#"):
record = line.rstrip().split("\t")
q = str(options.q)
s = str(options.s)
query = int(q)-1
subject = int(s)-1
if len(record) == 2:
db_dict[record[query]] = record[subject]
else:
db_dict[record[query]] = "NA"
out = open(options.output, "w")
for line in open(options.query, "r"):
if not line.strip().startswith("#"):
record = line.rstrip()
value = db_dict[record]
outline = record+"\t"+value+"\n"
out.write(outline)
out.close()
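# Hypothetical input sketch (IDs are illustrative): with -q 1 -s 2 the
# --database file is expected to be tab-delimited BioMart output such as
#
#     ENSG00000139618    ENSGALG00000017073
#     ENSG00000157764    ENSGALG00000012865
#
# Rows with a single column map their query ID to "NA"; query IDs absent
# from the database altogether raise a KeyError in the loop above.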
| gpl-2.0 | 6,088,053,830,567,926,000 | 43.229167 | 166 | 0.669651 | false |
pyblub/pyload | pyload/api/base.py | 1 | 12584 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from types import MethodType
from future import standard_library
# from abc import ABCMeta, abstractmethod
from future.builtins import object
from pyload.core.datatype.base import DownloadState, DownloadStatus, Permission
from pyload.core.datatype.user import User
from pyload.utils.convert import to_str
# from future.utils import with_metaclass
standard_library.install_aliases()
# Workaround to let code-completion think, this is subclass of AbstractApi
AbstractApi = object
class BaseApi(AbstractApi):
def __init__(self, core, user):
# Only for auto completion, this class can not be instantiated
from pyload.core import Core
from pyload.core.datatype.user import User
self.pyload = core
self._ = core._
self.user = user
# No instantiating!
raise Exception
# TODO: Make AbstractApi... abstract!
# class AbstractApi(with_metaclass(ABCMeta, object)):
class AbstractApi(object):
# @abstractmethod
def add_links(self, pid, links):
pass
# @abstractmethod
def add_local_file(self, pid, name, path):
pass
# @abstractmethod
def add_package(self, name, links, password):
pass
# @abstractmethod
def add_package_child(self, name, links, password, root, paused):
pass
# @abstractmethod
def addPackageP(self, name, links, password, paused):
pass
# @abstractmethod
def add_user(self, username, password):
pass
# @abstractmethod
def check_container(self, filename, data):
pass
# @abstractmethod
def check_html(self, html, url):
pass
# @abstractmethod
def check_links(self, links):
pass
# @abstractmethod
def create_account(self, plugin, loginname, password):
pass
# @abstractmethod
def create_package(self, name, folder, root,
password, site, comment, paused):
pass
# @abstractmethod
# def delete_config(self, plugin):
# pass
# @abstractmethod
def delete_files(self, fids):
pass
# @abstractmethod
def delete_packages(self, pids):
pass
# @abstractmethod
def find_files(self, pattern):
pass
# @abstractmethod
def find_packages(self, tags):
pass
# @abstractmethod
def avail_space(self):
pass
# @abstractmethod
def generate_download_link(self, fid, timeout):
pass
# @abstractmethod
def generate_packages(self, links):
pass
# @abstractmethod
def get_account_info(self, aid, plugin, refresh):
pass
# @abstractmethod
def get_account_types(self):
pass
# @abstractmethod
def get_accounts(self):
pass
# @abstractmethod
def get_addon_handler(self):
pass
# @abstractmethod
def get_all_files(self):
pass
# @abstractmethod
def get_all_info(self):
pass
# @abstractmethod
def get_all_user_data(self):
pass
# @abstractmethod
# def get_available_plugins(self):
# pass
# @abstractmethod
# def get_config(self):
# pass
# @abstractmethod
def get_config_value(self, section, option):
pass
# @abstractmethod
# def get_core_config(self):
# pass
# @abstractmethod
def get_file_info(self, fid):
pass
# @abstractmethod
def get_file_tree(self, pid, full):
pass
# @abstractmethod
def get_filtered_file_tree(self, pid, full, state):
pass
# @abstractmethod
def get_filtered_files(self, state):
pass
# @abstractmethod
def get_info_by_plugin(self, plugin):
pass
# @abstractmethod
def get_interaction_tasks(self, mode):
pass
# @abstractmethod
def get_log(self, offset):
pass
# @abstractmethod
def get_package_content(self, pid):
pass
# @abstractmethod
def get_package_info(self, pid):
pass
# @abstractmethod
# def get_plugin_config(self):
# pass
# @abstractmethod
def get_progress_info(self):
pass
# @abstractmethod
def get_quota(self):
pass
# @abstractmethod
def get_server_version(self):
pass
# @abstractmethod
def get_status_info(self):
pass
# @abstractmethod
def get_user_data(self):
pass
# @abstractmethod
# def get_ws_address(self):
# pass
# @abstractmethod
def invoke_addon(self, plugin, func, func_args):
pass
# @abstractmethod
def invoke_addon_handler(self, plugin, func, pid_or_fid):
pass
# @abstractmethod
def is_interaction_waiting(self, mode):
pass
# @abstractmethod
# def load_config(self, name):
# pass
# @abstractmethod
def login(self, username, password):
pass
# @abstractmethod
def move_files(self, fids, pid):
pass
# @abstractmethod
def move_package(self, pid, root):
pass
# @abstractmethod
def order_files(self, fids, pid, position):
pass
# @abstractmethod
def order_package(self, pids, position):
pass
# @abstractmethod
def parse_links(self, links):
pass
# @abstractmethod
def pause_server(self):
pass
# @abstractmethod
def poll_results(self, rid):
pass
# @abstractmethod
def exit(self):
pass
# @abstractmethod
def recheck_package(self, pid):
pass
# @abstractmethod
def remove_account(self, account):
pass
# @abstractmethod
def remove_files(self, fids):
pass
# @abstractmethod
def remove_packages(self, pids):
pass
# @abstractmethod
def remove_user(self, uid):
pass
# @abstractmethod
def restart(self):
pass
# @abstractmethod
def restart_failed(self):
pass
# @abstractmethod
def restart_file(self, fid):
pass
# @abstractmethod
def restart_package(self, pid):
pass
# @abstractmethod
# def save_config(self, config):
# pass
# @abstractmethod
def search_suggestions(self, pattern):
pass
# @abstractmethod
def set_config_value(self, section, option, value):
pass
# @abstractmethod
def set_interaction_result(self, iid, result):
pass
# @abstractmethod
def set_package_paused(self, pid, paused):
pass
# @abstractmethod
def set_password(self, username, old_password, new_password):
pass
# @abstractmethod
def stop_all_downloads(self):
pass
# @abstractmethod
def stop_downloads(self, fids):
pass
# @abstractmethod
def toggle_pause(self):
pass
# @abstractmethod
def toggle_reconnect(self):
pass
# @abstractmethod
def unpause_server(self):
pass
# @abstractmethod
def update_account(self, aid, plugin, loginname, password):
pass
# @abstractmethod
def update_account_info(self, account):
pass
# @abstractmethod
def update_package(self, pack):
pass
# @abstractmethod
def update_user_data(self, data):
pass
# @abstractmethod
def upload_container(self, filename, data):
pass
# contains function names mapped to their permissions
# unlisted functions are for admins only
perm_map = {}
# decorator only called on init, never initialized, so has no effect on runtime
def requireperm(bits):
class _Dec(object):
def __new__(cls, func, *args, **kwargs):
perm_map[func.__name__] = bits
return func
return _Dec
statemap = {
DownloadState.All:
frozenset(getattr(DownloadStatus, x)
for x in dir(DownloadStatus) if not x.startswith('_')),
DownloadState.Finished:
frozenset((DownloadStatus.Finished, DownloadStatus.Skipped)),
DownloadState.Unfinished: None, # set below
DownloadState.Failed:
frozenset((DownloadStatus.Failed, DownloadStatus.TempOffline,
DownloadStatus.Aborted, DownloadStatus.NotPossible,
DownloadStatus.FileMismatch)),
DownloadState.Unmanaged: None,
}
statemap[DownloadState.Unfinished] = frozenset(
statemap[DownloadState.All].difference(statemap[DownloadState.Finished]))
def statestring(state):
return ','.join(map(to_str, statemap[state]))
class Api(AbstractApi):
"""
    **pyLoad's API**
    This API is accessible either internally via core.api, or remotely
    through the websocket backend or the JSON API.
    See the Thrift specification file rpc/thriftbackend/pyload.thrift
    for information about data structures and which methods are usable with RPC.
    Most methods require specific permissions; look at the source code if you
    need to know which. These can be configured via the web interface.
    Admin users have all permissions, and are the only ones who can access
    the methods with no specific permission.
"""
EXTERNAL = AbstractApi # let the json api know which methods are external
EXTEND = False # only extendable when set too true
def __init__(self, core):
self._ = core._
self.pyload = core
self.user_apis = {}
@property
def user(self):
return # TODO: return default user?
# @property
# def primary_uid(self):
# return self.user.primary if self.user else None
def has_access(self, obj):
"""Helper method to determine if a user has access to a resource.
        Works for any obj that provides an .owner attribute. The core admin
        always has access.
"""
return self.user is None or self.user.has_access(obj)
@classmethod
def extend(cls, api):
"""Takes all params from api and extends cls with it. Api class can be
removed afterwards.
:param api: Class with methods to extend
"""
if cls.EXTEND:
for name, func in api.__dict__.items():
if name.startswith('_'):
continue
setattr(cls, name, MethodType(func, cls))
return cls.EXTEND
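    # Hypothetical usage sketch (names are illustrative, not part of pyload):
    # any class whose public methods should be grafted onto this Api can be
    # passed to extend(), which is only honoured while EXTEND is True:
    #
    #     class ExtraApi(object):
    #         def my_call(self, arg):
    #             return arg
    #
    #     Api.EXTEND = True
    #     Api.extend(ExtraApi)   # core.api.my_call(...) becomes available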
def with_user_context(self, uid):
"""Returns a proxy version of the api, to call method in user context.
:param uid: user or userData instance or uid
:return: :class:`UserApi`
"""
if isinstance(uid, User):
uid = uid.uid
if uid not in self.user_apis:
user = self.pyload.db.get_user_data(uid=uid)
if not user: # TODO: anonymous user?
return
self.user_apis[uid] = UserApi(
self.pyload, User.from_user_data(self, user))
return self.user_apis[uid]
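    # Hypothetical usage sketch (uid 1 is illustrative): obtain a proxy bound
    # to one user and call any API method in that user's context:
    #
    #     user_api = core.api.with_user_context(1)
    #     if user_api is not None:
    #         user_api.get_quota()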
#############################
# Auth+User Information
#############################
@requireperm(Permission.All)
def login(self, username, password):
"""Login into pyLoad, this **must** be called when using rpc before any
methods can be used.
:param username:
:param password:
        :param remoteip: Omit this argument, it is only used internally
:return: bool indicating login was successful
"""
return True if self.check_auth(username, password) else False
def check_auth(self, username, password):
"""Check authentication and returns details.
:param username:
:param password:
:param remoteip:
:return: dict with info, empty when login is incorrect
"""
self.pyload.log.info(
self._("User '{0}' tries to log in").format(username))
return self.pyload.db.check_auth(username, password)
@staticmethod
def is_authorized(func, user):
"""Checks if the user is authorized for specific method.
:param func: function name
:param user: `User`
:return: boolean
"""
if user.is_admin():
return True
elif func in perm_map and user.has_permission(perm_map[func]):
return True
else:
return False
class UserApi(Api):
"""Proxy object for api that provides all methods in user context."""
def __init__(self, core, user):
super(UserApi, self).__init__(core)
self._user = user
def with_user_context(self, uid):
raise Exception('Not allowed')
@property
def user(self):
return self._user
| agpl-3.0 | 5,071,868,251,098,711,000 | 21.838475 | 79 | 0.607359 | false |
martindurant/starclassifier | ui/pysynphot/wavetable.py | 1 | 2684 | from __future__ import division
""" This module handles the wavecat.dat table presently used by the
synphot countrate task (and thus the ETC) to select an appropriate wavelength
set for a given obsmode. """
import re
import os
import numpy as N
import locations
class Wavetable(object):
""" Class to handle wavecat.dat initialization and access. (This class
may need a better name; wavetable and waveset are awfully close.)
Also, put the default waveset into this object with a key of NONE."""
def __init__(self, fname):
""" Instantiate a Wavetable from a file """
self.file=fname
self.lookup={}
self.setlookup={}
        fs = open(fname, mode='r')
lines = fs.readlines()
fs.close()
regx = re.compile(r'\S+', re.IGNORECASE)
for line in lines:
if not line.startswith("#"):
try:
[obm,coeff] = regx.findall(line)
self.lookup[obm] = coeff
self.setlookup[frozenset(obm.split(','))] = coeff
except ValueError:
raise ValueError("Error processing line: %s"%line)
def __getitem__(self, key):
"""Fairly smart lookup: if no exact match, find the most complete
match.
"""
ans=None
try:
#Try an exact match
ans = self.lookup[key]
except KeyError:
ans=None
#Try a setwise match.
#The correct key will be a subset of the input key.
setkey=set(key.split(','))
candidates=[]
for k in self.setlookup:
if k.issubset(setkey):
candidates.append(k)
#We may have 1, 0, or >1 candidates.
if len(candidates) == 1:
ans = self.setlookup[candidates[0]]
elif len(candidates) == 0:
raise KeyError("%s not found in %s; candidates:%s"%(setkey,self.file,str(candidates)))
elif len(candidates) > 1:
setlens=N.array([len(k) for k in candidates])
srtlen=setlens.argsort()
k,j=srtlen[-2:]
if setlens[k] == setlens[j]:
#It's really ambiguous
raise ValueError("Ambiguous key %s; candidates %s"%(setkey, candidates))
else:
#We have a winner
k=candidates[srtlen[-1]]
ans=self.setlookup[k]
return ans
wavecat_file=locations.wavecat
wavetable=Wavetable(wavecat_file)
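# Hypothetical lookup sketch (the obsmode string is illustrative): an exact
# key from wavecat.dat is returned directly; otherwise the most specific
# comma-separated subset of the requested obsmode wins, e.g.
#
#     wavetable['acs,hrc,f555w']
#
# A tie between equally specific candidates raises ValueError, and no
# matching candidate at all raises KeyError.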
| mit | -1,998,349,719,848,117,800 | 31.974684 | 102 | 0.525335 | false |
CanonicalLtd/landscape-client | landscape/client/broker/tests/test_client.py | 1 | 16179 | import mock
from twisted.internet import reactor
from twisted.internet.defer import Deferred
from landscape.lib.twisted_util import gather_results
from landscape.client.tests.helpers import (
LandscapeTest, DEFAULT_ACCEPTED_TYPES)
from landscape.client.broker.tests.helpers import BrokerClientHelper
from landscape.client.broker.client import (
BrokerClientPlugin, HandlerNotFoundError)
class BrokerClientTest(LandscapeTest):
helpers = [BrokerClientHelper]
def test_ping(self):
"""
The L{BrokerClient.ping} method always returns C{True}.
"""
self.assertTrue(self.client.ping())
def test_add(self):
"""
The L{BrokerClient.add} method registers a new plugin
plugin, and calls the plugin's C{register} method.
"""
plugin = BrokerClientPlugin()
self.client.add(plugin)
self.assertIs(plugin.client, self.client)
def test_registering_plugin_gets_session_id(self):
"""
As part of the BrokerClientPlugin registration process, a session ID
is generated.
"""
plugin = BrokerClientPlugin()
self.client.add(plugin)
self.assertIsNot(None, plugin._session_id)
def test_registered_plugin_uses_correct_scope(self):
"""
When we register a plugin we use that plugin's scope variable when
getting a session id.
"""
test_session_id = self.successResultOf(
self.client.broker.get_session_id(scope="test"))
plugin = BrokerClientPlugin()
plugin.scope = "test"
self.client.add(plugin)
self.assertEqual(test_session_id, plugin._session_id)
def test_resynchronizing_out_of_scope(self):
"""
        When a 'resynchronize' event happens and the plugin scope is not part
of the scopes that were passed, BrokerClientPlugin succeeds.
"""
plugin = BrokerClientPlugin()
plugin.scope = "foo"
self.client.add(plugin)
deferred = self.client_reactor.fire("resynchronize", scopes=["bar"])[0]
self.assertIsNone(self.successResultOf(deferred))
def test_resynchronizing_refreshes_session_id(self):
"""
        When a 'resynchronize' event fires, a new session ID is acquired as the
old one will be removed.
"""
plugin = BrokerClientPlugin()
plugin.scope = "test"
self.client.add(plugin)
session_id = plugin._session_id
self.mstore.drop_session_ids()
self.client_reactor.fire("resynchronize")
self.assertNotEqual(session_id, plugin._session_id)
def test_resynchronize_calls_reset(self):
plugin = BrokerClientPlugin()
plugin.scope = "test"
self.client.add(plugin)
plugin._reset = mock.Mock()
self.client_reactor.fire("resynchronize")
plugin._reset.assert_called_once_with()
def test_get_plugins(self):
"""
The L{BrokerClient.get_plugins} method returns a list
of registered plugins.
"""
plugins = [BrokerClientPlugin(), BrokerClientPlugin()]
self.client.add(plugins[0])
self.client.add(plugins[1])
self.assertEqual(self.client.get_plugins(), plugins)
def test_get_plugins_returns_a_copy(self):
"""
The L{BrokerClient.get_plugins} method returns a copy of the list
of registered plugins, so user can't can't modify our internals.
"""
plugins = self.client.get_plugins()
plugins.append(BrokerClientPlugin())
self.assertEqual(self.client.get_plugins(), [])
def test_get_named_plugin(self):
"""
If a plugin has a C{plugin_name} attribute, it is possible to look it
up by name after adding it to the L{BrokerClient}.
"""
plugin = BrokerClientPlugin()
plugin.plugin_name = "foo"
self.client.add(plugin)
self.assertEqual(self.client.get_plugin("foo"), plugin)
def test_run_interval(self):
"""
If a plugin has a C{run} method, the reactor will call it every
C{run_interval} seconds.
"""
plugin = BrokerClientPlugin()
plugin.run = mock.Mock()
self.client.add(plugin)
self.client_reactor.advance(plugin.run_interval)
self.client_reactor.advance(plugin.run_interval)
self.assertEqual(2, plugin.run.call_count)
def test_run_interval_log_exceptions(self):
"""
If a plugin has a run method, the reactor will call it every
run_interval, but will stop and log if it raises unhandled exceptions.
"""
class RunFailure(Exception):
pass
# log helper should not complain on the error we're testing
self.log_helper.ignore_errors("BrokerClientPlugin.*")
plugin = BrokerClientPlugin()
plugin.run = mock.Mock(side_effect=RunFailure("oh noes!"))
self.client.add(plugin)
self.client_reactor.advance(plugin.run_interval)
# We expect this exception to stay uncaught, so flush it to continue.
self.assertEqual(1, len(self.flushLoggedErrors(RunFailure)))
plugin.run.assert_called_with()
# The fake reactor also logs errors in test, so check for this specific
# message entry that would be present on a live client.
self.assertIn(
"ERROR: BrokerClientPlugin raised an uncaught exception",
self.logfile.getvalue())
def test_run_interval_blocked_during_resynch(self):
"""
During resynchronisation we want to block the C{run} method so that we
don't send any new messages with old session ids, or with state in an
indeterminate condition.
"""
runs = []
plugin = BrokerClientPlugin()
plugin.run_immediately = True
plugin.run = lambda: runs.append(True)
self.client.add(plugin)
# At this point the plugin has already run once and has scheduled as
# second run in plugin.run_interval seconds.
self.assertEquals(runs, [True])
# Mock out get_session_id so that it doesn't complete synchronously
deferred = Deferred()
self.client.broker.get_session_id = lambda scope: deferred
self.client_reactor.fire("resynchronize")
# The scheduled run has been cancelled, and even if plugin.run_interval
# seconds elapse the plugin won't run again.
self.client_reactor.advance(plugin.run_interval)
self.assertEquals(runs, [True])
# Finally get_session_id completes and the plugin runs again.
deferred.callback(123)
self.assertEquals(runs, [True, True])
@mock.patch("random.random")
def test_run_interval_staggered(self, mock_random):
"""
If a plugin has a run method and staggered_launch is set,
the launch gets delayed by a random factor.
"""
mock_random.return_value = 1.0
plugin = BrokerClientPlugin()
plugin.run_interval = 60
plugin.run = mock.Mock()
self.client.config.stagger_launch = 0.5
self.client.add(plugin)
self.client_reactor.advance(30)
self.assertEqual(0, plugin.run.call_count)
self.client_reactor.advance(60)
self.assertEqual(1, plugin.run.call_count)
self.client_reactor.advance(60)
self.assertEqual(2, plugin.run.call_count)
self.assertEqual(1, mock_random.call_count)
def test_run_immediately(self):
"""
If a plugin has a C{run} method and C{run_immediately} is C{True},
the plugin will be run immediately at registration.
"""
plugin = BrokerClientPlugin()
plugin.run_immediately = True
plugin.run = mock.Mock()
self.client.add(plugin)
plugin.run.assert_called_once_with()
def test_register_message(self):
"""
When L{BrokerClient.register_message} is called, the broker is notified
that the message type is now accepted.
"""
result1 = self.client.register_message("foo", lambda m: None)
result2 = self.client.register_message("bar", lambda m: None)
def got_result(result):
self.assertEqual(
self.exchanger.get_client_accepted_message_types(),
sorted(["bar", "foo"] + DEFAULT_ACCEPTED_TYPES))
return gather_results([result1, result2]).addCallback(got_result)
def test_dispatch_message(self):
"""
L{BrokerClient.dispatch_message} calls a previously-registered message
        handler and returns its value.
"""
message = {"type": "foo"}
handle_message = mock.Mock(return_value=123)
def dispatch_message(result):
self.assertEqual(self.client.dispatch_message(message), 123)
handle_message.assert_called_once_with(message)
result = self.client.register_message("foo", handle_message)
return result.addCallback(dispatch_message)
def test_dispatch_message_with_exception(self):
"""
L{BrokerClient.dispatch_message} gracefully logs exceptions raised
by message handlers.
"""
message = {"type": "foo"}
handle_message = mock.Mock(side_effect=ZeroDivisionError)
self.log_helper.ignore_errors("Error running message handler.*")
def dispatch_message(result):
self.assertIs(self.client.dispatch_message(message), None)
self.assertTrue("Error running message handler for type 'foo'" in
self.logfile.getvalue())
handle_message.assert_called_once_with(message)
result = self.client.register_message("foo", handle_message)
return result.addCallback(dispatch_message)
def test_dispatch_message_with_no_handler(self):
"""
L{BrokerClient.dispatch_message} raises an error if no handler was
found for the given message.
"""
error = self.assertRaises(HandlerNotFoundError,
self.client.dispatch_message, {"type": "x"})
self.assertEqual(str(error), "x")
def test_message(self):
"""
The L{BrokerClient.message} method dispatches a message and
        returns C{True} if a handler for it was found.
"""
message = {"type": "foo"}
handle_message = mock.Mock()
def dispatch_message(result):
self.assertEqual(self.client.message(message), True)
handle_message.assert_called_once_with(message)
result = self.client.register_message("foo", handle_message)
return result.addCallback(dispatch_message)
def test_message_with_no_handler(self):
"""
The L{BrokerClient.message} method returns C{False} if no
handler was found.
"""
message = {"type": "foo"}
self.assertEqual(self.client.message(message), False)
def test_exchange(self):
"""
The L{BrokerClient.exchange} method calls C{exchange} on all
plugins, if available.
"""
plugin = BrokerClientPlugin()
plugin.exchange = mock.Mock()
self.client.add(plugin)
self.client.exchange()
plugin.exchange.assert_called_once_with()
def test_exchange_on_plugin_without_exchange_method(self):
"""
The L{BrokerClient.exchange} method ignores plugins without
an C{exchange} method.
"""
plugin = BrokerClientPlugin()
self.assertFalse(hasattr(plugin, "exchange"))
self.client.exchange()
def test_exchange_logs_errors_and_continues(self):
"""
If the L{exchange} method of a registered plugin fails, the error is
logged and other plugins are processed.
"""
self.log_helper.ignore_errors(ZeroDivisionError)
plugin1 = BrokerClientPlugin()
plugin2 = BrokerClientPlugin()
plugin1.exchange = mock.Mock(side_effect=ZeroDivisionError)
plugin2.exchange = mock.Mock()
self.client.add(plugin1)
self.client.add(plugin2)
self.client.exchange()
self.assertTrue("Error during plugin exchange" in
self.logfile.getvalue())
self.assertTrue("ZeroDivisionError" in self.logfile.getvalue())
plugin1.exchange.assert_called_once_with()
plugin2.exchange.assert_called_once_with()
def test_notify_exchange(self):
"""
The L{BrokerClient.notify_exchange} method is triggered by an
impending-exchange event and calls C{exchange} on all plugins,
logging the event.
"""
plugin = BrokerClientPlugin()
plugin.exchange = mock.Mock()
self.client.add(plugin)
self.client_reactor.fire("impending-exchange")
self.assertTrue("Got notification of impending exchange. "
"Notifying all plugins." in self.logfile.getvalue())
plugin.exchange.assert_called_once_with()
def test_fire_event(self):
"""
The L{BrokerClient.fire_event} method makes the reactor fire the
given event.
"""
callback = mock.Mock()
self.client_reactor.call_on("event", callback)
self.client.fire_event("event")
callback.assert_called_once_with()
def test_fire_event_with_arguments(self):
"""
The L{BrokerClient.fire_event} accepts optional arguments and keyword
arguments to pass to the registered callback.
"""
callback = mock.Mock()
self.client_reactor.call_on("event", callback)
self.client.fire_event("event", True, kwarg=2)
callback.assert_called_once_with(True, kwarg=2)
def test_fire_event_with_mixed_results(self):
"""
The return values of the fired handlers can be part L{Deferred}s
and part not.
"""
deferred = Deferred()
callback1 = mock.Mock(return_value=123)
callback2 = mock.Mock(return_value=deferred)
self.client_reactor.call_on("event", callback1)
self.client_reactor.call_on("event", callback2)
result = self.client.fire_event("event")
reactor.callLater(0, lambda: deferred.callback("abc"))
def check_calls(result):
self.assertEqual(result, [123, "abc"])
callback1.assert_called_once_with()
callback2.assert_called_once_with()
return result.addCallback(check_calls)
def test_fire_event_with_acceptance_changed(self):
"""
When the given event type is C{message-type-acceptance-changed}, the
fired event will be a 2-tuple of the eventy type and the message type.
"""
event_type = "message-type-acceptance-changed"
callback = mock.Mock()
self.client_reactor.call_on((event_type, "test"), callback)
self.client.fire_event(event_type, "test", False)
callback.assert_called_once_with(False)
def test_handle_reconnect(self):
"""
The L{BrokerClient.handle_reconnect} method is triggered by a
broker-reconnect event, and it causes any message types previously
registered with the broker to be registered again.
"""
result1 = self.client.register_message("foo", lambda m: None)
result2 = self.client.register_message("bar", lambda m: None)
def got_result(result):
broker = mock.Mock()
self.client.broker = broker
self.client_reactor.fire("broker-reconnect")
calls = [mock.call("bar"), mock.call("foo")]
broker.register_client_accepted_message_type.assert_has_calls(
calls, any_order=True)
broker.register_client.assert_called_once_with("client")
return gather_results([result1, result2]).addCallback(got_result)
def test_exit(self):
"""
The L{BrokerClient.exit} method causes the reactor to be stopped.
"""
self.client.reactor.stop = mock.Mock()
self.client.exit()
self.client.reactor.advance(0.1)
self.client.reactor.stop.assert_called_once_with()
| gpl-2.0 | 7,469,548,300,860,606,000 | 36.625581 | 79 | 0.629829 | false |
mneagul/scape-cloud-toolkit | src/sct/cloudinit.py | 1 | 9066 | # -*- coding: utf-8 -*-
"""
Copyright 2014 Universitatea de Vest din Timișoara
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
@author: Marian Neagul <[email protected]>
@contact: [email protected]
@copyright: 2014 Universitatea de Vest din Timișoara
"""
import StringIO
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from string import Template
import base64
import gzip
import yaml
class BaseHandler(object):
def to_mime(self):
raise NotImplementedError()
class CloudConfig(BaseHandler):
"""
Sadly Ubuntu 12.04 does not support CloudInit mergers...
ToDo: find a way to work around
"""
def __init__(self, configuration={}):
self.__configuration = configuration
def to_mime(self):
buffer = StringIO.StringIO()
yaml.dump(self.__configuration, buffer, default_flow_style=False)
value = buffer.getvalue()
message = MIMEText(value, "cloud-config", "utf8")
return message
def _get_configuration(self):
return self.__configuration
def add_apt_source(self, source):
apt_sources = self.__configuration.setdefault('apt_sources', [])
apt_sources.append(source)
def add_package(self, package_spec):
packages = self.__configuration.setdefault('packages', [])
packages.append(package_spec)
def set_option(self, key, value):
# if key in self.__configuration:
# raise KeyError("Duplicate key %s. It already has the value: %s", key, self.__configuration[key])
self.__configuration[key] = value
class CloudUserScriptFile(BaseHandler):
def __init__(self):
raise NotImplementedError()
class CloudUserScript(BaseHandler):
def __init__(self, content):
self.__script_content = content
def to_mime(self):
message = MIMEText(self.__script_content, "x-shellscript", "utf8")
return message
class CloudIncludeURL(BaseHandler):
def __init__(self, urls=[]):
self.__urls = urls
def to_mime(self):
content = "\n".join(self.__urls)
message = MIMEText(content, "x-include-url", "utf8")
return message
class CloudInitPartHandler(BaseHandler):
def __init__(self, content):
self.__content = content
def to_mime(self):
message = MIMEText(self.__content, "part-handler", "utf8")
return message
class SCAPERecursiveHandler(BaseHandler):
def __init__(self, path):
self.__content = path
def to_mime(self):
message = MIMEText(self.__content, "scape-handler", "utf8")
return message
class CloudSHScript(CloudUserScript):
def __init__(self, content):
bash_content = "#!/bin/bash\n%s" % content
CloudUserScript.__init__(self, bash_content)
class CloudConfigStoreFile(CloudSHScript):
def __init__(self, content, destination_file):
encoded_content = base64.encodestring(content)
content = """
cat <<EOF | base64 -d > %s
%s
EOF
""" % (destination_file, encoded_content)
CloudSHScript.__init__(self, content)
class DefaultJavaCloudCloudConfig(CloudConfig):
configuration = {
        'apt_sources': [  # Add the webupd8team Java PPA repository
{'source': 'deb http://ppa.launchpad.net/webupd8team/java/ubuntu precise main',
'keyid': 'EEA14886',
'filename': 'oracle-java.list'
},
]
}
def __init__(self):
CloudConfig.__init__(self, self.configuration)
class DefaultPuppetCloudConfig(CloudConfig):
puppet_apt_repos = [
{'source': 'deb http://apt.puppetlabs.com precise main',
'keyid': '4BD6EC30',
'filename': 'puppet-labs-main.list'
},
{'source': 'deb http://apt.puppetlabs.com precise dependencies',
'keyid': '4BD6EC30',
'filename': 'puppet-labs-deps.list'
},
]
configuration = {
# 'apt_sources': puppet_apt_repos, # Add puppet lab repository
'packages': [
"puppet",
"puppetmaster-common",
"git"
]
}
def __init__(self):
CloudConfig.__init__(self, self.configuration)
class PuppetMasterCloudConfig(CloudConfig):
puppet_agent_init_config = 'START=yes\nDAEMON_OPTS=""\n'
puppet_apt_repos = [
{'source': 'deb http://apt.puppetlabs.com precise main',
'keyid': '4BD6EC30',
'filename': 'puppet-labs-main.list'
},
{'source': 'deb http://apt.puppetlabs.com precise dependencies',
'keyid': '4BD6EC30',
'filename': 'puppet-labs-deps.list'
},
]
configuration = {
'apt_sources': puppet_apt_repos, # Add puppet lab repository
'apt_update': True, # Runs `apt-get update`
'apt_upgrade': False, # Runs `apt-get upgrade`
'manage_etc_hosts': True,
'packages': [
"puppet",
"puppetmaster-common",
"puppetmaster",
"git"
],
}
def __init__(self):
CloudConfig.__init__(self, self.configuration)
class SimpleStringTemplate(Template):
delimiter = "@"
def __init__(self, *args, **kwargs):
Template.__init__(self, *args, **kwargs)
class FormattedCloudInitShScript(CloudSHScript):
def __init__(self, content, maps):
tmpl = SimpleStringTemplate(content)
new_content = tmpl.substitute(**maps)
CloudSHScript.__init__(self, new_content)
class PuppetMasterInitCloudBashScript(FormattedCloudInitShScript):
script = """
. /etc/profile
echo 'START=yes\n' > /etc/default/puppet
sed -i 's|127.0.0.1|127.0.0.1 puppet|g' /etc/hosts
LV=`LANG=C mount | grep -i 'on / ' | cut -d " " -f 1`
VG=`echo $LV | cut -d "/" -f 4 | cut -d "-" -f 1`
SF="/etc/scape/modules/sct/files/"
PM="/etc/puppet/manifests/"
/sbin/pvcreate /dev/vdb
/sbin/vgextend $VG /dev/vdb
lvcreate -L 800M -n Swap ubuntu
/sbin/lvresize -l +90%FREE $LV
/sbin/resize2fs $LV
mkswap -f /dev/ubuntu/Swap
SWAP_DISKS=$(blkid -s TYPE | grep -i swap | cut -d ":" -f 1)
for DSK in $SWAP_DISKS; do
swapon $DSK
done
apt-get -q -y --force-yes install facter=2.0.2-1puppetlabs1 git
apt-mark hold facter
SK=/usr/local/bin/skapur
RP=/usr/local/bin/reload-puppet-master
curl -o ${SK} http://ftp.info.uvt.ro/projects/scape/tools/skapur/skapur
chmod +x ${SK}
mkdir -p ${PM}/nodes/
touch ${PM}/nodes/dummy.pp
chown -R puppet ${PM}/nodes/
ln -s ${SF}/templates ${PM}/templates
ln -s ${SF}/site.pp ${PM}/site.pp
echo "puppet ALL=(ALL) NOPASSWD: /etc/init.d/puppetmaster" >> /etc/sudoers
echo -e "#!/bin/bash\nsudo /etc/init.d/puppetmaster restart" > ${RP}
chmod +x ${RP}
screen -A -m -d -S skapurpuppet sudo -u puppet ${SK} -hook=${RP} -address="0.0.0.0:8088" -store /etc/puppet/manifests/nodes/ -secret "@HMACSECREET"
/etc/init.d/puppetmaster stop
/etc/init.d/puppet stop
echo "*" > /etc/puppet/autosign.conf
rm -fr /var/lib/puppet/ssl/*
/etc/init.d/puppetmaster start
/etc/init.d/puppet start
puppet module install --target-dir /etc/puppet/modules/ puppetlabs/puppetdb
puppet module install --target-dir /etc/puppet/modules/ puppetlabs/motd
mkdir -p /etc/scape/
git clone @URL /etc/scape/modules
cd /etc/scape/modules
git submodule init && git submodule update
echo "*/10 * * * * root /usr/bin/git --git-dir=/etc/scape/modules/.git --work-tree=/etc/scape/modules/ pull" >> /etc/crontab
puppet apply /etc/puppet_scape_master.pp
/etc/init.d/puppet restart
"""
def __init__(self, **kwargs):
FormattedCloudInitShScript.__init__(self, self.script, kwargs)
class CloudInit(object):
def __init__(self, handlers=[]):
self.handler = []
if handlers:
self.handler.extend(handlers)
def add_handler(self, handler):
self.handler.append(handler)
def _generate(self):
message = MIMEMultipart()
for hndlr in self.handler:
message.attach(hndlr.to_mime())
return message.as_string()
def generate(self, compress=True):
if not compress:
return self._generate()
strfd = StringIO.StringIO()
with gzip.GzipFile(fileobj=strfd, mode="w", compresslevel=9) as gzfd:
gzfd.write(self._generate())
strfd.seek(0)
return strfd.read()
def __str__(self):
return self.generate(compress=False)
| apache-2.0 | 1,146,312,053,393,448,100 | 29.621622 | 151 | 0.61827 | false |
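# Illustrative usage sketch for the handler classes above (not part of the original
# module). The import path assumes the file is importable as sct.cloudinit; adjust it
# to your layout. It composes a cloud-config part and a shell script into one
# multipart cloud-init user-data payload.
from sct.cloudinit import CloudConfig, CloudSHScript, CloudInit

config = CloudConfig({})                       # pass an explicit dict to avoid the shared default
config.set_option('manage_etc_hosts', True)    # becomes a key in the cloud-config YAML part
config.add_package('git')                      # appended to the 'packages' list
script = CloudSHScript('echo "first boot" > /var/log/first-boot.log')

userdata = CloudInit([config, script])
print(userdata.generate(compress=False))       # MIME multipart text
compressed = userdata.generate()               # gzip-compressed by default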
JJMC89/delsort | scripts/upload.py | 1 | 3001 | """
Usage:
python scripts/upload.py SITE TARGET USERNAME
SITE: enwiki or testwiki
TARGET: the page on SITE where the script will be uploaded
USERNAME: the account to make the edit under
"""
import datetime
import getpass
import os.path
import re
import sys
from clint.textui import colored
from clint.textui import prompt
import git
from wikitools import page
from wikitools import wiki
API_PAGES = {"enwiki": "https://en.wikipedia.org/w/api.php",
"testwiki": "https://test.wikipedia.org/w/api.php"}
HEADER = "/* Uploaded from the Git repo @ {} (branch {}) */\n"
SUMMARY = "Updating delsort: {} @ {}"
if len(sys.argv) < 4:
print(colored.yellow("Incorrect number of arguments supplied."))
print(__doc__)
sys.exit(1)
if "--help" in sys.argv:
print(__doc__)
sys.exit(0)
site_name = sys.argv[1]
if not site_name in API_PAGES:
    print(colored.yellow(("Unrecognized wiki '%s'. Must be 'enwiki' or"
                          " 'testwiki'") % site_name))
sys.exit(1)
site = wiki.Wiki(API_PAGES[site_name])
root = sys.argv[2]
username = sys.argv[3]
if len(sys.argv) > 4:
password = sys.argv[4]
else:
password = getpass.getpass("Password for {} on {}: "
.format(username, site_name))
login_result = site.login(username, password)
if not login_result:
print(colored.yellow("Error logging in."))
sys.exit(1)
else:
print("Successfully logged in.")
target = page.Page(site, title=root)
if not os.path.isfile("delsort.js"):
print(colored.yellow("Couldn't find a file called 'delsort.js' in the project home."))
sys.exit(1)
repo = git.Repo(os.getcwd())
branch = repo.active_branch
sha1 = branch.commit.hexsha
header = HEADER.format(sha1, branch)
print("Made a header.")
if site_name == "enwiki" and root == "User:Enterprisey/delsort.js" and str(branch) == "master":
print("Updating script documentation page.")
docs = page.Page(site, title="User:Enterprisey/delsort")
docs_wikitext = docs.getWikiText()
date = re.search("start date and age\|\d+\|\d+\|\d+", docs_wikitext).group(0)
now = datetime.datetime.now()
revised_date = "start date and age|%d|%d|%d" % (now.year, now.month, now.day)
new_wikitext = docs_wikitext.replace(date, revised_date)
result = docs.edit(text=new_wikitext, summary="Updating delsort \"updated\" time")
if result["edit"]["result"] == "Success":
print(colored.green("Success!") + " Updated the \"updated\" time on the documentation.")
else:
print(colored.red("Error updating the \"updated\" time: ") + result)
with open("delsort.js", "r") as delsort:
new_text = header + delsort.read()
edit_summary = SUMMARY.format(branch, sha1[:7])
print("Uploading delsort...")
result = target.edit(text=new_text, summary=edit_summary)
if result["edit"]["result"] == "Success":
print(colored.green("Success!") + " Uploaded delsort to " + root)
else:
print(colored.red("Error uploading delsort: ") + result)
| mit | -6,782,603,428,927,825,000 | 31.978022 | 96 | 0.658447 | false |
Yubico/yubioath-desktop-dpkg | yubioath/cli/controller.py | 1 | 3176 | # Copyright (c) 2014 Yubico AB
# All rights reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Additional permission under GNU GPL version 3 section 7
#
# If you modify this program, or any covered work, by linking or
# combining it with the OpenSSL project's OpenSSL library (or a
# modified version of that library), containing parts covered by the
# terms of the OpenSSL or SSLeay licenses, We grant you additional
# permission to convey the resulting work. Corresponding Source for a
# non-source form of such a combination shall include the source code
# for the parts of OpenSSL used as well as that of the covered work.
from ..core.controller import Controller
from ..core.standard import YubiOathCcid
from ..core.exc import CardError
from getpass import getpass
import sys
class CliController(Controller):
def __init__(self, keystore, save=False):
self.keystore = keystore
self._save = save
def _prompt_touch(self):
sys.stderr.write('Touch your YubiKey...\n')
def unlock(self, device):
key = self.keystore.get(device.id)
if key:
try:
device.unlock(key)
except CardError:
sys.stderr.write('Incorrect password from file.\n')
self.keystore.delete(device.id)
while device.locked:
pw = getpass('Password: ')
key = device.calculate_key(pw)
try:
device.unlock(key)
if self._save:
self.keystore.put(device.id, key)
sys.stderr.write('Password saved to %s\n' %
self.keystore.fname)
except CardError:
sys.stderr.write('Incorrect password!\n')
def set_password(self, ccid_dev, password, remember=False):
dev = YubiOathCcid(ccid_dev)
key = super(CliController, self).set_password(dev, password)
if remember:
self.keystore.put(dev.id, key)
sys.stderr.write('Password saved to %s\n' % self.keystore.fname)
else:
self.keystore.delete(dev.id)
def add_cred(self, ccid_dev, *args, **kwargs):
dev = YubiOathCcid(ccid_dev)
super(CliController, self).add_cred(dev, *args, **kwargs)
def delete_cred(self, ccid_dev, name):
dev = YubiOathCcid(ccid_dev)
super(CliController, self).delete_cred(dev, name)
def reset_device(self, ccid_dev):
dev = YubiOathCcid(ccid_dev)
self.keystore.delete(dev.id)
super(CliController, self).reset_device(dev)
| gpl-3.0 | 2,109,234,010,021,809,700 | 36.809524 | 76 | 0.653652 | false |
bbiiggppiigg/NTHUOJ_web | install.py | 1 | 2886 | """
The MIT License (MIT)
Copyright (c) 2014 NTHUOJ team
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import getpass
import ConfigParser
from func import *
CONFIG_PATH = 'nthuoj/config/nthuoj.cfg'
if not os.path.isfile(CONFIG_PATH):
# If the config file does not exist, write default config
write_default_config(CONFIG_PATH)
config = ConfigParser.RawConfigParser()
config.optionxform = str
config.read(CONFIG_PATH)
if not config.has_section('client'):
# Setting mysql info
write_config(config, 'client',
{'default-character-set': 'utf8'},
host=raw_input('Mysql host: '),
database=raw_input('Mysql database: '),
user=raw_input('Mysql user: '),
password=getpass.getpass('Mysql user password: ')
)
if not config.has_section('system_version'):
# Getting system version info
write_config(config, 'system_version',
backend=raw_input('Host os version: '),
gcc=raw_input('gcc version: '),
gpp=raw_input('g++ version: ')
)
if not config.has_section('email'):
# Setting email info
write_config(config, 'email',
user=raw_input('Email host(gmail): '),
password=getpass.getpass("Email host's password: ")
)
# Change default path
paths = dict(config.items('path'))
print 'Default path configuration is:\n'
for key in paths:
print '%s: %s' % (key, paths[key])
if prompt('Customize source code, testcase path?'):
for key in paths:
path = raw_input('%s: ' % key)
paths[key] = path
os.system('mkdir %s' % path)
write_config(config, paths)
# Writing our configuration file
with open(CONFIG_PATH, 'wb') as configfile:
config.write(configfile)
# Bower
if prompt('Install static file by `bower install`?'):
django_manage('bower install')
# Database Migratinos
django_manage('syncdb')
django_manage('makemigrations')
django_manage('migrate')
| mit | -5,840,387,914,222,046,000 | 30.369565 | 78 | 0.711019 | false |
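# Illustrative sketch (not part of the installer): reading the generated config back
# with the same ConfigParser settings. Section and option names mirror the ones the
# installer writes above; the file path is the installer's default.
import ConfigParser  # Python 2, matching the installer

config = ConfigParser.RawConfigParser()
config.optionxform = str                      # keep option names case-sensitive, as above
config.read('nthuoj/config/nthuoj.cfg')

if config.has_section('client'):
    db_host = config.get('client', 'host')
    db_user = config.get('client', 'user')
    print 'mysql target: %s@%s' % (db_user, db_host)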
infobloxopen/infoblox-netmri | infoblox_netmri/api/remote/models/spm_history_interface_history_grid_remote.py | 1 | 3640 | from ..remote import RemoteModel
class SpmHistoryInterfaceHistoryGridRemote(RemoteModel):
"""
This table lists the SPM interface history within the user specified period of time for a given interface.
| ``id:`` The internal NetMRI identifier of the grid entry.
| ``attribute type:`` number
| ``FirstSeen:`` The timestamp of when NetMRI first discovered this interface.
| ``attribute type:`` datetime
| ``LastSeen:`` The timestamp of when NetMRI last polled data from this interface.
| ``attribute type:`` datetime
| ``DeviceID:`` The NetMRI internal identifier for the device.
| ``attribute type:`` number
| ``DeviceType:`` The NetMRI-determined device type.
| ``attribute type:`` string
| ``DeviceName:`` The NetMRI name of the device; this will be either the same as DeviceSysName or DeviceDNSName, depending on your NetMRI configuration.
| ``attribute type:`` string
| ``DeviceIPDotted:`` The management IP address of the device, in dotted (or colon-delimited for IPv6) format.
| ``attribute type:`` string
| ``DeviceIPNumeric:`` The numerical value of the device IP address.
| ``attribute type:`` number
| ``InterfaceID:`` The internal NetMRI identifier for the interface configured with this address.
| ``attribute type:`` number
| ``ifName:`` The name of this interface. This is typically the short name of the interface as it is identified in the console.
| ``attribute type:`` string
| ``ifIndex:`` The SNMP interface index of the interface configured with this address.
| ``attribute type:`` string
| ``ifDescr:`` The description of the interface, as set in the device's configuration file.
| ``attribute type:`` string
| ``ifAlias:`` Interface alias of this interface.
| ``attribute type:`` string
| ``ifMAC:`` The interface Media Access Controller (MAC) address of this interface.
| ``attribute type:`` string
| ``ifOperStatus:`` The operational status (up/down) of this interface.
| ``attribute type:`` string
| ``ifAdminStatus:`` The configured status (up/down) of this interface.
| ``attribute type:`` string
| ``ifSpeed:`` The operational speed, in bps, of this interface.
| ``attribute type:`` number
| ``ifDuplex:`` The operational duplex of this interface.
| ``attribute type:`` string
| ``ifAdminDuplex:`` Admin setting of duplex, Auto indicates the device will try to negotiate with the other end to determine.
| ``attribute type:`` string
| ``Errors:`` Total inbound and outbound errors on this interface.
| ``attribute type:`` number
| ``VirtualNetworkID:`` The internal identifier for the network which the interface is associated to.
| ``attribute type:`` number
| ``Network:`` The name of the Network View associated.
| ``attribute type:`` string
"""
properties = ("id",
"FirstSeen",
"LastSeen",
"DeviceID",
"DeviceType",
"DeviceName",
"DeviceIPDotted",
"DeviceIPNumeric",
"InterfaceID",
"ifName",
"ifIndex",
"ifDescr",
"ifAlias",
"ifMAC",
"ifOperStatus",
"ifAdminStatus",
"ifSpeed",
"ifDuplex",
"ifAdminDuplex",
"Errors",
"VirtualNetworkID",
"Network",
)
| apache-2.0 | -8,600,558,602,595,849,000 | 35.767677 | 157 | 0.600275 | false |
amiv-eth/amivapi | amivapi/tests/groups/test_permissions.py | 1 | 2254 | # -*- coding: utf-8 -*-
#
# license: AGPLv3, see LICENSE for details. In addition we strongly encourage
# you to buy us beer if we meet and you like the software.
"""Tests for custom validation rules for groups.
Since this hook will be added for all requests (the after auth hook) and this
is tested for auth.py we only get get on resource level to test functionality.
"""
from flask import g
from amivapi.tests.utils import WebTest
class PermissionsTest(WebTest):
"""Test that a groupmembership grants correct permissions."""
UID = 24 * '0'
def assertBase(self, admin, admin_readonly):
"""Assert baseline"""
with self.app.app_context():
self.api.get('/groups', status_code=200,
token=self.get_user_token(self.UID))
self.assertEqual(g.get('resource_admin'), admin)
self.assertEqual(g.get('resource_admin_readonly'), admin_readonly)
def assertNothing(self):
"""Assert admin and admin_readonly are false or not in g."""
self.assertBase(False, False)
def assertAdmin(self):
"""Assert admin is true."""
self.assertBase(True, False)
def assertAdminReadonly(self):
"""Assert admin and admin_readonly are false or not in g."""
self.assertBase(False, True)
def permission_fixture(self, permissions):
"""Create user, group with permissions and membership."""
gid = 24 * '1'
self.load_fixture({
'users': [{'_id': self.UID}],
'groups': [{'_id': gid, 'permissions': permissions}],
'groupmemberships': [{'user': self.UID, 'group': gid}]
})
def test_other_permissions_have_no_influence(self):
"""Test that permissions for other resources have no influence."""
self.permission_fixture({'sessions': 'read', 'users': 'readwrite'})
self.assertNothing()
def test_admin(self):
"""Test that 'readwrite' gives admin permissions."""
self.permission_fixture({'groups': 'readwrite'})
self.assertAdmin()
def test_readonly(self):
"""Test that 'readwrite' gives admin permissions."""
self.permission_fixture({'groups': 'read'})
self.assertAdminReadonly()
| agpl-3.0 | 3,172,133,955,290,614,300 | 35.354839 | 78 | 0.628217 | false |
c3cashdesk/c6sh | src/postix/backoffice/checks.py | 1 | 2378 | from decimal import Decimal
from django.utils.translation import ugettext as _
from postix.core.models import (
ListConstraintProduct, Product, WarningConstraintProduct,
)
_check_registry = set()
def register_check(fn):
_check_registry.add(fn)
return fn
class CheckError(Exception):
pass
@register_check
def check_quotas():
prods = []
for p in Product.objects.filter(is_visible=True).prefetch_related('quota_set'):
quotas = bool(p.quota_set.all())
if not quotas:
prods.append(p)
if prods:
raise CheckError(
_(
'The following products are visible but have no quota: {products}'.format(
products=', '.join(str(r) for r in prods)
)
)
)
@register_check
def check_tax_rates():
product_rates = set(
Product.objects.exclude(price=0).values_list('tax_rate', flat=True).distinct()
)
constraint_rates = set(
ListConstraintProduct.objects.exclude(price=0)
.values_list('tax_rate', flat=True)
.distinct()
) | set(
WarningConstraintProduct.objects.exclude(price=0)
.values_list('tax_rate', flat=True)
.distinct()
)
if len(constraint_rates - product_rates):
raise CheckError(
_(
'You have list or warning constraints with tax rates of {constraint_rates} '
'but your products only use the tax rates {product_rates}. Are you sure this is '
'correct?'
).format(
constraint_rates=', '.join(str(r) + '%' for r in constraint_rates),
product_rates=', '.join(str(r) + '%' for r in product_rates),
)
)
if Decimal('0.00') in product_rates and len(product_rates) > 1:
raise CheckError(
_(
'You have some products that use a non-zero tax rate but the following products are set to 0%: '
'{products}'
).format(
products=', '.join(
str(p) for p in Product.objects.filter(tax_rate=0).exclude(price=0)
)
)
)
def all_errors():
errors = []
for check in _check_registry:
try:
check()
except CheckError as e:
errors.append(str(e))
return errors
| agpl-3.0 | 86,373,377,420,104,510 | 26.976471 | 112 | 0.55635 | false |
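# Hypothetical sketch of extending the registry above (not part of the original file).
# The import path assumes the module is importable as postix.backoffice.checks, as in
# this repository; check_has_products and its message are made up for illustration.
from postix.backoffice.checks import CheckError, register_check, all_errors
from postix.core.models import Product

@register_check
def check_has_products():
    # Fails when no products exist at all; all_errors() collects the message.
    if not Product.objects.exists():
        raise CheckError('No products have been configured yet.')

# e.g. in a backoffice view: gather every failed check as a list of messages
for message in all_errors():
    print(message)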
XianliangJ/collections | Jellyfish/ripl/ripl/routing.py | 1 | 14278 | #!/usr/bin/env python
'''@package routing
Routing engine base class.
@author Brandon Heller ([email protected])
'''
from copy import copy
from random import choice
from collections import deque
import logging
lg = logging.getLogger('ripl.routing')
DEBUG = False
lg.setLevel(logging.WARNING)
if DEBUG:
lg.setLevel(logging.DEBUG)
lg.addHandler(logging.StreamHandler())
class Routing(object):
'''Base class for data center network routing.
Routing engines must implement the get_route() method.
'''
def __init__(self, topo):
'''Create Routing object.
@param topo Topo object from Net parent
'''
self.topo = topo
def get_route(self, src, dst, pkt):
'''Return flow path.
@param src source host
@param dst destination host
        @param pkt packet being routed (some engines derive a hash from it)
@return flow_path list of DPIDs to traverse (including hosts)
'''
raise NotImplementedError
class StructuredRouting(Routing):
'''Route flow through a StructuredTopo and return one path.
Optionally accepts a function to choose among the set of valid paths. For
example, this could be based on a random choice, hash value, or
always-leftmost path (yielding spanning-tree routing).
    Completely stupid! Think of it as a topology-aware Dijkstra's, that either
    extends the frontier until paths are found, or quits when it has looked for
    paths all the way up to the core. It simply enumerates all valid paths and
    chooses one. Alternatively, think of it as a bidirectional DFS.
This is in no way optimized, and may be the slowest routing engine you've
ever seen. Still, it works with both VL2 and FatTree topos, and should
help to bootstrap hardware testing and policy choices.
The main data structures are the path dicts, one each for the src and dst.
Each path dict has node ids as its keys. The values are lists of routes,
where each route records the list of dpids to get from the starting point
(src or dst) to the key.
Invariant: the last element in each route must be equal to the key.
'''
def __init__(self, topo, path_choice):
'''Create Routing object.
@param topo Topo object
@param path_choice path choice function (see examples below)
'''
self.topo = topo
self.path_choice = path_choice
self.src_paths = None
self.dst_paths = None
self.src_path_layer = None
self.dst_path_layer = None
def _extend_reachable(self, frontier_layer):
'''Extend reachability up, closer to core.
@param frontier_layer layer we're extending TO, for filtering paths
@return paths list of complete paths or None if no overlap
invariant: path starts with src, ends in dst
If extending the reachability frontier up yields a path to a node which
already has some other path, then add that to a list to return of valid
path choices. If multiple paths lead to the newly-reached node, then
add a path for every possible combination. For this reason, beware
exponential path explosion.
Modifies most internal data structures as a side effect.
'''
complete_paths = [] # List of complete dpid routes
# expand src frontier if it's below the dst
if self.src_path_layer > frontier_layer:
src_paths_next = {}
# expand src frontier up
for node in sorted(self.src_paths):
src_path_list = self.src_paths[node]
lg.info("src path list for node %s is %s" %
(node, src_path_list))
if not src_path_list or len(src_path_list) == 0:
continue
last = src_path_list[0][-1] # Last element on first list
up_edges = self.topo.up_edges(last)
if not up_edges:
continue
assert up_edges
up_nodes = self.topo.up_nodes(last)
if not up_nodes:
continue
assert up_nodes
for edge in sorted(up_edges):
a, b = edge
assert a == last
assert b in up_nodes
frontier_node = b
# add path if it connects the src and dst
if frontier_node in self.dst_paths:
dst_path_list = self.dst_paths[frontier_node]
lg.info('self.dst_paths[frontier_node] = %s' %
self.dst_paths[frontier_node])
for dst_path in dst_path_list:
dst_path_rev = copy(dst_path)
dst_path_rev.reverse()
for src_path in src_path_list:
new_path = src_path + dst_path_rev
lg.info('adding path: %s' % new_path)
complete_paths.append(new_path)
else:
if frontier_node not in src_paths_next:
src_paths_next[frontier_node] = []
for src_path in src_path_list:
extended_path = src_path + [frontier_node]
src_paths_next[frontier_node].append(extended_path)
lg.info("adding to self.paths[%s] %s: " % \
(frontier_node, extended_path))
# filter paths to only those in the most recently seen layer
lg.info("src_paths_next: %s" % src_paths_next)
self.src_paths = src_paths_next
self.src_path_layer -= 1
        # expand dst frontier if it's below the src
if self.dst_path_layer > frontier_layer:
dst_paths_next = {}
            # expand dst frontier up
for node in self.dst_paths:
dst_path_list = self.dst_paths[node]
lg.info("dst path list for node %s is %s" %
(node, dst_path_list))
last = dst_path_list[0][-1] # last element on first list
up_edges = self.topo.up_edges(last)
if not up_edges:
continue
assert up_edges
up_nodes = self.topo.up_nodes(last)
if not up_nodes:
continue
assert up_nodes
lg.info("up_edges = %s" % sorted(up_edges))
for edge in sorted(up_edges):
a, b = edge
assert a == last
assert b in up_nodes
frontier_node = b
# add path if it connects the src and dst
if frontier_node in self.src_paths:
src_path_list = self.src_paths[frontier_node]
lg.info('self.src_paths[frontier_node] = %s' %
self.src_paths[frontier_node])
for src_path in src_path_list:
for dst_path in dst_path_list:
dst_path_rev = copy(dst_path)
dst_path_rev.reverse()
new_path = src_path + dst_path_rev
lg.info('adding path: %s' % new_path)
complete_paths.append(new_path)
else:
if frontier_node not in dst_paths_next:
dst_paths_next[frontier_node] = []
for dst_path in dst_path_list:
extended_path = dst_path + [frontier_node]
dst_paths_next[frontier_node].append(extended_path)
lg.info("adding to self.paths[%s] %s: " % \
(frontier_node, extended_path))
# filter paths to only those in the most recently seen layer
lg.info("dst_paths_next: %s" % dst_paths_next)
self.dst_paths = dst_paths_next
self.dst_path_layer -= 1
lg.info("complete paths = %s" % complete_paths)
return complete_paths
def get_route(self, src, dst, hash_):
'''Return flow path.
@param src source dpid (for host or switch)
@param dst destination dpid (for host or switch)
@param hash_ hash value
@return flow_path list of DPIDs to traverse (including inputs), or None
'''
if src == dst:
return [src]
self.src_paths = {src: [[src]]}
self.dst_paths = {dst: [[dst]]}
src_layer = self.topo.layer(src)
dst_layer = self.topo.layer(dst)
# use later in extend_reachable
self.src_path_layer = src_layer
self.dst_path_layer = dst_layer
# the lowest layer is the one closest to hosts, with the highest value
lowest_starting_layer = src_layer
if dst_layer > src_layer:
lowest_starting_layer = dst_layer
for depth in range(lowest_starting_layer - 1, -1, -1):
lg.info('-------------------------------------------')
paths_found = self._extend_reachable(depth)
if paths_found:
path_choice = self.path_choice(paths_found, src, dst, hash_)
lg.info('path_choice = %s' % path_choice)
return path_choice
return None
# Disable unused argument warnings in the classes below
# pylint: disable-msg=W0613
class STStructuredRouting(StructuredRouting):
'''Spanning Tree Structured Routing.'''
def __init__(self, topo):
'''Create StructuredRouting object.
@param topo Topo object
'''
def choose_leftmost(paths, src, dst, hash_):
'''Choose leftmost path
@param path paths of dpids generated by a routing engine
@param src src dpid (unused)
@param dst dst dpid (unused)
@param hash_ hash value (unused)
'''
return paths[0]
super(STStructuredRouting, self).__init__(topo, choose_leftmost)
class RandomStructuredRouting(StructuredRouting):
'''Random Structured Routing.'''
def __init__(self, topo):
'''Create StructuredRouting object.
@param topo Topo object
'''
def choose_random(paths, src, dst, hash_):
'''Choose random path
@param path paths of dpids generated by a routing engine
@param src src dpid (unused)
@param dst dst dpid (unused)
@param hash_ hash value (unused)
'''
return choice(paths)
super(RandomStructuredRouting, self).__init__(topo, choose_random)
class HashedStructuredRouting(StructuredRouting):
'''Hashed Structured Routing.'''
def __init__(self, topo):
'''Create StructuredRouting object.
@param topo Topo object
'''
def choose_hashed(paths, src, dst, hash_):
'''Choose consistent hashed path
@param path paths of dpids generated by a routing engine
@param src src dpid
@param dst dst dpid
@param hash_ hash value
'''
choice = hash_ % len(paths)
path = sorted(paths)[choice]
return path
super(HashedStructuredRouting, self).__init__(topo, choose_hashed)
# pylint: enable-msg=W0613
class JfRouting(Routing):
def __init__(self, topo):
self.topo = topo
self.graph = None
def build_graph(self, links):
'''Build a map of nodes -> neighbors given
a list of links, which are (node, node) tuples'''
self.graph = {}
for link in links:
if link[0] in self.graph:
self.graph[link[0]].append(link[1])
else:
self.graph[link[0]] = [link[1]]
# add the reverse link
if link[1] in self.graph:
self.graph[link[1]].append(link[0])
else:
self.graph[link[1]] = [link[0]]
def bfs(self, src, dst):
pathsFound = []
path = [src]
q = deque()
q.append(path)
shortestPathLen = 0
while len(q) > 0 and (self.k == 0 or len(pathsFound) < self.k):
path = q.popleft()
# if we're running ECMP and we encounter a path longer than shortest path, stop
if shortestPathLen > 0 and len(path) > shortestPathLen:
break
# if last node on path is the destination
if path[-1] == dst:
# add to list of paths
if self.k == 0 and len(pathsFound) == 0:
# no paths found yet, store shortest path length
# self.k == 0 means we're running ECMP
shortestPathLen = len(path)
pathsFound.append(path)
# add next neighbors to paths to explore
for neighbor in self.graph[path[-1]]:
if neighbor not in path:
newPath = path + [neighbor]
q.append(newPath)
return pathsFound
def get_routes(self, src, dst):
if src == dst:
return [src]
self.build_graph(self.topo.links())
paths = self.bfs(src, dst)
return paths
class KSPRouting(JfRouting):
'''k-shortest-paths routing'''
def __init__(self, topo):
self.k = 4
super(KSPRouting, self).__init__(topo)
def get_route(self, src, dst, hash_ = None):
paths = self.get_routes(src, dst)
if len(paths) > 0:
return choice(paths)
else:
return None
class ECMPRouting(JfRouting):
'''ECMP routing'''
def __init__(self, topo):
self.k = 0
super(ECMPRouting, self).__init__(topo)
def get_route(self, src, dst, hash_):
paths = self.get_routes(src, dst)
if len(paths) > 0:
return sorted(paths)[hash_ % len(paths)]
else:
return None
| gpl-3.0 | -424,992,199,045,397,060 | 33.909535 | 91 | 0.535719 | false |
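# Self-contained sketch (not part of the original module) of the breadth-first path
# enumeration that JfRouting.bfs implements, run against a hard-coded diamond graph
# instead of the Mininet/ripl Topo object the classes above expect.
from collections import deque

def enumerate_paths(graph, src, dst, k):
    # k == 0 reproduces the ECMP behaviour above: keep only shortest-length paths.
    paths_found, shortest_len = [], 0
    q = deque([[src]])
    while q and (k == 0 or len(paths_found) < k):
        path = q.popleft()
        if shortest_len and len(path) > shortest_len:
            break
        if path[-1] == dst:
            if k == 0 and not paths_found:
                shortest_len = len(path)
            paths_found.append(path)
        for neighbor in graph[path[-1]]:
            if neighbor not in path:
                q.append(path + [neighbor])
    return paths_found

# Diamond topology: two equal-cost paths from 'a' to 'd'.
graph = {'a': ['b', 'c'], 'b': ['a', 'd'], 'c': ['a', 'd'], 'd': ['b', 'c']}
print(enumerate_paths(graph, 'a', 'd', k=0))   # ECMP: both 3-hop paths
print(enumerate_paths(graph, 'a', 'd', k=1))   # k-shortest-paths with k=1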
darrencheng0817/AlgorithmLearning | Python/leetcode/ImplementStackUsingQueues.py | 1 | 1871 | '''
Created on 1.12.2016
@author: Darren
'''
'''
from leetcode.BinaryTreeMaximumPathSum import Solution
Implement the following operations of a stack using queues.
push(x) -- Push element x onto stack.
pop() -- Removes the element on top of the stack.
top() -- Get the top element.
empty() -- Return whether the stack is empty.
Notes:
You must use only standard operations of a queue -- which means only push to back, peek/pop from front, size, and is empty operations are valid.
Depending on your language, queue may not be supported natively. You may simulate a queue by using a list or deque (double-ended queue), as long as you use only standard operations of a queue.
You may assume that all operations are valid (for example, no pop or top operations will be called on an empty stack).
Update (2015-06-11):
The class name of the Java function had been updated to MyStack instead of Stack.
Credits:Special thanks to @jianchao.li.fighter for adding this problem and all test cases."
'''
class Stack(object):
def __init__(self):
"""
initialize your data structure here.
"""
self.q1=[]
self.q2=[]
def push(self, x):
"""
:type x: int
:rtype: nothing
"""
self.q1.append(x)
def pop(self):
"""
:rtype: nothing
"""
while self.q1:
self.q2.append(self.q1.pop(0))
self.q2.pop()
while self.q2:
self.q1.append(self.q2.pop(0))
def top(self):
"""
:rtype: int
"""
return self.q1[-1]
def empty(self):
"""
:rtype: bool
"""
return not bool(self.q1)
so=Stack()
so.push(1)
so.push(2)
so.pop()
print(so.top())
| mit | 1,133,080,208,888,505,900 | 15.431193 | 192 | 0.575094 | false |
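# Quick LIFO sanity check for the Stack above (illustrative; assumes the class is in
# scope). pop() intentionally returns nothing, so top() and empty() are the only ways
# to observe the ordering.
demo = Stack()
for value in [10, 20, 30]:
    demo.push(value)
print(demo.top())     # 30 -- most recently pushed element
demo.pop()
print(demo.top())     # 20
demo.pop()
demo.pop()
print(demo.empty())   # True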
tylertian/Openstack | openstack F/glance/glance/store/scrubber.py | 1 | 7036 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 OpenStack, LLC
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import calendar
import eventlet
import os
import time
from glance import context
from glance.common import utils
from glance.openstack.common import cfg
import glance.openstack.common.log as logging
from glance import registry
from glance import store
import glance.store.filesystem
import glance.store.http
import glance.store.s3
import glance.store.swift
LOG = logging.getLogger(__name__)
scrubber_opts = [
cfg.BoolOpt('cleanup_scrubber', default=False),
cfg.IntOpt('cleanup_scrubber_time', default=86400)
]
CONF = cfg.CONF
CONF.register_opts(scrubber_opts)
class Daemon(object):
def __init__(self, wakeup_time=300, threads=1000):
LOG.info(_("Starting Daemon: wakeup_time=%(wakeup_time)s "
"threads=%(threads)s") % locals())
self.wakeup_time = wakeup_time
self.event = eventlet.event.Event()
self.pool = eventlet.greenpool.GreenPool(threads)
def start(self, application):
self._run(application)
def wait(self):
try:
self.event.wait()
except KeyboardInterrupt:
msg = _("Daemon Shutdown on KeyboardInterrupt")
LOG.info(msg)
def _run(self, application):
LOG.debug(_("Running application"))
self.pool.spawn_n(application.run, self.pool, self.event)
eventlet.spawn_after(self.wakeup_time, self._run, application)
LOG.debug(_("Next run scheduled in %s seconds") % self.wakeup_time)
class Scrubber(object):
CLEANUP_FILE = ".cleanup"
def __init__(self):
self.datadir = CONF.scrubber_datadir
self.cleanup = CONF.cleanup_scrubber
self.cleanup_time = CONF.cleanup_scrubber_time
# configs for registry API store auth
self.admin_user = CONF.admin_user
self.admin_tenant = CONF.admin_tenant_name
host, port = CONF.registry_host, CONF.registry_port
LOG.info(_("Initializing scrubber with conf: %s") %
{'datadir': self.datadir, 'cleanup': self.cleanup,
'cleanup_time': self.cleanup_time,
'registry_host': host, 'registry_port': port})
registry.configure_registry_client()
registry.configure_registry_admin_creds()
ctx = context.RequestContext()
self.registry = registry.get_registry_client(ctx)
utils.safe_mkdirs(self.datadir)
store.create_stores()
def run(self, pool, event=None):
now = time.time()
if not os.path.exists(self.datadir):
LOG.info(_("%s does not exist") % self.datadir)
return
delete_work = []
for root, dirs, files in os.walk(self.datadir):
for id in files:
if id == self.CLEANUP_FILE:
continue
file_name = os.path.join(root, id)
delete_time = os.stat(file_name).st_mtime
if delete_time > now:
continue
uri, delete_time = read_queue_file(file_name)
if delete_time > now:
continue
delete_work.append((id, uri, now))
LOG.info(_("Deleting %s images") % len(delete_work))
pool.starmap(self._delete, delete_work)
# NOTE(bourke): When not running as a daemon, a slight pause is needed
        # to allow the starmap to begin its work.
eventlet.sleep(0.1)
if self.cleanup:
self._cleanup(pool)
def _delete(self, id, uri, now):
file_path = os.path.join(self.datadir, str(id))
try:
LOG.debug(_("Deleting %(uri)s") % {'uri': uri})
# Here we create a request context with credentials to support
# delayed delete when using multi-tenant backend storage
ctx = context.RequestContext(auth_tok=self.registry.auth_tok,
user=self.admin_user,
tenant=self.admin_tenant)
store.delete_from_backend(ctx, uri)
except store.UnsupportedBackend:
msg = _("Failed to delete image from store (%(uri)s).")
LOG.error(msg % {'uri': uri})
write_queue_file(file_path, uri, now)
self.registry.update_image(id, {'status': 'deleted'})
utils.safe_remove(file_path)
def _cleanup(self, pool):
now = time.time()
cleanup_file = os.path.join(self.datadir, self.CLEANUP_FILE)
if not os.path.exists(cleanup_file):
write_queue_file(cleanup_file, 'cleanup', now)
return
_uri, last_run_time = read_queue_file(cleanup_file)
cleanup_time = last_run_time + self.cleanup_time
if cleanup_time > now:
return
LOG.info(_("Getting images deleted before %s") % self.cleanup_time)
write_queue_file(cleanup_file, 'cleanup', now)
filters = {'deleted': True, 'is_public': 'none',
'status': 'pending_delete'}
pending_deletes = self.registry.get_images_detailed(filters=filters)
delete_work = []
for pending_delete in pending_deletes:
deleted_at = pending_delete.get('deleted_at')
if not deleted_at:
continue
time_fmt = "%Y-%m-%dT%H:%M:%S"
            # NOTE: Strip off microseconds, which may occur after the last '.' or ','
# Example: 2012-07-07T19:14:34.974216
date_str = deleted_at.rsplit('.', 1)[0].rsplit(',', 1)[0]
delete_time = calendar.timegm(time.strptime(date_str,
time_fmt))
if delete_time + self.cleanup_time > now:
continue
delete_work.append((pending_delete['id'],
pending_delete['location'],
now))
LOG.info(_("Deleting %s images") % len(delete_work))
pool.starmap(self._delete, delete_work)
def read_queue_file(file_path):
with open(file_path) as f:
uri = f.readline().strip()
delete_time = int(f.readline().strip())
return uri, delete_time
def write_queue_file(file_path, uri, delete_time):
with open(file_path, 'w') as f:
f.write('\n'.join([uri, str(int(delete_time))]))
os.chmod(file_path, 0600)
os.utime(file_path, (delete_time, delete_time))
| apache-2.0 | -6,856,816,283,492,491,000 | 33.490196 | 78 | 0.592951 | false |
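# Illustrative sketch (not part of glance) of the on-disk queue-file format the two
# helpers above agree on: line 1 is the image URI, line 2 the integer deletion
# timestamp, and the file mtime is set to that timestamp as a cheap first-pass filter.
# It assumes read_queue_file/write_queue_file above are in scope (they only use the
# stdlib); the location URI below is made up.
import os
import tempfile
import time

queue_path = os.path.join(tempfile.mkdtemp(), '0123456789abcdef')
delete_at = int(time.time()) + 86400    # pending delete, due in one day

write_queue_file(queue_path, 'swift+https://account/container/image-id', delete_at)
uri, delete_time = read_queue_file(queue_path)
assert delete_time == delete_at
print(uri)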
tboyce1/home-assistant | homeassistant/components/light/tplink.py | 2 | 7236 | """
Support for TPLink lights.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/light.tplink/
"""
import logging
import colorsys
import time
import voluptuous as vol
from homeassistant.const import (CONF_HOST, CONF_NAME)
from homeassistant.components.light import (
Light, ATTR_BRIGHTNESS, ATTR_COLOR_TEMP, ATTR_KELVIN, ATTR_RGB_COLOR,
SUPPORT_BRIGHTNESS, SUPPORT_COLOR_TEMP, SUPPORT_RGB_COLOR, PLATFORM_SCHEMA)
import homeassistant.helpers.config_validation as cv
from homeassistant.util.color import \
color_temperature_mired_to_kelvin as mired_to_kelvin
from homeassistant.util.color import (
color_temperature_kelvin_to_mired as kelvin_to_mired)
from typing import Tuple
REQUIREMENTS = ['pyHS100==0.3.0']
_LOGGER = logging.getLogger(__name__)
ATTR_CURRENT_POWER_W = 'current_power_w'
ATTR_DAILY_ENERGY_KWH = 'daily_energy_kwh'
ATTR_MONTHLY_ENERGY_KWH = 'monthly_energy_kwh'
DEFAULT_NAME = 'TP-Link Light'
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string
})
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Initialise pyLB100 SmartBulb."""
from pyHS100 import SmartBulb
host = config.get(CONF_HOST)
name = config.get(CONF_NAME)
add_devices([TPLinkSmartBulb(SmartBulb(host), name)], True)
def brightness_to_percentage(byt):
"""Convert brightness from absolute 0..255 to percentage."""
return int((byt*100.0)/255.0)
def brightness_from_percentage(percent):
"""Convert percentage to absolute value 0..255."""
return (percent*255.0)/100.0
# Travis-CI runs too old astroid https://github.com/PyCQA/pylint/issues/1212
# pylint: disable=invalid-sequence-index
def rgb_to_hsv(rgb: Tuple[float, float, float]) -> Tuple[int, int, int]:
"""Convert RGB tuple (values 0-255) to HSV (degrees, %, %)."""
hue, sat, value = colorsys.rgb_to_hsv(rgb[0]/255, rgb[1]/255, rgb[2]/255)
return int(hue * 360), int(sat * 100), int(value * 100)
# Travis-CI runs too old astroid https://github.com/PyCQA/pylint/issues/1212
# pylint: disable=invalid-sequence-index
def hsv_to_rgb(hsv: Tuple[float, float, float]) -> Tuple[int, int, int]:
"""Convert HSV tuple (degrees, %, %) to RGB (values 0-255)."""
red, green, blue = colorsys.hsv_to_rgb(hsv[0]/360, hsv[1]/100, hsv[2]/100)
return int(red * 255), int(green * 255), int(blue * 255)
class TPLinkSmartBulb(Light):
"""Representation of a TPLink Smart Bulb."""
def __init__(self, smartbulb: 'SmartBulb', name) -> None:
"""Initialize the bulb."""
self.smartbulb = smartbulb
self._name = name
self._state = None
self._available = True
self._color_temp = None
self._brightness = None
self._rgb = None
self._supported_features = 0
self._emeter_params = {}
@property
def name(self):
"""Return the name of the Smart Bulb, if any."""
return self._name
@property
def available(self) -> bool:
"""Return if bulb is available."""
return self._available
@property
def device_state_attributes(self):
"""Return the state attributes of the device."""
return self._emeter_params
def turn_on(self, **kwargs):
"""Turn the light on."""
self.smartbulb.state = self.smartbulb.BULB_STATE_ON
if ATTR_COLOR_TEMP in kwargs:
self.smartbulb.color_temp = \
mired_to_kelvin(kwargs[ATTR_COLOR_TEMP])
if ATTR_KELVIN in kwargs:
self.smartbulb.color_temp = kwargs[ATTR_KELVIN]
if ATTR_BRIGHTNESS in kwargs:
brightness = kwargs.get(ATTR_BRIGHTNESS, self.brightness or 255)
self.smartbulb.brightness = brightness_to_percentage(brightness)
if ATTR_RGB_COLOR in kwargs:
rgb = kwargs.get(ATTR_RGB_COLOR)
self.smartbulb.hsv = rgb_to_hsv(rgb)
def turn_off(self):
"""Turn the light off."""
self.smartbulb.state = self.smartbulb.BULB_STATE_OFF
@property
def color_temp(self):
"""Return the color temperature of this light in mireds for HA."""
return self._color_temp
@property
def brightness(self):
"""Return the brightness of this light between 0..255."""
return self._brightness
@property
def rgb_color(self):
"""Return the color in RGB."""
return self._rgb
@property
def is_on(self):
"""Return True if device is on."""
return self._state
def update(self):
"""Update the TP-Link Bulb's state."""
from pyHS100 import SmartDeviceException
try:
self._available = True
if self._supported_features == 0:
self.get_features()
self._state = (
self.smartbulb.state == self.smartbulb.BULB_STATE_ON)
# Pull the name from the device if a name was not specified
if self._name == DEFAULT_NAME:
self._name = self.smartbulb.alias
if self._supported_features & SUPPORT_BRIGHTNESS:
self._brightness = brightness_from_percentage(
self.smartbulb.brightness)
if self._supported_features & SUPPORT_COLOR_TEMP:
if (self.smartbulb.color_temp is not None and
self.smartbulb.color_temp != 0):
self._color_temp = kelvin_to_mired(
self.smartbulb.color_temp)
if self._supported_features & SUPPORT_RGB_COLOR:
self._rgb = hsv_to_rgb(self.smartbulb.hsv)
if self.smartbulb.has_emeter:
self._emeter_params[ATTR_CURRENT_POWER_W] = '{:.1f}'.format(
self.smartbulb.current_consumption())
daily_statistics = self.smartbulb.get_emeter_daily()
monthly_statistics = self.smartbulb.get_emeter_monthly()
try:
self._emeter_params[ATTR_DAILY_ENERGY_KWH] \
= "{:.3f}".format(
daily_statistics[int(time.strftime("%d"))])
self._emeter_params[ATTR_MONTHLY_ENERGY_KWH] \
= "{:.3f}".format(
monthly_statistics[int(time.strftime("%m"))])
except KeyError:
# device returned no daily/monthly history
pass
except (SmartDeviceException, OSError) as ex:
_LOGGER.warning("Could not read state for %s: %s", self._name, ex)
self._available = False
@property
def supported_features(self):
"""Flag supported features."""
return self._supported_features
def get_features(self):
"""Determine all supported features in one go."""
if self.smartbulb.is_dimmable:
self._supported_features += SUPPORT_BRIGHTNESS
if self.smartbulb.is_variable_color_temp:
self._supported_features += SUPPORT_COLOR_TEMP
if self.smartbulb.is_color:
self._supported_features += SUPPORT_RGB_COLOR
| apache-2.0 | 1,386,101,221,474,034,700 | 34.126214 | 79 | 0.614704 | false |
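# Quick standalone check of the pure colour/brightness helpers above (illustrative;
# no SmartBulb or Home Assistant instance is needed, only the functions in scope).
red = (255, 0, 0)
print(rgb_to_hsv(red))                  # (0, 100, 100) -- degrees, %, %
print(hsv_to_rgb(rgb_to_hsv(red)))      # (255, 0, 0) -- round-trips cleanly
print(brightness_to_percentage(128))    # 50
print(brightness_from_percentage(50))   # 127.5 -- note the float return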
pseudonym117/Riot-Watcher | src/riotwatcher/Handlers/TypeCorrectorHandler.py | 1 | 1562 | from . import RequestHandler
class TypeCorrectorHandler(RequestHandler):
"""
The TypeCorrector class is meant to correct any inconsistencies in the types
of objects provided as query parameters.
Currently this only involves changing boolean values into strings,
as the API only accepts lower case booleans for some reason.
"""
def preview_request(
self,
region: str,
endpoint_name: str,
method_name: str,
url: str,
query_params: dict,
):
"""
called before a request is processed.
        :param string region: the region the request is being sent to
        :param string endpoint_name: the name of the endpoint being requested
:param string method_name: the name of the method being requested
:param url: the URL that is being requested.
:param query_params: dict: the parameters to the url that is being queried,
e.g. ?key1=val&key2=val2
"""
if query_params is not None:
for key, value in query_params.items():
if isinstance(value, bool):
query_params[key] = str(value).lower()
                # check to see if we have a list/tuple, but not a string
                if not hasattr(value, "strip") and (
                    hasattr(value, "__getitem__") or hasattr(value, "__iter__")
                ):
for idx, val in enumerate(value):
if isinstance(val, bool):
value[idx] = str(val).lower()
| mit | 8,863,373,832,344,485,000 | 35.325581 | 83 | 0.552497 | false |
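# Illustrative sketch (not part of the library) of what the handler above does to a
# query-parameter dict. RiotWatcher normally invokes it internally; the import path
# assumes the package layout shown, and the endpoint/method names and URL are made up.
from riotwatcher.Handlers import TypeCorrectorHandler

corrector = TypeCorrectorHandler()
params = {'includeTimeline': True, 'queue': [420, 440], 'beginIndex': 0}
corrector.preview_request('euw1', 'SomeApi', 'some_method',
                          'https://example.invalid/endpoint', params)
print(params)   # {'includeTimeline': 'true', 'queue': [420, 440], 'beginIndex': 0}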
eddiejessup/nex | tests/test_state.py | 1 | 12435 | import pytest
from nex.constants.instructions import Instructions
from nex.constants.commands import Commands
from nex.constants.specials import Specials
from nex.state import Mode, GlobalState
from nex import box
from nex.box_writer import write_to_dvi_file
from nex.state import ExecuteCommandError
from nex.utils import UserError
from nex.tokens import BuiltToken, CommandToken
from nex.fonts import GlobalFontState
from common import DummyCommands, DummyGlobalFontState, ITok
do_output = False
font_path = '/Users/ejm/projects/nex/fonts'
def CTok(command, value):
return CommandToken(command=command, value=value, parents=None)
def BTok(type_, value):
return BuiltToken(type_=type_, value=value, parents=None)
@pytest.fixture()
def state():
if do_output:
global_font_state = GlobalFontState(search_paths=[font_path])
else:
global_font_state = DummyGlobalFontState()
state = GlobalState.from_defaults(global_font_state=global_font_state)
font_id = state.load_new_font(file_name='cmr10', at_clause=None)
state._select_font(is_global=True, font_id=font_id)
return state
def write(state, file_name):
if do_output:
write_to_dvi_file(state, file_name, write_pdf=True)
def nr_tok(n):
v = BTok(type_='internal_number', value=n)
return BTok(type_='number', value=v)
class DummyTokenQueue:
def replace_tokens_on_input(self, tokens):
pass
def test_single_letter(state):
state.do_indent()
state.add_character_char('a')
state.do_paragraph()
assert len(state.modes) == 1
assert state.mode == Mode.vertical
lst = state.current_page
assert len(lst) == 3
assert isinstance(lst[0], box.FontDefinition)
assert isinstance(lst[1], box.FontSelection)
assert isinstance(lst[2], box.HBox)
hbox = lst[2]
assert isinstance(hbox.contents[0], box.HBox)
assert isinstance(hbox.contents[1], box.Character)
write(state, 'test_single_letter.dvi')
def test_solo_accent(state):
state.do_indent()
state.do_accent(accent_code=23, target_code=None)
state.do_paragraph()
write(state, 'test_solo_accent.dvi')
def test_paired_accent(state):
state.do_indent()
state.do_accent(accent_code=127, target_code=ord('O'))
state.do_accent(accent_code=127, target_code=ord('o'))
state.add_character_char('O')
state.add_character_char('o')
state.do_paragraph()
write(state, 'test_accent.dvi')
def test_v_rule(state):
state.push_mode(Mode.horizontal)
state.add_v_rule(width=int(1e7), height=int(1e2), depth=0)
state.add_v_rule(width=int(1e7), height=int(1e2), depth=int(1e7))
state.do_paragraph()
assert len(state.modes) == 1
assert state.mode == Mode.vertical
lst = state.current_page
assert isinstance(lst[2], box.HBox)
h_box = lst[2]
assert isinstance(h_box.contents[0], box.Rule)
assert isinstance(h_box.contents[1], box.Rule)
write(state, 'test_v_rule.dvi')
def test_if_num(state):
assert state.evaluate_if_num(2, 2, '=')
assert state.evaluate_if_num(5, 0, '>')
assert not state.evaluate_if_num(-6, -10, '<')
def test_if_dimen(state):
assert state.evaluate_if_dim(2, 2, '=')
assert state.evaluate_if_dim(5, 0, '>')
assert not state.evaluate_if_dim(-6, -10, '<')
def test_if_odd(state):
assert not state.evaluate_if_odd(2)
assert state.evaluate_if_odd(5)
assert not state.evaluate_if_odd(-6)
assert state.evaluate_if_odd(-1)
assert not state.evaluate_if_odd(0)
def test_if_mode(state):
assert state.evaluate_if_v_mode()
assert not state.evaluate_if_h_mode()
assert not state.evaluate_if_m_mode()
assert not state.evaluate_if_inner_mode()
state.do_indent()
assert not state.evaluate_if_v_mode()
assert state.evaluate_if_h_mode()
assert not state.evaluate_if_m_mode()
assert not state.evaluate_if_inner_mode()
def test_if_case(state):
assert state.evaluate_if_case(2) == 2
assert state.evaluate_if_case(5) == 5
with pytest.raises(ValueError):
state.evaluate_if_case(-6)
def test_set_box(state):
box_item = box.HBox(contents=[])
state.set_box_register(token_source=None, i=2, item=box_item, is_global=False)
state.append_register_box(i=2, copy=False)
lst = state.current_page
assert lst[-1].contents is box_item.contents
def test_set_box_void(state):
nr_elems_before = len(state.current_page)
state.append_register_box(i=2, copy=False)
nr_elems_after = len(state.current_page)
assert nr_elems_before == nr_elems_after
def test_unbox(state):
box_item = box.VBox([
box.HBox([
box.Glue(20),
]),
box.Glue(100),
])
i_reg = 2
state.set_box_register(token_source=None, i=i_reg, item=box_item, is_global=False)
nr_elems_before = len(state.current_page)
state.append_unboxed_register_v_box(i=i_reg, copy=True)
nr_elems_after = len(state.current_page)
assert nr_elems_after == nr_elems_before + 2
unboxed_contents = state.get_unboxed_register_box(i=i_reg, copy=False,
horizontal=False)
inner_glue = unboxed_contents[0].contents[0]
assert isinstance(inner_glue, box.Glue)
assert inner_glue.is_set
outer_glue = unboxed_contents[1]
assert isinstance(outer_glue, box.Glue)
assert not outer_glue.is_set
# Should be empty now, because I called with copy == False just then.
assert state.get_register_box(i=i_reg, copy=False) is None
def test_unbox_bad_box_type(state):
box_item = box.HBox(contents=[box.Rule(1, 1, 1), box.Rule(2, 2, 2)])
state.set_box_register(token_source=None, i=2, item=box_item, is_global=False)
with pytest.raises(UserError):
state.append_unboxed_register_v_box(i=2, copy=False)
def test_get_box_dimen(state):
box_item = box.HBox(contents=[], to=100)
state.set_box_register(token_source=None, i=2, item=box_item, is_global=False)
b = state.get_box_dimen(i=2, type_=Instructions.box_dimen_width.value)
assert b == 100
def test_space_factor(state):
state.do_indent()
a_sf = 900
state.codes.set(code_type=Instructions.space_factor_code.value,
char='a',
code=a_sf,
is_global=False)
state.codes.set(code_type=Instructions.space_factor_code.value,
char='b',
code=1100,
is_global=False)
# Check space factor starts at 1000.
assert state.specials.get(Specials.space_factor) == 1000
# Check space factor changes to letter's space factor after adding it.
state.add_character_char('a')
assert state.specials.get(Specials.space_factor) == a_sf
    # Check space factor doesn't jump from less than 1000 to more than 1000.
state.add_character_char('b')
assert state.specials.get(Specials.space_factor) == 1000
# Make space factor be non-1000, then check adding a non-character box sets
# it back to 1000.
state.add_character_char('a')
state.add_v_rule(10, 10, 10)
assert state.specials.get(Specials.space_factor) == 1000
def test_after_group(state):
# Input "{\aftergroup\space \aftergroup a}".
state.start_local_group()
t_sp = ITok(Instructions.space)
state.push_to_after_group_queue(t_sp)
assert list(state.after_group_queue) == [t_sp]
t_a = ITok(Instructions.a)
state.push_to_after_group_queue(t_a)
assert list(state.after_group_queue) == [t_sp, t_a]
tok_source = DummyTokenQueue()
state.end_group(tok_source)
assert not state.after_group_queue
def test_after_group_scoped(state):
# Input "{\aftergroup\space {\aftergroup a}}".
state.start_local_group()
t_sp = ITok(Instructions.space)
state.push_to_after_group_queue(t_sp)
assert list(state.after_group_queue) == [t_sp]
# Assume we have 'b' waiting on the queue to be executed.
tok_source = DummyTokenQueue()
state.start_local_group()
t_a = ITok(Instructions.a)
state.push_to_after_group_queue(t_a)
assert list(state.after_group_queue) == [t_a]
state.end_group(tok_source)
assert list(state.after_group_queue) == [t_sp]
state.end_group(tok_source)
assert not state.after_group_queue
def test_token_executor(state):
tok = CTok(command=DummyCommands.verb, value=None)
with pytest.raises(ExecuteCommandError):
state.execute_command_token(tok, banisher=None)
with pytest.raises(ExecuteCommandError):
state.execute_command_tokens(iter([tok]), banisher=None)
def test_command_token_set_box(state):
i_reg = 5
box_tok = BTok(type_='box',
value=BTok(type_='explicit_box',
value={'box_type': Instructions.h_box.value,
'contents': [],
'specification': None}))
set_box_tok = CTok(command=Commands.assign,
value=BTok(type_=Instructions.set_box.value,
value={
'box': box_tok,
'nr': nr_tok(i_reg),
'global': True,
}))
state.execute_command_token(set_box_tok, banisher=None)
def test_command_token_get_box(state):
i_reg = 5
# Get a box in to retrieve.
box_item = box.HBox(contents=[])
state.set_box_register(token_source=None, i=i_reg, item=box_item, is_global=False)
get_box_tok = CTok(command=Commands.add_box,
value=BTok(type_='box_register',
value={
'retrieve_type': Instructions.box.value,
'number': nr_tok(i_reg),
}))
state.execute_command_token(get_box_tok, banisher=None)
lst = state.current_page
assert lst[-1].contents is box_item.contents
    assert state.get_register_box(i=i_reg, copy=False) is None
def test_command_token_add_h_rule(state):
add_h_rule_tok = CTok(command=Commands.add_horizontal_rule,
value={'width': None,
'height': None,
'depth': None})
state.execute_command_token(add_h_rule_tok, banisher=None)
lst = state.current_page
assert len(lst) == 3
assert isinstance(lst[2], box.Rule)
rule = lst[2]
assert rule.width == 0
assert rule.depth == 0
assert rule.height > 0
def test_command_token_code_assignment(state):
sf_variable = BTok(type_=Instructions.space_factor_code.value,
value=nr_tok(ord('a')))
set_sf_tok = CTok(command=Commands.assign,
value=BTok(type_='code_assignment',
value={
'variable': sf_variable,
'code': nr_tok(900),
'global': True
}))
state.execute_command_token(set_sf_tok, banisher=None)
assert state.codes.get_space_factor_code('a') == 900
def test_command_token_unbox(state):
i_reg = 3
box_item = box.VBox(contents=[box.Rule(1, 1, 1), box.Rule(2, 2, 2)])
state.set_box_register(token_source=None,
i=i_reg, item=box_item, is_global=False)
nr_elems_before = len(state.current_page)
get_box_tok = CTok(command=Commands.unpack_vertical_box,
value={'nr': nr_tok(i_reg),
'cmd_type': Instructions.un_v_copy})
state.execute_command_token(get_box_tok, banisher=None)
nr_elems_after = len(state.current_page)
assert nr_elems_after == nr_elems_before + 2
# Should still work, since we unpacked with copy.
state.get_register_box(i=i_reg, copy=False)
def test_command_token_message(state):
message_tok = CTok(command=Commands.message,
value={'content': []})
err_message_tok = CTok(command=Commands.error_message,
value={'content': []})
state.execute_command_token(message_tok, banisher=None)
state.execute_command_token(err_message_tok, banisher=None)
| mit | -7,880,691,049,410,203,000 | 33.256198 | 86 | 0.616003 | false |
ThunderGemios10/The-Super-Duper-Script-Editor-2 | editor_form.py | 1 | 98101 | # -*- coding: utf-8 -*-
################################################################################
### Copyright © 2012-2013 BlackDragonHunt
###
### This file is part of the Super Duper Script Editor.
###
### The Super Duper Script Editor is free software: you can redistribute it
### and/or modify it under the terms of the GNU General Public License as
### published by the Free Software Foundation, either version 3 of the License,
### or (at your option) any later version.
###
### The Super Duper Script Editor is distributed in the hope that it will be
### useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
### MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
### GNU General Public License for more details.
###
### You should have received a copy of the GNU General Public License
### along with the Super Duper Script Editor.
### If not, see <http://www.gnu.org/licenses/>.
################################################################################
from PyQt4 import QtCore, QtGui, Qt
from PyQt4.QtGui import QProgressDialog, QProgressBar, QTextCursor, QImage, QApplication, QShortcut, QKeySequence
from PyQt4.QtCore import QProcess, QString
from ui.editor import Ui_Editor
from anagram import AnagramEditor
from console import Console
from diffs_menu import DiffsMenu
from eboot_editor import EbootEditor
from font_gen_menu import FontGenMenu
from open_menu import OpenMenu
from script_dump_menu import ScriptDumpMenu
from search_menu import SearchMenu
from settings_menu import SettingsMenu
from terminology_editor import TerminologyEditor
import codecs
import logging
import os
import re
import shutil
import time
from enum import Enum
import backup
import common
from dupe_db import db as dupe_db
import dir_tools
from import_export import *
import script_analytics
import text_printer
from text_format import TEXT_FORMATS
import tree
# from audio.bgm_player import BGMPlayer
from pack.packer import CpkPacker
from list_files import list_all_files
from object_labels import get_map_name, get_char_name, get_obj_label, get_bgm_name
from mtb import MTBParser
from nonstop import NonstopParser
from progress import ProgressCalculator
from script_file import ScriptFile, TAG_KILLER
from script_jump import ScriptJump
from script_pack import ScriptPack
from similarity_db import SimilarityDB
from voice import get_voice_file
from voice_player import VoicePlayer
from word_count import count_words
from iso_builder import IsoBuilder
IMAGE_POS = Enum("original", "translated")
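# Note: this appears to rely on the old PyPI "enum" package, whose Enum() takes
# the member names positionally; the Python 3 stdlib / enum34 functional API has
# a different signature, so IMAGE_POS.original would not resolve there.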
_LOGGER_NAME = common.LOGGER_NAME + "." + __name__
_LOGGER = logging.getLogger(_LOGGER_NAME)
################################################################################
### ###
### EditorForm Class ###
### ###
################################################################################
class EditorForm(QtGui.QMainWindow):
def __init__(self, parent = None):
##############################
### VARIABLES
##############################
super(EditorForm, self).__init__(parent)
self.ui = Ui_Editor()
self.ui.setupUi(self)
self.directory = ""
self.script_pack = ScriptPack()
self.changed = []
self.cur_script = 0
self.similarity_db = SimilarityDB()
# If there are dupes of a file in the same folder, we want to edit them
# synchronously, so we keep track of those dupes here.
self.internal_dupes = {}
self.bg = None
self.console = Console()
self.iso_builder = IsoBuilder(self)
##############################
### CUSTOM ACTIONS
##############################
self.ui.actionSeparator = QtGui.QAction("", None)
self.ui.actionSeparator2 = QtGui.QAction("", None)
self.ui.actionSeparator .setSeparator(True)
self.ui.actionSeparator2.setSeparator(True)
### SAVE IMAGE ###
self.ui.actionSaveImgTrans = QtGui.QAction("Save image...", None, triggered = (lambda: self.saveImage(IMAGE_POS.translated)))
self.ui.actionSaveImgOrig = QtGui.QAction("Save image...", None, triggered = (lambda: self.saveImage(IMAGE_POS.original)))
self.ui.lblTranslated.addAction(self.ui.actionSaveImgTrans)
self.ui.lblOriginal.addAction(self.ui.actionSaveImgOrig)
### COPY NODE PATH ###
self.ui.actionCopyActivePath = QtGui.QAction("Copy path", None, triggered = self.copyActivePath)
self.ui.actionCopyDupePath = QtGui.QAction("Copy path", None, triggered = (lambda: self.copyNodePath(self.ui.treeDupes.currentItem())))
self.ui.actionCopySimilarPath = QtGui.QAction("Copy path", None, triggered = (lambda: self.copyNodePath(self.ui.treeSimilar.currentItem())))
self.ui.actionCopyReferencePath = QtGui.QAction("Copy path", None, triggered = (lambda: self.copyNodePath(self.ui.treeReferences.currentItem())))
self.ui.lstFiles .addAction(self.ui.actionCopyActivePath)
self.ui.treeDupes .addAction(self.ui.actionCopyDupePath)
self.ui.treeSimilar .addAction(self.ui.actionCopySimilarPath)
self.ui.treeReferences.addAction(self.ui.actionCopyReferencePath)
# Go to script jump
self.ui.actionGotoScriptJump = QtGui.QAction("Jump to folder", None, triggered = self.gotoScriptJump)
self.ui.lstFiles.addAction(self.ui.actionGotoScriptJump)
### SEPARATOR ###
self.ui.lstFiles .addAction(self.ui.actionSeparator)
self.ui.treeDupes .addAction(self.ui.actionSeparator)
self.ui.treeSimilar .addAction(self.ui.actionSeparator)
self.ui.treeReferences.addAction(self.ui.actionSeparator)
### SHOW FILE IN EDITOR ###
self.ui.actionShowDupeInEditor = QtGui.QAction("Show in editor", None, triggered = (lambda: self.showNodeInEditor(self.ui.treeDupes.currentItem())))
self.ui.actionShowSimilarInEditor = QtGui.QAction("Show in editor", None, triggered = (lambda: self.showNodeInEditor(self.ui.treeSimilar.currentItem())))
self.ui.actionShowReferenceInEditor = QtGui.QAction("Show in editor", None, triggered = (lambda: self.showNodeInEditor(self.ui.treeReferences.currentItem())))
self.ui.treeDupes .addAction(self.ui.actionShowDupeInEditor)
self.ui.treeSimilar .addAction(self.ui.actionShowSimilarInEditor)
self.ui.treeReferences.addAction(self.ui.actionShowReferenceInEditor)
### SHOW FILE IN EXPLORER ###
self.ui.actionShowDupeInExplorer = QtGui.QAction("Show in explorer", None, triggered = (lambda: self.showNodeInExplorer(self.ui.treeDupes.currentItem())))
self.ui.actionShowSimilarInExplorer = QtGui.QAction("Show in explorer", None, triggered = (lambda: self.showNodeInExplorer(self.ui.treeSimilar.currentItem())))
self.ui.actionShowReferenceInExplorer = QtGui.QAction("Show in explorer", None, triggered = (lambda: self.showNodeInExplorer(self.ui.treeReferences.currentItem())))
self.ui.treeDupes .addAction(self.ui.actionShowDupeInExplorer)
self.ui.treeSimilar .addAction(self.ui.actionShowSimilarInExplorer)
self.ui.treeReferences.addAction(self.ui.actionShowReferenceInExplorer)
### SEPARATOR ###
self.ui.treeDupes .addAction(self.ui.actionSeparator2)
self.ui.treeSimilar .addAction(self.ui.actionSeparator2)
self.ui.treeReferences.addAction(self.ui.actionSeparator2)
### DUPES/SIMILARITY STUFF ###
self.ui.actionAddDupeSim = QtGui.QAction("Mark as duplicate", None, triggered = (lambda: self.addDupe(self.ui.treeSimilar.currentItem())))
self.ui.actionAddDupeRef = QtGui.QAction("Mark as duplicate", None, triggered = (lambda: self.addDupe(self.ui.treeReferences.currentItem())))
self.ui.actionRemoveSimilarity = QtGui.QAction("Remove similarity", None, triggered = self.removeSimilarityMenu)
self.ui.actionRemoveDupeRelated = QtGui.QAction("Remove duplicate", None, triggered = self.removeDupeRelated)
self.ui.actionRemoveDupeAll = QtGui.QAction("Remove all duplicates", None, triggered = self.removeDupeAll)
self.ui.treeDupes .addAction(self.ui.actionRemoveDupeRelated)
self.ui.treeDupes .addAction(self.ui.actionRemoveDupeAll)
self.ui.treeSimilar .addAction(self.ui.actionAddDupeSim)
self.ui.treeSimilar .addAction(self.ui.actionRemoveSimilarity)
self.ui.treeReferences.addAction(self.ui.actionAddDupeRef)
### SCRIPT FILES TREE ###
#self.ui.actionInsertLine = QtGui.QAction("Insert line after selection", None, triggered = self.insertLine)
self.ui.actionInsertLine.triggered.connect(self.insertLine)
self.ui.actionRemoveDupeActive = QtGui.QAction("Remove from duplicate group", None, triggered = self.removeDupeActive)
self.ui.lstFiles.addAction(self.ui.actionInsertLine)
self.ui.lstFiles.addAction(self.ui.actionRemoveDupeActive)
##############################
### MENU BAR ACTIONS
##############################
self.ui.actionCopyOrig.triggered.connect(self.copyFromOrig)
self.ui.actionOpen.triggered.connect(self.showOpenMenu)
self.ui.actionGoToReference.triggered.connect(self.showGotoMenu)
self.ui.actionSave.triggered.connect(self.saveChanges)
self.ui.actionExit.triggered.connect(self.close)
self.ui.actionReloadDirectory.triggered.connect(self.reloadDirectory)
self.ui.actionTerminology.triggered.connect(self.showTerminologyEditor)
self.ui.actionConsole.triggered.connect(self.console.show)
self.ui.actionScriptDumper.triggered.connect(self.showScriptDumper)
self.ui.actionBuild.triggered .connect(self.buildArchives)
self.ui.actionSearch.triggered .connect(self.showSearchMenu)
self.ui.actionShowPrefs.triggered .connect(self.showSettingsMenu)
self.ui.actionCalculateProgress.triggered .connect(self.showProgressCalculator)
self.ui.actionAbout.triggered .connect(self.showAbout)
self.ui.actionImportData01.triggered.connect(self.importData01)
self.ui.actionExportData01.triggered.connect(self.exportData01)
self.ui.actionFirstFile.triggered.connect(self.firstFile)
self.ui.actionPreviousFile.triggered.connect(self.prevFile)
self.ui.actionNextFile.triggered.connect(self.nextFile)
self.ui.actionLastFile.triggered.connect(self.lastFile)
self.ui.actionHighlightTerminology.triggered.connect(self.toggleHighlight)
self.ui.actionAutoExpand.triggered .connect(self.updateConfig)
self.ui.actionAutoPlayVoice.triggered .connect(self.updateConfig)
self.ui.actionShowDirectory.triggered .connect(self.showCurrentInExplorer)
self.ui.actionReloadDupesDB.triggered .connect(self.reloadDupes)
self.ui.actionCheckForErrors.triggered.connect(self.checkForErrors)
self.ui.actionFontGenerator.triggered .connect(self.showFontGenerator)
##############################
### SIGNALS
##############################
self.ui.txtComments.refs_edited.connect(self.updateRefs)
self.ui.btnAddSingleQuotes.clicked.connect(lambda: self.surroundSelection(u"‘", u"’"))
self.ui.btnAddQuotes.clicked.connect(lambda: self.surroundSelection(u"“", u"”"))
self.ui.btnAddEnDash.clicked.connect(lambda: self.replaceSelection(u"–"))
self.ui.btnAddDash.clicked.connect(lambda: self.replaceSelection(u"―"))
self.ui.btnAddBrackets.clicked.connect(lambda: self.surroundSelection(u"【", u"】"))
add_clt = lambda: self.surroundSelection((u"<CLT %02d>" % self.ui.spnClt.value()), u"<CLT>")
add_clt_rev = lambda: self.surroundSelection(u"<CLT>", (u"<CLT %02d>" % self.ui.spnClt.value()))
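    # Left-click wraps the selection as <CLT nn>...<CLT> to color it; right-click
    # inserts the tags in the opposite order, presumably to revert a selection
    # inside an already-colored span back to the default color.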
self.ui.btnAddClt.clicked.connect(add_clt)
self.ui.btnAddClt.rightClicked.connect(add_clt_rev)
self.ui.actionInsertCLT.triggered.connect(add_clt)
# self.ui.shortcutAddClt = QShortcut(QKeySequence("Ctrl+Alt+C"), self.ui.txtTranslated)
# self.ui.shortcutAddClt.activated.connect(lambda: self.surroundSelection((u"<CLT %d>" % self.ui.spnClt.value()), u"<CLT>"))
# self.ui.shortcutAddCltReversed = QShortcut(QKeySequence("Ctrl+Alt+Shift+C"), self.ui.txtTranslated)
# self.ui.shortcutAddCltReversed.activated.connect(lambda: self.surroundSelection(u"<CLT>", (u"<CLT %d>" % self.ui.spnClt.value())))
self.ui.shortcutCltUp = QShortcut(QKeySequence("Ctrl++"), self)
self.ui.shortcutCltDown = QShortcut(QKeySequence("Ctrl+-"), self)
self.ui.shortcutCltUp.activated.connect(lambda: self.ui.spnClt.setValue(self.ui.spnClt.value() + 1))
self.ui.shortcutCltDown.activated.connect(lambda: self.ui.spnClt.setValue(self.ui.spnClt.value() - 1))
##############################
### TOOLBAR STUFF
##############################
toolbar_spacer = QtGui.QWidget()
toolbar_spacer.setSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)
self.ui.toolBar.addWidget(toolbar_spacer)
# Right-aligned actions
self.ui.toolBar.addAction(self.ui.actionCalculateProgress)
self.ui.toolBar.addAction(self.ui.actionCheckForErrors)
self.ui.toolBar.addAction(self.ui.actionBuild)
##############################
### STATUS BAR STUFF
##############################
self.ui.statusLabelMode = QtGui.QLabel("Mode")
self.ui.statusLabelCursor = QtGui.QLabel("Cursor info")
self.ui.statusLabelWordCount = QtGui.QLabel("Word count")
self.ui.statusLabelDirInfo = QtGui.QLabel("Dir Info")
self.ui.statusLabelRelated = QtGui.QLabel("Related")
self.ui.statusbar.addWidget(self.ui.statusLabelMode, stretch = 505)
self.ui.statusbar.addWidget(self.ui.statusLabelCursor, stretch = 342)
self.ui.statusbar.addWidget(self.ui.statusLabelWordCount, stretch = 114)
self.ui.statusbar.addWidget(self.ui.statusLabelDirInfo, stretch = 147)
self.ui.statusbar.addWidget(self.ui.statusLabelRelated, stretch = 320)
##############################
### MISC
##############################
self.voice_player = VoicePlayer()
self.ui.volumeSlider.setAudioOutput(self.voice_player.output)
# self.ui.barVoiceVolume.valueChanged.connect(lambda value: self.voice_player.set_volume(value / 100.0))
# self.bgm_player = BGMPlayer()
# self.ui.barBGMVolume.valueChanged.connect(lambda value: self.bgm_player.set_volume(value / 100.0))
self.hide_original = False
self.updateActions()
self.loadDirectory(common.editor_config.last_opened)
self.search_menu = SearchMenu()
self.search_menu.open_clicked.connect(self.searchMenuOpenClicked)
self.open_menu = OpenMenu(self, self.directory)
self.progress_calc = ProgressCalculator()
self.terminology_editor = TerminologyEditor()
self.font_gen_menu = FontGenMenu()
##############################################################################
### @fn updateActions()
### @desc Takes values from the config file and updates UI elements to match.
##############################################################################
def updateActions(self):
self.ui.actionHighlightTerminology.setChecked(common.editor_config.highlight_terms)
self.ui.actionAutoExpand.setChecked(common.editor_config.auto_expand)
self.ui.actionAutoPlayVoice.setChecked(common.editor_config.auto_play_voice)
##############################################################################
### @fn updateConfig()
### @desc Takes setting changes made on the UI and update the config file.
##############################################################################
def updateConfig(self):
common.editor_config.highlight_terms = self.ui.actionHighlightTerminology.isChecked()
common.editor_config.auto_expand = self.ui.actionAutoExpand.isChecked()
common.editor_config.auto_play_voice = self.ui.actionAutoPlayVoice.isChecked()
common.editor_config.last_opened = self.directory
common.editor_config.save_config()
##############################################################################
### @fn loadDirectory(directory)
### @desc Parses and loads a directory with script files.
##############################################################################
def loadDirectory(self, directory, clear_similarity = True, selected_file = None):
directory = dir_tools.normalize(directory)
# Record our last selected file before we leave this directory.
if not self.directory == "":
self.recordSelectedFile()
# See if we're trying to load a special kind of directory.
if directory[:7] == "anagram":
self.loadAnagram(directory)
return
elif directory[:7] == "nonstop" or directory[:6] == "hanron" or directory[:6] == "kokoro":
parser = NonstopParser()
parser.load(directory)
self.script_pack = parser.script_pack
# No, you can't insert lines into the nonstop debates. (ノ`Д´)ノ彡┻━┻
self.ui.actionInsertLine.setEnabled(False)
elif directory[:8] == "hs_mtb_s" or directory[:10] == "dr2_mtb2_s":
parser = MTBParser()
parser.load(directory)
self.script_pack = parser.script_pack
# No, you can't insert lines into the MTBs either.
self.ui.actionInsertLine.setEnabled(False)
else:
try:
script_pack = ScriptPack(directory, common.editor_config.data01_dir)
except Exception as e:
QtGui.QMessageBox.critical(self, "Error", str(e))
return
else:
if len(script_pack) <= 0:
QtGui.QMessageBox.warning(self, "No Lines", "Could not load %s. No lines found." % directory)
return
self.script_pack = script_pack
if not self.script_pack.wrd_file == None:
self.ui.actionInsertLine.setEnabled(True)
else:
self.ui.actionInsertLine.setEnabled(False)
# So we don't trigger any play commands while loading.
temp_auto_voice = common.editor_config.auto_play_voice
common.editor_config.auto_play_voice = False
# If we weren't given a file to start out on, see if we have something
# we can use before we go inserting the files, which will toss up our data.
if selected_file == None and directory in common.editor_config.last_file:
selected_file = common.editor_config.last_file[directory]
self.cur_script = 0
self.changed = [False] * len(self.script_pack)
self.ui.lstFiles.clear()
self.ui.lblFolderName.setText(directory)
self.directory = directory
# Getting to be a bit of a memory whore if we leave the data around too long.
# if clear_similarity:
# self.similarity_db.clear()
self.similarity_db.clear_queue()
# Reversed so we can add to the Similarity DB simultaneously. We want the
    # queries at the top, but in reverse order, so we prioritize finding data
# about the folder we're currently editing, but still go from the top down.
for script in reversed(self.script_pack):
basename = os.path.basename(script.filename)
# Add our easy to read name to the main list.
self.ui.lstFiles.insertItem(0, QtGui.QListWidgetItem(basename))
self.similarity_db.queue_query_at_top(os.path.join(self.script_pack.get_real_dir(), basename))
# We're safe now.
common.editor_config.auto_play_voice = temp_auto_voice
# Some cleanup.
self.findInternalDupes()
self.updateConfig()
self.setWindowModified(False)
if selected_file == None and self.directory in common.editor_config.last_file:
selected_file = common.editor_config.last_file[self.directory]
if not selected_file == None:
self.setCurrentFile(selected_file)
else:
self.ui.lstFiles.setCurrentRow(0)
self.updateStatusBar()
##############################################################################
### @fn setCurrentFile(filename)
### @desc Selects the file in the current directory with the given name.
##############################################################################
def setCurrentFile(self, filename):
nodes = self.ui.lstFiles.findItems(filename, Qt.Qt.MatchFixedString)
if len(nodes) >= 1:
self.ui.lstFiles.setCurrentItem(nodes[0])
self.ui.lstFiles.scrollToItem(nodes[0], QtGui.QAbstractItemView.PositionAtCenter)
else:
self.ui.lstFiles.setCurrentRow(0)
##############################################################################
### @fn loadAnagram(anagram)
### @desc Shows the anagram editor and hides the main window until it's done.
##############################################################################
def loadAnagram(self, anagram):
self.hide()
anagram_editor = AnagramEditor()
path = os.path.join(common.editor_config.data01_dir_jp_all, anagram)
anagram_editor.load(path)
anagram_editor.exec_()
self.show()
##############################################################################
### @fn loadEbootText()
### @desc Shows the EBOOT text editor and hides the main window until it's done.
##############################################################################
def loadEbootText(self):
self.hide()
eboot_editor = EbootEditor()
eboot_editor.exec_()
self.show()
##############################################################################
### @fn recordSelectedFile()
### @desc Records the currently selected file into our history of selected files.
##############################################################################
def recordSelectedFile(self):
filename = os.path.basename(self.script_pack[self.cur_script].filename)
common.editor_config.last_file[self.directory] = filename
##############################################################################
### @fn insertLine()
### @desc Inserts a new line after the selected line.
##############################################################################
def insertLine(self):
# Can't insert without a wrd file.
if self.script_pack.wrd == None:
# Shouldn't be enabled anyway, but we'll be safe about it.
self.ui.actionInsertLine.setEnabled(False)
return
if not isinstance(self.script_pack[self.cur_script], ScriptFile):
return
# Make absolutely sure we want to do this.
# Absolutely sure.
# Seriously.
    # 真剣で。 (That's "seriously" again, in Japanese.)
answer = QtGui.QMessageBox.warning(
self,
"Insert Line",
"You are about to insert a new line into the script. This action cannot be undone. " +
"The added line will not have any similarities or duplicates, as there is no good way to keep track of these things for newly created lines.\n\n"
"If you made modifications to the decompiled .py file since loading this folder, they will be lost. If you want to keep your changes, click the Reload button and try inserting again.\n\n" +
"Proceed?",
buttons = QtGui.QMessageBox.Yes | QtGui.QMessageBox.No,
defaultButton = QtGui.QMessageBox.No
)
if answer == QtGui.QMessageBox.No:
return
if not self.askUnsavedChanges():
return
dir = os.path.join(self.script_pack.get_real_dir())
filename = os.path.basename(self.script_pack[self.cur_script].filename)
filename = os.path.join(dir, filename)
filename = dir_tools.normalize(filename)
dupes = dupe_db.files_in_same_group(filename)
if not dupes == None:
answer = QtGui.QMessageBox.warning(
self,
"Insert Line",
"You are about to insert a new line after a file that has dupes.\n\n" +
"This can really screw things up if you're not careful.\n\n" +
"Proceed?",
buttons = QtGui.QMessageBox.Yes | QtGui.QMessageBox.No,
defaultButton = QtGui.QMessageBox.No
)
if answer == QtGui.QMessageBox.No:
return
# If we're totally sure, then we'll get going.
# We want the file ID as it's referenced in the wrd file.
insert_after = self.script_pack[self.cur_script].scene_info.file_id
# Make sure it works, first.
try:
# new_wrd, new_index = wrd_inserter.insert_line(wrd_file, insert_after)
new_index = self.script_pack.wrd.insert_line(insert_after)
except Exception as e:
QtGui.QMessageBox.critical(self, "Error", str(e))
return
# Get our backup out of the way, first.
source_dir = common.editor_config.data01_dir
wrd_file = self.script_pack.wrd_file
py_file = self.script_pack.py_file
wrd_basename = os.path.basename(wrd_file)
py_basename = os.path.basename(py_file)
# backup.backup_files(source_dir, [wrd_file[len(source_dir) + 1:], py_file[len(source_dir) + 1:]], suffix = "_NEWLINE")
backup.backup_files(source_dir, [py_file[len(source_dir) + 1:]], suffix = "_NEWLINE")
changes_dir = os.path.join(common.editor_config.changes_dir, self.script_pack.get_real_dir())
original_dir = os.path.join(common.editor_config.data01_dir, self.script_pack.get_real_dir())
# A copy for our change set.
# changes_wrd = os.path.join(changes_dir, wrd_basename)
changes_py = os.path.join(changes_dir, py_basename)
if not os.path.isdir(changes_dir):
os.makedirs(changes_dir)
# Dump our wrd file to disk.
# self.script_pack.wrd.save_bin(wrd_file)
# self.script_pack.wrd.save_bin(changes_wrd)
self.script_pack.wrd.save_python(py_file)
self.script_pack.wrd.save_python(changes_py)
# Then duplicate the selected file with the new name.
new_filename = "%04d.txt" % new_index
shutil.copy(self.script_pack[self.cur_script].filename, os.path.join(original_dir, new_filename))
shutil.copy(self.script_pack[self.cur_script].filename, os.path.join(changes_dir, new_filename))
# Reload the directory, so the changes are visible.
self.loadDirectory(self.directory, clear_similarity = False, selected_file = os.path.basename(self.script_pack[self.cur_script].filename))
##############################################################################
### @fn findInternalDupes()
### @desc Find duplicates between files in this folder.
##############################################################################
def findInternalDupes(self):
self.internal_dupes = {}
dir = dir_tools.normalize(self.script_pack.get_real_dir())
# So we can get indexes in the script list without a ton of looping.
name_to_index = {}
for index, script in enumerate(self.script_pack):
if not isinstance(script, ScriptFile):
continue
name = os.path.basename(script.filename)
name_to_index[name] = index
for index, script in enumerate(self.script_pack):
if not isinstance(script, ScriptFile):
continue
filename = os.path.basename(script.filename)
filename = os.path.join(dir, filename)
filename = dir_tools.normalize(filename)
dupes = dupe_db.files_in_same_group(filename)
self.internal_dupes[index] = []
if dupes == None:
continue
for dupe in dupes:
if dupe == filename:
continue
dupe_dir, dupe_name = os.path.split(dupe)
if dupe_dir == dir and dupe_name in name_to_index:
self.internal_dupes[index].append(name_to_index[dupe_name])
##############################################################################
### @fn showImage(image_pos)
### @param image_pos -- IMAGE_POS.original or IMAGE_POS.translated
### If None, will update both images.
##############################################################################
def showImage(self, image_pos = None):
bg = self.bg
if bg == None:
bg = QImage(text_printer.IMG_W, text_printer.IMG_H, QImage.Format_ARGB32_Premultiplied)
bg.fill(QtGui.QColor(0, 0, 0, 255).rgba())
max = 0
kill_blanks = True
scene_info = self.script_pack[self.cur_script].scene_info
format = TEXT_FORMATS[scene_info.mode] if scene_info.format == None else scene_info.format
mangle = common.editor_config.mangle_text
if image_pos == None or image_pos == IMAGE_POS.original:
if not self.hide_original:
text = common.qt_to_unicode(self.ui.txtOriginal.toPlainText())
orig = text_printer.print_text(bg, text, scene_info.mode, format, mangle)
if scene_info.special == common.SCENE_SPECIAL.option:
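          # Option scenes appear to draw the same text a second time with the
          # option format layered over the normal render.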
orig = text_printer.print_text(orig, text, common.SCENE_SPECIAL.option, TEXT_FORMATS[common.SCENE_SPECIAL.option], mangle)
qt_pixmap = QtGui.QPixmap.fromImage(orig)
self.ui.lblOriginal.setPixmap(qt_pixmap)
else:
hidden = QImage(text_printer.IMG_W, text_printer.IMG_H, QImage.Format_ARGB32_Premultiplied)
hidden.fill(QtGui.QColor(0, 0, 0, 255).rgba())
qt_pixmap = QtGui.QPixmap.fromImage(hidden)
self.ui.lblOriginal.setPixmap(qt_pixmap)
if image_pos == None or image_pos == IMAGE_POS.translated:
text = common.qt_to_unicode(self.ui.txtTranslated.toPlainText())
trans = text_printer.print_text(bg, text, scene_info.mode, format, mangle)
if scene_info.special == common.SCENE_SPECIAL.option:
trans = text_printer.print_text(trans, text, common.SCENE_SPECIAL.option, TEXT_FORMATS[common.SCENE_SPECIAL.option], mangle)
qt_pixmap = QtGui.QPixmap.fromImage(trans)
self.ui.lblTranslated.setPixmap(qt_pixmap)
##############################################################################
### @fn updateUI()
### @desc Matches the UI elements to the selected script data.
##############################################################################
def updateUI(self):
scene_info = self.script_pack[self.cur_script].scene_info
###################################################
### SPEAKER
###################################################
self.ui.lblSpeaker.setToolTip("Speaker ID: %d" % scene_info.speaker)
speaker = get_char_name(scene_info.speaker, common.editor_config.data01_dir)
if speaker == None:
speaker = "N/A"
self.ui.lblSpeaker.setText(speaker)
###################################################
### SPRITE
###################################################
self.ui.lblSprite.setToolTip("Sprite ID: %d" % scene_info.sprite.sprite_id)
sprite_char = get_char_name(scene_info.sprite.char_id, common.editor_config.data01_dir)
if sprite_char == None:
sprite_char = "N/A"
self.ui.lblSprite.setText(sprite_char)
###################################################
### SCENE INFO
###################################################
scene_text = common.chapter_to_text(scene_info.chapter)
if not scene_info.scene == -1:
if scene_info.chapter == common.CHAPTER_FREETIME:
scene_text += ": " + get_char_name(scene_info.scene, common.editor_config.data01_dir)
elif scene_info.chapter == common.CHAPTER_ISLAND and scene_info.scene >= 701 and scene_info.scene <= 715:
scene_text += ": " + get_char_name(scene_info.scene - 700, common.editor_config.data01_dir)
else:
scene_text += ", Scene %d" % scene_info.scene
self.ui.lblScene.setText(scene_text)
self.ui.lblScene.setToolTip("BGD: %d\nCut-in: %d\nFlash: %d\nMovie: %d" % (scene_info.bgd, scene_info.cutin, scene_info.flash, scene_info.movie))
###################################################
### MODE
###################################################
self.ui.lblMode.setText(common.mode_to_text(scene_info.mode))
###################################################
### BOX COLOR
###################################################
# I don't actually know how to figure this out, anyway.
#self.ui.lblColor.setText(str(scene_info.box_color).title())
###################################################
### MAP
###################################################
map_name = get_map_name(scene_info.room, common.editor_config.data01_dir)
self.ui.lblArea.setToolTip("Area ID: %d" % scene_info.room)
if not map_name == None:
self.ui.lblArea.setText(map_name)
else:
self.ui.lblArea.setText("N/A")
###################################################
### VOICE
###################################################
voice_tooltip = "Chapter: %d\nCharacter: %d\nVoice ID: %d" % (scene_info.voice.chapter, scene_info.voice.char_id, scene_info.voice.voice_id)
voice = get_voice_file(scene_info.voice)
if not voice == None:
voice_char = get_char_name(scene_info.voice.char_id, common.editor_config.data01_dir)
if voice_char == None:
voice_char = "N/A"
self.ui.lblVoice.setText(voice_char)
self.ui.btnPlayVoice.setEnabled(True)
voice_tooltip += "\nFile: %d" % voice
else:
self.ui.lblVoice.setText("N/A")
self.ui.btnPlayVoice.setEnabled(False)
self.ui.lblVoice.setToolTip(voice_tooltip)
###################################################
### BGM
###################################################
# if scene_info.bgm != -1:
# bgm_tooltip = "BGM ID: %d" % scene_info.bgm
# bgm_name = get_bgm_name(scene_info.bgm, common.editor_config.data01_dir)
# self.ui.lblBGM.setText(bgm_name)
# self.ui.btnPlayBGM.setEnabled(True)
# else:
# bgm_tooltip = "BGM ID: N/A"
# self.ui.lblBGM.setText("N/A")
# self.ui.btnPlayBGM.setEnabled(False)
# self.ui.lblBGM.setToolTip(bgm_tooltip)
###################################################
### SPECIAL
###################################################
self.ui.lblSpecial.setToolTip("")
if scene_info.special == common.SCENE_SPECIAL.option:
self.ui.lblSpecial.setText("Options: %s" % scene_info.extra_val)
elif scene_info.special == common.SCENE_SPECIAL.showopt:
self.ui.lblSpecial.setText("Options: %s" % scene_info.extra_val)
elif scene_info.special == common.SCENE_SPECIAL.react:
self.ui.lblSpecial.setText("Re:ACT")
elif scene_info.special == common.SCENE_SPECIAL.debate:
self.ui.lblSpecial.setText("Nonstop Debate")
elif scene_info.special == common.SCENE_SPECIAL.hanron:
self.ui.lblSpecial.setText("Counterstrike Showdown")
elif scene_info.special == common.SCENE_SPECIAL.chatter:
self.ui.lblSpecial.setText("Chatter %d" % scene_info.extra_val)
elif scene_info.special == common.SCENE_SPECIAL.checkobj:
obj_label = get_obj_label(scene_info.room, scene_info.extra_val - 20, common.editor_config.data01_dir)
if obj_label:
self.ui.lblSpecial.setText("Obj: %s" % obj_label)
self.ui.lblSpecial.setToolTip("Obj ID: %d" % scene_info.extra_val)
else:
self.ui.lblSpecial.setText("Obj: ID %d" % scene_info.extra_val)
elif scene_info.special == common.SCENE_SPECIAL.checkchar:
character = get_char_name(scene_info.extra_val, common.editor_config.data01_dir)
if character:
self.ui.lblSpecial.setText("Char: %s" % character)
self.ui.lblSpecial.setToolTip("Char ID: %d" % scene_info.extra_val)
else:
self.ui.lblSpecial.setText("Char: %s" % scene_info.extra_val)
else:
self.ui.lblSpecial.setText("N/A")
##############################################################################
### @fn showDupes()
### @desc Fills in the "Duplicates" tree for this file.
##############################################################################
def showDupes(self):
self.ui.treeDupes.clear()
if not isinstance(self.script_pack[self.cur_script], ScriptFile):
self.ui.tabRelated.setTabText(0, "0 Dupes")
return
# A little fiddling with the directory names, since we hide a bunch of info
# so the UI doesn't get cluttered.
directory = self.script_pack.get_real_dir()
filename = os.path.basename(self.script_pack[self.cur_script].filename)
filename = os.path.join(directory, filename)
filename = dir_tools.normalize(filename)
dupes = dupe_db.files_in_same_group(filename)
num_dupes = 0
tree_items = []
if not dupes == None:
self.ui.actionRemoveDupeActive.setEnabled(True)
for file in dupes:
if not file == filename:
parsed_file = dir_tools.consolidate_dir(file)
tree_item = tree.path_to_tree(parsed_file)
tree_items.append(tree_item)
num_dupes = num_dupes + 1
tree_items = tree.consolidate_tree_items(tree_items)
for item in tree_items:
self.ui.treeDupes.addTopLevelItem(item)
else:
self.ui.actionRemoveDupeActive.setEnabled(False)
if num_dupes == 1:
self.ui.tabRelated.setTabText(0, "%d Dupe" % num_dupes)
else:
self.ui.tabRelated.setTabText(0, "%d Dupes" % num_dupes)
# If we refill this, they haven't selected anything and can't mark anything.
self.ui.actionRemoveDupeRelated.setEnabled(False)
self.ui.actionRemoveDupeAll.setEnabled(False)
self.ui.actionShowDupeInEditor.setEnabled(False)
self.ui.actionShowDupeInExplorer.setEnabled(False)
self.ui.txtSimilarTrans.setPlainText("")
self.ui.txtSimilarOrig.setPlainText("")
self.ui.txtSimilarComm.setPlainText("")
if common.editor_config.auto_expand:
self.ui.treeDupes.expandAll()
##############################################################################
### @fn showSimilar()
### @desc Fills in the "Similar" tree for this file.
##############################################################################
def showSimilar(self):
self.ui.treeSimilar.clear()
if not isinstance(self.script_pack[self.cur_script], ScriptFile):
self.ui.tabRelated.setTabText(1, "0 Similar")
return
# A little fiddling with the directory names, since we hide a bunch of info
# so the UI doesn't get cluttered.
directory = self.script_pack.get_real_dir()
filename = os.path.basename(self.script_pack[self.cur_script].filename)
filename = os.path.join(directory, filename)
filename = dir_tools.normalize(filename)
num_similar = 0
tree_items = []
similarities = self.similarity_db.get_similarities(filename)
for file in similarities:
parsed_file = dir_tools.consolidate_dir(file)
tree_item = tree.path_to_tree(parsed_file)
tree_items.append(tree_item)
num_similar = num_similar + 1
tree_items = tree.consolidate_tree_items(tree_items)
for item in tree_items:
self.ui.treeSimilar.addTopLevelItem(item)
self.ui.tabRelated.setTabText(1, "%d Similar" % num_similar)
# If we refill this, they haven't selected anything and can't mark anything.
self.ui.actionAddDupeSim.setEnabled(False)
self.ui.actionRemoveSimilarity.setEnabled(False)
self.ui.actionShowSimilarInEditor.setEnabled(False)
self.ui.actionShowSimilarInExplorer.setEnabled(False)
self.ui.txtSimilarTrans.setPlainText("")
self.ui.txtSimilarOrig.setPlainText("")
self.ui.txtSimilarComm.setPlainText("")
if common.editor_config.auto_expand:
self.ui.treeSimilar.expandAll()
##############################################################################
### @fn updateRefs()
### @desc Triggered by a change of references in the comments.
##############################################################################
def updateRefs(self):
self.ui.treeReferences.clear()
if not isinstance(self.script_pack[self.cur_script], ScriptFile):
self.ui.tabRelated.setTabText(2, "0 References")
return
# A little fiddling with the directory names, since we hide a bunch of info
# so the UI doesn't get cluttered.
directory = self.script_pack.get_real_dir()
filename = os.path.basename(self.script_pack[self.cur_script].filename)
filename = os.path.join(directory, filename)
filename = dir_tools.normalize(filename)
references = self.ui.txtComments.references
num_refs = 0
tree_items = []
if references:
for file in references:
if not file == filename:
if os.path.split(file)[0] == "":
file = os.path.join(self.script_pack.directory, file)
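          # A reference given as a bare filename is treated as relative to the
          # folder currently being edited.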
parsed_file = dir_tools.consolidate_dir(file)
tree_item = tree.path_to_tree(parsed_file)
tree_items.append(tree_item)
num_refs = num_refs + 1
tree_items = tree.consolidate_tree_items(tree_items)
for item in tree_items:
self.ui.treeReferences.addTopLevelItem(item)
if num_refs == 1:
self.ui.tabRelated.setTabText(2, "%d Reference" % num_refs)
else:
self.ui.tabRelated.setTabText(2, "%d References" % num_refs)
# If we refill this, they haven't selected anything and can't mark anything.
self.ui.actionShowReferenceInEditor.setEnabled(False)
self.ui.actionShowReferenceInExplorer.setEnabled(False)
self.ui.actionAddDupeRef.setEnabled(False)
if common.editor_config.auto_expand:
self.ui.treeReferences.expandAll()
##############################################################################
### @fn updateSimilarView()
### @desc Displays text on the right panel based the selected tree item.
##############################################################################
def updateSimilarView(self, tree_item):
if tree_item == None or tree_item.childCount() != 0:
self.ui.actionAddDupeSim.setEnabled(False)
self.ui.actionAddDupeRef.setEnabled(False)
self.ui.actionRemoveSimilarity.setEnabled(False)
self.ui.actionRemoveDupeRelated.setEnabled(False)
#self.ui.actionRemoveDupeAll.setEnabled(False)
return
#else:
#self.ui.actionAddDupeSim.setEnabled(True)
#self.ui.actionRemoveSimilarity.setEnabled(True)
#self.ui.actionRemoveDupeRelated.setEnabled(True)
#self.ui.actionRemoveDupeAll.setEnabled(True)
self.updateStatusRelated()
file = common.qt_to_unicode(tree_item.text(0))
directory = tree.tree_item_to_path(tree_item.parent())
expanded_dir = dir_tools.expand_dir(directory)
filename = os.path.join(common.editor_config.data01_dir, expanded_dir, file)
if not os.path.isfile(filename):
self.ui.txtSimilarTrans.setPlainText("Could not load \"%s\"." % file)
self.ui.txtSimilarOrig.setPlainText("")
self.ui.txtSimilarComm.setPlainText("")
self.ui.actionAddDupeSim.setEnabled(False)
self.ui.actionAddDupeRef.setEnabled(False)
return
if not directory == self.directory:
script_file = ScriptFile(filename)
else:
script_file = self.script_pack.get_script(file)
      # If a file exists in this directory but, for some reason, isn't referenced
      # in the wrd file (so it's not in the script pack), load it directly anyway
      # so we can still see it.
if script_file == None:
script_file = ScriptFile(filename)
self.ui.txtSimilarTrans.setPlainText(script_file[common.editor_config.lang_trans])
self.ui.txtSimilarOrig.setPlainText(script_file[common.editor_config.lang_orig])
self.ui.txtSimilarComm.setPlainText(script_file.comments)
##############################################################################
### ###
### S L O T S ###
### ###
##############################################################################
##############################################################################
### @fn showOpenMenu()
### @desc Two guesses.
##############################################################################
def showOpenMenu(self):
self.open_menu.exec_()
# If they chose something, and there are unsaved changes,
# ask about them before trying to load the folder.
if not self.open_menu.current_dir == None \
and not dir_tools.normalize(self.directory) == dir_tools.normalize(self.open_menu.current_dir) \
and self.askUnsavedChanges():
self.loadDirectory(self.open_menu.current_dir)
##############################################################################
### @fn showGotoMenu()
### @desc Two and a half guesses.
##############################################################################
def showGotoMenu(self):
reference, accepted = QtGui.QInputDialog.getText(self, "Go to...", "Where would you like to go?")
if not accepted:
return
reference = common.qt_to_unicode(reference)
reference = re.sub(ur"[{}]", u"", reference)
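    # Strip curly braces so a "{dir/file}"-style reference pasted from the
    # comments box (presumably the common case) still resolves.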
if not reference:
return
directory, filename = os.path.split(reference)
if not directory and not filename:
return
elif not directory:
directory = filename
filename = None
elif not filename:
filename = None
if not dir_tools.normalize(directory) == dir_tools.normalize(self.directory):
if not self.askUnsavedChanges():
return
self.loadDirectory(directory, selected_file = filename)
else:
self.setCurrentFile(filename)
##############################################################################
### @fn gotoScriptJump()
### @desc Go to the folder indicated by the ScriptJump object currently selected.
##############################################################################
def gotoScriptJump(self):
if not isinstance(self.script_pack[self.cur_script], ScriptJump):
return
target = self.script_pack[self.cur_script].target()
if not dir_tools.normalize(target) == dir_tools.normalize(self.directory):
if not self.askUnsavedChanges():
return
self.loadDirectory(target)
##############################################################################
### @fn showSettingsMenu()
### @desc Three guesses.
##############################################################################
def showSettingsMenu(self):
# Store this so we can see if they changed anything.
temp_data01 = common.editor_config.data01_dir
menu = SettingsMenu(self)
result = menu.exec_()
self.showImage()
self.updateActions()
self.updateHighlight()
self.updateTranslatedBoxCfg()
# If they changed data01, reload the directory,
# so we're looking at the one they're actually set to use.
if not dir_tools.normalize(temp_data01) == dir_tools.normalize(common.editor_config.data01_dir):
self.askUnsavedChanges()
self.loadDirectory(self.directory, clear_similarity = False, selected_file = os.path.basename(self.script_pack[self.cur_script].filename))
##############################################################################
### @fn showProgressCalculator()
### @desc Three and a third guesses.
##############################################################################
def showProgressCalculator(self):
self.progress_calc.show()
self.progress_calc.raise_()
self.progress_calc.activateWindow()
##############################################################################
### @fn showSearchMenu()
### @desc Four guesses.
##############################################################################
def showSearchMenu(self):
self.search_menu.show()
self.search_menu.raise_()
self.search_menu.activateWindow()
##############################################################################
### @fn searchMenuOpenClicked()
### @desc Four and a quarter guesses.
##############################################################################
def searchMenuOpenClicked(self):
node = self.search_menu.ui.treeResults.currentItem()
if node == None:
return
self.showNodeInEditor(self.search_menu.ui.treeResults.currentItem())
self.raise_()
self.activateWindow()
##############################################################################
### @fn showTerminologyEditor()
### @desc Four AND A HALF guesses.
##############################################################################
def showTerminologyEditor(self):
self.terminology_editor.show()
self.terminology_editor.raise_()
self.terminology_editor.activateWindow()
##############################################################################
### @fn showFontGenerator()
### @desc X guesses.
##############################################################################
def showFontGenerator(self):
self.font_gen_menu.show()
self.font_gen_menu.raise_()
self.font_gen_menu.activateWindow()
##############################################################################
### @fn showScriptDumper()
### @desc X guesses.
##############################################################################
def showScriptDumper(self):
menu = ScriptDumpMenu(self)
menu.exec_()
##############################################################################
### @fn reloadDirectory()
### @desc Five guesses.
##############################################################################
def reloadDirectory(self):
if self.askUnsavedChanges():
self.loadDirectory(self.directory, clear_similarity = False, selected_file = os.path.basename(self.script_pack[self.cur_script].filename))
##############################################################################
### @fn saveChanges()
### @desc Six guesses.
##############################################################################
def saveChanges(self):
progress = QProgressDialog("Saving...", QString(), 0, len(self.script_pack), self)
progress.setWindowTitle("Saving...")
progress.setWindowModality(Qt.Qt.WindowModal)
progress.setValue(0)
progress.setAutoClose(False)
progress.setMinimumDuration(1000)
width = self.width()
height = self.height()
x = self.x()
y = self.y()
dir = self.script_pack.get_real_dir()
    # For each script in the pack, the files (as paths relative to data01) that
    # will be written for it, including every member of its duplicate group.
files = []
file_count = 0
# Get a list of the files we are going to change.
for index, script in enumerate(self.script_pack):
files.append([])
# Don't bother if we haven't changed this file.
if not self.changed[index]:
continue
# Or if this isn't actually a script file.
if not isinstance(script, ScriptFile):
continue
filename = os.path.basename(script.filename)
filename = os.path.join(dir, filename)
filename = dir_tools.normalize(filename)
dupes = dupe_db.files_in_same_group(filename)
if dupes == None:
files[-1].append(filename)
file_count += 1
continue
# This includes the original file itself.
for dupe in dupes:
files[-1].append(dupe)
file_count += 1
progress.setMaximum(file_count)
# Make backups first.
backup_time = time.strftime("%Y.%m.%d_%H.%M.%S_SAVE")
for file_set in files:
for file in file_set:
source = os.path.join(common.editor_config.data01_dir, file)
target = os.path.join(common.editor_config.backup_dir, backup_time, file)
progress.setLabelText("Backing up...\n" + file)
progress.setValue(progress.value() + 1)
# Re-center the dialog.
progress_w = progress.geometry().width()
progress_h = progress.geometry().height()
new_x = x + ((width - progress_w) / 2)
new_y = y + ((height - progress_h) / 2)
progress.move(new_x, new_y)
# Make sure we have a place to put it.
basedir = os.path.dirname(target)
if not os.path.isdir(basedir):
os.makedirs(basedir)
shutil.copy2(source, target)
progress.setValue(0)
# Now do some saving.
for index, script in enumerate(self.script_pack):
file_set = files[index]
for file in file_set:
target = os.path.join(common.editor_config.data01_dir, file)
target_copy = os.path.join(common.editor_config.changes_dir, file)
progress.setLabelText("Saving...\n" + file)
progress.setValue(progress.value() + 1)
# Re-center the dialog.
progress_w = progress.geometry().width()
progress_h = progress.geometry().height()
new_x = x + ((width - progress_w) / 2)
new_y = y + ((height - progress_h) / 2)
progress.move(new_x, new_y)
# Make sure we have a place to put it.
basedir = os.path.dirname(target_copy)
if not os.path.isdir(basedir):
os.makedirs(basedir)
script.save(target)
script.save(target_copy)
progress.close()
self.setWindowModified(False)
self.changed = [False] * len(self.script_pack)
##############################################################################
### @fn askUnsavedChanges()
### @desc Checks for unsaved changes, then asks the user how to proceed.
### @return Returns True if it is okay to proceed and False if not.
##############################################################################
def askUnsavedChanges(self):
if not self.isWindowModified():
return True
answer = QtGui.QMessageBox.question(
self,
"Unsaved Changes",
"Would you like to save your changes?",
buttons = QtGui.QMessageBox.Save | QtGui.QMessageBox.Discard | QtGui.QMessageBox.Cancel,
defaultButton = QtGui.QMessageBox.Cancel
)
if answer == QtGui.QMessageBox.Cancel:
return False
elif answer == QtGui.QMessageBox.Discard:
return True
elif answer == QtGui.QMessageBox.Save:
self.saveChanges()
return True
##############################################################################
### @fn playVoice()
### @desc Triggered by the "Play Voice" button.
##############################################################################
def playVoice(self):
self.voice_player.play(self.script_pack[self.cur_script].scene_info.voice)
##############################################################################
### @fn stopVoice()
### @desc Triggered by the "Stop Voice" button.
##############################################################################
def stopVoice(self):
self.voice_player.stop()
##############################################################################
### @fn playBGM()
### @desc Triggered by the "Play BGM" button.
##############################################################################
# def playBGM(self):
# self.bgm_player.play(self.script_pack[self.cur_script].scene_info.bgm)
##############################################################################
### @fn stopBGM()
### @desc Triggered by the "Stop BGM" button.
##############################################################################
# def stopBGM(self):
# self.bgm_player.stop()
##############################################################################
### @fn updateStatusBar()
### @desc Updates all the labels in the status bar.
##############################################################################
def updateStatusBar(self):
self.updateStatusMode()
self.updateStatusCursor()
self.updateStatusWordCount()
self.updateStatusDirInfo()
self.updateStatusRelated()
##############################################################################
### @fn updateStatusMode()
### @desc Updates the label on the status bar for the script mode.
##############################################################################
def updateStatusMode(self):
self.ui.statusLabelMode.setText(common.mode_to_text(self.script_pack[self.cur_script].scene_info.mode))
##############################################################################
### @fn updateStatusCursor()
### @desc Updates the label on the status bar for the text box.
##############################################################################
def updateStatusCursor(self):
cursor = self.ui.txtTranslated.textCursor()
len = self.ui.txtTranslated.toPlainText().length()
lines = self.ui.txtTranslated.document().blockCount()
line_num = cursor.blockNumber() + 1
col = cursor.positionInBlock()
# We don't want our column count to include CLTs, so we can actually
# have a useful look at how long the lines are. So we search for all
# CLTs on the line we're in that start before the cursor position and
# we chop them out of the column count.
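    # Worked example (assuming TAG_KILLER matches a whole <CLT ...> tag): for the
    # line u"<CLT 03>Hi" with the cursor after "Hi", col is 10 but the tag covers
    # columns 0-8, so adjusted_col comes out as 10 - 8 = 2.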
line = common.qt_to_unicode(cursor.block().text())
tag_re = TAG_KILLER
adjusted_col = col
for match in tag_re.finditer(line):
if match.end() <= col:
adjusted_col -= (match.end() - match.start())
elif match.start() < col:
adjusted_col -= (col - match.start())
break
else:
break
self.ui.statusLabelCursor.setText("Length: %d\t Lines: %d\t Line: %d\t Col: %d" % (len, lines, line_num, adjusted_col))
##############################################################################
### @fn updateStatusWordCount()
### @desc Updates the label on the status bar for the text box.
##############################################################################
def updateStatusWordCount(self):
words = count_words(common.qt_to_unicode(self.ui.txtTranslated.toPlainText()))
self.ui.statusLabelWordCount.setText("Words: %d" % words)
##############################################################################
### @fn updateStatusDirInfo()
### @desc Updates the label on the status bar for the directory listing.
##############################################################################
def updateStatusDirInfo(self):
self.ui.statusLabelDirInfo.setText("Item: %d / %d" % (self.cur_script + 1, len(self.script_pack.script_files)))
##############################################################################
### @fn updateStatusRelated()
### @desc Updates the label on the status bar for the related window.
##############################################################################
def updateStatusRelated(self):
tree_item = self.ui.treeDupes.currentItem()
if tree_item != None and tree_item.childCount() == 0:
self.ui.statusLabelRelated.setText("Duplicate: %s" % tree.tree_item_to_path(tree_item))
return
tree_item = self.ui.treeSimilar.currentItem()
if tree_item != None and tree_item.childCount() == 0:
self.ui.statusLabelRelated.setText("Similarity: %s" % tree.tree_item_to_path(tree_item))
return
tree_item = self.ui.treeReferences.currentItem()
if tree_item != None and tree_item.childCount() == 0:
self.ui.statusLabelRelated.setText("Reference: %s" % tree.tree_item_to_path(tree_item))
return
self.ui.statusLabelRelated.setText("Nothing selected")
##############################################################################
### @fn changedOriginalTab()
### @desc Called when the tab showing the original text is changed.
##############################################################################
def changedOriginalTab(self):
hide_original = False
if self.ui.tabsOriginal.currentWidget() == self.ui.tabHide:
hide_original = True
if self.hide_original != hide_original:
self.hide_original = hide_original
self.showImage(IMAGE_POS.original)
##############################################################################
### @fn changedTranslated()
### @desc Called when txtTranslated is changed. Updates the internal script
### information and the preview image.
##############################################################################
def changedTranslated(self):
if not isinstance(self.script_pack[self.cur_script], ScriptFile):
self.showImage(IMAGE_POS.translated)
return
translated = common.qt_to_unicode(self.ui.txtTranslated.toPlainText())
if not translated == self.script_pack[self.cur_script][common.editor_config.lang_trans]:
self.setWindowModified(True)
self.script_pack[self.cur_script][common.editor_config.lang_trans] = translated
self.changed[self.cur_script] = True
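      # Mirror the edit into any dupes of this line that live in the same folder,
      # so they stay in sync while editing; they are written to disk via the dupe
      # group when saving.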
for dupe in self.internal_dupes[self.cur_script]:
self.script_pack[dupe][common.editor_config.lang_trans] = translated
#self.changed[dupe] = True
self.showImage(IMAGE_POS.translated)
self.updateStatusCursor()
self.updateStatusWordCount()
##############################################################################
### @fn changedOriginal()
### @desc Called when txtOriginal is changed. Updates the preview image.
##############################################################################
def changedOriginal(self):
self.showImage(IMAGE_POS.original)
##############################################################################
### @fn changedComments()
### @desc Called when txtComments is changed. Updates the internal script.
##############################################################################
def changedComments(self):
if not isinstance(self.script_pack[self.cur_script], ScriptFile):
return
comments = common.qt_to_unicode(self.ui.txtComments.toPlainText())
if not comments == self.script_pack[self.cur_script].comments:
self.setWindowModified(True)
self.script_pack[self.cur_script].comments = comments
self.changed[self.cur_script] = True
for dupe in self.internal_dupes[self.cur_script]:
self.script_pack[dupe].comments = comments
##############################################################################
### @fn changedScriptFile(index)
### @desc Called when the selected script file is changed. Fills in the text
### boxes and updates the preview images.
##############################################################################
# Find sequential, valid <CLT> tags, with or without numbers, at the end of the line.
CURSOR_RE = re.compile(ur"(\n*\<CLT(\s+\d+)?\>)*\Z", re.UNICODE)
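  # For instance, a line ending in "...text\n<CLT 23>" or "...text<CLT>" matches,
  # with the match starting just before the trailing newline(s)/tag(s), so the
  # cursor is parked in front of those closing <CLT> markers in changedScriptFile().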
def changedScriptFile(self, index):
self.cur_script = index
# Has to happen early, so the highlighter can take advantage of any changes
# to the terms list as soon as the new text is shown.
self.updateHighlight()
self.updateTranslatedBoxCfg()
self.bg = text_printer.draw_scene(self.script_pack[index].scene_info)
self.ui.txtComments.setPlainText(self.script_pack[index].comments)
self.ui.txtTranslated.setPlainText(self.script_pack[index][common.editor_config.lang_trans])
cursor_match = self.CURSOR_RE.search(self.script_pack[index][common.editor_config.lang_trans])
if cursor_match == None:
self.ui.txtTranslated.moveCursor(QTextCursor.End)
else:
cursor = self.ui.txtTranslated.textCursor()
cursor.setPosition(cursor_match.start())
self.ui.txtTranslated.setTextCursor(cursor)
self.ui.txtComments.moveCursor(QTextCursor.End)
self.ui.txtOriginal.setPlainText(self.script_pack[index][common.editor_config.lang_orig])
self.ui.txtOriginalNoTags.setPlainText(self.script_pack[index].notags[common.editor_config.lang_orig])
if isinstance(self.script_pack[self.cur_script], ScriptJump):
self.ui.txtTranslated.setReadOnly(True)
self.ui.txtComments.setReadOnly(True)
self.ui.actionCopyActivePath.setEnabled(False)
self.ui.actionGotoScriptJump.setEnabled(True)
self.setWindowTitle("The Super Duper Script Editor 2 - %s[*]" % self.script_pack.directory)
elif isinstance(self.script_pack[self.cur_script], ScriptFile):
self.ui.txtTranslated.setReadOnly(False)
self.ui.txtComments.setReadOnly(False)
self.ui.actionCopyActivePath.setEnabled(True)
self.ui.actionGotoScriptJump.setEnabled(False)
filename = os.path.basename(self.script_pack[self.cur_script].filename)
self.setWindowTitle("The Super Duper Script Editor 2 - " + os.path.join(self.script_pack.directory, filename) + "[*]")
#self.showImage()
self.showSimilar()
self.showDupes()
self.ui.txtSimilarTrans.setPlainText("")
self.ui.txtSimilarOrig.setPlainText("")
self.ui.txtSimilarComm.setPlainText("")
self.updateUI()
self.updateStatusDirInfo()
self.updateStatusRelated()
if common.editor_config.auto_play_voice:
self.playVoice()
# if self.script_pack[index].scene_info.bgm < 0:
# self.stopBGM()
# else:
# if common.editor_config.auto_play_bgm:
# self.playBGM()
##############################################################################
### @fn changedDupe(current, prev)
### @desc Called when the selected dupe is changed. Loads and displays the
### text in the right panel.
##############################################################################
def changedDupe(self, current, prev):
if current == None:
self.ui.actionShowDupeInEditor.setEnabled(False)
self.ui.actionShowDupeInExplorer.setEnabled(False)
self.ui.actionRemoveDupeRelated.setEnabled(False)
self.ui.actionRemoveDupeAll.setEnabled(False)
return
else:
self.ui.actionShowDupeInEditor.setEnabled(True)
self.ui.actionShowDupeInExplorer.setEnabled(True)
self.ui.actionRemoveDupeRelated.setEnabled(True)
self.ui.actionRemoveDupeAll.setEnabled(True)
self.ui.treeSimilar.setCurrentItem(None)
self.ui.treeReferences.setCurrentItem(None)
self.updateSimilarView(current)
##############################################################################
### @fn changedSimilar(current, prev)
### @desc Called when the selected similar file is changed. Loads and
### displays the text in the right panel.
##############################################################################
def changedSimilar(self, current, prev):
if current == None:
self.ui.actionShowSimilarInEditor.setEnabled(False)
self.ui.actionShowSimilarInExplorer.setEnabled(False)
self.ui.actionAddDupeSim.setEnabled(False)
self.ui.actionRemoveSimilarity.setEnabled(False)
return
else:
self.ui.actionShowSimilarInEditor.setEnabled(True)
self.ui.actionShowSimilarInExplorer.setEnabled(True)
self.ui.actionAddDupeSim.setEnabled(True)
self.ui.actionRemoveSimilarity.setEnabled(True)
self.ui.treeDupes.setCurrentItem(None)
self.ui.treeReferences.setCurrentItem(None)
self.updateSimilarView(current)
##############################################################################
### @fn changedReference(current, prev)
### @desc Called when the selected reference file is changed. Loads and
### displays the text in the right panel.
##############################################################################
def changedReference(self, current, prev):
if current == None:
self.ui.actionShowReferenceInEditor.setEnabled(False)
self.ui.actionShowReferenceInExplorer.setEnabled(False)
self.ui.actionAddDupeRef.setEnabled(False)
return
else:
self.ui.actionShowReferenceInEditor.setEnabled(True)
self.ui.actionShowReferenceInExplorer.setEnabled(True)
self.ui.actionAddDupeRef.setEnabled(True)
self.ui.treeDupes.setCurrentItem(None)
self.ui.treeSimilar.setCurrentItem(None)
self.updateSimilarView(current)
##############################################################################
### @fn addDupe()
### @desc Marks the selected "similar" file as a duplicate of the currently
### active script file and removes it from the similarity database.
##############################################################################
def addDupe(self, node):
#current = self.ui.treeSimilar.currentItem()
current = node
if current == None or current.childCount() != 0:
return
selected_dir = tree.tree_item_to_path(current.parent())
selected_dir = dir_tools.expand_dir(selected_dir)
selected_dir = os.path.join(selected_dir)
selected_file = common.qt_to_unicode(current.text(0))
selected_file = os.path.join(selected_dir, selected_file)
active_dir = self.script_pack.get_real_dir()
active_file = os.path.basename(self.script_pack[self.cur_script].filename)
active_file = os.path.join(active_dir, active_file)
active_file = dir_tools.normalize(active_file)
if active_file == selected_file:
return
active_group = dupe_db.group_from_file(active_file)
# See if this one's in a group. If so, we merge. If not, we add.
selected_group = dupe_db.group_from_file(selected_file)
# Be sure we actually want to do this.
if not selected_group == None or not active_group == None:
answer = QtGui.QMessageBox.warning(
self,
"Mark as Duplicate",
"One or both of the selected files is already a member of a duplicate group.\n\n" +
"By marking the selected files duplicates, the two duplicate groups will be " +
"merged, and all files in both groups will be considered duplicates of each other.\n\n" +
"Proceed?",
buttons = QtGui.QMessageBox.Yes | QtGui.QMessageBox.No,
defaultButton = QtGui.QMessageBox.No
)
if answer == QtGui.QMessageBox.No:
return
else:
answer = QtGui.QMessageBox.warning(
self,
"Mark as Duplicate",
"Mark the selected file as a duplicate of the active file?",
buttons = QtGui.QMessageBox.Yes | QtGui.QMessageBox.No,
defaultButton = QtGui.QMessageBox.No
)
if answer == QtGui.QMessageBox.No:
return
self.removeSimilarity(active_file, selected_file)
if active_group == None:
active_group = dupe_db.add_file(active_file)
if selected_group == None:
dupe_db.add_file(selected_file, active_group)
else:
dupe_db.merge_groups([selected_group, active_group])
dupe_db.save_csv()
self.findInternalDupes()
self.showSimilar()
self.showDupes()
##############################################################################
### @fn removeDupe(filename)
### @desc Removes the given file from whatever duplicate group it is in
### and adds it to the similarity database.
##############################################################################
def removeDupe(self, filename):
group = dupe_db.group_from_file(filename)
if group == None:
return
remaining_files = dupe_db.files_in_group(group)
remaining_files.discard(filename)
dupe_db.remove_file(filename)
    if remaining_files is not None and len(remaining_files) > 0:
self.similarity_db.add_similar_files([filename], remaining_files, 100)
dupe_db.save_csv()
##############################################################################
### @fn removeDupeAll()
### @desc Kills the entire duplicate group.
##############################################################################
def removeDupeAll(self):
answer = QtGui.QMessageBox.warning(
self,
"Remove Duplicates",
"Are you sure you want to remove all files from the current duplicate group?\n\n" +
"All files in this duplicate group will be marked as 100% similar to the each other.\n\n" +
"This action cannot be undone. Proceed?",
buttons = QtGui.QMessageBox.Yes | QtGui.QMessageBox.No,
defaultButton = QtGui.QMessageBox.No
)
if answer == QtGui.QMessageBox.No:
return
active_dir = self.script_pack.get_real_dir()
active_file = os.path.basename(self.script_pack[self.cur_script].filename)
active_file = os.path.join(active_dir, active_file)
active_file = dir_tools.normalize(active_file)
group = dupe_db.group_from_file(active_file)
if group == None:
return
files = dupe_db.files_in_group(group)
dupe_db.remove_group(group)
# Mark all files as 100% similar to each other.
self.similarity_db.add_similar_files(files, files, 100)
dupe_db.save_csv()
self.findInternalDupes()
self.showSimilar()
self.showDupes()
##############################################################################
### @fn removeDupeActive()
### @desc Removes the active file from its duplicate group.
##############################################################################
def removeDupeActive(self):
answer = QtGui.QMessageBox.warning(
self,
"Remove Duplicate",
"Are you sure you want to remove the active file from its duplicate group?\n\n" +
"The active file will be marked as 100% similar to all other files in this duplicate group.",
buttons = QtGui.QMessageBox.Yes | QtGui.QMessageBox.No,
defaultButton = QtGui.QMessageBox.No
)
if answer == QtGui.QMessageBox.No:
return
active_dir = self.script_pack.get_real_dir()
active_file = os.path.basename(self.script_pack[self.cur_script].filename)
active_file = os.path.join(active_dir, active_file)
active_file = dir_tools.normalize(active_file)
self.removeDupe(active_file)
self.findInternalDupes()
self.showSimilar()
self.showDupes()
##############################################################################
### @fn removeDupeRelated()
### @desc Removes the selected "duplicate" file as a duplicate of the
### currently active script file and adds it to the similarity database.
##############################################################################
def removeDupeRelated(self):
current = self.ui.treeDupes.currentItem()
if current == None or current.childCount() != 0:
return
answer = QtGui.QMessageBox.warning(
self,
"Remove Duplicate",
"Are you sure you want to remove the selected file as a duplicate?\n\n" +
"All other files in this duplicate group will be marked as 100% similar to the selected file.",
buttons = QtGui.QMessageBox.Yes | QtGui.QMessageBox.No,
defaultButton = QtGui.QMessageBox.No
)
if answer == QtGui.QMessageBox.No:
return
dupe_dir = tree.tree_item_to_path(current.parent())
dupe_dir = dir_tools.expand_dir(dupe_dir)
dupe_dir = os.path.join(dupe_dir)
dupe_file = common.qt_to_unicode(current.text(0))
dupe_file = os.path.join(dupe_dir, dupe_file)
self.removeDupe(dupe_file)
self.findInternalDupes()
self.showSimilar()
self.showDupes()
##############################################################################
### @fn removeSimilarityMenu()
### @desc Removes the selected "similar" file as a similarity of the
### currently active script file.
##############################################################################
def removeSimilarityMenu(self):
current = self.ui.treeSimilar.currentItem()
if current == None or current.childCount() != 0:
return
answer = QtGui.QMessageBox.warning(
self,
"Remove Similarity",
"Are you sure you want to remove the selected file as a similarity?\n\n" +
"All other files in the active file's duplicate group will be marked as non-similar to the selected file.",
buttons = QtGui.QMessageBox.Yes | QtGui.QMessageBox.No,
defaultButton = QtGui.QMessageBox.No
)
if answer == QtGui.QMessageBox.No:
return
similar_dir = tree.tree_item_to_path(current.parent())
similar_dir = dir_tools.expand_dir(similar_dir)
similar_dir = os.path.join(similar_dir)
similar_file = common.qt_to_unicode(current.text(0))
similar_file = os.path.join(similar_dir, similar_file)
active_dir = self.script_pack.get_real_dir()
active_file = os.path.basename(self.script_pack[self.cur_script].filename)
active_file = os.path.join(active_dir, active_file)
active_file = dir_tools.normalize(active_file)
self.removeSimilarity(active_file, similar_file)
self.findInternalDupes()
self.showSimilar()
self.showDupes()
##############################################################################
### @fn removeSimilarity(file1, file2)
### @desc Removes any trace of two files being similar to each other.
##############################################################################
def removeSimilarity(self, file1, file2):
# The similarity data we want to remove in exchange for adding a dupe.
# All dupes in the same group store redundant similarity info, so we want
# to kill all that info in one go.
file1_dupes = dupe_db.files_in_same_group(file1)
file2_dupes = dupe_db.files_in_same_group(file2)
if file1_dupes == None:
file1_dupes = [file1]
if file2_dupes == None:
file2_dupes = [file2]
self.similarity_db.remove_similar_files(file1_dupes, file2_dupes)
##############################################################################
### @fn saveImage()
### @desc Saves a preview image. :D
##############################################################################
def saveImage(self, image_pos):
dir = "ss"
index = 0
if not os.path.isdir(dir):
if os.path.isfile(dir):
return
else:
os.mkdir(dir)
while True:
if index >= 9999:
return
filename = os.path.join(dir, ("shot%04d.png" % index))
if not os.path.isfile(filename):
break
index = index + 1
if not os.path.isdir(dir):
os.mkdir(dir)
if image_pos == IMAGE_POS.original:
self.ui.lblOriginal.pixmap().save(filename)
elif image_pos == IMAGE_POS.translated:
self.ui.lblTranslated.pixmap().save(filename)
##############################################################################
### @fn buildArchives()
### @desc Create archives from our data folders.
##############################################################################
def buildArchives(self):
if not self.askUnsavedChanges():
return
answer = QtGui.QMessageBox.warning(
self,
"Build Archives",
"Building the archives can take a long time to complete, and once you start the process, it cannot be canceled.\n\n" +
"Proceed?",
buttons = QtGui.QMessageBox.Yes | QtGui.QMessageBox.No,
defaultButton = QtGui.QMessageBox.No
)
if answer == QtGui.QMessageBox.No:
return
# If they happen to close the dialog, then try to build again before it finishes.
if not self.iso_builder.process == None:
self.iso_builder.process.kill()
packer = CpkPacker(self)
packer.create_archives()
if common.editor_config.build_iso:
self.iso_builder = IsoBuilder(self)
self.iso_builder.build_iso(common.editor_config.iso_dir, common.editor_config.iso_file)
##############################################################################
### @fn importData01()
### @desc Imports a directory into data01. <3
##############################################################################
def importData01(self):
return
if not self.askUnsavedChanges():
return
source_dir = QtGui.QFileDialog.getExistingDirectory(self, caption = "Select a source directory", directory = common.editor_config.last_imported)
if not source_dir == "":
source_dir = os.path.abspath(source_dir)
else:
return
target_dir = common.editor_config.data01_dir
convert = self.ui.actionConvertPNGGIM.isChecked()
propogate = self.ui.actionPropogateDupes.isChecked()
import_data01(source_dir, target_dir, convert, propogate, parent = self)
common.editor_config.last_imported = source_dir
common.editor_config.last_import_target = target_dir
common.editor_config.save_config()
# Reload the directory, so the changes are visible.
self.loadDirectory(self.directory, clear_similarity = False, selected_file = os.path.basename(self.script_pack[self.cur_script].filename))
##############################################################################
### @fn exportData01()
### @desc Exports data01 to some other directory. <3
##############################################################################
def exportData01(self):
target_dir = QtGui.QFileDialog.getExistingDirectory(self, caption = "Select a target directory", directory = common.editor_config.last_export_target)
if not target_dir == "":
target_dir = os.path.abspath(target_dir)
else:
return
source_dir = common.editor_config.data01_dir
convert = self.ui.actionConvertPNGGIM.isChecked()
unique = self.ui.actionExportUnique.isChecked()
export_data01(source_dir, target_dir, convert, unique, parent = self)
common.editor_config.last_exported = source_dir
common.editor_config.last_export_target = target_dir
common.editor_config.save_config()
##############################################################################
### @fn copyFromOrig()
### @desc Called when the user clicks the "copy from original" button.
##############################################################################
def copyFromOrig(self):
# Get the translated box's text cursor.
# Using the cursor, the edit can be undone, which is preferable.
cursor = self.ui.txtTranslated.textCursor()
# Select the whole thing.
cursor.select(QTextCursor.Document)
# Replace it with the Original box.
cursor.insertText(self.ui.txtOriginal.toPlainText())
self.ui.txtTranslated.setTextCursor(cursor)
self.ui.txtTranslated.setFocus()
##############################################################################
### @fn surroundSelection()
### @desc Takes the selected text and surrounds it with the given text.
##############################################################################
def surroundSelection(self, before, after):
# Get the translated box's text cursor.
# Using the cursor, the edit can be undone, which is preferable.
cursor = self.ui.txtTranslated.textCursor()
# Get the selected text.
selection = cursor.selectedText()
# Store our selection so we can maintain it.
anchor = min(cursor.anchor(), cursor.position()) + len(before)
length = len(selection)
# Add the surrounding text.
selection = before + selection + after
# Replace it with the quoted text.
cursor.insertText(selection)
cursor.setPosition(anchor)
cursor.setPosition(anchor + length, QTextCursor.KeepAnchor)
# Update the text box.
self.ui.txtTranslated.setTextCursor(cursor)
self.ui.txtTranslated.setFocus()
##############################################################################
### @fn replaceSelection()
### @desc Takes the selected text and replaces it with the given text.
##############################################################################
def replaceSelection(self, replaceWith):
# Get the translated box's text cursor.
# Using the cursor, the edit can be undone, which is preferable.
cursor = self.ui.txtTranslated.textCursor()
    # Insert the replacement text, replacing any selected text.
cursor.insertText(replaceWith)
# Update the text box.
self.ui.txtTranslated.setTextCursor(cursor)
self.ui.txtTranslated.setFocus()
##############################################################################
### @fn expandAll()
### @desc ???
##############################################################################
def expandAll(self):
self.ui.treeDupes.expandAll()
self.ui.treeSimilar.expandAll()
self.ui.treeReferences.expandAll()
##############################################################################
### @fn collapseAll()
### @desc ???
##############################################################################
def collapseAll(self):
self.ui.treeDupes.collapseAll()
self.ui.treeSimilar.collapseAll()
self.ui.treeReferences.collapseAll()
##############################################################################
### @fn firstFile()
### @desc Selects the first file in the list. Triggered by Ctrl+PgUp.
##############################################################################
def firstFile(self):
self.ui.lstFiles.setCurrentRow(0)
##############################################################################
### @fn lastFile()
### @desc Selects the last file in the list. Triggered by Ctrl+PgDn.
##############################################################################
def lastFile(self):
self.ui.lstFiles.setCurrentRow(self.ui.lstFiles.count() - 1)
##############################################################################
### @fn prevFile()
### @desc Selects the previous file in the list. Triggered by PgUp.
##############################################################################
def prevFile(self):
current_row = self.ui.lstFiles.currentRow()
if current_row > 0:
self.ui.lstFiles.setCurrentRow(current_row - 1)
##############################################################################
### @fn nextFile()
### @desc Selects the next file in the list. Triggered by PgDn.
##############################################################################
def nextFile(self):
current_row = self.ui.lstFiles.currentRow()
if current_row < self.ui.lstFiles.count() - 1:
self.ui.lstFiles.setCurrentRow(current_row + 1)
##############################################################################
### @fn toggleHighlight()
### @desc Updates config + highlighter to reflect the change.
### Triggered by Alt+H or the menu option.
##############################################################################
def toggleHighlight(self):
self.updateConfig()
self.updateHighlight()
##############################################################################
### @fn updateHighlight()
### @desc Updates the highlighter based on our setting.
### Triggered by Alt+H or the menu option.
##############################################################################
def updateHighlight(self):
if common.editor_config.highlight_terms:
self.ui.txtOriginal.load_keywords()
else:
self.ui.txtOriginal.clear_keywords()
##############################################################################
### @fn updateTranslatedBoxCfg()
### @desc Updates the settings for the translated box based on our config.
##############################################################################
def updateTranslatedBoxCfg(self):
##############################
### Spell-check settings
##############################
if not common.editor_config.spell_check == self.ui.txtTranslated.spellcheck_enabled():
if common.editor_config.spell_check:
self.ui.txtTranslated.enable_spellcheck()
else:
self.ui.txtTranslated.disable_spellcheck()
if not common.editor_config.spell_check_lang == self.ui.txtTranslated.get_language():
self.ui.txtTranslated.set_language(common.editor_config.spell_check_lang)
##############################
### Text replacement
##############################
self.ui.txtTranslated.enable_replacement = common.editor_config.text_repl
if not common.editor_config.repl == self.ui.txtTranslated.replacements:
self.ui.txtTranslated.replacements = common.editor_config.repl
##############################
### Other settings
##############################
self.ui.txtTranslated.enable_smart_quotes = common.editor_config.smart_quotes
self.ui.txtTranslated.enable_quick_clt = common.editor_config.quick_clt
##############################################################################
### @fn showNodeInEditor()
### @desc Code duplication is for faggots.
##############################################################################
def showNodeInEditor(self, node):
if not node == None:
directory = tree.tree_item_to_path(node)
filename = None
# If we're at the leaf node, then pull back to the directory.
if node.childCount() == 0:
directory, filename = os.path.split(directory)
if not dir_tools.normalize(directory) == dir_tools.normalize(self.directory):
if not self.askUnsavedChanges():
return
self.loadDirectory(directory, selected_file = filename)
else:
self.setCurrentFile(filename)
##############################################################################
### @fn showCurrentInExplorer()
### @desc omgwtfbbq
##############################################################################
def showCurrentInExplorer(self):
if not isinstance(self.script_pack[self.cur_script], ScriptFile):
directory = self.script_pack.get_real_dir()
directory = os.path.join(common.editor_config.data01_dir, directory)
dir_tools.show_in_explorer(directory)
else:
dir_tools.show_in_explorer(self.script_pack[self.cur_script].filename)
##############################################################################
### @fn showNodeInExplorer()
### @desc Code duplication is for faggots.
### But comment duplication is pro.
##############################################################################
def showNodeInExplorer(self, node):
if not node == None:
directory = tree.tree_item_to_path(node)
filename = ""
# If we're at the leaf node, then pull back to the directory.
if node.childCount() == 0:
directory, filename = os.path.split(directory)
directory = dir_tools.expand_dir(directory)
directory = os.path.join(common.editor_config.data01_dir, directory, filename)
dir_tools.show_in_explorer(directory)
##############################################################################
### @fn copyNodePath()
### @desc Copies the path of the given node to the clipboard.
##############################################################################
def copyNodePath(self, node):
if not node == None:
text = "{%s}" % tree.tree_item_to_path(node)
clipboard = QApplication.clipboard()
clipboard.setText(text)
##############################################################################
### @fn copyActivePath()
### @desc Copies the path of the active file to the clipboard.
##############################################################################
def copyActivePath(self):
filename = os.path.basename(self.script_pack[self.cur_script].filename)
path = "{%s}" % os.path.join(self.script_pack.directory, filename)
clipboard = QApplication.clipboard()
clipboard.setText(path)
##############################################################################
### @fn showAbout()
### @desc A simple About screen.
##############################################################################
def showAbout(self):
QtGui.QMessageBox.information(
self,
u"About",
u"""
<b>The Super Duper Script Editor 2</b> v0.0.0.0<br/>
Copyright © 2012-2013 BlackDragonHunt, released under the GNU GPL (see file COPYING).<br/>
<br/>
Attributions:
<ol>
<li>Bitstring: Copyright (c) 2006-2012 Scott Griffiths; Licensed under the MIT License</li>
<li>Diff Match and Patch: Copyright 2006 Google Inc.; Licensed under the Apache License, Version 2.0</li>
<li>enum: Copyright © 2007–2009 Ben Finney <[email protected]>; Licensed under the GNU GPL, Version 3</li>
<li>GIM2PNG: Copyright (c) 2008; <a href="http://www.geocities.jp/junk2ool/">Website</a></li>
<li>GIMExport: Copyright © 2012 /a/nonymous scanlations; Used with permission.</li>
<li>mkisofs: Copyright (C) 1993-1997 Eric Youngdale (C); Copyright (C) 1997-2010 Joerg Schilling; Licensed under the GNU GPL</li>
<li>pngquant: Copyright (C) 1989, 1991 by Jef Poskanzer; Copyright (C) 1997, 2000, 2002 by Greg Roelofs, based on an idea by Stefan Schneider; Copyright 2009-2012 by Kornel Lesinski</li>
<li>ProjexUI: Copyright (c) 2011, Projex Software; Licensed under the LGPL</li>
<li>squish: Copyright (c) 2006 Simon Brown</li>
<li>Unique Postfix: Copyright (c) 2010 Denis Barmenkov; Licensed under the MIT License</li>
<li>Silk Icon Set: Copyright Mark James; Licensed under the Creative Commons Attribution 2.5 License; <a href="http://www.famfamfam.com/lab/icons/silk/">Website</a></li>
</ol>""",
buttons = QtGui.QMessageBox.Ok,
defaultButton = QtGui.QMessageBox.Ok
)
##############################################################################
### @fn calculateProgress()
### @desc Calculates.
##############################################################################
def calculateProgress(self):
calculate_progress(self)
##############################################################################
### @fn checkForErrors()
### @desc Checks the translated script against an untranslated script
### for any errors -- cases of the original, untranslated text differing.
##############################################################################
def checkForErrors(self):
base_dir = QtGui.QFileDialog.getExistingDirectory(self, caption = "Select the base directory", directory = common.editor_config.last_checked_with)
if not base_dir == "":
base_dir = os.path.abspath(base_dir)
else:
return
common.editor_config.last_checked_with = base_dir
common.editor_config.save_config()
progress = QProgressDialog("Checking for script errors...", "Abort", 0, 72000, self)
progress.setWindowTitle("Checking for Errors")
progress.setWindowModality(Qt.Qt.WindowModal)
progress.setValue(0)
progress.setAutoClose(False)
files = list(list_all_files(base_dir))
progress.setMaximum(len(files))
# For our dupe database, we need the relative file location, not absolute.
dir_start = len(base_dir) + 1
text_files = []
for i, file in enumerate(files):
if os.path.splitext(file)[1] == ".txt":
text_files.append(file[dir_start:])
progress.setValue(0)
progress.setMaximum(len(text_files))
errors = []
for i, file in enumerate(text_files):
if progress.wasCanceled():
return
if i % 100 == 0:
progress.setValue(progress.value() + 100)
base_file = os.path.join(base_dir, file)
cur_file = os.path.join(common.editor_config.data01_dir, file)
base_script = ScriptFile(base_file)
if os.path.isfile(cur_file):
cur_script = ScriptFile(cur_file)
if not base_script[common.editor_config.lang_orig].strip() == cur_script[common.editor_config.lang_orig].strip():
errors.append(file)
else:
errors.append(file)
progress.close()
diffs = DiffsMenu()
diffs.menu_name = "Script Errors"
diffs.setWindowTitle(diffs.menu_name)
diffs.set_folders(base_dir, common.editor_config.data01_dir, errors)
diffs.exec_()
##############################################################################
### @fn reloadDupes()
### @desc Requests a reload of the duplicate database.
##############################################################################
def reloadDupes(self):
dupe_db.load_csv(common.editor_config.dupes_csv)
self.findInternalDupes()
self.showDupes()
##############################################################################
### @fn closeEvent()
### @desc Makes sure everything's saved before closing.
##############################################################################
def closeEvent(self, event):
if self.askUnsavedChanges():
self.console.close()
self.search_menu.close()
self.open_menu.close()
self.terminology_editor.close()
self.progress_calc.close()
# Record the last selected file before we fade out of existence.
if not self.directory == "":
self.recordSelectedFile()
# Then make sure the config file is up-to-date.
common.editor_config.save_config()
script_analytics.SA.save()
event.accept()
else:
event.ignore()
##### EOF #####
| gpl-3.0 | -1,414,101,607,728,544,800 | 40.04437 | 195 | 0.566815 | false |
GNOME/meld | meld/accelerators.py | 1 | 2091 |
from typing import Dict, Sequence, Union
from gi.repository import Gtk
VIEW_ACCELERATORS: Dict[str, Union[str, Sequence[str]]] = {
'app.quit': '<Primary>Q',
'view.find': '<Primary>F',
'view.find-next': ('<Primary>G', 'F3'),
'view.find-previous': ('<Primary><Shift>G', '<Shift>F3'),
'view.find-replace': '<Primary>H',
'view.go-to-line': '<Primary>I',
# Overridden in CSS
'view.next-change': ('<Alt>Down', '<Alt>KP_Down', '<Primary>D'),
'view.next-pane': '<Alt>Page_Down',
# Overridden in CSS
'view.previous-change': ('<Alt>Up', '<Alt>KP_Up', '<Primary>E'),
'view.previous-pane': '<Alt>Page_Up',
'view.redo': '<Primary><Shift>Z',
'view.refresh': ('<control>R', 'F5'),
'view.save': '<Primary>S',
'view.save-all': '<Primary><Shift>L',
'view.save-as': '<Primary><Shift>S',
'view.undo': '<Primary>Z',
'win.close': '<Primary>W',
'win.gear-menu': 'F10',
'win.new-tab': '<Primary>N',
'win.stop': 'Escape',
# File comparison actions
'view.file-previous-conflict': '<Primary>I',
'view.file-next-conflict': '<Primary>K',
'view.file-push-left': '<Alt>Left',
'view.file-push-right': '<Alt>Right',
'view.file-pull-left': '<Alt><shift>Right',
'view.file-pull-right': '<Alt><shift>Left',
'view.file-copy-left-up': '<Alt>bracketleft',
'view.file-copy-right-up': '<Alt>bracketright',
'view.file-copy-left-down': '<Alt>semicolon',
'view.file-copy-right-down': '<Alt>quoteright',
'view.file-delete': ('<Alt>Delete', '<Alt>KP_Delete'),
'view.show-overview-map': 'F9',
# Folder comparison actions
'view.folder-compare': 'Return',
'view.folder-copy-left': '<Alt>Left',
'view.folder-copy-right': '<Alt>Right',
'view.folder-delete': 'Delete',
# Version control actions
'view.vc-commit': '<Primary>M',
'view.vc-console-visible': 'F9',
}
def register_accels(app: Gtk.Application):
for name, accel in VIEW_ACCELERATORS.items():
accel = accel if isinstance(accel, tuple) else (accel,)
app.set_accels_for_action(name, accel)
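# A minimal usage sketch: the application class below is hypothetical and only meant
# to show where register_accels() would plausibly be called (once, during startup).
class _ExampleApp(Gtk.Application):
    def do_startup(self):
        Gtk.Application.do_startup(self)
        # Installs every accelerator declared in VIEW_ACCELERATORS above.
        register_accels(self)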
| gpl-2.0 | 2,280,502,922,794,579,500 | 36.339286 | 68 | 0.604495 | false |
isandlaTech/cohorte-3rdparty | herald/src/main/python/herald/transports/xmpp/directory.py | 1 | 3659 | #!/usr/bin/python
# -- Content-Encoding: UTF-8 --
"""
Herald XMPP transport directory
:author: Thomas Calmant
:copyright: Copyright 2014, isandlaTech
:license: Apache License 2.0
:version: 0.0.1
:status: Alpha
..
Copyright 2014 isandlaTech
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# Module version
__version_info__ = (0, 0, 2)
__version__ = ".".join(str(x) for x in __version_info__)
# Documentation strings format
__docformat__ = "restructuredtext en"
# ------------------------------------------------------------------------------
# Herald XMPP
from . import SERVICE_XMPP_DIRECTORY, ACCESS_ID
from .beans import XMPPAccess
# Herald
import herald
# Standard library
import logging
from pelix.ipopo.decorators import ComponentFactory, Requires, Provides, \
Property, Validate, Invalidate, Instantiate
# ------------------------------------------------------------------------------
_logger = logging.getLogger(__name__)
# ------------------------------------------------------------------------------
@ComponentFactory('herald-xmpp-directory-factory')
@Requires('_directory', herald.SERVICE_DIRECTORY)
@Property('_access_id', herald.PROP_ACCESS_ID, ACCESS_ID)
@Provides((herald.SERVICE_TRANSPORT_DIRECTORY, SERVICE_XMPP_DIRECTORY))
@Instantiate('herald-xmpp-directory')
class XMPPDirectory(object):
"""
XMPP Directory for Herald
"""
def __init__(self):
"""
Sets up the transport directory
"""
# Herald Core Directory
self._directory = None
self._access_id = ACCESS_ID
# JID -> Peer UID
self._jid_uid = {}
# Group name -> XMPP room JID
self._groups = {}
@Validate
def _validate(self, context):
"""
Component validated
"""
self._jid_uid.clear()
self._groups.clear()
@Invalidate
def _invalidate(self, context):
"""
Component invalidated
"""
self._jid_uid.clear()
self._groups.clear()
def load_access(self, data):
"""
Loads a dumped access
:param data: Result of a call to XmppAccess.dump()
:return: An XMPPAccess bean
"""
return XMPPAccess(data)
def peer_access_set(self, peer, data):
"""
The access to the given peer matching our access ID has been set
:param peer: The Peer bean
:param data: The peer access data, previously loaded with load_access()
"""
if peer.uid != self._directory.local_uid:
self._jid_uid[data.jid] = peer
def peer_access_unset(self, peer, data):
"""
The access to the given peer matching our access ID has been removed
:param peer: The Peer bean
:param data: The peer access data
"""
try:
del self._jid_uid[data.jid]
except KeyError:
pass
def from_jid(self, jid):
"""
Returns the peer UID associated to the given JID
:param jid: A peer (full) JID
:return: A peer UID
:raise KeyError: Unknown JID
"""
return self._jid_uid[jid]
| apache-2.0 | -196,636,214,492,240,220 | 26.103704 | 80 | 0.586499 | false |
jantman/tuxtruck | networkmanager/TuxTruck_NetworkManager.py | 1 | 2942 | # TuxTruck_NetworkManager.py
#
# Time-stamp: "2009-08-26 08:59:30 jantman"
#
# +----------------------------------------------------------------------+
# | TuxTruck Project http://tuxtruck.jasonantman.com |
# +----------------------------------------------------------------------+
# | Copyright (c) 2009 Jason Antman. |
# | |
# | This program is free software; you can redistribute it and/or modify |
# | it under the terms of the GNU General Public License as published by |
# | the Free Software Foundation; either version 3 of the License, or |
# | (at your option) any later version. |
# | |
# | This program is distributed in the hope that it will be useful, |
# | but WITHOUT ANY WARRANTY; without even the implied warranty of |
# | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
# | GNU General Public License for more details. |
# | |
# | You should have received a copy of the GNU General Public License |
# | along with this program; if not, write to: |
# | |
# | Free Software Foundation, Inc. |
# | 59 Temple Place - Suite 330 |
# | Boston, MA 02111-1307, USA. |
# +----------------------------------------------------------------------+
# |Please use the above URL for bug reports and feature/support requests.|
# +----------------------------------------------------------------------+
# | Authors: Jason Antman <[email protected]> |
# +----------------------------------------------------------------------+
# | $LastChangedRevision:: $ |
# | $HeadURL:: $ |
# +----------------------------------------------------------------------+
import dbus
import time
class TuxTruck_NetworkManager():
"""
Class to perform DBUS-based cotrol of NetworkManager.
"""
PARENT = None
BUS = None
NM = None
def __init__(self, parent):
"""
Get the DBUS object and initialize things.
"""
self.PARENT = parent
self.BUS = dbus.SystemBus()
self.NM = self.BUS.get_object('org.freedesktop.NetworkManager', '/org/freedesktop/NetworkManager')
def run(self):
"""
DO the shit.
"""
print "run()"
print "STATE:"
print self.NM.state()
print "ACTIVE CONNECTIONS:"
print self.NM.GetActiveConnections()
| gpl-3.0 | 4,532,818,929,343,650,300 | 45.698413 | 106 | 0.395309 | false |
dpshelio/sunpy | sunpy/net/helioviewer.py | 1 | 18644 | """
This module provides a wrapper around the Helioviewer API.
"""
import json
import codecs
import urllib
from pathlib import Path
from collections import OrderedDict
import parfive
from astropy.utils.decorators import lazyproperty
import sunpy
from sunpy.time import parse_time
from sunpy.util.xml import xml_to_dict
from sunpy.util.util import partial_key_match
__all__ = ['HelioviewerClient']
class HelioviewerClient(object):
"""Helioviewer.org Client"""
def __init__(self, url="https://api.helioviewer.org/"):
"""
Parameters
----------
url : `str`
Default URL points to the Helioviewer API.
"""
self._api = url
@lazyproperty
def data_sources(self):
"""
We trawl through the return from `getDataSources` to create a clean
dictionary for all available sourceIDs.
Here is a list of all of them: https://api.helioviewer.org/docs/v2/#appendix_datasources
"""
data_sources_dict = dict()
datasources = self.get_data_sources()
for name, observ in datasources.items():
# TRACE only has measurements and is thus nested once
if name == "TRACE":
for instr, params in observ.items():
data_sources_dict[(name, None, None, instr)] = params['sourceId']
else:
for inst, detect in observ.items():
for wavelength, params in detect.items():
if 'sourceId' in params:
data_sources_dict[(name, inst, None, wavelength)] = params['sourceId']
else:
for wave, adict in params.items():
data_sources_dict[(name, inst, wavelength, wave)] = adict['sourceId']
# Sort the output for printing purposes
return OrderedDict(sorted(data_sources_dict.items(), key=lambda x: x[1]))
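    # A quick illustration of the structure built above (keys are hypothetical but
    # follow the flattening rules): ('SOHO', 'LASCO', 'C2', 'white-light') or
    # ('TRACE', None, None, '171'), each mapped to that series' integer sourceId.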
def get_data_sources(self):
"""
Return a hierarchical dictionary of the available datasources on helioviewer.org.
This uses ``getDataSources`` from the Helioviewer API.
Returns
-------
out : `dict`
A dictionary containing meta-information for each data source that Helioviewer supports.
"""
params = {"action": "getDataSources"}
return self._get_json(params)
def get_closest_image(self, date, observatory=None, instrument=None,
detector=None, measurement=None, source_id=None):
"""
Finds the closest image available for the specified source and date.
**This does not download any file.**
This uses `getClosestImage <https://api.helioviewer.org/docs/v2/#OfficialClients>`_ from the Helioviewer API.
.. note::
We can use `observatory` and `measurement` or `instrument` and `measurement` to get the value for source ID which
can then be used to get required information.
Parameters
----------
date : `astropy.time.Time`, `str`
A `~sunpy.time.parse_time` parsable string or `~astropy.time.Time`
object for the desired date of the image
observatory : `str`
Observatory name
instrument : `str`
Instrument name
detector : `str`
Detector name
measurement : `str`
Measurement name
source_id : `int`
ID number for the required instrument/measurement.
This can be used directly instead of using the previous parameters.
Returns
-------
out : `dict`
A dictionary containing meta-information for the closest image matched
Examples
--------
>>> from sunpy.net import helioviewer
>>> client = helioviewer.HelioviewerClient() # doctest: +REMOTE_DATA
>>> metadata = client.get_closest_image('2012/01/01', source_id=11) # doctest: +REMOTE_DATA
>>> print(metadata['date']) # doctest: +REMOTE_DATA
2012-01-01T00:00:07.000
"""
if source_id is None:
source_id = self._get_source_id((observatory, instrument, detector, measurement))
params = {
"action": "getClosestImage",
"date": self._format_date(date),
"sourceId": source_id
}
response = self._get_json(params)
# Cast date string to Time
response['date'] = parse_time(response['date'])
return response
def download_jp2(self, date, observatory=None, instrument=None, detector=None,
measurement=None, source_id=None, directory=None, overwrite=False):
"""
Downloads the JPEG 2000 that most closely matches the specified time and
data source.
This uses `getJP2Image <https://api.helioviewer.org/docs/v2/#JPEG2000>`_ from the Helioviewer API.
.. note::
We can use `observatory` and `measurement` or `instrument` and `measurement` to get the value for source ID which
can then be used to get required information.
Parameters
----------
date : `astropy.time.Time`, `str`
A string or `~astropy.time.Time` object for the desired date of the image
observatory : `str`
Observatory name
instrument : `str`
Instrument name
measurement : `str`
Measurement name
detector : `str`
Detector name
source_id : `int`
ID number for the required instrument/measurement.
This can be used directly instead of using the previous parameters.
directory : `str`
Directory to download JPEG 2000 image to.
overwrite : bool
Defaults to False.
If set to True, will overwrite any files with the same name.
Returns
-------
out : `str`
Returns a filepath to the downloaded JPEG 2000 image.
Examples
--------
>>> import sunpy.map
>>> from sunpy.net import helioviewer
>>> hv = helioviewer.HelioviewerClient() # doctest: +REMOTE_DATA
>>> filepath = hv.download_jp2('2012/07/03 14:30:00', observatory='SDO',
... instrument='HMI', detector=None, measurement='continuum') # doctest: +REMOTE_DATA
>>> filepath = hv.download_jp2('2012/07/03 14:30:00', observatory='SDO', measurement='continuum') # doctest: +REMOTE_DATA
>>> filepath = hv.download_jp2('2012/07/03 14:30:00', instrument='HMI', measurement='continuum') # doctest: +REMOTE_DATA
>>> aia = sunpy.map.Map(filepath) # doctest: +REMOTE_DATA
>>> aia.peek() # doctest: +SKIP
"""
if source_id is None:
source_id = self._get_source_id((observatory, instrument, detector, measurement))
params = {
"action": "getJP2Image",
"date": self._format_date(date),
"sourceId": source_id,
}
return self._get_file(params, directory=directory, overwrite=overwrite)
def get_jp2_header(self, date, observatory=None, instrument=None, detector=None, measurement=None, jp2_id=None):
"""
Get the XML header embedded in a JPEG2000 image. Includes the FITS header as well as a section
of Helioviewer-specific metadata.
This uses `getJP2Header <https://api.helioviewer.org/docs/v2/#JPEG2000>`_ from the Helioviewer API.
.. note::
We can use `observatory` and `measurement` or `instrument` and `measurement` to get the value for source ID which
can then be used to get required information.
Parameters
----------
date : `astropy.time.Time`, `str`
A `~sunpy.time.parse_time` parsable string or `~astropy.time.Time`
object for the desired date of the image
observatory : `str`
Observatory name
instrument : `str`
Instrument name
measurement : `str`
Measurement name
detector : `str`
Detector name
jp2_id : `int`
Unique JP2 image identifier.
This can be used directly instead of using the previous parameters.
Returns
-------
out : `dict`
Returns a dictionary containing the header information of JPEG 2000 image.
The returned dictionary may have either one or two keys: *fits* and *helioviewer*.
Examples
--------
>>> from sunpy.net import helioviewer
>>> hv = helioviewer.HelioviewerClient() # doctest: +REMOTE_DATA
>>> header = hv.get_jp2_header('2012/07/03', observatory='SDO',
... instrument='HMI', detector=None, measurement='continuum') # doctest: +REMOTE_DATA
>>> # The key 'fits' can be used to get the fits header information
>>> fits_header = header['fits'] # doctest: +REMOTE_DATA
>>> # The keys 'helioviewer' can be used to extract the helioviewer specific metadata.
>>> helioviewer_meta_data = header['helioviewer'] # doctest: +REMOTE_DATA
"""
if jp2_id is None:
jp2_id = self.get_closest_image(date, observatory, instrument, detector, measurement)['id']
params = {
"action": "getJP2Header",
"id" : jp2_id,
}
responses = self._request(params)
# Reads the output from HTTPResponse object and decodes it.
responses = responses.read().decode('utf-8')
return xml_to_dict(responses)['meta']
def download_png(self, date, image_scale, layers,
directory=None, overwrite=False, watermark=False,
events="", event_labels=False,
scale=False, scale_type="earth", scale_x=0, scale_y=0,
width=4096, height=4096, x0=0, y0=0,
x1=None, y1=None, x2=None, y2=None):
"""
Downloads the PNG that most closely matches the specified time and
data source.
        This function is different from `~sunpy.net.helioviewer.HelioviewerClient.download_jp2`.
        Here you get PNG images, which can be more complex, composited images.
        For example, you can return an image that has multiple layers composited together
        from different sources.
        It can also mark solar features/events with an associated text label.
The image can also be cropped to a smaller field of view.
These parameters are not pre-validated before they are passed to Helioviewer API.
See https://api.helioviewer.org/docs/v2/#appendix_coordinates for more information about
        what coordinate values you can pass into this function.
This uses `takeScreenshot <https://api.helioviewer.org/docs/v2/#Screenshots>`_ from the Helioviewer API.
.. note::
            Parameters ``x1``, ``y1``, ``x2`` and ``y2`` default to `None`.
            If all four are given values, the keywords ``width``, ``height``, ``x0``, ``y0`` will be ignored.
Parameters
----------
date : `astropy.time.Time`, `str`
A `parse_time` parsable string or `~astropy.time.Time` object
for the desired date of the image
image_scale : `float`
The zoom scale of the image in arcseconds per pixel.
For example, the scale of an AIA image is 0.6.
layers : `str`
            Image datasource layer/layers to include in the screenshot.
Each layer string is comma-separated with either:
"[sourceId,visible,opacity]" or "[obs,inst,det,meas,visible,opacity]".
Multiple layers are: "[layer1],[layer2],[layer3]".
events : `str`, optional
Defaults to an empty string to indicate no feature/event annotations.
            List feature/event types and FRMs to use to annotate the image.
Example could be "[AR,HMI_HARP;SPoCA,1]" or "[CH,all,1]"
event_labels : `bool`, optional
Defaults to False.
Annotate each event marker with a text label.
watermark : `bool`, optional
Defaults to False.
Overlay a watermark consisting of a Helioviewer logo and
the datasource abbreviation(s) and timestamp(s) in the screenshot.
directory : `str`, optional
Directory to download JPEG 2000 image to.
overwrite : bool, optional
Defaults to False.
If set to True, will overwrite any files with the same name.
scale : `bool`, optional
Defaults to False.
Overlay an image scale indicator.
scale_type : `str`, optional
Defaults to Earth.
            What the image scale indicator will be.
scale_x : `int`, optional
            Defaults to 0 (i.e., in the middle)
Horizontal offset of the image scale indicator in arcseconds with respect
to the center of the Sun.
scale_y : `int`, optional
            Defaults to 0 (i.e., in the middle)
Vertical offset of the image scale indicator in arcseconds with respect
to the center of the Sun.
x0 : `float`, optional
The horizontal offset from the center of the Sun.
y0 : `float`, optional
The vertical offset from the center of the Sun.
width : `int`, optional
Defaults to 4096.
Width of the image in pixels.
height : `int`, optional
Defaults to 4096.
Height of the image in pixels.
x1 : `float`, optional
Defaults to None
The offset of the image's left boundary from the center
of the sun, in arcseconds.
y1 : `float`, optional
Defaults to None
The offset of the image's top boundary from the center
of the sun, in arcseconds.
x2 : `float`, optional
Defaults to None
The offset of the image's right boundary from the
center of the sun, in arcseconds.
y2 : `float`, optional
Defaults to None
The offset of the image's bottom boundary from the
center of the sun, in arcseconds.
Returns
-------
out : `str`
Returns a filepath to the downloaded PNG image.
Examples
--------
>>> from sunpy.net.helioviewer import HelioviewerClient
>>> hv = HelioviewerClient() # doctest: +REMOTE_DATA
>>> file = hv.download_png('2012/07/16 10:08:00', 2.4,
... "[SDO,AIA,AIA,171,1,100]",
... x0=0, y0=0, width=1024, height=1024) # doctest: +REMOTE_DATA
>>> file = hv.download_png('2012/07/16 10:08:00', 4.8,
... "[SDO,AIA,AIA,171,1,100],[SOHO,LASCO,C2,white-light,1,100]",
... x1=-2800, x2=2800, y1=-2800, y2=2800) # doctest: +REMOTE_DATA
"""
params = {
"action": "takeScreenshot",
"date": self._format_date(date),
"imageScale": image_scale,
"layers": layers,
"eventLabels": event_labels,
"events": events,
"watermark": watermark,
"scale": scale,
"scaleType": scale_type,
"scaleX": scale_x,
"scaleY": scale_y,
# Returns the image which we do not want a user to change.
"display": True
}
# We want to enforce that all values of x1, x2, y1, y2 are not None.
# You can not use both scaling parameters so we try to exclude that here.
if any(i is None for i in [x1, x2, y1, y2]):
adict = {"x0": x0, "y0": y0,
"width": width, "height": height}
else:
adict = {"x1": x1, "x2": x2,
"y1": y1, "y2": y2}
params.update(adict)
return self._get_file(params, directory=directory, overwrite=overwrite)
def is_online(self):
"""Returns True if Helioviewer is online and available."""
try:
self.get_data_sources()
except urllib.error.URLError:
return False
return True
def _get_json(self, params):
"""Returns a JSON result as a string."""
reader = codecs.getreader("utf-8")
response = self._request(params)
return json.load(reader(response))
def _get_file(self, params, directory=None, overwrite=False):
"""Downloads a file and return the filepath to that file."""
if directory is None:
directory = Path(sunpy.config.get('downloads', 'download_dir'))
else:
directory = Path(directory).expanduser().absolute()
downloader = parfive.Downloader(overwrite=overwrite)
url = urllib.parse.urljoin(self._api,
"?" + urllib.parse.urlencode(params))
downloader.enqueue_file(url, path=directory)
res = downloader.download()
if len(res) == 1:
return res[0]
else:
return res
def _request(self, params):
"""
Sends an API request and returns the result.
Parameters
----------
params : `dict`
Parameters to send
Returns
-------
out : result of the request
"""
response = urllib.request.urlopen(
self._api, urllib.parse.urlencode(params).encode('utf-8'))
return response
def _format_date(self, date):
"""Formats a date for Helioviewer API requests"""
return parse_time(date).isot + "Z"
def _get_source_id(self, key):
"""
Returns source_id based on the key.
"""
source_id_list = list(partial_key_match(key, self.data_sources))
if len(source_id_list) != 1:
raise KeyError(f"The values used: {key} do not correspond to one source_id "
f"but {len(source_id_list)} source_id(s)."
" Please check the list using HelioviewerClient.data_sources.")
return source_id_list[0]
| bsd-2-clause | -8,021,347,846,941,426,000 | 39.530435 | 130 | 0.57729 | false |
ls-cwi/heinz | script/makeHeinzRpcstMcDimacs.py | 1 | 1116 | #!/usr/bin/python
import sys
if len(sys.argv) != 5:
sys.stderr.write("Usage: " + sys.argv[0] + " <instances> <full_output_dir> <timelimit_pbs> <timelimit_heinz>\n")
sys.exit(1)
lines = open(sys.argv[1]).readlines()
n = len(lines)
bins = n / 15
if n % 15 != 0:
bins += 1
full_output_dir = sys.argv[2]
timelimit_pbs = sys.argv[3]
timelimit_heinz = sys.argv[4]
for i in range(bins):
with open(str(i) + ".pbs", "w") as f:
f.write("#PBS -N " + str(i)+"\n")
f.write("#PBS -o " + str(i) + ".out\n")
f.write("#PBS -e " + str(i) + ".err\n")
f.write("#PBS -lwalltime=" + timelimit_pbs + "\n")
f.write("#PBS -lnodes=1:cpu3\n")
f.write("cd ~/DIMACS2014/\n")
nn = 15
if i == bins - 1:
nn = n % 15
for j in range(nn):
idx = 15 * i + j
s = lines[idx].rstrip("\n").split(" ")
filename1 = full_output_dir + "/" + "4-" + s[0]
f.write("( /usr/bin/time -o " + filename1 + ".time bin/heinz_rpcst_mc " + s[2] + " " + timelimit_heinz + " 2 " + filename1 + ".dimacs" + " > " + filename1 + ".out 2> " + filename1 + ".err ) &\n")
f.write("wait\n")
| mit | -4,746,288,819,771,932,000 | 30 | 201 | 0.525986 | false |
googleads/dfp-playground | dfp_playground.py | 1 | 1688 | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""AppEngine application WSGI entry point.
Configures the web application that will display the DFP Playground UI.
"""
import os
import sys
# Prepend lib directory that contains third-party libraries to the system path
sys.path.insert(0, os.path.join(os.path.abspath('.'), 'lib'))
from views import APIViewHandler
from views import Login
from views import LoginCallback
from views import LoginErrorPage
from views import MainPage
from views import MakeTestNetworkPage
from views import PutCredentials
from views import RevokeOldRefreshTokens
import webapp2
VERSION = '1.0.10'
app = webapp2.WSGIApplication(
[
webapp2.Route('/', MainPage),
webapp2.Route('/login', Login),
webapp2.Route('/oauth2callback', LoginCallback),
webapp2.Route('/login/error', LoginErrorPage),
webapp2.Route('/tasks/revoke', RevokeOldRefreshTokens),
webapp2.Route('/make-test-network', MakeTestNetworkPage),
webapp2.Route('/api/<method>', handler=APIViewHandler),
webapp2.Route('/tasks/put-credentials', PutCredentials)
],
debug=True)
| apache-2.0 | 2,642,138,051,772,439,600 | 33.44898 | 78 | 0.742299 | false |
0ED/Toy | auto_sender/gmail.py | 1 | 1234 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import sys
import smtplib
from email import Encoders
from email.MIMEText import MIMEText
from email.MIMEBase import MIMEBase
from email.MIMEMultipart import MIMEMultipart
from email.Header import Header
from email.Utils import formatdate
class Gmail(object):
def create_message(self, from_address, to_address, a_title, file_info, a_body):
a_message = MIMEMultipart()
a_body = MIMEText(a_body, _charset='iso-2022-jp')
a_message['Subject'] = a_title
a_message['From'] = from_address
a_message['To'] = to_address
a_message['Date'] = formatdate()
a_message.attach(a_body)
attachment = MIMEBase(file_info['type'], file_info['subtype'])
with open(file_info['path']) as a_file:
attachment.set_payload(a_file.read())
Encoders.encode_base64(attachment)
a_message.attach(attachment)
attachment.add_header("Content-Disposition", "attachment", filename=file_info['name'])
return a_message
def send(self, from_address, to_address, a_message):
a_smtp = smtplib.SMTP('smtp.gmail.com', 587)
a_smtp.ehlo()
a_smtp.starttls()
a_smtp.ehlo(0)
a_smtp.login(from_address,'4?SiFLV=tY')
a_smtp.sendmail(from_address, to_address, a_message.as_string())
a_smtp.close()
| mit | 1,113,345,427,749,023,500 | 31.473684 | 88 | 0.718801 | false |
kawu/tagger | utils/tei2linc/tei2linc.py | 1 | 7737 | import sys
import os
import shutil
import argparse
import tarfile
import re
from collections import defaultdict
class Lexem:
def __init__(self, ctag=None, base=None, msds=None):
self.ctag = ctag
self.base = base
self.msds = msds if msds is not None else set()
def add(self, msd):
self.msds.add(msd)
class Disamb:
def __init__(self, ctag, base, msd):
self.ctag = ctag
self.base = base if base.strip() else None
self.msd = msd
def label(self):
return (self.ctag + ":" + self.msd).strip(":")
class Segment:
def __init__(self, orth=None, nps=False, disamb=None, lexs=None):
self.orth = orth
self.nps = nps
self.disamb = disamb
self.lexs = lexs if lexs is not None else []
def append(self, lex):
self.lexs.append(lex)
def label(self):
return self.disamb.label()
def labels(self):
return set([
(lex.ctag + ":" + msd).strip(":")
for lex in self.lexs
for msd in lex.msds
])
SYMBOL = re.compile('<symbol .*? ?value="(.*?)"')
STRING = re.compile('<string>(.*?)</string>')
BINARY = re.compile('<binary .*? ?value="(.*?)"')
CTAGS = set(["adja", "adjp", "adjc", "conj", "comp", "interp", "pred",
"xxx", "adv", "imps", "inf", "pant", "pcon", "qub", "prep",
"siebie", "subst", "depr", "ger", "ppron12", "ppron3", "num",
"numcol", "adj", "pact", "ppas", "winien", "praet", "bedzie",
"fin", "impt", "aglt", "ign", "brev", "burk", "interj"])
def parse_disamb(disamb):
k = 0
disamb = list(reversed(disamb.split(":")))
for x in disamb:
if x.strip() in CTAGS:
break
k += 1
ctag = disamb[k].strip()
base = ":".join(reversed(disamb[k+1:]))
msd = ":".join(reversed([x.strip() for x in disamb[:k]]))
# return ":".join(reversed(result)).strip(": ")
return Disamb(ctag, base, msd)
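# Hedged illustration of parse_disamb (the example tag string is hypothetical;
# the exact tagset depends on the corpus): the value is scanned from the right
# until a known ctag is found, everything before it is the base form and
# everything after it is the msd.
#
#   parse_disamb("kot:subst:sg:nom:m1")
#   -> Disamb(ctag="subst", base="kot", msd="sg:nom:m1")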
def value(line, regex):
match = regex.search(line)
return match.group(1) if match else None
def check_dir(path, overwrite):
"""Is string representing a valid, non-existing directory path ?"""
if not os.path.isdir(os.path.dirname(path)):
msg = "%s is not a valid path" % path
raise argparse.ArgumentTypeError(msg)
elif os.path.exists(path) and overwrite == False:
msg = "%s already exists" % path
raise argparse.ArgumentTypeError(msg)
return path
def make_args_parser():
parser = argparse.ArgumentParser(
description="Convert TEI corpus to LINC corpus.")
parser.add_argument("tei",
help="TEI corpus compressed as a tar archive file")
parser.add_argument("-d", "--out-dir",
help="Save results in separate files in the output directory.")
parser.add_argument("-w", "--overwrite",
default=False, action="store_true",
help="Overwrite files in output directory when using -d option.")
parser.add_argument("-b", "--preserve-bases",
default=False, action="store_true",
help="Preserve base forms of individual words.")
return parser
def parse_args(parser):
args = parser.parse_args()
if args.out_dir is not None:
check_dir(args.out_dir, args.overwrite)
return args
def morph_files_in_tar(tar):
for member in tar:
if "ann_morphosyntax.xml" in member.name:
yield member
def _parse_morph(f):
felem = re.compile('<f name="(.*?)"')
seg = []
sent = []
for line in f:
line = line.strip()
if "</s>" in line:
yield sent
sent = []
if "</seg>" in line:
sent.append(seg)
seg = []
match = felem.search(line)
if match != None:
inside = match.group(1)
if line.startswith("<string"):
seg.append((inside, value(line, STRING)))
elif line.startswith("<symbol"):
seg.append((inside, value(line, SYMBOL)))
elif line.startswith("<binary"):
seg.append((inside, value(line, BINARY)))
def parse_morph(f):
for sent_info in _parse_morph(f):
sent = []
for k, seg_info in enumerate(sent_info):
for (tp, val) in seg_info:
if tp == "orth":
seg = Segment(orth=val)
elif tp == "nps" and val == "true":
seg.nps = True
elif tp == "base":
lex = Lexem(base=val)
seg.append(lex)
elif tp == "ctag":
lex.ctag = val
elif tp == "msd":
lex.add(val.strip(": "))
# interp = (ctag + ":" + val).strip(": ")
# interps.append(interp)
elif tp == "interpretation":
seg.disamb = parse_disamb(val)
# interps = list(set(interps))
# sent.append((orth, disamb, interps))
assert seg.label() in seg.labels()
# print [msd for lex in seg.lexs for msd in lex.msds]
sent.append(seg)
yield sent
def esc(s):
if s is None:
s = ""
return '"' + s.replace('"', '""') + '"'
def print_full_sent(output, sent, src):
print >> output, "Sent"
print >> output, " Source"
print >> output, " Path", esc(src[0])
print >> output, " Id", esc(src[1])
for seg in sent:
if seg.nps:
print >> output, " Word", "Nps"
else:
print >> output, " Word"
print >> output, " Orth", esc(seg.orth)
for lex in seg.lexs:
print >> output, " Lex", esc(lex.base)
for msd in sorted(lex.msds):
interp = (lex.ctag + ":" + msd).strip(":")
if (
msd == seg.disamb.msd and
lex.ctag == seg.disamb.ctag and
(seg.disamb.base is None or lex.base == seg.disamb.base)):
print >> output, " * " + interp
else:
print >> output, " - " + interp
# print >> output, " WordEnd"
# print >> output, "SentEnd"
def print_sent(output, sent, src):
print >> output, "Sent"
print >> output, " Source"
print >> output, " Path", esc(src[0])
print >> output, " Id", src[1]
for seg in sent:
interps = seg.labels()
disamb = seg.label()
if seg.nps:
print >> output, " Word", "Nps"
else:
print >> output, " Word"
print >> output, " Orth", seg.orth
print >> output, " Interps"
for interp in sorted(interps):
if interp == disamb:
print >> output, " * " + interp
else:
print >> output, " - " + interp
print >> output, " InterpsEnd"
# print >> output, " WordEnd"
# print >> output, "SentEnd"
def output_for(path, k):
if path is None:
return sys.stdout
else:
return open(os.path.join(path, str(k) + ".linc"), "w")
if __name__ == "__main__":
parser = make_args_parser()
args = parse_args(parser)
if args.out_dir is not None:
if os.path.exists(args.out_dir):
shutil.rmtree(args.out_dir)
os.mkdir(args.out_dir)
tar = tarfile.open(args.tei)
n = 0
for morph in morph_files_in_tar(tar):
for i, sent in enumerate(parse_morph(tar.extractfile(morph))):
src = (morph.name, str(i))
out = output_for(args.out_dir, n)
if not args.preserve_bases:
print_sent(out, sent, src)
else:
print_full_sent(out, sent, src)
n += 1
tar.close()
| bsd-3-clause | 2,646,679,915,105,169,000 | 30.579592 | 78 | 0.516996 | false |
erikulven/flapmagick | camera.py | 1 | 1925 | """ Uses camera and takes images for documentation of motion """
import time
from PIL import Image
import urllib
import StringIO
import settings
user = settings.cam_user
pwd = settings.cam_pwd
cam_url = settings.cam_url
def fetch_snapshot_image():
im = StringIO.StringIO(urllib.urlopen(settings.cam_url).read())
return im
def dummy():
img = Image.open(im)
r = requests.get(settings.cam_url, auth=(user, pwd), stream=True)
if r.status_code == 200:
imageData = StringIO.StringIO()
imageData.write(r.raw)
imageData.seek(0)
return imageData
return None
def compare(buffer1, buffer2, threshold=0):
"""
    diffs images pixel by pixel and returns how many pixels differ by more than threshold
code taken from script written by brainflakes posted at raspberry
forum. http://www.raspberrypi.org/phpBB3/viewtopic.php?t=45235
"""
# Count changed pixels
changedPixels = 0
print "In compare buf1: %s buf2: %s" % (buffer1, buffer2)
for x in xrange(0, 100):
# Scan one line of image then check sensitivity for movement
for y in xrange(0, 75):
# Just check green channel as it's the highest quality channel
pixdiff = abs(buffer1[x, y][1] - buffer2[x, y][1])
            if pixdiff > threshold:
                changedPixels += 1
    return changedPixels
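# Hedged usage sketch (assumes two same-size RGB snapshots; names are
# illustrative, mirroring the loop in the __main__ block below):
#
#   prev = Image.open(fetch_snapshot_image()).load()
#   curr = Image.open(fetch_snapshot_image()).load()
#   if compare(prev, curr, threshold=10) > 20:
#       print "motion detected"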
if __name__ == "__main__":
print "Starting camera surv"
counter = 0
prev_img = None
while counter < 50:
img = fetch_snapshot_image()
print "found img: %s" % img
if img is not None and prev_img is not None:
print "Doing comparison"
im = Image.open(img)
buf = im.load()
prev_im = Image.open(prev_img)
prev_buf = prev_im.load()
print "Diff in images is: %s" % compare(prev_buf, buf)
im.close()
prev_im.close()
prev_img = img
time.sleep(1)
| bsd-2-clause | -8,973,370,309,740,577,000 | 29.078125 | 74 | 0.604156 | false |
patrickm/chromium.src | tools/perf/measurements/thread_times.py | 1 | 2083 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from measurements import timeline_controller
from metrics import timeline
from telemetry.page import page_measurement
class ThreadTimes(page_measurement.PageMeasurement):
def __init__(self):
super(ThreadTimes, self).__init__('RunSmoothness')
self._timeline_controller = None
@classmethod
def AddCommandLineArgs(cls, parser):
parser.add_option('--report-silk-results', action='store_true',
help='Report results relevant to silk.')
parser.add_option('--report-silk-details', action='store_true',
help='Report details relevant to silk.')
@property
def results_are_the_same_on_every_page(self):
return False
def WillRunActions(self, page, tab):
self._timeline_controller = timeline_controller.TimelineController()
if self.options.report_silk_details:
# We need the other traces in order to have any details to report.
      self._timeline_controller.trace_categories = \
          timeline_controller.DEFAULT_TRACE_CATEGORIES
else:
self._timeline_controller.trace_categories = \
timeline_controller.MINIMAL_TRACE_CATEGORIES
self._timeline_controller.Start(page, tab)
def DidRunAction(self, page, tab, action):
self._timeline_controller.AddActionToIncludeInMetric(action)
def DidRunActions(self, page, tab):
self._timeline_controller.Stop(tab)
def MeasurePage(self, page, tab, results):
metric = timeline.ThreadTimesTimelineMetric(
self._timeline_controller.model,
self._timeline_controller.renderer_process,
self._timeline_controller.action_ranges)
if self.options.report_silk_results:
metric.results_to_report = timeline.ReportSilkResults
if self.options.report_silk_details:
metric.details_to_report = timeline.ReportSilkDetails
metric.AddResults(tab, results)
def CleanUpAfterPage(self, _, tab):
self._timeline_controller.CleanUp(tab)
| bsd-3-clause | -1,464,746,998,573,629,700 | 38.301887 | 72 | 0.722516 | false |
MTG/dunya | jingju/models.py | 1 | 5375 | from django.db import models
import data.models
from jingju import managers
class JingjuStyle(object):
def get_style(self):
return "jingju"
def get_object_map(self, key):
return {
"performance": RecordingInstrumentalist,
"release": Release,
"artist": Artist,
"recording": Recording,
"work": Work,
"instrument": Instrument
}[key]
class Recording(JingjuStyle, data.models.BaseModel):
class Meta:
ordering = ['id']
title = models.CharField(max_length=200, blank=True, null=True)
mbid = models.UUIDField(blank=True, null=True)
work = models.ForeignKey('Work', null=True, on_delete=models.CASCADE)
performers = models.ManyToManyField('Artist')
instrumentalists = models.ManyToManyField('Artist', through='RecordingInstrumentalist',
related_name='instrumentalist')
shengqiangbanshi = models.ManyToManyField('ShengqiangBanshi')
objects = managers.CollectionRecordingManager()
def __str__(self):
return u"%s" % self.title
class RecordingInstrumentalist(JingjuStyle, models.Model):
recording = models.ForeignKey('Recording', on_delete=models.CASCADE)
artist = models.ForeignKey('Artist', on_delete=models.CASCADE)
instrument = models.ForeignKey('Instrument', on_delete=models.CASCADE)
class Artist(data.models.Artist):
romanisation = models.CharField(max_length=200, blank=True, null=True)
role_type = models.ForeignKey('RoleType', blank=True, null=True, on_delete=models.CASCADE)
instrument = models.ForeignKey('Instrument', blank=True, null=True, related_name='jingju', on_delete=models.CASCADE)
objects = managers.ArtistManager()
class Meta:
ordering = ['id']
class Composer(data.models.Composer):
alias = models.CharField(max_length=200, blank=True, null=True)
objects = managers.ArtistManager()
class Meta:
ordering = ['id']
class Instrument(data.models.Instrument):
class Meta:
ordering = ['id']
class RecordingRelease(models.Model):
recording = models.ForeignKey('Recording', on_delete=models.CASCADE)
release = models.ForeignKey('Release', on_delete=models.CASCADE)
sequence = models.IntegerField(blank=True, null=True)
# The number that the track comes in the concert. Numerical 1-n
track = models.IntegerField(blank=True, null=True)
# The disc number. 1-n
disc = models.IntegerField(blank=True, null=True)
# The track number within this disc. 1-n
disctrack = models.IntegerField(blank=True, null=True)
class Meta:
ordering = ("track",)
def __str__(self):
return u"%s: %s from %s" % (self.track, self.recording, self.release)
class Work(JingjuStyle, data.models.Work):
class Meta:
ordering = ['id']
title = models.CharField(max_length=200, blank=True, null=True)
mbid = models.UUIDField(blank=True, null=True)
score = models.ForeignKey('Score', blank=True, null=True, on_delete=models.CASCADE)
play = models.ForeignKey('Play', blank=True, null=True, on_delete=models.CASCADE)
def __str__(self):
return u"%s" % self.title
class Release(JingjuStyle, data.models.Release):
class Meta:
ordering = ['id']
recordings = models.ManyToManyField('Recording', through='RecordingRelease')
collection = models.ForeignKey('data.Collection', blank=True, null=True, on_delete=models.CASCADE)
objects = managers.CollectionReleaseManager()
class RoleType(data.models.BaseModel):
class Meta:
ordering = ['code']
# The code used in tags in musicbrainz to identify this roletype (hd00)
# the first digit specifies a "parent" roletype, and the second digit a subtype.
code = models.CharField(max_length=10, db_index=True)
name = models.CharField(max_length=100)
romanisation = models.CharField(max_length=100)
uuid = models.UUIDField()
# The "main" roletype for a more specific one. An artist who performs in a specific roletype
# by definition performs in the parent roletype
parent = models.ForeignKey('RoleType', blank=True, null=True, on_delete=models.CASCADE)
def __str__(self):
return u"{}: {}/{}".format(self.code, self.name, self.romanisation)
class Play(data.models.BaseModel):
title = models.CharField(max_length=100)
uuid = models.UUIDField(blank=True, null=True)
class Score(data.models.BaseModel):
# the name of the work
name = models.CharField(max_length=100)
# The name of the series
source = models.CharField(max_length=100)
# read this from the annotation of the series (we need to make it machine readable)
citation = models.CharField(max_length=100, blank=True, null=True)
citation_romanisation = models.CharField(max_length=100, blank=True, null=True)
# This shouldn't be a uuidfield (
uuid = models.UUIDField(blank=True, null=True)
class ShengqiangBanshi(data.models.BaseModel):
# The code used in tags in musicbrainz to identify this shengqiangbanshi (sqbs000)
code = models.CharField(max_length=10, db_index=True, unique=True)
name = models.CharField(max_length=100)
romanisation = models.CharField(max_length=100)
def __str__(self):
return u"{}: {}/{}".format(self.code, self.name, self.romanisation)
| agpl-3.0 | -2,666,498,335,469,265,000 | 34.361842 | 120 | 0.685209 | false |
edsu/lochief | kochief/discovery/management/commands/index.py | 1 | 4772 | #! /usr/bin/python
# -*- coding: utf8 -*-
# Copyright 2009 Gabriel Sean Farrell
# Copyright 2008 Mark A. Matienzo
#
# This file is part of Kochief.
#
# Kochief is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Kochief is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Kochief. If not, see <http://www.gnu.org/licenses/>.
"""Indexes documents in a Solr instance."""
import os
import optparse
import sys
import time
import urllib
from optparse import make_option
try:
from xml.etree import ElementTree as et # builtin in Python 2.5
except ImportError:
import elementtree.ElementTree as et
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
CSV_FILE = 'tmp.csv'
class Command(BaseCommand):
option_list = BaseCommand.option_list + (
make_option('-n', '--new',
action='store_true',
dest='new',
help='Create a new index. If the index already exists, it will be replaced.'),
make_option('-p', '--parser',
dest='parser',
metavar='PARSER',
help='Use PARSER (in kochief/parsers/) to parse files or urls for indexing'),
)
help = 'Indexes documents in a Solr instance.'
args = '[file_or_url ...]'
def handle(self, *file_or_urls, **options):
new = options.get('new')
if new:
# create/replace index
pass
if file_or_urls:
parser = options.get('parser')
module = None
if parser:
if parser.endswith('.py'):
parser = parser[:-3]
module = __import__('kochief.parsers.' + parser, globals(),
locals(), [parser])
for file_or_url in file_or_urls:
if not module:
# guess parser based on file extension
if file_or_url.endswith('.mrc'):
from kochief.parsers import marc as module
if not module:
raise CommandError("Please specify a parser.")
print "Converting %s to CSV ..." % file_or_url
t1 = time.time()
data_handle = urllib.urlopen(file_or_url)
try:
csv_handle = open(CSV_FILE, 'w')
record_count = module.write_csv(data_handle, csv_handle)
finally:
csv_handle.close()
t2 = time.time()
self._load_solr(CSV_FILE)
t3 = time.time()
os.remove(CSV_FILE)
p_time = (t2 - t1) / 60
l_time = (t3 - t2) / 60
t_time = p_time + l_time
rate = record_count / (t3 - t1)
print """Processing took %0.3f minutes.
Loading took %0.3f minutes.
That's %0.3f minutes total for %d records,
at a rate of %0.3f records per second.
""" % (p_time, l_time, t_time, record_count, rate)
def _get_multi(self):
"""Inspect solr schema.xml for multivalue fields."""
multivalue_fieldnames = []
schema = et.parse(settings.SOLR_DIR + 'conf/schema.xml')
fields_element = schema.find('fields')
field_elements = fields_element.findall('field')
for field in field_elements:
if field.get('multiValued') == 'true':
multivalue_fieldnames.append(field.get('name'))
return multivalue_fieldnames
def _load_solr(self, csv_file):
"""
Load CSV file into Solr. solr_params are a dictionary of parameters
sent to solr on the index request.
"""
file_path = os.path.abspath(csv_file)
solr_params = {}
for fieldname in self._get_multi():
tag_split = "f.%s.split" % fieldname
solr_params[tag_split] = 'true'
tag_separator = "f.%s.separator" % fieldname
solr_params[tag_separator] = '|'
solr_params['stream.file'] = file_path
solr_params['commit'] = 'true'
params = urllib.urlencode(solr_params)
update_url = settings.SOLR_URL + 'update/csv?%s'
print "Loading records into Solr ..."
try:
output = urllib.urlopen(update_url % params)
except IOError:
raise IOError, 'Unable to connect to the Solr instance.'
print "Solr response:\n"
print output.read()
| gpl-3.0 | -521,989,871,261,624,260 | 35.427481 | 91 | 0.586756 | false |
xtiankisutsa/MARA_Framework | tools/qark/qark/modules/createSploit.py | 1 | 2666 | from __future__ import absolute_import
'''Copyright 2015 LinkedIn Corp. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.'''
import shutil
import errno
import fileinput
import os
from qark.modules import common
from qark.modules.common import logger
class exploitType:
"""
Enum type for exploitatin category
"""
MANIFEST, ACTIVITY, INTENT, PERMISSION, SERVICE, RECEIVER, BROADCAST_INTENT = range(7)
def copy_template(src,dest):
"""
Given a source and destination, copy all files/folders under source to destination\n
Overwrites destination if any files/folders already exists\n
Used to copy the exploit template
"""
    status = None
    try:
shutil.copytree(src, dest)
except OSError as e:
# If the error was caused because the source wasn't a directory
if e.errno == errno.ENOTDIR:
            shutil.copy(src, dest)
status='ERROR'
else:
print('Directory not copied. Error: %s' % e)
#TODO - give an option to specify a different dir, if the specified one already exists
status='ERROR'
return status
def modify_template(path,filename,temp_text,repl_text):
"""
Deprecated code
"""
tmp=path+filename
tmp2=path+filename+'_tmp'
f1 = open(tmp, 'r')
f2 = open(tmp2, 'w')
for line in f1:
f2.write(line.replace(temp_text, repl_text))
#putting back template text, for re-use
f2.write('//REPLACEME-TT2')
f1.close()
f2.close()
os.remove(tmp)
os.rename(tmp2,tmp)
return
def modify_template_2(filename,placeholder,replacement):
"""
Takes a filename,placeholder value to be replaced and the actual replacement value\n
Uncomments the commented out code from exploit template, replaces the placeholder with actual value and adds this content on the next line to facilitate multiple substitutions
"""
flag = False
for line in fileinput.input(filename, inplace=1):
if placeholder in line:
if str(line).strip().startswith("//"):
line1 = str(line).split("//")[1]
flag=True
#print line1.replace(placeholder, replacement)
print line,
if flag:
print line1.replace(placeholder, replacement),
flag=False
| lgpl-3.0 | 3,166,646,589,954,059,300 | 34.078947 | 179 | 0.668792 | false |
Alecardv/College-projects | Metodos Numericos 2012/trapecio.py | 1 | 1308 | import function
from matplotlib.pyplot import *
from pylab import *
import numpy as np
import math
class Trapecio:
def __init__(self, fun, xi, xf):
self.fun = function.Function(fun,'x')
self.a,self.b = xi,xf
self.fig, self.ax = subplots()
def relativeError(self):
f = self.fun.getDerivate()
Ea = ((self.b-self.a)**3/12)*((f.evalFunction(self.b) - f.evalFunction(self.a))/(self.b-self.a))
return Ea
def graph(self):
figure()
root = self.method()
print 'AreaAprox = ',root
print 'AreaReal = ',self.fun.getAndEvalIntegral([self.a,self.b])
print 'Error = ',self.relativeError()
Ox = np.arange(self.a-5,self.b+5, 0.02)
Oy = []
for i in Ox:
Oy.append( self.fun.evalFunction(i) )
self.ax.plot(Ox, Oy, color = "blue",lw = 1,label="f(x)")
self.ax.legend(loc=2)
show()
def px(self,x):
return (self.fun.evalFunction(self.b)-self.fun.evalFunction(self.a))/(self.b-self.a)*(x-self.a) + self.fun.evalFunction(self.a)
def method(self):
I = (self.b-self.a)*((self.fun.evalFunction(self.a) + self.fun.evalFunction(self.b))/2)
self.ax.vlines(self.a,0,self.fun.evalFunction(self.a))
self.ax.vlines(self.b,0,self.fun.evalFunction(self.b))
Ox = np.arange(self.a,self.b, 0.02)
Oy = []
for i in Ox:
Oy.append(self.px(i))
self.ax.plot(Ox, Oy,lw = 2)
return I
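# Hedged usage sketch (hypothetical; assumes function.Function accepts the
# expression as a string in the variable 'x'):
#
#   t = Trapecio('x**2 + 1', 0, 2)
#   t.graph()   # prints AreaAprox, AreaReal and Error, then plots f(x) and the trapezoid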
| gpl-3.0 | 1,179,837,174,657,071,400 | 28.727273 | 129 | 0.65367 | false |
CurrencyCloud/currencycloud-python | src/currencycloud/clients/reference.py | 1 | 2780 | '''This module provides a class for Reference calls to the CC API'''
from currencycloud.http import Http
from currencycloud.resources import BeneficiaryRequiredDetails, ConversionDates, Currency, SettlementAccount, PayerRequiredDetails, PaymentPurposeCode, BankDetails, PaymentFeeRule
class Reference(Http):
'''This class provides an interface to the Reference endpoints of the CC API'''
def beneficiary_required_details(self, **kwargs):
'''Returns required beneficiary details and their basic validation formats.'''
response = self.get('/v2/reference/beneficiary_required_details', query=kwargs)['details']
return [BeneficiaryRequiredDetails(self, **c) for c in response]
def conversion_dates(self, **kwargs):
'''Returns dates for which dates this currency pair can not be traded.'''
return ConversionDates(self, **self.get('/v2/reference/conversion_dates', query=kwargs))
def currencies(self):
'''Returns a list of all the currencies that are tradeable.'''
response = self.get('/v2/reference/currencies')['currencies']
return [Currency(self, **c) for c in response]
def payment_dates(self, **kwargs):
'''
This call returns a list of dates that are invalid when making payments of a specific
currency.
'''
return self.get('/v2/reference/payment_dates', query=kwargs)
def settlement_accounts(self, **kwargs):
'''Returns settlement account information, detailing where funds need to be sent to.'''
response = self.get('/v2/reference/settlement_accounts', query=kwargs)['settlement_accounts']
return [SettlementAccount(self, **c) for c in response]
def payer_required_details(self, **kwargs):
'''Returns required payer details and their basic validation formats.'''
response = self.get('/v2/reference/payer_required_details', query=kwargs)['details']
return [PayerRequiredDetails(self, **c) for c in response]
def payment_purpose_codes(self, **kwargs):
'''Returns a list of valid purpose codes for the specified currency.'''
response = self.get('/v2/reference/payment_purpose_codes', query=kwargs)['purpose_codes']
return [PaymentPurposeCode(self, **c) for c in response]
def bank_details(self, **kwargs):
'''Returns the details of the bank related to the specified identifier.'''
response = self.get('/v2/reference/bank_details', query=kwargs)
return BankDetails(self, **response)
def payment_fee_rules(self, **kwargs):
'''Returns a list of payment fee rules.'''
response = self.get('/v2/reference/payment_fee_rules', query=kwargs)['payment_fee_rules']
return [PaymentFeeRule(self, **c) for c in response] | mit | 8,175,697,936,148,620,000 | 50.5 | 179 | 0.690647 | false |
flake123p/ProjectH | Cpp_Platform2X/_toolchain/list_to_build_script.py | 1 | 3345 | #!/usr/bin/python
# Usage: list_to_make_var.py <in-file1:mod list> <in-file2:OFS> <out-file1:build script> <out-file2:clean script> <OS>
# argv: argv[0] argv[1] argv[2] argv[3] argv[4] argv[5]
#
# Include library
#
import os
import sys
def OpenFile(fileName, mode = 'r'): # mode : 'r', 'w', ...
try:
fp = open(fileName, mode)
except OSError as err:
print("OS error: {0}".format(err))
sys.exit(1)
except:
print("Unexpected error:", sys.exc_info()[0])
sys.exit(1)
return fp
mod_base_path = '../../mod/'
curr_os = str(sys.argv[5])
if curr_os == 'WIN':
mod_build_file = 'build_mod.bat'
mod_clean_file = 'clean_mod.bat'
else:
mod_build_file = 'build_mod.sh'
mod_clean_file = 'clean_mod.sh'
#
# main
#
if len(sys.argv) != 6:
print("Arguments Number Error. It should be 6.")
sys.exit(1)
finList = OpenFile(str(sys.argv[1]))
finOFS = OpenFile(str(sys.argv[2]))
foutBuildfile = OpenFile(str(sys.argv[3]), 'w')
foutCleanfile = OpenFile(str(sys.argv[4]), 'w')
#
# Extract OFS
#
state = 0
OFS_String = ""
OFS_Exist = 0
for each_line in finOFS:
each_word_list = each_line.split()
#print(each_word_list)
for each_word in each_word_list:
# Find OFS
if state == 0:
if each_word != "OFS":
print("Error. Miss \"OFS\" symbol in OFS file.")
sys.exit(1)
state = 1
# Find equal sign
elif state == 1:
if each_word != "=":
print("Error. Miss \"=\" symbol in OFS file.")
sys.exit(1)
state = 2
# Make OFS string
else:
OFS_Exist = 1
OFS_String = OFS_String + " " + each_word
if curr_os == 'WIN':
if OFS_Exist == 1:
OFS_String = "OFS=\"" + OFS_String + "\""
else:
if OFS_Exist == 1:
OFS_String = OFS_String ### "OFS=\"" + OFS_String + "\""
if curr_os == 'WIN':
foutBuildfile.write('@ECHO OFF\n')
foutCleanfile.write('@ECHO OFF\n')
foutBuildfile.write('SET CURR_CD=%CD%\n')
foutCleanfile.write('SET CURR_CD=%CD%\n')
for each_line in finList:
each_mod = each_line.strip()
# build files
str = 'CD ' + mod_base_path + each_mod + '\n' + 'CALL ' + mod_build_file + ' ' + OFS_String + '\n'
foutBuildfile.write(str)
foutBuildfile.write('set rc=%ERRORLEVEL%\n')
foutBuildfile.write('CD %CURR_CD%\n')
foutBuildfile.write('IF %rc% NEQ 0 ( exit /b %rc% )\n\n')
# clean files
str = 'CD ' + mod_base_path + each_mod + '\n' + 'CALL ' + mod_clean_file + ' --DisablePause\n'
foutCleanfile.write(str)
foutCleanfile.write('CD %CURR_CD%\n')
foutCleanfile.write('if "%1" NEQ "--DisablePause" (\n')
foutCleanfile.write(' pause\n')
foutCleanfile.write(')\n')
else: #LINUX
foutBuildfile.write('temp_local_path=$PWD\n')
foutCleanfile.write('temp_local_path=$PWD\n')
for each_line in finList:
each_mod = each_line.strip()
# build files
str = 'cd ' + mod_base_path + each_mod + '\n' + './' + mod_build_file + ' ' + OFS_String + '\n'
foutBuildfile.write(str)
foutBuildfile.write('rc=$?\n')
foutBuildfile.write('cd $temp_local_path\n')
foutBuildfile.write('if [ $rc != 0 ]; then\n')
foutBuildfile.write(' exit $rc\n')
foutBuildfile.write('fi\n\n')
# clean files
str = 'cd ' + mod_base_path + each_mod + '\n' + './' + mod_clean_file + '\n'
foutCleanfile.write(str)
foutCleanfile.write('cd $temp_local_path\n')
finList.close()
foutBuildfile.close()
foutCleanfile.close()
| gpl-3.0 | 1,168,732,192,291,529,700 | 26.875 | 126 | 0.611061 | false |
priestc/moneywagon | moneywagon/tx.py | 1 | 10878 | from moneywagon import (
get_unspent_outputs, CurrentPrice, get_optimal_fee, PushTx,
    get_onchain_exchange_rates, get_current_price
)
from moneywagon.core import get_optimal_services, get_magic_bytes
from bitcoin import mktx, sign, pubtoaddr, privtopub
from .crypto_data import crypto_data
from .currency_support import CurrencySupport
class Transaction(object):
def __init__(self, crypto, hex=None, verbose=False):
c = CurrencySupport()
if crypto not in c.supported_currencies('moneywagon', 'transaction'):
form = crypto_data[crypto]['transaction_form']
raise NotImplementedError("%s not yet supported (tx form: %s)" % (
crypto.upper(), form
))
self.change_address = None
self.crypto = crypto
self.fee_satoshi = None
self.outs = []
self.ins = []
self.onchain_rate = None
self.verbose = verbose
if hex:
self.hex = hex
def from_unit_to_satoshi(self, value, unit='satoshi'):
"""
Convert a value to satoshis. units can be any fiat currency.
By default the unit is satoshi.
"""
if not unit or unit == 'satoshi':
return value
if unit == 'bitcoin' or unit == 'btc':
return value * 1e8
# assume fiat currency that we can convert
convert = get_current_price(self.crypto, unit)
return int(value / convert * 1e8)
def add_raw_inputs(self, inputs, private_key=None):
"""
Add a set of utxo's to this transaction. This method is better to use if you
want more fine control of which inputs get added to a transaction.
`inputs` is a list of "unspent outputs" (they were 'outputs' to previous transactions,
and 'inputs' to subsiquent transactions).
`private_key` - All inputs will be signed by the passed in private key.
"""
for i in inputs:
self.ins.append(dict(input=i, private_key=private_key))
self.change_address = i['address']
def _get_utxos(self, address, services, **modes):
"""
Using the service fallback engine, get utxos from remote service.
"""
return get_unspent_outputs(
self.crypto, address, services=services,
**modes
)
def private_key_to_address(self, pk):
"""
Convert a private key (in hex format) into an address.
"""
pub = privtopub(pk)
pub_byte, priv_byte = get_magic_bytes(self.crypto)
if priv_byte >= 128:
priv_byte -= 128 #pybitcointools bug
return pubtoaddr(pub, pub_byte)
def add_inputs(self, private_key=None, address=None, amount='all', max_ins=None, password=None, services=None, **modes):
"""
Make call to external service to get inputs from an address and/or private_key.
`amount` is the amount of [currency] worth of inputs (in satoshis) to add from
this address. Pass in 'all' (the default) to use *all* inputs found for this address.
Returned is the number of units (in satoshis) that were added as inputs to this tx.
"""
if private_key:
if private_key.startswith('6P'):
if not password:
raise Exception("Password required for BIP38 encoded private keys")
from .bip38 import Bip38EncryptedPrivateKey
private_key = Bip38EncryptedPrivateKey(self.crypto, private_key).decrypt(password)
address_from_priv = self.private_key_to_address(private_key)
if address and address != address_from_priv:
raise Exception("Invalid Private key")
address = address_from_priv
self.change_address = address
if not services:
services = get_optimal_services(self.crypto, 'unspent_outputs')
total_added_satoshi = 0
ins = 0
for utxo in self._get_utxos(address, services, **modes):
if max_ins and ins >= max_ins:
break
if (amount == 'all' or total_added_satoshi < amount):
ins += 1
self.ins.append(
dict(input=utxo, private_key=private_key)
)
total_added_satoshi += utxo['amount']
return total_added_satoshi, ins
def total_input_satoshis(self):
"""
Add up all the satoshis coming from all input tx's.
"""
just_inputs = [x['input'] for x in self.ins]
return sum([x['amount'] for x in just_inputs])
def select_inputs(self, amount):
'''Maximize transaction priority. Select the oldest inputs,
that are sufficient to cover the spent amount. Then,
remove any unneeded inputs, starting with
the smallest in value.
Returns sum of amounts of inputs selected'''
sorted_txin = sorted(self.ins, key=lambda x:-x['input']['confirmations'])
total_amount = 0
for (idx, tx_in) in enumerate(sorted_txin):
total_amount += tx_in['input']['amount']
if (total_amount >= amount):
break
sorted_txin = sorted(sorted_txin[:idx+1], key=lambda x:x['input']['amount'])
for (idx, tx_in) in enumerate(sorted_txin):
value = tx_in['input']['amount']
if (total_amount - value < amount):
break
else:
total_amount -= value
self.ins = sorted_txin[idx:]
return total_amount
def add_output(self, address, value, unit='satoshi'):
"""
Add an output (a person who will receive funds via this tx).
If no unit is specified, satoshi is implied.
"""
value_satoshi = self.from_unit_to_satoshi(value, unit)
if self.verbose:
print("Adding output of: %s satoshi (%.8f)" % (
value_satoshi, (value_satoshi / 1e8)
))
self.outs.append({
'address': address,
'value': value_satoshi
})
def onchain_exchange(self, withdraw_crypto, withdraw_address, value, unit='satoshi'):
"""
This method is like `add_output` but it sends to another
"""
self.onchain_rate = get_onchain_exchange_rates(
self.crypto, withdraw_crypto, best=True, verbose=self.verbose
)
exchange_rate = float(self.onchain_rate['rate'])
result = self.onchain_rate['service'].get_onchain_exchange_address(
self.crypto, withdraw_crypto, withdraw_address
)
address = result['deposit']
value_satoshi = self.from_unit_to_satoshi(value, unit)
if self.verbose:
print("Adding output of: %s satoshi (%.8f) via onchain exchange, converting to %s %s" % (
value_satoshi, (value_satoshi / 1e8),
exchange_rate * value_satoshi / 1e8, withdraw_crypto.upper()
))
self.outs.append({
'address': address,
'value': value_satoshi
})
def fee(self, value=None, unit='satoshi'):
"""
Set the miner fee, if unit is not set, assumes value is satoshi.
If using 'optimal', make sure you have already added all outputs.
"""
convert = None
if not value:
# no fee was specified, use $0.02 as default.
convert = get_current_price(self.crypto, "usd")
self.fee_satoshi = int(0.02 / convert * 1e8)
verbose = "Using default fee of:"
elif value == 'optimal':
self.fee_satoshi = get_optimal_fee(
self.crypto, self.estimate_size(), verbose=self.verbose
)
verbose = "Using optimal fee of:"
else:
self.fee_satoshi = self.from_unit_to_satoshi(value, unit)
verbose = "Using manually set fee of:"
if self.verbose:
if not convert:
convert = get_current_price(self.crypto, "usd")
fee_dollar = convert * self.fee_satoshi / 1e8
print(verbose + " %s satoshis ($%.2f)" % (self.fee_satoshi, fee_dollar))
def estimate_size(self):
"""
Estimate how many bytes this transaction will be by countng inputs
and outputs.
Formula taken from: http://bitcoin.stackexchange.com/a/3011/18150
"""
# if there are no outs use 1 (because the change will be an out)
outs = len(self.outs) or 1
return outs * 34 + 148 * len(self.ins) + 10
def get_hex(self, signed=True):
"""
Given all the data the user has given so far, make the hex using pybitcointools
"""
total_ins_satoshi = self.total_input_satoshis()
if total_ins_satoshi == 0:
raise ValueError("Can't make transaction, there are zero inputs")
# Note: there can be zero outs (sweep or coalesc transactions)
total_outs_satoshi = sum([x['value'] for x in self.outs])
if not self.fee_satoshi:
self.fee() # use default of $0.02
change_satoshi = total_ins_satoshi - (total_outs_satoshi + self.fee_satoshi)
if change_satoshi < 0:
raise ValueError(
"Input amount (%s) must be more than all output amounts (%s) plus fees (%s). You need more %s."
% (total_ins_satoshi, total_outs_satoshi, self.fee_satoshi, self.crypto.upper())
)
ins = [x['input'] for x in self.ins]
if change_satoshi > 0:
if self.verbose:
print("Adding change address of %s satoshis to %s" % (change_satoshi, self.change_address))
change = [{'value': change_satoshi, 'address': self.change_address}]
else:
change = [] # no change ?!
if self.verbose: print("Inputs == Outputs, no change address needed.")
tx = mktx(ins, self.outs + change)
if signed:
for i, input_data in enumerate(self.ins):
if not input_data['private_key']:
raise Exception("Can't sign transaction, missing private key for input %s" % i)
tx = sign(tx, i, input_data['private_key'])
return tx
def push(self, services=None, redundancy=1):
if not services:
services = get_optimal_services(self.crypto, "push_tx")
self.pushers = []
pusher = PushTx(services=services, verbose=self.verbose)
results = [pusher.action(self.crypto, self.get_hex())]
try:
for service in services[1:redundancy-1]:
pusher = PushTx(services=[service], verbose=self.verbose)
                results.append(pusher.action(self.crypto, self.get_hex()))
self.pushers.append(pusher)
except:
raise Exception("Partial push. Some services returned success, some failed.")
return results
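# Hedged end-to-end sketch of the intended call sequence (addresses, keys and
# the fee policy below are placeholders, not real values):
#
#   tx = Transaction('btc', verbose=True)
#   tx.add_inputs(private_key='<hex private key>')     # fetches utxos remotely
#   tx.add_output('<recipient address>', 0.001, unit='btc')
#   tx.fee('optimal')
#   raw_hex = tx.get_hex()                              # signed hex, ready to broadcast
#   tx.push()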
| mit | 7,023,996,752,570,535,000 | 37.168421 | 124 | 0.581633 | false |
Teamxrtc/webrtc-streaming-node | third_party/webrtc/src/chromium/src/tools/checkperms/checkperms.py | 1 | 13388 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Makes sure files have the right permissions.
Some developers have broken SCM configurations that flip the executable
permission on for no good reason. Unix developers who run ls --color will then
see .cc files in green and get confused.
- For file extensions that must be executable, add it to EXECUTABLE_EXTENSIONS.
- For file extensions that must not be executable, add it to
  NON_EXECUTABLE_EXTENSIONS.
- To ignore all the files inside a directory, add it to IGNORED_PATHS.
- For file base name with ambiguous state and that should not be checked for
shebang, add it to IGNORED_FILENAMES.
Any file not matching the above will be opened and checked for a shebang or an
ELF header. If this does not match the executable bit on the file, the file
will be flagged.
Note that all directory separators must be slashes (Unix-style) and not
backslashes. All directories should be relative to the source root and all
file paths should be only lowercase.
"""
import json
import logging
import optparse
import os
import stat
import string
import subprocess
import sys
#### USER EDITABLE SECTION STARTS HERE ####
# Files with these extensions must have executable bit set.
#
# Case-sensitive.
EXECUTABLE_EXTENSIONS = (
'bat',
'dll',
'dylib',
'exe',
)
# These files must have executable bit set.
#
# Case-insensitive, lower-case only.
EXECUTABLE_PATHS = (
'chrome/test/data/app_shim/app_shim_32_bit.app/contents/'
'macos/app_mode_loader',
'chrome/test/data/extensions/uitest/plugins/plugin.plugin/contents/'
'macos/testnetscapeplugin',
'chrome/test/data/extensions/uitest/plugins_private/plugin.plugin/contents/'
'macos/testnetscapeplugin',
)
# These files must not have the executable bit set. This is mainly a performance
# optimization as these files are not checked for shebang. The list was
# partially generated from:
# git ls-files | grep "\\." | sed 's/.*\.//' | sort | uniq -c | sort -b -g
#
# Case-sensitive.
NON_EXECUTABLE_EXTENSIONS = (
'1',
'3ds',
'S',
'am',
'applescript',
'asm',
'c',
'cc',
'cfg',
'chromium',
'cpp',
'crx',
'cs',
'css',
'cur',
'def',
'der',
'expected',
'gif',
'grd',
'gyp',
'gypi',
'h',
'hh',
'htm',
'html',
'hyph',
'ico',
'idl',
'java',
'jpg',
'js',
'json',
'm',
'm4',
'mm',
'mms',
'mock-http-headers',
'nexe',
'nmf',
'onc',
'pat',
'patch',
'pdf',
'pem',
'plist',
'png',
'proto',
'rc',
'rfx',
'rgs',
'rules',
'spec',
'sql',
'srpc',
'svg',
'tcl',
'test',
'tga',
'txt',
'vcproj',
'vsprops',
'webm',
'word',
'xib',
'xml',
'xtb',
'zip',
)
# These files must not have executable bit set.
#
# Case-insensitive, lower-case only.
NON_EXECUTABLE_PATHS = (
'build/android/tests/symbolize/liba.so',
'build/android/tests/symbolize/libb.so',
'chrome/installer/mac/sign_app.sh.in',
'chrome/installer/mac/sign_versioned_dir.sh.in',
'chrome/test/data/extensions/uitest/plugins/plugin32.so',
'chrome/test/data/extensions/uitest/plugins/plugin64.so',
'chrome/test/data/extensions/uitest/plugins_private/plugin32.so',
'chrome/test/data/extensions/uitest/plugins_private/plugin64.so',
'courgette/testdata/elf-32-1',
'courgette/testdata/elf-32-2',
'courgette/testdata/elf-64',
)
# File names that are always whitelisted. (These are mostly autoconf spew.)
#
# Case-sensitive.
IGNORED_FILENAMES = (
'config.guess',
'config.sub',
'configure',
'depcomp',
'install-sh',
'missing',
'mkinstalldirs',
'naclsdk',
'scons',
)
# File paths starting with one of these will be ignored as well.
# Please consider fixing your file permissions, rather than adding to this list.
#
# Case-insensitive, lower-case only.
IGNORED_PATHS = (
'native_client_sdk/src/build_tools/sdk_tools/third_party/fancy_urllib/'
'__init__.py',
'out/',
# TODO(maruel): Fix these.
'third_party/bintrees/',
'third_party/closure_linter/',
'third_party/devscripts/licensecheck.pl.vanilla',
'third_party/hyphen/',
'third_party/lcov-1.9/contrib/galaxy/conglomerate_functions.pl',
'third_party/lcov-1.9/contrib/galaxy/gen_makefile.sh',
'third_party/lcov/contrib/galaxy/conglomerate_functions.pl',
'third_party/lcov/contrib/galaxy/gen_makefile.sh',
'third_party/libevent/autogen.sh',
'third_party/libevent/test/test.sh',
'third_party/libxml/linux/xml2-config',
'third_party/libxml/src/ltmain.sh',
'third_party/mesa/',
'third_party/protobuf/',
'third_party/python_gflags/gflags.py',
'third_party/sqlite/',
'third_party/talloc/script/mksyms.sh',
'third_party/tcmalloc/',
'third_party/tlslite/setup.py',
# TODO(nednguyen): Remove this when telemetry is moved to catapult
'tools/telemetry/third_party/',
)
#### USER EDITABLE SECTION ENDS HERE ####
assert set(EXECUTABLE_EXTENSIONS) & set(NON_EXECUTABLE_EXTENSIONS) == set()
assert set(EXECUTABLE_PATHS) & set(NON_EXECUTABLE_PATHS) == set()
VALID_CHARS = set(string.ascii_lowercase + string.digits + '/-_.')
for paths in (EXECUTABLE_PATHS, NON_EXECUTABLE_PATHS, IGNORED_PATHS):
assert all([set(path).issubset(VALID_CHARS) for path in paths])
def capture(cmd, cwd):
"""Returns the output of a command.
Ignores the error code or stderr.
"""
logging.debug('%s; cwd=%s' % (' '.join(cmd), cwd))
env = os.environ.copy()
env['LANGUAGE'] = 'en_US.UTF-8'
p = subprocess.Popen(
cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=cwd, env=env)
return p.communicate()[0]
def get_git_root(dir_path):
"""Returns the git checkout root or None."""
root = capture(['git', 'rev-parse', '--show-toplevel'], dir_path).strip()
if root:
return root
def is_ignored(rel_path):
"""Returns True if rel_path is in our whitelist of files to ignore."""
rel_path = rel_path.lower()
return (
os.path.basename(rel_path) in IGNORED_FILENAMES or
rel_path.lower().startswith(IGNORED_PATHS))
def must_be_executable(rel_path):
"""The file name represents a file type that must have the executable bit
set.
"""
return (os.path.splitext(rel_path)[1][1:] in EXECUTABLE_EXTENSIONS or
rel_path.lower() in EXECUTABLE_PATHS)
def must_not_be_executable(rel_path):
"""The file name represents a file type that must not have the executable
bit set.
"""
return (os.path.splitext(rel_path)[1][1:] in NON_EXECUTABLE_EXTENSIONS or
rel_path.lower() in NON_EXECUTABLE_PATHS)
def has_executable_bit(full_path):
"""Returns if any executable bit is set."""
permission = stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
return bool(permission & os.stat(full_path).st_mode)
def has_shebang_or_is_elf(full_path):
"""Returns if the file starts with #!/ or is an ELF binary.
full_path is the absolute path to the file.
"""
with open(full_path, 'rb') as f:
data = f.read(4)
return (data[:3] == '#!/' or data == '#! /', data == '\x7fELF')
def check_file(root_path, rel_path):
"""Checks the permissions of the file whose path is root_path + rel_path and
returns an error if it is inconsistent. Returns None on success.
It is assumed that the file is not ignored by is_ignored().
If the file name is matched with must_be_executable() or
must_not_be_executable(), only its executable bit is checked.
Otherwise, the first few bytes of the file are read to verify if it has a
shebang or ELF header and compares this with the executable bit on the file.
"""
full_path = os.path.join(root_path, rel_path)
def result_dict(error):
return {
'error': error,
'full_path': full_path,
'rel_path': rel_path,
}
try:
bit = has_executable_bit(full_path)
except OSError:
# It's faster to catch exception than call os.path.islink(). Chromium
# tree happens to have invalid symlinks under
# third_party/openssl/openssl/test/.
return None
if must_be_executable(rel_path):
if not bit:
return result_dict('Must have executable bit set')
return
if must_not_be_executable(rel_path):
if bit:
return result_dict('Must not have executable bit set')
return
# For the others, it depends on the file header.
(shebang, elf) = has_shebang_or_is_elf(full_path)
if bit != (shebang or elf):
if bit:
return result_dict('Has executable bit but not shebang or ELF header')
if shebang:
return result_dict('Has shebang but not executable bit')
return result_dict('Has ELF header but not executable bit')
def check_files(root, files):
gen = (check_file(root, f) for f in files if not is_ignored(f))
return filter(None, gen)
class ApiBase(object):
def __init__(self, root_dir, bare_output):
self.root_dir = root_dir
self.bare_output = bare_output
self.count = 0
self.count_read_header = 0
def check_file(self, rel_path):
logging.debug('check_file(%s)' % rel_path)
self.count += 1
if (not must_be_executable(rel_path) and
not must_not_be_executable(rel_path)):
self.count_read_header += 1
return check_file(self.root_dir, rel_path)
def check_dir(self, rel_path):
return self.check(rel_path)
def check(self, start_dir):
"""Check the files in start_dir, recursively check its subdirectories."""
errors = []
items = self.list_dir(start_dir)
logging.info('check(%s) -> %d' % (start_dir, len(items)))
for item in items:
full_path = os.path.join(self.root_dir, start_dir, item)
rel_path = full_path[len(self.root_dir) + 1:]
if is_ignored(rel_path):
continue
if os.path.isdir(full_path):
# Depth first.
errors.extend(self.check_dir(rel_path))
else:
error = self.check_file(rel_path)
if error:
errors.append(error)
return errors
def list_dir(self, start_dir):
"""Lists all the files and directory inside start_dir."""
return sorted(
x for x in os.listdir(os.path.join(self.root_dir, start_dir))
if not x.startswith('.')
)
class ApiAllFilesAtOnceBase(ApiBase):
_files = None
def list_dir(self, start_dir):
"""Lists all the files and directory inside start_dir."""
if self._files is None:
self._files = sorted(self._get_all_files())
if not self.bare_output:
print 'Found %s files' % len(self._files)
start_dir = start_dir[len(self.root_dir) + 1:]
return [
x[len(start_dir):] for x in self._files if x.startswith(start_dir)
]
def _get_all_files(self):
"""Lists all the files and directory inside self._root_dir."""
raise NotImplementedError()
class ApiGit(ApiAllFilesAtOnceBase):
def _get_all_files(self):
return capture(['git', 'ls-files'], cwd=self.root_dir).splitlines()
def get_scm(dir_path, bare):
"""Returns a properly configured ApiBase instance."""
cwd = os.getcwd()
root = get_git_root(dir_path or cwd)
if root:
if not bare:
print('Found git repository at %s' % root)
return ApiGit(dir_path or root, bare)
# Returns a non-scm aware checker.
if not bare:
print('Failed to determine the SCM for %s' % dir_path)
return ApiBase(dir_path or cwd, bare)
def main():
usage = """Usage: python %prog [--root <root>] [tocheck]
tocheck Specifies the directory, relative to root, to check. This defaults
to "." so it checks everything.
Examples:
python %prog
python %prog --root /path/to/source chrome"""
parser = optparse.OptionParser(usage=usage)
parser.add_option(
'--root',
help='Specifies the repository root. This defaults '
'to the checkout repository root')
parser.add_option(
'-v', '--verbose', action='count', default=0, help='Print debug logging')
parser.add_option(
'--bare',
action='store_true',
default=False,
help='Prints the bare filename triggering the checks')
parser.add_option(
'--file', action='append', dest='files',
help='Specifics a list of files to check the permissions of. Only these '
'files will be checked')
parser.add_option('--json', help='Path to JSON output file')
options, args = parser.parse_args()
levels = [logging.ERROR, logging.INFO, logging.DEBUG]
logging.basicConfig(level=levels[min(len(levels) - 1, options.verbose)])
if len(args) > 1:
parser.error('Too many arguments used')
if options.root:
options.root = os.path.abspath(options.root)
if options.files:
# --file implies --bare (for PRESUBMIT.py).
options.bare = True
errors = check_files(options.root, options.files)
else:
api = get_scm(options.root, options.bare)
start_dir = args[0] if args else api.root_dir
errors = api.check(start_dir)
if not options.bare:
print('Processed %s files, %d files where tested for shebang/ELF '
'header' % (api.count, api.count_read_header))
if options.json:
with open(options.json, 'w') as f:
json.dump(errors, f)
if errors:
if options.bare:
print '\n'.join(e['full_path'] for e in errors)
else:
print '\nFAILED\n'
print '\n'.join('%s: %s' % (e['full_path'], e['error']) for e in errors)
return 1
if not options.bare:
print '\nSUCCESS\n'
return 0
if '__main__' == __name__:
sys.exit(main())
| mit | 8,642,970,473,588,044,000 | 27.12605 | 80 | 0.664924 | false |
bdfoster/blumate | blumate/components/mongo.py | 1 | 5577 | """
A component which allows you to connect to a MongoDB server.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/
"""
import logging
from blumate.helpers.entity import Entity
import blumate.util as util
from blumate import bootstrap
from blumate.const import (EVENT_BLUMATE_STOP,
EVENT_BLUMATE_START,
EVENT_STATE_CHANGED,
EVENT_PLATFORM_DISCOVERED,
STATE_ACTIVE,
STATE_IDLE,
STATE_UNKNOWN,
ATTR_DISCOVERED,
ATTR_FRIENDLY_NAME,
ATTR_SERVICE)
_LOGGER = logging.getLogger(__name__)
DOMAIN = "mongo"
DEPENDENCIES = []
DEFAULT_HOST = 'localhost'
DEFAULT_PORT = 27017
DEFAULT_TZ_AWARE = True
DEFAULT_SOCKET_TIMEOUT_MS = None
DEFAULT_SSL = False
DEFAULT_MAX_POOL_SIZE = 100
DEFAULT_SOCKET_KEEP_ALIVE = False
REQUIREMENTS = ['pymongo==3.2.2']
CONF_HOST = 'host'
CONF_PORT = 'port'
CONF_TZ_AWARE = 'tz_aware'
CONF_SOCKET_TIMEOUT_MS = 'socket_timeout_ms'
CONF_SSL = 'ssl'
CONF_MAX_POOL_SIZE = 'max_pool_size'
CONF_SOCKET_KEEP_ALIVE = 'socket_keep_alive'
SERVICE_UNLOCK = 'unlock'
SERVICE_DISCOVER_DATABASES = 'discover_databases'
SERVICE_DISCONNECT = 'disconnect'
__client = None
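# Hedged example of the expected configuration block (keys mirror the CONF_*
# constants above; the YAML layout and values are illustrative assumptions,
# not requirements):
#
#   mongo:
#     host: localhost
#     port: 27017
#     tz_aware: true
#     ssl: false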
class Mongo(Entity):
def __init__(self, bmss, config):
"""Setup the MongoDB component."""
self.__state = STATE_UNKNOWN
self.bmss = bmss
self.__config = config[DOMAIN]
self.__host = util.convert(self.__config.get(CONF_HOST), str, DEFAULT_HOST)
self.__port = util.convert(self.__config.get(CONF_PORT), int, DEFAULT_PORT)
self.__tz_aware = util.convert(self.__config.get(CONF_TZ_AWARE), bool, DEFAULT_TZ_AWARE)
self.__socket_timeout_ms = util.convert(self.__config.get(CONF_SOCKET_TIMEOUT_MS), int, DEFAULT_SOCKET_TIMEOUT_MS)
self.__ssl = util.convert(self.__config.get(CONF_SSL), bool, DEFAULT_SSL)
self.__max_pool_size = util.convert(self.__config.get(CONF_MAX_POOL_SIZE), int, DEFAULT_MAX_POOL_SIZE)
self.__socket_keep_alive = util.convert(self.__config.get(CONF_SOCKET_KEEP_ALIVE),
int,
DEFAULT_SOCKET_KEEP_ALIVE)
from pymongo import MongoClient
from pymongo.monitoring import CommandListener
class MongoCommandEvent(CommandListener):
"""
https://api.mongodb.com/python/current/api/pymongo/monitoring.html#module-pymongo.monitoring
"""
def started(self, event):
_LOGGER.debug("Command {0.command_name} with request id "
"{0.request_id} started on server "
"{0.connection_id}".format(event))
def succeeded(self, event):
_LOGGER.info("Command {0.command_name} with request id "
"{0.request_id} on server {0.connection_id} "
"succeeded in {0.duration_micros} "
"microseconds".format(event))
def failed(self, event):
_LOGGER.warn("Command {0.command_name} with request id "
"{0.request_id} on server {0.connection_id} "
"failed in {0.duration_micros} "
"microseconds".format(event))
self.__client = MongoClient(host = self.__host,
port = self.__port,
tz_aware=self.__tz_aware,
maxPoolSize=self.__max_pool_size,
socketTimeoutMS =self.__socket_timeout_ms,
ssl = self.__ssl,
socketKeepAlive = self.__socket_keep_alive,
document_class = dict,
connect = True,
event_listeners = [MongoCommandEvent()])
# Will fail here if connection is not able to be established
assert(self.__client is not None)
self.__state = STATE_IDLE
bmss.bus.listen_once(EVENT_BLUMATE_STOP, self.disconnect)
bmss.bus.listen_once(EVENT_BLUMATE_START, self.discover_databases)
bmss.services.register(DOMAIN, SERVICE_DISCOVER_DATABASES, self.discover_databases)
bmss.services.register(DOMAIN, SERVICE_UNLOCK, self.unlock)
bmss.services.register(DOMAIN, SERVICE_DISCONNECT, self.disconnect)
def discover_databases(self, event):
"""Discover available databases."""
self.__state = STATE_ACTIVE
database_list = self.__client.database_names()
self.__state = STATE_IDLE
_LOGGER.info("Available Databases: %s", database_list)
def unlock(self, event):
"""Enables writes to the server."""
_LOGGER.debug("Unlocking server...")
self.__client.unlock()
if self.__client.is_locked:
_LOGGER.warn("Server is still locked. Maybe a permissions issue?")
else:
_LOGGER.info("Server is unlocked.")
def disconnect(self, event):
"""Disconnect from the MongoDB Server."""
_LOGGER.debug("Disconnecting from MongoDB Server...")
self.__client.close()
_LOGGER.info("Disconnected from MongoDB Server.")
setup = Mongo
| mit | -8,416,987,383,555,925,000 | 38.553191 | 122 | 0.564461 | false |
kapilgarg1996/gmc | gmc/conf/__init__.py | 1 | 1489 | import os
import importlib
from gmc.conf import global_settings
ENVIRONMENT_VARIABLE = "GMC_SETTINGS_MODULE"
class Settings:
"""
Module to load settings to configure gmc
"""
def __init__(self, *args, **kwargs):
self.settings = None
self.settings_module = None
def __getattr__(self, name):
"""
Make settings available as the attributes.
Like settings.DATASET_DIR
"""
self.load_settings()
return self.settings[name]
def __iter__(self):
self.load_settings()
return iter(self.settings)
def load_settings(self):
settings_module = os.environ.get(ENVIRONMENT_VARIABLE)
if self.settings is not None and settings_module == self.settings_module:
return
self.settings = {}
for setting in dir(global_settings):
if setting.isupper():
self.settings[setting] = getattr(global_settings, setting)
self.settings_module = os.environ.get(ENVIRONMENT_VARIABLE, None)
if self.settings_module is not None:
mod = importlib.import_module(self.settings_module)
for setting in dir(mod):
if setting.isupper():
self.settings[setting] = getattr(mod, setting)
def modify(self, new_settings):
for name in new_settings:
if name in self.settings:
self.settings[name] = new_settings[name]
settings = Settings() | mit | 8,042,181,820,768,355,000 | 29.408163 | 81 | 0.601746 | false |
doirisks/dori | models/10.1016:j.jacc.2013.11.013/config_gener_a.py | 1 | 7083 | # -*- coding: utf-8 -*-
# a template for making config.json files for functions
#import pprint
#pp = pprint.PrettyPrinter(indent=4)
config = {}
# human and machine readable names for the model
config['id'] = {}
config['id']['DOI'] = '10.1016/j.jacc.2013.11.013'
config['id']['papertitle'] = 'Factors Associated With Major Bleeding Events: Insights From the ROCKET AF Trial'
config['id']['modeltitle'] = 'Cox Model for Stroke Risk in New-Onset Atrial Fibriallation' #TODO
config['id']['yearofpub'] = '2014'
config['id']['authors'] = ['Goodman, Shaun G.', 'Wojdyla, Daniel M.', 'Piccini, Jonathan P.',
    'White, Harvey D.', 'Paolini, John F.', 'Nessel, Christopher C.', 'Berkowitz, Scott D.', 'Mahaffey, Kenneth W.', 'Patel, Manesh R.', 'Sherwood, Matthew W.', 'Becker, Richard C.', 'Halperin, Jonathan L.', 'Hacke, Werner', 'Singer, Daniel E.', 'Hankey, Graeme J.', 'Breithardt, Gunter', 'Fox, Keith A. A.', 'Califf, Robert M.']
# population constraints
config['population'] = {}
config['population']['must'] = ['']#['New-Onset Atrial Fibrillation']
config['population']['mustnot'] = ['Treated with Warfarin'] #['Prior Atrial Fibrillation', 'Treated with Warfarin']
config['population']['mustCUI'] = [''] #['NOCUI'] #C0004238 "new-onset" is NOT accounted for.
config['population']['mustnotCUI'] = ['C1532949'] #['NOCUI', 'C1532949'] #C0004238 "prior" is NOT accounted for.
# human and machine readable input descriptions
config['input'] = {}
config['input']['name'] = ['Male Sex','Age', 'Diastolic Blood Pressure', 'Chronic Obstructive Pulmonary Disease', 'Anemia', 'History of Gastrointestinal Bleeding', 'Aspirin']
config['input']['description'] = [
'Male Sex',
'Age',
'Diastolic Blood Pressure',
'Chronic Obstructive Pulmonary Disease (COPD)',
'Anemia at Baseline',
'Prior Gastrointestinal Bleed',
'Prior Aspirin (ASA) Use'
]
config['input']['CUI'] = ['C0086582','C0804405','C0488052','C0024117','C0002871','C0559225','C1277232']
config['input']['units'] = ['','years','mmHg','','','','']
config['input']['datatype'] = ['bool','float','float','bool','bool','bool','bool']
config['input']['upper'] = ['','94','200','','','','']
config['input']['lower'] = ['','55','30','','','','']
# human and machine readable output descriptions
config['output'] = {}
config['output']['name'] = '2Y Stroke Risk after New-Onset Atrial Fibrillation'
config['output']['outcomeName'] = 'Stroke'
config['output']['outcomeTime'] = '2'
config['output']['CUI'] = 'C3166383'
config['output']['outcomeCUI'] = 'C0038454'
# tabular or machine readable data available for download
config['data'] = {}
config['data']['filename'] = [''] # name tabular data file ['All of the Data']
config['data']['fileurl'] = [''] # some kind of pointer? ['/var/www/models/99.9999:aaa.a9/all.Rdata']
config['data']['datumname'] = ['Total Patients Randomized'] # important data for easy access ['Sample Size']
config['data']['datum'] = ['14264'] # values for important data ['8,000,000,000']
# model function and dependencies
config['model'] = {}
config['model']['language'] = 'R' # function's language 'python'
config['model']['uncompiled'] = ['model_a.R'] # some kind of pointer? ['model.py']
config['model']['compiled'] = ['model_a.Rdata','model_df_a.Rdata'] # some kind of pointer? ['']
config['model']['dependList'] = 'requirements.txt' # some kind of pointer? 'requirements.txt'
config['model']['example'] = ['example_a.R'] # some kind of pointer? ['example.py']
# I do not know what this would be used for
config['model_category'] = ['prognostic'] #choices: 'diagnostic','prognostic'
# I do not know what these are for...
config['predictive_ability'] = {}
config['predictive_ability']['type'] = []
config['predictive_ability']['metric'] = []
config['predictive_ability']['value'] = []
config['predictive_ability']['lcl'] = []
config['predictive_ability']['ucl'] = []
config_name = 'config_a'
config['config'] = config_name + '.json'
# dump json config file
import json
with open(config_name + '.json','w') as output:
json.dump(config,output)
# dump sql config file
import sql
models_table = sql.Table('models')
modvalues = [
config['id']['DOI'],
config['id']['papertitle'],
config['id']['modeltitle'],
config['id']['yearofpub'],
json.dumps(config['id']['authors']),
json.dumps(config['population']['must']),
json.dumps(config['population']['mustnot']),
json.dumps(config['population']['mustCUI']),
json.dumps(config['population']['mustnotCUI']),
json.dumps(config['input']['name']),
json.dumps(config['input']['description']),
json.dumps(config['input']['CUI']),
json.dumps(config['input']['units']),
json.dumps(config['input']['datatype']),
json.dumps(config['input']['upper']),
json.dumps(config['input']['lower']),
config['output']['name'],
config['output']['outcomeName'],
config['output']['outcomeTime'],
config['output']['CUI'],
config['output']['outcomeCUI'],
json.dumps(config['data']['filename']),
json.dumps(config['data']['fileurl']),
json.dumps(config['data']['datumname']),
json.dumps(config['data']['datum']),
config['model']['language'],
json.dumps(config['model']['uncompiled']),
json.dumps(config['model']['compiled']),
config['model']['dependList'],
json.dumps(config['model']['example']),
json.dumps(config['model_category']),
json.dumps(config['predictive_ability']['type']),
json.dumps(config['predictive_ability']['metric']),
json.dumps(config['predictive_ability']['value']),
json.dumps(config['predictive_ability']['lcl']),
json.dumps(config['predictive_ability']['ucl']),
config['config']
]
columns = [models_table.DOI,models_table.papertitle, models_table.modeltitle, models_table.yearofpub, models_table.authors, models_table.must, models_table.mustnot,models_table.mustCUI, models_table.mustnotCUI, models_table.inpname, models_table.inpdesc, models_table.inpCUI,models_table.inpunits,models_table.inpdatatype, models_table.upper, models_table.lower, models_table.output, models_table.outcome,models_table.outcometime, models_table.outputCUI, models_table.outcomeCUI, models_table.filename,models_table.filepointer, models_table.datumname,models_table.datum, models_table.language,models_table.uncompiled,models_table.compiled,models_table.dependList,models_table.example, models_table.model_category,models_table.type,models_table.metric,models_table.value, models_table.lcl, models_table.ucl, models_table.config, models_table.numofinputs]
# numofinputs was added after the fact!
for i in range(len(modvalues)):
modvalues[i] = modvalues[i].replace("'","''")
insertion = models_table.insert(columns = columns, values = [ modvalues + [len(config['input']['CUI'])] ])
model_tup = tuple(insertion)
query = model_tup[0].replace('%s',"'%s'").replace('"','')
query = query % tuple(model_tup[1])
#query = format(model_tup[0],*model_tup[1])
print(query + ';\n')
| gpl-3.0 | 7,234,410,869,433,950,000 | 44.403846 | 855 | 0.659184 | false |
MSEMJEJME/Get-Dumped | renpy/defaultstore.py | 1 | 10575 | # Copyright 2004-2012 Tom Rothamel <[email protected]>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from renpy.minstore import *
# But please note that this will not be available in the body
# of user code, unless we re-import it.
import renpy.display
import renpy.text
import renpy.display.im as im
import renpy.display.anim as anim
# Used by the call/return mechanism.
_return = None
_args = None
_kwargs = None
# Should the in-game window be shown?
_window = False
# The window subtitle.
_window_subtitle = ''
# Should rollback be allowed?
_rollback = True
# config.
_config = renpy.config
# The special character used for name-only dialogue.
name_only = None
# Used by the ui functions.
_widget_by_id = None
_widget_properties = { }
class _Config(object):
def register(self, name, default, cat=None, help=None): #@ReservedAssignment
setattr(self, name, default)
_config.help.append((cat, name, help))
def __getattr__(self, name):
cvars = vars(_config)
if name not in cvars:
raise Exception('config.%s is not a known configuration variable.' % (name))
return cvars[name]
def __setattr__(self, name, value):
cvars = vars(_config)
if name not in cvars and renpy.config.locked:
raise Exception('config.%s is not a known configuration variable.' % (name))
if name == "script_version":
renpy.store._set_script_version(value) # E1101 @UndefinedVariable
cvars[name] = value
def __delattr__(self, name):
if renpy.config.locked:
raise Exception('Deleting configuration variables is not supported.')
else:
delattr(renpy.config, name)
# The styles object.
style = None
config = _Config()
library = config
eval = renpy.python.py_eval #@ReservedAssignment
# Displayables.
Bar = renpy.display.behavior.Bar
Button = renpy.display.behavior.Button
Input = renpy.display.behavior.Input
ImageReference = renpy.display.image.ImageReference
Image = renpy.display.im.image
Frame = renpy.display.imagelike.Frame
Solid = renpy.display.imagelike.Solid
LiveComposite = renpy.display.layout.LiveComposite
LiveCrop = renpy.display.layout.LiveCrop
LiveTile = renpy.display.layout.LiveTile
Null = renpy.display.layout.Null
Window = renpy.display.layout.Window
Viewport = renpy.display.layout.Viewport
DynamicDisplayable = renpy.display.layout.DynamicDisplayable
ConditionSwitch = renpy.display.layout.ConditionSwitch
ShowingSwitch = renpy.display.layout.ShowingSwitch
Transform = renpy.display.motion.Transform
Animation = anim.Animation
Movie = renpy.display.video.Movie
Particles = renpy.display.particle.Particles
SnowBlossom = renpy.display.particle.SnowBlossom
Text = renpy.text.text.Text
ParameterizedText = renpy.text.extras.ParameterizedText
Drag = renpy.display.dragdrop.Drag
DragGroup = renpy.display.dragdrop.DragGroup
Sprite = renpy.display.particle.Sprite
SpriteManager = renpy.display.particle.SpriteManager
# Currying things.
Alpha = renpy.curry.curry(renpy.display.layout.Alpha)
Position = renpy.curry.curry(renpy.display.layout.Position)
Pan = renpy.curry.curry(renpy.display.motion.Pan)
Move = renpy.curry.curry(renpy.display.motion.Move)
Motion = renpy.curry.curry(renpy.display.motion.Motion)
Revolve = renpy.curry.curry(renpy.display.motion.Revolve)
Zoom = renpy.curry.curry(renpy.display.motion.Zoom)
RotoZoom = renpy.curry.curry(renpy.display.motion.RotoZoom)
FactorZoom = renpy.curry.curry(renpy.display.motion.FactorZoom)
SizeZoom = renpy.curry.curry(renpy.display.motion.SizeZoom)
Fade = renpy.curry.curry(renpy.display.transition.Fade)
Dissolve = renpy.curry.curry(renpy.display.transition.Dissolve)
ImageDissolve = renpy.curry.curry(renpy.display.transition.ImageDissolve)
AlphaDissolve = renpy.curry.curry(renpy.display.transition.AlphaDissolve)
CropMove = renpy.curry.curry(renpy.display.transition.CropMove)
Pixellate = renpy.curry.curry(renpy.display.transition.Pixellate)
OldMoveTransition = renpy.curry.curry(renpy.display.movetransition.OldMoveTransition)
MoveTransition = renpy.curry.curry(renpy.display.movetransition.MoveTransition)
MoveFactory = renpy.curry.curry(renpy.display.movetransition.MoveFactory)
MoveIn = renpy.curry.curry(renpy.display.movetransition.MoveIn)
MoveOut = renpy.curry.curry(renpy.display.movetransition.MoveOut)
ZoomInOut = renpy.curry.curry(renpy.display.movetransition.ZoomInOut)
RevolveInOut = renpy.curry.curry(renpy.display.movetransition.RevolveInOut)
MultipleTransition = renpy.curry.curry(renpy.display.transition.MultipleTransition)
ComposeTransition = renpy.curry.curry(renpy.display.transition.ComposeTransition)
Pause = renpy.curry.curry(renpy.display.transition.NoTransition)
SubTransition = renpy.curry.curry(renpy.display.transition.SubTransition)
# Misc.
ADVSpeaker = ADVCharacter = renpy.character.ADVCharacter
Speaker = Character = renpy.character.Character
DynamicCharacter = renpy.character.DynamicCharacter
MultiPersistent = renpy.loadsave.MultiPersistent
Action = renpy.ui.Action
BarValue = renpy.ui.BarValue
Style = renpy.style.Style
absolute = renpy.display.core.absolute
def layout(cls, doc, nargs=0, **extra_kwargs):
def f(*args, **properties):
conargs = args[:nargs]
kids = args[nargs:]
kwargs = extra_kwargs.copy()
kwargs.update(properties)
rv = cls(*conargs, **kwargs)
for i in kids:
rv.add(renpy.easy.displayable(i))
return rv
f.__doc__ = doc
return f
Fixed = layout(renpy.display.layout.MultiBox, """
:doc: disp_box
:args: (*args, **properties)
A box that fills the screen. Its members are laid out
from back to front, with their position properties
controlling their position.
""", layout="fixed")
HBox = layout(renpy.display.layout.MultiBox, """
:doc: disp_box
:args: (*args, **properties)
A box that lays out its members from left to right.
""", layout='horizontal')
VBox = layout(renpy.display.layout.MultiBox, """
:doc: disp_box
:args: (*args, **properties)
A layout that lays out its members from top to bottom.
""", layout='vertical')
Grid = layout(renpy.display.layout.Grid, """
A layout that lays out displayables in a grid.
""", nargs=2, layout='vertical')
def AlphaBlend(control, old, new, alpha=False):
"""
:doc: disp_effects
This transition uses a `control` displayable (almost always some sort of
animated transform) to transition from one displayable to another. The
transform is evaluated. The `new` displayable is used where the transform
is opaque, and the `old` displayable is used when it is transparent.
`alpha`
If true, the image is composited with what's behind it. If false,
the default, the image is opaque and overwrites what's behind it.
"""
return renpy.display.transition.AlphaDissolve(control, 0.0, old_widget=old, new_widget=new, alpha=alpha)
del layout
def At(d, *args):
"""
:doc: disp_at
Given a displayable `d`, applies each of the transforms in `args`
to it. The transforms are applied in left-to-right order, so that
the outermost transform is the rightmost argument. ::
transform birds_transform:
xpos -200
linear 10 xpos 800
pause 20
repeat
image birds = At("birds.png", birds_transform)
"""
rv = renpy.easy.displayable(d)
for i in args:
rv = i(rv)
return rv
# The color function. (Moved, since text needs it, too.)
color = renpy.easy.color
# Conveniently get rid of all the packages we had imported before.
import renpy.exports as renpy #@Reimport
# The default menu functions.
menu = renpy.display_menu
predict_menu = renpy.predict_menu
# The default transition.
default_transition = None
# Is the mouse visible?
mouse_visible = True
# Is the overlay suppressed?
suppress_overlay = False
# The default ADVCharacter.
adv = ADVCharacter(None,
who_prefix='',
who_suffix='',
what_prefix='',
what_suffix='',
show_function=renpy.show_display_say,
predict_function=renpy.predict_show_display_say,
condition=None,
dynamic=False,
image=None,
interact=True,
slow=True,
slow_abortable=True,
afm=True,
ctc=None,
ctc_pause=None,
ctc_timedpause=None,
ctc_position="nestled",
all_at_once=False,
with_none=None,
callback=None,
type='say',
who_style='say_label',
what_style='say_dialogue',
window_style='say_window',
screen='say',
mode='say',
kind=False)
def predict_say(who, what):
who = Character(who, kind=name_only)
try:
who.predict(what)
except:
pass
def say(who, what, interact=True):
who = Character(who, kind=name_only)
who(what, interact=interact)
# Used by renpy.reshow_say.
_last_say_who = None
_last_say_what = None
# Used to store the things pinned into the cache.
_cache_pin_set = set()
# Make these available to user code.
import sys
import os
def public_api():
ui
im
object
range
sorted
os
sys
del public_api
| gpl-2.0 | 1,057,408,744,182,847,100 | 28.788732 | 109 | 0.692766 | false |
ihrwein/yarg | yarg/gui/listmodel.py | 1 | 8053 | from PyQt5 import QtCore
class QObjectListModel(QtCore.QAbstractListModel):
"""
QObjectListModel provides a more powerful, but still easy to use, alternative to using
QObjectList lists as models for QML views. As a QAbstractListModel, it has the ability to
automatically notify the view of specific changes to the list, such as adding or removing
items. At the same time it provides QList-like convenience functions such as append, at,
and removeAt for easily working with the model from Python.
This class is the Python port of the C++ QObjectListModel class.
"""
def __init__(self, parent=None):
""" Constructs an object list model with the given parent. """
super(QObjectListModel, self).__init__(parent)
self._objects = list() # Internal list of objects
self.roles = QtCore.QAbstractListModel.roleNames(self)
self.ObjectRole = QtCore.Qt.UserRole + 1
self.roles[self.ObjectRole] = "object"
def roleNames(self):
return self.roles
def __iter__(self):
""" Enables iteration over the list of objects. """
return iter(self._objects)
def __len__(self):
return self.size()
def __bool__(self):
return self.size() > 0
def __getitem__(self, index):
""" Enables the [] operator """
return self._objects[index]
def data(self, index, role):
""" Returns data for the specified role, from the item with the
given index. The only valid role is ObjectRole.
If the view requests an invalid index or role, an invalid variant
is returned.
"""
if index.row() < 0 or index.row() >= len(self._objects):
return None
if role == self.ObjectRole:
return self._objects[index.row()]
return None
def rowCount(self, parent):
""" Returns the number of rows in the model. This value corresponds to the
number of items in the model's internal object list.
"""
return self.size()
def objectList(self):
""" Returns the object list used by the model to store data. """
return self._objects
def setObjectList(self, objects):
""" Sets the model's internal objects list to objects. The model will
notify any attached views that its underlying data has changed.
"""
oldSize = self.size()
self.beginResetModel()
self._objects = objects
self.endResetModel()
self.dataChanged.emit(self.index(0), self.index(self.size() - 1), [])
if self.size() != oldSize:
self.countChanged.emit()
############
# List API #
############
def append(self, toAppend):
""" Inserts object(s) at the end of the model and notifies any views.
Accepts both QObject and list of QObjects.
"""
if not isinstance(toAppend, list):
toAppend = [toAppend]
self.beginInsertRows(QtCore.QModelIndex(), self.size(), self.size() + len(toAppend) - 1)
self._objects.extend(toAppend)
self.endInsertRows()
self.countChanged.emit()
def insert(self, i, toInsert):
""" Inserts object(s) at index position i in the model and notifies
any views. If i is 0, the object is prepended to the model. If i
is size(), the object is appended to the list.
Accepts both QObject and list of QObjects.
"""
if not isinstance(toInsert, list):
toInsert = [toInsert]
self.beginInsertRows(QtCore.QModelIndex(), i, i + len(toInsert) - 1)
for obj in reversed(toInsert):
self._objects.insert(i, obj)
self.endInsertRows()
self.countChanged.emit()
def at(self, i):
""" Use [] instead - Return the object at index i. """
return self._objects[i]
def replace(self, i, obj):
""" Replaces the item at index position i with object and
notifies any views. i must be a valid index position in the list
(i.e., 0 <= i < size()).
"""
self._objects[i] = obj
self.dataChanged.emit(self.index(i), self.index(i), [])
def move(self, fromIndex, toIndex):
""" Moves the item at index position from to index position to
and notifies any views.
This function assumes that both from and to are at least 0 but less than
size(). To avoid failure, test that both from and to are at
least 0 and less than size().
"""
value = toIndex
if toIndex > fromIndex:
value += 1
if not self.beginMoveRows(QtCore.QModelIndex(), fromIndex, fromIndex, QtCore.QModelIndex(), value):
return
self._objects.insert(toIndex, self._objects.pop(fromIndex))
self.endMoveRows()
def removeAt(self, i, count=1):
""" Removes count number of items from index position i and notifies any views.
i must be a valid index position in the model (i.e., 0 <= i < size()), as
must as i + count - 1.
"""
self.beginRemoveRows(QtCore.QModelIndex(), i, i + count - 1)
for cpt in range(count):
self._objects.pop(i)
self.endRemoveRows()
self.countChanged.emit()
def remove(self, obj):
""" Removes the first occurrence of the given object. Raises a ValueError if not in list. """
if not self.contains(obj):
raise ValueError("QObjectListModel.remove(obj) : obj not in list")
self.removeAt(self.indexOf(obj))
def takeAt(self, i):
""" Removes the item at index position i (notifying any views) and returns it.
i must be a valid index position in the model (i.e., 0 <= i < size()).
"""
self.beginRemoveRows(QtCore.QModelIndex(), i, i)
obj = self._objects.pop(i)
self.endRemoveRows()
self.countChanged.emit()
return obj
def clear(self):
""" Removes all items from the model and notifies any views. """
if not self._objects:
return
self.beginRemoveRows(QtCore.QModelIndex(), 0, self.size() - 1)
self._objects = []
self.endRemoveRows()
self.countChanged.emit()
def contains(self, obj):
""" Returns true if the list contains an occurrence of object;
otherwise returns false.
"""
return obj in self._objects
def indexOf(self, matchObj, fromIndex=0, positive=True):
""" Returns the index position of the first occurrence of object in
the model, searching forward from index position from.
If positive is True, will always return a positive index.
"""
index = self._objects[fromIndex:].index(matchObj) + fromIndex
if positive and index < 0:
index += self.size()
return index
def lastIndexOf(self, matchObj, fromIndex=-1, positive=True):
""" Returns the index position of the last occurrence of object in
the list, searching backward from index position from. If
from is -1 (the default), the search starts at the last item.
If positive is True, will always return a positive index.
"""
r = list(self._objects)
r.reverse()
index = - r[-fromIndex - 1:].index(matchObj) + fromIndex
if positive and index < 0:
index += self.size()
return index
def size(self):
""" Returns the number of items in the model. """
return len(self._objects)
@QtCore.pyqtSlot(result=bool)
def isEmpty(self):
""" Returns true if the model contains no items; otherwise returns false. """
return len(self._objects) == 0
@QtCore.pyqtSlot(int, result="QVariant")
def get(self, i):
""" For usage from QML.
Note: return param is mandatory to mimic Q_INVOKABLE C++ method behavior
"""
return self._objects[i]
countChanged = QtCore.pyqtSignal()
count = QtCore.pyqtProperty(int, size, notify=countChanged) | gpl-2.0 | -3,779,364,264,348,425,700 | 36.635514 | 107 | 0.609711 | false |
jmartty/taller2server | tests/rest/Test_rest.py | 1 | 25876 | # -*- coding: utf-8 -*-
import requests
import unittest
import json
class TestRestApi(unittest.TestCase):
def __init__(self, *args, **kwargs):
super(TestRestApi, self).__init__(*args, **kwargs)
self.__api_base_url = "http://localhost:5000"
self.__user_url = "/usuario/"
self.__ruser = "?r_user="
self.__token = "&token="
self.__pass = "&password="
self.__users_url = "/usuarios"
self.__login = "/login"
self.__conversation = "/conversacion/"
self.__broadcast = "/broadcast"
self.__lines = "&lines="
    # User
    # User registration
def test_1user_add(self):
        '''Check that a new user is created correctly'''
user_prueba = "Fulano"
registro_valido = { "id": "Fulano", "password": "abcde", "nombre": "Fulano", "foto": "mifoto", "ubicacion": "miubicacion"}
registro_valido_js = json.dumps(registro_valido)
r = requests.post(self.__api_base_url + self.__user_url + user_prueba, data = registro_valido_js)
self.assertEqual(r.status_code, 201)
def test_2existing_user_add(self):
        '''Check that creating a user that already exists fails'''
user_existente = "Fulano"
registro_valido = { "password": "abcde", "nombre": "Admin", "foto": "mifoto", "ubicación": "miubicacion"}
registro_valido_js = json.dumps(registro_valido)
r = requests.post(self.__api_base_url + self.__user_url + user_existente, data = registro_valido_js)
self.assertEqual(r.status_code, 401)
def test_3long_user_add(self):
        '''Check that creating a user with more than 12 characters fails'''
user_largo = "Fulano12345678910"
registro_valido = { "password": "abcde", "nombre": "Admin", "foto": "mifoto", "ubicación": "miubicacion"}
registro_valido_js = json.dumps(registro_valido)
r = requests.post(self.__api_base_url + self.__user_url + user_largo, data = registro_valido_js)
self.assertEqual(r.status_code, 401)
def test_5empty_user_add(self):
        '''Check that creating a user with an empty id fails'''
user_wrong = ""
registro_valido = { "password": "abcde", "nombre": "Admin", "foto": "mifoto", "ubicación": "miubicacion"}
registro_valido_js = json.dumps(registro_valido)
r = requests.post(self.__api_base_url + self.__user_url + user_wrong, data = registro_valido_js)
self.assertEqual(r.status_code, 401)
def test_6empty_pass_user_add(self):
        '''Check that creating a user with an empty password fails'''
user_valido = "Mengano"
registro_invalido = { "password": "", "nombre": "Admin", "foto": "mifoto", "ubicación": "miubicacion"}
registro_invalido_js = json.dumps(registro_invalido)
r = requests.post(self.__api_base_url + self.__user_url + user_valido, data = registro_invalido_js)
self.assertEqual(r.status_code, 401)
def test_7empty_name_user_add(self):
        '''Check that creating a user with an empty name fails'''
user_valido = "Mengano"
registro_invalido = { "password": "abcde12", "nombre": "", "foto": "mifoto", "ubicación": "miubicacion"}
registro_invalido_js = json.dumps(registro_invalido)
r = requests.post(self.__api_base_url + self.__user_url + user_valido, data = registro_invalido_js)
self.assertEqual(r.status_code, 401)
    # Login
    # Log a user into the system
def test_8authentication_fail(self):
        '''Check that logging in a non-existent user fails'''
user_incorrecto = { "id": "user", "password": "1234" }
user_incorrecto_js = json.dumps(user_incorrecto)
r = requests.post(self.__api_base_url + self.__login, data = user_incorrecto_js)
self.assertEqual(r.status_code, 401)
def test_9authentication_default_user(self):
        '''Check that login works for an existing user with the correct password'''
user_correcto = { "id": "Fulano", "password": "abcde" }
user_correcto_js = json.dumps(user_correcto)
r = requests.post(self.__api_base_url + self.__login, data = user_correcto_js)
self.assertEqual(r.status_code, 201)
def test_91authentication_wrong_pass_fail(self):
        '''Check that login fails for an existing user with a wrong password'''
pass_incorrecto = { "id": "admin", "password": "1234" }
pass_incorrecto_js = json.dumps(pass_incorrecto)
r = requests.post(self.__api_base_url + self.__login, data = pass_incorrecto_js)
self.assertEqual(r.status_code, 401)
    # Request the list of users
def test_92get_user_collection(self):
        '''Check getting the list of users with a valid token'''
user_prueba = "Wulano"
registro_valido = { "password": "abcde", "nombre": "Wulano", "foto": "mifoto", "ubicación": "miubicacion"}
registro_valido_js = json.dumps(registro_valido)
r = requests.post(self.__api_base_url + self.__user_url + user_prueba, data = registro_valido_js)
user_correcto = { "id": "Wulano", "password": "abcde" }
user_correcto_js = json.dumps(user_correcto)
r2 = requests.post(self.__api_base_url + self.__login, data = user_correcto_js)
token = r2.json()
token["token"] = token["token"][0:16]
r3 = requests.get(self.__api_base_url + self.__users_url + self.__ruser + "Wulano" + self.__token + token["token"])
self.assertEqual(r3.status_code, 200)
def test_93get_user_collection_wrong_token(self):
        '''Check getting the list of users with an invalid token'''
r = requests.get(self.__api_base_url + self.__users_url + self.__ruser + "Fulano" + self.__token + "unTokenIncorrecto")
self.assertEqual(r.status_code, 401)
    # Request a user profile
def test_94get_user_profile(self):
        '''Check getting a user profile'''
user_prueba = "Fulano2"
registro_valido = { "password": "abcde", "nombre": "Fulano2", "foto": "mifoto", "ubicación": "miubicacion"}
registro_valido_js = json.dumps(registro_valido)
r = requests.post(self.__api_base_url + self.__user_url + user_prueba, data = registro_valido_js)
user_correcto = { "id": "Fulano2", "password": "abcde" }
user_correcto_js = json.dumps(user_correcto)
r2 = requests.post(self.__api_base_url + self.__login, data = user_correcto_js)
token = r2.json()
token["token"] = token["token"][0:16]
r = requests.get(self.__api_base_url + self.__user_url + user_prueba + self.__ruser + "Fulano2" + self.__token + token["token"])
self.assertEqual(r.status_code, 200)
def test_95get_user_profile_wrong_token(self):
        '''Check getting a user profile with an invalid token'''
user_prueba = "Fulano3"
registro_valido = { "password": "abcde", "nombre": "Fulano3", "foto": "mifoto", "ubicación": "miubicacion"}
registro_valido_js = json.dumps(registro_valido)
r = requests.post(self.__api_base_url + self.__user_url + user_prueba, data = registro_valido_js)
user_correcto = { "id": "Fulano3", "password": "abcde" }
user_correcto_js = json.dumps(user_correcto)
r2 = requests.post(self.__api_base_url + self.__login, data = user_correcto_js)
token = r2.json()
token["token"] = token["token"][0:16]
r = requests.get(self.__api_base_url + self.__user_url + user_prueba + self.__ruser + "Fulano3" + self.__token + "unTokenIncorrecto")
self.assertEqual(r.status_code, 401)
def test_96get_user_profile_wrong_user(self):
        '''Check getting a user profile with a wrong requesting user'''
user_prueba = "Fulano4"
registro_valido = { "password": "abcde", "nombre": "Fulano4", "foto": "mifoto", "ubicación": "miubicacion"}
registro_valido_js = json.dumps(registro_valido)
r = requests.post(self.__api_base_url + self.__user_url + user_prueba, data = registro_valido_js)
user_correcto = { "id": "Fulano4", "password": "abcde" }
user_correcto_js = json.dumps(user_correcto)
r2 = requests.post(self.__api_base_url + self.__login, data = user_correcto_js)
token = r2.json()
token["token"] = token["token"][0:16]
r = requests.get(self.__api_base_url + self.__user_url + user_prueba + self.__ruser + "UserIncorrecto" + self.__token + token["token"])
self.assertEqual(r.status_code, 401)
    # Modify a user
def test_97modify_user(self):
        '''Check modifying a user'''
user_prueba = "Fulano5"
registro_valido = { "password": "abcde", "nombre": "Fulano5", "foto": "mifoto", "ubicación": "miubicacion"}
registro_valido_js = json.dumps(registro_valido)
r = requests.post(self.__api_base_url + self.__user_url + user_prueba, data = registro_valido_js)
user_correcto = { "id": "Fulano5", "password": "abcde" }
user_correcto_js = json.dumps(user_correcto)
r2 = requests.post(self.__api_base_url + self.__login, data = user_correcto_js)
token = r2.json()
token["token"] = token["token"][0:16]
datos = {"nombre": "Otronombre","password": "abcde","foto": "otrafoto","ubicacion": "unaubicacion"}
datos_js = json.dumps(datos)
r = requests.put(self.__api_base_url + self.__user_url + user_prueba + self.__ruser + "Fulano5" + self.__token + token["token"]+self.__pass + "abcde", data = datos_js)
self.assertEqual(r.status_code, 201)
def test_99modify_user_wrong_user(self):
        '''Check modifying a user with an invalid user'''
user_prueba = "Fulano7"
registro_valido = { "password": "abcde", "nombre": "Fulano7", "foto": "mifoto", "ubicación": "miubicacion"}
registro_valido_js = json.dumps(registro_valido)
r = requests.post(self.__api_base_url + self.__user_url + user_prueba, data = registro_valido_js)
user_correcto = { "id": "Fulano7", "password": "abcde" }
user_correcto_js = json.dumps(user_correcto)
r2 = requests.post(self.__api_base_url + self.__login, data = user_correcto_js)
token = r2.json()
token["token"] = token["token"][0:16]
datos = {"nombre": "Otronombre","password": "abcde","foto": "otrafoto","ubicación": "unaubicacion"}
datos_js = json.dumps(datos)
r = requests.put(self.__api_base_url + self.__user_url + "UsuarioInvalido" + self.__ruser + "Fulano7" + self.__token + token["token"]+self.__pass + "abcde", data = datos_js)
self.assertEqual(r.status_code, 401)
def test_991modify_user_wrong_name(self):
        '''Check modifying a user with an invalid name'''
user_prueba = "Fulano8"
registro_valido = { "password": "abcde", "nombre": "Fulano8", "foto": "mifoto", "ubicación": "miubicacion"}
registro_valido_js = json.dumps(registro_valido)
r = requests.post(self.__api_base_url + self.__user_url + user_prueba, data = registro_valido_js)
user_correcto = { "id": "Fulano8", "password": "abcde" }
user_correcto_js = json.dumps(user_correcto)
r2 = requests.post(self.__api_base_url + self.__login, data = user_correcto_js)
token = r2.json()
token["token"] = token["token"][0:16]
datos = {"nombre": "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa","password": "abcde","foto": "otrafoto","ubicación": "unaubicacion"}
datos_js = json.dumps(datos)
r = requests.put(self.__api_base_url + self.__user_url + user_prueba + self.__ruser + "Fulano8" + self.__token + token["token"]+self.__pass + "abcde", data = datos_js)
self.assertEqual(r.status_code, 401)
    # Conversation
    # Request the previous conversation between user <user1> and <user2>
def test_992get_messages(self):
        '''Check getting a conversation'''
        # Register User1
user_prueba = "Fulanito"
registro_valido = { "password": "abcde", "nombre": "Fulanito", "foto": "mifoto", "ubicación": "miubicacion"}
registro_valido_js = json.dumps(registro_valido)
r = requests.post(self.__api_base_url + self.__user_url + user_prueba, data = registro_valido_js)
        # Register User2
user_prueba2 = "Menito"
registro_valido2 = { "password": "abcdef", "nombre": "Menito", "foto": "mifotoo", "ubicación": "miubicacionn"}
registro_valido2_js = json.dumps(registro_valido2)
r2 = requests.post(self.__api_base_url + self.__user_url + user_prueba2, data = registro_valido2_js)
#Login
user_correcto = { "id": "Fulanito", "password": "abcde" }
user_correcto_js = json.dumps(user_correcto)
r3 = requests.post(self.__api_base_url + self.__login, data = user_correcto_js)
token1 = r3.json()
token1["token"] = token1["token"][0:16]
user_correcto2 = { "id": "Menito", "password": "abcdef" }
user_correcto2_js = json.dumps(user_correcto2)
r4 = requests.post(self.__api_base_url + self.__login, data = user_correcto2_js)
token2 = r4.json()
token2["token"] = token2["token"][0:16]
        # Send a message
mensaje = {"mensaje" : "hola"}
mensaje_js = json.dumps(mensaje)
r5 = requests.post(self.__api_base_url + self.__conversation + "Menito" + self.__ruser + "Fulanito" + self.__token + token1["token"], data = mensaje_js)
r6 = requests.get(self.__api_base_url + self.__conversation + "Menito" + self.__ruser + "Fulanito" + self.__token + token1["token"])
print r6
self.assertEqual(r6.status_code, 200)
def test_993get_messages_wrong_token(self):
        '''Check getting a conversation with an invalid token'''
        # Register User1
user_prueba = "Fulanit1"
registro_valido = { "password": "abcde", "nombre": "Fulanit1", "foto": "mifoto", "ubicación": "miubicacion"}
registro_valido_js = json.dumps(registro_valido)
r = requests.post(self.__api_base_url + self.__user_url + user_prueba, data = registro_valido_js)
        # Register User2
user_prueba2 = "Menito2"
registro_valido2 = { "password": "abcde", "nombre": "Menganito2", "foto": "mifoto", "ubicación": "miubicacion"}
        registro_valido2_js = json.dumps(registro_valido2)
r2 = requests.post(self.__api_base_url + self.__user_url + user_prueba2, data = registro_valido2_js)
#Login
user_correcto = { "id": "Fulanit1", "password": "abcde" }
user_correcto_js = json.dumps(user_correcto)
r3 = requests.post(self.__api_base_url + self.__login, data = user_correcto_js)
token1 = r3.json()
token1["token"] = token1["token"][0:16]
user_correcto2 = { "id": "Menito2", "password": "abcde" }
user_correcto2_js = json.dumps(user_correcto2)
r4 = requests.post(self.__api_base_url + self.__login, data = user_correcto2_js)
token2 = r4.json()
token2["token"] = token2["token"][0:16]
        # Send a message
mensaje = {"mensaje" : "hola"}
mensaje_js = json.dumps(mensaje)
r5 = requests.post(self.__api_base_url + self.__conversation + "Menito2" + self.__ruser + "Fulanit1" + self.__token + token1["token"], data = mensaje_js)
r6 = requests.get(self.__api_base_url + self.__conversation + "Menito2"+ self.__ruser + "Fulanit1" + self.__token + "TokenIncorrecto")
self.assertEqual(r6.status_code, 401)
def test_994get_messages_wrong_user(self):
        '''Check getting a conversation with a wrong user'''
        # Register User1
user_prueba = "Fulanit3"
registro_valido = { "password": "abcde", "nombre": "Fulanit3", "foto": "mifoto", "ubicación": "miubicacion"}
registro_valido_js = json.dumps(registro_valido)
r = requests.post(self.__api_base_url + self.__user_url + user_prueba, data = registro_valido_js)
        # Register User2
user_prueba2 = "Menito3"
registro_valido = { "password": "abcde", "nombre": "Menganito3", "foto": "mifoto", "ubicación": "miubicacion"}
registro_valido_js = json.dumps(registro_valido)
r = requests.post(self.__api_base_url + self.__user_url + user_prueba2, data = registro_valido_js)
#Login
user_correcto = { "id": "Fulanit3", "password": "abcde" }
user_correcto_js = json.dumps(user_correcto)
r3 = requests.post(self.__api_base_url + self.__login, data = user_correcto_js)
token1 = r3.json()
token1["token"] = token1["token"][0:16]
user_correcto2 = { "id": "Menito3", "password": "abcde" }
user_correcto2_js = json.dumps(user_correcto2)
r4 = requests.post(self.__api_base_url + self.__login, data = user_correcto2_js)
token2 = r4.json()
token2["token"] = token2["token"][0:16]
        # Send a message
mensaje = {"mensaje" : "hola"}
mensaje_js = json.dumps(mensaje)
r5 = requests.post(self.__api_base_url + self.__conversation + "Menito3" + self.__ruser + "Fulanit3" + self.__token + token1["token"], data = mensaje_js)
r6 = requests.get(self.__api_base_url + self.__conversation + "Menito3" + self.__ruser + "UserIncorrecto" + self.__token + token1["token"])
self.assertEqual(r6.status_code, 401)
    # Send a message from user <user1> to <user2>
def test_995send_message(self):
        '''Check sending a message from one user to another'''
        # Register User1
user_prueba = "Fulanit4"
registro_valido = { "password": "abcde", "nombre": "Fulanit4", "foto": "mifoto", "ubicación": "miubicacion"}
registro_valido_js = json.dumps(registro_valido)
r = requests.post(self.__api_base_url + self.__user_url + user_prueba, data = registro_valido_js)
        # Register User2
user_prueba2 = "Menito4"
registro_valido = { "password": "abcde", "nombre": "Menganito4", "foto": "mifoto", "ubicación": "miubicacion"}
registro_valido_js = json.dumps(registro_valido)
r = requests.post(self.__api_base_url + self.__user_url + user_prueba2, data = registro_valido_js)
#Login
user_correcto = { "id": "Fulanit4", "password": "abcde" }
user_correcto_js = json.dumps(user_correcto)
r3 = requests.post(self.__api_base_url + self.__login, data = user_correcto_js)
token1 = r3.json()
token1["token"] = token1["token"][0:16]
user_correcto2 = { "id": "Menito4", "password": "abcde" }
user_correcto2_js = json.dumps(user_correcto2)
r4 = requests.post(self.__api_base_url + self.__login, data = user_correcto2_js)
token2 = r4.json()
token2["token"] = token2["token"][0:16]
mensaje = {"mensaje" : "hola"}
mensaje_js = json.dumps(mensaje)
r = requests.post(self.__api_base_url + self.__conversation + "Menito4" + self.__ruser + "Fulanit4" + self.__token + token1["token"], data = mensaje_js)
self.assertEqual(r.status_code, 201)
def test_996send_message_wrong_token(self):
        '''Check sending a message from one user to another with an invalid token'''
        # Register User1
user_prueba = "Fulanit5"
registro_valido = { "password": "abcde", "nombre": "Fulanito5", "foto": "mifoto", "ubicación": "miubicacion"}
registro_valido_js = json.dumps(registro_valido)
r = requests.post(self.__api_base_url + self.__user_url + user_prueba, data = registro_valido_js)
        # Register User2
user_prueba2 = "Menito5"
registro_valido = { "password": "abcde", "nombre": "Menganito5", "foto": "mifoto", "ubicación": "miubicacion"}
registro_valido_js = json.dumps(registro_valido)
r = requests.post(self.__api_base_url + self.__user_url + user_prueba2, data = registro_valido_js)
#Login
user_correcto = { "id": "Fulanit5", "password": "abcde" }
user_correcto_js = json.dumps(user_correcto)
r3 = requests.post(self.__api_base_url + self.__login, data = user_correcto_js)
token1 = r3.json()
token1["token"] = token1["token"][0:16]
user_correcto2 = { "id": "Menito5", "password": "abcde" }
user_correcto2_js = json.dumps(user_correcto2)
r4 = requests.post(self.__api_base_url + self.__login, data = user_correcto2_js)
token2 = r4.json()
token2["token"] = token2["token"][0:16]
mensaje = {"mensaje" : "hola"}
mensaje_js = json.dumps(mensaje)
r = requests.post(self.__api_base_url + self.__conversation + "Menito5"+ self.__ruser + "Fulanit5" + self.__token + "tokenInvalido", data = mensaje_js)
self.assertEqual(r.status_code, 401)
def test_997send_message_wrong_user(self):
        '''Check sending a message from one user to another with an invalid user'''
        # Register User1
user_prueba = "Fulanit6"
registro_valido = { "password": "abcde", "nombre": "Fulanito6", "foto": "mifoto", "ubicación": "miubicacion"}
registro_valido_js = json.dumps(registro_valido)
r = requests.post(self.__api_base_url + self.__user_url + user_prueba, data = registro_valido_js)
        # Register User2
user_prueba2 = "Menito6"
registro_valido = { "password": "abcde", "nombre": "Menganito6", "foto": "mifoto", "ubicación": "miubicacion"}
registro_valido_js = json.dumps(registro_valido)
r = requests.post(self.__api_base_url + self.__user_url + user_prueba2, data = registro_valido_js)
#Login
user_correcto = { "id": "Fulanit6", "password": "abcde" }
user_correcto_js = json.dumps(user_correcto)
r3 = requests.post(self.__api_base_url + self.__login, data = user_correcto_js)
token1 = r3.json()
token1["token"] = token1["token"][0:16]
user_correcto2 = { "id": "Menito6", "password": "abcde" }
user_correcto2_js = json.dumps(user_correcto2)
r4 = requests.post(self.__api_base_url + self.__login, data = user_correcto2_js)
token2 = r4.json()
token2["token"] = token2["token"][0:16]
mensaje = {"mensaje" : "hola"}
mensaje_js = json.dumps(mensaje)
r = requests.post(self.__api_base_url + self.__conversation + "sUsuarioIncorrecto"+ self.__ruser + "Fulanit6" + self.__token + token1["token"], data = mensaje_js)
self.assertEqual(r.status_code, 401)
# Broadcast
def test_998get_broadcast(self):
        '''Check getting broadcast messages'''
        # Register User1
user_prueba = "John"
registro_valido = { "password": "abcde", "nombre": "Fulanit4", "foto": "mifoto", "ubicación": "miubicacion"}
registro_valido_js = json.dumps(registro_valido)
r = requests.post(self.__api_base_url + self.__user_url + user_prueba, data = registro_valido_js)
        # Register User2
user_prueba2 = "Paul"
registro_valido = { "password": "abcde", "nombre": "Menganito4", "foto": "mifoto", "ubicación": "miubicacion"}
registro_valido_js = json.dumps(registro_valido)
r = requests.post(self.__api_base_url + self.__user_url + user_prueba2, data = registro_valido_js)
#Login
user_correcto = { "id": "John", "password": "abcde" }
user_correcto_js = json.dumps(user_correcto)
r3 = requests.post(self.__api_base_url + self.__login, data = user_correcto_js)
token1 = r3.json()
token1["token"] = token1["token"][0:16]
user_correcto2 = { "id": "Paul", "password": "abcde" }
user_correcto2_js = json.dumps(user_correcto2)
r4 = requests.post(self.__api_base_url + self.__login, data = user_correcto2_js)
token2 = r4.json()
token2["token"] = token2["token"][0:16]
mensaje = {"mensaje" : "hola"}
mensaje_js = json.dumps(mensaje)
r = requests.post(self.__api_base_url + self.__conversation + "Paul" + self.__ruser + "John" + self.__token + token1["token"], data = mensaje_js)
r = requests.get(self.__api_base_url + self.__broadcast + self.__ruser + "John" + self.__token + token1["token"] + self.__lines + "1")
self.assertEqual(r.status_code, 200)
def test_999post_broadcast(self):
        '''Check posting a broadcast message'''
        # Register User1
user_prueba = "John"
registro_valido = { "password": "abcde", "nombre": "Fulanit4", "foto": "mifoto", "ubicación": "miubicacion"}
registro_valido_js = json.dumps(registro_valido)
r = requests.post(self.__api_base_url + self.__user_url + user_prueba, data = registro_valido_js)
        # Register User2
user_prueba2 = "Paul"
registro_valido = { "password": "abcde", "nombre": "Menganito4", "foto": "mifoto", "ubicación": "miubicacion"}
registro_valido_js = json.dumps(registro_valido)
r = requests.post(self.__api_base_url + self.__user_url + user_prueba2, data = registro_valido_js)
#Login
user_correcto = { "id": "John", "password": "abcde" }
user_correcto_js = json.dumps(user_correcto)
r3 = requests.post(self.__api_base_url + self.__login, data = user_correcto_js)
token1 = r3.json()
token1["token"] = token1["token"][0:16]
user_correcto2 = { "id": "Paul", "password": "abcde" }
user_correcto2_js = json.dumps(user_correcto2)
r4 = requests.post(self.__api_base_url + self.__login, data = user_correcto2_js)
token2 = r4.json()
token2["token"] = token2["token"][0:16]
mensaje = {"mensaje" : "hola"}
mensaje_js = json.dumps(mensaje)
r = requests.post(self.__api_base_url + self.__conversation + "Paul" + self.__ruser + "John" + self.__token + token1["token"], data = mensaje_js)
mensaje = {"mensaje" : "hola"}
mensaje_js = json.dumps(mensaje)
r = requests.post(self.__api_base_url + self.__broadcast + self.__ruser + "John" + self.__token + token1["token"],data = mensaje_js)
self.assertEqual(r.status_code, 201)
| gpl-2.0 | -8,995,136,570,888,199,000 | 50.178218 | 238 | 0.618534 | false |
purism/pdak | dak/dakdb/update70.py | 1 | 3429 | #!/usr/bin/env python
# coding=utf8
"""
Add suite options for overrides and control-suite to DB
@contact: Debian FTP Master <[email protected]>
@copyright: 2011 Mark Hymers <[email protected]>
@license: GNU General Public License version 2 or later
"""
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
################################################################################
import psycopg2
from daklib.dak_exceptions import DBUpdateError
from daklib.config import Config
################################################################################
def do_update(self):
"""
Add suite options for overrides and control-suite to DB
"""
print __doc__
try:
cnf = Config()
c = self.db.cursor()
c.execute("ALTER TABLE suite ADD COLUMN overrideprocess BOOLEAN NOT NULL DEFAULT FALSE")
c.execute("COMMENT ON COLUMN suite.overrideprocess IS %s", ['If true, check-overrides will process the suite by default'])
c.execute("ALTER TABLE suite ADD COLUMN overrideorigin TEXT DEFAULT NULL")
c.execute("COMMENT ON COLUMN suite.overrideprocess IS %s", ['If NOT NULL, check-overrides will take missing overrides from the named suite'])
# Migrate config file values into database
if "Check-Overrides::OverrideSuites" in cnf:
for suitename in cnf.subtree("Check-Overrides::OverrideSuites").list():
if cnf.get("Check-Overrides::OverrideSuites::%s::Process" % suitename, "0") == "1":
print "Marking %s to have overrides processed automatically" % suitename.lower()
c.execute("UPDATE suite SET overrideprocess = TRUE WHERE suite_name = %s", [suitename.lower()])
originsuite = cnf.get("Check-Overrides::OverrideSuites::%s::OriginSuite" % suitename, '')
if originsuite != '':
print "Setting %s to use %s as origin for overrides" % (suitename.lower(), originsuite.lower())
c.execute("UPDATE suite SET overrideorigin = %s WHERE suite_name = %s", [originsuite.lower(), suitename.lower()])
c.execute("ALTER TABLE suite ADD COLUMN allowcsset BOOLEAN NOT NULL DEFAULT FALSE")
c.execute("COMMENT ON COLUMN suite.allowcsset IS %s", ['Allow control-suite to be used with the --set option without forcing'])
# Import historical hard-coded values
c.execute("UPDATE suite SET allowcsset = TRUE WHERE suite_name IN ('testing', 'squeeze-updates')")
c.execute("UPDATE config SET value = '70' WHERE name = 'db_revision'")
self.db.commit()
except psycopg2.ProgrammingError as msg:
self.db.rollback()
raise DBUpdateError('Unable to apply sick update 70, rollback issued. Error message : %s' % (str(msg)))
| gpl-2.0 | -1,284,880,315,698,919,400 | 47.295775 | 149 | 0.654418 | false |
emc-openstack/storops | storops/unity/resource/quota_config.py | 1 | 3711 | # coding=utf-8
# Copyright (c) 2015 EMC Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import unicode_literals
import logging
from storops.exception import UnityQuotaConfigModifyException, \
UnityResourceNotFoundError
from storops.unity.resource import UnityResource, UnityResourceList
__author__ = 'Rajendra Indukuri'
log = logging.getLogger(__name__)
class UnityQuotaConfig(UnityResource):
"""
Support for Unity quotaConfig component
Operations supported:
modify: Modify quota_config using the quota_config_id
"""
@classmethod
def modify(cls, cli, quota_config_id,
quota_policy=None,
is_user_quota_enabled=None,
delete_user_quotas_with_disable=None,
is_access_deny_enabled=None,
grace_period=None,
default_hard_limit=None,
default_soft_limit=None):
"""
        Modifies quota_config params for the specified quota_config_id
        :param quota_config_id: required; specifies the
            quota_config to be modified
:param quota_policy: This is an enum which specifies how
            disk usage should be measured in blocks/file_size
:param is_user_quota_enabled: To see if user_quota is
enabled. Cannot be passed with quota_policy
:param delete_user_quotas_with_disable: whether to delete
user_quotas when disabling user quotas
:param is_access_deny_enabled: when true access will be
denied when limit is exceeded
:param grace_period: Grace period for soft limit
:param default_hard_limit: Default hard limit of user quotas
and tree quotas
:param default_soft_limit: Default soft limit of user quotas
and tree quotas.
:return: None.
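
        Example (a minimal illustrative sketch; the id and values are
        made up for demonstration)::

            UnityQuotaConfig.modify(cli, 'qc_1',
                                    grace_period=604800,
                                    default_hard_limit=10737418240)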
"""
quota_config = UnityQuotaConfig.get(_id=quota_config_id, cli=cli)
if not quota_config.existed:
raise UnityResourceNotFoundError(
'cannot find quota_config {}.'.format(quota_config_id))
# quota_policy and is_user_quota_enabled cannot be used together
if quota_policy is not None and is_user_quota_enabled is not None:
raise UnityQuotaConfigModifyException()
req_body = cli.make_body(
quotaPolicy=quota_policy,
isUserQuotaEnabled=is_user_quota_enabled,
deleteUserQuotasWithDisable=delete_user_quotas_with_disable,
isAccessDenyEnabled=is_access_deny_enabled,
gracePeriod=grace_period,
defaultHardLimit=default_hard_limit,
defaultSoftLimit=default_soft_limit
)
resp = cli.action(cls().resource_class, quota_config_id,
'modify', **req_body)
resp.raise_if_err()
return resp
class UnityQuotaConfigList(UnityResourceList):
"""
List representation of quota_config
"""
def __init__(self, cli=None, **filters):
super(UnityQuotaConfigList, self).__init__(cli, **filters)
@classmethod
def get_resource_class(cls):
return UnityQuotaConfig
| apache-2.0 | 8,178,510,219,616,063,000 | 36.484848 | 78 | 0.656427 | false |
spyder-ide/spyder.line_profiler | spyder_line_profiler/widgets/lineprofiler.py | 1 | 22093 | # -*- coding: utf-8 -*-
#
# Copyright © 2011 Spyder Project Contributors
# Licensed under the terms of the MIT License
# (see LICENSE.txt for details)
"""
Line Profiler widget
See the official documentation of line_profiler:
http://pythonhosted.org/line_profiler/
"""
# Standard library imports
from __future__ import with_statement
import hashlib
import inspect
import linecache
import os
import os.path as osp
import time
import sys
# Third party imports
from qtpy.compat import getopenfilename
from qtpy.QtCore import (QByteArray, QProcess, Qt, QTextCodec,
QProcessEnvironment, Signal)
from qtpy.QtGui import QBrush, QColor, QFont
from qtpy.QtWidgets import (QHBoxLayout, QWidget, QMessageBox, QVBoxLayout,
QLabel, QTreeWidget, QTreeWidgetItem, QApplication)
# Local imports
from spyder.config.base import get_conf_path, get_translation
from spyder.utils import programs
from spyder.utils.qthelpers import create_toolbutton, get_icon
from spyder.widgets.comboboxes import PythonModulesComboBox
from spyder.utils.misc import add_pathlist_to_PYTHONPATH
from spyder.widgets.variableexplorer.texteditor import TextEditor
try:
from spyder.py3compat import to_text_string, getcwd, pickle
except ImportError:
# python2
to_text_string = unicode
getcwd = os.getcwdu
import cPickle as pickle
# This is needed for testing this module as a stand alone script
try:
_ = get_translation("line_profiler", dirname="spyder_line_profiler")
except KeyError as error:
import gettext
_ = gettext.gettext
locale_codec = QTextCodec.codecForLocale()
COL_NO = 0
COL_HITS = 1
COL_TIME = 2
COL_PERHIT = 3
COL_PERCENT = 4
COL_LINE = 5
COL_POS = 0 # Position is not displayed but set as Qt.UserRole
CODE_NOT_RUN_COLOR = QBrush(QColor.fromRgb(128, 128, 128, 200))
WEBSITE_URL = 'http://pythonhosted.org/line_profiler/'
def is_lineprofiler_installed():
"""
Checks if the program and the library for line_profiler is installed.
"""
return (programs.is_module_installed('line_profiler')
and programs.find_program('kernprof') is not None)
class LineProfilerWidget(QWidget):
"""
Line profiler widget.
"""
DATAPATH = get_conf_path('lineprofiler.results')
VERSION = '0.0.1'
redirect_stdio = Signal(bool)
sig_finished = Signal()
def __init__(self, parent):
QWidget.__init__(self, parent)
# Need running QApplication before importing runconfig
from spyder.plugins import runconfig
self.runconfig = runconfig
self.spyder_pythonpath = None
self.setWindowTitle("Line profiler")
self.output = None
self.error_output = None
self.use_colors = True
self._last_wdir = None
self._last_args = None
self._last_pythonpath = None
self.filecombo = PythonModulesComboBox(self)
self.start_button = create_toolbutton(
self, icon=get_icon('run.png'),
text=_("Profile by line"),
tip=_("Run line profiler"),
triggered=(lambda checked=False: self.analyze()), text_beside_icon=True)
self.stop_button = create_toolbutton(
self,
icon=get_icon('terminate.png'),
text=_("Stop"),
tip=_("Stop current profiling"),
text_beside_icon=True)
self.filecombo.valid.connect(self.start_button.setEnabled)
#self.filecombo.valid.connect(self.show_data)
# FIXME: The combobox emits this signal on almost any event
# triggering show_data() too early, too often.
browse_button = create_toolbutton(
self, icon=get_icon('fileopen.png'),
tip=_('Select Python script'),
triggered=self.select_file)
self.datelabel = QLabel()
self.log_button = create_toolbutton(
self, icon=get_icon('log.png'),
text=_("Output"),
text_beside_icon=True,
tip=_("Show program's output"),
triggered=self.show_log)
self.datatree = LineProfilerDataTree(self)
self.collapse_button = create_toolbutton(
self,
icon=get_icon('collapse.png'),
triggered=lambda dD=-1: self.datatree.collapseAll(),
tip=_('Collapse all'))
self.expand_button = create_toolbutton(
self,
icon=get_icon('expand.png'),
triggered=lambda dD=1: self.datatree.expandAll(),
tip=_('Expand all'))
hlayout1 = QHBoxLayout()
hlayout1.addWidget(self.filecombo)
hlayout1.addWidget(browse_button)
hlayout1.addWidget(self.start_button)
hlayout1.addWidget(self.stop_button)
hlayout2 = QHBoxLayout()
hlayout2.addWidget(self.collapse_button)
hlayout2.addWidget(self.expand_button)
hlayout2.addStretch()
hlayout2.addWidget(self.datelabel)
hlayout2.addStretch()
hlayout2.addWidget(self.log_button)
layout = QVBoxLayout()
layout.addLayout(hlayout1)
layout.addLayout(hlayout2)
layout.addWidget(self.datatree)
self.setLayout(layout)
self.process = None
self.set_running_state(False)
self.start_button.setEnabled(False)
if not is_lineprofiler_installed():
for widget in (self.datatree, self.filecombo, self.log_button,
self.start_button, self.stop_button, browse_button,
self.collapse_button, self.expand_button):
widget.setDisabled(True)
text = _(
'<b>Please install the <a href="%s">line_profiler module</a></b>'
) % WEBSITE_URL
self.datelabel.setText(text)
self.datelabel.setOpenExternalLinks(True)
else:
pass # self.show_data()
def analyze(self, filename=None, wdir=None, args=None, pythonpath=None,
use_colors=True):
self.use_colors = use_colors
if not is_lineprofiler_installed():
return
self.kill_if_running()
#index, _data = self.get_data(filename) # FIXME: storing data is not implemented yet
if filename is not None:
filename = osp.abspath(to_text_string(filename))
index = self.filecombo.findText(filename)
if index == -1:
self.filecombo.addItem(filename)
self.filecombo.setCurrentIndex(self.filecombo.count()-1)
else:
self.filecombo.setCurrentIndex(index)
self.filecombo.selected()
if self.filecombo.is_valid():
filename = to_text_string(self.filecombo.currentText())
runconf = self.runconfig.get_run_configuration(filename)
if runconf is not None:
if wdir is None:
if runconf.wdir_enabled:
wdir = runconf.wdir
elif runconf.cw_dir:
wdir = os.getcwd()
elif runconf.file_dir:
wdir = osp.dirname(filename)
elif runconf.fixed_dir:
wdir = runconf.dir
if args is None:
if runconf.args_enabled:
args = runconf.args
if wdir is None:
wdir = osp.dirname(filename)
if pythonpath is None:
pythonpath = self.spyder_pythonpath
self.start(wdir, args, pythonpath)
def select_file(self):
self.redirect_stdio.emit(False)
filename, _selfilter = getopenfilename(
self, _("Select Python script"), getcwd(),
_("Python scripts")+" (*.py ; *.pyw)")
        self.redirect_stdio.emit(True)
if filename:
self.analyze(filename)
def show_log(self):
if self.output:
TextEditor(self.output, title=_("Line profiler output"),
readonly=True, size=(700, 500)).exec_()
def show_errorlog(self):
if self.error_output:
TextEditor(self.error_output, title=_("Line profiler output"),
readonly=True, size=(700, 500)).exec_()
def start(self, wdir=None, args=None, pythonpath=None):
filename = to_text_string(self.filecombo.currentText())
if wdir is None:
wdir = self._last_wdir
if wdir is None:
                wdir = osp.dirname(filename)
if args is None:
args = self._last_args
if args is None:
args = []
if pythonpath is None:
pythonpath = self._last_pythonpath
self._last_wdir = wdir
self._last_args = args
self._last_pythonpath = pythonpath
self.datelabel.setText(_('Profiling, please wait...'))
self.process = QProcess(self)
self.process.setProcessChannelMode(QProcess.SeparateChannels)
self.process.setWorkingDirectory(wdir)
self.process.readyReadStandardOutput.connect(self.read_output)
self.process.readyReadStandardError.connect(
lambda: self.read_output(error=True))
self.process.finished.connect(self.finished)
self.stop_button.clicked.connect(self.process.kill)
if pythonpath is not None:
env = [to_text_string(_pth)
for _pth in self.process.systemEnvironment()]
add_pathlist_to_PYTHONPATH(env, pythonpath)
processEnvironment = QProcessEnvironment()
for envItem in env:
envName, separator, envValue = envItem.partition('=')
processEnvironment.insert(envName, envValue)
self.process.setProcessEnvironment(processEnvironment)
self.output = ''
self.error_output = ''
if os.name == 'nt':
# On Windows, one has to replace backslashes by slashes to avoid
# confusion with escape characters (otherwise, for example, '\t'
# will be interpreted as a tabulation):
filename = osp.normpath(filename).replace(os.sep, '/')
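            # Illustrative (hypothetical path): r'C:\Users\me\script.py'
            # becomes 'C:/Users/me/script.py' after the conversion above.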
p_args = ['-lvb', '-o', '"' + self.DATAPATH + '"',
'"' + filename + '"']
if args:
p_args.extend(programs.shell_split(args))
executable = '"' + programs.find_program('kernprof') + '"'
executable += ' ' + ' '.join(p_args)
executable = executable.replace(os.sep, '/')
self.process.start(executable)
else:
p_args = ['-lvb', '-o', self.DATAPATH, filename]
if args:
p_args.extend(programs.shell_split(args))
executable = 'kernprof'
self.process.start(executable, p_args)
running = self.process.waitForStarted()
self.set_running_state(running)
if not running:
QMessageBox.critical(self, _("Error"),
_("Process failed to start"))
def set_running_state(self, state=True):
self.start_button.setEnabled(not state)
self.stop_button.setEnabled(state)
def read_output(self, error=False):
if error:
self.process.setReadChannel(QProcess.StandardError)
else:
self.process.setReadChannel(QProcess.StandardOutput)
qba = QByteArray()
while self.process.bytesAvailable():
if error:
qba += self.process.readAllStandardError()
else:
qba += self.process.readAllStandardOutput()
text = to_text_string(locale_codec.toUnicode(qba.data()))
if error:
self.error_output += text
else:
self.output += text
def finished(self):
self.set_running_state(False)
self.show_errorlog() # If errors occurred, show them.
self.output = self.error_output + self.output
# FIXME: figure out if show_data should be called here or
# as a signal from the combobox
self.show_data(justanalyzed=True)
self.sig_finished.emit()
def kill_if_running(self):
if self.process is not None:
if self.process.state() == QProcess.Running:
self.process.kill()
self.process.waitForFinished()
def show_data(self, justanalyzed=False):
if not justanalyzed:
self.output = None
self.log_button.setEnabled(
self.output is not None and len(self.output) > 0)
self.kill_if_running()
filename = to_text_string(self.filecombo.currentText())
if not filename:
return
self.datatree.load_data(self.DATAPATH)
self.datelabel.setText(_('Sorting data, please wait...'))
QApplication.processEvents()
self.datatree.show_tree()
text_style = "<span style=\'color: #444444\'><b>%s </b></span>"
date_text = text_style % time.strftime("%d %b %Y %H:%M",
time.localtime())
self.datelabel.setText(date_text)
class LineProfilerDataTree(QTreeWidget):
"""
Convenience tree widget (with built-in model)
to store and view line profiler data.
"""
def __init__(self, parent=None):
QTreeWidget.__init__(self, parent)
self.header_list = [
_('Line #'), _('Hits'), _('Time (ms)'), _('Per hit (ms)'),
_('% Time'), _('Line contents')]
self.stats = None # To be filled by self.load_data()
self.max_time = 0 # To be filled by self.load_data()
self.header().setDefaultAlignment(Qt.AlignCenter)
self.setColumnCount(len(self.header_list))
self.setHeaderLabels(self.header_list)
self.clear()
self.itemActivated.connect(self.item_activated)
def show_tree(self):
"""Populate the tree with line profiler data and display it."""
self.clear() # Clear before re-populating
self.setItemsExpandable(True)
self.setSortingEnabled(False)
self.populate_tree()
self.expandAll()
for col in range(self.columnCount()-1):
self.resizeColumnToContents(col)
if self.topLevelItemCount() > 1:
self.collapseAll()
self.setSortingEnabled(True)
self.sortItems(COL_POS, Qt.AscendingOrder)
def load_data(self, profdatafile):
"""Load line profiler data saved by kernprof module"""
# lstats has the following layout :
# lstats.timings =
# {(filename1, line_no1, function_name1):
# [(line_no1, hits1, total_time1),
# (line_no2, hits2, total_time2)],
# (filename2, line_no2, function_name2):
# [(line_no1, hits1, total_time1),
# (line_no2, hits2, total_time2),
# (line_no3, hits3, total_time3)]}
# lstats.unit = time_factor
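        # For illustration (hypothetical numbers): a single profiled function
        # foo() starting at line 10 of example.py could give
        #   lstats.timings = {('example.py', 10, 'foo'): [(12, 3, 180), (13, 3, 420)]}
        #   lstats.unit = 1e-06  # raw times above would then be in microseconds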
with open(profdatafile, 'rb') as fid:
lstats = pickle.load(fid)
# First pass to group by filename
self.stats = dict()
linecache.checkcache()
for func_info, stats in lstats.timings.items():
            # func_info is a tuple containing (filename, line, function name)
filename, start_line_no = func_info[:2]
# Read code
start_line_no -= 1 # include the @profile decorator
all_lines = linecache.getlines(filename)
block_lines = inspect.getblock(all_lines[start_line_no:])
# Loop on each line of code
func_stats = []
func_total_time = 0.0
next_stat_line = 0
for line_no, code_line in enumerate(block_lines):
line_no += start_line_no + 1 # Lines start at 1
code_line = code_line.rstrip('\n')
if (next_stat_line >= len(stats)
or line_no != stats[next_stat_line][0]):
# Line didn't run
hits, line_total_time, time_per_hit = None, None, None
else:
# Compute line times
hits, line_total_time = stats[next_stat_line][1:]
line_total_time *= lstats.unit
time_per_hit = line_total_time / hits
func_total_time += line_total_time
next_stat_line += 1
func_stats.append(
[line_no, code_line, line_total_time, time_per_hit,
hits])
# Compute percent time
for line in func_stats:
line_total_time = line[2]
if line_total_time is None:
line.append(None)
else:
line.append(line_total_time / func_total_time)
# Fill dict
self.stats[func_info] = [func_stats, func_total_time]
def fill_item(self, item, filename, line_no, code, time, percent, perhit,
hits):
item.setData(COL_POS, Qt.UserRole, (osp.normpath(filename), line_no))
item.setData(COL_NO, Qt.DisplayRole, line_no)
item.setData(COL_LINE, Qt.DisplayRole, code)
if percent is None:
percent = ''
else:
percent = '%.1f' % (100 * percent)
item.setData(COL_PERCENT, Qt.DisplayRole, percent)
item.setTextAlignment(COL_PERCENT, Qt.AlignCenter)
if time is None:
time = ''
else:
time = '%.3f' % (time * 1e3)
item.setData(COL_TIME, Qt.DisplayRole, time)
item.setTextAlignment(COL_TIME, Qt.AlignCenter)
if perhit is None:
perhit = ''
else:
perhit = '%.3f' % (perhit * 1e3)
item.setData(COL_PERHIT, Qt.DisplayRole, perhit)
item.setTextAlignment(COL_PERHIT, Qt.AlignCenter)
if hits is None:
hits = ''
else:
hits = '%d' % hits
item.setData(COL_HITS, Qt.DisplayRole, hits)
item.setTextAlignment(COL_HITS, Qt.AlignCenter)
def populate_tree(self):
"""Create each item (and associated data) in the tree"""
if not self.stats:
warn_item = QTreeWidgetItem(self)
warn_item.setData(
0, Qt.DisplayRole,
_('No timings to display. '
'Did you forget to add @profile decorators ?')
.format(url=WEBSITE_URL))
warn_item.setFirstColumnSpanned(True)
warn_item.setTextAlignment(0, Qt.AlignCenter)
font = warn_item.font(0)
font.setStyle(QFont.StyleItalic)
warn_item.setFont(0, font)
return
try:
monospace_font = self.window().editor.get_plugin_font()
except AttributeError: # If run standalone for testing
monospace_font = QFont("Courier New")
monospace_font.setPointSize(10)
for func_info, func_data in self.stats.items():
# Function name and position
filename, start_line_no, func_name = func_info
func_stats, func_total_time = func_data
func_item = QTreeWidgetItem(self)
func_item.setData(
0, Qt.DisplayRole,
_('{func_name} ({time_ms:.3f}ms) in file "{filename}", '
'line {line_no}').format(
filename=filename,
line_no=start_line_no,
func_name=func_name,
time_ms=func_total_time * 1e3))
func_item.setFirstColumnSpanned(True)
func_item.setData(COL_POS, Qt.UserRole,
(osp.normpath(filename), start_line_no))
# For sorting by time
func_item.setData(COL_TIME, Qt.DisplayRole, func_total_time * 1e3)
func_item.setData(COL_PERCENT, Qt.DisplayRole,
func_total_time * 1e3)
if self.parent().use_colors:
                # Choose a deterministic unique color for the function
md5 = hashlib.md5((filename + func_name).encode("utf8")).hexdigest()
hue = (int(md5[:2], 16) - 68) % 360 # avoid blue (unreadable)
func_color = QColor.fromHsv(hue, 200, 255)
else:
# Red color only
func_color = QColor.fromRgb(255, 0, 0)
# Lines of code
for line_info in func_stats:
line_item = QTreeWidgetItem(func_item)
(line_no, code_line, line_total_time, time_per_hit,
hits, percent) = line_info
self.fill_item(
line_item, filename, line_no, code_line,
line_total_time, percent, time_per_hit, hits)
# Color background
if line_total_time is not None:
alpha = percent
color = QColor(func_color)
color.setAlphaF(alpha) # Returns None
color = QBrush(color)
for col in range(self.columnCount()):
line_item.setBackground(col, color)
else:
for col in range(self.columnCount()):
line_item.setForeground(col, CODE_NOT_RUN_COLOR)
# Monospace font for code
line_item.setFont(COL_LINE, monospace_font)
def item_activated(self, item):
filename, line_no = item.data(COL_POS, Qt.UserRole)
self.parent().edit_goto.emit(filename, line_no, '')
def test():
"""Run widget test"""
from spyder.utils.qthelpers import qapplication
app = qapplication()
widget = LineProfilerWidget(None)
widget.resize(800, 600)
widget.show()
widget.analyze(osp.normpath(osp.join(osp.dirname(__file__), os.pardir,
'tests/profiling_test_script.py')),
use_colors=True)
sys.exit(app.exec_())
if __name__ == '__main__':
test()
| mit | 1,243,849,583,142,293,200 | 36.317568 | 92 | 0.567988 | false |
ryfeus/lambda-packs | Spacy/source2.7/spacy/tokens/printers.py | 1 | 2687 | # coding: utf8
from __future__ import unicode_literals
from .doc import Doc
from ..symbols import HEAD, TAG, DEP, ENT_IOB, ENT_TYPE
def merge_ents(doc):
"""Helper: merge adjacent entities into single tokens; modifies the doc."""
for ent in doc.ents:
ent.merge(ent.root.tag_, ent.text, ent.label_)
return doc
def format_POS(token, light, flat):
"""Helper: form the POS output for a token."""
subtree = dict([
("word", token.text),
("lemma", token.lemma_), # trigger
("NE", token.ent_type_), # trigger
("POS_fine", token.tag_),
("POS_coarse", token.pos_),
("arc", token.dep_),
("modifiers", [])
])
if light:
subtree.pop("lemma")
subtree.pop("NE")
if flat:
subtree.pop("arc")
subtree.pop("modifiers")
return subtree
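# For illustration, format_POS for the token "pizza" from the parse_tree()
# example below yields (with light=False, flat=False) roughly:
#   {'word': 'pizza', 'lemma': 'pizza', 'NE': '', 'POS_fine': 'NN',
#    'POS_coarse': 'NOUN', 'arc': 'dobj', 'modifiers': []}
# With light=True the 'lemma' and 'NE' keys are dropped; with flat=True the
# 'arc' and 'modifiers' keys are dropped.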
def POS_tree(root, light=False, flat=False):
"""Helper: generate a POS tree for a root token. The doc must have
    `merge_ents(doc)` run on it.
"""
subtree = format_POS(root, light=light, flat=flat)
for c in root.children:
subtree["modifiers"].append(POS_tree(c))
return subtree
def parse_tree(doc, light=False, flat=False):
"""Make a copy of the doc and construct a syntactic parse tree similar to
displaCy. Generates the POS tree for all sentences in a doc.
doc (Doc): The doc for parsing.
RETURNS (dict): The parse tree.
EXAMPLE:
>>> doc = nlp('Bob brought Alice the pizza. Alice ate the pizza.')
>>> trees = doc.print_tree()
>>> trees[1]
{'modifiers': [
{'modifiers': [], 'NE': 'PERSON', 'word': 'Alice', 'arc': 'nsubj',
'POS_coarse': 'PROPN', 'POS_fine': 'NNP', 'lemma': 'Alice'},
{'modifiers': [
{'modifiers': [], 'NE': '', 'word': 'the', 'arc': 'det',
'POS_coarse': 'DET', 'POS_fine': 'DT', 'lemma': 'the'}],
'NE': '', 'word': 'pizza', 'arc': 'dobj', 'POS_coarse': 'NOUN',
'POS_fine': 'NN', 'lemma': 'pizza'},
{'modifiers': [], 'NE': '', 'word': '.', 'arc': 'punct',
'POS_coarse': 'PUNCT', 'POS_fine': '.', 'lemma': '.'}],
'NE': '', 'word': 'ate', 'arc': 'ROOT', 'POS_coarse': 'VERB',
'POS_fine': 'VBD', 'lemma': 'eat'}
"""
doc_clone = Doc(doc.vocab, words=[w.text for w in doc])
doc_clone.from_array([HEAD, TAG, DEP, ENT_IOB, ENT_TYPE],
doc.to_array([HEAD, TAG, DEP, ENT_IOB, ENT_TYPE]))
merge_ents(doc_clone) # merge the entities into single tokens first
return [POS_tree(sent.root, light=light, flat=flat)
for sent in doc_clone.sents]
| mit | 3,038,236,058,106,726,400 | 35.310811 | 79 | 0.54224 | false |
pombredanne/django-url-filter | url_filter/backends/sqlalchemy.py | 1 | 4251 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals
import itertools
from sqlalchemy import func
from sqlalchemy.orm import class_mapper
from sqlalchemy.sql.expression import not_
from .base import BaseFilterBackend
def lower(value):
try:
return value.lower()
except AttributeError:
return value
class SQLAlchemyFilterBackend(BaseFilterBackend):
supported_lookups = {
'contains',
'endswith',
'exact',
'gt',
'gte',
'icontains',
'iendswith',
'iexact',
'in',
'isnull',
'istartswith',
'lt',
'lte',
'range',
'startswith',
}
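    # Each lookup maps to a ``_build_clause_<lookup>`` method below; e.g. a
    # spec with lookup='iexact' and value='Foo' is dispatched to
    # _build_clause_iexact(), producing func.lower(column) == 'foo'.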
def __init__(self, *args, **kwargs):
super(SQLAlchemyFilterBackend, self).__init__(*args, **kwargs)
assert len(self.queryset._entities) == 1, (
'{} does not support filtering when multiple entities '
'are being queried (e.g. session.query(Foo, Bar)).'
''.format(self.__class__.__name__)
)
def get_model(self):
return self.queryset._primary_entity.entities[0]
def filter(self):
if not self.specs:
return self.queryset
clauses = [self.build_clause(spec) for spec in self.specs]
conditions, joins = zip(*clauses)
joins = list(itertools.chain(*joins))
qs = self.queryset
if joins:
qs = qs.join(*joins)
return qs.filter(*conditions)
def build_clause(self, spec):
to_join = []
model = self.model
for component in spec.components:
_field = getattr(model, component)
field = self._get_properties_for_model(model)[component]
try:
model = self._get_related_model_for_field(field)
except AttributeError:
break
else:
to_join.append(_field)
builder = getattr(self, '_build_clause_{}'.format(spec.lookup))
column = self._get_attribute_for_field(field)
clause = builder(spec, column)
if spec.is_negated:
clause = not_(clause)
return clause, to_join
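    # Illustrative sketch (hypothetical User/Profile models): for a spec with
    # components=['user', 'email'], lookup='icontains' and value='foo' on a
    # Profile query, build_clause() walks Profile.user (a relationship,
    # collected for joining) and stops at User.email, returning roughly
    #   (func.lower(User.email).contains('foo'), [Profile.user])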
def _build_clause_contains(self, spec, column):
return column.contains(spec.value)
def _build_clause_endswith(self, spec, column):
return column.endswith(spec.value)
def _build_clause_exact(self, spec, column):
return column == spec.value
def _build_clause_gt(self, spec, column):
return column > spec.value
def _build_clause_gte(self, spec, column):
return column >= spec.value
def _build_clause_icontains(self, spec, column):
return func.lower(column).contains(lower(spec.value))
def _build_clause_iendswith(self, spec, column):
return func.lower(column).endswith(lower(spec.value))
def _build_clause_iexact(self, spec, column):
return func.lower(column) == lower(spec.value)
def _build_clause_in(self, spec, column):
return column.in_(spec.value)
def _build_clause_isnull(self, spec, column):
if spec.value:
return column == None # noqa
else:
return column != None # noqa
def _build_clause_istartswith(self, spec, column):
return func.lower(column).startswith(lower(spec.value))
def _build_clause_lt(self, spec, column):
return column < spec.value
def _build_clause_lte(self, spec, column):
return column <= spec.value
def _build_clause_range(self, spec, column):
return column.between(*spec.value)
def _build_clause_startswith(self, spec, column):
return column.startswith(spec.value)
@classmethod
def _get_properties_for_model(cls, model):
mapper = class_mapper(model)
return {
i.key: i
for i in mapper.iterate_properties
}
@classmethod
def _get_column_for_field(cls, field):
return field.columns[0]
@classmethod
def _get_attribute_for_field(cls, field):
return field.class_attribute
@classmethod
    def _get_related_model_for_field(cls, field):
return field._dependency_processor.mapper.class_
| mit | -6,336,739,438,157,124,000 | 26.784314 | 72 | 0.597271 | false |
kreeger/etcetera | urls.py | 1 | 1576 | from django.conf.urls.defaults import *
from django.contrib.auth.views import login, logout
from django.contrib import admin
from etcetera.settings import SITE_ROOT, DEBUG
admin.autodiscover()
# For master/general use.
urlpatterns = patterns('',
url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
url(r'^admin/jsi18n/$', 'django.views.i18n.javascript_catalog'),
url(r'^admin/', include(admin.site.urls)),
url(r'^login/$', login, name="etcetera-login"),
url(r'^logout/$', logout, name="etcetera-logout"),
)
# For only when in development.
if DEBUG:
urlpatterns += patterns('',
url(r'^_media/(?P<path>.*)$', 'django.views.static.serve', {'document_root': (SITE_ROOT + '/_media')}),
)
# For equipment management.
urlpatterns += patterns('',
url(r'^equipment/', include('etcetera.equipment.urls')),
)
# For checkout/reservation management.
urlpatterns += patterns('',
url(r'^checkout/', include('etcetera.checkout.urls')),
)
# For service management.
urlpatterns += patterns('',
url(r'^service/', include('etcetera.service.urls')),
)
# For report generation.
urlpatterns += patterns('',
url(r'^reports/', include('etcetera.reports.urls')),
)
# For university structure management.
urlpatterns += patterns('',
url(r'^structure/', include('etcetera.structure.urls')),
)
# For extra things.
urlpatterns += patterns('',
url(r'^extras/', include('etcetera.extras.urls')),
url(r'^user/', include('etcetera.extras.urls-profile')),
url(r'^$', 'etcetera.extras.views.index', name="etcetera-home"),
) | bsd-3-clause | -469,688,098,523,634,000 | 28.203704 | 111 | 0.673223 | false |
kaplun/ops | modules/bibupload/lib/bibupload.py | 1 | 146505 | # -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
BibUpload: Receive MARC XML file and update the appropriate database
tables according to options.
"""
__revision__ = "$Id$"
import os
import re
import sys
import time
from datetime import datetime
from zlib import compress
import socket
import marshal
import copy
import tempfile
import urlparse
import urllib2
import urllib
from invenio.config import CFG_OAI_ID_FIELD, \
CFG_BIBUPLOAD_EXTERNAL_SYSNO_TAG, \
CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG, \
CFG_BIBUPLOAD_EXTERNAL_OAIID_PROVENANCE_TAG, \
CFG_BIBUPLOAD_STRONG_TAGS, \
CFG_BIBUPLOAD_CONTROLLED_PROVENANCE_TAGS, \
CFG_BIBUPLOAD_SERIALIZE_RECORD_STRUCTURE, \
CFG_BIBUPLOAD_DELETE_FORMATS, \
CFG_SITE_URL, \
CFG_SITE_SECURE_URL, \
CFG_SITE_RECORD, \
CFG_OAI_PROVENANCE_ALTERED_SUBFIELD, \
CFG_BIBUPLOAD_DISABLE_RECORD_REVISIONS, \
CFG_BIBUPLOAD_CONFLICTING_REVISION_TICKET_QUEUE, \
CFG_CERN_SITE, \
CFG_INSPIRE_SITE
from invenio.jsonutils import json, CFG_JSON_AVAILABLE
from invenio.bibupload_config import CFG_BIBUPLOAD_CONTROLFIELD_TAGS, \
CFG_BIBUPLOAD_SPECIAL_TAGS, \
CFG_BIBUPLOAD_DELETE_CODE, \
CFG_BIBUPLOAD_DELETE_VALUE, \
CFG_BIBUPLOAD_OPT_MODES
from invenio.dbquery import run_sql
from invenio.bibrecord import create_records, \
record_add_field, \
record_delete_field, \
record_xml_output, \
record_get_field_instances, \
record_get_field_value, \
record_get_field_values, \
field_get_subfield_values, \
field_get_subfield_instances, \
record_modify_subfield, \
record_delete_subfield_from, \
record_delete_fields, \
record_add_subfield_into, \
record_find_field, \
record_extract_oai_id, \
record_extract_dois, \
record_has_field, \
records_identical, \
record_drop_duplicate_fields
from invenio.search_engine import get_record, record_exists, search_pattern
from invenio.dateutils import convert_datestruct_to_datetext
from invenio.errorlib import register_exception
from invenio.bibcatalog import BIBCATALOG_SYSTEM
from invenio.intbitset import intbitset
from invenio.urlutils import make_user_agent_string
from invenio.textutils import wash_for_xml
from invenio.config import CFG_BIBDOCFILE_FILEDIR
from invenio.bibtask import task_init, write_message, \
task_set_option, task_get_option, task_get_task_param, \
task_update_progress, task_sleep_now_if_required, fix_argv_paths, \
RecoverableError
from invenio.bibdocfile import BibRecDocs, file_strip_ext, normalize_format, \
get_docname_from_url, check_valid_url, download_url, \
KEEP_OLD_VALUE, decompose_bibdocfile_url, InvenioBibDocFileError, \
bibdocfile_url_p, CFG_BIBDOCFILE_AVAILABLE_FLAGS, guess_format_from_url, \
BibRelation, MoreInfo
from invenio.search_engine import search_pattern
from invenio.bibupload_revisionverifier import RevisionVerifier, \
InvenioBibUploadConflictingRevisionsError, \
InvenioBibUploadInvalidRevisionError, \
InvenioBibUploadMissing005Error, \
InvenioBibUploadUnchangedRecordError
#Statistic variables
stat = {}
stat['nb_records_to_upload'] = 0
stat['nb_records_updated'] = 0
stat['nb_records_inserted'] = 0
stat['nb_errors'] = 0
stat['nb_holdingpen'] = 0
stat['exectime'] = time.localtime()
_WRITING_RIGHTS = None
CFG_BIBUPLOAD_ALLOWED_SPECIAL_TREATMENTS = ('oracle', )
CFG_HAS_BIBCATALOG = "UNKNOWN"
def check_bibcatalog():
"""
Return True if bibcatalog is available.
"""
global CFG_HAS_BIBCATALOG # pylint: disable=W0603
if CFG_HAS_BIBCATALOG != "UNKNOWN":
return CFG_HAS_BIBCATALOG
CFG_HAS_BIBCATALOG = True
if BIBCATALOG_SYSTEM is not None:
bibcatalog_response = BIBCATALOG_SYSTEM.check_system()
else:
bibcatalog_response = "No ticket system configured"
if bibcatalog_response != "":
write_message("BibCatalog error: %s\n" % (bibcatalog_response,))
CFG_HAS_BIBCATALOG = False
return CFG_HAS_BIBCATALOG
## Let's set a reasonable timeout for URL request (e.g. FFT)
socket.setdefaulttimeout(40)
def parse_identifier(identifier):
"""Parse the identifier and determine if it is temporary or fixed"""
id_str = str(identifier)
if not id_str.startswith("TMP:"):
return (False, identifier)
else:
return (True, id_str[4:])
def resolve_identifier(tmps, identifier):
"""Resolves an identifier. If the identifier is not temporary, this
function is an identity on the second argument. Otherwise, a resolved
value is returned or an exception raised"""
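    # Illustrative (hypothetical) usage, assuming tmps == {'rel1': 42}:
    #   resolve_identifier(tmps, 'TMP:rel1') -> 42
    #   resolve_identifier(tmps, '17')       -> 17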
is_tmp, tmp_id = parse_identifier(identifier)
if is_tmp:
        if tmp_id not in tmps:
raise StandardError("Temporary identifier %s not present in the dictionary" % (tmp_id, ))
if tmps[tmp_id] == -1:
            # the identifier has been declared but never assigned a value -
            # probably an error during processing
            raise StandardError("Temporary identifier %s has been declared, but never assigned a value. Probably an error happened during the processing of an appropriate FFT. Please see the log" % (tmp_id, ))
return int(tmps[tmp_id])
else:
return int(identifier)
_re_find_001 = re.compile('<controlfield\\s+tag=("001"|\'001\')\\s*>\\s*(\\d*)\\s*</controlfield>', re.S)
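# For illustration, on a MARCXML snippet such as
#   '<controlfield tag="001">123</controlfield>'
# _re_find_001.findall() yields [('"001"', '123')], i.e. the record id is
# available as group[1] of each match (as used below).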
def bibupload_pending_recids():
"""This function embed a bit of A.I. and is more a hack than an elegant
algorithm. It should be updated in case bibupload/bibsched are modified
in incompatible ways.
This function return the intbitset of all the records that are being
(or are scheduled to be) touched by other bibuploads.
"""
options = run_sql("""SELECT arguments FROM schTASK WHERE status<>'DONE' AND
proc='bibupload' AND (status='RUNNING' OR status='CONTINUING' OR
status='WAITING' OR status='SCHEDULED' OR status='ABOUT TO STOP' OR
status='ABOUT TO SLEEP')""")
ret = intbitset()
xmls = []
if options:
for arguments in options:
arguments = marshal.loads(arguments[0])
for argument in arguments[1:]:
if argument.startswith('/'):
# XMLs files are recognizable because they're absolute
# files...
xmls.append(argument)
for xmlfile in xmls:
# Let's grep for the 001
try:
xml = open(xmlfile).read()
ret += [int(group[1]) for group in _re_find_001.findall(xml)]
except:
continue
return ret
### bibupload engine functions:
def bibupload(record, opt_mode=None, opt_notimechange=0, oai_rec_id="", pretend=False,
tmp_ids=None, tmp_vers=None):
"""Main function: process a record and fit it in the tables
bibfmt, bibrec, bibrec_bibxxx, bibxxx with proper record
metadata.
Return (error_code, recID) of the processed record.
"""
if tmp_ids is None:
tmp_ids = {}
if tmp_vers is None:
tmp_vers = {}
if opt_mode == 'reference':
## NOTE: reference mode has been deprecated in favour of 'correct'
opt_mode = 'correct'
assert(opt_mode in CFG_BIBUPLOAD_OPT_MODES)
try:
record_xml_output(record).decode('utf-8')
except UnicodeDecodeError:
msg = " Failed: Invalid utf-8 characters."
write_message(msg, verbose=1, stream=sys.stderr)
return (1, -1, msg)
error = None
affected_tags = {}
original_record = {}
rec_old = {}
now = datetime.now() # will hold record creation/modification date
record_had_altered_bit = False
is_opt_mode_delete = False
# Extraction of the Record Id from 001, SYSNO or OAIID or DOI tags:
rec_id = retrieve_rec_id(record, opt_mode, pretend=pretend)
if rec_id == -1:
msg = " Failed: either the record already exists and insert was " \
"requested or the record does not exists and " \
"replace/correct/append has been used"
write_message(msg, verbose=1, stream=sys.stderr)
return (1, -1, msg)
elif rec_id > 0:
write_message(" -Retrieve record ID (found %s): DONE." % rec_id, verbose=2)
(unique_p, msg) = check_record_doi_is_unique(rec_id, record)
if not unique_p:
write_message(msg, verbose=1, stream=sys.stderr)
return (1, int(rec_id), msg)
if not record.has_key('001'):
# Found record ID by means of SYSNO or OAIID or DOI, and the
# input MARCXML buffer does not have this 001 tag, so we
# should add it now:
error = record_add_field(record, '001', controlfield_value=rec_id)
if error is None:
msg = " Failed: Error during adding the 001 controlfield " \
"to the record"
write_message(msg, verbose=1, stream=sys.stderr)
return (1, int(rec_id), msg)
else:
error = None
write_message(" -Added tag 001: DONE.", verbose=2)
write_message(" -Check if the xml marc file is already in the database: DONE" , verbose=2)
record_deleted_p = False
if opt_mode == 'insert' or \
(opt_mode == 'replace_or_insert') and rec_id is None:
insert_mode_p = True
# Insert the record into the bibrec databases to have a recordId
rec_id = create_new_record(pretend=pretend)
write_message(" -Creation of a new record id (%d): DONE" % rec_id, verbose=2)
# we add the record Id control field to the record
error = record_add_field(record, '001', controlfield_value=rec_id)
if error is None:
msg = " Failed: Error during adding the 001 controlfield " \
"to the record"
write_message(msg, verbose=1, stream=sys.stderr)
return (1, int(rec_id), msg)
else:
error = None
if '005' not in record:
error = record_add_field(record, '005', controlfield_value=now.strftime("%Y%m%d%H%M%S.0"))
if error is None:
msg = " ERROR: during adding to 005 controlfield to record"
write_message(msg, verbose=1, stream=sys.stderr)
return (1, int(rec_id), msg)
else:
error = None
else:
write_message(" Note: 005 already existing upon inserting of new record. Keeping it.", verbose=2)
elif opt_mode != 'insert':
insert_mode_p = False
# Update Mode
# Retrieve the old record to update
rec_old = get_record(rec_id)
record_had_altered_bit = record_get_field_values(rec_old, CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG[:3], CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG[3], CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG[4], CFG_OAI_PROVENANCE_ALTERED_SUBFIELD)
# Also save a copy to restore previous situation in case of errors
original_record = get_record(rec_id)
if rec_old is None:
msg = " Failed during the creation of the old record!"
write_message(msg, verbose=1, stream=sys.stderr)
return (1, int(rec_id), msg)
else:
write_message(" -Retrieve the old record to update: DONE", verbose=2)
# flag to check whether the revisions have been verified and patch generated.
# If revision verification failed, then we need to manually identify the affected tags
# and process them
revision_verified = False
rev_verifier = RevisionVerifier()
#check for revision conflicts before updating record
if record_has_field(record, '005') and not CFG_BIBUPLOAD_DISABLE_RECORD_REVISIONS:
write_message(" -Upload Record has 005. Verifying Revision", verbose=2)
try:
rev_res = rev_verifier.verify_revision(record, original_record, opt_mode)
if rev_res:
opt_mode = rev_res[0]
record = rev_res[1]
affected_tags = rev_res[2]
revision_verified = True
write_message(lambda: " -Patch record generated. Changing opt_mode to correct.\nPatch:\n%s " % record_xml_output(record), verbose=2)
else:
write_message(" -No Patch Record.", verbose=2)
except InvenioBibUploadUnchangedRecordError, err:
msg = " -ISSUE: %s" % err
write_message(msg, verbose=1, stream=sys.stderr)
write_message(msg, " Continuing anyway in case there are FFT or other tags")
except InvenioBibUploadConflictingRevisionsError, err:
msg = " -ERROR: Conflicting Revisions - %s" % err
write_message(msg, verbose=1, stream=sys.stderr)
submit_ticket_for_holding_pen(rec_id, err, "Conflicting Revisions. Inserting record into holding pen.", pretend=pretend)
insert_record_into_holding_pen(record, str(rec_id), pretend=pretend)
return (2, int(rec_id), msg)
except InvenioBibUploadInvalidRevisionError, err:
msg = " -ERROR: Invalid Revision - %s" % err
write_message(msg)
submit_ticket_for_holding_pen(rec_id, err, "Invalid Revisions. Inserting record into holding pen.", pretend=pretend)
insert_record_into_holding_pen(record, str(rec_id), pretend=pretend)
return (2, int(rec_id), msg)
except InvenioBibUploadMissing005Error, err:
msg = " -ERROR: Missing 005 - %s" % err
write_message(msg)
submit_ticket_for_holding_pen(rec_id, err, "Missing 005. Inserting record into holding pen.", pretend=pretend)
insert_record_into_holding_pen(record, str(rec_id), pretend=pretend)
return (2, int(rec_id), msg)
else:
write_message(" - No 005 Tag Present. Resuming normal flow.", verbose=2)
# dictionaries to temporarily hold original recs tag-fields
existing_tags = {}
retained_tags = {}
# in case of delete operation affected tags should be deleted in delete_bibrec_bibxxx
# but should not be updated again in STAGE 4
# utilising the below flag
is_opt_mode_delete = False
if not revision_verified:
# either 005 was not present or opt_mode was not correct/replace
# in this case we still need to find out affected tags to process
write_message(" - Missing 005 or opt_mode!=Replace/Correct.Revision Verifier not called.", verbose=2)
# Identify affected tags
if opt_mode == 'correct' or opt_mode == 'replace' or opt_mode == 'replace_or_insert':
rec_diff = rev_verifier.compare_records(record, original_record, opt_mode)
affected_tags = rev_verifier.retrieve_affected_tags_with_ind(rec_diff)
elif opt_mode == 'delete':
# populate an intermediate dictionary
# used in upcoming step related to 'delete' mode
is_opt_mode_delete = True
for tag, fields in original_record.iteritems():
existing_tags[tag] = [tag + (field[1] != ' ' and field[1] or '_') + (field[2] != ' ' and field[2] or '_') for field in fields]
elif opt_mode == 'append':
for tag, fields in record.iteritems():
if tag not in CFG_BIBUPLOAD_CONTROLFIELD_TAGS:
affected_tags[tag] = [(field[1], field[2]) for field in fields]
# In Replace mode, take over old strong tags if applicable:
if opt_mode == 'replace' or \
opt_mode == 'replace_or_insert':
copy_strong_tags_from_old_record(record, rec_old)
# Delete tags to correct in the record
if opt_mode == 'correct':
delete_tags_to_correct(record, rec_old)
write_message(" -Delete the old tags to correct in the old record: DONE",
verbose=2)
# Delete tags specified if in delete mode
if opt_mode == 'delete':
record = delete_tags(record, rec_old)
for tag, fields in record.iteritems():
retained_tags[tag] = [tag + (field[1] != ' ' and field[1] or '_') + (field[2] != ' ' and field[2] or '_') for field in fields]
#identify the tags that have been deleted
for tag in existing_tags.keys():
if tag not in retained_tags:
for item in existing_tags[tag]:
tag_to_add = item[0:3]
ind1, ind2 = item[3], item[4]
if tag_to_add in affected_tags and (ind1, ind2) not in affected_tags[tag_to_add]:
affected_tags[tag_to_add].append((ind1, ind2))
else:
affected_tags[tag_to_add] = [(ind1, ind2)]
else:
deleted = list(set(existing_tags[tag]) - set(retained_tags[tag]))
for item in deleted:
tag_to_add = item[0:3]
ind1, ind2 = item[3], item[4]
if tag_to_add in affected_tags and (ind1, ind2) not in affected_tags[tag_to_add]:
affected_tags[tag_to_add].append((ind1, ind2))
else:
affected_tags[tag_to_add] = [(ind1, ind2)]
write_message(" -Delete specified tags in the old record: DONE", verbose=2)
# Append new tag to the old record and update the new record with the old_record modified
if opt_mode == 'append' or opt_mode == 'correct':
record = append_new_tag_to_old_record(record, rec_old)
write_message(" -Append new tags to the old record: DONE", verbose=2)
write_message(" -Affected Tags found after comparing upload and original records: %s"%(str(affected_tags)), verbose=2)
        # The 005 tag should be added every time the record is modified.
        # If an existing record is modified, its 005 tag should be overwritten with a new revision value.
if record.has_key('005'):
record_delete_field(record, '005')
write_message(" Deleted the existing 005 tag.", verbose=2)
last_revision = run_sql("SELECT MAX(job_date) FROM hstRECORD WHERE id_bibrec=%s", (rec_id, ))[0][0]
if last_revision and last_revision.strftime("%Y%m%d%H%M%S.0") == now.strftime("%Y%m%d%H%M%S.0"):
## We are updating the same record within the same seconds! It's less than
## the minimal granularity. Let's pause for 1 more second to take a breath :-)
time.sleep(1)
now = datetime.now()
error = record_add_field(record, '005', controlfield_value=now.strftime("%Y%m%d%H%M%S.0"))
if error is None:
write_message(" Failed: Error during adding to 005 controlfield to record", verbose=1, stream=sys.stderr)
return (1, int(rec_id))
else:
error=None
write_message(lambda: " -Added tag 005: DONE. " + str(record_get_field_value(record, '005', '', '')), verbose=2)
# adding 005 to affected tags will delete the existing 005 entry
# and update with the latest timestamp.
if '005' not in affected_tags:
affected_tags['005'] = [(' ', ' ')]
write_message(" -Stage COMPLETED", verbose=2)
record_deleted_p = False
try:
if not record_is_valid(record):
msg = "ERROR: record is not valid"
write_message(msg, verbose=1, stream=sys.stderr)
return (1, -1, msg)
# Have a look if we have FFT tags
write_message("Stage 2: Start (Process FFT tags if exist).", verbose=2)
record_had_FFT = False
bibrecdocs = None
if extract_tag_from_record(record, 'FFT') is not None:
record_had_FFT = True
if not writing_rights_p():
msg = "ERROR: no rights to write fulltext files"
write_message(" Stage 2 failed: %s" % msg,
verbose=1, stream=sys.stderr)
raise StandardError(msg)
try:
bibrecdocs = BibRecDocs(rec_id)
record = elaborate_fft_tags(record, rec_id, opt_mode,
pretend=pretend, tmp_ids=tmp_ids,
tmp_vers=tmp_vers, bibrecdocs=bibrecdocs)
except Exception, e:
register_exception()
msg = " Stage 2 failed: ERROR: while elaborating FFT tags: %s" % e
write_message(msg, verbose=1, stream=sys.stderr)
return (1, int(rec_id), msg)
if record is None:
msg = " Stage 2 failed: ERROR: while elaborating FFT tags"
write_message(msg, verbose=1, stream=sys.stderr)
return (1, int(rec_id), msg)
write_message(" -Stage COMPLETED", verbose=2)
else:
write_message(" -Stage NOT NEEDED", verbose=2)
# Have a look if we have FFT tags
write_message("Stage 2B: Start (Synchronize 8564 tags).", verbose=2)
if record_had_FFT or extract_tag_from_record(record, '856') is not None:
try:
if bibrecdocs is None:
bibrecdocs = BibRecDocs(rec_id)
record = synchronize_8564(rec_id, record, record_had_FFT, bibrecdocs, pretend=pretend)
# in case if FFT is in affected list make appropriate changes
if ('4', ' ') not in affected_tags.get('856', []):
if '856' not in affected_tags:
affected_tags['856'] = [('4', ' ')]
elif ('4', ' ') not in affected_tags['856']:
affected_tags['856'].append(('4', ' '))
write_message(" -Modified field list updated with FFT details: %s" % str(affected_tags), verbose=2)
except Exception, e:
register_exception(alert_admin=True)
msg = " Stage 2B failed: ERROR: while synchronizing 8564 tags: %s" % e
write_message(msg, verbose=1, stream=sys.stderr)
return (1, int(rec_id), msg)
if record is None:
msg = " Stage 2B failed: ERROR: while synchronizing 8564 tags"
write_message(msg, verbose=1, stream=sys.stderr)
return (1, int(rec_id), msg)
write_message(" -Stage COMPLETED", verbose=2)
else:
write_message(" -Stage NOT NEEDED", verbose=2)
write_message("Stage 3: Start (Apply fields deletion requests).", verbose=2)
write_message(lambda: " Record before deletion:\n%s" % record_xml_output(record), verbose=9)
# remove fields with __DELETE_FIELDS__
# NOTE:creating a temporary deep copy of record for iteration to avoid RunTimeError
# RuntimeError due to change in dictionary size during iteration
tmp_rec = copy.deepcopy(record)
for tag in tmp_rec:
for data_tuple in record[tag]:
if (CFG_BIBUPLOAD_DELETE_CODE, CFG_BIBUPLOAD_DELETE_VALUE) in data_tuple[0]:
# delete the tag with particular indicator pairs from original record
record_delete_field(record, tag, data_tuple[1], data_tuple[2])
write_message(lambda: " Record after cleaning up fields to be deleted:\n%s" % record_xml_output(record), verbose=9)
if opt_mode == 'append':
write_message("Stage 3b: Drop duplicate fields in append mode.", verbose=2)
record = record_drop_duplicate_fields(record)
write_message(lambda: " Record after dropping duplicate fields:\n%s" % record_xml_output(record), verbose=9)
# Update of the BibFmt
write_message("Stage 4: Start (Update bibfmt).", verbose=2)
updates_exist = not records_identical(record, original_record)
if updates_exist:
# if record_had_altered_bit, this must be set to true, since the
# record has been altered.
if record_had_altered_bit:
oai_provenance_fields = record_get_field_instances(record, CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG[:3], CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG[3], CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG[4])
for oai_provenance_field in oai_provenance_fields:
for i, (code, dummy_value) in enumerate(oai_provenance_field[0]):
if code == CFG_OAI_PROVENANCE_ALTERED_SUBFIELD:
oai_provenance_field[0][i] = (code, 'true')
tmp_indicators = (CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG[3], CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG[4])
if tmp_indicators not in affected_tags.get(CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG[:3], []):
if CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG[:3] not in affected_tags:
affected_tags[CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG[:3]] = [tmp_indicators]
else:
affected_tags[CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG[:3]].append(tmp_indicators)
write_message(lambda: " Updates exists:\n%s\n!=\n%s" % (record, original_record), verbose=9)
# format the single record as xml
rec_xml_new = record_xml_output(record)
# Update bibfmt with the format xm of this record
modification_date = time.strftime('%Y-%m-%d %H:%M:%S', time.strptime(record_get_field_value(record, '005'), '%Y%m%d%H%M%S.0'))
error = update_bibfmt_format(rec_id, rec_xml_new, 'xm', modification_date, pretend=pretend)
if error == 1:
msg = " Failed: ERROR: during update_bibfmt_format 'xm'"
write_message(msg, verbose=1, stream=sys.stderr)
return (1, int(rec_id), msg)
if CFG_BIBUPLOAD_SERIALIZE_RECORD_STRUCTURE:
error = update_bibfmt_format(rec_id, marshal.dumps(record), 'recstruct', modification_date, pretend=pretend)
if error == 1:
msg = " Failed: ERROR: during update_bibfmt_format 'recstruct'"
write_message(msg, verbose=1, stream=sys.stderr)
return (1, int(rec_id), msg)
if not CFG_BIBUPLOAD_DISABLE_RECORD_REVISIONS:
# archive MARCXML format of this record for version history purposes:
error = archive_marcxml_for_history(rec_id, pretend=pretend)
if error == 1:
msg = " ERROR: Failed to archive MARCXML for history"
write_message(msg, verbose=1, stream=sys.stderr)
return (1, int(rec_id), msg)
else:
write_message(" -Archived MARCXML for history: DONE", verbose=2)
# delete some formats like HB upon record change:
if updates_exist or record_had_FFT:
for format_to_delete in CFG_BIBUPLOAD_DELETE_FORMATS:
try:
delete_bibfmt_format(rec_id, format_to_delete, pretend=pretend)
except:
# OK, some formats like HB could not have been deleted, no big deal
pass
write_message(" -Stage COMPLETED", verbose=2)
## Let's assert that one and only one 005 tag is existing at this stage.
assert len(record['005']) == 1
# Update the database MetaData
write_message("Stage 5: Start (Update the database with the metadata).",
verbose=2)
if insert_mode_p:
update_database_with_metadata(record, rec_id, oai_rec_id, pretend=pretend)
write_message(" -Stage COMPLETED", verbose=2)
elif opt_mode in ('replace', 'replace_or_insert',
'append', 'correct', 'delete') and updates_exist:
# now we clear all the rows from bibrec_bibxxx from the old
record_deleted_p = True
delete_bibrec_bibxxx(rec_old, rec_id, affected_tags, pretend=pretend)
# metadata update will insert tags that are available in affected_tags.
            # but for delete, once the tags have been deleted from bibrec_bibxxx, they don't have to be inserted
# except for 005.
if is_opt_mode_delete:
tmp_affected_tags = copy.deepcopy(affected_tags)
for tag in tmp_affected_tags:
if tag != '005':
affected_tags.pop(tag)
write_message(" -Clean bibrec_bibxxx: DONE", verbose=2)
update_database_with_metadata(record, rec_id, oai_rec_id, affected_tags, pretend=pretend)
write_message(" -Stage COMPLETED", verbose=2)
else:
write_message(" -Stage NOT NEEDED in mode %s" % opt_mode,
verbose=2)
record_deleted_p = False
# Finally we update the bibrec table with the current date
write_message("Stage 6: Start (Update bibrec table with current date).",
verbose=2)
if opt_notimechange == 0 and (updates_exist or record_had_FFT):
bibrec_now = convert_datestruct_to_datetext(time.localtime())
write_message(" -Retrieved current localtime: DONE", verbose=2)
update_bibrec_date(bibrec_now, rec_id, insert_mode_p, pretend=pretend)
write_message(" -Stage COMPLETED", verbose=2)
else:
write_message(" -Stage NOT NEEDED", verbose=2)
# Increase statistics
if insert_mode_p:
stat['nb_records_inserted'] += 1
else:
stat['nb_records_updated'] += 1
# Upload of this record finish
write_message("Record "+str(rec_id)+" DONE", verbose=1)
return (0, int(rec_id), "")
finally:
if record_deleted_p:
## BibUpload has failed living the record deleted. We should
## back the original record then.
update_database_with_metadata(original_record, rec_id, oai_rec_id, pretend=pretend)
write_message(" Restored original record", verbose=1, stream=sys.stderr)
def record_is_valid(record):
"""
Check if the record is valid. Currently this simply checks if the record
has exactly one rec_id.
@param record: the record
@type record: recstruct
@return: True if the record is valid
@rtype: bool
"""
rec_ids = record_get_field_values(record, tag="001")
if len(rec_ids) != 1:
write_message(" The record is not valid: it has not a single rec_id: %s" % (rec_ids), stream=sys.stderr)
return False
return True
def find_record_ids_by_oai_id(oaiId):
"""
    Find the record identifiers for a given OAI identifier.
    Returns an intbitset of record ids matching the given OAI identifier.
"""
# Is this record already in invenio (matching by oaiid)
if oaiId:
recids = search_pattern(p=oaiId, f=CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG, m='e')
# Is this record already in invenio (matching by reportnumber i.e.
# particularly 037. Idea: to avoid double insertions)
repnumber = oaiId.split(":")[-1]
if repnumber:
recids |= search_pattern(p = repnumber,
f = "reportnumber",
m = 'e' )
# Is this record already in invenio (matching by reportnumber i.e.
# particularly 037. Idea: to avoid double insertions)
repnumber = "arXiv:" + oaiId.split(":")[-1]
recids |= search_pattern(p = repnumber,
f = "reportnumber",
m = 'e' )
if CFG_INSPIRE_SITE or CFG_CERN_SITE:
## FIXME: for the time being this functionality is available only on INSPIRE
## or CDS, and waiting to be replaced by proper pidstore integration
if CFG_CERN_SITE:
return recids - (search_pattern(p='DELETED', f='980__%', m='e') | search_pattern(p='DUMMY', f='980__%', m='e'))
else:
return recids - search_pattern(p='DELETED', f='980__%', m='e')
else:
return recids
else:
return intbitset()
def bibupload_post_phase(record, mode=None, rec_id="", pretend=False,
tmp_ids=None, tmp_vers=None):
def _elaborate_tag(record, tag, fun):
if extract_tag_from_record(record, tag) is not None:
try:
record = fun()
except Exception, e:
register_exception()
write_message(" Stage failed: ERROR: while elaborating %s tags: %s" % (tag, e),
verbose=1, stream=sys.stderr)
return (1, int(rec_id)) # TODO: ?
if record is None:
write_message(" Stage failed: ERROR: while elaborating %s tags" % (tag, ),
verbose=1, stream=sys.stderr)
return (1, int(rec_id))
write_message(" -Stage COMPLETED", verbose=2)
else:
write_message(" -Stage NOT NEEDED", verbose=2)
if tmp_ids is None:
tmp_ids = {}
if tmp_vers is None:
tmp_vers = {}
_elaborate_tag(record, "BDR", lambda: elaborate_brt_tags(record, rec_id = rec_id,
mode = mode,
pretend = pretend,
tmp_ids = tmp_ids,
tmp_vers = tmp_vers))
_elaborate_tag(record, "BDM", lambda: elaborate_mit_tags(record, rec_id = rec_id,
mode = mode,
pretend = pretend,
tmp_ids = tmp_ids,
tmp_vers = tmp_vers))
def submit_ticket_for_holding_pen(rec_id, err, msg, pretend=False):
"""
Submit a ticket via BibCatalog to report about a record that has been put
into the Holding Pen.
@rec_id: the affected record
@err: the corresponding Exception
msg: verbose message
"""
from invenio import bibtask
from invenio.webuser import get_email_from_username, get_uid_from_email
user = task_get_task_param("user")
uid = None
if user:
try:
uid = get_uid_from_email(get_email_from_username(user))
except Exception, err:
write_message("WARNING: can't reliably retrieve uid for user %s: %s" % (user, err), stream=sys.stderr)
if check_bibcatalog():
text = """
%(msg)s found for record %(rec_id)s: %(err)s
See: <%(siteurl)s/record/edit/#state=edit&recid=%(rec_id)s>
BibUpload task information:
task_id: %(task_id)s
task_specific_name: %(task_specific_name)s
user: %(user)s
task_params: %(task_params)s
task_options: %(task_options)s""" % {
"msg": msg,
"rec_id": rec_id,
"err": err,
"siteurl": CFG_SITE_SECURE_URL,
"task_id": task_get_task_param("task_id"),
"task_specific_name": task_get_task_param("task_specific_name"),
"user": user,
"task_params": bibtask._TASK_PARAMS,
"task_options": bibtask._OPTIONS}
if not pretend:
BIBCATALOG_SYSTEM.ticket_submit(subject="%s: %s by %s" % (msg, rec_id, user), recordid=rec_id, text=text, queue=CFG_BIBUPLOAD_CONFLICTING_REVISION_TICKET_QUEUE, owner=uid)
def insert_record_into_holding_pen(record, oai_id, pretend=False):
query = "INSERT INTO bibHOLDINGPEN (oai_id, changeset_date, changeset_xml, id_bibrec) VALUES (%s, NOW(), %s, %s)"
xml_record = record_xml_output(record)
bibrec_ids = find_record_ids_by_oai_id(oai_id) # here determining the identifier of the record
if len(bibrec_ids) > 0:
bibrec_id = bibrec_ids.pop()
else:
# id not found by using the oai_id, let's use a wider search based
# on any information we might have.
bibrec_id = retrieve_rec_id(record, 'holdingpen', pretend=pretend)
if bibrec_id is None:
bibrec_id = 0
if not pretend:
run_sql(query, (oai_id, compress(xml_record), bibrec_id))
# record_id is logged as 0! ( We are not inserting into the main database)
log_record_uploading(oai_id, task_get_task_param('task_id', 0), 0, 'H', pretend=pretend)
stat['nb_holdingpen'] += 1
def print_out_bibupload_statistics():
"""Print the statistics of the process"""
out = "Task stats: %(nb_input)d input records, %(nb_updated)d updated, " \
"%(nb_inserted)d inserted, %(nb_errors)d errors, %(nb_holdingpen)d inserted to holding pen. " \
"Time %(nb_sec).2f sec." % { \
'nb_input': stat['nb_records_to_upload'],
'nb_updated': stat['nb_records_updated'],
'nb_inserted': stat['nb_records_inserted'],
'nb_errors': stat['nb_errors'],
'nb_holdingpen': stat['nb_holdingpen'],
'nb_sec': time.time() - time.mktime(stat['exectime']) }
write_message(out)
def open_marc_file(path):
"""Open a file and return the data"""
try:
# open the file containing the marc document
marc_file = open(path, 'r')
marc = marc_file.read()
marc_file.close()
except IOError, erro:
write_message("ERROR: %s" % erro, verbose=1, stream=sys.stderr)
if erro.errno == 2:
# No such file or directory
# Not scary
e = RecoverableError('File does not exist: %s' % path)
else:
e = StandardError('File not accessible: %s' % path)
raise e
return marc
def xml_marc_to_records(xml_marc):
"""create the records"""
# Creation of the records from the xml Marc in argument
xml_marc = wash_for_xml(xml_marc)
recs = create_records(xml_marc, 1, 1)
if recs == []:
msg = "ERROR: Cannot parse MARCXML file."
write_message(msg, verbose=1, stream=sys.stderr)
raise StandardError(msg)
elif recs[0][0] is None:
msg = "ERROR: MARCXML file has wrong format: %s" % recs
write_message(msg, verbose=1, stream=sys.stderr)
raise RecoverableError(msg)
else:
recs = map((lambda x:x[0]), recs)
return recs
def find_record_format(rec_id, bibformat):
"""Look whether record REC_ID is formatted in FORMAT,
i.e. whether FORMAT exists in the bibfmt table for this record.
Return the number of times it is formatted: 0 if not, 1 if yes,
2 if found more than once (should never occur).
"""
out = 0
query = """SELECT COUNT(*) FROM bibfmt WHERE id_bibrec=%s AND format=%s"""
params = (rec_id, bibformat)
res = []
res = run_sql(query, params)
out = res[0][0]
return out
def find_record_from_recid(rec_id):
"""
Try to find record in the database from the REC_ID number.
Return record ID if found, None otherwise.
"""
res = run_sql("SELECT id FROM bibrec WHERE id=%s",
(rec_id,))
if res:
return res[0][0]
else:
return None
def find_record_from_sysno(sysno):
"""
Try to find record in the database from the external SYSNO number.
Return record ID if found, None otherwise.
"""
bibxxx = 'bib'+CFG_BIBUPLOAD_EXTERNAL_SYSNO_TAG[0:2]+'x'
bibrec_bibxxx = 'bibrec_' + bibxxx
res = run_sql("""SELECT bb.id_bibrec FROM %(bibrec_bibxxx)s AS bb,
%(bibxxx)s AS b WHERE b.tag=%%s AND b.value=%%s
AND bb.id_bibxxx=b.id""" %
{'bibxxx': bibxxx,
'bibrec_bibxxx': bibrec_bibxxx},
(CFG_BIBUPLOAD_EXTERNAL_SYSNO_TAG, sysno,))
for recid in res:
if CFG_INSPIRE_SITE or CFG_CERN_SITE:
## FIXME: for the time being this functionality is available only on INSPIRE
## or CDS, and waiting to be replaced by proper pidstore integration
if record_exists(recid[0]) > 0: ## Only non deleted records
return recid[0]
else:
return recid[0]
return None
def find_records_from_extoaiid(extoaiid, extoaisrc=None):
"""
Try to find records in the database from the external EXTOAIID number.
Return list of record ID if found, None otherwise.
"""
assert(CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG[:5] == CFG_BIBUPLOAD_EXTERNAL_OAIID_PROVENANCE_TAG[:5])
bibxxx = 'bib'+CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG[0:2]+'x'
bibrec_bibxxx = 'bibrec_' + bibxxx
write_message(' Looking for extoaiid="%s" with extoaisrc="%s"' % (extoaiid, extoaisrc), verbose=9)
id_bibrecs = intbitset(run_sql("""SELECT bb.id_bibrec FROM %(bibrec_bibxxx)s AS bb,
%(bibxxx)s AS b WHERE b.tag=%%s AND b.value=%%s
AND bb.id_bibxxx=b.id""" %
{'bibxxx': bibxxx,
'bibrec_bibxxx': bibrec_bibxxx},
(CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG, extoaiid,)))
write_message(' Partially found %s for extoaiid="%s"' % (id_bibrecs, extoaiid), verbose=9)
ret = intbitset()
for id_bibrec in id_bibrecs:
if CFG_INSPIRE_SITE or CFG_CERN_SITE:
## FIXME: for the time being this functionality is available only on INSPIRE
## or CDS, and waiting to be replaced by proper pidstore integration
if record_exists(id_bibrec) < 1:
## We don't match not existing records
continue
record = get_record(id_bibrec)
instances = record_get_field_instances(record, CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG[0:3], CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG[3], CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG[4])
write_message(' recid %s -> instances "%s"' % (id_bibrec, instances), verbose=9)
for instance in instances:
this_extoaisrc = field_get_subfield_values(instance, CFG_BIBUPLOAD_EXTERNAL_OAIID_PROVENANCE_TAG[5])
this_extoaisrc = this_extoaisrc and this_extoaisrc[0] or None
this_extoaiid = field_get_subfield_values(instance, CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG[5])
this_extoaiid = this_extoaiid and this_extoaiid[0] or None
write_message(" this_extoaisrc -> %s, this_extoaiid -> %s" % (this_extoaisrc, this_extoaiid), verbose=9)
if this_extoaiid == extoaiid:
write_message(' recid %s -> provenance "%s"' % (id_bibrec, this_extoaisrc), verbose=9)
if this_extoaisrc == extoaisrc:
write_message('Found recid %s for extoaiid="%s" with provenance="%s"' % (id_bibrec, extoaiid, extoaisrc), verbose=9)
ret.add(id_bibrec)
break
if this_extoaisrc is None:
write_message('WARNING: Found recid %s for extoaiid="%s" that doesn\'t specify any provenance, while input record does.' % (id_bibrec, extoaiid), stream=sys.stderr)
if extoaisrc is None:
                    write_message('WARNING: Found recid %s for extoaiid="%s" that specifies a provenance (%s), while the input record does not have a provenance.' % (id_bibrec, extoaiid, this_extoaisrc), stream=sys.stderr)
return ret
def find_record_from_oaiid(oaiid):
"""
Try to find record in the database from the OAI ID number and OAI SRC.
Return record ID if found, None otherwise.
"""
bibxxx = 'bib'+CFG_OAI_ID_FIELD[0:2]+'x'
bibrec_bibxxx = 'bibrec_' + bibxxx
res = run_sql("""SELECT bb.id_bibrec FROM %(bibrec_bibxxx)s AS bb,
%(bibxxx)s AS b WHERE b.tag=%%s AND b.value=%%s
AND bb.id_bibxxx=b.id""" %
{'bibxxx': bibxxx,
'bibrec_bibxxx': bibrec_bibxxx},
(CFG_OAI_ID_FIELD, oaiid,))
for recid in res:
if CFG_INSPIRE_SITE or CFG_CERN_SITE:
## FIXME: for the time being this functionality is available only on INSPIRE
## or CDS, and waiting to be replaced by proper pidstore integration
if record_exists(recid[0]) > 0: ## Only non deleted records
return recid[0]
else:
return recid[0]
return None
def find_record_from_doi(doi):
"""
    Try to find a record in the database from the given DOI.
Return record ID if found, None otherwise.
"""
bibxxx = 'bib02x'
bibrec_bibxxx = 'bibrec_' + bibxxx
res = run_sql("""SELECT bb.id_bibrec, bb.field_number
FROM %(bibrec_bibxxx)s AS bb, %(bibxxx)s AS b
WHERE b.tag=%%s AND b.value=%%s
AND bb.id_bibxxx=b.id""" %
{'bibxxx': bibxxx,
'bibrec_bibxxx': bibrec_bibxxx},
('0247_a', doi,))
# For each of the result, make sure that it is really tagged as doi
for (id_bibrec, field_number) in res:
if CFG_INSPIRE_SITE or CFG_CERN_SITE:
## FIXME: for the time being this functionality is available only on INSPIRE
## or CDS, and waiting to be replaced by proper pidstore integration
if record_exists(id_bibrec) < 1:
                ## We don't match non-existing records
continue
res = run_sql("""SELECT bb.id_bibrec
FROM %(bibrec_bibxxx)s AS bb, %(bibxxx)s AS b
WHERE b.tag=%%s AND b.value=%%s
AND bb.id_bibxxx=b.id and bb.field_number=%%s and bb.id_bibrec=%%s""" %
{'bibxxx': bibxxx,
'bibrec_bibxxx': bibrec_bibxxx},
('0247_2', "doi", field_number, id_bibrec))
if res and res[0][0] == id_bibrec:
return res[0][0]
return None
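# Illustrative usage sketch (editor's note, not part of the original module):
# the matching helpers above take an external identifier and return either a
# record ID, an intbitset of IDs (for extoaiid), or None. All the values below
# are hypothetical.
#
#   recid = find_record_from_sysno('SPIRES-1234567')
#   recids = find_records_from_extoaiid('oai:arXiv.org:1234.5678', 'arXiv')
#   recid = find_record_from_oaiid('oai:example.org:42')
#   recid = find_record_from_doi('10.1234/example-doi')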
def extract_tag_from_record(record, tag_number):
""" Extract the tag_number for record."""
# first step verify if the record is not already in the database
if record:
return record.get(tag_number, None)
return None
def retrieve_rec_id(record, opt_mode, pretend=False, post_phase = False):
"""Retrieve the record Id from a record by using tag 001 or SYSNO or OAI ID or DOI
tag. opt_mod is the desired mode.
@param post_phase Tells if we are calling this method in the postprocessing phase. If true, we accept presence of 001 fields even in the insert mode
@type post_phase boolean
"""
rec_id = None
# 1st step: we look for the tag 001
tag_001 = extract_tag_from_record(record, '001')
if tag_001 is not None:
# We extract the record ID from the tag
rec_id = tag_001[0][3]
# if we are in insert mode => error
if opt_mode == 'insert' and not post_phase:
write_message(" Failed: tag 001 found in the xml" \
" submitted, you should use the option replace," \
" correct or append to replace an existing" \
" record. (-h for help)",
verbose=1, stream=sys.stderr)
return -1
else:
# we found the rec id and we are not in insert mode => continue
# we try to match rec_id against the database:
if find_record_from_recid(rec_id) is not None:
# okay, 001 corresponds to some known record
return int(rec_id)
elif opt_mode in ('replace', 'replace_or_insert'):
if task_get_option('force'):
# we found the rec_id but it's not in the system and we are
# requested to replace records. Therefore we create on the fly
                    # an empty record allocating the recid.
write_message(" WARNING: tag 001 found in the xml with"
" value %(rec_id)s, but rec_id %(rec_id)s does"
" not exist. Since the mode replace was"
" requested the rec_id %(rec_id)s is allocated"
" on-the-fly." % {"rec_id": rec_id},
stream=sys.stderr)
return create_new_record(rec_id=rec_id, pretend=pretend)
else:
# Since --force was not used we are going to raise an error
write_message(" Failed: tag 001 found in the xml"
" submitted with value %(rec_id)s. The"
" corresponding record however does not"
" exists. If you want to really create"
" such record, please use the --force"
" parameter when calling bibupload." % {
"rec_id": rec_id}, stream=sys.stderr)
return -1
else:
# The record doesn't exist yet. We shall have try to check
# the SYSNO or OAI or DOI id later.
write_message(" -Tag 001 value not found in database.",
verbose=9)
rec_id = None
else:
write_message(" -Tag 001 not found in the xml marc file.", verbose=9)
if rec_id is None:
# 2nd step we look for the SYSNO
sysnos = record_get_field_values(record,
CFG_BIBUPLOAD_EXTERNAL_SYSNO_TAG[0:3],
CFG_BIBUPLOAD_EXTERNAL_SYSNO_TAG[3:4] != "_" and \
CFG_BIBUPLOAD_EXTERNAL_SYSNO_TAG[3:4] or "",
CFG_BIBUPLOAD_EXTERNAL_SYSNO_TAG[4:5] != "_" and \
CFG_BIBUPLOAD_EXTERNAL_SYSNO_TAG[4:5] or "",
CFG_BIBUPLOAD_EXTERNAL_SYSNO_TAG[5:6])
if sysnos:
sysno = sysnos[0] # there should be only one external SYSNO
write_message(" -Checking if SYSNO " + sysno + \
" exists in the database", verbose=9)
# try to find the corresponding rec id from the database
rec_id = find_record_from_sysno(sysno)
if rec_id is not None:
# rec_id found
pass
else:
# The record doesn't exist yet. We will try to check
# external and internal OAI ids later.
write_message(" -Tag SYSNO value not found in database.",
verbose=9)
rec_id = None
else:
write_message(" -Tag SYSNO not found in the xml marc file.",
verbose=9)
if rec_id is None:
        # 3rd step we look for the external OAIID
extoai_fields = record_get_field_instances(record,
CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG[0:3],
CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG[3:4] != "_" and \
CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG[3:4] or "",
CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG[4:5] != "_" and \
CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG[4:5] or "")
if extoai_fields:
for field in extoai_fields:
extoaiid = field_get_subfield_values(field, CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG[5:6])
extoaisrc = field_get_subfield_values(field, CFG_BIBUPLOAD_EXTERNAL_OAIID_PROVENANCE_TAG[5:6])
if extoaiid:
extoaiid = extoaiid[0]
if extoaisrc:
extoaisrc = extoaisrc[0]
else:
extoaisrc = None
write_message(" -Checking if EXTOAIID %s (%s) exists in the database" % (extoaiid, extoaisrc), verbose=9)
# try to find the corresponding rec id from the database
rec_ids = find_records_from_extoaiid(extoaiid, extoaisrc)
if rec_ids:
# rec_id found
rec_id = rec_ids.pop()
break
else:
# The record doesn't exist yet. We will try to check
# OAI id later.
write_message(" -Tag EXTOAIID value not found in database.",
verbose=9)
rec_id = None
else:
write_message(" -Tag EXTOAIID not found in the xml marc file.", verbose=9)
if rec_id is None:
# 4th step we look for the OAI ID
oaiidvalues = record_get_field_values(record,
CFG_OAI_ID_FIELD[0:3],
CFG_OAI_ID_FIELD[3:4] != "_" and \
CFG_OAI_ID_FIELD[3:4] or "",
CFG_OAI_ID_FIELD[4:5] != "_" and \
CFG_OAI_ID_FIELD[4:5] or "",
CFG_OAI_ID_FIELD[5:6])
if oaiidvalues:
oaiid = oaiidvalues[0] # there should be only one OAI ID
write_message(" -Check if local OAI ID " + oaiid + \
" exist in the database", verbose=9)
# try to find the corresponding rec id from the database
rec_id = find_record_from_oaiid(oaiid)
if rec_id is not None:
# rec_id found
pass
else:
write_message(" -Tag OAI ID value not found in database.",
verbose=9)
rec_id = None
else:
write_message(" -Tag SYSNO not found in the xml marc file.",
verbose=9)
if rec_id is None:
# 5th step we look for the DOI.
record_dois = record_extract_dois(record)
matching_recids = set()
if record_dois:
# try to find the corresponding rec id from the database
for record_doi in record_dois:
possible_recid = find_record_from_doi(record_doi)
if possible_recid:
matching_recids.add(possible_recid)
if len(matching_recids) > 1:
# Oops, this record refers to DOI existing in multiple records.
# Dunno which one to choose.
write_message(" Failed: Multiple records found in the" \
" database %s that match the DOI(s) in the input" \
" MARCXML %s" % (repr(matching_recids), repr(record_dois)),
verbose=1, stream=sys.stderr)
return -1
elif len(matching_recids) == 1:
rec_id = matching_recids.pop()
if opt_mode == 'insert':
write_message(" Failed: DOI tag matching record #%s found in the xml" \
" submitted, you should use the option replace," \
" correct or append to replace an existing" \
" record. (-h for help)" % rec_id,
verbose=1, stream=sys.stderr)
return -1
else:
write_message(" - Tag DOI value not found in database.",
verbose=9)
rec_id = None
else:
write_message(" -Tag DOI not found in the xml marc file.",
verbose=9)
# Now we should have detected rec_id from SYSNO or OAIID
# tags. (None otherwise.)
if rec_id:
if opt_mode == 'insert':
write_message(" Failed: Record found in the database," \
" you should use the option replace," \
" correct or append to replace an existing" \
" record. (-h for help)",
verbose=1, stream=sys.stderr)
return -1
else:
if opt_mode != 'insert' and \
opt_mode != 'replace_or_insert':
write_message(" Failed: Record not found in the database."\
" Please insert the file before updating it."\
" (-h for help)", verbose=1, stream=sys.stderr)
return -1
return rec_id and int(rec_id) or None
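# Editor's note (illustrative sketch, not part of the original module):
# retrieve_rec_id resolves the target record in this order: tag 001, then
# SYSNO, then external OAI ID, then local OAI ID, then DOI. Assuming
# invenio.bibrecord.create_record is available, a minimal (hypothetical) use:
#
#   rec = create_record('<record><controlfield tag="001">123</controlfield></record>')[0]
#   rec_id = retrieve_rec_id(rec, 'replace')   # -> 123 if record 123 exists, -1 on error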
def check_record_doi_is_unique(rec_id, record):
"""
Check that DOI found in 'record' does not exist in any other
record than 'recid'.
Return (boolean, msg) where 'boolean' would be True if the DOI is
unique.
"""
record_dois = record_extract_dois(record)
if record_dois:
matching_recids = set()
for record_doi in record_dois:
possible_recid = find_record_from_doi(record_doi)
if possible_recid:
matching_recids.add(possible_recid)
if len(matching_recids) > 1:
# Oops, this record refers to DOI existing in multiple records.
msg = " Failed: Multiple records found in the" \
" database %s that match the DOI(s) in the input" \
" MARCXML %s" % (repr(matching_recids), repr(record_dois))
return (False, msg)
elif len(matching_recids) == 1:
matching_recid = matching_recids.pop()
if str(matching_recid) != str(rec_id):
# Oops, this record refers to DOI existing in a different record.
msg = " Failed: DOI(s) %s found in this record (#%s)" \
" already exist(s) in another other record (#%s)" % \
(repr(record_dois), rec_id, matching_recid)
return (False, msg)
return (True, "")
### Insert functions
def create_new_record(rec_id=None, pretend=False):
"""
Create new record in the database
@param rec_id: if specified the new record will have this rec_id.
@type rec_id: int
@return: the allocated rec_id
@rtype: int
    @note: in case of errors, None will be returned
"""
if rec_id is not None:
try:
rec_id = int(rec_id)
except (ValueError, TypeError), error:
write_message(" ERROR: during the creation_new_record function: %s "
% error, verbose=1, stream=sys.stderr)
return None
if run_sql("SELECT id FROM bibrec WHERE id=%s", (rec_id, )):
write_message(" ERROR: during the creation_new_record function: the requested rec_id %s already exists." % rec_id)
return None
if pretend:
if rec_id:
return rec_id
else:
return run_sql("SELECT max(id)+1 FROM bibrec")[0][0]
if rec_id is not None:
return run_sql("INSERT INTO bibrec (id, creation_date, modification_date) VALUES (%s, NOW(), NOW())", (rec_id, ))
else:
return run_sql("INSERT INTO bibrec (creation_date, modification_date) VALUES (NOW(), NOW())")
def insert_bibfmt(id_bibrec, marc, bibformat, modification_date='1970-01-01 00:00:00', pretend=False):
"""Insert the format in the table bibfmt"""
# compress the marc value
pickled_marc = compress(marc)
try:
time.strptime(modification_date, "%Y-%m-%d %H:%M:%S")
except ValueError:
modification_date = '1970-01-01 00:00:00'
query = """INSERT LOW_PRIORITY INTO bibfmt (id_bibrec, format, last_updated, value)
VALUES (%s, %s, %s, %s)"""
if not pretend:
row_id = run_sql(query, (id_bibrec, bibformat, modification_date, pickled_marc))
return row_id
else:
return 1
def insert_record_bibxxx(tag, value, pretend=False):
"""Insert the record into bibxxx"""
# determine into which table one should insert the record
table_name = 'bib'+tag[0:2]+'x'
# check if the tag, value combination exists in the table
query = """SELECT id,value FROM %s """ % table_name
query += """ WHERE tag=%s AND value=%s"""
params = (tag, value)
res = None
res = run_sql(query, params)
# Note: compare now the found values one by one and look for
# string binary equality (e.g. to respect lowercase/uppercase
# match), regardless of the charset etc settings. Ideally we
# could use a BINARY operator in the above SELECT statement, but
# we would have to check compatibility on various MySQLdb versions
# etc; this approach checks all matched values in Python, not in
# MySQL, which is less cool, but more conservative, so it should
# work better on most setups.
if res:
for row in res:
row_id = row[0]
row_value = row[1]
if row_value == value:
return (table_name, row_id)
# We got here only when the tag, value combination was not found,
# so it is now necessary to insert the tag, value combination into
# bibxxx table as new.
query = """INSERT INTO %s """ % table_name
query += """ (tag, value) values (%s , %s)"""
params = (tag, value)
if not pretend:
row_id = run_sql(query, params)
else:
return (table_name, 1)
return (table_name, row_id)
def insert_record_bibrec_bibxxx(table_name, id_bibxxx,
field_number, id_bibrec, pretend=False):
"""Insert the record into bibrec_bibxxx"""
# determine into which table one should insert the record
full_table_name = 'bibrec_'+ table_name
# insert the proper row into the table
query = """INSERT INTO %s """ % full_table_name
query += """(id_bibrec,id_bibxxx, field_number) values (%s , %s, %s)"""
params = (id_bibrec, id_bibxxx, field_number)
if not pretend:
res = run_sql(query, params)
else:
return 1
return res
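# Illustrative sketch (editor's addition, hypothetical tag/value): the two
# helpers above are meant to be used together -- first store the (tag, value)
# pair in the appropriate bibXXx table, then link it to the record:
#
#   table, bibxxx_id = insert_record_bibxxx('245__a', 'A hypothetical title')
#   insert_record_bibrec_bibxxx(table, bibxxx_id, 1, recid)   # recid assumed to exist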
def synchronize_8564(rec_id, record, record_had_FFT, bibrecdocs, pretend=False):
"""
Synchronize 8564_ tags and BibDocFile tables.
    This function directly manipulates the record parameter.
@type rec_id: positive integer
@param rec_id: the record identifier.
@param record: the record structure as created by bibrecord.create_record
@type record_had_FFT: boolean
    @param record_had_FFT: True if the incoming bibupload record used FFT
@return: the manipulated record (which is also modified as a side effect)
"""
def merge_marc_into_bibdocfile(field, pretend=False):
"""
Internal function that reads a single field and stores its content
in BibDocFile tables.
@param field: the 8564_ field containing a BibDocFile URL.
"""
write_message('Merging field: %s' % (field, ), verbose=9)
url = field_get_subfield_values(field, 'u')[:1] or field_get_subfield_values(field, 'q')[:1]
description = field_get_subfield_values(field, 'y')[:1]
comment = field_get_subfield_values(field, 'z')[:1]
if url:
recid, docname, docformat = decompose_bibdocfile_url(url[0])
if recid != rec_id:
write_message("INFO: URL %s is not pointing to a fulltext owned by this record (%s)" % (url, recid), stream=sys.stderr)
else:
try:
bibdoc = bibrecdocs.get_bibdoc(docname)
if description and not pretend:
bibdoc.set_description(description[0], docformat)
if comment and not pretend:
bibdoc.set_comment(comment[0], docformat)
except InvenioBibDocFileError:
## Apparently the referenced docname doesn't exist anymore.
## Too bad. Let's skip it.
write_message("WARNING: docname %s does not seem to exist for record %s. Has it been renamed outside FFT?" % (docname, recid), stream=sys.stderr)
def merge_bibdocfile_into_marc(field, subfields):
"""
Internal function that reads BibDocFile table entries referenced by
the URL in the given 8564_ field and integrate the given information
directly with the provided subfields.
@param field: the 8564_ field containing a BibDocFile URL.
@param subfields: the subfields corresponding to the BibDocFile URL
generated after BibDocFile tables.
"""
write_message('Merging subfields %s into field %s' % (subfields, field), verbose=9)
subfields = dict(subfields) ## We make a copy not to have side-effects
subfield_to_delete = []
for subfield_position, (code, value) in enumerate(field_get_subfield_instances(field)):
## For each subfield instance already existing...
if code in subfields:
## ...We substitute it with what is in BibDocFile tables
record_modify_subfield(record, '856', code, subfields[code],
subfield_position, field_position_global=field[4])
del subfields[code]
else:
## ...We delete it otherwise
subfield_to_delete.append(subfield_position)
subfield_to_delete.sort()
for counter, position in enumerate(subfield_to_delete):
## FIXME: Very hackish algorithm. Since deleting a subfield
            ## will alter the position of the following subfields, we
            ## take note of this and adjust further positions
## by using a counter.
record_delete_subfield_from(record, '856', position - counter,
field_position_global=field[4])
subfields = subfields.items()
subfields.sort()
for code, value in subfields:
## Let's add non-previously existing subfields
record_add_subfield_into(record, '856', code, value,
field_position_global=field[4])
def get_bibdocfile_managed_info():
"""
Internal function, returns a dictionary of
BibDocFile URL -> wanna-be subfields.
This information is retrieved from internal BibDoc
structures rather than from input MARC XML files
@rtype: mapping
@return: BibDocFile URL -> wanna-be subfields dictionary
"""
ret = {}
latest_files = bibrecdocs.list_latest_files(list_hidden=False)
for afile in latest_files:
url = afile.get_url()
ret[url] = {'u': url}
description = afile.get_description()
comment = afile.get_comment()
subformat = afile.get_subformat()
if description:
ret[url]['y'] = description
if comment:
ret[url]['z'] = comment
if subformat:
ret[url]['x'] = subformat
return ret
write_message("Synchronizing MARC of recid '%s' with:\n%s" % (rec_id, record), verbose=9)
tags856s = record_get_field_instances(record, '856', '%', '%')
write_message("Original 856%% instances: %s" % tags856s, verbose=9)
tags8564s_to_add = get_bibdocfile_managed_info()
write_message("BibDocFile instances: %s" % tags8564s_to_add, verbose=9)
positions_tags8564s_to_remove = []
for local_position, field in enumerate(tags856s):
if field[1] == '4' and field[2] == ' ':
write_message('Analysing %s' % (field, ), verbose=9)
for url in field_get_subfield_values(field, 'u') + field_get_subfield_values(field, 'q'):
if url in tags8564s_to_add:
# there exists a link in the MARC of the record and the connection exists in BibDoc tables
if record_had_FFT:
merge_bibdocfile_into_marc(field, tags8564s_to_add[url])
else:
merge_marc_into_bibdocfile(field, pretend=pretend)
del tags8564s_to_add[url]
break
elif bibdocfile_url_p(url) and decompose_bibdocfile_url(url)[0] == rec_id:
# The link exists and is potentially correct-looking link to a document
# moreover, it refers to current record id ... but it does not exist in
                    # internal BibDoc structures. This could have happened in the case of renaming a document
                    # or its removal. In both cases we have to remove the link... a new one will be created
positions_tags8564s_to_remove.append(local_position)
write_message("%s to be deleted and re-synchronized" % (field, ), verbose=9)
break
record_delete_fields(record, '856', positions_tags8564s_to_remove)
tags8564s_to_add = tags8564s_to_add.values()
tags8564s_to_add.sort()
## FIXME: we are not yet able to preserve the sorting
## of 8564 tags WRT FFT in BibUpload.
## See ticket #1606.
for subfields in tags8564s_to_add:
subfields = subfields.items()
subfields.sort()
record_add_field(record, '856', '4', ' ', subfields=subfields)
write_message('Final record: %s' % record, verbose=9)
return record
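# Editor's note (illustrative sketch): synchronize_8564 is typically called
# after FFT processing so that the 8564_ URL fields in the MARC reflect what is
# actually stored in the BibDocFile tables, e.g. (hypothetical recid/record):
#
#   bibrecdocs = BibRecDocs(recid)
#   record = synchronize_8564(recid, record, record_had_FFT=True,
#                             bibrecdocs=bibrecdocs)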
def _get_subfield_value(field, subfield_code, default=None):
res = field_get_subfield_values(field, subfield_code)
if res != [] and res != None:
return res[0]
else:
return default
def elaborate_mit_tags(record, rec_id, mode, pretend = False, tmp_ids = {},
tmp_vers = {}):
"""
    Process BDM tags carrying MoreInfo to be uploaded.
"""
tuple_list = extract_tag_from_record(record, 'BDM')
    # Now gathering information from BDM tags - to be processed later
write_message("Processing BDM entries of the record ")
recordDocs = BibRecDocs(rec_id)
if tuple_list:
for mit in record_get_field_instances(record, 'BDM', ' ', ' '):
relation_id = _get_subfield_value(mit, "r")
bibdoc_id = _get_subfield_value(mit, "i")
# checking for a possibly temporary ID
if not (bibdoc_id is None):
bibdoc_id = resolve_identifier(tmp_ids, bibdoc_id)
bibdoc_ver = _get_subfield_value(mit, "v")
if not (bibdoc_ver is None):
bibdoc_ver = resolve_identifier(tmp_vers, bibdoc_ver)
bibdoc_name = _get_subfield_value(mit, "n")
bibdoc_fmt = _get_subfield_value(mit, "f")
moreinfo_str = _get_subfield_value(mit, "m")
if bibdoc_id == None:
if bibdoc_name == None:
raise StandardError("Incorrect relation. Neither name nor identifier of the first obejct has been specified")
else:
# retrieving the ID based on the document name (inside current record)
# The document is attached to current record.
try:
bibdoc_id = recordDocs.get_docid(bibdoc_name)
except:
raise StandardError("BibDoc of a name %s does not exist within a record" % (bibdoc_name, ))
else:
if bibdoc_name != None:
write_message("WARNING: both name and id of the first document of a relation have been specified. Ignoring the name", stream=sys.stderr)
if (moreinfo_str is None or mode in ("replace", "correct")) and (not pretend):
MoreInfo(docid=bibdoc_id , version = bibdoc_ver,
docformat = bibdoc_fmt, relation = relation_id).delete()
if (not moreinfo_str is None) and (not pretend):
MoreInfo.create_from_serialised(moreinfo_str,
docid=bibdoc_id,
version = bibdoc_ver,
docformat = bibdoc_fmt,
relation = relation_id)
return record
def elaborate_brt_tags(record, rec_id, mode, pretend=False, tmp_ids = {}, tmp_vers = {}):
"""
Process BDR tags describing relations between existing objects
"""
tuple_list = extract_tag_from_record(record, 'BDR')
# Now gathering information from BDR tags - to be processed later
relations_to_create = []
write_message("Processing BDR entries of the record ")
recordDocs = BibRecDocs(rec_id) #TODO: check what happens if there is no record yet ! Will the class represent an empty set?
if tuple_list:
for brt in record_get_field_instances(record, 'BDR', ' ', ' '):
relation_id = _get_subfield_value(brt, "r")
bibdoc1_id = None
bibdoc1_name = None
bibdoc1_ver = None
bibdoc1_fmt = None
bibdoc2_id = None
bibdoc2_name = None
bibdoc2_ver = None
bibdoc2_fmt = None
if not relation_id:
bibdoc1_id = _get_subfield_value(brt, "i")
bibdoc1_name = _get_subfield_value(brt, "n")
if bibdoc1_id == None:
if bibdoc1_name == None:
raise StandardError("Incorrect relation. Neither name nor identifier of the first obejct has been specified")
else:
# retrieving the ID based on the document name (inside current record)
# The document is attached to current record.
try:
bibdoc1_id = recordDocs.get_docid(bibdoc1_name)
except:
raise StandardError("BibDoc of a name %s does not exist within a record" % \
(bibdoc1_name, ))
else:
# resolving temporary identifier
bibdoc1_id = resolve_identifier(tmp_ids, bibdoc1_id)
if bibdoc1_name != None:
write_message("WARNING: both name and id of the first document of a relation have been specified. Ignoring the name", stream=sys.stderr)
bibdoc1_ver = _get_subfield_value(brt, "v")
if not (bibdoc1_ver is None):
bibdoc1_ver = resolve_identifier(tmp_vers, bibdoc1_ver)
bibdoc1_fmt = _get_subfield_value(brt, "f")
bibdoc2_id = _get_subfield_value(brt, "j")
bibdoc2_name = _get_subfield_value(brt, "o")
if bibdoc2_id == None:
if bibdoc2_name == None:
raise StandardError("Incorrect relation. Neither name nor identifier of the second obejct has been specified")
else:
# retrieving the ID based on the document name (inside current record)
# The document is attached to current record.
try:
bibdoc2_id = recordDocs.get_docid(bibdoc2_name)
except:
raise StandardError("BibDoc of a name %s does not exist within a record" % (bibdoc2_name, ))
else:
bibdoc2_id = resolve_identifier(tmp_ids, bibdoc2_id)
if bibdoc2_name != None:
write_message("WARNING: both name and id of the first document of a relation have been specified. Ignoring the name", stream=sys.stderr)
bibdoc2_ver = _get_subfield_value(brt, "w")
if not (bibdoc2_ver is None):
bibdoc2_ver = resolve_identifier(tmp_vers, bibdoc2_ver)
bibdoc2_fmt = _get_subfield_value(brt, "g")
control_command = _get_subfield_value(brt, "d")
relation_type = _get_subfield_value(brt, "t")
if not relation_type and not relation_id:
raise StandardError("The relation type must be specified")
more_info = _get_subfield_value(brt, "m")
# the relation id might be specified in the case of updating
# MoreInfo table instead of other fields
rel_obj = None
if not relation_id:
rels = BibRelation.get_relations(rel_type = relation_type,
bibdoc1_id = bibdoc1_id,
bibdoc2_id = bibdoc2_id,
bibdoc1_ver = bibdoc1_ver,
bibdoc2_ver = bibdoc2_ver,
bibdoc1_fmt = bibdoc1_fmt,
bibdoc2_fmt = bibdoc2_fmt)
if len(rels) > 0:
rel_obj = rels[0]
relation_id = rel_obj.id
else:
rel_obj = BibRelation(rel_id=relation_id)
relations_to_create.append((relation_id, bibdoc1_id, bibdoc1_ver,
bibdoc1_fmt, bibdoc2_id, bibdoc2_ver,
bibdoc2_fmt, relation_type, more_info,
rel_obj, control_command))
record_delete_field(record, 'BDR', ' ', ' ')
if mode in ("insert", "replace_or_insert", "append", "correct", "replace"):
# now creating relations between objects based on the data
if not pretend:
for (relation_id, bibdoc1_id, bibdoc1_ver, bibdoc1_fmt,
bibdoc2_id, bibdoc2_ver, bibdoc2_fmt, rel_type,
more_info, rel_obj, control_command) in relations_to_create:
if rel_obj == None:
rel_obj = BibRelation.create(bibdoc1_id = bibdoc1_id,
bibdoc1_ver = bibdoc1_ver,
bibdoc1_fmt = bibdoc1_fmt,
bibdoc2_id = bibdoc2_id,
bibdoc2_ver = bibdoc2_ver,
bibdoc2_fmt = bibdoc2_fmt,
rel_type = rel_type)
relation_id = rel_obj.id
if mode in ("replace"):
# Clearing existing MoreInfo content
rel_obj.get_more_info().delete()
if more_info:
MoreInfo.create_from_serialised(more_info, relation = relation_id)
if control_command == "DELETE":
rel_obj.delete()
else:
write_message("BDR tag is not processed in the %s mode" % (mode, ))
return record
def elaborate_fft_tags(record, rec_id, mode, pretend=False,
tmp_ids = {}, tmp_vers = {}, bibrecdocs=None):
"""
    Process FFT tags that should contain $a with file paths or URLs
to get the fulltext from. This function enriches record with
proper 8564 URL tags, downloads fulltext files and stores them
into var/data structure where appropriate.
CFG_BIBUPLOAD_WGET_SLEEP_TIME defines time to sleep in seconds in
between URL downloads.
Note: if an FFT tag contains multiple $a subfields, we upload them
into different 856 URL tags in the metadata. See regression test
case test_multiple_fft_insert_via_http().
"""
# Let's define some handy sub procedure.
def _add_new_format(bibdoc, url, docformat, docname, doctype, newname, description, comment, flags, modification_date, pretend=False):
"""Adds a new format for a given bibdoc. Returns True when everything's fine."""
write_message('Add new format to %s url: %s, format: %s, docname: %s, doctype: %s, newname: %s, description: %s, comment: %s, flags: %s, modification_date: %s' % (repr(bibdoc), url, docformat, docname, doctype, newname, description, comment, flags, modification_date), verbose=9)
try:
if not url: # Not requesting a new url. Just updating comment & description
return _update_description_and_comment(bibdoc, docname, docformat, description, comment, flags, pretend=pretend)
try:
if not pretend:
bibdoc.add_file_new_format(url, description=description, comment=comment, flags=flags, modification_date=modification_date)
except StandardError, e:
write_message("('%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s') not inserted because format already exists (%s)." % (url, docformat, docname, doctype, newname, description, comment, flags, modification_date, e), stream=sys.stderr)
raise
except Exception, e:
write_message("ERROR: in adding '%s' as a new format because of: %s" % (url, e), stream=sys.stderr)
raise
return True
def _add_new_version(bibdoc, url, docformat, docname, doctype, newname, description, comment, flags, modification_date, pretend=False):
"""Adds a new version for a given bibdoc. Returns True when everything's fine."""
write_message('Add new version to %s url: %s, format: %s, docname: %s, doctype: %s, newname: %s, description: %s, comment: %s, flags: %s' % (repr(bibdoc), url, docformat, docname, doctype, newname, description, comment, flags), verbose=9)
try:
if not url:
return _update_description_and_comment(bibdoc, docname, docformat, description, comment, flags, pretend=pretend)
try:
if not pretend:
bibdoc.add_file_new_version(url, description=description, comment=comment, flags=flags, modification_date=modification_date)
except StandardError, e:
write_message("('%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s') not inserted because '%s'." % (url, docformat, docname, doctype, newname, description, comment, flags, modification_date, e), stream=sys.stderr)
raise
except Exception, e:
write_message("ERROR: in adding '%s' as a new version because of: %s" % (url, e), stream=sys.stderr)
raise
return True
def _update_description_and_comment(bibdoc, docname, docformat, description, comment, flags, pretend=False):
"""Directly update comments and descriptions."""
write_message('Just updating description and comment for %s with format %s with description %s, comment %s and flags %s' % (docname, docformat, description, comment, flags), verbose=9)
try:
if not pretend:
bibdoc.set_description(description, docformat)
bibdoc.set_comment(comment, docformat)
for flag in CFG_BIBDOCFILE_AVAILABLE_FLAGS:
if flag in flags:
bibdoc.set_flag(flag, docformat)
else:
bibdoc.unset_flag(flag, docformat)
except StandardError, e:
write_message("('%s', '%s', '%s', '%s', '%s') description and comment not updated because '%s'." % (docname, docformat, description, comment, flags, e))
raise
return True
def _process_document_moreinfos(more_infos, docname, version, docformat, mode):
if not mode in ('correct', 'append', 'replace_or_insert', 'replace', 'correct', 'insert'):
print "exited because the mode is incorrect"
return
docid = None
try:
docid = bibrecdocs.get_docid(docname)
except:
raise StandardError("MoreInfo: No document of a given name associated with the record")
if not version:
# We have to retrieve the most recent version ...
version = bibrecdocs.get_bibdoc(docname).get_latest_version()
doc_moreinfo_s, version_moreinfo_s, version_format_moreinfo_s, format_moreinfo_s = more_infos
if mode in ("replace", "replace_or_insert"):
if doc_moreinfo_s: #only if specified, otherwise do not touch
MoreInfo(docid = docid).delete()
if format_moreinfo_s: #only if specified... otherwise do not touch
MoreInfo(docid = docid, docformat = docformat).delete()
if not doc_moreinfo_s is None:
MoreInfo.create_from_serialised(ser_str = doc_moreinfo_s, docid = docid)
if not version_moreinfo_s is None:
MoreInfo.create_from_serialised(ser_str = version_moreinfo_s,
docid = docid, version = version)
if not version_format_moreinfo_s is None:
MoreInfo.create_from_serialised(ser_str = version_format_moreinfo_s,
docid = docid, version = version,
docformat = docformat)
if not format_moreinfo_s is None:
MoreInfo.create_from_serialised(ser_str = format_moreinfo_s,
docid = docid, docformat = docformat)
if mode == 'delete':
raise StandardError('FFT tag specified but bibupload executed in --delete mode')
tuple_list = extract_tag_from_record(record, 'FFT')
if tuple_list: # FFT Tags analysis
write_message("FFTs: "+str(tuple_list), verbose=9)
docs = {} # docnames and their data
for fft in record_get_field_instances(record, 'FFT', ' ', ' '):
            # First of all, we retrieve the potentially temporary identifiers...
            # even if the rest fails, we should include them in the dictionary
version = _get_subfield_value(fft, 'v', '')
            # checking if version is temporary... if so, filling a different variable
is_tmp_ver, bibdoc_tmpver = parse_identifier(version)
if is_tmp_ver:
version = None
else:
bibdoc_tmpver = None
if not version: #treating cases of empty string etc...
version = None
bibdoc_tmpid = field_get_subfield_values(fft, 'i')
if bibdoc_tmpid:
bibdoc_tmpid = bibdoc_tmpid[0]
else:
                bibdoc_tmpid = None
is_tmp_id, bibdoc_tmpid = parse_identifier(bibdoc_tmpid)
if not is_tmp_id:
bibdoc_tmpid = None
            # In the case of having temporary ids, we don't resolve them yet but signal that they have been used
# value -1 means that identifier has been declared but not assigned a value yet
if bibdoc_tmpid:
if bibdoc_tmpid in tmp_ids:
write_message("WARNING: the temporary identifier %s has been declared more than once. Ignoring the second occurance" % (bibdoc_tmpid, ), stream=sys.stderr)
else:
tmp_ids[bibdoc_tmpid] = -1
if bibdoc_tmpver:
if bibdoc_tmpver in tmp_vers:
write_message("WARNING: the temporary version identifier %s has been declared more than once. Ignoring the second occurance" % (bibdoc_tmpver, ), stream=sys.stderr)
else:
tmp_vers[bibdoc_tmpver] = -1
# Let's discover the type of the document
            # This is a legacy field and no particular check will be
            # enforced on it.
doctype = _get_subfield_value(fft, 't', 'Main') #Default is Main
# Let's discover the url.
url = field_get_subfield_values(fft, 'a')
if url:
url = url[0]
try:
check_valid_url(url)
except StandardError, e:
raise StandardError, "fft '%s' specifies in $a a location ('%s') with problems: %s" % (fft, url, e)
else:
url = ''
            #TODO: a lot of code could be made more compact using similar syntax; it should be more readable in the long run
            # maybe the right-hand side expressions look a bit cryptic, but the elaborate_fft function would be much clearer
if mode == 'correct' and doctype != 'FIX-MARC':
arg2 = ""
else:
arg2 = KEEP_OLD_VALUE
description = _get_subfield_value(fft, 'd', arg2)
# Let's discover the description
# description = field_get_subfield_values(fft, 'd')
# if description != []:
# description = description[0]
# else:
# if mode == 'correct' and doctype != 'FIX-MARC':
## If the user require to correct, and do not specify
## a description this means she really want to
## modify the description.
# description = ''
# else:
# description = KEEP_OLD_VALUE
# Let's discover the desired docname to be created/altered
name = field_get_subfield_values(fft, 'n')
if name:
## Let's remove undesired extensions
name = file_strip_ext(name[0] + '.pdf')
else:
if url:
name = get_docname_from_url(url)
elif mode != 'correct' and doctype != 'FIX-MARC':
raise StandardError, "WARNING: fft '%s' doesn't specifies either a location in $a or a docname in $n" % str(fft)
else:
continue
# Let's discover the desired new docname in case we want to change it
newname = field_get_subfield_values(fft, 'm')
if newname:
newname = file_strip_ext(newname[0] + '.pdf')
else:
newname = name
# Let's discover the desired format
docformat = field_get_subfield_values(fft, 'f')
if docformat:
docformat = normalize_format(docformat[0])
else:
if url:
docformat = guess_format_from_url(url)
else:
docformat = ""
# Let's discover the icon
icon = field_get_subfield_values(fft, 'x')
if icon != []:
icon = icon[0]
if icon != KEEP_OLD_VALUE:
try:
check_valid_url(icon)
except StandardError, e:
raise StandardError, "fft '%s' specifies in $x an icon ('%s') with problems: %s" % (fft, icon, e)
else:
icon = ''
# Let's discover the comment
comment = field_get_subfield_values(fft, 'z')
if comment != []:
comment = comment[0]
else:
if mode == 'correct' and doctype != 'FIX-MARC':
## See comment on description
comment = ''
else:
comment = KEEP_OLD_VALUE
# Let's discover the restriction
restriction = field_get_subfield_values(fft, 'r')
if restriction != []:
restriction = restriction[0]
else:
if mode == 'correct' and doctype != 'FIX-MARC':
## See comment on description
restriction = ''
else:
restriction = KEEP_OLD_VALUE
document_moreinfo = _get_subfield_value(fft, 'w')
version_moreinfo = _get_subfield_value(fft, 'p')
version_format_moreinfo = _get_subfield_value(fft, 'b')
format_moreinfo = _get_subfield_value(fft, 'u')
# Let's discover the timestamp of the file (if any)
timestamp = field_get_subfield_values(fft, 's')
if timestamp:
try:
timestamp = datetime(*(time.strptime(timestamp[0], "%Y-%m-%d %H:%M:%S")[:6]))
except ValueError:
write_message('WARNING: The timestamp is not in a good format, thus will be ignored. The format should be YYYY-MM-DD HH:MM:SS', stream=sys.stderr)
timestamp = ''
else:
timestamp = ''
flags = field_get_subfield_values(fft, 'o')
for flag in flags:
if flag not in CFG_BIBDOCFILE_AVAILABLE_FLAGS:
raise StandardError, "fft '%s' specifies a non available flag: %s" % (fft, flag)
if docs.has_key(name): # new format considered
(doctype2, newname2, restriction2, version2, urls, dummybibdoc_moreinfos2, dummybibdoc_tmpid2, dummybibdoc_tmpver2 ) = docs[name]
if doctype2 != doctype:
raise StandardError, "fft '%s' specifies a different doctype from previous fft with docname '%s'" % (str(fft), name)
if newname2 != newname:
raise StandardError, "fft '%s' specifies a different newname from previous fft with docname '%s'" % (str(fft), name)
if restriction2 != restriction:
raise StandardError, "fft '%s' specifies a different restriction from previous fft with docname '%s'" % (str(fft), name)
if version2 != version:
raise StandardError, "fft '%s' specifies a different version than the previous fft with docname '%s'" % (str(fft), name)
for (dummyurl2, format2, dummydescription2, dummycomment2, dummyflags2, dummytimestamp2) in urls:
if docformat == format2:
raise StandardError, "fft '%s' specifies a second file '%s' with the same format '%s' from previous fft with docname '%s'" % (str(fft), url, docformat, name)
if url or docformat:
urls.append((url, docformat, description, comment, flags, timestamp))
if icon:
urls.append((icon, icon[len(file_strip_ext(icon)):] + ';icon', description, comment, flags, timestamp))
else:
if url or docformat:
docs[name] = (doctype, newname, restriction, version, [(url, docformat, description, comment, flags, timestamp)], [document_moreinfo, version_moreinfo, version_format_moreinfo, format_moreinfo], bibdoc_tmpid, bibdoc_tmpver)
if icon:
docs[name][4].append((icon, icon[len(file_strip_ext(icon)):] + ';icon', description, comment, flags, timestamp))
elif icon:
docs[name] = (doctype, newname, restriction, version, [(icon, icon[len(file_strip_ext(icon)):] + ';icon', description, comment, flags, timestamp)], [document_moreinfo, version_moreinfo, version_format_moreinfo, format_moreinfo], bibdoc_tmpid, bibdoc_tmpver)
else:
docs[name] = (doctype, newname, restriction, version, [], [document_moreinfo, version_moreinfo, version_format_moreinfo, format_moreinfo], bibdoc_tmpid, bibdoc_tmpver)
write_message('Result of FFT analysis:\n\tDocs: %s' % (docs,), verbose=9)
# Let's remove all FFT tags
record_delete_field(record, 'FFT', ' ', ' ')
## Let's pre-download all the URLs to see if, in case of mode 'correct' or 'append'
## we can avoid creating a new revision.
for docname, (doctype, newname, restriction, version, urls, more_infos, bibdoc_tmpid, bibdoc_tmpver ) in docs.items():
downloaded_urls = []
try:
bibdoc = bibrecdocs.get_bibdoc(docname)
except InvenioBibDocFileError:
## A bibdoc with the given docname does not exists.
## So there is no chance we are going to revise an existing
## format with an identical file :-)
bibdoc = None
new_revision_needed = False
for url, docformat, description, comment, flags, timestamp in urls:
if url:
try:
downloaded_url = download_url(url, docformat)
write_message("%s saved into %s" % (url, downloaded_url), verbose=9)
except Exception, err:
write_message("ERROR: in downloading '%s' because of: %s" % (url, err), stream=sys.stderr)
raise
if mode == 'correct' and bibdoc is not None and not new_revision_needed:
downloaded_urls.append((downloaded_url, docformat, description, comment, flags, timestamp))
if not bibrecdocs.check_file_exists(downloaded_url, docformat):
new_revision_needed = True
else:
write_message("WARNING: %s is already attached to bibdoc %s for recid %s" % (url, docname, rec_id), stream=sys.stderr)
elif mode == 'append' and bibdoc is not None:
if not bibrecdocs.check_file_exists(downloaded_url, docformat):
downloaded_urls.append((downloaded_url, docformat, description, comment, flags, timestamp))
else:
write_message("WARNING: %s is already attached to bibdoc %s for recid %s" % (url, docname, rec_id), stream=sys.stderr)
else:
downloaded_urls.append((downloaded_url, docformat, description, comment, flags, timestamp))
else:
downloaded_urls.append(('', docformat, description, comment, flags, timestamp))
if mode == 'correct' and bibdoc is not None and not new_revision_needed:
                ## Since we don't need a new revision (because all the files
                ## that are being uploaded already exist with identical content)
## we can simply remove the urls but keep the other information
write_message("No need to add a new revision for docname %s for recid %s" % (docname, rec_id), verbose=2)
docs[docname] = (doctype, newname, restriction, version, [('', docformat, description, comment, flags, timestamp) for (dummy, docformat, description, comment, flags, timestamp) in downloaded_urls], more_infos, bibdoc_tmpid, bibdoc_tmpver)
for downloaded_url, dummy, dummy, dummy, dummy, dummy in downloaded_urls:
## Let's free up some space :-)
if downloaded_url and os.path.exists(downloaded_url):
os.remove(downloaded_url)
else:
if downloaded_urls or mode != 'append':
docs[docname] = (doctype, newname, restriction, version, downloaded_urls, more_infos, bibdoc_tmpid, bibdoc_tmpver)
else:
## In case we are in append mode and there are no urls to append
## we discard the whole FFT
del docs[docname]
if mode == 'replace': # First we erase previous bibdocs
if not pretend:
for bibdoc in bibrecdocs.list_bibdocs():
bibdoc.delete()
bibrecdocs.dirty = True
for docname, (doctype, newname, restriction, version, urls, more_infos, bibdoc_tmpid, bibdoc_tmpver) in docs.iteritems():
write_message("Elaborating olddocname: '%s', newdocname: '%s', doctype: '%s', restriction: '%s', urls: '%s', mode: '%s'" % (docname, newname, doctype, restriction, urls, mode), verbose=9)
if mode in ('insert', 'replace'): # new bibdocs, new docnames, new marc
if newname in bibrecdocs.get_bibdoc_names():
write_message("('%s', '%s') not inserted because docname already exists." % (newname, urls), stream=sys.stderr)
raise StandardError("('%s', '%s') not inserted because docname already exists." % (newname, urls), stream=sys.stderr)
try:
if not pretend:
bibdoc = bibrecdocs.add_bibdoc(doctype, newname)
bibdoc.set_status(restriction)
else:
bibdoc = None
except Exception, e:
write_message("('%s', '%s', '%s') not inserted because: '%s'." % (doctype, newname, urls, e), stream=sys.stderr)
raise e
for (url, docformat, description, comment, flags, timestamp) in urls:
assert(_add_new_format(bibdoc, url, docformat, docname, doctype, newname, description, comment, flags, timestamp, pretend=pretend))
elif mode == 'replace_or_insert': # to be thought as correct_or_insert
try:
bibdoc = bibrecdocs.get_bibdoc(docname)
found_bibdoc = True
except InvenioBibDocFileError:
found_bibdoc = False
else:
if doctype not in ('PURGE', 'DELETE', 'EXPUNGE', 'REVERT', 'FIX-ALL', 'FIX-MARC', 'DELETE-FILE'):
if newname != docname:
try:
if not pretend:
bibrecdocs.change_name(newname=newname, docid=bibdoc.id)
write_message(lambda: "After renaming: %s" % bibrecdocs, verbose=9)
except StandardError, e:
write_message('ERROR: in renaming %s to %s: %s' % (docname, newname, e), stream=sys.stderr)
raise
try:
bibdoc = bibrecdocs.get_bibdoc(newname)
found_bibdoc = True
except InvenioBibDocFileError:
found_bibdoc = False
else:
if doctype == 'PURGE':
if not pretend:
bibdoc.purge()
bibrecdocs.dirty = True
elif doctype == 'DELETE':
if not pretend:
bibdoc.delete()
bibrecdocs.dirty = True
elif doctype == 'EXPUNGE':
if not pretend:
bibdoc.expunge()
bibrecdocs.dirty = True
elif doctype == 'FIX-ALL':
if not pretend:
bibrecdocs.fix(docname)
elif doctype == 'FIX-MARC':
pass
elif doctype == 'DELETE-FILE':
if urls:
for (url, docformat, description, comment, flags, timestamp) in urls:
if not pretend:
bibdoc.delete_file(docformat, version)
elif doctype == 'REVERT':
try:
if not pretend:
bibdoc.revert(version)
except Exception, e:
write_message('(%s, %s) not correctly reverted: %s' % (newname, version, e), stream=sys.stderr)
raise
else:
if restriction != KEEP_OLD_VALUE:
if not pretend:
bibdoc.set_status(restriction)
# Since the docname already existed we have to first
# bump the version by pushing the first new file
# then pushing the other files.
if urls:
(first_url, first_format, first_description, first_comment, first_flags, first_timestamp) = urls[0]
other_urls = urls[1:]
assert(_add_new_version(bibdoc, first_url, first_format, docname, doctype, newname, first_description, first_comment, first_flags, first_timestamp, pretend=pretend))
for (url, docformat, description, comment, flags, timestamp) in other_urls:
assert(_add_new_format(bibdoc, url, docformat, docname, doctype, newname, description, comment, flags, timestamp, pretend=pretend))
## Let's refresh the list of bibdocs.
if not found_bibdoc:
if not pretend:
bibdoc = bibrecdocs.add_bibdoc(doctype, newname)
bibdoc.set_status(restriction)
for (url, docformat, description, comment, flags, timestamp) in urls:
assert(_add_new_format(bibdoc, url, docformat, docname, doctype, newname, description, comment, flags, timestamp))
elif mode == 'correct':
try:
bibdoc = bibrecdocs.get_bibdoc(docname)
found_bibdoc = True
except InvenioBibDocFileError:
found_bibdoc = False
else:
if doctype not in ('PURGE', 'DELETE', 'EXPUNGE', 'REVERT', 'FIX-ALL', 'FIX-MARC', 'DELETE-FILE'):
if newname != docname:
try:
if not pretend:
bibrecdocs.change_name(newname=newname, docid=bibdoc.id)
write_message(lambda: "After renaming: %s" % bibrecdocs, verbose=9)
except StandardError, e:
write_message('ERROR: in renaming %s to %s: %s' % (docname, newname, e), stream=sys.stderr)
raise
try:
bibdoc = bibrecdocs.get_bibdoc(newname)
found_bibdoc = True
except InvenioBibDocFileError:
found_bibdoc = False
else:
if doctype == 'PURGE':
if not pretend:
bibdoc.purge()
bibrecdocs.dirty = True
elif doctype == 'DELETE':
if not pretend:
bibdoc.delete()
bibrecdocs.dirty = True
elif doctype == 'EXPUNGE':
if not pretend:
bibdoc.expunge()
bibrecdocs.dirty = True
elif doctype == 'FIX-ALL':
if not pretend:
bibrecdocs.fix(newname)
elif doctype == 'FIX-MARC':
pass
elif doctype == 'DELETE-FILE':
if urls:
for (url, docformat, description, comment, flags, timestamp) in urls:
if not pretend:
bibdoc.delete_file(docformat, version)
elif doctype == 'REVERT':
try:
if not pretend:
bibdoc.revert(version)
except Exception, e:
write_message('(%s, %s) not correctly reverted: %s' % (newname, version, e), stream=sys.stderr)
raise
else:
if restriction != KEEP_OLD_VALUE:
if not pretend:
bibdoc.set_status(restriction)
if doctype and doctype != KEEP_OLD_VALUE:
if not pretend:
bibdoc.change_doctype(doctype)
if urls:
(first_url, first_format, first_description, first_comment, first_flags, first_timestamp) = urls[0]
other_urls = urls[1:]
assert(_add_new_version(bibdoc, first_url, first_format, docname, doctype, newname, first_description, first_comment, first_flags, first_timestamp, pretend=pretend))
for (url, docformat, description, comment, flags, timestamp) in other_urls:
assert(_add_new_format(bibdoc, url, docformat, docname, doctype, newname, description, comment, flags, timestamp, pretend=pretend))
if not found_bibdoc:
if doctype in ('PURGE', 'DELETE', 'EXPUNGE', 'FIX-ALL', 'FIX-MARC', 'DELETE-FILE', 'REVERT'):
write_message("('%s', '%s', '%s') not performed because '%s' docname didn't existed." % (doctype, newname, urls, docname), stream=sys.stderr)
raise StandardError
else:
if not pretend:
bibdoc = bibrecdocs.add_bibdoc(doctype, newname)
bibdoc.set_status(restriction)
for (url, docformat, description, comment, flags, timestamp) in urls:
assert(_add_new_format(bibdoc, url, docformat, docname, doctype, newname, description, comment, flags, timestamp))
elif mode == 'append':
found_bibdoc = False
try:
bibdoc = bibrecdocs.get_bibdoc(docname)
found_bibdoc = True
except InvenioBibDocFileError:
found_bibdoc = False
else:
for (url, docformat, description, comment, flags, timestamp) in urls:
assert(_add_new_format(bibdoc, url, docformat, docname, doctype, newname, description, comment, flags, timestamp, pretend=pretend))
if not found_bibdoc:
try:
if not pretend:
bibdoc = bibrecdocs.add_bibdoc(doctype, docname)
bibdoc.set_status(restriction)
for (url, docformat, description, comment, flags, timestamp) in urls:
assert(_add_new_format(bibdoc, url, docformat, docname, doctype, newname, description, comment, flags, timestamp))
except Exception, e:
register_exception()
write_message("('%s', '%s', '%s') not appended because: '%s'." % (doctype, newname, urls, e), stream=sys.stderr)
raise
if not pretend and doctype not in ('PURGE', 'DELETE', 'EXPUNGE'):
_process_document_moreinfos(more_infos, newname, version, urls and urls[0][1], mode)
# resolving temporary version and identifier
if bibdoc_tmpid:
if bibdoc_tmpid in tmp_ids and tmp_ids[bibdoc_tmpid] != -1:
write_message("WARNING: the temporary identifier %s has been declared more than once. Ignoring the second occurance" % (bibdoc_tmpid, ), stream=sys.stderr)
else:
tmp_ids[bibdoc_tmpid] = bibrecdocs.get_docid(docname)
if bibdoc_tmpver:
if bibdoc_tmpver in tmp_vers and tmp_vers[bibdoc_tmpver] != -1:
write_message("WARNING: the temporary version identifier %s has been declared more than once. Ignoring the second occurance" % (bibdoc_tmpver, ), stream=sys.stderr)
else:
                    if version is None:
                        tmp_vers[bibdoc_tmpver] = bibrecdocs.get_bibdoc(docname).get_latest_version()
                    else:
                        tmp_vers[bibdoc_tmpver] = version
return record
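# Editor's note (illustrative, hypothetical path and values): an FFT field as
# consumed by elaborate_fft_tags could look like this in the incoming MARCXML;
# $a carries the file location, $n the docname, $t the doctype and $d a
# description:
#
#   <datafield tag="FFT" ind1=" " ind2=" ">
#     <subfield code="a">/tmp/example-fulltext.pdf</subfield>
#     <subfield code="n">fulltext</subfield>
#     <subfield code="t">Main</subfield>
#     <subfield code="d">A hypothetical description</subfield>
#   </datafield>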
### Update functions
def update_bibrec_date(now, bibrec_id, insert_mode_p, pretend=False):
"""Update the date of the record in bibrec table """
if insert_mode_p:
query = """UPDATE bibrec SET creation_date=%s, modification_date=%s WHERE id=%s"""
params = (now, now, bibrec_id)
else:
query = """UPDATE bibrec SET modification_date=%s WHERE id=%s"""
params = (now, bibrec_id)
if not pretend:
run_sql(query, params)
write_message(" -Update record creation/modification date: DONE" , verbose=2)
def update_bibfmt_format(id_bibrec, format_value, format_name, modification_date=None, pretend=False):
"""Update the format in the table bibfmt"""
if modification_date is None:
modification_date = time.strftime('%Y-%m-%d %H:%M:%S')
else:
try:
time.strptime(modification_date, "%Y-%m-%d %H:%M:%S")
except ValueError:
modification_date = '1970-01-01 00:00:00'
# We check if the format is already in bibFmt
nb_found = find_record_format(id_bibrec, format_name)
if nb_found == 1:
# we are going to update the format
# compress the format_value value
pickled_format_value = compress(format_value)
# update the format:
query = """UPDATE LOW_PRIORITY bibfmt SET last_updated=%s, value=%s WHERE id_bibrec=%s AND format=%s"""
params = (modification_date, pickled_format_value, id_bibrec, format_name)
if not pretend:
row_id = run_sql(query, params)
if not pretend and row_id is None:
write_message(" ERROR: during update_bibfmt_format function", verbose=1, stream=sys.stderr)
return 1
else:
write_message(" -Update the format %s in bibfmt: DONE" % format_name , verbose=2)
return 0
elif nb_found > 1:
write_message(" Failed: Same format %s found several time in bibfmt for the same record." % format_name, verbose=1, stream=sys.stderr)
return 1
else:
# Insert the format information in BibFMT
res = insert_bibfmt(id_bibrec, format_value, format_name, modification_date, pretend=pretend)
if res is None:
write_message(" ERROR: during insert_bibfmt", verbose=1, stream=sys.stderr)
return 1
else:
write_message(" -Insert the format %s in bibfmt: DONE" % format_name , verbose=2)
return 0
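# Illustrative sketch (editor's addition): storing the 'xm' (MARCXML) format of
# a record, where 'recid' and 'marcxml_string' are assumed to exist; the helper
# returns 0 on success and 1 on error:
#
#   err = update_bibfmt_format(recid, marcxml_string, 'xm')
#   if err:
#       write_message("bibfmt update failed", stream=sys.stderr)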
def delete_bibfmt_format(id_bibrec, format_name, pretend=False):
"""
    Delete format FORMAT_NAME from bibfmt table for record ID_BIBREC.
"""
if not pretend:
run_sql("DELETE LOW_PRIORITY FROM bibfmt WHERE id_bibrec=%s and format=%s", (id_bibrec, format_name))
return 0
def archive_marcxml_for_history(recID, pretend=False):
"""
Archive current MARCXML format of record RECID from BIBFMT table
into hstRECORD table. Useful to keep MARCXML history of records.
Return 0 if everything went fine. Return 1 otherwise.
"""
res = run_sql("SELECT id_bibrec, value, last_updated FROM bibfmt WHERE format='xm' AND id_bibrec=%s",
(recID,))
if res and not pretend:
run_sql("""INSERT INTO hstRECORD (id_bibrec, marcxml, job_id, job_name, job_person, job_date, job_details)
VALUES (%s,%s,%s,%s,%s,%s,%s)""",
(res[0][0], res[0][1], task_get_task_param('task_id', 0), 'bibupload', task_get_task_param('user', 'UNKNOWN'), res[0][2],
'mode: ' + task_get_option('mode', 'UNKNOWN') + '; file: ' + task_get_option('file_path', 'UNKNOWN') + '.'))
return 0
def update_database_with_metadata(record, rec_id, oai_rec_id="oai", affected_tags=None, pretend=False):
"""Update the database tables with the record and the record id given in parameter"""
    # Extract only those tags that have been affected.
    # The check happens at subfield level. This is to prevent the overhead
    # associated with inserting an already existing field with a given indicator pair.
write_message("update_database_with_metadata: record=%s, rec_id=%s, oai_rec_id=%s, affected_tags=%s" % (record, rec_id, oai_rec_id, affected_tags), verbose=9)
tmp_record = {}
if affected_tags:
for tag in record.keys():
if tag in affected_tags.keys():
write_message(" -Tag %s found to be modified.Setting up for update" % tag, verbose=9)
# initialize new list to hold affected field
new_data_tuple_list = []
for data_tuple in record[tag]:
ind1 = data_tuple[1]
ind2 = data_tuple[2]
if (ind1, ind2) in affected_tags[tag]:
write_message(" -Indicator pair (%s, %s) added to update list" % (ind1, ind2), verbose=9)
new_data_tuple_list.append(data_tuple)
tmp_record[tag] = new_data_tuple_list
write_message(lambda: " -Modified fields: \n%s" % record_xml_output(tmp_record), verbose=2)
else:
tmp_record = record
for tag in tmp_record.keys():
# check if tag is not a special one:
if tag not in CFG_BIBUPLOAD_SPECIAL_TAGS:
# for each tag there is a list of tuples representing datafields
tuple_list = tmp_record[tag]
# this list should contain the elements of a full tag [tag, ind1, ind2, subfield_code]
tag_list = []
tag_list.append(tag)
for single_tuple in tuple_list:
# these are the contents of a single tuple
subfield_list = single_tuple[0]
ind1 = single_tuple[1]
ind2 = single_tuple[2]
# append the ind's to the full tag
if ind1 == '' or ind1 == ' ':
tag_list.append('_')
else:
tag_list.append(ind1)
if ind2 == '' or ind2 == ' ':
tag_list.append('_')
else:
tag_list.append(ind2)
datafield_number = single_tuple[4]
if tag in CFG_BIBUPLOAD_SPECIAL_TAGS:
# nothing to do for special tags (FFT, BDR, BDM)
pass
elif tag in CFG_BIBUPLOAD_CONTROLFIELD_TAGS and tag != "001":
value = single_tuple[3]
# get the full tag
full_tag = ''.join(tag_list)
# update the tables
write_message(" insertion of the tag "+full_tag+" with the value "+value, verbose=9)
                    # insert the tag and value into bibxxx
(table_name, bibxxx_row_id) = insert_record_bibxxx(full_tag, value, pretend=pretend)
#print 'tname, bibrow', table_name, bibxxx_row_id;
if table_name is None or bibxxx_row_id is None:
write_message(" Failed: during insert_record_bibxxx", verbose=1, stream=sys.stderr)
# connect bibxxx and bibrec with the table bibrec_bibxxx
res = insert_record_bibrec_bibxxx(table_name, bibxxx_row_id, datafield_number, rec_id, pretend=pretend)
if res is None:
write_message(" Failed: during insert_record_bibrec_bibxxx", verbose=1, stream=sys.stderr)
else:
# get the tag and value from the content of each subfield
for subfield in subfield_list:
subtag = subfield[0]
value = subfield[1]
tag_list.append(subtag)
# get the full tag
full_tag = ''.join(tag_list)
# update the tables
write_message(" insertion of the tag "+full_tag+" with the value "+value, verbose=9)
                        # insert the tag and value into bibxxx
(table_name, bibxxx_row_id) = insert_record_bibxxx(full_tag, value, pretend=pretend)
if table_name is None or bibxxx_row_id is None:
write_message(" Failed: during insert_record_bibxxx", verbose=1, stream=sys.stderr)
# connect bibxxx and bibrec with the table bibrec_bibxxx
res = insert_record_bibrec_bibxxx(table_name, bibxxx_row_id, datafield_number, rec_id, pretend=pretend)
if res is None:
write_message(" Failed: during insert_record_bibrec_bibxxx", verbose=1, stream=sys.stderr)
# remove the subtag from the list
tag_list.pop()
tag_list.pop()
tag_list.pop()
tag_list.pop()
write_message(" -Update the database with metadata: DONE", verbose=2)
log_record_uploading(oai_rec_id, task_get_task_param('task_id', 0), rec_id, 'P', pretend=pretend)
def append_new_tag_to_old_record(record, rec_old):
"""Append new tags to a old record"""
def _append_tag(tag):
if tag in CFG_BIBUPLOAD_CONTROLFIELD_TAGS:
if tag == '001':
pass
else:
# if it is a controlfield, just access the value
for single_tuple in record[tag]:
controlfield_value = single_tuple[3]
# add the field to the old record
newfield_number = record_add_field(rec_old, tag,
controlfield_value=controlfield_value)
if newfield_number is None:
write_message(" ERROR: when adding the field"+tag, verbose=1, stream=sys.stderr)
else:
# For each tag there is a list of tuples representing datafields
for single_tuple in record[tag]:
# We retrieve the information of the tag
subfield_list = single_tuple[0]
ind1 = single_tuple[1]
ind2 = single_tuple[2]
if '%s%s%s' % (tag, ind1 == ' ' and '_' or ind1, ind2 == ' ' and '_' or ind2) in (CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG[:5], CFG_BIBUPLOAD_EXTERNAL_SYSNO_TAG[:5]):
## We don't want to append the external identifier
## if it is already existing.
if record_find_field(rec_old, tag, single_tuple)[0] is not None:
write_message(" Not adding tag: %s ind1=%s ind2=%s subfields=%s: it's already there" % (tag, ind1, ind2, subfield_list), verbose=9)
continue
# We add the datafield to the old record
write_message(" Adding tag: %s ind1=%s ind2=%s subfields=%s" % (tag, ind1, ind2, subfield_list), verbose=9)
newfield_number = record_add_field(rec_old, tag, ind1,
ind2, subfields=subfield_list)
if newfield_number is None:
write_message(" ERROR: when adding the field"+tag, verbose=1, stream=sys.stderr)
# Go through each tag in the appended record
for tag in record:
_append_tag(tag)
return rec_old
def copy_strong_tags_from_old_record(record, rec_old):
"""
Look for strong tags in RECORD and REC_OLD. If no strong tags are
found in RECORD, then copy them over from REC_OLD. This function
modifies RECORD structure on the spot.
"""
for strong_tag in CFG_BIBUPLOAD_STRONG_TAGS:
if not record_get_field_instances(record, strong_tag, strong_tag[3:4] or '%', strong_tag[4:5] or '%'):
strong_tag_old_field_instances = record_get_field_instances(rec_old, strong_tag)
if strong_tag_old_field_instances:
for strong_tag_old_field_instance in strong_tag_old_field_instances:
sf_vals, fi_ind1, fi_ind2, controlfield, dummy = strong_tag_old_field_instance
record_add_field(record, strong_tag, fi_ind1, fi_ind2, controlfield, sf_vals)
return
### Delete functions
def delete_tags(record, rec_old):
"""
Returns a record structure with all the fields in rec_old minus the
fields in record.
@param record: The record containing tags to delete.
@type record: record structure
@param rec_old: The original record.
@type rec_old: record structure
@return: The modified record.
@rtype: record structure
"""
returned_record = copy.deepcopy(rec_old)
for tag, fields in record.iteritems():
if tag in ('001', ):
continue
for field in fields:
local_position = record_find_field(returned_record, tag, field)[1]
if local_position is not None:
record_delete_field(returned_record, tag, field_position_local=local_position)
return returned_record
def delete_tags_to_correct(record, rec_old):
"""
Delete tags from REC_OLD which are also existing in RECORD. When
deleting, pay attention not only to tags, but also to indicators,
so that fields with the same tags but different indicators are not
deleted.
"""
## Some fields are controlled via provenance information.
## We should re-add saved fields at the end.
fields_to_readd = {}
for tag in CFG_BIBUPLOAD_CONTROLLED_PROVENANCE_TAGS:
if tag[:3] in record:
tmp_field_instances = record_get_field_instances(record, tag[:3], tag[3], tag[4]) ## Let's discover the provenance that will be updated
provenances_to_update = []
for instance in tmp_field_instances:
for code, value in instance[0]:
if code == tag[5]:
if value not in provenances_to_update:
provenances_to_update.append(value)
break
else:
## The provenance is not specified.
## let's add the special empty provenance.
if '' not in provenances_to_update:
provenances_to_update.append('')
potential_fields_to_readd = record_get_field_instances(rec_old, tag[:3], tag[3], tag[4]) ## Let's take all the field corresponding to tag
## Let's save apart all the fields that should be updated, but
## since they have a different provenance not mentioned in record
## they should be preserved.
fields = []
for sf_vals, ind1, ind2, dummy_cf, dummy_line in potential_fields_to_readd:
for code, value in sf_vals:
if code == tag[5]:
if value not in provenances_to_update:
fields.append(sf_vals)
break
else:
if '' not in provenances_to_update:
## Empty provenance, let's protect in any case
fields.append(sf_vals)
fields_to_readd[tag] = fields
# browse through all the tags from the MARCXML file:
for tag in record:
# check if the tag exists in the old record too:
if tag in rec_old and tag != '001':
# the tag does exist, so delete all record's tag+ind1+ind2 combinations from rec_old
for dummy_sf_vals, ind1, ind2, dummy_cf, dummyfield_number in record[tag]:
write_message(" Delete tag: " + tag + " ind1=" + ind1 + " ind2=" + ind2, verbose=9)
record_delete_field(rec_old, tag, ind1, ind2)
## Ok, we readd necessary fields!
for tag, fields in fields_to_readd.iteritems():
for sf_vals in fields:
write_message(" Adding tag: " + tag[:3] + " ind1=" + tag[3] + " ind2=" + tag[4] + " code=" + str(sf_vals), verbose=9)
record_add_field(rec_old, tag[:3], tag[3], tag[4], subfields=sf_vals)
def delete_bibrec_bibxxx(record, id_bibrec, affected_tags={}, pretend=False):
"""Delete the database record from the table bibxxx given in parameters"""
# we clear all the rows from bibrec_bibxxx from the old record
# clearing only those tags that have been modified.
write_message(lambda: "delete_bibrec_bibxxx(record=%s, id_bibrec=%s, affected_tags=%s)" % (record, id_bibrec, affected_tags), verbose=9)
for tag in affected_tags:
        # sanity check with record keys just to make sure it's fine.
if tag not in CFG_BIBUPLOAD_SPECIAL_TAGS:
write_message("%s found in record"%tag, verbose=2)
# for each name construct the bibrec_bibxxx table name
table_name = 'bib'+tag[0:2]+'x'
bibrec_table = 'bibrec_'+table_name
# delete all the records with proper id_bibrec. Indicators matter for individual affected tags
tmp_ind_1 = ''
tmp_ind_2 = ''
# construct exact tag value using indicators
for ind_pair in affected_tags[tag]:
if ind_pair[0] == ' ':
tmp_ind_1 = '_'
else:
tmp_ind_1 = ind_pair[0]
if ind_pair[1] == ' ':
tmp_ind_2 = '_'
else:
tmp_ind_2 = ind_pair[1]
                # need to escape in case of underscore so that mysql treats it as a char
tag_val = tag+"\\"+tmp_ind_1+"\\"+tmp_ind_2 + '%'
query = """DELETE br.* FROM `%s` br,`%s` b where br.id_bibrec=%%s and br.id_bibxxx=b.id and b.tag like %%s""" % (bibrec_table, table_name)
params = (id_bibrec, tag_val)
write_message(query % params, verbose=9)
if not pretend:
run_sql(query, params)
else:
write_message("%s not found"%tag, verbose=2)
def main():
"""Main that construct all the bibtask."""
task_init(authorization_action='runbibupload',
authorization_msg="BibUpload Task Submission",
description="""Receive MARC XML file and update appropriate database
tables according to options.
Examples:
$ bibupload -i input.xml
""",
help_specific_usage=""" -a, --append\t\tnew fields are appended to the existing record
-c, --correct\t\tfields are replaced by the new ones in the existing record, except
\t\t\twhen overridden by CFG_BIBUPLOAD_CONTROLLED_PROVENANCE_TAGS
-i, --insert\t\tinsert the new record in the database
-r, --replace\t\tthe existing record is entirely replaced by the new one,
\t\t\texcept for fields in CFG_BIBUPLOAD_STRONG_TAGS
-d, --delete\t\tspecified fields are deleted in existing record
-n, --notimechange\tdo not change record last modification date when updating
-o, --holdingpen\tInsert record into holding pen instead of the normal database
--pretend\t\tdo not really insert/append/correct/replace the input file
--force\t\twhen --replace, use provided 001 tag values, even if the matching
\t\t\trecord does not exist (thus allocating it on-the-fly)
--callback-url\tSend via a POST request a JSON-serialized answer (see admin guide), in
\t\t\torder to provide a feedback to an external service about the outcome of the operation.
--nonce\t\twhen used together with --callback add the nonce value in the JSON message.
--special-treatment=MODE\tif "oracle" is specified, when used together with --callback_url,
\t\t\tPOST an application/x-www-form-urlencoded request where the JSON message is encoded
\t\t\tinside a form field called "results".
""",
version=__revision__,
specific_params=("ircazdnoS:",
[
"insert",
"replace",
"correct",
"append",
"reference",
"delete",
"notimechange",
"holdingpen",
"pretend",
"force",
"callback-url=",
"nonce=",
"special-treatment=",
"stage=",
]),
task_submit_elaborate_specific_parameter_fnc=task_submit_elaborate_specific_parameter,
task_run_fnc=task_run_core,
task_submit_check_options_fnc=task_submit_check_options)
def task_submit_elaborate_specific_parameter(key, value, opts, args): # pylint: disable=W0613
""" Given the string key it checks it's meaning, eventually using the
value. Usually it fills some key in the options dict.
It must return True if it has elaborated the key, False, if it doesn't
know that key.
eg:
if key in ['-n', '--number']:
task_get_option(\1) = value
return True
return False
"""
# No time change option
if key in ("-n", "--notimechange"):
task_set_option('notimechange', 1)
# Insert mode option
elif key in ("-i", "--insert"):
if task_get_option('mode') == 'replace':
# if also replace found, then set to replace_or_insert
task_set_option('mode', 'replace_or_insert')
else:
task_set_option('mode', 'insert')
fix_argv_paths([args[0]])
task_set_option('file_path', os.path.abspath(args[0]))
# Replace mode option
elif key in ("-r", "--replace"):
if task_get_option('mode') == 'insert':
# if also insert found, then set to replace_or_insert
task_set_option('mode', 'replace_or_insert')
else:
task_set_option('mode', 'replace')
fix_argv_paths([args[0]])
task_set_option('file_path', os.path.abspath(args[0]))
# Holding pen mode option
elif key in ("-o", "--holdingpen"):
write_message("Holding pen mode", verbose=3)
task_set_option('mode', 'holdingpen')
fix_argv_paths([args[0]])
task_set_option('file_path', os.path.abspath(args[0]))
# Correct mode option
elif key in ("-c", "--correct"):
task_set_option('mode', 'correct')
fix_argv_paths([args[0]])
task_set_option('file_path', os.path.abspath(args[0]))
# Append mode option
elif key in ("-a", "--append"):
task_set_option('mode', 'append')
fix_argv_paths([args[0]])
task_set_option('file_path', os.path.abspath(args[0]))
# Deprecated reference mode option (now correct)
elif key in ("-z", "--reference"):
task_set_option('mode', 'correct')
fix_argv_paths([args[0]])
task_set_option('file_path', os.path.abspath(args[0]))
elif key in ("-d", "--delete"):
task_set_option('mode', 'delete')
fix_argv_paths([args[0]])
task_set_option('file_path', os.path.abspath(args[0]))
elif key in ("--pretend",):
task_set_option('pretend', True)
fix_argv_paths([args[0]])
task_set_option('file_path', os.path.abspath(args[0]))
elif key in ("--force",):
task_set_option('force', True)
fix_argv_paths([args[0]])
task_set_option('file_path', os.path.abspath(args[0]))
elif key in ("--callback-url", ):
task_set_option('callback_url', value)
elif key in ("--nonce", ):
task_set_option('nonce', value)
elif key in ("--special-treatment", ):
if value.lower() in CFG_BIBUPLOAD_ALLOWED_SPECIAL_TREATMENTS:
if value.lower() == 'oracle':
task_set_option('oracle_friendly', True)
else:
print >> sys.stderr, """The specified value is not in the list of allowed special treatments codes: %s""" % CFG_BIBUPLOAD_ALLOWED_SPECIAL_TREATMENTS
return False
elif key in ("-S", "--stage"):
print >> sys.stderr, """WARNING: the --stage parameter is deprecated and ignored."""
else:
return False
return True
def task_submit_check_options():
""" Reimplement this method for having the possibility to check options
before submitting the task, in order for example to provide default
values. It must return False if there are errors in the options.
"""
if task_get_option('mode') is None:
write_message("Please specify at least one update/insert mode!",
stream=sys.stderr)
return False
file_path = task_get_option('file_path')
if file_path is None:
write_message("Missing filename! -h for help.", stream=sys.stderr)
return False
try:
open(file_path).read().decode('utf-8')
except IOError:
write_message("""File is not accessible: %s""" % file_path,
stream=sys.stderr)
return False
except UnicodeDecodeError:
write_message("""File encoding is not valid utf-8: %s""" % file_path,
stream=sys.stderr)
return False
return True
def writing_rights_p():
"""Return True in case bibupload has the proper rights to write in the
fulltext file folder."""
if _WRITING_RIGHTS is not None:
return _WRITING_RIGHTS
try:
if not os.path.exists(CFG_BIBDOCFILE_FILEDIR):
os.makedirs(CFG_BIBDOCFILE_FILEDIR)
fd, filename = tempfile.mkstemp(suffix='.txt', prefix='test', dir=CFG_BIBDOCFILE_FILEDIR)
test = os.fdopen(fd, 'w')
test.write('TEST')
test.close()
if open(filename).read() != 'TEST':
raise IOError("Can not successfully write and readback %s" % filename)
os.remove(filename)
except:
register_exception(alert_admin=True)
return False
return True
def post_results_to_callback_url(results, callback_url):
write_message("Sending feedback to %s" % callback_url)
if not CFG_JSON_AVAILABLE:
from warnings import warn
warn("--callback-url used but simplejson/json not available")
return
json_results = json.dumps(results)
write_message("Message to send: %s" % json_results, verbose=9)
## <scheme>://<netloc>/<path>?<query>#<fragment>
scheme, dummynetloc, dummypath, dummyquery, dummyfragment = urlparse.urlsplit(callback_url)
## See: http://stackoverflow.com/questions/111945/is-there-any-way-to-do-http-put-in-python
if scheme == 'http':
opener = urllib2.build_opener(urllib2.HTTPHandler)
elif scheme == 'https':
opener = urllib2.build_opener(urllib2.HTTPSHandler)
else:
raise ValueError("Scheme not handled %s for callback_url %s" % (scheme, callback_url))
if task_get_option('oracle_friendly'):
write_message("Oracle friendly mode requested", verbose=9)
request = urllib2.Request(callback_url, data=urllib.urlencode({'results': json_results}))
request.add_header('Content-Type', 'application/x-www-form-urlencoded')
else:
request = urllib2.Request(callback_url, data=json_results)
request.add_header('Content-Type', 'application/json')
request.add_header('User-Agent', make_user_agent_string('BibUpload'))
write_message("Headers about to be sent: %s" % request.headers, verbose=9)
write_message("Data about to be sent: %s" % request.data, verbose=9)
res = opener.open(request)
msg = res.read()
write_message("Result of posting the feedback: %s %s" % (res.code, res.msg), verbose=9)
write_message("Returned message is: %s" % msg, verbose=9)
return res
def bibupload_records(records, opt_mode=None, opt_notimechange=0,
pretend=False, callback_url=None, results_for_callback=None):
"""perform the task of uploading a set of records
returns list of (error_code, recid) tuples for separate records
"""
#Dictionaries maintaining temporary identifiers
# Structure: identifier -> number
tmp_ids = {}
tmp_vers = {}
results = []
# The first phase -> assigning meaning to temporary identifiers
if opt_mode == 'reference':
## NOTE: reference mode has been deprecated in favour of 'correct'
opt_mode = 'correct'
record = None
for record in records:
record_id = record_extract_oai_id(record)
task_sleep_now_if_required(can_stop_too=True)
if opt_mode == "holdingpen":
#inserting into the holding pen
write_message("Inserting into holding pen", verbose=3)
insert_record_into_holding_pen(record, record_id)
else:
write_message("Inserting into main database", verbose=3)
error = bibupload(
record,
opt_mode = opt_mode,
opt_notimechange = opt_notimechange,
oai_rec_id = record_id,
pretend = pretend,
tmp_ids = tmp_ids,
tmp_vers = tmp_vers)
results.append(error)
if error[0] == 1:
if record:
write_message(lambda: record_xml_output(record),
stream=sys.stderr)
else:
write_message("Record could not have been parsed",
stream=sys.stderr)
stat['nb_errors'] += 1
if callback_url:
results_for_callback['results'].append({'recid': error[1], 'success': False, 'error_message': error[2]})
elif error[0] == 2:
if record:
write_message(lambda: record_xml_output(record),
stream=sys.stderr)
else:
write_message("Record could not have been parsed",
stream=sys.stderr)
if callback_url:
results_for_callback['results'].append({'recid': error[1], 'success': False, 'error_message': error[2]})
elif error[0] == 0:
if callback_url:
from invenio.search_engine import print_record
results_for_callback['results'].append({'recid': error[1], 'success': True, "marcxml": print_record(error[1], 'xm'), 'url': "%s/%s/%s" % (CFG_SITE_URL, CFG_SITE_RECORD, error[1])})
else:
if callback_url:
results_for_callback['results'].append({'recid': error[1], 'success': False, 'error_message': error[2]})
        # stat is a global variable
task_update_progress("Done %d out of %d." % \
(stat['nb_records_inserted'] + \
stat['nb_records_updated'],
stat['nb_records_to_upload']))
# Second phase -> Now we can process all entries where temporary identifiers might appear (BDR, BDM)
write_message("Identifiers table after processing: %s versions: %s" % (str(tmp_ids), str(tmp_vers)), verbose=2)
write_message("Uploading BDR and BDM fields")
if opt_mode != "holdingpen":
for record in records:
record_id = retrieve_rec_id(record, opt_mode, pretend=pretend, post_phase = True)
bibupload_post_phase(record,
rec_id = record_id,
mode = opt_mode,
pretend = pretend,
tmp_ids = tmp_ids,
tmp_vers = tmp_vers)
return results
def task_run_core():
""" Reimplement to add the body of the task."""
write_message("Input file '%s', input mode '%s'." %
(task_get_option('file_path'), task_get_option('mode')))
write_message("STAGE 0:", verbose=2)
if task_get_option('file_path') is not None:
write_message("start preocessing", verbose=3)
task_update_progress("Reading XML input")
recs = xml_marc_to_records(open_marc_file(task_get_option('file_path')))
stat['nb_records_to_upload'] = len(recs)
write_message(" -Open XML marc: DONE", verbose=2)
task_sleep_now_if_required(can_stop_too=True)
write_message("Entering records loop", verbose=3)
callback_url = task_get_option('callback_url')
results_for_callback = {'results': []}
if recs is not None:
# We proceed each record by record
bibupload_records(records=recs, opt_mode=task_get_option('mode'),
opt_notimechange=task_get_option('notimechange'),
pretend=task_get_option('pretend'),
callback_url=callback_url,
results_for_callback=results_for_callback)
else:
write_message(" ERROR: bibupload failed: No record found",
verbose=1, stream=sys.stderr)
callback_url = task_get_option("callback_url")
if callback_url:
nonce = task_get_option("nonce")
if nonce:
results_for_callback["nonce"] = nonce
post_results_to_callback_url(results_for_callback, callback_url)
if task_get_task_param('verbose') >= 1:
# Print out the statistics
print_out_bibupload_statistics()
    # Check if there were errors
return not stat['nb_errors'] >= 1
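# Illustrative sketch (not part of the original module): the JSON document
# POSTed to --callback-url by post_results_to_callback_url() is built from
# results_for_callback above, so a mixed outcome would look roughly like this
# (recids, URLs and the nonce are made-up values):
#
#   {"nonce": "42",
#    "results": [{"recid": 1, "success": true,
#                 "marcxml": "<record>...</record>",
#                 "url": "http://localhost/record/1"},
#                {"recid": 2, "success": false,
#                 "error_message": "Record could not be parsed"}]}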
def log_record_uploading(oai_rec_id, task_id, bibrec_id, insertion_db, pretend=False):
if oai_rec_id != "" and oai_rec_id != None:
query = """UPDATE oaiHARVESTLOG SET date_inserted=NOW(), inserted_to_db=%s, id_bibrec=%s WHERE oai_id = %s AND bibupload_task_id = %s ORDER BY date_harvested LIMIT 1"""
if not pretend:
run_sql(query, (str(insertion_db), str(bibrec_id), str(oai_rec_id), str(task_id), ))
if __name__ == "__main__":
main()
| gpl-2.0 | -7,043,113,552,886,814,000 | 48.031124 | 287 | 0.559182 | false |
javrasya/watchdog | run_tests.py | 1 | 1034 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import unittest
current_path = os.path.abspath(os.path.dirname(__file__))
tests_path = os.path.join(current_path, 'tests')
sys.path[0:0] = [
current_path,
tests_path,
]
all_tests = [f[:-3] for f in os.listdir(tests_path)
if f.startswith('test_') and f.endswith(".py")]
def get_suite(tests):
tests = sorted(tests)
suite = unittest.TestSuite()
loader = unittest.TestLoader()
for test in tests:
suite.addTest(loader.loadTestsFromName(test))
return suite
if __name__ == '__main__':
"""
To run all tests:
$ python run_tests.py
To run a single test:
$ python run_tests.py app
To run a couple of tests:
$ python run_tests.py app config sessions
To run code coverage:
$ coverage run run_tests.py
$ coverage report -m
"""
tests = sys.argv[1:]
if not tests:
tests = all_tests
tests = ['%s' % t for t in tests]
suite = get_suite(tests)
unittest.TextTestRunner(verbosity=2).run(suite)
| apache-2.0 | 3,825,927,468,567,544,000 | 23.046512 | 60 | 0.637331 | false |
gfarnadi/FairPSL | problems/performance_review/evaluation.py | 1 | 4029 |
def calculate(counts,result):
n1 = 0.0
n2 = 0.0
a = 0.0
c = 0.0
for f1,f2,d in counts:
f1f2 = max(f1+f2-1,0)
nf1f2 = max(-f1+f2,0)
n1 += f1f2
n2 += nf1f2
if d[0]:
a+= max(f1f2 - d[1],0)
c+= max(nf1f2 - d[1],0)
else:
if f1f2==1:
a+= 1-result[d[1]]
else:
a+= 0
if nf1f2==1:
c+= 1-result[d[1]]
else:
c+=0
if (a==n1):
p1=1
else:
p1 = (a/n1)
if (c==n2):
p2 =1
else:
p2 = (c/n2)
return p1,p2
def evaluate(result, counts, fairMeasureCode):
p1,p2 = calculate(counts,result)
if fairMeasureCode=='RR':
RR = p1/p2
return RR
elif fairMeasureCode == 'RD':
RD = p1-p2
return RD
elif fairMeasureCode =='RC':
RC = (1-p1)/(1-p2)
return RC
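# Minimal usage sketch (not part of the original file): `counts` is assumed to
# hold (f1, f2, d) rows where f1, f2 are truth values in [0, 1] and d is either
# (True, observed_value) or (False, variable_key); `result` maps variable keys
# to inferred truth values. With the toy data below p1 == p2 == 1, so the risk
# difference is zero:
#
#   counts = [(1, 1, (True, 0.0)), (0, 1, (True, 0.0))]
#   evaluate({}, counts, 'RD')  # -> 0.0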
def accuracy(dataPath, result, atoms):
employees = []
with open(dataPath+'employee.txt') as f:
for line in f:
line = line.strip()
if not line: continue
employees.append(line.split()[0])
vardic = atoms['promotion']
score = 0.0
for e in employees:
var = vardic[e][0]
if var in result:
predict = float(result[var])
truth = float(vardic[e][1])
if round(predict, 1)>=0.5:
if truth ==1.0:
score+=1.0
else:
if truth ==0.0:
score+=1.0
score = (float(score) / float(len(employees)))
return score
def accuracy_all(dataPath, result, atoms):
employees = []
with open(dataPath+'employee.txt') as f:
for line in f:
line = line.strip()
if not line: continue
employees.append(line.split()[0])
labels = dict()
with open(dataPath+'label.txt') as f:
for line in f:
line = line.strip()
if not line: continue
[employee, label] = line.split()
labels[employee] = label
vardic = atoms['promotion']
score = 0.0
score_A = 0.0
score_B = 0.0
size_A = 0.0
size_B = 0.0
for e in employees:
if labels[e] =='A':
size_A+=1
else:
size_B+=1
var = vardic[e][0]
if var in result:
predict = float(result[var])
truth = float(vardic[e][1])
if round(predict, 1)>=0.5:
if truth ==1.0:
score+=1.0
if labels[e] =='A':
score_A+=1
else:
score_B+=1
else:
if truth ==0.0:
score+=1.0
if labels[e] =='A':
score_A+=1
else:
score_B+=1
score = (float(score) / float(len(employees)))
score_A = (float(score_A) / float(size_A))
score_B = (float(score_B) / float(size_B))
return score, score_A, score_B
def accuracy_opinion(dataPath, result, atoms):
employees = []
with open(dataPath+'employee.txt') as f:
for line in f:
line = line.strip()
if not line: continue
employees.append(line.split()[0])
vardic = atoms['opinion']
score = 0.0
for e1 in employees:
for e2 in employees:
if e1==e2: continue
var = vardic[(e1,e2)][0]
if var in result:
predict = float(result[var])
truth = float(vardic[(e1,e2)][1])
if round(predict, 1)>=0.5:
if truth ==1.0:
score+=1.0
else:
if truth ==0.0:
score+=1.0
size = (float(len(employees))*float(len(employees)))- float(len(employees))
score = (float(score) / size)
return score
| mit | 5,984,133,284,167,620,000 | 26.979167 | 79 | 0.439563 | false |
DXCanas/kolibri | kolibri/core/auth/migrations/0001_initial.py | 1 | 8724 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2017-05-14 05:22
from __future__ import unicode_literals
import uuid
import django.core.validators
import django.db.models.deletion
import django.utils.timezone
import morango.utils.uuids
import mptt.fields
from django.db import migrations
from django.db import models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='DeviceOwner',
fields=[
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('username', models.CharField(help_text='Required. 30 characters or fewer. Letters and digits only', max_length=30, validators=[django.core.validators.RegexValidator('^\\w+$', 'Enter a valid username. This value may contain only letters and numbers.')], verbose_name='username')),
('full_name', models.CharField(blank=True, max_length=120, verbose_name='full name')),
('date_joined', models.DateTimeField(default=django.utils.timezone.now, editable=False, verbose_name='date joined')),
('id', morango.utils.uuids.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='Collection',
fields=[
('id', morango.utils.uuids.UUIDField(editable=False, primary_key=True, serialize=False)),
('_morango_dirty_bit', models.BooleanField(default=True, editable=False)),
('_morango_source_id', models.CharField(editable=False, max_length=96)),
('_morango_partition', models.CharField(editable=False, max_length=128)),
('name', models.CharField(max_length=100)),
('kind', models.CharField(choices=[(b'facility', 'Facility'), (b'classroom', 'Classroom'), (b'learnergroup', 'Learner group')], max_length=20)),
('lft', models.PositiveIntegerField(db_index=True, editable=False)),
('rght', models.PositiveIntegerField(db_index=True, editable=False)),
('tree_id', models.PositiveIntegerField(db_index=True, editable=False)),
('level', models.PositiveIntegerField(db_index=True, editable=False)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='FacilityDataset',
fields=[
('id', morango.utils.uuids.UUIDField(editable=False, primary_key=True, serialize=False)),
('_morango_dirty_bit', models.BooleanField(default=True, editable=False)),
('_morango_source_id', models.CharField(editable=False, max_length=96)),
('_morango_partition', models.CharField(editable=False, max_length=128)),
('description', models.TextField(blank=True)),
('location', models.CharField(blank=True, max_length=200)),
('learner_can_edit_username', models.BooleanField(default=True)),
('learner_can_edit_name', models.BooleanField(default=True)),
('learner_can_edit_password', models.BooleanField(default=True)),
('learner_can_sign_up', models.BooleanField(default=True)),
('learner_can_delete_account', models.BooleanField(default=True)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='FacilityUser',
fields=[
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('id', morango.utils.uuids.UUIDField(editable=False, primary_key=True, serialize=False)),
('_morango_dirty_bit', models.BooleanField(default=True, editable=False)),
('_morango_source_id', models.CharField(editable=False, max_length=96)),
('_morango_partition', models.CharField(editable=False, max_length=128)),
('username', models.CharField(help_text='Required. 30 characters or fewer. Letters and digits only', max_length=30, validators=[django.core.validators.RegexValidator('^\\w+$', 'Enter a valid username. This value may contain only letters and numbers.')], verbose_name='username')),
('full_name', models.CharField(blank=True, max_length=120, verbose_name='full name')),
('date_joined', models.DateTimeField(default=django.utils.timezone.now, editable=False, verbose_name='date joined')),
('dataset', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='kolibriauth.FacilityDataset')),
],
),
migrations.CreateModel(
name='Membership',
fields=[
('id', morango.utils.uuids.UUIDField(editable=False, primary_key=True, serialize=False)),
('_morango_dirty_bit', models.BooleanField(default=True, editable=False)),
('_morango_source_id', models.CharField(editable=False, max_length=96)),
('_morango_partition', models.CharField(editable=False, max_length=128)),
('collection', mptt.fields.TreeForeignKey(on_delete=django.db.models.deletion.CASCADE, to='kolibriauth.Collection')),
('dataset', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='kolibriauth.FacilityDataset')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='kolibriauth.FacilityUser')),
],
),
migrations.CreateModel(
name='Role',
fields=[
('id', morango.utils.uuids.UUIDField(editable=False, primary_key=True, serialize=False)),
('_morango_dirty_bit', models.BooleanField(default=True, editable=False)),
('_morango_source_id', models.CharField(editable=False, max_length=96)),
('_morango_partition', models.CharField(editable=False, max_length=128)),
('kind', models.CharField(choices=[(b'admin', 'Admin'), (b'coach', 'Coach')], max_length=20)),
('collection', mptt.fields.TreeForeignKey(on_delete=django.db.models.deletion.CASCADE, to='kolibriauth.Collection')),
('dataset', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='kolibriauth.FacilityDataset')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='roles', to='kolibriauth.FacilityUser')),
],
),
migrations.AddField(
model_name='collection',
name='dataset',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='kolibriauth.FacilityDataset'),
),
migrations.AddField(
model_name='collection',
name='parent',
field=mptt.fields.TreeForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='children', to='kolibriauth.Collection'),
),
migrations.CreateModel(
name='Classroom',
fields=[
],
options={
'proxy': True,
},
bases=('kolibriauth.collection',),
),
migrations.CreateModel(
name='Facility',
fields=[
],
options={
'proxy': True,
},
bases=('kolibriauth.collection',),
),
migrations.CreateModel(
name='LearnerGroup',
fields=[
],
options={
'proxy': True,
},
bases=('kolibriauth.collection',),
),
migrations.AlterUniqueTogether(
name='role',
unique_together=set([('user', 'collection', 'kind')]),
),
migrations.AlterUniqueTogether(
name='membership',
unique_together=set([('user', 'collection')]),
),
migrations.AddField(
model_name='facilityuser',
name='facility',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='kolibriauth.Facility'),
),
migrations.AlterUniqueTogether(
name='facilityuser',
unique_together=set([('username', 'facility')]),
),
]
| mit | -3,918,830,347,761,980,400 | 50.621302 | 296 | 0.588835 | false |
sashs/Ropper | ropper/common/enum.py | 1 | 7565 | # coding=utf-8
# Copyright 2018 Sascha Schirra
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from re import match
from sys import version_info
import types
if version_info.major > 2:
long = int
class EnumError(BaseException):
def __init__(self, msg):
super(EnumError, self).__init__(msg)
class EnumElement(object):
def __init__(self, name, value, enum):
super(EnumElement, self).__init__()
self.__name = name
self.__value = value
self.__enum = enum
@property
def name(self):
return self.__name
@property
def value(self):
return self.__value
@property
def _enum(self):
return self.__enum
def __str__(self):
return self.__name
def __index__(self):
return self.__value
def __hash__(self):
return hash((self,))
@property
def value(self):
return self.__value
@property
def name(self):
return self.__name
def __repr__(self):
return str(self)
class IntEnumElement(EnumElement):
def __hash__(self):
return hash(self.value)
def __cmp__(self, other):
if isinstance(other, EnumElement):
return self.value - other.value
else:
return self.value - other
def __lt__(self, other):
return self.__cmp__(other) < 0
def __le__(self, other):
return self.__cmp__(other) <= 0
def __eq__(self, other):
return self.__cmp__(other) == 0
def __ge__(self, other):
return self.__cmp__(other) >= 0
def __gt__(self, other):
return self.__cmp__(other) > 0
def __and__(self, other):
if isinstance(other, int) or isinstance(other, long):
return self.value & other
elif isinstance(other, EnumElement):
return self.value & other.value
        raise TypeError('This operation is not supported for type %s' % type(other))
def __rand__(self, other):
if isinstance(other, int) or isinstance(other, long):
return self.value & other
elif isinstance(other, EnumElement):
return self.value & other.value
        raise TypeError('This operation is not supported for type %s' % type(other))
def __or__(self, other):
if isinstance(other, int) or isinstance(other, long):
return self.value | other
elif isinstance(other, EnumElement) :
return self.value | other.value
        raise TypeError('This operation is not supported for type %s' % type(other))
def __ror__(self, other):
if isinstance(other, int) or isinstance(other, long):
return self.value | other
elif isinstance(other, EnumElement):
return self.value | other.value
        raise TypeError('This operation is not supported for type %s' % type(other))
def __invert__(self):
return ~self.value
def __int__(self):
return self.value
class EnumIterator(object):
def __init__(self, enumData):
self.__enumData = enumData
self.__index = 0
def next(self):
if self.__index < len(self.__enumData):
data = self.__enumData[self.__index]
self.__index += 1
return data
raise StopIteration
class EnumMeta(type):
def __new__(cls, name, bases, dct):
def update(key, value):
if value in values:
raise EnumError('No aliases allowed: '+key+' and '+str(revData[value]))
if isinstance(value, types.FunctionType):
dct[key] = classmethod(value)
return
values.append(value)
if isinstance(value, int) or isinstance(value, long):
element = IntEnumElement(key, value, name)
else:
element = EnumElement(key, value, name)
revData[value] = element
valueData.append(element)
dct[key] = element
revData = {}
valueData = []
values = []
for key, value in dct.items():
if not key.startswith('_'):
update(key, value)
count = 0
if '_enum_' in dct:
enuminit = None
if isinstance(dct['_enum_'], str):
enuminit = dct['_enum_'].split(' ')
elif isinstance(dct['_enum_'], tuple) or isinstance(dct['_enum_'], list):
enuminit = dct['_enum_']
for key in enuminit:
if count in revData:
raise EnumError('The predefined elements have to have bigger value numbers')
update(key, count)
count += 1
dct['_revData'] = revData
dct['_enumData'] = sorted(valueData, key=lambda x: x.value)
return super(EnumMeta, cls).__new__(cls, name, bases, dct)
def __call__(cls, name, args):
if isinstance(args, list):
args = ' '.join(args)
return type(name, (cls,), {'_enum_':args})
def __iter__(cls):
return EnumIterator(cls._enumData)
def __str__(cls):
toReturn = '<'
for elem in cls._enumData:
toReturn += str(elem) + '|'
toReturn = toReturn[:-1] + '>'
return cls.__name__ + '='+toReturn
def __contains__(cls, item):
return item in cls._revData
def __getitem__(cls, key):
if isinstance(key, str):
return cls.__search(key)
elif isinstance(key, EnumElement):
return cls.__search(str(key))
elif isinstance(key, int) or isinstance(key, long):
if key in cls._revData:
return cls._revData[key]
            return 'Unknown'
raise TypeError('key has to be an instance of int/long or str:' + key.__class__.__name__)
def __search(self, key):
for elem in self._enumData:
if str(elem) == key:
return elem;
def __instancecheck__(self, instance):
return isinstance(instance, EnumElement) and instance._enum == self.__name__
# For compatibility reason (python2 & python3)
Enum = EnumMeta('Enum', (), {})
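# Usage sketch (illustrative, not part of the original module): an enum can be
# declared through the `_enum_` attribute or by calling Enum directly; values
# are assigned from 0 upwards by EnumMeta.__new__.
#
#   Color = Enum('Color', 'RED GREEN BLUE')
#   Color.RED.value       # 0
#   Color[1]              # GREEN
#   Color['BLUE'].value   # 2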
| bsd-3-clause | 6,618,701,261,692,716,000 | 29.504032 | 97 | 0.594316 | false |
mucximilian/gimpmaps | gimprenderer/draw_circle.py | 1 | 1230 | #!/usr/bin/env python
# Draws a 150 px radius circle centered in an 800x600 px image
# Adapted from a scheme script-fu contributed by Simon Budig
from gimpfu import *
def draw_circle():
width = 600
height = 600
image = pdb.gimp_image_new(width, height, RGB)
layer = gimp.Layer(image, "layer", image.width, image.height,
RGBA_IMAGE, 100, NORMAL_MODE)
image.add_layer(layer)
gimp.set_foreground(0, 0, 0)
pdb.gimp_context_set_brush("Circle (03)")
vectors = pdb.gimp_vectors_new(image, "circle")
pdb.gimp_image_add_vectors(image, vectors, -1)
pdb.gimp_vectors_bezier_stroke_new_ellipse(vectors, 400, 300, 150, 150, 0)
pdb.gimp_image_set_active_vectors(image, vectors)
print "stroking"
pdb.gimp_edit_stroke_vectors(layer, vectors)
pdb.gimp_displays_flush()
out_path ="/home/mucx/Pictures/test.png"
print "saving"
pdb.file_png_save_defaults(
image,
layer,
out_path,
out_path
)
register(
"python-fu-draw-circle",
N_("Draw a circle"),
"Simple example of stroking a circular path",
"Simon Budig",
"Simon Budig",
"2007",
N_("_Draw Circle"),
"RGB*, GRAY*",
[],
[],
draw_circle,
menu="<Image>/Python-fu"
)
main() | gpl-2.0 | 1,101,271,776,541,197,400 | 20.982143 | 76 | 0.647967 | false |
beagles/neutron_hacking | neutron/services/vpn/service_drivers/__init__.py | 1 | 3163 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013, Nachi Ueno, NTT I3, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
from oslo import messaging
import six
from neutron.common import rpc
from neutron import manager
from neutron.openstack.common import log as logging
from neutron.plugins.common import constants
LOG = logging.getLogger(__name__)
@six.add_metaclass(abc.ABCMeta)
class VpnDriver(object):
def __init__(self, service_plugin):
self.service_plugin = service_plugin
@property
def service_type(self):
pass
@abc.abstractmethod
def create_vpnservice(self, context, vpnservice):
pass
@abc.abstractmethod
def update_vpnservice(
self, context, old_vpnservice, vpnservice):
pass
@abc.abstractmethod
def delete_vpnservice(self, context, vpnservice):
pass
class BaseIPsecVpnAgentApi(object):
"""Base class for IPSec API to agent."""
def __init__(self, to_agent_topic, topic, default_version):
super(BaseIPsecVpnAgentApi, self).__init__()
        self.target = messaging.Target(topic=topic, version=default_version)
        self.client = rpc.get_client(self.target)
self.to_agent_topic = to_agent_topic
def _agent_notification(self, context, method, router_id,
version=None, **kwargs):
"""Notify update for the agent.
This method will find where is the router, and
dispatch notification for the agent.
"""
admin_context = context.is_admin and context or context.elevated()
plugin = manager.NeutronManager.get_service_plugins().get(
constants.L3_ROUTER_NAT)
if not version:
version = self.target.version
l3_agents = plugin.get_l3_agents_hosting_routers(
admin_context, [router_id],
admin_state_up=True,
active=True)
for l3_agent in l3_agents:
LOG.debug(_('Notify agent at %(topic)s.%(host)s the message '
'%(method)s'),
{'topic': self.to_agent_topic,
'host': l3_agent.host,
'method': method,
'args': kwargs})
cctxt = self.client.prepare(
version=version,
topic='%s.%s' % (self.to_agent_topic, l3_agent.host))
cctxt.cast(context, method, **kwargs)
def vpnservice_updated(self, context, router_id):
"""Send update event of vpnservices."""
self._agent_notification(context, 'vpnservice_updated', router_id)
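# Illustrative sketch (not part of the original module): a concrete service
# driver only needs to implement the abstract hooks declared by VpnDriver.
# The class name and service type string below are hypothetical.
#
#   class NoopVpnDriver(VpnDriver):
#
#       @property
#       def service_type(self):
#           return 'VPN'
#
#       def create_vpnservice(self, context, vpnservice):
#           pass
#
#       def update_vpnservice(self, context, old_vpnservice, vpnservice):
#           pass
#
#       def delete_vpnservice(self, context, vpnservice):
#           pass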
| apache-2.0 | 5,851,842,106,440,160,000 | 33.010753 | 78 | 0.631995 | false |
sierisimo/PySongGen | PySongGen.py | 1 | 2115 | #! /usr/bin/env python3
# Name: PySongGen
#
# Version: 0.0.1
#
# Author: Sinuhe Jaime Valencia
#
# Author_email: [email protected]
#
# Description:
# Main code for running instances of pysonggen
from pysonggen import grammar
from pysonggen.songgen import SongG
gram = grammar.Grammar('./examples/example.mgram')
notes = None
audio = None
run = True
def get_phrase():
global notes
global audio
notes = gram.expand(input("""Give a sentence for making a song.
    It's very IMPORTANT that you use spaces between every letter
Example: A A A B
->"""))
audio = SongG(notes)
print("Your song is now ready, it has: " + str(len(notes)) +" notes.")
print("\n The length of the final song will be the same size, because we're using just one second per note")
def change_name():
global audio
print("Actual name is: "+audio.name+".ogg")
print("Ok. Let's give the song an awesome Name:")
name=input("New name: ")
audio.name = name
def save_song():
global audio
if audio != None:
audio.save_song()
else:
print("You have to make a song first...")
def print_notes():
global audio
if audio != None:
print("There you are, this are your notes:")
for i in audio.notes:
print(i,end=" ")
else:
print("You haven't make a song first...")
print("\n")
def exit_without_save():
print("See you later aligator")
while run:
options = {"s":save_song,
"c":change_name,
"n":get_phrase,
"w":print_notes,
"e":""
}
if audio == None:
decision = input("""
What do you want to do now?
n Make a new song
e Exit
Your choice: """)
else:
decision = input("""What do you want to do now?
s Store Song (With default name: Song.ogg)
c Change name of the song (The extension cannot be changed)
n Make a new song
w See the notes
e Exit
Your choice: """)
if len(decision) != 1 or not decision in list(options.keys()):
print("Invalid Option. Please choose a valid one")
continue
elif decision == "e":
exit_without_save()
break
options[decision]()
| gpl-2.0 | 5,985,681,783,417,540,000 | 20.804124 | 110 | 0.629787 | false |
rknightly/crawler-collage | collage_maker/collage_maker.py | 1 | 6709 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------
# Author: delimitry
# slightly edited by Ryan Knightly
# -----------------------------------------------------------------------
import os
import random
from PIL import Image
from optparse import OptionParser
WHITE = (248, 248, 255)
def make_collage(images, filename, width, init_height):
"""
Make a collage image with a width equal to `width` from `images` and save
to `filename`.
"""
if not images:
print('No images for collage found!')
return False
margin_size = 2
# run until a suitable arrangement of images is found
while True:
# copy images to images_list
images_list = images[:]
coefs_lines = []
images_line = []
x = 0
while images_list:
# get first image and resize to `init_height`
img_path = images_list.pop(0)
try:
img = Image.open(img_path)
except OSError:
print("An image could not be used")
print(img_path)
continue
img.thumbnail((width, init_height))
# when `x` will go beyond the `width`, start the next line
if x > width:
coefs_lines.append((float(x) / width, images_line))
images_line = []
x = 0
x += img.size[0] + margin_size
images_line.append(img_path)
# finally add the last line with images
coefs_lines.append((float(x) / width, images_line))
        # compact the lines by reducing `init_height` if any line has one or
        # fewer images
if len(coefs_lines) <= 1:
break
if any(map(lambda x: len(x[1]) <= 1, coefs_lines)):
# reduce `init_height`
init_height -= 10
else:
break
# get output height
out_height = 0
for coef, imgs_line in coefs_lines:
if imgs_line:
out_height += int(init_height / coef) + margin_size
if not out_height:
print('Height of collage could not be 0!')
return False
collage_image = Image.new('RGB', (width, int(out_height)), WHITE)
# put images to the collage
y = 0
for coef, imgs_line in coefs_lines:
if imgs_line:
x = 0
for img_path in imgs_line:
img = Image.open(img_path)
# if need to enlarge an image - use `resize`, otherwise use
# `thumbnail`, it's faster
k = (init_height / coef) / img.size[1]
if k > 1:
img = img.resize((int(img.size[0] * k),
int(img.size[1] * k)), Image.ANTIALIAS)
else:
img.thumbnail((int(width / coef),
int(init_height / coef)), Image.ANTIALIAS)
if collage_image:
collage_image.paste(img, (int(x), int(y)))
x += img.size[0] + margin_size
y += int(init_height / coef) + margin_size
collage_image.save(filename)
return True
def get_images(settings):
images = list(filter(is_image, os.listdir(settings.get_folder())))
image_paths = [os.path.join(settings.get_folder(), image) for
image in images]
return image_paths
def is_image(filename):
is_img = True
file_extension = os.path.splitext(filename)[1].lower()
if file_extension not in ['.jpg', '.jpeg', '.png']:
is_img = False
return is_img
class Settings:
"""Hold the settings passed in by the user"""
def __init__(self, folder='./images', output='collage.png', width=1000,
initial_height=25, shuffle=False):
self.folder = folder
self.output = output
self.width = width
self.initial_height = initial_height
self.shuffle = shuffle
def get_folder(self):
return self.folder
def get_output(self):
return self.output
def get_width(self):
return self.width
def get_initial_height(self):
return self.initial_height
def get_shuffle(self):
return self.shuffle
def run(settings):
"""Run the program with the given settings method"""
# get images
images = get_images(settings)
if not images:
print('No images for making collage! Please select other directory'
' with images!')
return
# shuffle images if needed
if settings.get_shuffle():
random.shuffle(images)
print('making collage...')
res = make_collage(images, settings.get_output(), settings.get_width(),
settings.get_initial_height())
if not res:
print('making collage failed!')
return
print('collage done!')
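# Illustrative sketch (not part of the original script): the collage can also
# be produced programmatically by building a Settings object and calling run();
# the folder and output names below are made up.
#
#   settings = Settings(folder='./images', output='collage.png',
#                       width=1000, initial_height=25, shuffle=True)
#   run(settings)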
def main():
# prepare options parser
options = OptionParser(usage='%prog [options]',
description='Photo collage maker')
options.add_option('-f', '--folder', dest='folder',
help='folder with images (*.jpg, *.jpeg, *.png)',
default='.')
options.add_option('-o', '--output', dest='output',
help='output collage image filename',
default='collage.png')
options.add_option('-w', '--width', dest='width', type='int',
help='resulting collage image width')
options.add_option('-i', '--init_height', dest='init_height',
type='int', help='initial height for resize the images')
options.add_option('-s', '--shuffle', action='store_true', dest='shuffle',
help='enable images shuffle', default=False)
opts, args = options.parse_args()
settings = Settings(folder=opts.folder, output=opts.output,
width=opts.width, initial_height=opts.init_height,
shuffle=opts.shuffle)
if not opts.width or not opts.init_height:
options.print_help()
return
    run(settings=settings)
if __name__ == '__main__':
main()
| mit | -2,331,259,332,186,945,000 | 29.917051 | 79 | 0.538828 | false |
Elastica/kombu | kombu/transport/librabbitmq.py | 1 | 5547 | """
kombu.transport.librabbitmq
===========================
`librabbitmq`_ transport.
.. _`librabbitmq`: http://pypi.python.org/librabbitmq/
"""
from __future__ import absolute_import, unicode_literals
import os
import socket
import warnings
import librabbitmq as amqp
from librabbitmq import ChannelError, ConnectionError
from kombu.five import items, values
from kombu.utils.amq_manager import get_manager
from kombu.utils.text import version_string_as_tuple
from . import base
W_VERSION = """
librabbitmq version too old to detect RabbitMQ version information
so make sure you are using librabbitmq 1.5 when using rabbitmq > 3.3
"""
DEFAULT_PORT = 5672
DEFAULT_SSL_PORT = 5671
NO_SSL_ERROR = """\
ssl not supported by librabbitmq, please use pyamqp:// or stunnel\
"""
class Message(base.Message):
def __init__(self, channel, props, info, body):
super(Message, self).__init__(
channel,
body=body,
delivery_info=info,
properties=props,
delivery_tag=info.get('delivery_tag'),
content_type=props.get('content_type'),
content_encoding=props.get('content_encoding'),
headers=props.get('headers'))
class Channel(amqp.Channel, base.StdChannel):
Message = Message
def prepare_message(self, body, priority=None,
content_type=None, content_encoding=None,
headers=None, properties=None):
"""Encapsulate data into a AMQP message."""
properties = properties if properties is not None else {}
properties.update({'content_type': content_type,
'content_encoding': content_encoding,
'headers': headers,
'priority': priority})
return body, properties
class Connection(amqp.Connection):
Channel = Channel
Message = Message
class Transport(base.Transport):
Connection = Connection
default_port = DEFAULT_PORT
default_ssl_port = DEFAULT_SSL_PORT
connection_errors = (
base.Transport.connection_errors + (
ConnectionError, socket.error, IOError, OSError)
)
channel_errors = (
base.Transport.channel_errors + (ChannelError,)
)
driver_type = 'amqp'
driver_name = 'librabbitmq'
implements = base.Transport.implements.extend(
async=True,
heartbeats=False,
)
def __init__(self, client, **kwargs):
self.client = client
self.default_port = kwargs.get('default_port') or self.default_port
self.default_ssl_port = (kwargs.get('default_ssl_port') or
self.default_ssl_port)
self.__reader = None
def driver_version(self):
return amqp.__version__
def create_channel(self, connection):
return connection.channel()
def drain_events(self, connection, **kwargs):
return connection.drain_events(**kwargs)
def establish_connection(self):
"""Establish connection to the AMQP broker."""
conninfo = self.client
for name, default_value in items(self.default_connection_params):
if not getattr(conninfo, name, None):
setattr(conninfo, name, default_value)
if conninfo.ssl:
raise NotImplementedError(NO_SSL_ERROR)
opts = dict({
'host': conninfo.host,
'userid': conninfo.userid,
'password': conninfo.password,
'virtual_host': conninfo.virtual_host,
'login_method': conninfo.login_method,
'insist': conninfo.insist,
'ssl': conninfo.ssl,
'connect_timeout': conninfo.connect_timeout,
}, **conninfo.transport_options or {})
conn = self.Connection(**opts)
conn.client = self.client
self.client.drain_events = conn.drain_events
return conn
def close_connection(self, connection):
"""Close the AMQP broker connection."""
self.client.drain_events = None
connection.close()
def _collect(self, connection):
if connection is not None:
for channel in values(connection.channels):
channel.connection = None
try:
os.close(connection.fileno())
except OSError:
pass
connection.channels.clear()
connection.callbacks.clear()
self.client.drain_events = None
self.client = None
def verify_connection(self, connection):
return connection.connected
def register_with_event_loop(self, connection, loop):
loop.add_reader(
connection.fileno(), self.on_readable, connection, loop,
)
def get_manager(self, *args, **kwargs):
return get_manager(self.client, *args, **kwargs)
def qos_semantics_matches_spec(self, connection):
try:
props = connection.server_properties
except AttributeError:
warnings.warn(UserWarning(W_VERSION))
else:
if props.get('product') == 'RabbitMQ':
return version_string_as_tuple(props['version']) < (3, 3)
return True
@property
def default_connection_params(self):
return {
'userid': 'guest',
'password': 'guest',
'port': (self.default_ssl_port if self.client.ssl
else self.default_port),
'hostname': 'localhost',
'login_method': 'AMQPLAIN',
}
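# Illustrative sketch (not part of the original module): applications normally
# select this transport through a broker URL handled by kombu.Connection; the
# credentials and host below are placeholders.
#
#   from kombu import Connection
#
#   with Connection('librabbitmq://guest:guest@localhost//') as conn:
#       channel = conn.channel()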
| bsd-3-clause | -3,182,920,128,225,371,600 | 29.988827 | 75 | 0.600505 | false |
rainer85ah/VisionViewer | src/Builders/Histogram/HistogramBuilder.py | 1 | 19510 | # -*- coding: utf-8 -*-
__author__ = 'Rainer Arencibia'
import PyQt4
import numpy as np
import cv2
from PyQt4.QtCore import QString
from PyQt4.QtGui import QColor, QPen, QBrush
from Histograms import Ui_Histograms
"""
The MIT License (MIT)
Copyright (c) 2016 Rainer Arencibia
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit
persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
class WindowHistogram(PyQt4.QtGui.QDialog):
"""
This class will show a new window for the histograms calculated.
"""
def __init__(self, parent=None):
"""
        :param parent: This window does NOT have a parent.
:return: show a new window.
"""
PyQt4.QtGui.QDialog.__init__(self, parent)
self.ui = Ui_Histograms()
self.ui.setupUi(self)
self.setFixedSize(self.width(), self.height())
class HistogramBuilder:
"""
    The class implements many methods, some intended to calculate the histograms of an image.
    Others show the results on the window, using PyQt4 methods to draw lines & rectangles.
    We also have slots & methods for the signals received from the window.
"""
def __init__(self, img):
"""
:param img: an image or a video frame.
:return: the information calculated from the different histograms in a window.
"""
img_read = cv2.imread(img)
self.image = cv2.cvtColor(img_read, cv2.COLOR_BGR2RGB) # we change the format of the image.
self.height = self.image.shape[0]
self.width = self.image.shape[1]
self.size = self.image.size
self.num_pixels = self.width * self.height
self.r_hist = np.zeros_like(self.image) # arrays that will contain the histogram calculated.
self.g_hist = np.zeros_like(self.image)
self.b_hist = np.zeros_like(self.image)
self.h_hist = np.zeros_like(self.image)
self.s_hist = np.zeros_like(self.image)
self.v_hist = np.zeros_like(self.image)
        self.color_bool = False  # True if the received image is a color image.
        self.gray_bool = False  # True if the received image is a gray image.
(r, g, b) = cv2.split(self.image)
if np.array_equal(r, g) and np.array_equal(r, b):
self.gray_bool = True
self.image = cv2.cvtColor(self.image, cv2.COLOR_RGB2GRAY)
else:
self.color_bool = True
self.hsv_image = cv2.cvtColor(self.image, cv2.COLOR_RGB2HSV)
        self.window_histogram = WindowHistogram()  # we create the window & connect the signals and slots.
self.connect_all_checkbox()
""" we save the size of the window, we need this lines to draw in future steps"""
self.scene_h = self.window_histogram.sizeHint().height() - int(0.10 * self.window_histogram.sizeHint().height())
self.scene_w = self.window_histogram.sizeHint().width() - int(0.10 * self.window_histogram.sizeHint().width())
def connect_all_checkbox(self):
"""
Just connect the signals with the slots.
"""
PyQt4.QtCore.QObject.connect(self.window_histogram.ui.redCheckBox, PyQt4.QtCore.SIGNAL('stateChanged(int)'),
self.on_redCheckBox_stateChanged)
PyQt4.QtCore.QObject.connect(self.window_histogram.ui.greenCheckBox, PyQt4.QtCore.SIGNAL('stateChanged(int)'),
self.on_greenCheckBox_stateChanged)
PyQt4.QtCore.QObject.connect(self.window_histogram.ui.blueCheckBox, PyQt4.QtCore.SIGNAL('stateChanged(int)'),
self.on_blueCheckBox_stateChanged)
PyQt4.QtCore.QObject.connect(self.window_histogram.ui.hueCheckBox, PyQt4.QtCore.SIGNAL('stateChanged(int)'),
self.on_hueCheckBox_stateChanged)
PyQt4.QtCore.QObject.connect(self.window_histogram.ui.saturationCheckBox,
PyQt4.QtCore.SIGNAL('stateChanged(int)'),
self.on_saturationCheckBox_stateChanged)
PyQt4.QtCore.QObject.connect(self.window_histogram.ui.valueCheckBox, PyQt4.QtCore.SIGNAL('stateChanged(int)'),
self.on_valueCheckBox_stateChanged)
PyQt4.QtCore.QObject.connect(self.window_histogram.ui.scaleComboBox,
PyQt4.QtCore.SIGNAL('currentIndexChanged(int)'),
self.on_scaleComboBox_currentIndexChanged)
def draw_256_range(self, scene):
"""
:param scene: QGraphicsView, we will paint on this.
:return: scene but with a numbers bar on the bottom from 0 to 255.
"""
val = 0
step_w = self.scene_w / 8.0
for pos in range(0, 9):
x = float(pos) * step_w
text = QString.number(val)
text_item = scene.addText(text)
text_item.setPos(int(x) - 10, self.scene_h)
val += 256 / 8
def draw_1_range(self, scene):
"""
        :param scene: QGraphicsScene on which we will paint.
:return: scene but with a numbers bar on the bottom from 0.0 to 1.0.
"""
val = 0
step_w = self.scene_w / 8.0
for pos in range(0, 9):
x = float(pos) * step_w
text = '%.2f' % (val / 80.0)
text_item = scene.addText(text)
text_item.setPos(int(x) - 10, self.scene_h)
val += 10
def draw_360_range(self, scene):
"""
        :param scene: QGraphicsScene on which we will paint.
:return: scene but with a numbers bar on the bottom from 0 to 360.
"""
val = 0
step_w = self.scene_w / 8.0
for pos in range(0, 9):
x = float(pos) * step_w
text = QString.number(val)
text_item = scene.addText(text)
text_item.setPos(int(x) - 10, self.scene_h)
            val += 360 / 8
# draw the HUE range of values.
def draw_hue_range(self, scene):
"""
        :param scene: QGraphicsScene on which we will paint.
:return: scene but with a HUE color bar on the bottom.
"""
for pos in range(0, self.scene_w + 1):
color = pos * 255 / self.scene_w
pen = PyQt4.QtGui.QPen(QColor(color, 255, 255))
scene.addLine(float(pos), self.scene_h + 4, float(pos), self.scene_h + 12, pen)
# draw an underline with black and white colors.
def draw_value_range(self, scene):
"""
        :param scene: QGraphicsScene on which we will paint.
:return: scene but with a VALUE color bar on the bottom.
"""
for pos in range(0, self.scene_w + 1):
bright = pos * 255 / self.scene_w
pen = PyQt4.QtGui.QPen(QColor(bright, bright, bright))
scene.addLine(float(pos), self.scene_h + 4, float(pos), self.scene_h + 12, pen)
def draw_grid(self, scene):
"""
        :param scene: QGraphicsScene on which we will paint.
:return: scene but with a grid painted.
"""
grey = PyQt4.QtGui.QPen(QColor(200, 200, 200))
step_w = self.scene_w / 8.0
step_h = self.scene_h / 8.0
for pos in range(0, 9):
x = pos * step_w
y = pos * step_h
scene.addLine(float(x), 0.0, float(x), self.scene_h, grey) # draw the vertical lines on the grid
scene.addLine(0.0, float(y), self.scene_w, float(y), grey) # draw the horizontal lines on the grid.
index = self.window_histogram.ui.scaleComboBox.currentIndex()
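        # The combo-box index selects the bottom scale drawn below the grid:
        # 0 -> value bar, 1 -> 0-255, 2 -> hue bar, 3 -> 0-360, anything else -> 0.0-1.0.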
if index == 0:
self.draw_value_range(scene)
elif index == 1:
self.draw_256_range(scene)
elif index == 2:
self.draw_hue_range(scene)
elif index == 3:
self.draw_360_range(scene)
else:
self.draw_1_range(scene)
def draw_lines(self, scene):
"""
        :param scene: QGraphicsScene on which we will paint.
:return: scene but with the lines of the colors selected on the window histogram.
"""
step_w = max(1.0, self.scene_w / 256.0)
red = PyQt4.QtGui.QPen(QColor(255, 0, 0))
red.setWidthF(step_w)
green = PyQt4.QtGui.QPen(QColor(0, 255, 0))
green.setWidthF(step_w)
blue = PyQt4.QtGui.QPen(QColor(0, 0, 255))
blue.setWidthF(step_w)
hue = PyQt4.QtGui.QPen(QColor(255, 0, 128))
hue.setWidthF(step_w)
saturation = PyQt4.QtGui.QPen(QColor(255, 128, 0))
saturation.setWidthF(step_w)
value = PyQt4.QtGui.QPen(QColor(0, 0, 0))
value.setWidthF(step_w)
draw_red = self.window_histogram.ui.redCheckBox.isChecked() and self.r_hist.max() > 0
draw_green = self.window_histogram.ui.greenCheckBox.isChecked() and self.g_hist.max() > 0
draw_blue = self.window_histogram.ui.blueCheckBox.isChecked() and self.b_hist.max() > 0
draw_hue = self.window_histogram.ui.hueCheckBox.isChecked() and self.h_hist.max() > 0
draw_saturation = self.window_histogram.ui.saturationCheckBox.isChecked() and self.s_hist.max() > 0
draw_value = self.window_histogram.ui.valueCheckBox.isChecked() and self.v_hist.max() > 0
if draw_red or draw_green or draw_blue or draw_hue or draw_saturation or draw_value:
x = 0
while x < self.scene_w + 1:
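                # Map the scene x-coordinate to a histogram bin index (0-255).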
i1 = min(255.0, max(0.0, x * 255.0 / self.scene_w))
i2 = min(255.0, max(0.0, (x + step_w) * 255.0 / self.scene_w))
if draw_red:
scene.addLine(x, self.scene_h - self.scene_h * self.r_hist[i1], x + step_w,
self.scene_h - self.scene_h * self.r_hist[i2], red)
if draw_green:
scene.addLine(x, self.scene_h - self.scene_h * self.g_hist[i1], x + step_w,
self.scene_h - self.scene_h * self.g_hist[i2], green)
if draw_blue:
scene.addLine(x, self.scene_h - self.scene_h * self.b_hist[i1], x + step_w,
self.scene_h - self.scene_h * self.b_hist[i2], blue)
if draw_hue:
i1 = min(180.0, max(0.0, x * 180.0 / self.scene_w))
i2 = min(180.0, max(0.0, (x + step_w) * 180.0 / self.scene_w))
scene.addLine(x, self.scene_h - self.scene_h * self.h_hist[i1], x + step_w,
self.scene_h - self.scene_h * self.h_hist[i2], hue)
if draw_saturation:
scene.addLine(x, self.scene_h - self.scene_h * self.s_hist[i1], x + step_w,
self.scene_h - self.scene_h * self.s_hist[i2], saturation)
if draw_value:
scene.addLine(x, self.scene_h - self.scene_h * self.v_hist[i1], x + step_w,
self.scene_h - self.scene_h * self.v_hist[i2], value)
x += step_w
def draw_bars(self, scene):
"""
        :param scene: QGraphicsScene on which we will paint.
        :return: scene; if the image is bitonal, draws a pair of rectangles with
        the percentage of white and black pixels.
"""
draw_value = self.window_histogram.ui.valueCheckBox.isChecked() and self.v_hist.any() > 0.0
if draw_value:
bar1 = self.scene_h * self.v_hist[0]
bar2 = self.scene_h * self.v_hist[255]
pen = QPen(QColor(128, 128, 128))
brush_white = QBrush(QColor(225, 225, 225))
brush_black = QBrush(QColor(25, 25, 25))
scene.addRect(0, self.scene_h - bar1, self.scene_w / 2, bar1, pen, brush_black)
scene.addRect(self.scene_w / 2, self.scene_h - bar2, self.scene_w / 2, bar2, pen, brush_white)
total = self.v_hist[0] + self.v_hist[255]
result_0 = '%.0f' % (100 * self.v_hist[0] / total[0])
result_255 = '%.0f' % (100 * self.v_hist[255] / total[0])
black = str(result_0) + '%'
white = str(result_255) + '%'
scene.addText(black).setPos(self.scene_w / 4, self.scene_h)
scene.addText(white).setPos(3 * self.scene_w / 4, self.scene_h)
def draw_histograms(self):
"""
        Make a new QGraphicsScene (scene) and send it to be painted.
        :return: if we have a bitonal image, we paint rectangles;
        otherwise we draw lines on the scene.
        But first we draw the grid.
"""
self.scene_h = self.window_histogram.sizeHint().height() - int(0.10 * self.window_histogram.sizeHint().height())
self.scene_w = self.window_histogram.sizeHint().width() - int(0.10 * self.window_histogram.sizeHint().width())
scene = PyQt4.QtGui.QGraphicsScene(0, 0, self.scene_w, self.scene_h)
self.window_histogram.ui.graphicsView.setScene(scene)
if self.gray_bool and self.is_bitonal():
self.draw_bars(scene)
else:
self.draw_grid(scene)
self.draw_lines(scene)
def is_bitonal(self):
"""
        :return: True if the image is bitonal. This type of image only has 0 (black) or 255 (white) as pixel values.
        So we check the value histogram array and only look at the first and last positions.
        If the sum of these positions equals the number of pixels in the image, the image is bitonal.
"""
return self.v_hist[0] + self.v_hist[255] == self.num_pixels
def compute_red_histogram(self):
"""
Input: Color Image.
        :return: self.r_hist, the red histogram (normalized) of a color image.
"""
self.r_hist = cv2.calcHist([self.image], [0], None, [256], [0, 256])
cv2.normalize(self.r_hist, self.r_hist, 0, 1, cv2.NORM_MINMAX)
def compute_green_histogram(self):
"""
Input: Color Image.
        :return: self.g_hist, the green histogram (normalized) of a color image.
"""
self.g_hist = cv2.calcHist([self.image], [1], None, [256], [0, 256])
cv2.normalize(self.g_hist, self.g_hist, 0, 1, cv2.NORM_MINMAX)
def compute_blue_histogram(self):
"""
Input: Color Image.
        :return: self.b_hist, the blue histogram (normalized) of a color image.
"""
self.b_hist = cv2.calcHist([self.image], [2], None, [256], [0, 256])
cv2.normalize(self.b_hist, self.b_hist, 0, 1, cv2.NORM_MINMAX)
def compute_hue_histogram(self):
"""
Input: Color Image.
        :return: self.h_hist, the hue histogram (normalized) of a color image.
"""
self.h_hist = cv2.calcHist([self.hsv_image], [0], None, [256], [0, 180])
cv2.normalize(self.h_hist, self.h_hist, 0, 1, cv2.NORM_MINMAX)
def compute_saturation_histogram(self):
"""
Input: Color Image.
        :return: self.s_hist, the saturation histogram (normalized) of a color image.
"""
self.s_hist = cv2.calcHist([self.hsv_image], [1], None, [256], [0, 256])
cv2.normalize(self.s_hist, self.s_hist, 0, 1, cv2.NORM_MINMAX)
def compute_value_histogram(self):
"""
Input: Color / Gray Image.
        :return: self.v_hist,
        if it is a gray image, the value histogram (equalized & normalized);
        if it is a color image, the value histogram (normalized).
"""
if self.gray_bool:
equalize_gray_image = cv2.equalizeHist(self.image)
self.v_hist = cv2.calcHist([equalize_gray_image], [0], None, [256], [0, 256])
elif self.color_bool:
self.v_hist = cv2.calcHist([self.hsv_image], [2], None, [256], [0, 256])
cv2.normalize(self.v_hist, self.v_hist, 0, 1, cv2.NORM_MINMAX)
def compute_histograms(self):
"""
        :return: If we have an image with at least one pixel, we send it to be processed.
        If it is a color image, compute all of the histograms.
        If it is a grayscale image, only the equalized value histogram is computed.
"""
if self.num_pixels > 0:
if self.r_hist.max() == 0 and self.color_bool:
self.compute_red_histogram()
if self.g_hist.max() == 0 and self.color_bool:
self.compute_green_histogram()
if self.b_hist.max() == 0 and self.color_bool:
self.compute_blue_histogram()
if self.h_hist.max() == 0 and self.color_bool:
self.compute_hue_histogram()
if self.s_hist.max() == 0 and self.color_bool:
self.compute_saturation_histogram()
if self.v_hist.max() == 0:
self.compute_value_histogram()
def on_redCheckBox_stateChanged(self):
"""
This slot is connected automatically in connectSlotsByName
"""
self.draw_histograms()
def on_greenCheckBox_stateChanged(self):
"""
This slot is connected automatically in connectSlotsByName
"""
self.draw_histograms()
def on_blueCheckBox_stateChanged(self):
"""
This slot is connected automatically in connectSlotsByName
"""
self.draw_histograms()
def on_hueCheckBox_stateChanged(self):
"""
This slot is connected automatically in connectSlotsByName
"""
self.draw_histograms()
def on_saturationCheckBox_stateChanged(self):
"""
This slot is connected automatically in connectSlotsByName
"""
self.draw_histograms()
def on_valueCheckBox_stateChanged(self):
"""
This slot is connected automatically in connectSlotsByName
"""
self.draw_histograms()
def on_scaleComboBox_currentIndexChanged(self):
"""
This slot is connected automatically in connectSlotsByName
"""
self.draw_histograms()
def update_panel_to_image(self, img):
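        # Re-initialise from the new image and refresh the histograms, but only
        # while the histogram window is currently visible.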
if self.window_histogram.isVisible():
self.__init__(img)
self.compute_histograms()
self.draw_histograms()
def keyPressEvent(self, e):
"""
This slot is connected automatically in connectSlotsByName
"""
if e.key() == PyQt4.QtCore.Qt.Key_Escape:
self.window_histogram.close()
""" Know the value of a pixel of a color image.
(r, g, b) = self.image[0, 0]
print "Pixel at (0, 0) - Red: %d, Green: %d, Blue: %d" % (r, g, b)
""" | mit | 8,540,447,499,718,498,000 | 42.535714 | 120 | 0.58632 | false |
takmid/inasafe | safe/impact_functions/volcanic/volcano_population_evacuation_polygon_hazard.py | 1 | 7246 | import numpy
from safe.impact_functions.core import FunctionProvider
from safe.impact_functions.core import get_hazard_layer, get_exposure_layer
from safe.impact_functions.core import get_question
from safe.storage.vector import Vector
from safe.common.utilities import ugettext as _
from safe.common.tables import Table, TableRow
from safe.engine.interpolation import assign_hazard_values_to_exposure_data
class VolcanoFunctionVectorHazard(FunctionProvider):
"""Risk plugin for flood evacuation
:author AIFDR
:rating 4
:param requires category=='hazard' and \
subcategory in ['volcano'] and \
layertype=='vector'
:param requires category=='exposure' and \
subcategory=='population' and \
layertype=='raster' and \
datatype=='density'
"""
title = _('be affected')
target_field = 'population'
category_title = 'KRB'
def run(self, layers):
"""Risk plugin for flood population evacuation
Input
layers: List of layers expected to contain
H: Raster layer of volcano depth
P: Raster layer of population data on the same grid as H
Counts number of people exposed to flood levels exceeding
specified threshold.
Return
Map of population exposed to flood levels exceeding the threshold
Table with number of people evacuated and supplies required
"""
# Identify hazard and exposure layers
        H = get_hazard_layer(layers)  # Volcano hazard polygons
E = get_exposure_layer(layers)
question = get_question(H.get_name(),
E.get_name(),
self)
# Check that hazard is polygon type
if not H.is_vector:
msg = ('Input hazard %s was not a vector layer as expected '
% H.get_name())
raise Exception(msg)
msg = ('Input hazard must be a polygon layer. I got %s with layer '
'type %s' % (H.get_name(),
H.get_geometry_name()))
if not H.is_polygon_data:
raise Exception(msg)
category_title = self.category_title
if not category_title in H.get_attribute_names():
category_title = 'Radius'
# Run interpolation function for polygon2raster
P = assign_hazard_values_to_exposure_data(H, E,
attribute_name='population')
# Initialise attributes of output dataset with all attributes
# from input polygon and a population count of zero
new_attributes = H.get_data()
categories = {}
for attr in new_attributes:
attr[self.target_field] = 0
cat = attr[self.category_title]
categories[cat] = 0
# Count affected population per polygon and total
evacuated = 0
for attr in P.get_data():
# Get population at this location
pop = float(attr['population'])
# Update population count for associated polygon
poly_id = attr['polygon_id']
new_attributes[poly_id][self.target_field] += pop
# Update population count for each category
cat = new_attributes[poly_id][self.category_title]
categories[cat] += pop
# Update total
evacuated += pop
# Count totals
total = int(numpy.sum(E.get_data(nan=0, scaling=False)))
## # Don't show digits less than a 1000
## if total > 1000:
## total = total // 1000 * 1000
## if evacuated > 1000:
## evacuated = evacuated // 1000 * 1000
## # Calculate estimated needs based on BNPB Perka
## # 7/2008 minimum bantuan
## rice = evacuated * 2.8
## drinking_water = evacuated * 17.5
## water = evacuated * 67
## family_kits = evacuated / 5
## toilets = evacuated / 20
# Generate impact report for the pdf map
table_body = [question,
TableRow([_('People needing evacuation'),
'%i' % evacuated],
header=True),
TableRow([_('Category'), _('Total')],
header=True)]
if category_title != 'Radius':
for name, pop in categories.iteritems():
table_body.append(TableRow([name, int(pop)]))
        table_body.append(TableRow(_('Map shows population affected in '
                                     'each of the volcano hazard polygons.')))
## TableRow([_('Needs per week'), _('Total')],
## header=True),
## [_('Rice [kg]'), int(rice)],
## [_('Drinking Water [l]'), int(drinking_water)],
## [_('Clean Water [l]'), int(water)],
## [_('Family Kits'), int(family_kits)],
## [_('Toilets'), int(toilets)]]
impact_table = Table(table_body).toNewlineFreeString()
# Extend impact report for on-screen display
table_body.extend([TableRow(_('Notes'), header=True),
_('Total population %i in view port') % total,
_('People need evacuation if they are within the '
'volcanic hazard zones.')])
impact_summary = Table(table_body).toNewlineFreeString()
map_title = _('People affected by volcanic hazard zone')
        # Define classes for legend for affected population counts
colours = ['#FFFFFF', '#38A800', '#79C900', '#CEED00',
'#FFCC00', '#FF6600', '#FF0000', '#7A0000']
population_counts = [x['population'] for x in new_attributes]
cls = [0] + numpy.linspace(1,
max(population_counts),
len(colours)).tolist()
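        # cls holds the legend break points: 0 followed by len(colours) evenly
        # spaced values from 1 up to the largest per-polygon population count.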
# Define style info for output polygons showing population counts
style_classes = []
for i, colour in enumerate(colours):
lo = cls[i]
hi = cls[i + 1]
if i == 0:
label = _('0')
else:
label = _('%i - %i') % (lo, hi)
entry = dict(label=label, colour=colour, min=lo, max=hi,
transparency=0, size=1)
style_classes.append(entry)
# Override style info with new classes and name
style_info = dict(target_field=self.target_field,
style_classes=style_classes,
legend_title=_('Population Count'))
# Create vector layer and return
V = Vector(data=new_attributes,
projection=H.get_projection(),
geometry=H.get_geometry(),
name=_('Population affected by volcanic hazard zone'),
keywords={'impact_summary': impact_summary,
'impact_table': impact_table,
'map_title': map_title},
style_info=style_info)
return V
| gpl-3.0 | 2,129,394,776,019,534,600 | 37.956989 | 77 | 0.532846 | false |
sixpearls/django-mediacracy | mediacracy/widgets.py | 1 | 1103 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from django import forms
from django.core import urlresolvers
from django.utils.safestring import mark_safe
from django.template.loader import render_to_string
from markitup.widgets import AdminMarkItUpWidget, MarkItUpWidget
from django.conf import settings as site_settings
use_mm = False
if 'massmedia' in site_settings.INSTALLED_APPS:
use_mm = True
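# use_mm is passed to the markitup_helper.js template below, presumably so the
# helper can enable massmedia-specific behaviour when that app is installed.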
class TextifyMarkitupAdminWidget(AdminMarkItUpWidget):
def render(self,*args,**kwargs):
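        # Render via super(MarkItUpWidget, ...) — note this bypasses
        # AdminMarkItUpWidget.render — then append the mediacracy helper
        # script rendered from 'mediacracy/markitup_helper.js' inline.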
attrs_copy = kwargs['attrs'].copy()
html = super(MarkItUpWidget,self).render(*args,**kwargs)
html += '<script type="text/javascript">'
html += render_to_string('mediacracy/markitup_helper.js',{ 'id': attrs_copy['id'], 'use_mm': use_mm })
html += '</script>'
return mark_safe(html)
def _media(self):
return super(TextifyMarkitupAdminWidget,self).media + forms.Media(
css={'all': ('mediacracy/markitup/markitup_helper.css',),},
js=("mediacracy/js/mediacracy_ajax_csrf.js",)
)
media = property(_media)
| bsd-2-clause | 3,709,738,566,038,608,400 | 34.580645 | 110 | 0.672711 | false |
openstack/congress | congress/tests/datalog/test_builtin.py | 1 | 56296 | #! /usr/bin/python
#
# Copyright (c) 2014 IBM, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from oslo_log import log as logging
from congress.datalog import base as datalog_base
from congress.datalog import builtin
from congress.datalog import compile
from congress import exception
from congress.policy_engines import agnostic
from congress.tests import base
from congress.tests import helper
LOG = logging.getLogger(__name__)
addmap = {
'comparison': [
{'func': 'f(x,y)', 'num_inputs': 2,
'code': lambda x, y: x if x > y else y}],
'newcategory': [
{'func': 'g(x,y)', 'num_inputs': 2, 'code': lambda x, y: x + y}]}
append_builtin = {'arithmetic': [{'func': 'div(x,y)',
'num_inputs': 2,
'code': 'lambda x,y: x / y'}]}
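# These module-level maps are test fixtures: the tests below add and remove
# them from a CongressBuiltinCategoryMap to exercise builtin registration.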
class TestBuiltins(base.TestCase):
def setUp(self):
super(TestBuiltins, self).setUp()
self.cbcmap = builtin.CongressBuiltinCategoryMap(
builtin._builtin_map)
self.predl = self.cbcmap.builtin('lt')
def test_add_and_delete_map(self):
cbcmap_before = self.cbcmap
self.cbcmap.add_map(append_builtin)
self.cbcmap.delete_map(append_builtin)
self.assertTrue(self.cbcmap.mapequal(cbcmap_before))
def test_add_map_only(self):
self.cbcmap.add_map(append_builtin)
predl = self.cbcmap.builtin('div')
self.assertIsNotNone(predl)
self.cbcmap.add_map(addmap)
predl = self.cbcmap.builtin('max')
self.assertIsNotNone(predl)
def test_add_and_delete_builtin(self):
cbcmap_before = self.cbcmap
self.cbcmap.add_map(append_builtin)
self.cbcmap.delete_builtin('arithmetic', 'div', 2)
self.assertTrue(self.cbcmap.mapequal(cbcmap_before))
def test_string_pred_string(self):
predstring = str(self.predl)
self.assertNotEqual(predstring, 'ltc(x,y')
def test_add_and_delete_to_category(self):
cbcmap_before = self.cbcmap
arglist = ['x', 'y', 'z']
pred = builtin.CongressBuiltinPred('testfunc', arglist, 1,
lambda x: not x)
self.cbcmap.insert_to_category('arithmetic', pred)
self.cbcmap.delete_from_category('arithmetic', pred)
self.assertTrue(self.cbcmap.mapequal(cbcmap_before))
def test_all_checks(self):
predtotest = self.cbcmap.builtin('lt')
self.assertTrue(self.cbcmap.builtin_is_registered(predtotest))
def test_eval_builtin(self):
predl = self.cbcmap.builtin('plus')
result = predl.code(1, 2)
self.assertEqual(result, 3)
predl = self.cbcmap.builtin('gt')
result = predl.code(1, 2)
self.assertFalse(result)
# NOTE(thinrichs): this test will be removed once we remove bare builtins
class TestReorder(base.TestCase):
def check(self, input_string, correct_string, msg):
rule = compile.parse1(input_string)
actual = compile.reorder_for_safety(rule)
correct = compile.parse1(correct_string)
if correct != actual:
emsg = "Correct: " + str(correct)
emsg += "; Actual: " + str(actual)
self.fail(msg + " :: " + emsg)
def check_err(self, input_string, unsafe_lit_strings, msg):
rule = compile.parse1(input_string)
try:
compile.reorder_for_safety(rule)
self.fail("Failed to raise exception for " + input_string)
except exception.PolicyException as e:
errmsg = str(e)
# parse then print to string so string rep same in err msg
unsafe_lits = [str(compile.parse1(x)) for x in unsafe_lit_strings]
missing_lits = [m for m in unsafe_lits
if m + " (vars" not in errmsg]
if len(missing_lits) > 0:
self.fail(
"Unsafe literals {} not reported in error: {}".format(
";".join(missing_lits), errmsg))
def test_reorder_builtins(self):
self.check("p(x, z) :- q(x, y), plus(x, y, z)",
"p(x, z) :- q(x, y), plus(x, y, z)",
"No reorder")
self.check("p(x, z) :- plus(x, y, z), q(x, y)",
"p(x, z) :- q(x, y), plus(x, y, z)",
"Basic reorder")
self.check("p(x, z) :- q(x, y), r(w), plus(x, y, z), plus(z, w, y)",
"p(x, z) :- q(x, y), r(w), plus(x, y, z), plus(z, w, y)",
"Chaining: no reorder")
self.check("p(x, z) :- q(x, y), plus(x, y, z), plus(z, w, y), r(w)",
"p(x, z) :- q(x, y), plus(x, y, z), r(w), plus(z, w, y)",
"Chaining: reorder")
self.check("p(x) :- lt(t, v), plus(z, w, t), plus(z, u, v), "
" plus(x, y, z), q(y), r(x), s(u), t(w) ",
"p(x) :- q(y), r(x), plus(x, y, z), s(u), plus(z, u, v), "
" t(w), plus(z, w, t), lt(t, v)",
"Partial-order chaining")
def test_unsafe_builtins(self):
# an output
self.check_err("p(x) :- q(x), plus(x, y, z)",
["plus(x,y,z)"],
"Basic Unsafe input")
self.check_err("p(x) :- q(x), r(z), plus(x, y, z)",
["plus(x,y,z)"],
"Basic Unsafe input 2")
self.check_err("p(x, z) :- plus(x, y, z), plus(z, y, x), "
" plus(x, z, y)",
["plus(x, y, z)", "plus(z, y, x)", "plus(x, z, y)"],
"Unsafe with cycle")
# no outputs
self.check_err("p(x) :- q(x), lt(x, y)",
["lt(x,y)"],
"Basic Unsafe input, no outputs")
self.check_err("p(x) :- q(y), lt(x, y)",
["lt(x,y)"],
"Basic Unsafe input, no outputs 2")
self.check_err("p(x, z) :- lt(x, y), lt(y, x)",
["lt(x,y)", "lt(y, x)"],
"Unsafe with cycle, no outputs")
# chaining
self.check_err("p(x) :- q(x, y), plus(x, y, z), plus(z, 3, w), "
" plus(w, t, u)",
["plus(w, t, u)"],
"Unsafe chaining")
self.check_err("p(x) :- q(x, y), plus(x, y, z), plus(z, 3, w), "
" lt(w, t)",
["lt(w, t)"],
"Unsafe chaining 2")
def test_reorder_negation(self):
self.check("p(x) :- q(x), not u(x), r(y), not s(x, y)",
"p(x) :- q(x), not u(x), r(y), not s(x, y)",
"No reordering")
self.check("p(x) :- not q(x), r(x)",
"p(x) :- r(x), not q(x)",
"Basic")
self.check("p(x) :- r(x), not q(x, y), s(y)",
"p(x) :- r(x), s(y), not q(x,y)",
"Partially safe")
self.check("p(x) :- not q(x, y), not r(x), not r(x, z), "
" t(x, y), u(x), s(z)",
"p(x) :- t(x,y), not q(x,y), not r(x), u(x), s(z), "
" not r(x, z)",
"Complex")
def test_unsafe_negation(self):
self.check_err("p(x) :- not q(x)",
["q(x)"],
"Basic")
self.check_err("p(x) :- not q(x), not r(x)",
["q(x)", "r(x)"],
"Cycle")
self.check_err("p(x) :- not q(x, y), r(y)",
["q(x, y)"],
"Partially safe")
def test_reorder_builtins_negation(self):
self.check("p(x) :- not q(z), plus(x, y, z), s(x), s(y)",
"p(x) :- s(x), s(y), plus(x, y, z), not q(z)",
"Basic")
self.check("p(x) :- not q(z, w), plus(x, y, z), lt(z, w), "
" plus(x, 3, w), s(x, y)",
"p(x) :- s(x,y), plus(x, y, z), plus(x, 3, w), "
" not q(z, w), lt(z, w)",
"Partial order")
def test_unsafe_builtins_negation(self):
self.check_err("p(x) :- plus(x, y, z), not q(x, y)",
['plus(x,y,z)', 'q(x,y)'],
'Unsafe cycle')
self.check_err("p(x) :- plus(x, y, z), plus(z, w, t), not q(z, t),"
" s(x), t(y)",
['plus(z, w, t)', 'q(z, t)'],
'Unsafety propagates')
NREC_THEORY = 'non-recursive theory test'
MAT_THEORY = 'materialized view theory test'
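# Policy names for the two theory kinds exercised by the tests: a nonrecursive
# theory and a materialized-view theory (both created in prep_runtime below).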
# NOTE(thinrichs): this test will be removed once we remove bare builtins
class TestTheories(base.TestCase):
def prep_runtime(self, code=None, msg=None, target=None):
# compile source
if msg is not None:
LOG.debug(msg)
if code is None:
code = ""
if target is None:
target = NREC_THEORY
run = agnostic.Runtime()
run.create_policy(NREC_THEORY, abbr="NRT",
kind=datalog_base.NONRECURSIVE_POLICY_TYPE)
run.create_policy(MAT_THEORY, abbr="MAT",
kind=datalog_base.MATERIALIZED_POLICY_TYPE)
run.debug_mode()
run.insert(code, target=target)
return run
def check_equal(self, actual_string, correct_string, msg):
self.assertTrue(helper.datalog_equal(
actual_string, correct_string, msg))
def test_materialized_builtins(self):
self.test_builtins(MAT_THEORY)
def test_builtins(self, th=NREC_THEORY):
"""Test the mechanism that implements builtins."""
run = self.prep_runtime()
run.insert('p(x) :- q(x,y), plus(x,y,z), r(z)'
'q(1,2)'
'q(2,3)'
'r(3)'
'r(5)', target=th)
self.check_equal(run.select('p(x)', target=th), "p(1) p(2)", "Plus")
run.delete('r(5)', target=th)
self.check_equal(run.select('p(x)', target=th), "p(1)", "Plus")
run = self.prep_runtime()
run.insert('p(x) :- q(x,y), minus(x,y,z), r(z)'
'q(2,1)'
'q(3,1)'
'r(1)'
'r(4)', target=th)
self.check_equal(run.select('p(x)', target=th), "p(2)", "Minus")
run.delete('r(4)', target=th)
run.insert('r(2)', target=th)
self.check_equal(run.select('p(x)', target=th), "p(2) p(3)", "Minus")
run = self.prep_runtime()
run.insert('p(x, z) :- q(x,y), plus(x,y,z)'
'q(1,2)'
'q(2,3)', target=th)
self.check_equal(run.select('p(x, y)', target=th),
"p(1, 3) p(2, 5)", "Plus")
run = self.prep_runtime()
run.insert('m(x) :- j(x,y), lt(x,y)'
'j(1,2)'
'j(3,2)', target=th)
self.check_equal(run.select('m(x)', target=th), 'm(1)', "LT")
run = self.prep_runtime()
run.insert('m(x) :- j(x,y), lt(x,y), r(y)'
'j(1,2)'
'j(2,3)'
'j(3,2)'
'r(2)', target=th)
self.check_equal(run.select('m(x)', target=th), 'm(1)', "LT 2")
run = self.prep_runtime()
run.insert('p(x,z) :- q(x), plus(x,1,z)'
'q(3)'
'q(5)', target=th)
self.check_equal(run.select('p(x,z)', target=th),
'p(3, 4) p(5,6)', "Bound input")
run = self.prep_runtime()
run.insert('p(x) :- q(x), plus(x,1,5)'
'q(4)'
'q(5)', target=th)
self.check_equal(run.select('p(x)', target=th),
'p(4)', "Bound output")
run = self.prep_runtime()
run.insert('p(x, z) :- plus(x,y,z), q(x), r(y)'
'q(4)'
'r(5)', target=th)
self.check_equal(run.select('p(x, y)', target=th),
'p(4, 9)',
"Reordering")
run = self.prep_runtime()
run.insert('p(x, z) :- plus(x,y,z), q(x), q(y)'
'q(4)'
'q(5)', target=th)
self.check_equal(run.select('p(x, y)', target=th),
'p(4, 9) p(4, 8) p(5, 9) p(5, 10)',
"Reordering with self joins")
def test_materialized_builtins_content(self):
self.test_builtins_content(MAT_THEORY)
def test_builtins_content(self, th=NREC_THEORY):
"""Test the content of the builtins, not the mechanism."""
def check_true(code, msg):
run = self.prep_runtime('')
run.insert(code, target=th)
self.check_equal(
run.select('p(x)', target=th),
'p(1)',
msg)
def check_false(code, msg):
th = NREC_THEORY
run = self.prep_runtime('')
run.insert(code, target=th)
self.check_equal(
run.select('p(x)', target=th),
'',
msg)
#
# Numbers
#
# int
code = 'p(1) :- int(2,2)'
check_true(code, "int")
code = 'p(1) :- int(2.3, 2)'
check_true(code, "int")
code = 'p(1) :- int(2, 3.3)'
check_false(code, "int")
# float
code = 'p(1) :- float(2,2.0)'
check_true(code, "float")
code = 'p(1) :- float(2.3,2.3)'
check_true(code, "float")
code = 'p(1) :- float(2,3.3)'
check_false(code, "int")
# plus
code = 'p(1) :- plus(2,3,5)'
check_true(code, "plus")
code = 'p(1) :- plus(2,3,1)'
check_false(code, "plus")
# minus
code = 'p(1) :- minus(5, 3, 2)'
check_true(code, "minus")
code = 'p(1) :- minus(5, 3, 6)'
check_false(code, "minus")
# minus negative: negative numbers should not be supported
# code = 'p(1) :- minus(3, 5, x)'
# check_false(code, "minus")
# times
code = 'p(1) :- mul(3, 5, 15)'
check_true(code, "multiply")
code = 'p(1) :- mul(2, 5, 1)'
check_false(code, "multiply")
# divides
code = 'p(1) :- div(10, 2, 5)'
check_true(code, "divides")
code = 'p(1) :- div(10, 4, 2)'
check_true(code, "integer divides")
code = 'p(1) :- div(10, 4.0, 2.5)'
check_true(code, "float divides")
code = 'p(1) :- div(10.0, 3, 3.3)'
check_false(code, "divides")
#
# Comparison
#
# less than
code = 'p(1) :- lt(1, 3)'
check_true(code, "lessthan")
code = 'p(1) :- lt(5, 2)'
check_false(code, "lessthan")
# less than equal
code = 'p(1) :- lteq(1, 3)'
check_true(code, "lessthaneq")
code = 'p(1) :- lteq(3, 3)'
check_true(code, "lessthaneq")
code = 'p(1) :- lteq(4, 3)'
check_false(code, "lessthaneq")
# greater than
code = 'p(1) :- gt(9, 5)'
check_true(code, "greaterthan")
code = 'p(1) :- gt(5, 9)'
check_false(code, "greaterthan")
# greater than equal
code = 'p(1) :- gteq(10, 5)'
check_true(code, "greaterthaneq")
code = 'p(1) :- gteq(10, 10)'
check_true(code, "greaterthaneq")
code = 'p(1) :- gteq(5, 20)'
check_false(code, "greaterthaneq")
# equal
code = 'p(1) :- equal(5, 5)'
check_true(code, "equal")
code = 'p(1) :- equal(5, 7)'
check_false(code, "equal")
# max
code = 'p(1) :- max(3, 4, 4)'
check_true(code, "max")
code = 'p(1) :- max(3, 7, 3)'
check_false(code, "max")
#
# Strings
#
# len
code = 'p(1) :- len("abcde", 5)'
check_true(code, "Len")
code = 'p(1) :- len("abcde", 7)'
check_false(code, "Len")
# concat
code = 'p(1) :- concat("abc", "def", "abcdef")'
check_true(code, "concat")
code = 'p(1) :- concat("abc", "def", "zxy")'
check_false(code, "concat")
#
# Datetime
# We should make some of these more robust but can't do
# that with the safety restrictions in place at the time
# of writing.
#
# lessthan
code = ('p(1) :- datetime_lt('
'"Jan 1, 2014 10:00:00", "2014-01-02 10:00:00")')
check_true(code, "True datetime_lt")
code = ('p(1) :- datetime_lt('
'"2014-01-03 10:00:00", "Jan 2, 2014 10:00:00")')
check_false(code, "False datetime_lt")
# lessthanequal
code = ('p(1) :- datetime_lteq('
'"Jan 1, 2014 10:00:00", "2014-01-02 10:00:00")')
check_true(code, "True datetime_lteq")
code = ('p(1) :- datetime_lteq('
'"Jan 1, 2014 10:00:00", "2014-01-01 10:00:00")')
check_true(code, "True datetime_lteq")
code = ('p(1) :- datetime_lteq('
'"2014-01-02 10:00:00", "Jan 1, 2014 10:00:00")')
check_false(code, "False datetime_lteq")
# greaterthan
code = ('p(1) :- datetime_gt('
'"Jan 5, 2014 10:00:00", "2014-01-02 10:00:00")')
check_true(code, "True datetime_gt")
code = ('p(1) :- datetime_gt('
'"2014-01-03 10:00:00", "Feb 2, 2014 10:00:00")')
check_false(code, "False datetime_gt")
# greaterthanequal
code = ('p(1) :- datetime_gteq('
'"Jan 5, 2014 10:00:00", "2014-01-02 10:00:00")')
check_true(code, "True datetime_gteq")
code = ('p(1) :- datetime_gteq('
'"Jan 5, 2014 10:00:00", "2014-01-05 10:00:00")')
check_true(code, "True datetime_gteq")
code = ('p(1) :- datetime_gteq('
'"2014-01-02 10:00:00", "Mar 1, 2014 10:00:00")')
check_false(code, "False datetime_gteq")
# equal
code = ('p(1) :- datetime_equal('
'"Jan 5, 2014 10:00:00", "2014-01-05 10:00:00")')
check_true(code, "True datetime_equal")
code = ('p(1) :- datetime_equal('
'"Jan 5, 2014 10:00:00", "2014-01-02 10:00:00")')
check_false(code, "False datetime_equal")
# plus
code = ('p(1) :- datetime_plus('
'"Jan 5, 2014 10:00:00", 3600, "2014-01-05 11:00:00")')
check_true(code, "True datetime_plus")
code = ('p(1) :- datetime_plus('
'"Jan 5, 2014 10:00:00", "1:00:00", "2014-01-05 11:00:00")')
check_true(code, "True datetime_plus")
code = ('p(1) :- datetime_plus('
'"Jan 5, 2014 10:00:00", 3600, "2014-01-05 12:00:00")')
check_false(code, "False datetime_plus")
# minus
code = ('p(1) :- datetime_minus('
'"Jan 5, 2014 10:00:00", "25:00:00", "2014-01-04 09:00:00")')
check_true(code, "True datetime_minus")
code = ('p(1) :- datetime_minus('
'"Jan 5, 2014 10:00:00", 3600, "2014-01-05 09:00:00")')
check_true(code, "True datetime_minus")
code = ('p(1) :- datetime_minus('
'"Jan 5, 2014 10:00:00", "9:00:00", "Jan 4, 2014 10:00:00")')
check_false(code, "False datetime_minus")
# to_seconds
code = ('p(1) :- datetime_to_seconds('
'"Jan 1, 1900 1:00:00", 3600)')
check_true(code, "True datetime_to_seconds")
code = ('p(1) :- datetime_to_seconds('
'"Jan 1, 1900 1:00:00", 3601)')
check_false(code, "False datetime_to_seconds")
# extract_time
code = ('p(1) :- extract_time('
'"Jan 1, 1900 1:00:00", "01:00:00")')
check_true(code, "True extract_time")
code = ('p(1) :- extract_time('
'"Jan 1, 1900 1:00:00", "02:00:00")')
check_false(code, "False extract_time")
# extract_date
code = ('p(1) :- extract_date('
'"Jan 1, 1900 1:00:00", "1900-01-01")')
check_true(code, "True extract_date")
code = ('p(1) :- extract_date('
'"Jan 1, 1900 1:00:00", "2000-01-01")')
check_false(code, "False extract_date")
# pack_datetime
code = ('p(1) :- pack_datetime(2000, 1, 1, 10, 5, 6, '
'"2000-1-1 10:5:6")')
check_true(code, "True pack_datetime")
code = ('p(1) :- pack_datetime(2000, 1, 1, 10, 5, 6, '
'"2000-1-1 10:5:20")')
check_false(code, "False pack_datetime")
# pack_date
code = ('p(1) :- pack_date(2000, 1, 1, '
'"2000-1-1")')
check_true(code, "True pack_date")
code = ('p(1) :- pack_date(2000, 1, 1, '
'"2000-1-2")')
check_false(code, "False pack_date")
# pack_time
code = ('p(1) :- pack_time(5, 6, 7, '
'"5:6:7")')
check_true(code, "True pack_time")
code = ('p(1) :- pack_time(5, 6, 7, '
'"10:6:7")')
check_false(code, "False pack_time")
# unpack_datetime
code = ('p(1) :- unpack_datetime("2000-1-1 10:5:6", '
'2000, 1, 1, 10, 5, 6)')
check_true(code, "True unpack_datetime")
code = ('p(1) :- unpack_datetime("2000-1-1 10:5:6", '
'2000, 1, 1, 12, 5, 6)')
check_false(code, "False unpack_datetime")
# unpack_date
code = ('p(1) :- unpack_date("2000-1-1 10:5:6", '
'2000, 1, 1)')
check_true(code, "True unpack_date")
code = ('p(1) :- unpack_date("2000-1-1 10:5:6", '
'2000, 1, 5)')
check_false(code, "False unpack_date")
# unpack_time
code = ('p(1) :- unpack_time("2000-1-1 10:5:6", '
'10, 5, 6)')
check_true(code, "True unpack_time")
code = ('p(1) :- unpack_time("2000-1-1 10:5:6", '
'12, 5, 6)')
check_false(code, "False unpack_time")
# unpack_time
code = 'p(1) :- now(x)'
check_true(code, "True unpack_time")
#
# Network Address IPv4
#
# ip equal
code = ('p(1) :- ips_equal("192.0.2.1", "192.0.2.1")')
check_true(code, "True ip_equal")
code = ('p(1) :- ips_equal("192.0.2.1", "192.0.2.2")')
check_false(code, "False ip_equal")
# ip less than
code = ('p(1) :- ips_lt("192.0.2.1", "192.0.2.2")')
check_true(code, "True ip_lt")
code = ('p(1) :- ips_lt("192.0.2.1", "192.0.2.1")')
check_false(code, "False ip_lt")
code = ('p(1) :- ips_lt("192.0.2.2", "192.0.2.1")')
check_false(code, "False ip_lt")
# ip less than equal
code = ('p(1) :- ips_lteq("192.0.2.1", "192.0.2.1")')
check_true(code, "True ip_lteq")
code = ('p(1) :- ips_lteq("192.0.2.1", "192.0.2.2")')
check_true(code, "True ip_lteq")
code = ('p(1) :- ips_lteq("192.0.2.2", "192.0.2.1")')
check_false(code, "False ip_lteq")
# ip greater than
code = ('p(1) :- ips_gt("192.0.2.2", "192.0.2.1")')
check_true(code, "True ip_gt")
code = ('p(1) :- ips_gt("192.0.2.1", "192.0.2.1")')
check_false(code, "False ip_gt")
code = ('p(1) :- ips_gt("192.0.2.1", "192.0.2.2")')
check_false(code, "False ip_gt")
# ip greater than equal
code = ('p(1) :- ips_gteq("192.0.2.2", "192.0.2.1")')
check_true(code, "True ip_gteq")
code = ('p(1) :- ips_gteq("192.0.2.2", "192.0.2.2")')
check_true(code, "True ip_gteq")
code = ('p(1) :- ips_gteq("192.0.2.1", "192.0.2.2")')
check_false(code, "False ip_gteq")
# networks equal
code = ('p(1) :- networks_equal("192.0.2.0/24", "192.0.2.112/24")')
check_true(code, "True networks_equal")
code = ('p(1) :- networks_equal("192.0.2.0/24", "192.0.3.0/24")')
check_false(code, "False networks_equal")
# networks overlap
code = ('p(1) :- networks_overlap("192.0.2.0/23", "192.0.2.0/24")')
check_true(code, "True networks_overlap")
code = ('p(1) :- networks_overlap("192.0.2.0/24", "192.0.3.0/24")')
check_false(code, "False networks_overlap")
# ip in network
code = ('p(1) :- ip_in_network("192.168.0.1", "192.168.0.0/24")')
check_true(code, "True ip_in_network")
code = ('p(1) :- ip_in_network("192.168.10.1", "192.168.0.0/24")')
check_false(code, "False ip_in_network")
#
# Network Address IPv6
#
# ip equal
code = ('p(1) :- ips_equal("::ffff:192.0.2.1", "::ffff:192.0.2.1")')
check_true(code, "True ip_equal v6")
code = ('p(1) :- ips_equal("::ffff:192.0.2.1", "::ffff:192.0.2.2")')
check_false(code, "False ip_equal v6")
# ip less than
code = ('p(1) :- ips_lt("::ffff:192.0.2.1", "::ffff:192.0.2.2")')
check_true(code, "True ip_lt v6")
code = ('p(1) :- ips_lt("::ffff:192.0.2.1", "::ffff:192.0.2.1")')
check_false(code, "False ip_lt v6")
code = ('p(1) :- ips_lt("::ffff:192.0.2.2", "::ffff:192.0.2.1")')
check_false(code, "False ip_lt v6")
# ip less than equal
code = ('p(1) :- ips_lteq("::ffff:192.0.2.1", "::ffff:192.0.2.1")')
check_true(code, "True ip_lteq v6")
code = ('p(1) :- ips_lteq("::ffff:192.0.2.1", "::ffff:192.0.2.2")')
check_true(code, "True ip_lteq v6")
code = ('p(1) :- ips_lteq("::ffff:192.0.2.2", "::ffff:192.0.2.1")')
check_false(code, "False ip_lteq v6")
# ip greater than
code = ('p(1) :- ips_gt("::ffff:192.0.2.2", "::ffff:192.0.2.1")')
check_true(code, "True ip_gt v6")
code = ('p(1) :- ips_gt("::ffff:192.0.2.1", "::ffff:192.0.2.1")')
check_false(code, "False ip_gt v6")
code = ('p(1) :- ips_gt("::ffff:192.0.2.1", "::ffff:192.0.2.2")')
check_false(code, "False ip_gt v6")
# ip greater than equal
code = ('p(1) :- ips_gteq("::ffff:192.0.2.2", "::ffff:192.0.2.1")')
check_true(code, "True ip_gteq v6")
code = ('p(1) :- ips_gteq("::ffff:192.0.2.2", "::ffff:192.0.2.2")')
check_true(code, "True ip_gteq v6")
code = ('p(1) :- ips_gteq("::ffff:192.0.2.1", "::ffff:192.0.2.2")')
check_false(code, "False ip_gteq v6")
# networks equal
code = ('p(1) :- networks_equal("fe80::ffff:192.0.2.0/24",'
' "fe80::ffff:192.0.2.112/24")')
check_true(code, "True networks_equal v6")
code = ('p(1) :- networks_equal("fe80::ffff:192.0.2.0/24",'
' "ae80::ffff:192.0.2.0/24")')
check_false(code, "False networks_equal v6")
# networks overlap
code = ('p(1) :- networks_overlap("fe80::ffff:192.0.2.0/23",'
' "fe80::ffff:192.0.2.0/24")')
check_true(code, "True networks_overlap v6")
code = ('p(1) :- networks_overlap("fe80::ffff:192.0.2.0/24",'
' "ae80::ffff:192.0.3.0/24")')
check_false(code, "False networks_overlap v6")
# ip in network
code = ('p(1) :- ip_in_network("fe80::ffff:192.168.0.1",'
' "fe80::ffff:192.168.0.0/24")')
check_true(code, "True ip_in_network v6")
code = ('p(1) :- ip_in_network("fe80::ffff:192.168.10.1",'
' "ae80::ffff:192.168.10.1/24")')
check_false(code, "False ip_in_network v6")
#
# OptType
#
code = ('p(1) :- validate_int(2, 7, 5, "")')
check_true(code, "True validate_int")
code = ('p(1) :- validate_int(2, 7, 9,"")')
check_false(code, "False validate_int (constraint)")
code = ('p(1) :- validate_int(2, 7, "string", "")')
check_false(code, "False validate_int (bad type)")
code = ('p(1) :- validate_float(2.3,4.5,3.3,"")')
check_true(code, "True validate_float")
code = ('p(1) :- validate_float(2.3,4.5,7.3,"")')
check_false(code, "False validate_float")
code = ('p(1) :- validate_string("a*", 5, 0, 0, "aaa","")')
check_true(code, "True validate_string")
code = ('p(1) :- validate_string("a*", 5, 0, 1, "aAa","")')
check_true(code, "True validate_string")
# code = ('p(1) :- validate_string("a*", 5, 0, 0, "aAa","")')
# check_false(code, "False validate_string")
class TestNamedspacedReorder(base.TestCase):
def check(self, input_string, correct_string, msg):
rule = compile.parse1(input_string)
actual = compile.reorder_for_safety(rule)
correct = compile.parse1(correct_string)
if correct != actual:
emsg = "Correct: " + str(correct)
emsg += "; Actual: " + str(actual)
self.fail(msg + " :: " + emsg)
def check_err(self, input_string, unsafe_lit_strings, msg):
rule = compile.parse1(input_string)
try:
compile.reorder_for_safety(rule)
self.fail("Failed to raise exception for " + input_string)
except exception.PolicyException as e:
errmsg = str(e)
# parse then print to string so string rep same in err msg
unsafe_lits = [str(compile.parse1(x)) for x in unsafe_lit_strings]
missing_lits = [m for m in unsafe_lits
if m + " (vars" not in errmsg]
if len(missing_lits) > 0:
self.fail(
"Unsafe literals {} not reported in error: {}".format(
";".join(missing_lits), errmsg))
def test_reorder_builtins(self):
self.check("p(x, z) :- q(x, y), builtin:plus(x, y, z)",
"p(x, z) :- q(x, y), builtin:plus(x, y, z)",
"No reorder")
self.check("p(x, z) :- builtin:plus(x, y, z), q(x, y)",
"p(x, z) :- q(x, y), builtin:plus(x, y, z)",
"Basic reorder")
self.check("p(x, z) :- q(x, y), r(w), builtin:plus(x, y, z), "
" builtin:plus(z, w, y)",
"p(x, z) :- q(x, y), r(w), builtin:plus(x, y, z), "
" builtin:plus(z, w, y)",
"Chaining: no reorder")
self.check("p(x, z) :- q(x, y), builtin:plus(x, y, z), "
" builtin:plus(z, w, y), r(w)",
"p(x, z) :- q(x, y), builtin:plus(x, y, z), r(w), "
" builtin:plus(z, w, y)",
"Chaining: reorder")
self.check("p(x) :- builtin:lt(t, v), builtin:plus(z, w, t), "
" builtin:plus(z, u, v), "
" builtin:plus(x, y, z), q(y), r(x), s(u), t(w) ",
"p(x) :- q(y), r(x), builtin:plus(x, y, z), s(u), "
" builtin:plus(z, u, v), "
" t(w), builtin:plus(z, w, t), builtin:lt(t, v)",
"Partial-order chaining")
def test_unsafe_builtins(self):
# an output
self.check_err("p(x) :- q(x), builtin:plus(x, y, z)",
["builtin:plus(x,y,z)"],
"Basic Unsafe input")
self.check_err("p(x) :- q(x), r(z), builtin:plus(x, y, z)",
["builtin:plus(x,y,z)"],
"Basic Unsafe input 2")
self.check_err("p(x, z) :- builtin:plus(x, y, z), "
" builtin:plus(z, y, x), builtin:plus(x, z, y)",
["builtin:plus(x, y, z)", "builtin:plus(z, y, x)",
"builtin:plus(x, z, y)"],
"Unsafe with cycle")
# no outputs
self.check_err("p(x) :- q(x), builtin:lt(x, y)",
["builtin:lt(x,y)"],
"Basic Unsafe input, no outputs")
self.check_err("p(x) :- q(y), builtin:lt(x, y)",
["builtin:lt(x,y)"],
"Basic Unsafe input, no outputs 2")
self.check_err("p(x, z) :- builtin:lt(x, y), builtin:lt(y, x)",
["builtin:lt(x,y)", "builtin:lt(y, x)"],
"Unsafe with cycle, no outputs")
# chaining
self.check_err("p(x) :- q(x, y), builtin:plus(x, y, z), "
" builtin:plus(z, 3, w), builtin:plus(w, t, u)",
["builtin:plus(w, t, u)"],
"Unsafe chaining")
self.check_err("p(x) :- q(x, y), builtin:plus(x, y, z), "
" builtin:plus(z, 3, w), builtin:lt(w, t)",
["builtin:lt(w, t)"],
"Unsafe chaining 2")
def test_reorder_negation(self):
self.check("p(x) :- q(x), not u(x), r(y), not s(x, y)",
"p(x) :- q(x), not u(x), r(y), not s(x, y)",
"No reordering")
self.check("p(x) :- not q(x), r(x)",
"p(x) :- r(x), not q(x)",
"Basic")
self.check("p(x) :- r(x), not q(x, y), s(y)",
"p(x) :- r(x), s(y), not q(x,y)",
"Partially safe")
self.check("p(x) :- not q(x, y), not r(x), not r(x, z), "
" t(x, y), u(x), s(z)",
"p(x) :- t(x,y), not q(x,y), not r(x), u(x), s(z), "
" not r(x, z)",
"Complex")
def test_unsafe_negation(self):
self.check_err("p(x) :- not q(x)",
["q(x)"],
"Basic")
self.check_err("p(x) :- not q(x), not r(x)",
["q(x)", "r(x)"],
"Cycle")
self.check_err("p(x) :- not q(x, y), r(y)",
["q(x, y)"],
"Partially safe")
def test_reorder_builtins_negation(self):
self.check("p(x) :- not q(z), builtin:plus(x, y, z), s(x), s(y)",
"p(x) :- s(x), s(y), builtin:plus(x, y, z), not q(z)",
"Basic")
self.check("p(x) :- not q(z, w), builtin:plus(x, y, z), "
" builtin:lt(z, w), builtin:plus(x, 3, w), s(x, y)",
"p(x) :- s(x,y), builtin:plus(x, y, z), "
" builtin:plus(x, 3, w), not q(z, w), builtin:lt(z, w)",
"Partial order")
def test_unsafe_builtins_negation(self):
self.check_err("p(x) :- builtin:plus(x, y, z), not q(x, y)",
['builtin:plus(x,y,z)', 'q(x,y)'],
'Unsafe cycle')
self.check_err("p(x) :- builtin:plus(x, y, z), builtin:plus(z, w, t),"
" not q(z, t), s(x), t(y)",
['builtin:plus(z, w, t)', 'q(z, t)'],
'Unsafety propagates')
class TestNamespacedTheories(base.TestCase):
def prep_runtime(self, code=None, msg=None, target=None):
# compile source
if msg is not None:
LOG.debug(msg)
if code is None:
code = ""
if target is None:
target = NREC_THEORY
run = agnostic.Runtime()
run.create_policy(NREC_THEORY, abbr="NRT",
kind=datalog_base.NONRECURSIVE_POLICY_TYPE)
run.create_policy(MAT_THEORY, abbr="MAT",
kind=datalog_base.MATERIALIZED_POLICY_TYPE)
run.debug_mode()
run.insert(code, target=target)
return run
def check_equal(self, actual_string, correct_string, msg):
self.assertTrue(helper.datalog_equal(
actual_string, correct_string, msg))
def test_materialized_builtins(self):
self.test_builtins(MAT_THEORY)
def test_builtins(self, th=NREC_THEORY):
"""Test the mechanism that implements builtins."""
run = self.prep_runtime()
run.insert('p(x) :- q(x,y), builtin:plus(x,y,z), r(z)'
'q(1,2)'
'q(2,3)'
'r(3)'
'r(5)', target=th)
self.check_equal(run.select('p(x)', target=th), "p(1) p(2)", "Plus")
run.delete('r(5)', target=th)
self.check_equal(run.select('p(x)', target=th), "p(1)", "Plus")
run = self.prep_runtime()
run.insert('p(x) :- q(x,y), builtin:minus(x,y,z), r(z)'
'q(2,1)'
'q(3,1)'
'r(1)'
'r(4)', target=th)
self.check_equal(run.select('p(x)', target=th), "p(2)", "Minus")
run.delete('r(4)', target=th)
run.insert('r(2)', target=th)
self.check_equal(run.select('p(x)', target=th), "p(2) p(3)", "Minus")
run = self.prep_runtime()
run.insert('p(x, z) :- q(x,y), builtin:plus(x,y,z)'
'q(1,2)'
'q(2,3)', target=th)
self.check_equal(run.select('p(x, y)', target=th),
"p(1, 3) p(2, 5)", "Plus")
run = self.prep_runtime()
run.insert('m(x) :- j(x,y), builtin:lt(x,y)'
'j(1,2)'
'j(3,2)', target=th)
self.check_equal(run.select('m(x)', target=th), 'm(1)', "LT")
run = self.prep_runtime()
run.insert('m(x) :- j(x,y), builtin:lt(x,y), r(y)'
'j(1,2)'
'j(2,3)'
'j(3,2)'
'r(2)', target=th)
self.check_equal(run.select('m(x)', target=th), 'm(1)', "LT 2")
run = self.prep_runtime()
run.insert('p(x,z) :- q(x), builtin:plus(x,1,z)'
'q(3)'
'q(5)', target=th)
self.check_equal(run.select('p(x,z)', target=th),
'p(3, 4) p(5,6)', "Bound input")
run = self.prep_runtime()
run.insert('p(x) :- q(x), builtin:plus(x,1,5)'
'q(4)'
'q(5)', target=th)
self.check_equal(run.select('p(x)', target=th),
'p(4)', "Bound output")
run = self.prep_runtime()
run.insert('p(x, z) :- builtin:plus(x,y,z), q(x), r(y)'
'q(4)'
'r(5)', target=th)
self.check_equal(run.select('p(x, y)', target=th),
'p(4, 9)',
"Reordering")
run = self.prep_runtime()
run.insert('p(x, z) :- builtin:plus(x,y,z), q(x), q(y)'
'q(4)'
'q(5)', target=th)
self.check_equal(run.select('p(x, y)', target=th),
'p(4, 9) p(4, 8) p(5, 9) p(5, 10)',
"Reordering with self joins")
def test_materialized_builtins_content(self):
self.test_builtins_content(MAT_THEORY)
def test_builtins_content(self, th=NREC_THEORY):
"""Test the content of the builtins, not the mechanism."""
def check_true(code, msg):
run = self.prep_runtime('')
run.insert(code, target=th)
self.check_equal(
run.select('p(x)', target=th),
'p(1)',
msg)
def check_false(code, msg):
th = NREC_THEORY
run = self.prep_runtime('')
run.insert(code, target=th)
self.check_equal(
run.select('p(x)', target=th),
'',
msg)
#
# Numbers
#
# int
code = 'p(1) :- builtin:int(2,2)'
check_true(code, "int")
code = 'p(1) :- builtin:int(2.3, 2)'
check_true(code, "int")
code = 'p(1) :- builtin:int(2, 3.3)'
check_false(code, "int")
# float
code = 'p(1) :- builtin:float(2,2.0)'
check_true(code, "float")
code = 'p(1) :- builtin:float(2.3,2.3)'
check_true(code, "float")
code = 'p(1) :- builtin:float(2,3.3)'
check_false(code, "int")
# plus
code = 'p(1) :- builtin:plus(2,3,5)'
check_true(code, "plus")
code = 'p(1) :- builtin:plus(2,3,1)'
check_false(code, "plus")
# minus
code = 'p(1) :- builtin:minus(5, 3, 2)'
check_true(code, "minus")
code = 'p(1) :- builtin:minus(5, 3, 6)'
check_false(code, "minus")
# minus negative: negative numbers should not be supported
# code = 'p(1) :- minus(3, 5, x)'
# check_false(code, "minus")
# times
code = 'p(1) :- builtin:mul(3, 5, 15)'
check_true(code, "multiply")
code = 'p(1) :- builtin:mul(2, 5, 1)'
check_false(code, "multiply")
# divides
code = 'p(1) :- builtin:div(10, 2, 5)'
check_true(code, "divides")
code = 'p(1) :- builtin:div(10, 4, 2)'
check_true(code, "integer divides")
code = 'p(1) :- builtin:div(10, 4.0, 2.5)'
check_true(code, "float divides")
code = 'p(1) :- builtin:div(10.0, 3, 3.3)'
check_false(code, "divides")
#
# Comparison
#
# less than
code = 'p(1) :- builtin:lt(1, 3)'
check_true(code, "lessthan")
code = 'p(1) :- builtin:lt(5, 2)'
check_false(code, "lessthan")
# less than equal
code = 'p(1) :- builtin:lteq(1, 3)'
check_true(code, "lessthaneq")
code = 'p(1) :- builtin:lteq(3, 3)'
check_true(code, "lessthaneq")
code = 'p(1) :- builtin:lteq(4, 3)'
check_false(code, "lessthaneq")
# greater than
code = 'p(1) :- builtin:gt(9, 5)'
check_true(code, "greaterthan")
code = 'p(1) :- builtin:gt(5, 9)'
check_false(code, "greaterthan")
# greater than equal
code = 'p(1) :- builtin:gteq(10, 5)'
check_true(code, "greaterthaneq")
code = 'p(1) :- builtin:gteq(10, 10)'
check_true(code, "greaterthaneq")
code = 'p(1) :- builtin:gteq(5, 20)'
check_false(code, "greaterthaneq")
# equal
code = 'p(1) :- builtin:equal(5, 5)'
check_true(code, "equal")
code = 'p(1) :- builtin:equal(5, 7)'
check_false(code, "equal")
# max
code = 'p(1) :- builtin:max(3, 4, 4)'
check_true(code, "max")
code = 'p(1) :- builtin:max(3, 7, 3)'
check_false(code, "max")
#
# Strings
#
# len
code = 'p(1) :- builtin:len("abcde", 5)'
check_true(code, "Len")
code = 'p(1) :- builtin:len("abcde", 7)'
check_false(code, "Len")
# concat
code = 'p(1) :- builtin:concat("abc", "def", "abcdef")'
check_true(code, "concat")
code = 'p(1) :- builtin:concat("abc", "def", "zxy")'
check_false(code, "concat")
#
# Datetime
# We should make some of these more robust but can't do
# that with the safety restrictions in place at the time
# of writing.
#
# lessthan
code = ('p(1) :- builtin:datetime_lt('
'"Jan 1, 2014 10:00:00", "2014-01-02 10:00:00")')
check_true(code, "True datetime_lt")
code = ('p(1) :- builtin:datetime_lt('
'"2014-01-03 10:00:00", "Jan 2, 2014 10:00:00")')
check_false(code, "False datetime_lt")
# lessthanequal
code = ('p(1) :- builtin:datetime_lteq('
'"Jan 1, 2014 10:00:00", "2014-01-02 10:00:00")')
check_true(code, "True datetime_lteq")
code = ('p(1) :- builtin:datetime_lteq('
'"Jan 1, 2014 10:00:00", "2014-01-01 10:00:00")')
check_true(code, "True datetime_lteq")
code = ('p(1) :- builtin:datetime_lteq('
'"2014-01-02 10:00:00", "Jan 1, 2014 10:00:00")')
check_false(code, "False datetime_lteq")
# greaterthan
code = ('p(1) :- builtin:datetime_gt('
'"Jan 5, 2014 10:00:00", "2014-01-02 10:00:00")')
check_true(code, "True datetime_gt")
code = ('p(1) :- builtin:datetime_gt('
'"2014-01-03 10:00:00", "Feb 2, 2014 10:00:00")')
check_false(code, "False datetime_gt")
# greaterthanequal
code = ('p(1) :- builtin:datetime_gteq('
'"Jan 5, 2014 10:00:00", "2014-01-02 10:00:00")')
check_true(code, "True datetime_gteq")
code = ('p(1) :- builtin:datetime_gteq('
'"Jan 5, 2014 10:00:00", "2014-01-05 10:00:00")')
check_true(code, "True datetime_gteq")
code = ('p(1) :- builtin:datetime_gteq('
'"2014-01-02 10:00:00", "Mar 1, 2014 10:00:00")')
check_false(code, "False datetime_gteq")
# equal
code = ('p(1) :- builtin:datetime_equal('
'"Jan 5, 2014 10:00:00", "2014-01-05 10:00:00")')
check_true(code, "True datetime_equal")
code = ('p(1) :- builtin:datetime_equal('
'"Jan 5, 2014 10:00:00", "2014-01-02 10:00:00")')
check_false(code, "False datetime_equal")
# plus
code = ('p(1) :- builtin:datetime_plus('
'"Jan 5, 2014 10:00:00", 3600, "2014-01-05 11:00:00")')
check_true(code, "True datetime_plus")
code = ('p(1) :- builtin:datetime_plus('
'"Jan 5, 2014 10:00:00", "1:00:00", "2014-01-05 11:00:00")')
check_true(code, "True datetime_plus")
code = ('p(1) :- builtin:datetime_plus('
'"Jan 5, 2014 10:00:00", 3600, "2014-01-05 12:00:00")')
check_false(code, "False datetime_plus")
# minus
code = ('p(1) :- builtin:datetime_minus('
'"Jan 5, 2014 10:00:00", "25:00:00", "2014-01-04 09:00:00")')
check_true(code, "True datetime_minus")
code = ('p(1) :- builtin:datetime_minus('
'"Jan 5, 2014 10:00:00", 3600, "2014-01-05 09:00:00")')
check_true(code, "True datetime_minus")
code = ('p(1) :- builtin:datetime_minus('
'"Jan 5, 2014 10:00:00", "9:00:00", "Jan 4, 2014 10:00:00")')
check_false(code, "False datetime_minus")
# to_seconds
code = ('p(1) :- builtin:datetime_to_seconds('
'"Jan 1, 1900 1:00:00", 3600)')
check_true(code, "True datetime_to_seconds")
code = ('p(1) :- builtin:datetime_to_seconds('
'"Jan 1, 1900 1:00:00", 3601)')
check_false(code, "False datetime_to_seconds")
# extract_time
code = ('p(1) :- builtin:extract_time('
'"Jan 1, 1900 1:00:00", "01:00:00")')
check_true(code, "True extract_time")
code = ('p(1) :- builtin:extract_time('
'"Jan 1, 1900 1:00:00", "02:00:00")')
check_false(code, "False extract_time")
# extract_date
code = ('p(1) :- builtin:extract_date('
'"Jan 1, 1900 1:00:00", "1900-01-01")')
check_true(code, "True extract_date")
code = ('p(1) :- builtin:extract_date('
'"Jan 1, 1900 1:00:00", "2000-01-01")')
check_false(code, "False extract_date")
# pack_datetime
code = ('p(1) :- builtin:pack_datetime(2000, 1, 1, 10, 5, 6, '
'"2000-1-1 10:5:6")')
check_true(code, "True pack_datetime")
code = ('p(1) :- builtin:pack_datetime(2000, 1, 1, 10, 5, 6, '
'"2000-1-1 10:5:20")')
check_false(code, "False pack_datetime")
# pack_date
code = ('p(1) :- builtin:pack_date(2000, 1, 1, '
'"2000-1-1")')
check_true(code, "True pack_date")
code = ('p(1) :- builtin:pack_date(2000, 1, 1, '
'"2000-1-2")')
check_false(code, "False pack_date")
# pack_time
code = ('p(1) :- builtin:pack_time(5, 6, 7, '
'"5:6:7")')
check_true(code, "True pack_time")
code = ('p(1) :- builtin:pack_time(5, 6, 7, '
'"10:6:7")')
check_false(code, "False pack_time")
# unpack_datetime
code = ('p(1) :- builtin:unpack_datetime("2000-1-1 10:5:6", '
'2000, 1, 1, 10, 5, 6)')
check_true(code, "True unpack_datetime")
code = ('p(1) :- builtin:unpack_datetime("2000-1-1 10:5:6", '
'2000, 1, 1, 12, 5, 6)')
check_false(code, "False unpack_datetime")
# unpack_date
code = ('p(1) :- builtin:unpack_date("2000-1-1 10:5:6", '
'2000, 1, 1)')
check_true(code, "True unpack_date")
code = ('p(1) :- builtin:unpack_date("2000-1-1 10:5:6", '
'2000, 1, 5)')
check_false(code, "False unpack_date")
# unpack_time
code = ('p(1) :- builtin:unpack_time("2000-1-1 10:5:6", '
'10, 5, 6)')
check_true(code, "True unpack_time")
code = ('p(1) :- builtin:unpack_time("2000-1-1 10:5:6", '
'12, 5, 6)')
check_false(code, "False unpack_time")
# unpack_time
code = 'p(1) :- builtin:now(x)'
check_true(code, "True unpack_time")
#
# Network Address IPv4
#
# ip equal
code = ('p(1) :- builtin:ips_equal("192.0.2.1", "192.0.2.1")')
check_true(code, "True ip_equal")
code = ('p(1) :- builtin:ips_equal("192.0.2.1", "192.0.2.2")')
check_false(code, "False ip_equal")
# ip less than
code = ('p(1) :- builtin:ips_lt("192.0.2.1", "192.0.2.2")')
check_true(code, "True ip_lt")
code = ('p(1) :- builtin:ips_lt("192.0.2.1", "192.0.2.1")')
check_false(code, "False ip_lt")
code = ('p(1) :- builtin:ips_lt("192.0.2.2", "192.0.2.1")')
check_false(code, "False ip_lt")
# ip less than equal
code = ('p(1) :- builtin:ips_lteq("192.0.2.1", "192.0.2.1")')
check_true(code, "True ip_lteq")
code = ('p(1) :- builtin:ips_lteq("192.0.2.1", "192.0.2.2")')
check_true(code, "True ip_lteq")
code = ('p(1) :- builtin:ips_lteq("192.0.2.2", "192.0.2.1")')
check_false(code, "False ip_lteq")
# ip greater than
code = ('p(1) :- builtin:ips_gt("192.0.2.2", "192.0.2.1")')
check_true(code, "True ip_gt")
code = ('p(1) :- builtin:ips_gt("192.0.2.1", "192.0.2.1")')
check_false(code, "False ip_gt")
code = ('p(1) :- builtin:ips_gt("192.0.2.1", "192.0.2.2")')
check_false(code, "False ip_gt")
# ip greater than equal
code = ('p(1) :- builtin:ips_gteq("192.0.2.2", "192.0.2.1")')
check_true(code, "True ip_gteq")
code = ('p(1) :- builtin:ips_gteq("192.0.2.2", "192.0.2.2")')
check_true(code, "True ip_gteq")
code = ('p(1) :- builtin:ips_gteq("192.0.2.1", "192.0.2.2")')
check_false(code, "False ip_gteq")
# networks equal
code = ('p(1) :- builtin:networks_equal("192.0.2.0/24", '
'"192.0.2.112/24")')
check_true(code, "True networks_equal")
code = ('p(1) :- builtin:networks_equal("192.0.2.0/24", '
'"192.0.3.0/24")')
check_false(code, "False networks_equal")
# networks overlap
code = ('p(1) :- builtin:networks_overlap("192.0.2.0/23", '
'"192.0.2.0/24")')
check_true(code, "True networks_overlap")
code = ('p(1) :- builtin:networks_overlap("192.0.2.0/24", '
'"192.0.3.0/24")')
check_false(code, "False networks_overlap")
# ip in network
code = ('p(1) :- builtin:ip_in_network("192.168.0.1", '
'"192.168.0.0/24")')
check_true(code, "True ip_in_network")
code = ('p(1) :- builtin:ip_in_network("192.168.10.1", '
'"192.168.0.0/24")')
check_false(code, "False ip_in_network")
#
# Network Address IPv6
#
# ip equal
code = ('p(1) :- builtin:ips_equal("::ffff:192.0.2.1", '
' "::ffff:192.0.2.1")')
check_true(code, "True ip_equal v6")
code = ('p(1) :- builtin:ips_equal("::ffff:192.0.2.1", '
' "::ffff:192.0.2.2")')
check_false(code, "False ip_equal v6")
# ip less than
code = ('p(1) :- builtin:ips_lt("::ffff:192.0.2.1", '
' "::ffff:192.0.2.2")')
check_true(code, "True ip_lt v6")
code = ('p(1) :- builtin:ips_lt("::ffff:192.0.2.1", '
' "::ffff:192.0.2.1")')
check_false(code, "False ip_lt v6")
code = ('p(1) :- builtin:ips_lt("::ffff:192.0.2.2", '
' "::ffff:192.0.2.1")')
check_false(code, "False ip_lt v6")
# ip less than equal
code = ('p(1) :- builtin:ips_lteq("::ffff:192.0.2.1", '
' "::ffff:192.0.2.1")')
check_true(code, "True ip_lteq v6")
code = ('p(1) :- builtin:ips_lteq("::ffff:192.0.2.1", '
' "::ffff:192.0.2.2")')
check_true(code, "True ip_lteq v6")
code = ('p(1) :- builtin:ips_lteq("::ffff:192.0.2.2", '
' "::ffff:192.0.2.1")')
check_false(code, "False ip_lteq v6")
# ip greater than
code = ('p(1) :- builtin:ips_gt("::ffff:192.0.2.2", '
' "::ffff:192.0.2.1")')
check_true(code, "True ip_gt v6")
code = ('p(1) :- builtin:ips_gt("::ffff:192.0.2.1", '
' "::ffff:192.0.2.1")')
check_false(code, "False ip_gt v6")
code = ('p(1) :- builtin:ips_gt("::ffff:192.0.2.1", '
' "::ffff:192.0.2.2")')
check_false(code, "False ip_gt v6")
# ip greater than equal
code = ('p(1) :- builtin:ips_gteq("::ffff:192.0.2.2", '
' "::ffff:192.0.2.1")')
check_true(code, "True ip_gteq v6")
code = ('p(1) :- builtin:ips_gteq("::ffff:192.0.2.2", '
' "::ffff:192.0.2.2")')
check_true(code, "True ip_gteq v6")
code = ('p(1) :- builtin:ips_gteq("::ffff:192.0.2.1", '
' "::ffff:192.0.2.2")')
check_false(code, "False ip_gteq v6")
# networks equal
code = ('p(1) :- builtin:networks_equal("fe80::ffff:192.0.2.0/24",'
' "fe80::ffff:192.0.2.112/24")')
check_true(code, "True networks_equal v6")
code = ('p(1) :- builtin:networks_equal("fe80::ffff:192.0.2.0/24",'
' "ae80::ffff:192.0.2.0/24")')
check_false(code, "False networks_equal v6")
# networks overlap
code = ('p(1) :- builtin:networks_overlap("fe80::ffff:192.0.2.0/23",'
' "fe80::ffff:192.0.2.0/24")')
check_true(code, "True networks_overlap v6")
code = ('p(1) :- builtin:networks_overlap("fe80::ffff:192.0.2.0/24",'
' "ae80::ffff:192.0.3.0/24")')
check_false(code, "False networks_overlap v6")
# ip in network
code = ('p(1) :- builtin:ip_in_network("fe80::ffff:192.168.0.1",'
' "fe80::ffff:192.168.0.0/24")')
check_true(code, "True ip_in_network v6")
code = ('p(1) :- builtin:ip_in_network("fe80::ffff:192.168.10.1",'
' "ae80::ffff:192.168.10.1/24")')
check_false(code, "False ip_in_network v6")
| apache-2.0 | 4,118,628,128,478,063,000 | 34.406289 | 78 | 0.460956 | false |
dmsovetov/dreemchest | Source/CLI/command_line/env.py | 1 | 5399 | #################################################################################
#
# The MIT License (MIT)
#
# Copyright (c) 2015 Dmitry Sovetov
#
# https://github.com/dmsovetov
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
#################################################################################
from collections import namedtuple
import os
import tempfile
# Define named tuple for environment configuration
Configuration = namedtuple('Configuration', ['home', 'cmake', 'android', 'emscripten'])
# Environment variable that points to a Dreemchest home directory
DREEMCHEST_HOME = 'DREEMCHEST_HOME'
# Environemt variable that points to a CMake folder used by Dreemchest
DREEMCHEST_CMAKE = 'DREEMCHEST_CMAKE'
# Environment variable that points to a CMake bin folder
DREEMCHEST_CMAKE_BIN = 'DREEMCHEST_CMAKE_BIN'
# Environment variable that points to Android SDK used by Dreemchest
DREEMCHEST_ANDROID = 'DREEMCHEST_ANDROID'
# Environment variable that points to Emscripten SDK used by Dreemchest
DREEMCHEST_EMSCRIPTEN = 'DREEMCHEST_EMSCRIPTEN'
class Configuration:
"""An active environment configuration"""
def __init__(self):
# Load home directory
if DREEMCHEST_HOME not in os.environ.keys():
raise Exception("'%s' environment variable should point to a Dreemchest home directory." % DREEMCHEST_HOME)
        self._home = os.environ[DREEMCHEST_HOME]
# Load CMake directory
if DREEMCHEST_CMAKE_BIN not in os.environ.keys():
raise Exception("'%s' environment variable should point to a CMake directory." % DREEMCHEST_CMAKE_BIN)
self._cmake = os.environ[DREEMCHEST_CMAKE_BIN]
# Load Android SDK directory
self._android = None
if DREEMCHEST_ANDROID in os.environ.keys():
self._android = os.environ[DREEMCHEST_ANDROID]
# Load Emscripten SDK directory
self._emscripten = None
if DREEMCHEST_EMSCRIPTEN in os.environ.keys():
self._emscripten = os.environ[DREEMCHEST_EMSCRIPTEN]
@property
def home(self):
"""Returns the Dreemchest home directory"""
return self._home
@property
def cmake(self):
"""Returns CMake home directory"""
return self._cmake
@property
def emscripten(self):
"""Returns the Emscripten SDK home directory"""
return self._emscripten
@property
def emscripten_toolchain(self):
if self.emscripten is None:
return None
return os.path.join(self.emscripten, 'cmake', 'Modules', 'Platform', 'Emscripten.cmake')
@property
def ios_toolchain(self):
"""Returns an iOS toolchain file"""
return os.path.join(self.home, 'CMake', 'Toolchains', 'iOS.cmake')
@property
def android_toolchain(self):
"""Returns an Android toolchain file"""
return os.path.join(self.home, 'CMake', 'Toolchains', 'Android.cmake')
@property
def android(self):
"""Returns the Android SDK home directory"""
return self._android
@property
def android_ndk(self):
"""Returns the Android NDK home directory"""
return os.path.join(self.android, 'ndk-bundle')
@property
def dependencies(self):
"""Returns a directory where all precompiled dependencies are stored"""
return os.path.join(self.build_dir, 'Dependencies')
@property
def build_dir(self):
"""Returns a build directory"""
return os.path.join(self.home, 'Build')
@property
def prebuilt(self):
"""Returns an prebuilt directory path"""
return os.path.join(self.build_dir, 'Prebuilt')
@property
def externals(self):
"""Returns externals source directory"""
return os.path.join(self.home, 'Externals')
@property
def source(self):
"""Returns engine source directory"""
return os.path.join(self.home, 'Source')
@property
def projects(self):
"""Returns the Projects directory path"""
return os.path.join(self.home, 'Projects')
@property
def bootstrap_temp_dir(self):
"""Returns a temporary directory where to store all intermediate artifacts for bootstrap process"""
return os.path.join(tempfile.gettempdir(), 'Bootstrap')
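# Usage sketch (hypothetical): with DREEMCHEST_HOME and DREEMCHEST_CMAKE_BIN
# exported in the shell, a build script could read the relevant paths like so:
#
#     cfg = Configuration()
#     print(cfg.home, cfg.build_dir, cfg.ios_toolchain)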
def load():
"""Loads an active configuration from environment"""
return Configuration() | mit | -1,461,173,353,538,070,500 | 33.394904 | 119 | 0.667346 | false |
cheral/orange3-text | orangecontrib/text/tests/test_pubmed.py | 1 | 11899 | import json
import os
import unittest
from unittest.mock import patch
import numpy as np
from orangecontrib.text.pubmed import (
Pubmed, PUBMED_TEXT_FIELDS,
_mesh_headings_to_class,
_date_to_iso, _corpus_from_records,
_records_to_corpus_entries
)
class MockEntrezHandle:
@staticmethod
def close():
return
class MockEntrez:
"""Used to mock Entrez/Medline reading/parsing methods.
Mocks read (after esearch and epost) and parse (after efetch).
"""
def __init__(self, cache):
self.mock_responses = {}
with open(cache, 'r') as f:
self.mock_responses = json.loads(f.read())
def esearch(self, db, term, **keywds):
return MockEntrezHandle()
def read(self, handle):
return self.mock_responses.get('read')
def efetch(self, db, **keywords):
return MockEntrezHandle()
def epost(self, db, **keywds):
return MockEntrezHandle()
def parse(self, handle):
return self.mock_responses.get('parse')
# Exception mocking.
def esearch_exception(self, db, term, **keywds):
raise IOError
def efetch_exception(self, db, **keywds):
raise Exception
def epost_exception(self, db, **keywds):
raise IOError
CACHE = os.path.join(os.path.dirname(__file__), 'pubmed-cache.txt')
mock_entrez = MockEntrez(CACHE)
def error_callback(exception):
return
def progress_callback(progress=None):
return
class PubmedTests(unittest.TestCase):
EMAIL = '[email protected]'
def setUp(self):
self.pubmed = Pubmed(
self.EMAIL,
progress_callback=progress_callback,
error_callback=error_callback
)
def test_pubmed_object_creation(self):
self.assertRaises(
ValueError,
Pubmed,
'faulty_email'
)
def test_mesh_headings_to_class(self):
input_headings = [
'heading1 & heading2/heading3,heading4/*heading5',
'heading1/heading2/*heading3',
]
self.assertEqual(_mesh_headings_to_class(input_headings), 'heading1')
def test_date_to_iso(self):
# Correct inputs.
input_dates = [
'2015 Nov',
'2015',
'2015 Sep-Oct',
'2015 Fall',
]
correct_results = [
1446336000.0,
1420070400.0,
1441065600.0,
1441065600.0,
]
for date, result in zip(input_dates, correct_results):
self.assertEqual(_date_to_iso(date), result)
# Unexpected inputs.
unexpected_input = '2015 Unexpected'
self.assertWarns(
RuntimeWarning,
_date_to_iso,
unexpected_input,
)
self.assertEqual(type(_date_to_iso(unexpected_input)), type(np.nan))
def test_record_to_corpus(self):
mock_records = [
{
'FAU': ['Mock Author 1', 'Mock Author 2'],
'TI': 'Mock title',
'MH': ['heading1/heading2'],
'AB': 'Mock abstract',
'DP': '2015 Sep',
'PMID': 1,
},
]
correct_metas = np.array([
[
'Mock Author 1 Mock Author 2',
'Mock title',
'heading1/heading2',
'Mock abstract',
'http://www.ncbi.nlm.nih.gov/pubmed/?term=1',
1441065600.0
]
], dtype=object)
correct_classes = np.array([
'heading1'
])
# Perform asserting.
meta_values, class_values = _records_to_corpus_entries(
mock_records,
PUBMED_TEXT_FIELDS
)
corpus = _corpus_from_records(mock_records, PUBMED_TEXT_FIELDS)
self.assertCountEqual(meta_values[0], correct_metas[0])
self.assertCountEqual(class_values, correct_classes)
self.assertIsNotNone(corpus)
@patch('Bio.Entrez.esearch', mock_entrez.esearch)
@patch('Bio.Entrez.read', mock_entrez.read)
def test_pubmed_search_records(self):
test_terms = ['orchid']
authors = []
pub_date_start = '2011/07/07'
pub_date_end = '2014/07/07'
self.pubmed._search_for_records(
terms=test_terms,
authors=authors,
pub_date_start=pub_date_start,
pub_date_end=pub_date_end
)
# The only certain check is to make sure we got all the parameters.
self.assertIsNotNone(self.pubmed.record_id_list)
self.assertIsNotNone(self.pubmed.search_record_count)
self.assertIsNotNone(self.pubmed.search_record_web_env)
self.assertIsNotNone(self.pubmed.search_record_query_key)
# Faulty input check.
self.assertRaises(
ValueError,
self.pubmed._search_for_records,
terms=test_terms,
authors=None,
pub_date_start=pub_date_start,
pub_date_end=pub_date_end
)
@patch('Bio.Entrez.esearch', mock_entrez.esearch)
@patch('Bio.Entrez.read', mock_entrez.read)
@patch('Bio.Entrez.efetch', mock_entrez.efetch)
@patch('Bio.Medline.parse', mock_entrez.parse)
def test_pubmed_retrieve_record_batch(self):
test_terms = ['orchid']
authors = []
pub_date_start = '2011/07/07'
pub_date_end = '2014/07/07'
offset = 0
num_requested_records = 5
# Attempt to retrieve without searching first.
self.assertRaises(
ValueError,
self.pubmed._retrieve_record_batch,
offset,
num_requested_records
)
# Must search for records first.
self.pubmed._search_for_records(
test_terms,
authors,
pub_date_start,
pub_date_end
)
# Retrieve the records.
data = self.pubmed._retrieve_record_batch(
offset,
num_requested_records
)
self.assertEqual(len(data), num_requested_records)
@patch('Bio.Entrez.esearch', mock_entrez.esearch)
@patch('Bio.Entrez.read', mock_entrez.read)
@patch('Bio.Entrez.efetch', mock_entrez.efetch)
@patch('Bio.Medline.parse', mock_entrez.parse)
@patch('Bio.Entrez.epost', mock_entrez.epost)
def test_pubmed_retrieve_records(self):
test_terms = ['orchid']
authors = []
pub_date_start = '2011/07/07'
pub_date_end = '2014/07/07'
num_records = 5
# Must search for records first.
self.pubmed._search_for_records(
test_terms,
authors,
pub_date_start,
pub_date_end
)
# Retrieve the records and build a corpus.
corpus = self.pubmed._retrieve_records(num_records)
self.assertEqual(len(corpus), num_records)
meta_fields = sorted([field_name
for field_name, field_tag
in PUBMED_TEXT_FIELDS])
test_meta_fields = sorted([m.name
for m
in corpus.domain.metas])
self.assertEqual(meta_fields, test_meta_fields)
@patch('Bio.Entrez.esearch', mock_entrez.esearch)
@patch('Bio.Entrez.read', mock_entrez.read)
@patch('Bio.Entrez.efetch', mock_entrez.efetch)
@patch('Bio.Medline.parse', mock_entrez.parse)
@patch('Bio.Entrez.epost', mock_entrez.epost)
def test_pubmed_retrieve_records_no_cache(self):
test_terms = ['orchid']
authors = []
pub_date_start = '2011/07/07'
pub_date_end = '2014/07/07'
num_records = 5
# Must search for records first.
self.pubmed._search_for_records(
test_terms,
authors,
pub_date_start,
pub_date_end
)
# Retrieve the records and build a corpus.
corpus = self.pubmed._retrieve_records(
num_records,
use_cache=False
)
self.assertEqual(len(corpus), num_records)
meta_fields = sorted([field_name
for field_name, field_tag
in PUBMED_TEXT_FIELDS])
test_meta_fields = sorted([m.name
for m
in corpus.domain.metas])
self.assertEqual(meta_fields, test_meta_fields)
@patch('Bio.Entrez.esearch', mock_entrez.esearch)
@patch('Bio.Entrez.read', mock_entrez.read)
@patch('Bio.Entrez.efetch', mock_entrez.efetch)
@patch('Bio.Medline.parse', mock_entrez.parse)
@patch('Bio.Entrez.epost', mock_entrez.epost)
def test_download_records(self):
test_terms = ['orchid']
authors = []
pub_date_start = '2011/07/07'
pub_date_end = '2014/07/07'
num_records = 5
# Retrieve the records and build a corpus.
corpus = self.pubmed.download_records(
test_terms,
authors,
pub_date_start,
pub_date_end,
num_records
)
self.assertEqual(len(corpus), num_records)
meta_fields = sorted([field_name
for field_name, field_tag
in PUBMED_TEXT_FIELDS])
test_meta_fields = sorted([m.name
for m
in corpus.domain.metas])
self.assertEqual(meta_fields, test_meta_fields)
@patch('Bio.Entrez.esearch', mock_entrez.esearch_exception)
def test_entrez_search_exceptions(self):
# Search exception.
test_terms = ['orchid']
authors = []
pub_date_start = '2011/07/07'
pub_date_end = '2014/07/07'
self.assertWarns(
RuntimeWarning,
self.pubmed._search_for_records,
terms=test_terms,
authors=authors,
pub_date_start=pub_date_start,
pub_date_end=pub_date_end
)
@patch('Bio.Entrez.esearch', mock_entrez.esearch)
@patch('Bio.Entrez.read', mock_entrez.read)
@patch('Bio.Entrez.efetch', mock_entrez.efetch_exception)
@patch('Bio.Medline.parse', mock_entrez.parse)
@patch('Bio.Entrez.epost', mock_entrez.epost)
def test_pubmed_retrieve_record_batch_exception(self):
test_terms = ['orchid']
authors = []
pub_date_start = '2011/07/07'
pub_date_end = '2014/07/07'
num_records = 5
# Must search for records first.
self.pubmed._search_for_records(
test_terms,
authors,
pub_date_start,
pub_date_end
)
self.assertWarns(
RuntimeWarning,
self.pubmed._retrieve_records,
num_records,
use_cache=False
)
@patch('Bio.Entrez.esearch', mock_entrez.esearch)
@patch('Bio.Entrez.read', mock_entrez.read)
@patch('Bio.Entrez.efetch', mock_entrez.efetch)
@patch('Bio.Medline.parse', mock_entrez.parse)
@patch('Bio.Entrez.epost', mock_entrez.epost_exception)
def test_pubmed_epost_exception(self):
test_terms = ['orchid']
authors = []
pub_date_start = '2011/07/07'
pub_date_end = '2014/07/07'
num_records = 5
# Must search for records first.
self.pubmed._search_for_records(
test_terms,
authors,
pub_date_start,
pub_date_end
)
self.assertWarns(
RuntimeWarning,
self.pubmed._retrieve_records,
num_records,
use_cache=False
)
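# Convenience entry point (an assumed addition; the suite is normally collected
# by the project's test runner):
if __name__ == '__main__':
    unittest.main()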
| bsd-2-clause | 1,158,506,479,719,767,000 | 28.896985 | 77 | 0.5445 | false |
zhangxiaolins/python_base | essential/report/generators/conf.py | 2 | 1404 | # Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Provides OpenStack config generators
This module defines a class for configuration
generators for generating the model in
:mod:`essential.report.models.conf`.
"""
from essential.config import cfg
import essential.report.models.conf as cm
class ConfigReportGenerator(object):
"""A Configuration Data Generator
This generator returns
:class:`essential.report.models.conf.ConfigModel` ,
by default using the configuration options stored
in :attr:`essential.config.cfg.CONF`, which is where
OpenStack stores everything.
:param cnf: the configuration option object
:type cnf: :class:`essential.config.cfg.ConfigOpts`
"""
def __init__(self, cnf=cfg.CONF):
self.conf_obj = cnf
def __call__(self):
return cm.ConfigModel(self.conf_obj)
| apache-2.0 | 4,018,211,868,987,261,400 | 31.651163 | 78 | 0.722222 | false |
wordpress-mobile/WordPress-iOS | Scripts/localize.py | 1 | 5499 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# This program is free software. It comes without any warranty, to
# the extent permitted by applicable law. You can redistribute it
# and/or modify it under the terms of the Do What The Fuck You Want
# To Public License, Version 2, as published by Sam Hocevar. See
# http://sam.zoy.org/wtfpl/COPYING for more details.
#
# Localize.py - Incremental localization on XCode projects
# João Moreno 2009
# http://joaomoreno.com/
from sys import argv
from codecs import open
from re import compile
from copy import copy
import os
re_translation = compile(r'^"(.+)" = "(.+)";$')
re_comment_single = compile(r'^/(/.*|\*.*\*/)$')
re_comment_start = compile(r'^/\*.*$')
re_comment_end = compile(r'^.*\*/$')
def print_help():
print u"""Usage: merge.py merged_file old_file new_file
Xcode localizable strings merger script. João Moreno 2009."""
class LocalizedString():
def __init__(self, comments, translation):
self.comments, self.translation = comments, translation
self.key, self.value = re_translation.match(self.translation).groups()
def __unicode__(self):
return u'%s%s\n' % (u''.join(self.comments), self.translation)
class LocalizedFile():
def __init__(self, fname=None, auto_read=False):
self.fname = fname
self.strings = []
self.strings_d = {}
if auto_read:
self.read_from_file(fname)
def read_from_file(self, fname=None):
fname = self.fname if fname == None else fname
try:
f = open(fname, encoding='utf_16', mode='r')
except:
print 'File %s does not exist.' % fname
exit(-1)
line = f.readline()
while line and line == u'\n':
line = f.readline()
while line:
comments = [line]
if not re_comment_single.match(line):
while line and not re_comment_end.match(line):
line = f.readline()
comments.append(line)
line = f.readline()
if line and re_translation.match(line):
translation = line
else:
raise Exception('invalid file: %s' % line)
line = f.readline()
while line and line == u'\n':
line = f.readline()
string = LocalizedString(comments, translation)
self.strings.append(string)
self.strings_d[string.key] = string
f.close()
def save_to_file(self, fname=None):
fname = self.fname if fname == None else fname
try:
f = open(fname, encoding='utf_16', mode='w')
except:
print 'Couldn\'t open file %s.' % fname
exit(-1)
for string in self.strings:
f.write(string.__unicode__())
f.close()
def merge_with(self, new):
merged = LocalizedFile()
for string in new.strings:
if self.strings_d.has_key(string.key):
new_string = copy(self.strings_d[string.key])
new_string.comments = string.comments
string = new_string
merged.strings.append(string)
merged.strings_d[string.key] = string
return merged
def merge(merged_fname, old_fname, new_fname):
try:
old = LocalizedFile(old_fname, auto_read=True)
new = LocalizedFile(new_fname, auto_read=True)
except Exception as e:
print 'Error: input files have invalid format. old: %s, new: %s' % (old_fname, new_fname)
print e
merged = old.merge_with(new)
merged.save_to_file(merged_fname)
STRINGS_FILE = 'Localizable.strings'
def localize(path, language, include_pods_and_frameworks):
if "Scripts" in path:
print "Must run script from the root folder"
quit()
os.chdir(path)
language = os.path.join(path, language)
original = merged = language + os.path.sep + STRINGS_FILE
old = original + '.old'
new = original + '.new'
# TODO: This is super ugly, we have to come up with a better way of doing it
if include_pods_and_frameworks:
find_cmd = 'find . ../Pods/WordPress* ../Pods/WPMediaPicker ../WordPressShared/WordPressShared ../Pods/Gutenberg -name "*.m" -o -name "*.swift" | grep -v Vendor | grep -v ./WordPressTest/I18n.swift | grep -v ./WordPressStatsWidgets/Views/Localization/LocalizedStringKey+extension.swift | grep -v Secrets.swift'
else:
find_cmd = 'find . -name "*.m" -o -name "*.swift" | grep -v Vendor | grep -v ./WordPressTest/I18n.swift | grep -v ./WordPressStatsWidgets/Views/Localization/LocalizedStringKey+extension.swift | grep -v Secrets.swift'
filelist = os.popen(find_cmd).read().strip().split('\n')
filelist = '"{0}"'.format('" "'.join(filelist))
if os.path.isfile(original):
os.rename(original, old)
os.system('genstrings -q -o "%s" %s' % (language, filelist))
os.rename(original, new)
merge(merged, old, new)
os.remove(new)
os.remove(old)
else:
os.system('genstrings -q -o "%s" %s' % (language, filelist))
if __name__ == '__main__':
basedir = os.getcwd()
localize(os.path.join(basedir, 'WordPress'), 'Resources/en.lproj', True)
localize(os.path.join(basedir, 'WordPress', 'WordPressTodayWidget'), 'Base.lproj', False)
localize(os.path.join(basedir, 'WordPress', 'WordPressShareExtension'), 'Base.lproj', False)
| gpl-2.0 | 3,877,940,906,966,676,000 | 33.791139 | 318 | 0.599418 | false |
cts-admin/cts | cts/members/management/commands/send_renewal_emails.py | 1 | 1296 | import datetime
from django.conf import settings
from django.core.management.base import BaseCommand
from django.template.loader import render_to_string
from ...models import CorporateMember
from home.tasks import mail_task
class Command(BaseCommand):
def handle(self, *args, **options):
thirty_days_from_now = datetime.date.today() + datetime.timedelta(days=30)
for member in CorporateMember.objects.filter(inactive=False):
if member.get_expiry_date() == thirty_days_from_now:
mail_task(
'Expiring Conservation Technology Solutions Membership for %s' % member.display_name,
render_to_string('members/corporate_member_renewal_email.txt', {
'contact_name': member.contact_name,
'member_name': member.display_name,
'expiry_date': member.get_expiry_date(),
'renewal_link': member.get_renewal_link(),
}),
settings.DEFAULT_FROM_EMAIL,
[
settings.DEFAULT_FROM_EMAIL,
member.contact_email,
'[email protected]'
],
)
| gpl-3.0 | 1,292,665,148,696,779,800 | 40.806452 | 105 | 0.56713 | false |
kmike/django-admin-user-stats | admin_user_stats/base_modules.py | 1 | 2779 | # -*- coding: utf-8 -*-
from datetime import timedelta
from django.db.models import Count
from django.utils.translation import ugettext_lazy as _
try:
from django.utils.timezone import now
except ImportError:
from datetime import datetime
now = datetime.now
from qsstats import QuerySetStats
from admin_tools.dashboard import modules
class BaseChart(modules.DashboardModule):
"""
Dashboard module with user registration charts.
With default values it is suited best for 2-column dashboard layouts.
"""
title = _('Registration chart')
template = 'admin_user_stats/modules/chart.html'
chart_size = "580x100"
days = None
values_count = 30
interval = 'days'
queryset = None
date_field = 'date_joined'
aggregate = Count('id')
def is_empty(self):
return False
def __init__(self, *args, **kwargs):
super(BaseChart, self).__init__(*args, **kwargs)
if self.days is None:
self.days = {'days': self.values_count, 'weeks': self.values_count*7, 'months': self.values_count*30, 'years': self.values_count*365}[self.interval]
self.data = self.get_data(self.interval, self.days)
self.prepare_template_data(self.data)
def get_caption(self, dt):
return {
'days': dt.day,
'months': dt.strftime("%b"),
'weeks': dt.strftime('%W'),
'years': dt.strftime('%Y'),
}[self.interval]
# @cached(60*5)
def get_data(self, interval, days):
""" Returns an array with new users count per interval """
stats = QuerySetStats(self.queryset, self.date_field, aggregate = self.aggregate)
today = now()
begin = today - timedelta(days=days-1)
return stats.time_series(begin, today+timedelta(days=1), interval)
def prepare_template_data(self, data):
""" Prepares data for template (it is passed as module attributes) """
self.captions = [self.get_caption(t[0]) for t in data]
self.values = [t[1] for t in data]
self.max_value = max(self.values)
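# A minimal subclass sketch (hypothetical, not part of this module): a concrete
# chart for new django.contrib.auth users could be declared as
#
#     from django.contrib.auth.models import User
#
#     class UserRegistrationChart(BaseChart):
#         title = _('New Users')
#         queryset = User.objects.all()
#         date_field = 'date_joined'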
class BaseCharts(modules.Group):
""" Group module with 3 default registration charts """
title = _('New Users')
chart_model = BaseChart
def __init__(self, *args, **kwargs):
kwargs.setdefault('children', self.get_charts())
super(BaseCharts, self).__init__(*args, **kwargs)
def get_charts(self):
""" Returns 3 basic chart modules (per-day, per-week and per-month) """
return [
self.chart_model(_('By Day'), interval='days'),
self.chart_model(_('By Week'), interval='weeks'),
self.chart_model(_('By Month'), interval='months'),
self.chart_model(_('By Year'), interval='years'),
]
| mit | 5,017,720,829,543,766,000 | 32.481928 | 160 | 0.617488 | false |
eirannejad/pyRevit | pyrevitlib/rpw/__revit.py | 1 | 5945 | """
The main rpw namespace and rpw.revit provide you with most of the imports will
need.
>>> from rpw import revit, db, ui
>>> db.Element(SomeElement)
>>> ui.Selection()
>>> revit.doc
>>> revit.uidoc.ActiveView
Revit Namespaces are also available:
>>> from rpw import DB, UI
>>> DB.ElementId(00000)
>>> UI.TaskDialog
In summary, if you use rpw, this could potentially be the only import line
you would need:
>>> from rpw import revit, db, ui, DB, UI
""" #
import rpw
from rpw.utils.dotnet import clr, Process
from rpw.utils.logger import logger
from rpw.base import BaseObject
class Revit(BaseObject):
"""
Revit Application Wrapper
Note:
The module path for the Revit Wrapper and its namespaces is ``rpw.__revit.Revit``.
However, the ``Revit()`` is always instantiated on the initialization of rpw,
and is stored along with the ``DB`` and ``UI`` namespaces in the
root of rpw module.
In other words, to use this wrapper all you need is to import
``from rpw import revit``
>>> from rpw import revit
>>> revit.doc
<Autodesk.Revit.DB.Document>
>>> revit.username
gtalarico
>>> revit.host
'Dynamo'
"""
class HOSTS():
RPS = 'RPS'
DYNAMO = 'Dynamo'
def __init__(self):
try:
self.uiapp = __revit__
self._host = Revit.HOSTS.RPS
except NameError:
try:
# Try Getting handler from Dynamo RevitServices
self.uiapp = self.find_dynamo_uiapp()
self._host = Revit.HOSTS.DYNAMO
except Exception as errmsg:
logger.warning('Revit Application handle could not be found')
try:
# Add DB UI Import to globals so it can be imported by rpw
clr.AddReference('RevitAPI')
clr.AddReference('RevitAPIUI')
from Autodesk.Revit import DB, UI
globals().update({'DB': DB, 'UI': UI})
except Exception:
# Replace Globals with Mock Objects for Sphinx and ipy direct exec.
logger.warning('RevitAPI References could not be added')
from rpw.utils.sphinx_compat import MockObject
globals().update({'DB': MockObject(fullname='Autodesk.Revit.DB'),
'UI': MockObject(fullname='Autodesk.Revit.DB')})
self.uiapp = MockObject(fullname='Autodesk.Revit.UI.UIApplication')
self._host = None
def find_dynamo_uiapp(self):
clr.AddReference("RevitServices")
import RevitServices
from RevitServices.Persistence import DocumentManager
import sys
sys.path.append(r'C:\Program Files (x86)\IronPython 2.7\Lib')
return DocumentManager.Instance.CurrentUIApplication
@property
def host(self):
""" Host is set based on how revit handle was found.
Returns:
Host (str): Revit Application Host ['RPS', 'Dynamo']
"""
return self._host
def open(self, path):
""" Opens New Document """
@property
def doc(self):
""" Returns: uiapp.ActiveUIDocument.Document """
return getattr(self.uiapp.ActiveUIDocument, 'Document', None)
@property
def uidoc(self):
""" Returns: uiapp.ActiveUIDocument """
return getattr(self.uiapp, 'ActiveUIDocument', None)
@property
def active_view(self):
""" Returns: uidoc.ActiveView """
return rpw.db.Element(self.uidoc.ActiveView)
@active_view.setter
def active_view(self, view_reference):
self.uidoc.ActiveView = view_reference
@property
def app(self):
""" Returns: uidoc.Application """
return self.uiapp.Application
@property
def docs(self):
""" Returns: uidoc.Application.Documents """
return [doc for doc in self.app.Documents]
@property
def username(self):
""" Returns: uidoc.Application.Username """
return self.uiapp.Application.Username
@property
def version(self):
""" Returns: uidoc.Application.Username """
return RevitVersion(self.uiapp)
@property
def process(self):
""" Returns: Process.GetCurrentProcess() """
return Process.GetCurrentProcess()
@property
def process_id(self):
""" Returns: Process.GetCurrentProcess() """
return self.process.Id
@property
def process_name(self):
""" Returns: Process.GetCurrentProcess() """
return self.process.ProcessName
def __repr__(self):
return '<{version} [{process}:{pid}]>'.format(version=self.version,
process=self.process_name,
pid=self.process_id)
# Check what this is
# @property
# def process(self):
# clr.AddReferenceByPartialName('System.Windows.Forms')
# # noinspection PyUnresolvedReferences
# from System.Windows.Forms import Screen
# return Screen.FromHandle(Process.GetCurrentProcess().MainWindowHandle)
class RevitVersion():
def __init__(self, uiapp):
self.uiapp = uiapp
@property
def year(self):
return self.uiapp.Application.VersionNumber
@property
def name(self):
return self.uiapp.Application.VersionName
@property
def build(self):
return self.uiapp.Application.VersionBuild
def __lt__(self, other):
""" Handle Version Comparison Logic"""
        raise NotImplementedError
def __gt__(self, other):
""" Handle Version Comparison Logic"""
        raise NotImplementedError
def __repr__(self):
return '<Version: {year}: {build}>'.format(year=self.name,
build=self.build)
def __str__(self):
return '{name}:{build}'.format(name=self.name, build=self.build)
revit = Revit()
| gpl-3.0 | 7,026,033,917,477,913,000 | 28.142157 | 90 | 0.601346 | false |
LAST-EBD/Consultas | RandomlyMovePolygons.py | 1 | 5556 | import fiona, shapely, logging, sys, os, random
from shapely import affinity, speedups
from shapely.geometry import mapping, shape, Polygon
speedups.enable()
logging.basicConfig(stream=sys.stderr, level=logging.INFO)
class randomly_move():
    '''This class randomly moves a series of polygons so that they end up inside a frame shapefile.'''
def __init__(self, shape, marco):
self.shape = shape
self.marco = marco
self.out = os.path.join(r'O:\consultas\shapes_moved', os.path.split(self.shape)[1])
self.moves = {0: 'NW', 1: 'NE', 2: 'SW', 3: 'SE'}
self.rndm = random.randrange(0,4)
self.diffX = 0
self.diffY = 0
self.count = 1
print('shape:', self.shape, '\nmarco: ', self.marco, '\nsalida:', self.out)
print('Nos movemos hacia el:', self.moves[self.rndm])
def get_extent_shapely(self, shp):
shp = fiona.open(shp)
#print('zone:', self.moves[self.rndm])
#GETTING THE GEOMETRY (COORDINATES)
feature1 = shp.next()
geom1 = feature1['geometry']
a1 = Polygon(geom1['coordinates'][0])
Oeste, Este, Norte, Sur = a1.bounds[0], a1.bounds[2], a1.bounds[3], a1.bounds[1]
#return(Oeste, Este, Norte, Sur)
move = {'NW': (Norte, Oeste), 'NE': (Norte, Este), 'SW': (Sur, Oeste), 'SE': (Sur, Este)}
return move[self.moves[self.rndm]]
def get_diff(self):
        '''Compute the difference between the shape's coordinates and the frame's,
        in whichever random direction was drawn. Returns an (X, Y) tuple with the
        difference, so get_diff()[0] is the X difference and get_diff()[1] the Y difference.'''
frameM = self.get_extent_shapely(self.marco)
#print(frameM)
NorteM, OesteM = frameM[0], frameM[1]
frameS = self.get_extent_shapely(self.shape)
#print(frameS)
NorteS, OesteS = frameS[0], frameS[1]
self.diffX = OesteM - OesteS
self.diffY = NorteM - NorteS
return(self.diffX, self.diffY)
def new_geom(self):
with fiona.open(self.shape, 'r') as source:
# **source.meta is a shortcut to get the crs, driver, and schema
# keyword arguments from the source Collection.
with fiona.open(self.out, 'w', **source.meta) as sink:
for f in source:
#print(f)
try:
feature1 = f['geometry']['coordinates'][0]
#geom1 = feature1['geometry']['coordinates']
#print(feature1)
#coords = geom1['coordinates'][0]
                        # Compute a random offset for moving the shape
X_offset = random.uniform(0.1, self.get_diff()[0])
Y_offset = random.uniform(0.1, self.get_diff()[1])
#print(X_offset, Y_offset)
                        # Build the new coordinate list for the moved shape
#geom2 = [(X_offset + i[0], Y_offset + i[1]) for i in feature1]
new_shape = Polygon(feature1)
                        # Try rotating the shape by a random angle
rotated_a = affinity.rotate(new_shape, random.randint(0, 360))
                        # Try translating it with shapely (works either way)
rotated_b = shapely.affinity.translate(rotated_a, X_offset, Y_offset)
                        # Check that it falls inside the frame without intersecting
if self.check(rotated_b) == True:
f['geometry'] = mapping(rotated_b)
sink.write(f)
else:
self.count += 1
f['geometry'] = mapping(rotated_b)
sink.write(f)
self.new_geom()
#print('intersecta')
except Exception as e:
# Writing uncleanable features to a different shapefile
# is another option.
print('error', e)
logging.exception("Error cleaning feature %s:", f['id'])
def check(self, ncoords):
        '''Check whether the shape we are using is contained within the frame.'''
shape2 = fiona.open(self.marco)
feature2 = shape2.next()
geom2 = feature2['geometry']['coordinates'][0]
a2 = Polygon(geom2)
return(ncoords.within(a2))
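    # Usage sketch (hypothetical shapefile paths): move every polygon of a
    # shapefile to a random position inside the frame and write the result.
    #
    #     mover = randomly_move(r'C:\datos\parcelas.shp', r'C:\datos\marco.shp')
    #     mover.new_geom()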
    def run(self):
        # Read the first feature of the input shape and test whether it lies inside the frame.
        with fiona.open(self.shape) as src:
            geom = Polygon(next(iter(src))['geometry']['coordinates'][0])
        if self.check(geom):
            print('El shape esta totalmente incluido dentro del marco. La diferencia es: ')
            print(self.get_diff())
        else:
print('El shape no esta incluido dentro del marco') | mit | 1,503,735,368,378,538,500 | 37.324138 | 125 | 0.474622 | false |
wadester/wh_test_py | gdbm_test.py | 1 | 1061 | #!/usr/bin/env python
# Module: gdbm_test.py
# Purpose: gdbm test
# Date: N/A
# Notes:
# 1) Reference:
# https://docs.python.org/2/library/gdbm.html
#
import gdbm
import random as r
mydb="testdb.gdbm"
rcnt=10
print "creating test db",mydb, "with ", rcnt, "records"
db=gdbm.open(mydb, 'c')
for x in range(0,rcnt):
key="%03d" % x
val="%10f" % r.random()
print "K[v]=", key, '[', val, ']'
db[key]=val
print "using some dict methods"
keys=db.keys()
print "Keys=", keys
ll=len(db)
print "DB size=", ll
print "testing for key 000"
if ('000' in db):
print "Key 000 found"
else:
print "key 000 not found"
print "deleting key"
del db['000']
if ('000' in db):
print "Key 000 found"
else:
print "key 000 not found"
ll=len(db)
print "DB size=", ll
# shrink the DB
db.reorganize()
db.close()
print "opening and looking through all keys"
db=gdbm.open(mydb, 'r')
# use iteritems with anydbm
#for k,v in db.iteritems():
k=db.firstkey()
while k != None:
v=db[k]
print k, ' ', v
k = db.nextkey(k)
db.close()
| gpl-2.0 | 1,135,733,845,140,344,400 | 15.578125 | 55 | 0.618285 | false |
DarthMaulware/EquationGroupLeaks | Leak #5 - Lost In Translation/windows/Resources/Python/Core/Lib/py_compile.py | 1 | 6128 | # uncompyle6 version 2.9.10
# Python bytecode 2.7 (62211)
# Decompiled from: Python 3.6.0b2 (default, Oct 11 2016, 05:27:10)
# [GCC 6.2.0 20161005]
# Embedded file name: py_compile.py
"""Routine to "compile" a .py file to a .pyc (or .pyo) file.
This module has intimate knowledge of the format of .pyc files.
"""
import __builtin__
import imp
import marshal
import os
import sys
import traceback
MAGIC = imp.get_magic()
__all__ = [
'compile', 'main', 'PyCompileError']
class PyCompileError(Exception):
"""Exception raised when an error occurs while attempting to
compile the file.
To raise this exception, use
raise PyCompileError(exc_type,exc_value,file[,msg])
where
       exc_type:   exception type to be used in error message
                   type name can be accessed as class variable
                   'exc_type_name'
       exc_value:  exception value to be used in error message
                   can be accessed as class variable 'exc_value'
       file:       name of file being compiled to be used in error message
                   can be accessed as class variable 'file'
       msg:        string message to be written as error message
                   If no value is given, a default exception message will be given,
                   consistent with 'standard' py_compile output.
                   message (or default) can be accessed as class variable 'msg'
"""
def __init__(self, exc_type, exc_value, file, msg=''):
exc_type_name = exc_type.__name__
if exc_type is SyntaxError:
tbtext = ''.join(traceback.format_exception_only(exc_type, exc_value))
errmsg = tbtext.replace('File "<string>"', 'File "%s"' % file)
else:
errmsg = 'Sorry: %s: %s' % (exc_type_name, exc_value)
Exception.__init__(self, msg or errmsg, exc_type_name, exc_value, file)
self.exc_type_name = exc_type_name
self.exc_value = exc_value
self.file = file
self.msg = msg or errmsg
def __str__(self):
return self.msg
def wr_long(f, x):
"""Internal; write a 32-bit int to a file in little-endian order."""
f.write(chr(x & 255))
f.write(chr(x >> 8 & 255))
f.write(chr(x >> 16 & 255))
f.write(chr(x >> 24 & 255))
def compile(file, cfile=None, dfile=None, doraise=False):
"""Byte-compile one Python source file to Python bytecode.
Arguments:
file: source filename
cfile: target filename; defaults to source with 'c' or 'o' appended
('c' normally, 'o' in optimizing mode, giving .pyc or .pyo)
dfile: purported filename; defaults to source (this is the filename
that will show up in error messages)
doraise: flag indicating whether or not an exception should be
raised when a compile error is found. If an exception
occurs and this flag is set to False, a string
indicating the nature of the exception will be printed,
and the function will return to the caller. If an
exception occurs and this flag is set to True, a
PyCompileError exception will be raised.
Note that it isn't necessary to byte-compile Python modules for
execution efficiency -- Python itself byte-compiles a module when
it is loaded, and if it can, writes out the bytecode to the
corresponding .pyc (or .pyo) file.
However, if a Python installation is shared between users, it is a
good idea to byte-compile all modules upon installation, since
other users may not be able to write in the source directories,
and thus they won't be able to write the .pyc/.pyo file, and then
they would be byte-compiling every module each time it is loaded.
This can slow down program start-up considerably.
See compileall.py for a script/module that uses this module to
byte-compile all installed files (or all files in selected
directories).
"""
with open(file, 'U') as f:
try:
timestamp = long(os.fstat(f.fileno()).st_mtime)
except AttributeError:
timestamp = long(os.stat(file).st_mtime)
codestring = f.read()
try:
codeobject = __builtin__.compile(codestring, dfile or file, 'exec')
except Exception as err:
py_exc = PyCompileError(err.__class__, err.args, dfile or file)
if doraise:
raise py_exc
else:
sys.stderr.write(py_exc.msg + '\n')
return
if cfile is None:
cfile = file + (__debug__ and 'c' or 'o')
with open(cfile, 'wb') as fc:
fc.write('\x00\x00\x00\x00')
wr_long(fc, timestamp)
marshal.dump(codeobject, fc)
fc.flush()
fc.seek(0, 0)
fc.write(MAGIC)
return
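# Usage sketch (hypothetical file name): byte-compile one module and raise
# PyCompileError on syntax errors instead of printing them to stderr.
#
#     compile('mymodule.py', doraise=True)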
def main(args=None):
"""Compile several source files.
The files named in 'args' (or on the command line, if 'args' is
not specified) are compiled and the resulting bytecode is cached
in the normal manner. This function does not search a directory
structure to locate source files; it only compiles files named
explicitly. If '-' is the only parameter in args, the list of
files is taken from standard input.
"""
if args is None:
args = sys.argv[1:]
rv = 0
if args == ['-']:
while True:
filename = sys.stdin.readline()
if not filename:
break
filename = filename.rstrip('\n')
try:
compile(filename, doraise=True)
except PyCompileError as error:
rv = 1
sys.stderr.write('%s\n' % error.msg)
except IOError as error:
rv = 1
sys.stderr.write('%s\n' % error)
else:
for filename in args:
try:
compile(filename, doraise=True)
except PyCompileError as error:
rv = 1
sys.stderr.write(error.msg)
return rv
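# Command-line sketch (hypothetical module names): pass source files as
# arguments, or a single '-' to read the list of files from standard input.
#
#     python py_compile.py module_a.py module_b.py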
if __name__ == '__main__':
sys.exit(main()) | unlicense | 8,292,061,942,146,150,000 | 33.627119 | 84 | 0.598074 | false |
loveisbug/liveshow-sh | roll.py | 1 | 1679 | # -*- coding: utf-8 -*-
import urllib
from urllib.request import urlopen
import html.parser as h
from bs4 import BeautifulSoup
import sys
import time
import io
import re
reg =re.compile(r'\d+')
list = ['0', '10', '20']
for s in list:
url = ('https://site.douban.com/maosh/widget/events/1441569/?start='+s)
urlrequest = urlopen(url)
parser = BeautifulSoup(urlrequest, "html.parser")
elist = parser.find('div', 'events-list-s').findAll('li', 'item')
for event in elist:
urlevent = event.findNext('a')['href']
with open('aaa.txt', 'a', encoding='utf-8') as detail:
print(urlevent, file=detail)
detailrequest = urlopen(urlevent)
Detailparser = BeautifulSoup(detailrequest, 'html.parser')
DetailInfolist = Detailparser.find('div', 'event-info')
x = DetailInfolist.contents[1]
x1 = DetailInfolist.findAll('div', 'event-detail')
            print (DetailInfolist.findNext('h1').text.strip(), file=detail)
print (DetailInfolist.findNext('li','calendar-str-item ').text,file=detail)
# print(x.find('h1'))
# print (x1[3].reg)
# print (x1[2].text.split('\n').split(' '))
print (x1[2].text.replace('\t','').replace('\n','').replace(' ','').replace('\xa0','').split('\n'), file=detail)
print('\n', file=detail)
        # # This line prints the price; it has a syntax error, which would keep the rest of the program from running properly;
# print (DetailInfolist.findNext('span', 'tickets-info-price').text.split(' ')[1]+'\n',file=detail)
# print (DetailInfolist.find(span={itemprop:'tickets-info-price'}).text,file=detail) | mit | -884,590,390,812,692,500 | 41.947368 | 124 | 0.599632 | false |
dssg/wikienergy | disaggregator/build/pandas/pandas/tests/test_frame.py | 1 | 552242 | # -*- coding: utf-8 -*-
from __future__ import print_function
# pylint: disable-msg=W0612,E1101
from copy import deepcopy
from datetime import datetime, timedelta, time
import sys
import operator
import re
import csv
import nose
import functools
import itertools
from itertools import product
from distutils.version import LooseVersion
from pandas.compat import(
map, zip, range, long, lrange, lmap, lzip,
OrderedDict, u, StringIO
)
from pandas import compat
from numpy import random, nan
from numpy.random import randn
import numpy as np
import numpy.ma as ma
from numpy.testing import assert_array_equal
import numpy.ma.mrecords as mrecords
import pandas.core.nanops as nanops
import pandas.core.common as com
import pandas.core.format as fmt
import pandas.core.datetools as datetools
from pandas import (DataFrame, Index, Series, notnull, isnull,
MultiIndex, DatetimeIndex, Timestamp, date_range,
read_csv, timedelta_range, Timedelta,
option_context)
import pandas as pd
from pandas.parser import CParserError
from pandas.util.misc import is_little_endian
from pandas.util.testing import (assert_almost_equal,
assert_series_equal,
assert_frame_equal,
assertRaisesRegexp,
assertRaises,
makeCustomDataframe as mkdf,
ensure_clean)
from pandas.core.indexing import IndexingError
from pandas.core.common import PandasError
import pandas.util.testing as tm
import pandas.lib as lib
from numpy.testing.decorators import slow
#---------------------------------------------------------------------
# DataFrame test cases
JOIN_TYPES = ['inner', 'outer', 'left', 'right']
MIXED_FLOAT_DTYPES = ['float16','float32','float64']
MIXED_INT_DTYPES = ['uint8','uint16','uint32','uint64','int8','int16',
'int32','int64']
def _check_mixed_float(df, dtype = None):
# float16 are most likely to be upcasted to float32
dtypes = dict(A = 'float32', B = 'float32', C = 'float16', D = 'float64')
if isinstance(dtype, compat.string_types):
dtypes = dict([ (k,dtype) for k, v in dtypes.items() ])
elif isinstance(dtype, dict):
dtypes.update(dtype)
if dtypes.get('A'):
assert(df.dtypes['A'] == dtypes['A'])
if dtypes.get('B'):
assert(df.dtypes['B'] == dtypes['B'])
if dtypes.get('C'):
assert(df.dtypes['C'] == dtypes['C'])
if dtypes.get('D'):
assert(df.dtypes['D'] == dtypes['D'])
def _check_mixed_int(df, dtype = None):
dtypes = dict(A = 'int32', B = 'uint64', C = 'uint8', D = 'int64')
if isinstance(dtype, compat.string_types):
dtypes = dict([ (k,dtype) for k, v in dtypes.items() ])
elif isinstance(dtype, dict):
dtypes.update(dtype)
if dtypes.get('A'):
assert(df.dtypes['A'] == dtypes['A'])
if dtypes.get('B'):
assert(df.dtypes['B'] == dtypes['B'])
if dtypes.get('C'):
assert(df.dtypes['C'] == dtypes['C'])
if dtypes.get('D'):
assert(df.dtypes['D'] == dtypes['D'])
class CheckIndexing(object):
_multiprocess_can_split_ = True
def test_getitem(self):
# slicing
sl = self.frame[:20]
self.assertEqual(20, len(sl.index))
# column access
for _, series in compat.iteritems(sl):
self.assertEqual(20, len(series.index))
self.assertTrue(tm.equalContents(series.index, sl.index))
for key, _ in compat.iteritems(self.frame._series):
self.assertIsNotNone(self.frame[key])
self.assertNotIn('random', self.frame)
with assertRaisesRegexp(KeyError, 'random'):
self.frame['random']
df = self.frame.copy()
df['$10'] = randn(len(df))
ad = randn(len(df))
df['@awesome_domain'] = ad
self.assertRaises(KeyError, df.__getitem__, 'df["$10"]')
res = df['@awesome_domain']
assert_array_equal(ad, res.values)
def test_getitem_dupe_cols(self):
df = DataFrame([[1, 2, 3], [4, 5, 6]], columns=['a', 'a', 'b'])
try:
df[['baf']]
except KeyError:
pass
else:
self.fail("Dataframe failed to raise KeyError")
def test_get(self):
b = self.frame.get('B')
assert_series_equal(b, self.frame['B'])
self.assertIsNone(self.frame.get('foo'))
assert_series_equal(self.frame.get('foo', self.frame['B']),
self.frame['B'])
# None
# GH 5652
for df in [DataFrame(), DataFrame(columns=list('AB')), DataFrame(columns=list('AB'),index=range(3)) ]:
result = df.get(None)
self.assertIsNone(result)
def test_getitem_iterator(self):
idx = iter(['A', 'B', 'C'])
result = self.frame.ix[:, idx]
expected = self.frame.ix[:, ['A', 'B', 'C']]
assert_frame_equal(result, expected)
def test_getitem_list(self):
self.frame.columns.name = 'foo'
result = self.frame[['B', 'A']]
result2 = self.frame[Index(['B', 'A'])]
expected = self.frame.ix[:, ['B', 'A']]
expected.columns.name = 'foo'
assert_frame_equal(result, expected)
assert_frame_equal(result2, expected)
self.assertEqual(result.columns.name, 'foo')
with assertRaisesRegexp(KeyError, 'not in index'):
self.frame[['B', 'A', 'food']]
with assertRaisesRegexp(KeyError, 'not in index'):
self.frame[Index(['B', 'A', 'foo'])]
# tuples
df = DataFrame(randn(8, 3),
columns=Index([('foo', 'bar'), ('baz', 'qux'),
('peek', 'aboo')], name=['sth', 'sth2']))
result = df[[('foo', 'bar'), ('baz', 'qux')]]
expected = df.ix[:, :2]
assert_frame_equal(result, expected)
self.assertEqual(result.columns.names, ['sth', 'sth2'])
def test_setitem_list(self):
self.frame['E'] = 'foo'
data = self.frame[['A', 'B']]
self.frame[['B', 'A']] = data
assert_series_equal(self.frame['B'], data['A'])
assert_series_equal(self.frame['A'], data['B'])
with assertRaisesRegexp(ValueError, 'Columns must be same length as key'):
data[['A']] = self.frame[['A', 'B']]
with assertRaisesRegexp(ValueError, 'Length of values does not match '
'length of index'):
data['A'] = range(len(data.index) - 1)
df = DataFrame(0, lrange(3), ['tt1', 'tt2'], dtype=np.int_)
df.ix[1, ['tt1', 'tt2']] = [1, 2]
result = df.ix[1, ['tt1', 'tt2']]
expected = Series([1, 2], df.columns, dtype=np.int_)
assert_series_equal(result, expected)
df['tt1'] = df['tt2'] = '0'
df.ix[1, ['tt1', 'tt2']] = ['1', '2']
result = df.ix[1, ['tt1', 'tt2']]
expected = Series(['1', '2'], df.columns)
assert_series_equal(result, expected)
def test_setitem_list_not_dataframe(self):
data = np.random.randn(len(self.frame), 2)
self.frame[['A', 'B']] = data
assert_almost_equal(self.frame[['A', 'B']].values, data)
def test_setitem_list_of_tuples(self):
tuples = lzip(self.frame['A'], self.frame['B'])
self.frame['tuples'] = tuples
result = self.frame['tuples']
expected = Series(tuples, index=self.frame.index)
assert_series_equal(result, expected)
def test_setitem_mulit_index(self):
# GH7655, test that assigning to a sub-frame of a frame
# with multi-index columns aligns both rows and columns
it = ['jim', 'joe', 'jolie'], ['first', 'last'], \
['left', 'center', 'right']
cols = MultiIndex.from_product(it)
index = pd.date_range('20141006',periods=20)
vals = np.random.randint(1, 1000, (len(index), len(cols)))
df = pd.DataFrame(vals, columns=cols, index=index)
i, j = df.index.values.copy(), it[-1][:]
np.random.shuffle(i)
df['jim'] = df['jolie'].loc[i, ::-1]
assert_frame_equal(df['jim'], df['jolie'])
np.random.shuffle(j)
df[('joe', 'first')] = df[('jolie', 'last')].loc[i, j]
assert_frame_equal(df[('joe', 'first')], df[('jolie', 'last')])
np.random.shuffle(j)
df[('joe', 'last')] = df[('jolie', 'first')].loc[i, j]
assert_frame_equal(df[('joe', 'last')], df[('jolie', 'first')])
def test_inplace_ops_alignment(self):
# inplace ops / ops alignment
# GH 8511
columns = list('abcdefg')
X_orig = DataFrame(np.arange(10*len(columns)).reshape(-1,len(columns)), columns=columns, index=range(10))
Z = 100*X_orig.iloc[:,1:-1].copy()
block1 = list('bedcf')
subs = list('bcdef')
# add
X = X_orig.copy()
result1 = (X[block1] + Z).reindex(columns=subs)
X[block1] += Z
result2 = X.reindex(columns=subs)
X = X_orig.copy()
result3 = (X[block1] + Z[block1]).reindex(columns=subs)
X[block1] += Z[block1]
result4 = X.reindex(columns=subs)
assert_frame_equal(result1, result2)
assert_frame_equal(result1, result3)
assert_frame_equal(result1, result4)
# sub
X = X_orig.copy()
result1 = (X[block1] - Z).reindex(columns=subs)
X[block1] -= Z
result2 = X.reindex(columns=subs)
X = X_orig.copy()
result3 = (X[block1] - Z[block1]).reindex(columns=subs)
X[block1] -= Z[block1]
result4 = X.reindex(columns=subs)
assert_frame_equal(result1, result2)
assert_frame_equal(result1, result3)
assert_frame_equal(result1, result4)
def test_inplace_ops_identity(self):
# GH 5104
# make sure that we are actually changing the object
s_orig = Series([1, 2, 3])
df_orig = DataFrame(np.random.randint(0,5,size=10).reshape(-1,5))
# no dtype change
s = s_orig.copy()
s2 = s
s += 1
assert_series_equal(s,s2)
assert_series_equal(s_orig+1,s)
self.assertIs(s,s2)
self.assertIs(s._data,s2._data)
df = df_orig.copy()
df2 = df
df += 1
assert_frame_equal(df,df2)
assert_frame_equal(df_orig+1,df)
self.assertIs(df,df2)
self.assertIs(df._data,df2._data)
# dtype change
s = s_orig.copy()
s2 = s
s += 1.5
assert_series_equal(s,s2)
assert_series_equal(s_orig+1.5,s)
df = df_orig.copy()
df2 = df
df += 1.5
assert_frame_equal(df,df2)
assert_frame_equal(df_orig+1.5,df)
self.assertIs(df,df2)
self.assertIs(df._data,df2._data)
# mixed dtype
arr = np.random.randint(0,10,size=5)
df_orig = DataFrame({'A' : arr.copy(), 'B' : 'foo'})
df = df_orig.copy()
df2 = df
df['A'] += 1
expected = DataFrame({'A' : arr.copy()+1, 'B' : 'foo'})
assert_frame_equal(df,expected)
assert_frame_equal(df2,expected)
self.assertIs(df._data,df2._data)
df = df_orig.copy()
df2 = df
df['A'] += 1.5
expected = DataFrame({'A' : arr.copy()+1.5, 'B' : 'foo'})
assert_frame_equal(df,expected)
assert_frame_equal(df2,expected)
self.assertIs(df._data,df2._data)
def test_getitem_boolean(self):
# boolean indexing
d = self.tsframe.index[10]
indexer = self.tsframe.index > d
indexer_obj = indexer.astype(object)
subindex = self.tsframe.index[indexer]
subframe = self.tsframe[indexer]
self.assert_numpy_array_equal(subindex, subframe.index)
with assertRaisesRegexp(ValueError, 'Item wrong length'):
self.tsframe[indexer[:-1]]
subframe_obj = self.tsframe[indexer_obj]
assert_frame_equal(subframe_obj, subframe)
with tm.assertRaisesRegexp(ValueError, 'boolean values only'):
self.tsframe[self.tsframe]
# test that Series work
indexer_obj = Series(indexer_obj, self.tsframe.index)
subframe_obj = self.tsframe[indexer_obj]
assert_frame_equal(subframe_obj, subframe)
# test that Series indexers reindex
import warnings
warnings.filterwarnings(action='ignore', category=UserWarning)
indexer_obj = indexer_obj.reindex(self.tsframe.index[::-1])
subframe_obj = self.tsframe[indexer_obj]
assert_frame_equal(subframe_obj, subframe)
warnings.filterwarnings(action='default', category=UserWarning)
# test df[df > 0]
for df in [ self.tsframe, self.mixed_frame, self.mixed_float, self.mixed_int ]:
data = df._get_numeric_data()
bif = df[df > 0]
bifw = DataFrame(dict([ (c,np.where(data[c] > 0, data[c], np.nan)) for c in data.columns ]),
index=data.index, columns=data.columns)
# add back other columns to compare
for c in df.columns:
if c not in bifw:
bifw[c] = df[c]
bifw = bifw.reindex(columns = df.columns)
assert_frame_equal(bif, bifw, check_dtype=False)
for c in df.columns:
if bif[c].dtype != bifw[c].dtype:
self.assertEqual(bif[c].dtype, df[c].dtype)
def test_getitem_boolean_casting(self):
# don't upcast if we don't need to
df = self.tsframe.copy()
df['E'] = 1
df['E'] = df['E'].astype('int32')
df['E1'] = df['E'].copy()
df['F'] = 1
df['F'] = df['F'].astype('int64')
df['F1'] = df['F'].copy()
casted = df[df>0]
result = casted.get_dtype_counts()
expected = Series({'float64': 4, 'int32' : 2, 'int64' : 2})
assert_series_equal(result, expected)
# int block splitting
df.ix[1:3,['E1','F1']] = 0
casted = df[df>0]
result = casted.get_dtype_counts()
expected = Series({'float64': 6, 'int32' : 1, 'int64' : 1})
assert_series_equal(result, expected)
# where dtype conversions
# GH 3733
df = DataFrame(data = np.random.randn(100, 50))
df = df.where(df > 0) # create nans
bools = df > 0
mask = isnull(df)
expected = bools.astype(float).mask(mask)
result = bools.mask(mask)
assert_frame_equal(result,expected)
def test_getitem_boolean_list(self):
df = DataFrame(np.arange(12).reshape(3, 4))
def _checkit(lst):
result = df[lst]
expected = df.ix[df.index[lst]]
assert_frame_equal(result, expected)
_checkit([True, False, True])
_checkit([True, True, True])
_checkit([False, False, False])
def test_getitem_boolean_iadd(self):
arr = randn(5, 5)
df = DataFrame(arr.copy(), columns = ['A','B','C','D','E'])
df[df < 0] += 1
arr[arr < 0] += 1
assert_almost_equal(df.values, arr)
def test_boolean_index_empty_corner(self):
# #2096
blah = DataFrame(np.empty([0, 1]), columns=['A'],
index=DatetimeIndex([]))
# both of these should succeed trivially
k = np.array([], bool)
blah[k]
blah[k] = 0
def test_getitem_ix_mixed_integer(self):
df = DataFrame(np.random.randn(4, 3),
index=[1, 10, 'C', 'E'], columns=[1, 2, 3])
result = df.ix[:-1]
expected = df.ix[df.index[:-1]]
assert_frame_equal(result, expected)
result = df.ix[[1, 10]]
expected = df.ix[Index([1, 10], dtype=object)]
assert_frame_equal(result, expected)
def test_getitem_setitem_ix_negative_integers(self):
result = self.frame.ix[:, -1]
assert_series_equal(result, self.frame['D'])
result = self.frame.ix[:, [-1]]
assert_frame_equal(result, self.frame[['D']])
result = self.frame.ix[:, [-1, -2]]
assert_frame_equal(result, self.frame[['D', 'C']])
self.frame.ix[:, [-1]] = 0
self.assertTrue((self.frame['D'] == 0).all())
df = DataFrame(np.random.randn(8, 4))
self.assertTrue(isnull(df.ix[:, [-1]].values).all())
# #1942
a = DataFrame(randn(20, 2), index=[chr(x + 65) for x in range(20)])
a.ix[-1] = a.ix[-2]
assert_series_equal(a.ix[-1], a.ix[-2])
def test_getattr(self):
tm.assert_series_equal(self.frame.A, self.frame['A'])
self.assertRaises(AttributeError, getattr, self.frame,
'NONEXISTENT_NAME')
def test_setattr_column(self):
df = DataFrame({'foobar': 1}, index=lrange(10))
df.foobar = 5
self.assertTrue((df.foobar == 5).all())
def test_setitem(self):
# not sure what else to do here
series = self.frame['A'][::2]
self.frame['col5'] = series
self.assertIn('col5', self.frame)
tm.assert_dict_equal(series, self.frame['col5'],
compare_keys=False)
series = self.frame['A']
self.frame['col6'] = series
tm.assert_dict_equal(series, self.frame['col6'],
compare_keys=False)
with tm.assertRaises(KeyError):
self.frame[randn(len(self.frame) + 1)] = 1
# set ndarray
arr = randn(len(self.frame))
self.frame['col9'] = arr
self.assertTrue((self.frame['col9'] == arr).all())
self.frame['col7'] = 5
assert((self.frame['col7'] == 5).all())
self.frame['col0'] = 3.14
assert((self.frame['col0'] == 3.14).all())
self.frame['col8'] = 'foo'
assert((self.frame['col8'] == 'foo').all())
# this is partially a view (e.g. some blocks are views)
# so setting should raise/warn
smaller = self.frame[:2]
def f():
smaller['col10'] = ['1', '2']
self.assertRaises(com.SettingWithCopyError, f)
self.assertEqual(smaller['col10'].dtype, np.object_)
self.assertTrue((smaller['col10'] == ['1', '2']).all())
# with a dtype
for dtype in ['int32','int64','float32','float64']:
self.frame[dtype] = np.array(arr,dtype=dtype)
self.assertEqual(self.frame[dtype].dtype.name, dtype)
# dtype changing GH4204
df = DataFrame([[0,0]])
df.iloc[0] = np.nan
expected = DataFrame([[np.nan,np.nan]])
assert_frame_equal(df,expected)
df = DataFrame([[0,0]])
df.loc[0] = np.nan
assert_frame_equal(df,expected)
def test_setitem_tuple(self):
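# on a regular (non-MultiIndex) frame a tuple key is just a single column label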
self.frame['A', 'B'] = self.frame['A']
assert_series_equal(self.frame['A', 'B'], self.frame['A'])
def test_setitem_always_copy(self):
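# column assignment copies the Series, so mutating the new column must not touch the source Series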
s = self.frame['A'].copy()
self.frame['E'] = s
self.frame['E'][5:10] = nan
self.assertTrue(notnull(s[5:10]).all())
def test_setitem_boolean(self):
df = self.frame.copy()
values = self.frame.values
df[df['A'] > 0] = 4
values[values[:, 0] > 0] = 4
assert_almost_equal(df.values, values)
# test that column reindexing works
series = df['A'] == 4
series = series.reindex(df.index[::-1])
df[series] = 1
values[values[:, 0] == 4] = 1
assert_almost_equal(df.values, values)
df[df > 0] = 5
values[values > 0] = 5
assert_almost_equal(df.values, values)
df[df == 5] = 0
values[values == 5] = 0
assert_almost_equal(df.values, values)
# a df that needs alignment first
df[df[:-1] < 0] = 2
np.putmask(values[:-1], values[:-1] < 0, 2)
assert_almost_equal(df.values, values)
# indexed with same shape but rows-reversed df
df[df[::-1] == 2] = 3
values[values == 2] = 3
assert_almost_equal(df.values, values)
with assertRaisesRegexp(TypeError, 'Must pass DataFrame with boolean '
'values only'):
df[df * 0] = 2
# index with DataFrame
mask = df > np.abs(df)
expected = df.copy()
df[df > np.abs(df)] = nan
expected.values[mask.values] = nan
assert_frame_equal(df, expected)
# set from DataFrame
expected = df.copy()
df[df > np.abs(df)] = df * 2
np.putmask(expected.values, mask.values, df.values * 2)
assert_frame_equal(df, expected)
def test_setitem_cast(self):
self.frame['D'] = self.frame['D'].astype('i8')
self.assertEqual(self.frame['D'].dtype, np.int64)
# #669, should not cast?
# this is now set to int64: the column is replaced with the value's dtype
# (the existing dtype of the column is irrelevant)
self.frame['B'] = 0
self.assertEqual(self.frame['B'].dtype, np.int64)
# casting does happen if an array is passed, of course
self.frame['B'] = np.arange(len(self.frame))
self.assertTrue(issubclass(self.frame['B'].dtype.type, np.integer))
self.frame['foo'] = 'bar'
self.frame['foo'] = 0
self.assertEqual(self.frame['foo'].dtype, np.int64)
self.frame['foo'] = 'bar'
self.frame['foo'] = 2.5
self.assertEqual(self.frame['foo'].dtype, np.float64)
self.frame['something'] = 0
self.assertEqual(self.frame['something'].dtype, np.int64)
self.frame['something'] = 2
self.assertEqual(self.frame['something'].dtype, np.int64)
self.frame['something'] = 2.5
self.assertEqual(self.frame['something'].dtype, np.float64)
# GH 7704
# dtype conversion on setting
df = DataFrame(np.random.rand(30, 3), columns=tuple('ABC'))
df['event'] = np.nan
df.loc[10,'event'] = 'foo'
result = df.get_dtype_counts().order()
expected = Series({'float64' : 3, 'object' : 1 }).order()
assert_series_equal(result, expected)
def test_setitem_boolean_column(self):
expected = self.frame.copy()
mask = self.frame['A'] > 0
self.frame.ix[mask, 'B'] = 0
expected.values[mask.values, 1] = 0
assert_frame_equal(self.frame, expected)
def test_setitem_corner(self):
# corner case
df = DataFrame({'B': [1., 2., 3.],
'C': ['a', 'b', 'c']},
index=np.arange(3))
del df['B']
df['B'] = [1., 2., 3.]
self.assertIn('B', df)
self.assertEqual(len(df.columns), 2)
df['A'] = 'beginning'
df['E'] = 'foo'
df['D'] = 'bar'
df[datetime.now()] = 'date'
df[datetime.now()] = 5.
# what to do when empty frame with index
dm = DataFrame(index=self.frame.index)
dm['A'] = 'foo'
dm['B'] = 'bar'
self.assertEqual(len(dm.columns), 2)
self.assertEqual(dm.values.dtype, np.object_)
# upcast
dm['C'] = 1
self.assertEqual(dm['C'].dtype, np.int64)
dm['E'] = 1.
self.assertEqual(dm['E'].dtype, np.float64)
# set existing column
dm['A'] = 'bar'
self.assertEqual('bar', dm['A'][0])
dm = DataFrame(index=np.arange(3))
dm['A'] = 1
dm['foo'] = 'bar'
del dm['foo']
dm['foo'] = 'bar'
self.assertEqual(dm['foo'].dtype, np.object_)
dm['coercable'] = ['1', '2', '3']
self.assertEqual(dm['coercable'].dtype, np.object_)
def test_setitem_corner2(self):
data = {"title": ['foobar', 'bar', 'foobar'] + ['foobar'] * 17,
"cruft": np.random.random(20)}
df = DataFrame(data)
ix = df[df['title'] == 'bar'].index
df.ix[ix, ['title']] = 'foobar'
df.ix[ix, ['cruft']] = 0
assert(df.ix[1, 'title'] == 'foobar')
assert(df.ix[1, 'cruft'] == 0)
def test_setitem_ambig(self):
# difficulties with mixed-type data
from decimal import Decimal
# created as float type
dm = DataFrame(index=lrange(3), columns=lrange(3))
coercable_series = Series([Decimal(1) for _ in range(3)],
index=lrange(3))
uncoercable_series = Series(['foo', 'bzr', 'baz'], index=lrange(3))
dm[0] = np.ones(3)
self.assertEqual(len(dm.columns), 3)
# self.assertIsNone(dm.objects)
dm[1] = coercable_series
self.assertEqual(len(dm.columns), 3)
# self.assertIsNone(dm.objects)
dm[2] = uncoercable_series
self.assertEqual(len(dm.columns), 3)
# self.assertIsNotNone(dm.objects)
self.assertEqual(dm[2].dtype, np.object_)
def test_setitem_clear_caches(self):
# GH #304
df = DataFrame({'x': [1.1, 2.1, 3.1, 4.1], 'y': [5.1, 6.1, 7.1, 8.1]},
index=[0, 1, 2, 3])
df.insert(2, 'z', np.nan)
# cache it
foo = df['z']
df.ix[2:, 'z'] = 42
expected = Series([np.nan, np.nan, 42, 42], index=df.index)
self.assertIsNot(df['z'], foo)
assert_series_equal(df['z'], expected)
def test_setitem_None(self):
# GH #766
self.frame[None] = self.frame['A']
assert_series_equal(self.frame.iloc[:,-1], self.frame['A'])
assert_series_equal(self.frame.loc[:,None], self.frame['A'])
assert_series_equal(self.frame[None], self.frame['A'])
repr(self.frame)
def test_delitem_corner(self):
f = self.frame.copy()
del f['D']
self.assertEqual(len(f.columns), 3)
self.assertRaises(KeyError, f.__delitem__, 'D')
del f['B']
self.assertEqual(len(f.columns), 2)
def test_getitem_fancy_2d(self):
f = self.frame
ix = f.ix
assert_frame_equal(ix[:, ['B', 'A']], f.reindex(columns=['B', 'A']))
subidx = self.frame.index[[5, 4, 1]]
assert_frame_equal(ix[subidx, ['B', 'A']],
f.reindex(index=subidx, columns=['B', 'A']))
# slicing rows, etc.
assert_frame_equal(ix[5:10], f[5:10])
assert_frame_equal(ix[5:10, :], f[5:10])
assert_frame_equal(ix[:5, ['A', 'B']],
f.reindex(index=f.index[:5], columns=['A', 'B']))
# slice rows with labels, inclusive!
expected = ix[5:11]
result = ix[f.index[5]:f.index[10]]
assert_frame_equal(expected, result)
# slice columns
assert_frame_equal(ix[:, :2], f.reindex(columns=['A', 'B']))
# get view
exp = f.copy()
ix[5:10].values[:] = 5
exp.values[5:10] = 5
assert_frame_equal(f, exp)
self.assertRaises(ValueError, ix.__getitem__, f > 0.5)
def test_slice_floats(self):
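# .ix slicing on a float index is label-based: rows whose labels fall inside the bounds are kept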
index = [52195.504153, 52196.303147, 52198.369883]
df = DataFrame(np.random.rand(3, 2), index=index)
s1 = df.ix[52195.1:52196.5]
self.assertEqual(len(s1), 2)
s1 = df.ix[52195.1:52196.6]
self.assertEqual(len(s1), 2)
s1 = df.ix[52195.1:52198.9]
self.assertEqual(len(s1), 3)
def test_getitem_fancy_slice_integers_step(self):
df = DataFrame(np.random.randn(10, 5))
# this is OK
result = df.ix[:8:2]
df.ix[:8:2] = np.nan
self.assertTrue(isnull(df.ix[:8:2]).values.all())
def test_getitem_setitem_integer_slice_keyerrors(self):
df = DataFrame(np.random.randn(10, 5), index=lrange(0, 20, 2))
# this is OK
cp = df.copy()
cp.ix[4:10] = 0
self.assertTrue((cp.ix[4:10] == 0).values.all())
# so is this
cp = df.copy()
cp.ix[3:11] = 0
self.assertTrue((cp.ix[3:11] == 0).values.all())
result = df.ix[4:10]
result2 = df.ix[3:11]
expected = df.reindex([4, 6, 8, 10])
assert_frame_equal(result, expected)
assert_frame_equal(result2, expected)
# non-monotonic, raise KeyError
df2 = df.iloc[lrange(5) + lrange(5, 10)[::-1]]
self.assertRaises(KeyError, df2.ix.__getitem__, slice(3, 11))
self.assertRaises(KeyError, df2.ix.__setitem__, slice(3, 11), 0)
def test_setitem_fancy_2d(self):
f = self.frame
ix = f.ix
# case 1
frame = self.frame.copy()
expected = frame.copy()
frame.ix[:, ['B', 'A']] = 1
expected['B'] = 1.
expected['A'] = 1.
assert_frame_equal(frame, expected)
# case 2
frame = self.frame.copy()
frame2 = self.frame.copy()
expected = frame.copy()
subidx = self.frame.index[[5, 4, 1]]
values = randn(3, 2)
frame.ix[subidx, ['B', 'A']] = values
frame2.ix[[5, 4, 1], ['B', 'A']] = values
expected['B'].ix[subidx] = values[:, 0]
expected['A'].ix[subidx] = values[:, 1]
assert_frame_equal(frame, expected)
assert_frame_equal(frame2, expected)
# case 3: slicing rows, etc.
frame = self.frame.copy()
expected1 = self.frame.copy()
frame.ix[5:10] = 1.
expected1.values[5:10] = 1.
assert_frame_equal(frame, expected1)
expected2 = self.frame.copy()
arr = randn(5, len(frame.columns))
frame.ix[5:10] = arr
expected2.values[5:10] = arr
assert_frame_equal(frame, expected2)
# case 4
frame = self.frame.copy()
frame.ix[5:10, :] = 1.
assert_frame_equal(frame, expected1)
frame.ix[5:10, :] = arr
assert_frame_equal(frame, expected2)
# case 5
frame = self.frame.copy()
frame2 = self.frame.copy()
expected = self.frame.copy()
values = randn(5, 2)
frame.ix[:5, ['A', 'B']] = values
expected['A'][:5] = values[:, 0]
expected['B'][:5] = values[:, 1]
assert_frame_equal(frame, expected)
frame2.ix[:5, [0, 1]] = values
assert_frame_equal(frame2, expected)
# case 6: slice rows with labels, inclusive!
frame = self.frame.copy()
expected = self.frame.copy()
frame.ix[frame.index[5]:frame.index[10]] = 5.
expected.values[5:11] = 5
assert_frame_equal(frame, expected)
# case 7: slice columns
frame = self.frame.copy()
frame2 = self.frame.copy()
expected = self.frame.copy()
# slice indices
frame.ix[:, 1:3] = 4.
expected.values[:, 1:3] = 4.
assert_frame_equal(frame, expected)
# slice with labels
frame.ix[:, 'B':'C'] = 4.
assert_frame_equal(frame, expected)
# new corner case of boolean slicing / setting
frame = DataFrame(lzip([2, 3, 9, 6, 7], [np.nan] * 5),
columns=['a', 'b'])
lst = [100]
lst.extend([np.nan] * 4)
expected = DataFrame(lzip([100, 3, 9, 6, 7], lst),
columns=['a', 'b'])
frame[frame['a'] == 2] = 100
assert_frame_equal(frame, expected)
def test_fancy_getitem_slice_mixed(self):
sliced = self.mixed_frame.ix[:, -3:]
self.assertEqual(sliced['D'].dtype, np.float64)
# get view with single block
# setting it triggers setting with copy
sliced = self.frame.ix[:, -3:]
def f():
sliced['C'] = 4.
self.assertRaises(com.SettingWithCopyError, f)
self.assertTrue((self.frame['C'] == 4).all())
def test_fancy_setitem_int_labels(self):
# integer index defers to label-based indexing
df = DataFrame(np.random.randn(10, 5), index=np.arange(0, 20, 2))
tmp = df.copy()
exp = df.copy()
tmp.ix[[0, 2, 4]] = 5
exp.values[:3] = 5
assert_frame_equal(tmp, exp)
tmp = df.copy()
exp = df.copy()
tmp.ix[6] = 5
exp.values[3] = 5
assert_frame_equal(tmp, exp)
tmp = df.copy()
exp = df.copy()
tmp.ix[:, 2] = 5
# tmp correctly sets the dtype
# so build the expected frame the same way
exp[2] = 5
assert_frame_equal(tmp, exp)
def test_fancy_getitem_int_labels(self):
df = DataFrame(np.random.randn(10, 5), index=np.arange(0, 20, 2))
result = df.ix[[4, 2, 0], [2, 0]]
expected = df.reindex(index=[4, 2, 0], columns=[2, 0])
assert_frame_equal(result, expected)
result = df.ix[[4, 2, 0]]
expected = df.reindex(index=[4, 2, 0])
assert_frame_equal(result, expected)
result = df.ix[4]
expected = df.xs(4)
assert_series_equal(result, expected)
result = df.ix[:, 3]
expected = df[3]
assert_series_equal(result, expected)
def test_fancy_index_int_labels_exceptions(self):
df = DataFrame(np.random.randn(10, 5), index=np.arange(0, 20, 2))
# labels that aren't contained
self.assertRaises(KeyError, df.ix.__setitem__,
([0, 1, 2], [2, 3, 4]), 5)
# try to set indices not contained in frame
self.assertRaises(KeyError,
self.frame.ix.__setitem__,
['foo', 'bar', 'baz'], 1)
self.assertRaises(KeyError,
self.frame.ix.__setitem__,
(slice(None, None), ['E']), 1)
# partial setting now allows this GH2578
#self.assertRaises(KeyError,
# self.frame.ix.__setitem__,
# (slice(None, None), 'E'), 1)
def test_setitem_fancy_mixed_2d(self):
self.mixed_frame.ix[:5, ['C', 'B', 'A']] = 5
result = self.mixed_frame.ix[:5, ['C', 'B', 'A']]
self.assertTrue((result.values == 5).all())
self.mixed_frame.ix[5] = np.nan
self.assertTrue(isnull(self.mixed_frame.ix[5]).all())
self.mixed_frame.ix[5] = self.mixed_frame.ix[6]
assert_series_equal(self.mixed_frame.ix[5], self.mixed_frame.ix[6])
# #1432
df = DataFrame({1: [1., 2., 3.],
2: [3, 4, 5]})
self.assertTrue(df._is_mixed_type)
df.ix[1] = [5, 10]
expected = DataFrame({1: [1., 5., 3.],
2: [3, 10, 5]})
assert_frame_equal(df, expected)
def test_ix_align(self):
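# assignment of a Series via .ix aligns on index labels, not on position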
b = Series(randn(10))
b.sort()
df_orig = DataFrame(randn(10, 4))
df = df_orig.copy()
df.ix[:, 0] = b
assert_series_equal(df.ix[:, 0].reindex(b.index), b)
dft = df_orig.T
dft.ix[0, :] = b
assert_series_equal(dft.ix[0, :].reindex(b.index), b)
df = df_orig.copy()
df.ix[:5, 0] = b
s = df.ix[:5, 0]
assert_series_equal(s, b.reindex(s.index))
dft = df_orig.T
dft.ix[0, :5] = b
s = dft.ix[0, :5]
assert_series_equal(s, b.reindex(s.index))
df = df_orig.copy()
idx = [0, 1, 3, 5]
df.ix[idx, 0] = b
s = df.ix[idx, 0]
assert_series_equal(s, b.reindex(s.index))
dft = df_orig.T
dft.ix[0, idx] = b
s = dft.ix[0, idx]
assert_series_equal(s, b.reindex(s.index))
def test_ix_frame_align(self):
b = DataFrame(np.random.randn(3, 4))
df_orig = DataFrame(randn(10, 4))
df = df_orig.copy()
df.ix[:3] = b
out = b.ix[:3]
assert_frame_equal(out, b)
b.sort_index(inplace=True)
df = df_orig.copy()
df.ix[[0, 1, 2]] = b
out = df.ix[[0, 1, 2]].reindex(b.index)
assert_frame_equal(out, b)
df = df_orig.copy()
df.ix[:3] = b
out = df.ix[:3]
assert_frame_equal(out, b.reindex(out.index))
def test_getitem_setitem_non_ix_labels(self):
df = tm.makeTimeDataFrame()
start, end = df.index[[5, 10]]
result = df.ix[start:end]
result2 = df[start:end]
expected = df[5:11]
assert_frame_equal(result, expected)
assert_frame_equal(result2, expected)
result = df.copy()
result.ix[start:end] = 0
result2 = df.copy()
result2[start:end] = 0
expected = df.copy()
expected[5:11] = 0
assert_frame_equal(result, expected)
assert_frame_equal(result2, expected)
def test_ix_multi_take(self):
df = DataFrame(np.random.randn(3, 2))
rs = df.ix[df.index == 0, :]
xp = df.reindex([0])
assert_frame_equal(rs, xp)
""" #1321
df = DataFrame(np.random.randn(3, 2))
rs = df.ix[df.index==0, df.columns==1]
xp = df.reindex([0], [1])
assert_frame_equal(rs, xp)
"""
def test_ix_multi_take_nonint_index(self):
df = DataFrame(np.random.randn(3, 2), index=['x', 'y', 'z'],
columns=['a', 'b'])
rs = df.ix[[0], [0]]
xp = df.reindex(['x'], columns=['a'])
assert_frame_equal(rs, xp)
def test_ix_multi_take_multiindex(self):
df = DataFrame(np.random.randn(3, 2), index=['x', 'y', 'z'],
columns=[['a', 'b'], ['1', '2']])
rs = df.ix[[0], [0]]
xp = df.reindex(['x'], columns=[('a', '1')])
assert_frame_equal(rs, xp)
def test_ix_dup(self):
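# label slicing should still work when the index contains duplicate labels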
idx = Index(['a', 'a', 'b', 'c', 'd', 'd'])
df = DataFrame(np.random.randn(len(idx), 3), idx)
sub = df.ix[:'d']
assert_frame_equal(sub, df)
sub = df.ix['a':'c']
assert_frame_equal(sub, df.ix[0:4])
sub = df.ix['b':'d']
assert_frame_equal(sub, df.ix[2:])
def test_getitem_fancy_1d(self):
f = self.frame
ix = f.ix
# return self if no slicing...for now
self.assertIs(ix[:, :], f)
# low dimensional slice
xs1 = ix[2, ['C', 'B', 'A']]
xs2 = f.xs(f.index[2]).reindex(['C', 'B', 'A'])
assert_series_equal(xs1, xs2)
ts1 = ix[5:10, 2]
ts2 = f[f.columns[2]][5:10]
assert_series_equal(ts1, ts2)
# positional xs
xs1 = ix[0]
xs2 = f.xs(f.index[0])
assert_series_equal(xs1, xs2)
xs1 = ix[f.index[5]]
xs2 = f.xs(f.index[5])
assert_series_equal(xs1, xs2)
# single column
assert_series_equal(ix[:, 'A'], f['A'])
# return view
exp = f.copy()
exp.values[5] = 4
ix[5][:] = 4
assert_frame_equal(exp, f)
exp.values[:, 1] = 6
ix[:, 1][:] = 6
assert_frame_equal(exp, f)
# slice of mixed-frame
xs = self.mixed_frame.ix[5]
exp = self.mixed_frame.xs(self.mixed_frame.index[5])
assert_series_equal(xs, exp)
def test_setitem_fancy_1d(self):
# case 1: set cross-section for indices
frame = self.frame.copy()
expected = self.frame.copy()
frame.ix[2, ['C', 'B', 'A']] = [1., 2., 3.]
expected['C'][2] = 1.
expected['B'][2] = 2.
expected['A'][2] = 3.
assert_frame_equal(frame, expected)
frame2 = self.frame.copy()
frame2.ix[2, [3, 2, 1]] = [1., 2., 3.]
assert_frame_equal(frame, expected)
# case 2, set a section of a column
frame = self.frame.copy()
expected = self.frame.copy()
vals = randn(5)
expected.values[5:10, 2] = vals
frame.ix[5:10, 2] = vals
assert_frame_equal(frame, expected)
frame2 = self.frame.copy()
frame2.ix[5:10, 'B'] = vals
assert_frame_equal(frame, expected)
# case 3: full xs
frame = self.frame.copy()
expected = self.frame.copy()
frame.ix[4] = 5.
expected.values[4] = 5.
assert_frame_equal(frame, expected)
frame.ix[frame.index[4]] = 6.
expected.values[4] = 6.
assert_frame_equal(frame, expected)
# single column
frame = self.frame.copy()
expected = self.frame.copy()
frame.ix[:, 'A'] = 7.
expected['A'] = 7.
assert_frame_equal(frame, expected)
def test_getitem_fancy_scalar(self):
f = self.frame
ix = f.ix
# individual value
for col in f.columns:
ts = f[col]
for idx in f.index[::5]:
assert_almost_equal(ix[idx, col], ts[idx])
def test_setitem_fancy_scalar(self):
f = self.frame
expected = self.frame.copy()
ix = f.ix
# individual value
for j, col in enumerate(f.columns):
ts = f[col]
for idx in f.index[::5]:
i = f.index.get_loc(idx)
val = randn()
expected.values[i, j] = val
ix[idx, col] = val
assert_frame_equal(f, expected)
def test_getitem_fancy_boolean(self):
f = self.frame
ix = f.ix
expected = f.reindex(columns=['B', 'D'])
result = ix[:, [False, True, False, True]]
assert_frame_equal(result, expected)
expected = f.reindex(index=f.index[5:10], columns=['B', 'D'])
result = ix[5:10, [False, True, False, True]]
assert_frame_equal(result, expected)
boolvec = f.index > f.index[7]
expected = f.reindex(index=f.index[boolvec])
result = ix[boolvec]
assert_frame_equal(result, expected)
result = ix[boolvec, :]
assert_frame_equal(result, expected)
result = ix[boolvec, 2:]
expected = f.reindex(index=f.index[boolvec],
columns=['C', 'D'])
assert_frame_equal(result, expected)
def test_setitem_fancy_boolean(self):
# from 2d, set with booleans
frame = self.frame.copy()
expected = self.frame.copy()
mask = frame['A'] > 0
frame.ix[mask] = 0.
expected.values[mask.values] = 0.
assert_frame_equal(frame, expected)
frame = self.frame.copy()
expected = self.frame.copy()
frame.ix[mask, ['A', 'B']] = 0.
expected.values[mask.values, :2] = 0.
assert_frame_equal(frame, expected)
def test_getitem_fancy_ints(self):
result = self.frame.ix[[1, 4, 7]]
expected = self.frame.ix[self.frame.index[[1, 4, 7]]]
assert_frame_equal(result, expected)
result = self.frame.ix[:, [2, 0, 1]]
expected = self.frame.ix[:, self.frame.columns[[2, 0, 1]]]
assert_frame_equal(result, expected)
def test_getitem_setitem_fancy_exceptions(self):
ix = self.frame.ix
with assertRaisesRegexp(IndexingError, 'Too many indexers'):
ix[:, :, :]
with assertRaisesRegexp(IndexingError, 'only tuples of length <= 2 '
'supported'):
ix[:, :, :] = 1
def test_getitem_setitem_boolean_misaligned(self):
# boolean index misaligned labels
mask = self.frame['A'][::-1] > 1
result = self.frame.ix[mask]
expected = self.frame.ix[mask[::-1]]
assert_frame_equal(result, expected)
cp = self.frame.copy()
expected = self.frame.copy()
cp.ix[mask] = 0
expected.ix[mask] = 0
assert_frame_equal(cp, expected)
def test_getitem_setitem_boolean_multi(self):
df = DataFrame(np.random.randn(3, 2))
# get
k1 = np.array([True, False, True])
k2 = np.array([False, True])
result = df.ix[k1, k2]
expected = df.ix[[0, 2], [1]]
assert_frame_equal(result, expected)
expected = df.copy()
df.ix[np.array([True, False, True]),
np.array([False, True])] = 5
expected.ix[[0, 2], [1]] = 5
assert_frame_equal(df, expected)
def test_getitem_setitem_float_labels(self):
index = Index([1.5, 2, 3, 4, 5])
df = DataFrame(np.random.randn(5, 5), index=index)
result = df.ix[1.5:4]
expected = df.reindex([1.5, 2, 3, 4])
assert_frame_equal(result, expected)
self.assertEqual(len(result), 4)
result = df.ix[4:5]
expected = df.reindex([4, 5])
assert_frame_equal(result, expected)
self.assertEqual(len(result), 2)
# loc_float changes this to work properly
result = df.ix[1:2]
expected = df.iloc[0:2]
assert_frame_equal(result, expected)
df.ix[1:2] = 0
result = df[1:2]
self.assertTrue((result==0).all().all())
# #2727
index = Index([1.0, 2.5, 3.5, 4.5, 5.0])
df = DataFrame(np.random.randn(5, 5), index=index)
# positional slicing only via iloc!
with tm.assert_produces_warning(FutureWarning):
result = df.iloc[1.0:5]
expected = df.reindex([2.5, 3.5, 4.5, 5.0])
assert_frame_equal(result, expected)
self.assertEqual(len(result), 4)
result = df.iloc[4:5]
expected = df.reindex([5.0])
assert_frame_equal(result, expected)
self.assertEqual(len(result), 1)
# GH 4892, float indexers in iloc are deprecated
import warnings
warnings.filterwarnings(action='error', category=FutureWarning)
cp = df.copy()
def f():
cp.iloc[1.0:5] = 0
self.assertRaises(FutureWarning, f)
def f():
result = cp.iloc[1.0:5] == 0
self.assertRaises(FutureWarning, f)
self.assertTrue(result.values.all())
self.assertTrue((cp.iloc[0:1] == df.iloc[0:1]).values.all())
warnings.filterwarnings(action='ignore', category=FutureWarning)
cp = df.copy()
cp.iloc[4:5] = 0
self.assertTrue((cp.iloc[4:5] == 0).values.all())
self.assertTrue((cp.iloc[0:4] == df.iloc[0:4]).values.all())
# float slicing
result = df.ix[1.0:5]
expected = df
assert_frame_equal(result, expected)
self.assertEqual(len(result), 5)
result = df.ix[1.1:5]
expected = df.reindex([2.5, 3.5, 4.5, 5.0])
assert_frame_equal(result, expected)
self.assertEqual(len(result), 4)
result = df.ix[4.51:5]
expected = df.reindex([5.0])
assert_frame_equal(result, expected)
self.assertEqual(len(result), 1)
result = df.ix[1.0:5.0]
expected = df.reindex([1.0, 2.5, 3.5, 4.5, 5.0])
assert_frame_equal(result, expected)
self.assertEqual(len(result), 5)
cp = df.copy()
cp.ix[1.0:5.0] = 0
result = cp.ix[1.0:5.0]
self.assertTrue((result == 0).values.all())
def test_setitem_single_column_mixed(self):
df = DataFrame(randn(5, 3), index=['a', 'b', 'c', 'd', 'e'],
columns=['foo', 'bar', 'baz'])
df['str'] = 'qux'
df.ix[::2, 'str'] = nan
expected = [nan, 'qux', nan, 'qux', nan]
assert_almost_equal(df['str'].values, expected)
def test_setitem_single_column_mixed_datetime(self):
df = DataFrame(randn(5, 3), index=['a', 'b', 'c', 'd', 'e'],
columns=['foo', 'bar', 'baz'])
df['timestamp'] = Timestamp('20010102')
# check our dtypes
result = df.get_dtype_counts()
expected = Series({'float64': 3, 'datetime64[ns]': 1})
assert_series_equal(result, expected)
# set an allowable datetime64 type
from pandas import tslib
df.ix['b', 'timestamp'] = tslib.iNaT
self.assertTrue(com.isnull(df.ix['b', 'timestamp']))
# allow this syntax
df.ix['c', 'timestamp'] = nan
self.assertTrue(com.isnull(df.ix['c', 'timestamp']))
# allow this syntax
df.ix['d', :] = nan
self.assertTrue(com.isnull(df.ix['c', :]).all() == False)
# as of GH 3216 this will now work!
# try to set with a list like item
#self.assertRaises(
# Exception, df.ix.__setitem__, ('d', 'timestamp'), [nan])
def test_setitem_frame(self):
piece = self.frame.ix[:2, ['A', 'B']]
self.frame.ix[-2:, ['A', 'B']] = piece.values
assert_almost_equal(self.frame.ix[-2:, ['A', 'B']].values,
piece.values)
# GH 3216
# already aligned
f = self.mixed_frame.copy()
piece = DataFrame([[ 1, 2], [3, 4]], index=f.index[0:2],columns=['A', 'B'])
key = (slice(None,2), ['A', 'B'])
f.ix[key] = piece
assert_almost_equal(f.ix[0:2, ['A', 'B']].values,
piece.values)
# rows unaligned
f = self.mixed_frame.copy()
piece = DataFrame([[ 1, 2 ], [3, 4], [5, 6], [7, 8]], index=list(f.index[0:2]) + ['foo','bar'],columns=['A', 'B'])
key = (slice(None,2), ['A', 'B'])
f.ix[key] = piece
assert_almost_equal(f.ix[0:2:, ['A', 'B']].values,
piece.values[0:2])
# key is unaligned with values
f = self.mixed_frame.copy()
piece = f.ix[:2, ['A']]
piece.index = f.index[-2:]
key = (slice(-2, None), ['A', 'B'])
f.ix[key] = piece
piece['B'] = np.nan
assert_almost_equal(f.ix[-2:, ['A', 'B']].values,
piece.values)
# ndarray
f = self.mixed_frame.copy()
piece = self.mixed_frame.ix[:2, ['A', 'B']]
key = (slice(-2, None), ['A', 'B'])
f.ix[key] = piece.values
assert_almost_equal(f.ix[-2:, ['A', 'B']].values,
piece.values)
# needs upcasting
df = DataFrame([[1,2,'foo'],[3,4,'bar']],columns=['A','B','C'])
df2 = df.copy()
df2.ix[:,['A','B']] = df.ix[:,['A','B']]+0.5
expected = df.reindex(columns=['A','B'])
expected += 0.5
expected['C'] = df['C']
assert_frame_equal(df2, expected)
def test_setitem_frame_align(self):
piece = self.frame.ix[:2, ['A', 'B']]
piece.index = self.frame.index[-2:]
piece.columns = ['A', 'B']
self.frame.ix[-2:, ['A', 'B']] = piece
assert_almost_equal(self.frame.ix[-2:, ['A', 'B']].values,
piece.values)
def test_setitem_fancy_exceptions(self):
pass
def test_getitem_boolean_missing(self):
pass
def test_setitem_boolean_missing(self):
pass
def test_getitem_setitem_ix_duplicates(self):
# #1201
df = DataFrame(np.random.randn(5, 3),
index=['foo', 'foo', 'bar', 'baz', 'bar'])
result = df.ix['foo']
expected = df[:2]
assert_frame_equal(result, expected)
result = df.ix['bar']
expected = df.ix[[2, 4]]
assert_frame_equal(result, expected)
result = df.ix['baz']
expected = df.ix[3]
assert_series_equal(result, expected)
def test_getitem_ix_boolean_duplicates_multiple(self):
# #1201
df = DataFrame(np.random.randn(5, 3),
index=['foo', 'foo', 'bar', 'baz', 'bar'])
result = df.ix[['bar']]
exp = df.ix[[2, 4]]
assert_frame_equal(result, exp)
result = df.ix[df[1] > 0]
exp = df[df[1] > 0]
assert_frame_equal(result, exp)
result = df.ix[df[0] > 0]
exp = df[df[0] > 0]
assert_frame_equal(result, exp)
def test_getitem_setitem_ix_bool_keyerror(self):
# #2199
df = DataFrame({'a': [1, 2, 3]})
self.assertRaises(KeyError, df.ix.__getitem__, False)
self.assertRaises(KeyError, df.ix.__getitem__, True)
self.assertRaises(KeyError, df.ix.__setitem__, False, 0)
self.assertRaises(KeyError, df.ix.__setitem__, True, 0)
def test_getitem_list_duplicates(self):
# #1943
df = DataFrame(np.random.randn(4, 4), columns=list('AABC'))
df.columns.name = 'foo'
result = df[['B', 'C']]
self.assertEqual(result.columns.name, 'foo')
expected = df.ix[:, 2:]
assert_frame_equal(result, expected)
def test_get_value(self):
for idx in self.frame.index:
for col in self.frame.columns:
result = self.frame.get_value(idx, col)
expected = self.frame[col][idx]
assert_almost_equal(result, expected)
def test_iteritems(self):
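# iteritems should yield Series objects even with duplicate column labels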
df = DataFrame([[1, 2, 3], [4, 5, 6]], columns=['a', 'a', 'b'])
for k, v in compat.iteritems(df):
self.assertEqual(type(v), Series)
def test_lookup(self):
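# lookup(rows, cols) pulls one value per (row, col) pair; compare against repeated get_value calls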
def alt(df, rows, cols):
result = []
for r, c in zip(rows, cols):
result.append(df.get_value(r, c))
return result
def testit(df):
rows = list(df.index) * len(df.columns)
cols = list(df.columns) * len(df.index)
result = df.lookup(rows, cols)
expected = alt(df, rows, cols)
assert_almost_equal(result, expected)
testit(self.mixed_frame)
testit(self.frame)
df = DataFrame({'label': ['a', 'b', 'a', 'c'],
'mask_a': [True, True, False, True],
'mask_b': [True, False, False, False],
'mask_c': [False, True, False, True]})
df['mask'] = df.lookup(df.index, 'mask_' + df['label'])
exp_mask = alt(df, df.index, 'mask_' + df['label'])
assert_almost_equal(df['mask'], exp_mask)
self.assertEqual(df['mask'].dtype, np.bool_)
with tm.assertRaises(KeyError):
self.frame.lookup(['xyz'], ['A'])
with tm.assertRaises(KeyError):
self.frame.lookup([self.frame.index[0]], ['xyz'])
with tm.assertRaisesRegexp(ValueError, 'same size'):
self.frame.lookup(['a', 'b', 'c'], ['a'])
def test_set_value(self):
for idx in self.frame.index:
for col in self.frame.columns:
self.frame.set_value(idx, col, 1)
assert_almost_equal(self.frame[col][idx], 1)
def test_set_value_resize(self):
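# set_value with a previously unknown label should enlarge the frame in place and return the same object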
res = self.frame.set_value('foobar', 'B', 0)
self.assertIs(res, self.frame)
self.assertEqual(res.index[-1], 'foobar')
self.assertEqual(res.get_value('foobar', 'B'), 0)
self.frame.loc['foobar','qux'] = 0
self.assertEqual(self.frame.get_value('foobar', 'qux'), 0)
res = self.frame.copy()
res3 = res.set_value('foobar', 'baz', 'sam')
self.assertEqual(res3['baz'].dtype, np.object_)
res = self.frame.copy()
res3 = res.set_value('foobar', 'baz', True)
self.assertEqual(res3['baz'].dtype, np.object_)
res = self.frame.copy()
res3 = res.set_value('foobar', 'baz', 5)
self.assertTrue(com.is_float_dtype(res3['baz']))
self.assertTrue(isnull(res3['baz'].drop(['foobar'])).all())
self.assertRaises(ValueError, res3.set_value, 'foobar', 'baz', 'sam')
def test_set_value_with_index_dtype_change(self):
df_orig = DataFrame(randn(3, 3), index=lrange(3), columns=list('ABC'))
# this is actually ambiguous as the 2 is interpreted as a positional
# so column is not created
df = df_orig.copy()
df.set_value('C', 2, 1.0)
self.assertEqual(list(df.index), list(df_orig.index) + ['C'])
#self.assertEqual(list(df.columns), list(df_orig.columns) + [2])
df = df_orig.copy()
df.loc['C', 2] = 1.0
self.assertEqual(list(df.index), list(df_orig.index) + ['C'])
#self.assertEqual(list(df.columns), list(df_orig.columns) + [2])
# create both new
df = df_orig.copy()
df.set_value('C', 'D', 1.0)
self.assertEqual(list(df.index), list(df_orig.index) + ['C'])
self.assertEqual(list(df.columns), list(df_orig.columns) + ['D'])
df = df_orig.copy()
df.loc['C', 'D'] = 1.0
self.assertEqual(list(df.index), list(df_orig.index) + ['C'])
self.assertEqual(list(df.columns), list(df_orig.columns) + ['D'])
def test_get_set_value_no_partial_indexing(self):
# partial w/ MultiIndex raise exception
index = MultiIndex.from_tuples([(0, 1), (0, 2), (1, 1), (1, 2)])
df = DataFrame(index=index, columns=lrange(4))
self.assertRaises(KeyError, df.get_value, 0, 1)
# self.assertRaises(KeyError, df.set_value, 0, 1, 0)
def test_single_element_ix_dont_upcast(self):
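# scalar .ix access on an integer column should give back an integer, not an upcast float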
self.frame['E'] = 1
self.assertTrue(issubclass(self.frame['E'].dtype.type,
(int, np.integer)))
result = self.frame.ix[self.frame.index[5], 'E']
self.assertTrue(com.is_integer(result))
def test_irow(self):
df = DataFrame(np.random.randn(10, 4), index=lrange(0, 20, 2))
result = df.irow(1)
exp = df.ix[2]
assert_series_equal(result, exp)
result = df.irow(2)
exp = df.ix[4]
assert_series_equal(result, exp)
# slice
result = df.irow(slice(4, 8))
expected = df.ix[8:14]
assert_frame_equal(result, expected)
# verify slice is view
# setting it makes it raise/warn
def f():
result[2] = 0.
self.assertRaises(com.SettingWithCopyError, f)
exp_col = df[2].copy()
exp_col[4:8] = 0.
assert_series_equal(df[2], exp_col)
# list of integers
result = df.irow([1, 2, 4, 6])
expected = df.reindex(df.index[[1, 2, 4, 6]])
assert_frame_equal(result, expected)
def test_icol(self):
df = DataFrame(np.random.randn(4, 10), columns=lrange(0, 20, 2))
result = df.icol(1)
exp = df.ix[:, 2]
assert_series_equal(result, exp)
result = df.icol(2)
exp = df.ix[:, 4]
assert_series_equal(result, exp)
# slice
result = df.icol(slice(4, 8))
expected = df.ix[:, 8:14]
assert_frame_equal(result, expected)
# verify slice is view
# and that we are setting a copy
def f():
result[8] = 0.
self.assertRaises(com.SettingWithCopyError, f)
self.assertTrue((df[8] == 0).all())
# list of integers
result = df.icol([1, 2, 4, 6])
expected = df.reindex(columns=df.columns[[1, 2, 4, 6]])
assert_frame_equal(result, expected)
def test_irow_icol_duplicates(self):
df = DataFrame(np.random.rand(3, 3), columns=list('ABC'),
index=list('aab'))
result = df.irow(0)
result2 = df.ix[0]
tm.assert_isinstance(result, Series)
assert_almost_equal(result.values, df.values[0])
assert_series_equal(result, result2)
result = df.T.icol(0)
result2 = df.T.ix[:, 0]
tm.assert_isinstance(result, Series)
assert_almost_equal(result.values, df.values[0])
assert_series_equal(result, result2)
# multiindex
df = DataFrame(np.random.randn(3, 3), columns=[['i', 'i', 'j'],
['A', 'A', 'B']],
index=[['i', 'i', 'j'], ['X', 'X', 'Y']])
rs = df.irow(0)
xp = df.ix[0]
assert_series_equal(rs, xp)
rs = df.icol(0)
xp = df.T.ix[0]
assert_series_equal(rs, xp)
rs = df.icol([0])
xp = df.ix[:, [0]]
assert_frame_equal(rs, xp)
# #2259
df = DataFrame([[1, 2, 3], [4, 5, 6]], columns=[1, 1, 2])
result = df.icol([0])
expected = df.take([0], axis=1)
assert_frame_equal(result, expected)
def test_icol_sparse_propegate_fill_value(self):
from pandas.sparse.api import SparseDataFrame
df = SparseDataFrame({'A': [999, 1]}, default_fill_value=999)
self.assertTrue(len(df['A'].sp_values) == len(df.icol(0).sp_values))
def test_iget_value(self):
for i, row in enumerate(self.frame.index):
for j, col in enumerate(self.frame.columns):
result = self.frame.iget_value(i, j)
expected = self.frame.get_value(row, col)
assert_almost_equal(result, expected)
def test_nested_exception(self):
# Ignore the strange way of triggering the problem
# (which may get fixed); it's just a way to trigger
# the issue of re-raising an outer exception without
# a named argument
df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [7, 8,
9]}).set_index(["a", "b"])
l = list(df.index)
l[0] = ["a", "b"]
df.index = l
try:
repr(df)
except Exception as e:
self.assertNotEqual(type(e), UnboundLocalError)
def test_reverse_reindex_ffill_raises(self):
dr = pd.date_range('2013-08-01', periods=6, freq='B')
data = np.random.randn(6,1)
df = pd.DataFrame(data, index=dr, columns=list('A'))
df['A'][3] = np.nan
df_rev = pd.DataFrame(data, index=dr[::-1], columns=list('A'))
# Reverse index is not 'monotonic'
self.assertRaises(ValueError, df_rev.reindex, df.index, method='pad')
self.assertRaises(ValueError, df_rev.reindex, df.index, method='ffill')
self.assertRaises(ValueError, df_rev.reindex, df.index, method='bfill')
def test_reversed_reindex_ffill_raises(self):
dr = pd.date_range('2013-08-01', periods=6, freq='B')
data = np.random.randn(6,1)
df = pd.DataFrame(data, index=dr, columns=list('A'))
df['A'][3] = np.nan
df = pd.DataFrame(data, index=dr, columns=list('A'))
# Reversed reindex is not 'monotonic'
self.assertRaises(ValueError, df.reindex, dr[::-1], method='pad')
self.assertRaises(ValueError, df.reindex, dr[::-1], method='ffill')
self.assertRaises(ValueError, df.reindex, dr[::-1], method='bfill')
def test_reindex_level(self):
from itertools import permutations
icol = ['jim', 'joe', 'jolie']
def verify_first_level(df, level, idx):
f = lambda val: np.nonzero(df[level] == val)[0]
i = np.concatenate(list(map(f, idx)))
left = df.set_index(icol).reindex(idx, level=level)
right = df.iloc[i].set_index(icol)
assert_frame_equal(left, right)
def verify(df, level, idx, indexer):
left = df.set_index(icol).reindex(idx, level=level)
right = df.iloc[indexer].set_index(icol)
assert_frame_equal(left, right)
df = pd.DataFrame({'jim':list('B' * 4 + 'A' * 2 + 'C' * 3),
'joe':list('abcdeabcd')[::-1],
'jolie':[10, 20, 30] * 3,
'joline': np.random.randint(0, 1000, 9)})
target = [['C', 'B', 'A'], ['F', 'C', 'A', 'D'], ['A'], ['D', 'F'],
['A', 'B', 'C'], ['C', 'A', 'B'], ['C', 'B'], ['C', 'A'],
['A', 'B'], ['B', 'A', 'C'], ['A', 'C', 'B']]
for idx in target:
verify_first_level(df, 'jim', idx)
verify(df, 'joe', list('abcde'), [3, 2, 1, 0, 5, 4, 8, 7, 6])
verify(df, 'joe', list('abcd'), [3, 2, 1, 0, 5, 8, 7, 6])
verify(df, 'joe', list('abc'), [3, 2, 1, 8, 7, 6])
verify(df, 'joe', list('eca'), [1, 3, 4, 6, 8])
verify(df, 'joe', list('edc'), [0, 1, 4, 5, 6])
verify(df, 'joe', list('eadbc'), [3, 0, 2, 1, 4, 5, 8, 7, 6])
verify(df, 'joe', list('edwq'), [0, 4, 5])
verify(df, 'joe', list('wq'), [])
df = DataFrame({'jim':['mid'] * 5 + ['btm'] * 8 + ['top'] * 7,
'joe':['3rd'] * 2 + ['1st'] * 3 + ['2nd'] * 3 +
['1st'] * 2 + ['3rd'] * 3 + ['1st'] * 2 +
['3rd'] * 3 + ['2nd'] * 2,
'jolie':np.random.randint(0, 1000, 20),
'joline': np.random.randn(20).round(3) * 10})
for idx in permutations(df['jim'].unique()):
for i in range(3):
verify_first_level(df, 'jim', idx[:i+1])
i = [2,3,4,0,1,8,9,5,6,7,10,11,12,13,14,18,19,15,16,17]
verify(df, 'joe', ['1st', '2nd', '3rd'], i)
i = [0,1,2,3,4,10,11,12,5,6,7,8,9,15,16,17,18,19,13,14]
verify(df, 'joe', ['3rd', '2nd', '1st'], i)
i = [0,1,5,6,7,10,11,12,18,19,15,16,17]
verify(df, 'joe', ['2nd', '3rd'], i)
i = [0,1,2,3,4,10,11,12,8,9,15,16,17,13,14]
verify(df, 'joe', ['3rd', '1st'], i)
def test_getitem_ix_float_duplicates(self):
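# with duplicated float labels, .loc/.ix on the repeated label should return every matching row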
df = pd.DataFrame(np.random.randn(3, 3),
index=[0.1, 0.2, 0.2], columns=list('abc'))
expect = df.iloc[1:]
tm.assert_frame_equal(df.loc[0.2], expect)
tm.assert_frame_equal(df.ix[0.2], expect)
expect = df.iloc[1:, 0]
tm.assert_series_equal(df.loc[0.2, 'a'], expect)
df.index = [1, 0.2, 0.2]
expect = df.iloc[1:]
tm.assert_frame_equal(df.loc[0.2], expect)
tm.assert_frame_equal(df.ix[0.2], expect)
expect = df.iloc[1:, 0]
tm.assert_series_equal(df.loc[0.2, 'a'], expect)
df = pd.DataFrame(np.random.randn(4, 3),
index=[1, 0.2, 0.2, 1], columns=list('abc'))
expect = df.iloc[1:-1]
tm.assert_frame_equal(df.loc[0.2], expect)
tm.assert_frame_equal(df.ix[0.2], expect)
expect = df.iloc[1:-1, 0]
tm.assert_series_equal(df.loc[0.2, 'a'], expect)
df.index = [0.1, 0.2, 2, 0.2]
expect = df.iloc[[1, -1]]
tm.assert_frame_equal(df.loc[0.2], expect)
tm.assert_frame_equal(df.ix[0.2], expect)
expect = df.iloc[[1, -1], 0]
tm.assert_series_equal(df.loc[0.2, 'a'], expect)
def test_setitem_with_sparse_value(self):
# GH8131
df = pd.DataFrame({'c_1':['a', 'b', 'c'], 'n_1': [1., 2., 3.]})
sp_series = pd.Series([0, 0, 1]).to_sparse(fill_value=0)
df['new_column'] = sp_series
tm.assert_series_equal(df['new_column'], sp_series)
def test_setitem_with_unaligned_sparse_value(self):
df = pd.DataFrame({'c_1':['a', 'b', 'c'], 'n_1': [1., 2., 3.]})
sp_series = (pd.Series([0, 0, 1], index=[2, 1, 0])
.to_sparse(fill_value=0))
df['new_column'] = sp_series
tm.assert_series_equal(df['new_column'], pd.Series([1, 0, 0]))
_seriesd = tm.getSeriesData()
_tsd = tm.getTimeSeriesData()
_frame = DataFrame(_seriesd)
_frame2 = DataFrame(_seriesd, columns=['D', 'C', 'B', 'A'])
_intframe = DataFrame(dict((k, v.astype(int))
for k, v in compat.iteritems(_seriesd)))
_tsframe = DataFrame(_tsd)
_mixed_frame = _frame.copy()
_mixed_frame['foo'] = 'bar'
class SafeForSparse(object):
_multiprocess_can_split_ = True
def test_copy_index_name_checking(self):
# don't want to be able to modify the index stored elsewhere after
# making a copy
for attr in ('index', 'columns'):
ind = getattr(self.frame, attr)
ind.name = None
cp = self.frame.copy()
getattr(cp, attr).name = 'foo'
self.assertIsNone(getattr(self.frame, attr).name)
def test_getitem_pop_assign_name(self):
s = self.frame['A']
self.assertEqual(s.name, 'A')
s = self.frame.pop('A')
self.assertEqual(s.name, 'A')
s = self.frame.ix[:, 'B']
self.assertEqual(s.name, 'B')
s2 = s.ix[:]
self.assertEqual(s2.name, 'B')
def test_get_value(self):
for idx in self.frame.index:
for col in self.frame.columns:
result = self.frame.get_value(idx, col)
expected = self.frame[col][idx]
assert_almost_equal(result, expected)
def test_join_index(self):
# left / right
f = self.frame.reindex(columns=['A', 'B'])[:10]
f2 = self.frame.reindex(columns=['C', 'D'])
joined = f.join(f2)
self.assertTrue(f.index.equals(joined.index))
self.assertEqual(len(joined.columns), 4)
joined = f.join(f2, how='left')
self.assertTrue(joined.index.equals(f.index))
self.assertEqual(len(joined.columns), 4)
joined = f.join(f2, how='right')
self.assertTrue(joined.index.equals(f2.index))
self.assertEqual(len(joined.columns), 4)
# inner
f = self.frame.reindex(columns=['A', 'B'])[:10]
f2 = self.frame.reindex(columns=['C', 'D'])
joined = f.join(f2, how='inner')
self.assertTrue(joined.index.equals(f.index.intersection(f2.index)))
self.assertEqual(len(joined.columns), 4)
# outer
f = self.frame.reindex(columns=['A', 'B'])[:10]
f2 = self.frame.reindex(columns=['C', 'D'])
joined = f.join(f2, how='outer')
self.assertTrue(tm.equalContents(self.frame.index, joined.index))
self.assertEqual(len(joined.columns), 4)
assertRaisesRegexp(ValueError, 'join method', f.join, f2, how='foo')
# corner case - overlapping columns
for how in ('outer', 'left', 'inner'):
with assertRaisesRegexp(ValueError, 'columns overlap but no suffix'):
self.frame.join(self.frame, how=how)
def test_join_index_more(self):
af = self.frame.ix[:, ['A', 'B']]
bf = self.frame.ix[::2, ['C', 'D']]
expected = af.copy()
expected['C'] = self.frame['C'][::2]
expected['D'] = self.frame['D'][::2]
result = af.join(bf)
assert_frame_equal(result, expected)
result = af.join(bf, how='right')
assert_frame_equal(result, expected[::2])
result = bf.join(af, how='right')
assert_frame_equal(result, expected.ix[:, result.columns])
def test_join_index_series(self):
df = self.frame.copy()
s = df.pop(self.frame.columns[-1])
joined = df.join(s)
assert_frame_equal(joined, self.frame, check_names=False) # TODO should this check_names ?
s.name = None
assertRaisesRegexp(ValueError, 'must have a name', df.join, s)
def test_join_overlap(self):
df1 = self.frame.ix[:, ['A', 'B', 'C']]
df2 = self.frame.ix[:, ['B', 'C', 'D']]
joined = df1.join(df2, lsuffix='_df1', rsuffix='_df2')
df1_suf = df1.ix[:, ['B', 'C']].add_suffix('_df1')
df2_suf = df2.ix[:, ['B', 'C']].add_suffix('_df2')
no_overlap = self.frame.ix[:, ['A', 'D']]
expected = df1_suf.join(df2_suf).join(no_overlap)
# column order not necessarily sorted
assert_frame_equal(joined, expected.ix[:, joined.columns])
def test_add_prefix_suffix(self):
with_prefix = self.frame.add_prefix('foo#')
expected = ['foo#%s' % c for c in self.frame.columns]
self.assert_numpy_array_equal(with_prefix.columns, expected)
with_suffix = self.frame.add_suffix('#foo')
expected = ['%s#foo' % c for c in self.frame.columns]
self.assert_numpy_array_equal(with_suffix.columns, expected)
class TestDataFrame(tm.TestCase, CheckIndexing,
SafeForSparse):
klass = DataFrame
_multiprocess_can_split_ = True
def setUp(self):
import warnings
warnings.filterwarnings(action='ignore', category=FutureWarning)
self.frame = _frame.copy()
self.frame2 = _frame2.copy()
# force these all to int64 to avoid platform testing issues
self.intframe = DataFrame(dict([ (c,s) for c,s in compat.iteritems(_intframe) ]), dtype = np.int64)
self.tsframe = _tsframe.copy()
self.mixed_frame = _mixed_frame.copy()
self.mixed_float = DataFrame({ 'A': _frame['A'].copy().astype('float32'),
'B': _frame['B'].copy().astype('float32'),
'C': _frame['C'].copy().astype('float16'),
'D': _frame['D'].copy().astype('float64') })
self.mixed_float2 = DataFrame({ 'A': _frame2['A'].copy().astype('float32'),
'B': _frame2['B'].copy().astype('float32'),
'C': _frame2['C'].copy().astype('float16'),
'D': _frame2['D'].copy().astype('float64') })
self.mixed_int = DataFrame({ 'A': _intframe['A'].copy().astype('int32'),
'B': np.ones(len(_intframe['B']),dtype='uint64'),
'C': _intframe['C'].copy().astype('uint8'),
'D': _intframe['D'].copy().astype('int64') })
self.all_mixed = DataFrame({'a': 1., 'b': 2, 'c': 'foo', 'float32' : np.array([1.]*10,dtype='float32'),
'int32' : np.array([1]*10,dtype='int32'),
}, index=np.arange(10))
self.ts1 = tm.makeTimeSeries()
self.ts2 = tm.makeTimeSeries()[5:]
self.ts3 = tm.makeTimeSeries()[-5:]
self.ts4 = tm.makeTimeSeries()[1:-1]
self.ts_dict = {
'col1': self.ts1,
'col2': self.ts2,
'col3': self.ts3,
'col4': self.ts4,
}
self.empty = DataFrame({})
arr = np.array([[1., 2., 3.],
[4., 5., 6.],
[7., 8., 9.]])
self.simple = DataFrame(arr, columns=['one', 'two', 'three'],
index=['a', 'b', 'c'])
def test_get_axis(self):
f = self.frame
self.assertEqual(f._get_axis_number(0), 0)
self.assertEqual(f._get_axis_number(1), 1)
self.assertEqual(f._get_axis_number('index'), 0)
self.assertEqual(f._get_axis_number('rows'), 0)
self.assertEqual(f._get_axis_number('columns'), 1)
self.assertEqual(f._get_axis_name(0), 'index')
self.assertEqual(f._get_axis_name(1), 'columns')
self.assertEqual(f._get_axis_name('index'), 'index')
self.assertEqual(f._get_axis_name('rows'), 'index')
self.assertEqual(f._get_axis_name('columns'), 'columns')
self.assertIs(f._get_axis(0), f.index)
self.assertIs(f._get_axis(1), f.columns)
assertRaisesRegexp(ValueError, 'No axis named', f._get_axis_number, 2)
assertRaisesRegexp(ValueError, 'No axis.*foo', f._get_axis_name, 'foo')
assertRaisesRegexp(ValueError, 'No axis.*None', f._get_axis_name, None)
assertRaisesRegexp(ValueError, 'No axis named', f._get_axis_number, None)
def test_set_index(self):
idx = Index(np.arange(len(self.mixed_frame)))
# cache it
_ = self.mixed_frame['foo']
self.mixed_frame.index = idx
self.assertIs(self.mixed_frame['foo'].index, idx)
with assertRaisesRegexp(ValueError, 'Length mismatch'):
self.mixed_frame.index = idx[::2]
def test_set_index_cast(self):
# casting the index dtype should not break label-based .ix lookups
df = DataFrame({'A' : [1.1,2.2,3.3], 'B' : [5.0,6.1,7.2]},
index = [2010,2011,2012])
expected = df.ix[2010]
new_index = df.index.astype(np.int32)
df.index = new_index
result = df.ix[2010]
assert_series_equal(result,expected)
def test_set_index2(self):
df = DataFrame({'A': ['foo', 'foo', 'foo', 'bar', 'bar'],
'B': ['one', 'two', 'three', 'one', 'two'],
'C': ['a', 'b', 'c', 'd', 'e'],
'D': np.random.randn(5),
'E': np.random.randn(5)})
# new object, single-column
result = df.set_index('C')
result_nodrop = df.set_index('C', drop=False)
index = Index(df['C'], name='C')
expected = df.ix[:, ['A', 'B', 'D', 'E']]
expected.index = index
expected_nodrop = df.copy()
expected_nodrop.index = index
assert_frame_equal(result, expected)
assert_frame_equal(result_nodrop, expected_nodrop)
self.assertEqual(result.index.name, index.name)
# inplace, single
df2 = df.copy()
df2.set_index('C', inplace=True)
assert_frame_equal(df2, expected)
df3 = df.copy()
df3.set_index('C', drop=False, inplace=True)
assert_frame_equal(df3, expected_nodrop)
# create new object, multi-column
result = df.set_index(['A', 'B'])
result_nodrop = df.set_index(['A', 'B'], drop=False)
index = MultiIndex.from_arrays([df['A'], df['B']], names=['A', 'B'])
expected = df.ix[:, ['C', 'D', 'E']]
expected.index = index
expected_nodrop = df.copy()
expected_nodrop.index = index
assert_frame_equal(result, expected)
assert_frame_equal(result_nodrop, expected_nodrop)
self.assertEqual(result.index.names, index.names)
# inplace
df2 = df.copy()
df2.set_index(['A', 'B'], inplace=True)
assert_frame_equal(df2, expected)
df3 = df.copy()
df3.set_index(['A', 'B'], drop=False, inplace=True)
assert_frame_equal(df3, expected_nodrop)
# corner case
with assertRaisesRegexp(ValueError, 'Index has duplicate keys'):
df.set_index('A', verify_integrity=True)
# append
result = df.set_index(['A', 'B'], append=True)
xp = df.reset_index().set_index(['index', 'A', 'B'])
xp.index.names = [None, 'A', 'B']
assert_frame_equal(result, xp)
# append to existing multiindex
rdf = df.set_index(['A'], append=True)
rdf = rdf.set_index(['B', 'C'], append=True)
expected = df.set_index(['A', 'B', 'C'], append=True)
assert_frame_equal(rdf, expected)
# Series
result = df.set_index(df.C)
self.assertEqual(result.index.name, 'C')
def test_set_index_nonuniq(self):
df = DataFrame({'A': ['foo', 'foo', 'foo', 'bar', 'bar'],
'B': ['one', 'two', 'three', 'one', 'two'],
'C': ['a', 'b', 'c', 'd', 'e'],
'D': np.random.randn(5),
'E': np.random.randn(5)})
with assertRaisesRegexp(ValueError, 'Index has duplicate keys'):
df.set_index('A', verify_integrity=True, inplace=True)
self.assertIn('A', df)
def test_set_index_bug(self):
# GH1590
df = DataFrame({'val': [0, 1, 2], 'key': ['a', 'b', 'c']})
df2 = df.select(lambda indx: indx >= 1)
rs = df2.set_index('key')
xp = DataFrame({'val': [1, 2]},
Index(['b', 'c'], name='key'))
assert_frame_equal(rs, xp)
def test_set_index_pass_arrays(self):
df = DataFrame({'A': ['foo', 'bar', 'foo', 'bar',
'foo', 'bar', 'foo', 'foo'],
'B': ['one', 'one', 'two', 'three',
'two', 'two', 'one', 'three'],
'C': np.random.randn(8),
'D': np.random.randn(8)})
# multiple columns
result = df.set_index(['A', df['B'].values], drop=False)
expected = df.set_index(['A', 'B'], drop=False)
assert_frame_equal(result, expected, check_names=False) # TODO should set_index check_names ?
def test_set_index_cast_datetimeindex(self):
df = DataFrame({'A': [datetime(2000, 1, 1) + timedelta(i)
for i in range(1000)],
'B': np.random.randn(1000)})
idf = df.set_index('A')
tm.assert_isinstance(idf.index, DatetimeIndex)
# don't cast a DatetimeIndex WITH a tz, leave as object
# GH 6032
i = pd.DatetimeIndex(pd.tseries.tools.to_datetime(['2013-1-1 13:00','2013-1-2 14:00'], errors="raise")).tz_localize('US/Pacific')
df = DataFrame(np.random.randn(2,1),columns=['A'])
expected = Series(np.array([pd.Timestamp('2013-01-01 13:00:00-0800', tz='US/Pacific'),
pd.Timestamp('2013-01-02 14:00:00-0800', tz='US/Pacific')], dtype="object"))
# convert index to series
result = Series(i)
assert_series_equal(result, expected)
# assign to frame
df['B'] = i
result = df['B']
assert_series_equal(result, expected)
# keep the timezone
result = i.to_series(keep_tz=True)
assert_series_equal(result.reset_index(drop=True), expected)
# convert to utc
df['C'] = i.to_series().reset_index(drop=True)
result = df['C']
comp = DatetimeIndex(expected.values).copy()
comp.tz = None
self.assert_numpy_array_equal(result.values, comp.values)
# list of datetimes with a tz
df['D'] = i.to_pydatetime()
result = df['D']
assert_series_equal(result, expected)
# GH 6785
# set the index manually
import pytz
df = DataFrame([{'ts':datetime(2014, 4, 1, tzinfo=pytz.utc), 'foo':1}])
expected = df.set_index('ts')
df.index = df['ts']
df.pop('ts')
assert_frame_equal(df, expected)
# GH 3950
# reset_index with single level
for tz in ['UTC', 'Asia/Tokyo', 'US/Eastern']:
idx = pd.date_range('1/1/2011', periods=5, freq='D', tz=tz, name='idx')
df = pd.DataFrame({'a': range(5), 'b': ['A', 'B', 'C', 'D', 'E']}, index=idx)
expected = pd.DataFrame({'idx': [datetime(2011, 1, 1), datetime(2011, 1, 2),
datetime(2011, 1, 3), datetime(2011, 1, 4),
datetime(2011, 1, 5)],
'a': range(5), 'b': ['A', 'B', 'C', 'D', 'E']},
columns=['idx', 'a', 'b'])
expected['idx'] = expected['idx'].apply(lambda d: pd.Timestamp(d, tz=tz))
assert_frame_equal(df.reset_index(), expected)
def test_set_index_multiindexcolumns(self):
columns = MultiIndex.from_tuples([('foo', 1), ('foo', 2), ('bar', 1)])
df = DataFrame(np.random.randn(3, 3), columns=columns)
rs = df.set_index(df.columns[0])
xp = df.ix[:, 1:]
xp.index = df.ix[:, 0].values
xp.index.names = [df.columns[0]]
assert_frame_equal(rs, xp)
def test_set_index_empty_column(self):
# #1971
df = DataFrame([
dict(a=1, p=0),
dict(a=2, m=10),
dict(a=3, m=11, p=20),
dict(a=4, m=12, p=21)
], columns=('a', 'm', 'p', 'x'))
# it works!
result = df.set_index(['a', 'x'])
repr(result)
def test_set_columns(self):
cols = Index(np.arange(len(self.mixed_frame.columns)))
self.mixed_frame.columns = cols
with assertRaisesRegexp(ValueError, 'Length mismatch'):
self.mixed_frame.columns = cols[::2]
def test_keys(self):
getkeys = self.frame.keys
self.assertIs(getkeys(), self.frame.columns)
def test_column_contains_typeerror(self):
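# 'columns in frame' is not a supported containment check; raising TypeError is acceptable here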
try:
self.frame.columns in self.frame
except TypeError:
pass
def test_constructor(self):
df = DataFrame()
self.assertEqual(len(df.index), 0)
df = DataFrame(data={})
self.assertEqual(len(df.index), 0)
def test_constructor_mixed(self):
index, data = tm.getMixedTypeDict()
indexed_frame = DataFrame(data, index=index)
unindexed_frame = DataFrame(data)
self.assertEqual(self.mixed_frame['foo'].dtype, np.object_)
def test_constructor_cast_failure(self):
foo = DataFrame({'a': ['a', 'b', 'c']}, dtype=np.float64)
self.assertEqual(foo['a'].dtype, object)
# GH 3010, constructing with odd arrays
df = DataFrame(np.ones((4,2)))
# this is ok
df['foo'] = np.ones((4,2)).tolist()
# this is not ok
self.assertRaises(ValueError, df.__setitem__, tuple(['test']), np.ones((4,2)))
# this is ok
df['foo2'] = np.ones((4,2)).tolist()
def test_constructor_dtype_nocast_view(self):
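# constructing with a matching dtype should reuse the data, so writes through the new frame show up in the original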
df = DataFrame([[1, 2]])
should_be_view = DataFrame(df, dtype=df[0].dtype)
should_be_view[0][0] = 99
self.assertEqual(df.values[0, 0], 99)
should_be_view = DataFrame(df.values, dtype=df[0].dtype)
should_be_view[0][0] = 97
self.assertEqual(df.values[0, 0], 97)
def test_constructor_dtype_list_data(self):
df = DataFrame([[1, '2'],
[None, 'a']], dtype=object)
self.assertIsNone(df.ix[1, 0])
self.assertEqual(df.ix[0, 1], '2')
def test_constructor_list_frames(self):
# GH 3243
result = DataFrame([DataFrame([])])
self.assertEqual(result.shape, (1,0))
result = DataFrame([DataFrame(dict(A = lrange(5)))])
tm.assert_isinstance(result.iloc[0,0], DataFrame)
def test_constructor_mixed_dtypes(self):
def _make_mixed_dtypes_df(typ, ad = None):
if typ == 'int':
dtypes = MIXED_INT_DTYPES
arrays = [ np.array(np.random.rand(10), dtype = d) for d in dtypes ]
elif typ == 'float':
dtypes = MIXED_FLOAT_DTYPES
arrays = [ np.array(np.random.randint(10, size=10), dtype = d) for d in dtypes ]
zipper = lzip(dtypes,arrays)
for d,a in zipper:
assert(a.dtype == d)
if ad is None:
ad = dict()
ad.update(dict([ (d,a) for d,a in zipper ]))
return DataFrame(ad)
def _check_mixed_dtypes(df, dtypes = None):
if dtypes is None:
dtypes = MIXED_FLOAT_DTYPES + MIXED_INT_DTYPES
for d in dtypes:
if d in df:
assert(df.dtypes[d] == d)
# mixed floating and integer dtypes coexist in the same frame
df = _make_mixed_dtypes_df('float')
_check_mixed_dtypes(df)
# add lots of types
df = _make_mixed_dtypes_df('float', dict(A = 1, B = 'foo', C = 'bar'))
_check_mixed_dtypes(df)
# GH 622
df = _make_mixed_dtypes_df('int')
_check_mixed_dtypes(df)
def test_constructor_rec(self):
rec = self.frame.to_records(index=False)
# Assigning causes segfault in NumPy < 1.5.1
# rec.dtype.names = list(rec.dtype.names)[::-1]
index = self.frame.index
df = DataFrame(rec)
self.assert_numpy_array_equal(df.columns, rec.dtype.names)
df2 = DataFrame(rec, index=index)
self.assert_numpy_array_equal(df2.columns, rec.dtype.names)
self.assertTrue(df2.index.equals(index))
rng = np.arange(len(rec))[::-1]
df3 = DataFrame(rec, index=rng, columns=['C', 'B'])
expected = DataFrame(rec, index=rng).reindex(columns=['C', 'B'])
assert_frame_equal(df3, expected)
def test_constructor_bool(self):
df = DataFrame({0: np.ones(10, dtype=bool),
1: np.zeros(10, dtype=bool)})
self.assertEqual(df.values.dtype, np.bool_)
def test_constructor_overflow_int64(self):
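# uint64 values too large for int64 should end up as object dtype rather than silently overflowing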
values = np.array([2 ** 64 - i for i in range(1, 10)],
dtype=np.uint64)
result = DataFrame({'a': values})
self.assertEqual(result['a'].dtype, object)
# #2355
data_scores = [(6311132704823138710, 273), (2685045978526272070, 23),
(8921811264899370420, 45), (long(17019687244989530680), 270),
(long(9930107427299601010), 273)]
dtype = [('uid', 'u8'), ('score', 'u8')]
data = np.zeros((len(data_scores),), dtype=dtype)
data[:] = data_scores
df_crawls = DataFrame(data)
self.assertEqual(df_crawls['uid'].dtype, object)
def test_constructor_ordereddict(self):
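# column order should follow the OrderedDict insertion order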
import random
nitems = 100
nums = lrange(nitems)
random.shuffle(nums)
expected = ['A%d' % i for i in nums]
df = DataFrame(OrderedDict(zip(expected, [[0]] * nitems)))
self.assertEqual(expected, list(df.columns))
def test_constructor_dict(self):
frame = DataFrame({'col1': self.ts1,
'col2': self.ts2})
tm.assert_dict_equal(self.ts1, frame['col1'], compare_keys=False)
tm.assert_dict_equal(self.ts2, frame['col2'], compare_keys=False)
frame = DataFrame({'col1': self.ts1,
'col2': self.ts2},
columns=['col2', 'col3', 'col4'])
self.assertEqual(len(frame), len(self.ts2))
self.assertNotIn('col1', frame)
self.assertTrue(isnull(frame['col3']).all())
# Corner cases
self.assertEqual(len(DataFrame({})), 0)
# mix dict and array, wrong size - no spec for which error should raise
# first
with tm.assertRaises(ValueError):
DataFrame({'A': {'a': 'a', 'b': 'b'}, 'B': ['a', 'b', 'c']})
# Length-one dict micro-optimization
frame = DataFrame({'A': {'1': 1, '2': 2}})
self.assert_numpy_array_equal(frame.index, ['1', '2'])
# empty dict plus index
idx = Index([0, 1, 2])
frame = DataFrame({}, index=idx)
self.assertIs(frame.index, idx)
# empty with index and columns
idx = Index([0, 1, 2])
frame = DataFrame({}, index=idx, columns=idx)
self.assertIs(frame.index, idx)
self.assertIs(frame.columns, idx)
self.assertEqual(len(frame._series), 3)
# with dict of empty list and Series
frame = DataFrame({'A': [], 'B': []}, columns=['A', 'B'])
self.assertTrue(frame.index.equals(Index([])))
def test_constructor_multi_index(self):
# GH 4078
# construction error with mi and all-nan frame
tuples = [(2, 3), (3, 3), (3, 3)]
mi = MultiIndex.from_tuples(tuples)
df = DataFrame(index=mi,columns=mi)
self.assertTrue(pd.isnull(df).values.ravel().all())
tuples = [(3, 3), (2, 3), (3, 3)]
mi = MultiIndex.from_tuples(tuples)
df = DataFrame(index=mi,columns=mi)
self.assertTrue(pd.isnull(df).values.ravel().all())
def test_constructor_error_msgs(self):
msg = "Mixing dicts with non-Series may lead to ambiguous ordering."
# mix dict and array, wrong size
with assertRaisesRegexp(ValueError, msg):
DataFrame({'A': {'a': 'a', 'b': 'b'},
'B': ['a', 'b', 'c']})
# wrong size ndarray, GH 3105
msg = "Shape of passed values is \(3, 4\), indices imply \(3, 3\)"
with assertRaisesRegexp(ValueError, msg):
DataFrame(np.arange(12).reshape((4, 3)),
columns=['foo', 'bar', 'baz'],
index=date_range('2000-01-01', periods=3))
# higher dim raise exception
with assertRaisesRegexp(ValueError, 'Must pass 2-d input'):
DataFrame(np.zeros((3, 3, 3)), columns=['A', 'B', 'C'], index=[1])
# wrong size axis labels
with assertRaisesRegexp(ValueError, "Shape of passed values is \(3, 2\), indices imply \(3, 1\)"):
DataFrame(np.random.rand(2,3), columns=['A', 'B', 'C'], index=[1])
with assertRaisesRegexp(ValueError, "Shape of passed values is \(3, 2\), indices imply \(2, 2\)"):
DataFrame(np.random.rand(2,3), columns=['A', 'B'], index=[1, 2])
with assertRaisesRegexp(ValueError, 'If using all scalar values, you must pass an index'):
DataFrame({'a': False, 'b': True})
def test_constructor_with_embedded_frames(self):
# embedded data frames
df1 = DataFrame({'a':[1, 2, 3], 'b':[3, 4, 5]})
df2 = DataFrame([df1, df1+10])
df2.dtypes
str(df2)
result = df2.loc[0,0]
assert_frame_equal(result,df1)
result = df2.loc[1,0]
assert_frame_equal(result,df1+10)
def test_insert_error_msmgs(self):
# GH 7432
df = DataFrame({'foo':['a', 'b', 'c'], 'bar':[1,2,3], 'baz':['d','e','f']}).set_index('foo')
s = DataFrame({'foo':['a', 'b', 'c', 'a'], 'fiz':['g','h','i','j']}).set_index('foo')
msg = 'cannot reindex from a duplicate axis'
with assertRaisesRegexp(ValueError, msg):
df['newcol'] = s
# GH 4107, more descriptive error message
df = DataFrame(np.random.randint(0,2,(4,4)),
columns=['a', 'b', 'c', 'd'])
msg = 'incompatible index of inserted column with frame index'
with assertRaisesRegexp(TypeError, msg):
df['gr'] = df.groupby(['b', 'c']).count()
def test_constructor_subclass_dict(self):
# Test for passing dict subclass to constructor
data = {'col1': tm.TestSubDict((x, 10.0 * x) for x in range(10)),
'col2': tm.TestSubDict((x, 20.0 * x) for x in range(10))}
df = DataFrame(data)
refdf = DataFrame(dict((col, dict(compat.iteritems(val)))
for col, val in compat.iteritems(data)))
assert_frame_equal(refdf, df)
data = tm.TestSubDict(compat.iteritems(data))
df = DataFrame(data)
assert_frame_equal(refdf, df)
# try with defaultdict
from collections import defaultdict
data = {}
self.frame['B'][:10] = np.nan
for k, v in compat.iteritems(self.frame):
dct = defaultdict(dict)
dct.update(v.to_dict())
data[k] = dct
frame = DataFrame(data)
assert_frame_equal(self.frame.sort_index(), frame)
def test_constructor_dict_block(self):
expected = [[4., 3., 2., 1.]]
df = DataFrame({'d': [4.], 'c': [3.], 'b': [2.], 'a': [1.]},
columns=['d', 'c', 'b', 'a'])
assert_almost_equal(df.values, expected)
def test_constructor_dict_cast(self):
# cast float tests
test_data = {
'A': {'1': 1, '2': 2},
'B': {'1': '1', '2': '2', '3': '3'},
}
frame = DataFrame(test_data, dtype=float)
self.assertEqual(len(frame), 3)
self.assertEqual(frame['B'].dtype, np.float64)
self.assertEqual(frame['A'].dtype, np.float64)
frame = DataFrame(test_data)
self.assertEqual(len(frame), 3)
self.assertEqual(frame['B'].dtype, np.object_)
self.assertEqual(frame['A'].dtype, np.float64)
# can't cast to float
test_data = {
'A': dict(zip(range(20), tm.makeStringIndex(20))),
'B': dict(zip(range(15), randn(15)))
}
frame = DataFrame(test_data, dtype=float)
self.assertEqual(len(frame), 20)
self.assertEqual(frame['A'].dtype, np.object_)
self.assertEqual(frame['B'].dtype, np.float64)
def test_constructor_dict_dont_upcast(self):
d = {'Col1': {'Row1': 'A String', 'Row2': np.nan}}
df = DataFrame(d)
tm.assert_isinstance(df['Col1']['Row2'], float)
dm = DataFrame([[1, 2], ['a', 'b']], index=[1, 2], columns=[1, 2])
tm.assert_isinstance(dm[1][1], int)
def test_constructor_dict_of_tuples(self):
# GH #1491
data = {'a': (1, 2, 3), 'b': (4, 5, 6)}
result = DataFrame(data)
expected = DataFrame(dict((k, list(v)) for k, v in compat.iteritems(data)))
assert_frame_equal(result, expected, check_dtype=False)
def test_constructor_dict_multiindex(self):
check = lambda result, expected: tm.assert_frame_equal(
result, expected, check_dtype=True, check_index_type=True,
check_column_type=True, check_names=True)
d = {('a', 'a'): {('i', 'i'): 0, ('i', 'j'): 1, ('j', 'i'): 2},
('b', 'a'): {('i', 'i'): 6, ('i', 'j'): 5, ('j', 'i'): 4},
('b', 'c'): {('i', 'i'): 7, ('i', 'j'): 8, ('j', 'i'): 9}}
_d = sorted(d.items())
df = DataFrame(d)
expected = DataFrame(
[x[1] for x in _d],
index=MultiIndex.from_tuples([x[0] for x in _d])).T
expected.index = MultiIndex.from_tuples(expected.index)
check(df, expected)
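        # adding a non-tuple key means the keys can no longer form a MultiIndex,
        # so they stay a flat Index of mixed keys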
d['z'] = {'y': 123., ('i', 'i'): 111, ('i', 'j'): 111, ('j', 'i'): 111}
_d.insert(0, ('z', d['z']))
expected = DataFrame(
[x[1] for x in _d],
index=Index([x[0] for x in _d], tupleize_cols=False)).T
expected.index = Index(expected.index, tupleize_cols=False)
df = DataFrame(d)
df = df.reindex(columns=expected.columns, index=expected.index)
check(df, expected)
def _check_basic_constructor(self, empty):
"mat: 2d matrix with shpae (3, 2) to input. empty - makes sized objects"
mat = empty((2, 3), dtype=float)
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
self.assertEqual(len(frame.index), 2)
self.assertEqual(len(frame.columns), 3)
# 1-D input
frame = DataFrame(empty((3,)), columns=['A'], index=[1, 2, 3])
self.assertEqual(len(frame.index), 3)
self.assertEqual(len(frame.columns), 1)
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=np.int64)
self.assertEqual(frame.values.dtype, np.int64)
# wrong size axis labels
msg = r'Shape of passed values is \(3, 2\), indices imply \(3, 1\)'
with assertRaisesRegexp(ValueError, msg):
DataFrame(mat, columns=['A', 'B', 'C'], index=[1])
msg = r'Shape of passed values is \(3, 2\), indices imply \(2, 2\)'
with assertRaisesRegexp(ValueError, msg):
DataFrame(mat, columns=['A', 'B'], index=[1, 2])
# higher dim raise exception
with assertRaisesRegexp(ValueError, 'Must pass 2-d input'):
DataFrame(empty((3, 3, 3)), columns=['A', 'B', 'C'],
index=[1])
# automatic labeling
frame = DataFrame(mat)
self.assert_numpy_array_equal(frame.index, lrange(2))
self.assert_numpy_array_equal(frame.columns, lrange(3))
frame = DataFrame(mat, index=[1, 2])
self.assert_numpy_array_equal(frame.columns, lrange(3))
frame = DataFrame(mat, columns=['A', 'B', 'C'])
self.assert_numpy_array_equal(frame.index, lrange(2))
# 0-length axis
frame = DataFrame(empty((0, 3)))
self.assertEqual(len(frame.index), 0)
frame = DataFrame(empty((3, 0)))
self.assertEqual(len(frame.columns), 0)
def test_constructor_ndarray(self):
mat = np.zeros((2, 3), dtype=float)
self._check_basic_constructor(np.ones)
frame = DataFrame(['foo', 'bar'], index=[0, 1], columns=['A'])
self.assertEqual(len(frame), 2)
def test_constructor_maskedarray(self):
self._check_basic_constructor(ma.masked_all)
# Check non-masked values
mat = ma.masked_all((2, 3), dtype=float)
mat[0, 0] = 1.0
mat[1, 2] = 2.0
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
self.assertEqual(1.0, frame['A'][1])
self.assertEqual(2.0, frame['C'][2])
        # an all-masked frame is all NaN, so frame == frame is False everywhere
mat = ma.masked_all((2, 3), dtype=float)
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
self.assertTrue(np.all(~np.asarray(frame == frame)))
def test_constructor_maskedarray_nonfloat(self):
# masked int promoted to float
mat = ma.masked_all((2, 3), dtype=int)
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
self.assertEqual(len(frame.index), 2)
self.assertEqual(len(frame.columns), 3)
self.assertTrue(np.all(~np.asarray(frame == frame)))
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=np.float64)
self.assertEqual(frame.values.dtype, np.float64)
# Check non-masked values
mat2 = ma.copy(mat)
mat2[0, 0] = 1
mat2[1, 2] = 2
frame = DataFrame(mat2, columns=['A', 'B', 'C'], index=[1, 2])
self.assertEqual(1, frame['A'][1])
self.assertEqual(2, frame['C'][2])
# masked np.datetime64 stays (use lib.NaT as null)
mat = ma.masked_all((2, 3), dtype='M8[ns]')
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
self.assertEqual(len(frame.index), 2)
self.assertEqual(len(frame.columns), 3)
self.assertTrue(isnull(frame).values.all())
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=np.int64)
self.assertEqual(frame.values.dtype, np.int64)
# Check non-masked values
mat2 = ma.copy(mat)
mat2[0, 0] = 1
mat2[1, 2] = 2
frame = DataFrame(mat2, columns=['A', 'B', 'C'], index=[1, 2])
self.assertEqual(1, frame['A'].view('i8')[1])
self.assertEqual(2, frame['C'].view('i8')[2])
# masked bool promoted to object
mat = ma.masked_all((2, 3), dtype=bool)
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
self.assertEqual(len(frame.index), 2)
self.assertEqual(len(frame.columns), 3)
self.assertTrue(np.all(~np.asarray(frame == frame)))
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=object)
self.assertEqual(frame.values.dtype, object)
# Check non-masked values
mat2 = ma.copy(mat)
mat2[0, 0] = True
mat2[1, 2] = False
frame = DataFrame(mat2, columns=['A', 'B', 'C'], index=[1, 2])
self.assertEqual(True, frame['A'][1])
self.assertEqual(False, frame['C'][2])
def test_constructor_mrecarray(self):
# Ensure mrecarray produces frame identical to dict of masked arrays
# from GH3479
assert_fr_equal = functools.partial(assert_frame_equal,
check_index_type=True,
check_column_type=True,
check_frame_type=True)
arrays = [
('float', np.array([1.5, 2.0])),
('int', np.array([1, 2])),
('str', np.array(['abc', 'def'])),
]
for name, arr in arrays[:]:
arrays.append(('masked1_' + name,
np.ma.masked_array(arr, mask=[False, True])))
arrays.append(('masked_all', np.ma.masked_all((2,))))
arrays.append(('masked_none',
np.ma.masked_array([1.0, 2.5], mask=False)))
# call assert_frame_equal for all selections of 3 arrays
for comb in itertools.combinations(arrays, 3):
names, data = zip(*comb)
mrecs = mrecords.fromarrays(data, names=names)
            # fill masked entries so the expected frame holds the fill values
comb = dict([ (k, v.filled()) if hasattr(v,'filled') else (k, v) for k, v in comb ])
expected = DataFrame(comb,columns=names)
result = DataFrame(mrecs)
assert_fr_equal(result,expected)
# specify columns
expected = DataFrame(comb,columns=names[::-1])
result = DataFrame(mrecs, columns=names[::-1])
assert_fr_equal(result,expected)
# specify index
expected = DataFrame(comb,columns=names,index=[1,2])
result = DataFrame(mrecs, index=[1,2])
assert_fr_equal(result,expected)
def test_constructor_corner(self):
df = DataFrame(index=[])
self.assertEqual(df.values.shape, (0, 0))
# empty but with specified dtype
df = DataFrame(index=lrange(10), columns=['a', 'b'], dtype=object)
self.assertEqual(df.values.dtype, np.object_)
        # specifying dtype=int on an empty frame does not error, but values still end up object
df = DataFrame(index=lrange(10), columns=['a', 'b'], dtype=int)
self.assertEqual(df.values.dtype, np.object_)
# #1783 empty dtype object
df = DataFrame({}, columns=['foo', 'bar'])
self.assertEqual(df.values.dtype, np.object_)
df = DataFrame({'b': 1}, index=lrange(10), columns=list('abc'),
dtype=int)
self.assertEqual(df.values.dtype, np.object_)
def test_constructor_scalar_inference(self):
data = {'int': 1, 'bool': True,
'float': 3., 'complex': 4j, 'object': 'foo'}
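        # each scalar is broadcast along the 10-element index and should infer its natural dtype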
df = DataFrame(data, index=np.arange(10))
self.assertEqual(df['int'].dtype, np.int64)
self.assertEqual(df['bool'].dtype, np.bool_)
self.assertEqual(df['float'].dtype, np.float64)
self.assertEqual(df['complex'].dtype, np.complex128)
self.assertEqual(df['object'].dtype, np.object_)
def test_constructor_arrays_and_scalars(self):
df = DataFrame({'a': randn(10), 'b': True})
exp = DataFrame({'a': df['a'].values, 'b': [True] * 10})
assert_frame_equal(df, exp)
with tm.assertRaisesRegexp(ValueError, 'must pass an index'):
DataFrame({'a': False, 'b': True})
def test_constructor_DataFrame(self):
df = DataFrame(self.frame)
assert_frame_equal(df, self.frame)
df_casted = DataFrame(self.frame, dtype=np.int64)
self.assertEqual(df_casted.values.dtype, np.int64)
def test_constructor_more(self):
# used to be in test_matrix.py
arr = randn(10)
dm = DataFrame(arr, columns=['A'], index=np.arange(10))
self.assertEqual(dm.values.ndim, 2)
arr = randn(0)
dm = DataFrame(arr)
self.assertEqual(dm.values.ndim, 2)
# no data specified
dm = DataFrame(columns=['A', 'B'], index=np.arange(10))
self.assertEqual(dm.values.shape, (10, 2))
dm = DataFrame(columns=['A', 'B'])
self.assertEqual(dm.values.shape, (0, 2))
dm = DataFrame(index=np.arange(10))
self.assertEqual(dm.values.shape, (10, 0))
# corner, silly
# TODO: Fix this Exception to be better...
with assertRaisesRegexp(PandasError, 'constructor not properly called'):
DataFrame((1, 2, 3))
# can't cast
mat = np.array(['foo', 'bar'], dtype=object).reshape(2, 1)
with assertRaisesRegexp(ValueError, 'cast'):
DataFrame(mat, index=[0, 1], columns=[0], dtype=float)
dm = DataFrame(DataFrame(self.frame._series))
tm.assert_frame_equal(dm, self.frame)
# int cast
dm = DataFrame({'A': np.ones(10, dtype=int),
'B': np.ones(10, dtype=np.float64)},
index=np.arange(10))
self.assertEqual(len(dm.columns), 2)
self.assertEqual(dm.values.dtype, np.float64)
def test_constructor_empty_list(self):
df = DataFrame([], index=[])
expected = DataFrame(index=[])
assert_frame_equal(df, expected)
def test_constructor_list_of_lists(self):
# GH #484
l = [[1, 'a'], [2, 'b']]
df = DataFrame(data=l, columns=["num", "str"])
self.assertTrue(com.is_integer_dtype(df['num']))
self.assertEqual(df['str'].dtype, np.object_)
# GH 4851
# list of 0-dim ndarrays
expected = DataFrame({ 0: range(10) })
data = [np.array(x) for x in range(10)]
result = DataFrame(data)
assert_frame_equal(result, expected)
def test_constructor_sequence_like(self):
# GH 3783
        # collections.Sequence-like
import collections
class DummyContainer(collections.Sequence):
def __init__(self, lst):
self._lst = lst
def __getitem__(self, n):
return self._lst.__getitem__(n)
            def __len__(self):
                return self._lst.__len__()
l = [DummyContainer([1, 'a']), DummyContainer([2, 'b'])]
columns = ["num", "str"]
result = DataFrame(l, columns=columns)
expected = DataFrame([[1,'a'],[2,'b']],columns=columns)
assert_frame_equal(result, expected, check_dtype=False)
# GH 4297
# support Array
import array
result = DataFrame.from_items([('A', array.array('i', range(10)))])
expected = DataFrame({ 'A' : list(range(10)) })
assert_frame_equal(result, expected, check_dtype=False)
expected = DataFrame([ list(range(10)), list(range(10)) ])
result = DataFrame([ array.array('i', range(10)), array.array('i',range(10)) ])
assert_frame_equal(result, expected, check_dtype=False)
def test_constructor_iterator(self):
expected = DataFrame([ list(range(10)), list(range(10)) ])
result = DataFrame([ range(10), range(10) ])
assert_frame_equal(result, expected)
def test_constructor_generator(self):
#related #2305
gen1 = (i for i in range(10))
gen2 = (i for i in range(10))
expected = DataFrame([ list(range(10)), list(range(10)) ])
result = DataFrame([ gen1, gen2 ])
assert_frame_equal(result, expected)
gen = ([ i, 'a'] for i in range(10))
result = DataFrame(gen)
expected = DataFrame({ 0 : range(10), 1 : 'a' })
assert_frame_equal(result, expected, check_dtype=False)
def test_constructor_list_of_dicts(self):
data = [OrderedDict([['a', 1.5], ['b', 3], ['c', 4], ['d', 6]]),
OrderedDict([['a', 1.5], ['b', 3], ['d', 6]]),
OrderedDict([['a', 1.5], ['d', 6]]),
OrderedDict(),
OrderedDict([['a', 1.5], ['b', 3], ['c', 4]]),
OrderedDict([['b', 3], ['c', 4], ['d', 6]])]
result = DataFrame(data)
expected = DataFrame.from_dict(dict(zip(range(len(data)), data)),
orient='index')
assert_frame_equal(result, expected.reindex(result.index))
result = DataFrame([{}])
expected = DataFrame(index=[0])
assert_frame_equal(result, expected)
def test_constructor_list_of_series(self):
data = [OrderedDict([['a', 1.5], ['b', 3.0], ['c', 4.0]]),
OrderedDict([['a', 1.5], ['b', 3.0], ['c', 6.0]])]
sdict = OrderedDict(zip(['x', 'y'], data))
idx = Index(['a', 'b', 'c'])
# all named
data2 = [Series([1.5, 3, 4], idx, dtype='O', name='x'),
Series([1.5, 3, 6], idx, name='y')]
result = DataFrame(data2)
expected = DataFrame.from_dict(sdict, orient='index')
assert_frame_equal(result, expected)
# some unnamed
data2 = [Series([1.5, 3, 4], idx, dtype='O', name='x'),
Series([1.5, 3, 6], idx)]
result = DataFrame(data2)
sdict = OrderedDict(zip(['x', 'Unnamed 0'], data))
expected = DataFrame.from_dict(sdict, orient='index')
assert_frame_equal(result.sort_index(), expected)
# none named
data = [OrderedDict([['a', 1.5], ['b', 3], ['c', 4], ['d', 6]]),
OrderedDict([['a', 1.5], ['b', 3], ['d', 6]]),
OrderedDict([['a', 1.5], ['d', 6]]),
OrderedDict(),
OrderedDict([['a', 1.5], ['b', 3], ['c', 4]]),
OrderedDict([['b', 3], ['c', 4], ['d', 6]])]
data = [Series(d) for d in data]
result = DataFrame(data)
sdict = OrderedDict(zip(range(len(data)), data))
expected = DataFrame.from_dict(sdict, orient='index')
assert_frame_equal(result, expected.reindex(result.index))
result2 = DataFrame(data, index=np.arange(6))
assert_frame_equal(result, result2)
result = DataFrame([Series({})])
expected = DataFrame(index=[0])
assert_frame_equal(result, expected)
data = [OrderedDict([['a', 1.5], ['b', 3.0], ['c', 4.0]]),
OrderedDict([['a', 1.5], ['b', 3.0], ['c', 6.0]])]
sdict = OrderedDict(zip(range(len(data)), data))
idx = Index(['a', 'b', 'c'])
data2 = [Series([1.5, 3, 4], idx, dtype='O'),
Series([1.5, 3, 6], idx)]
result = DataFrame(data2)
expected = DataFrame.from_dict(sdict, orient='index')
assert_frame_equal(result, expected)
def test_constructor_list_of_derived_dicts(self):
class CustomDict(dict):
pass
d = {'a': 1.5, 'b': 3}
data_custom = [CustomDict(d)]
data = [d]
result_custom = DataFrame(data_custom)
result = DataFrame(data)
assert_frame_equal(result, result_custom)
def test_constructor_ragged(self):
data = {'A': randn(10),
'B': randn(8)}
with assertRaisesRegexp(ValueError, 'arrays must all be same length'):
DataFrame(data)
def test_constructor_scalar(self):
idx = Index(lrange(3))
df = DataFrame({"a": 0}, index=idx)
expected = DataFrame({"a": [0, 0, 0]}, index=idx)
assert_frame_equal(df, expected, check_dtype=False)
def test_constructor_Series_copy_bug(self):
df = DataFrame(self.frame['A'], index=self.frame.index, columns=['A'])
df.copy()
def test_constructor_mixed_dict_and_Series(self):
data = {}
data['A'] = {'foo': 1, 'bar': 2, 'baz': 3}
data['B'] = Series([4, 3, 2, 1], index=['bar', 'qux', 'baz', 'foo'])
result = DataFrame(data)
self.assertTrue(result.index.is_monotonic)
# ordering ambiguous, raise exception
with assertRaisesRegexp(ValueError, 'ambiguous ordering'):
DataFrame({'A': ['a', 'b'], 'B': {'a': 'a', 'b': 'b'}})
# this is OK though
result = DataFrame({'A': ['a', 'b'],
'B': Series(['a', 'b'], index=['a', 'b'])})
expected = DataFrame({'A': ['a', 'b'], 'B': ['a', 'b']},
index=['a', 'b'])
assert_frame_equal(result, expected)
def test_constructor_tuples(self):
result = DataFrame({'A': [(1, 2), (3, 4)]})
expected = DataFrame({'A': Series([(1, 2), (3, 4)])})
assert_frame_equal(result, expected)
def test_constructor_orient(self):
data_dict = self.mixed_frame.T._series
recons = DataFrame.from_dict(data_dict, orient='index')
expected = self.mixed_frame.sort_index()
assert_frame_equal(recons, expected)
# dict of sequence
a = {'hi': [32, 3, 3],
'there': [3, 5, 3]}
rs = DataFrame.from_dict(a, orient='index')
xp = DataFrame.from_dict(a).T.reindex(list(a.keys()))
assert_frame_equal(rs, xp)
def test_constructor_Series_named(self):
a = Series([1, 2, 3], index=['a', 'b', 'c'], name='x')
df = DataFrame(a)
self.assertEqual(df.columns[0], 'x')
self.assertTrue(df.index.equals(a.index))
# ndarray like
arr = np.random.randn(10)
s = Series(arr,name='x')
df = DataFrame(s)
expected = DataFrame(dict(x = s))
assert_frame_equal(df,expected)
s = Series(arr,index=range(3,13))
df = DataFrame(s)
expected = DataFrame({ 0 : s })
assert_frame_equal(df,expected)
self.assertRaises(ValueError, DataFrame, s, columns=[1,2])
# #2234
a = Series([], name='x')
df = DataFrame(a)
self.assertEqual(df.columns[0], 'x')
# series with name and w/o
s1 = Series(arr,name='x')
df = DataFrame([s1, arr]).T
expected = DataFrame({ 'x' : s1, 'Unnamed 0' : arr },columns=['x','Unnamed 0'])
assert_frame_equal(df,expected)
        # a bit non-intuitive: mixed with a bare ndarray, the Series collapses
        # down to an array and its name is lost
df = DataFrame([arr, s1]).T
expected = DataFrame({ 1 : s1, 0 : arr },columns=[0,1])
assert_frame_equal(df,expected)
def test_constructor_Series_differently_indexed(self):
# name
s1 = Series([1, 2, 3], index=['a', 'b', 'c'], name='x')
# no name
s2 = Series([1, 2, 3], index=['a', 'b', 'c'])
other_index = Index(['a', 'b'])
df1 = DataFrame(s1, index=other_index)
exp1 = DataFrame(s1.reindex(other_index))
self.assertEqual(df1.columns[0], 'x')
assert_frame_equal(df1, exp1)
df2 = DataFrame(s2, index=other_index)
exp2 = DataFrame(s2.reindex(other_index))
self.assertEqual(df2.columns[0], 0)
self.assertTrue(df2.index.equals(other_index))
assert_frame_equal(df2, exp2)
def test_constructor_manager_resize(self):
index = list(self.frame.index[:5])
columns = list(self.frame.columns[:3])
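        # constructing from an existing BlockManager with a subset of labels
        # should resize it to those labels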
result = DataFrame(self.frame._data, index=index,
columns=columns)
self.assert_numpy_array_equal(result.index, index)
self.assert_numpy_array_equal(result.columns, columns)
def test_constructor_from_items(self):
items = [(c, self.frame[c]) for c in self.frame.columns]
recons = DataFrame.from_items(items)
assert_frame_equal(recons, self.frame)
# pass some columns
recons = DataFrame.from_items(items, columns=['C', 'B', 'A'])
assert_frame_equal(recons, self.frame.ix[:, ['C', 'B', 'A']])
# orient='index'
row_items = [(idx, self.mixed_frame.xs(idx))
for idx in self.mixed_frame.index]
recons = DataFrame.from_items(row_items,
columns=self.mixed_frame.columns,
orient='index')
assert_frame_equal(recons, self.mixed_frame)
self.assertEqual(recons['A'].dtype, np.float64)
with tm.assertRaisesRegexp(TypeError,
"Must pass columns with orient='index'"):
DataFrame.from_items(row_items, orient='index')
# orient='index', but thar be tuples
arr = lib.list_to_object_array(
[('bar', 'baz')] * len(self.mixed_frame))
self.mixed_frame['foo'] = arr
row_items = [(idx, list(self.mixed_frame.xs(idx)))
for idx in self.mixed_frame.index]
recons = DataFrame.from_items(row_items,
columns=self.mixed_frame.columns,
orient='index')
assert_frame_equal(recons, self.mixed_frame)
tm.assert_isinstance(recons['foo'][0], tuple)
rs = DataFrame.from_items([('A', [1, 2, 3]), ('B', [4, 5, 6])],
orient='index', columns=['one', 'two', 'three'])
xp = DataFrame([[1, 2, 3], [4, 5, 6]], index=['A', 'B'],
columns=['one', 'two', 'three'])
assert_frame_equal(rs, xp)
def test_constructor_mix_series_nonseries(self):
df = DataFrame({'A': self.frame['A'],
'B': list(self.frame['B'])}, columns=['A', 'B'])
assert_frame_equal(df, self.frame.ix[:, ['A', 'B']])
with tm.assertRaisesRegexp(ValueError, 'does not match index length'):
DataFrame({'A': self.frame['A'], 'B': list(self.frame['B'])[:-2]})
def test_constructor_miscast_na_int_dtype(self):
df = DataFrame([[np.nan, 1], [1, 0]], dtype=np.int64)
expected = DataFrame([[np.nan, 1], [1, 0]])
assert_frame_equal(df, expected)
def test_constructor_iterator_failure(self):
with assertRaisesRegexp(TypeError, 'iterator'):
df = DataFrame(iter([1, 2, 3]))
def test_constructor_column_duplicates(self):
# it works! #2079
df = DataFrame([[8, 5]], columns=['a', 'a'])
edf = DataFrame([[8, 5]])
edf.columns = ['a', 'a']
assert_frame_equal(df, edf)
idf = DataFrame.from_items(
[('a', [8]), ('a', [5])], columns=['a', 'a'])
assert_frame_equal(idf, edf)
self.assertRaises(ValueError, DataFrame.from_items,
[('a', [8]), ('a', [5]), ('b', [6])],
columns=['b', 'a', 'a'])
def test_column_dups_operations(self):
def check(result, expected=None):
if expected is not None:
assert_frame_equal(result,expected)
result.dtypes
str(result)
# assignment
# GH 3687
arr = np.random.randn(3, 2)
idx = lrange(2)
df = DataFrame(arr, columns=['A', 'A'])
df.columns = idx
expected = DataFrame(arr,columns=idx)
check(df,expected)
idx = date_range('20130101',periods=4,freq='Q-NOV')
df = DataFrame([[1,1,1,5],[1,1,2,5],[2,1,3,5]],columns=['a','a','a','a'])
df.columns = idx
expected = DataFrame([[1,1,1,5],[1,1,2,5],[2,1,3,5]],columns=idx)
check(df,expected)
# insert
df = DataFrame([[1,1,1,5],[1,1,2,5],[2,1,3,5]],columns=['foo','bar','foo','hello'])
df['string'] = 'bah'
expected = DataFrame([[1,1,1,5,'bah'],[1,1,2,5,'bah'],[2,1,3,5,'bah']],columns=['foo','bar','foo','hello','string'])
check(df,expected)
with assertRaisesRegexp(ValueError, 'Length of value'):
df.insert(0, 'AnotherColumn', range(len(df.index) - 1))
# insert same dtype
df['foo2'] = 3
expected = DataFrame([[1,1,1,5,'bah',3],[1,1,2,5,'bah',3],[2,1,3,5,'bah',3]],columns=['foo','bar','foo','hello','string','foo2'])
check(df,expected)
# set (non-dup)
df['foo2'] = 4
expected = DataFrame([[1,1,1,5,'bah',4],[1,1,2,5,'bah',4],[2,1,3,5,'bah',4]],columns=['foo','bar','foo','hello','string','foo2'])
check(df,expected)
df['foo2'] = 3
# delete (non dup)
del df['bar']
expected = DataFrame([[1,1,5,'bah',3],[1,2,5,'bah',3],[2,3,5,'bah',3]],columns=['foo','foo','hello','string','foo2'])
check(df,expected)
        # try to delete again (it's not consolidated)
del df['hello']
expected = DataFrame([[1,1,'bah',3],[1,2,'bah',3],[2,3,'bah',3]],columns=['foo','foo','string','foo2'])
check(df,expected)
# consolidate
df = df.consolidate()
expected = DataFrame([[1,1,'bah',3],[1,2,'bah',3],[2,3,'bah',3]],columns=['foo','foo','string','foo2'])
check(df,expected)
# insert
df.insert(2,'new_col',5.)
expected = DataFrame([[1,1,5.,'bah',3],[1,2,5.,'bah',3],[2,3,5.,'bah',3]],columns=['foo','foo','new_col','string','foo2'])
check(df,expected)
# insert a dup
assertRaisesRegexp(ValueError, 'cannot insert', df.insert, 2, 'new_col', 4.)
df.insert(2,'new_col',4.,allow_duplicates=True)
expected = DataFrame([[1,1,4.,5.,'bah',3],[1,2,4.,5.,'bah',3],[2,3,4.,5.,'bah',3]],columns=['foo','foo','new_col','new_col','string','foo2'])
check(df,expected)
# delete (dup)
del df['foo']
expected = DataFrame([[4.,5.,'bah',3],[4.,5.,'bah',3],[4.,5.,'bah',3]],columns=['new_col','new_col','string','foo2'])
assert_frame_equal(df,expected)
# dup across dtypes
df = DataFrame([[1,1,1.,5],[1,1,2.,5],[2,1,3.,5]],columns=['foo','bar','foo','hello'])
check(df)
df['foo2'] = 7.
expected = DataFrame([[1,1,1.,5,7.],[1,1,2.,5,7.],[2,1,3.,5,7.]],columns=['foo','bar','foo','hello','foo2'])
check(df,expected)
result = df['foo']
expected = DataFrame([[1,1.],[1,2.],[2,3.]],columns=['foo','foo'])
check(result,expected)
# multiple replacements
df['foo'] = 'string'
expected = DataFrame([['string',1,'string',5,7.],['string',1,'string',5,7.],['string',1,'string',5,7.]],columns=['foo','bar','foo','hello','foo2'])
check(df,expected)
del df['foo']
expected = DataFrame([[1,5,7.],[1,5,7.],[1,5,7.]],columns=['bar','hello','foo2'])
check(df,expected)
# values
df = DataFrame([[1,2.5],[3,4.5]], index=[1,2], columns=['x','x'])
result = df.values
expected = np.array([[1,2.5],[3,4.5]])
self.assertTrue((result == expected).all().all())
# rename, GH 4403
df4 = DataFrame({'TClose': [22.02],
'RT': [0.0454],
'TExg': [0.0422]},
index=MultiIndex.from_tuples([(600809, 20130331)], names=['STK_ID', 'RPT_Date']))
df5 = DataFrame({'STK_ID': [600809] * 3,
'RPT_Date': [20120930,20121231,20130331],
'STK_Name': [u('饡驦'), u('饡驦'), u('饡驦')],
'TClose': [38.05, 41.66, 30.01]},
index=MultiIndex.from_tuples([(600809, 20120930), (600809, 20121231),(600809,20130331)], names=['STK_ID', 'RPT_Date']))
k = pd.merge(df4,df5,how='inner',left_index=True,right_index=True)
result = k.rename(columns={'TClose_x':'TClose', 'TClose_y':'QT_Close'})
str(result)
result.dtypes
expected = DataFrame([[0.0454, 22.02, 0.0422, 20130331, 600809, u('饡驦'), 30.01 ]],
columns=['RT','TClose','TExg','RPT_Date','STK_ID','STK_Name','QT_Close']).set_index(['STK_ID','RPT_Date'],drop=False)
assert_frame_equal(result,expected)
# reindex is invalid!
df = DataFrame([[1,5,7.],[1,5,7.],[1,5,7.]],columns=['bar','a','a'])
self.assertRaises(ValueError, df.reindex, columns=['bar'])
self.assertRaises(ValueError, df.reindex, columns=['bar','foo'])
# drop
df = DataFrame([[1,5,7.],[1,5,7.],[1,5,7.]],columns=['bar','a','a'])
result = df.drop(['a'],axis=1)
expected = DataFrame([[1],[1],[1]],columns=['bar'])
check(result,expected)
result = df.drop('a',axis=1)
check(result,expected)
# describe
df = DataFrame([[1,1,1],[2,2,2],[3,3,3]],columns=['bar','a','a'],dtype='float64')
result = df.describe()
s = df.iloc[:,0].describe()
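        # every column holds the same values, so each column's describe() output is identical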
expected = pd.concat([ s, s, s],keys=df.columns,axis=1)
check(result,expected)
# check column dups with index equal and not equal to df's index
df = DataFrame(np.random.randn(5, 3), index=['a', 'b', 'c', 'd', 'e'],
columns=['A', 'B', 'A'])
for index in [df.index, pd.Index(list('edcba'))]:
this_df = df.copy()
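            # assigning to the duplicated 'A' label should update both 'A' columns
            # with the index values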
expected_ser = pd.Series(index.values, index=this_df.index)
expected_df = DataFrame.from_items([('A', expected_ser),
('B', this_df['B']),
('A', expected_ser)])
this_df['A'] = index
check(this_df, expected_df)
# operations
for op in ['__add__','__mul__','__sub__','__truediv__']:
df = DataFrame(dict(A = np.arange(10), B = np.random.rand(10)))
expected = getattr(df,op)(df)
expected.columns = ['A','A']
df.columns = ['A','A']
result = getattr(df,op)(df)
check(result,expected)
# multiple assignments that change dtypes
# the location indexer is a slice
# GH 6120
df = DataFrame(np.random.randn(5,2), columns=['that', 'that'])
expected = DataFrame(1.0, index=range(5), columns=['that', 'that'])
df['that'] = 1.0
check(df, expected)
df = DataFrame(np.random.rand(5,2), columns=['that', 'that'])
expected = DataFrame(1, index=range(5), columns=['that', 'that'])
df['that'] = 1
check(df, expected)
def test_column_dups2(self):
# drop buggy GH 6240
df = DataFrame({'A' : np.random.randn(5),
'B' : np.random.randn(5),
'C' : np.random.randn(5),
'D' : ['a','b','c','d','e'] })
expected = df.take([0,1,1], axis=1)
df2 = df.take([2,0,1,2,1], axis=1)
result = df2.drop('C',axis=1)
assert_frame_equal(result, expected)
# dropna
df = DataFrame({'A' : np.random.randn(5),
'B' : np.random.randn(5),
'C' : np.random.randn(5),
'D' : ['a','b','c','d','e'] })
df.iloc[2,[0,1,2]] = np.nan
df.iloc[0,0] = np.nan
df.iloc[1,1] = np.nan
df.iloc[:,3] = np.nan
expected = df.dropna(subset=['A','B','C'],how='all')
expected.columns = ['A','A','B','C']
df.columns = ['A','A','B','C']
result = df.dropna(subset=['A','C'],how='all')
assert_frame_equal(result, expected)
def test_column_dups_indexing(self):
def check(result, expected=None):
if expected is not None:
assert_frame_equal(result,expected)
result.dtypes
str(result)
# boolean indexing
# GH 4879
dups = ['A', 'A', 'C', 'D']
df = DataFrame(np.arange(12).reshape(3,4), columns=['A', 'B', 'C', 'D'],dtype='float64')
expected = df[df.C > 6]
expected.columns = dups
df = DataFrame(np.arange(12).reshape(3,4), columns=dups,dtype='float64')
result = df[df.C > 6]
check(result,expected)
# where
df = DataFrame(np.arange(12).reshape(3,4), columns=['A', 'B', 'C', 'D'],dtype='float64')
expected = df[df > 6]
expected.columns = dups
df = DataFrame(np.arange(12).reshape(3,4), columns=dups,dtype='float64')
result = df[df > 6]
check(result,expected)
# boolean with the duplicate raises
df = DataFrame(np.arange(12).reshape(3,4), columns=dups,dtype='float64')
self.assertRaises(ValueError, lambda : df[df.A > 6])
        # dup aligning operations should work
# GH 5185
df1 = DataFrame([1, 2, 3, 4, 5], index=[1, 2, 1, 2, 3])
df2 = DataFrame([1, 2, 3], index=[1, 2, 3])
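        # each duplicated label in df1 aligns against the single matching label in df2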
expected = DataFrame([0,2,0,2,2],index=[1,1,2,2,3])
result = df1.sub(df2)
assert_frame_equal(result,expected)
# equality
df1 = DataFrame([[1,2],[2,np.nan],[3,4],[4,4]],columns=['A','B'])
df2 = DataFrame([[0,1],[2,4],[2,np.nan],[4,5]],columns=['A','A'])
        # comparing frames that are not like-labelled should raise
self.assertRaises(ValueError, lambda : df1 == df2)
df1r = df1.reindex_like(df2)
result = df1r == df2
expected = DataFrame([[False,True],[True,False],[False,False],[True,False]],columns=['A','A'])
assert_frame_equal(result,expected)
# mixed column selection
# GH 5639
dfbool = DataFrame({'one' : Series([True, True, False], index=['a', 'b', 'c']),
'two' : Series([False, False, True, False], index=['a', 'b', 'c', 'd']),
'three': Series([False, True, True, True], index=['a', 'b', 'c', 'd'])})
expected = pd.concat([dfbool['one'],dfbool['three'],dfbool['one']],axis=1)
result = dfbool[['one', 'three', 'one']]
check(result,expected)
# multi-axis dups
# GH 6121
df = DataFrame(np.arange(25.).reshape(5,5),
index=['a', 'b', 'c', 'd', 'e'],
columns=['A', 'B', 'C', 'D', 'E'])
z = df[['A', 'C', 'A']].copy()
expected = z.ix[['a', 'c', 'a']]
df = DataFrame(np.arange(25.).reshape(5,5),
index=['a', 'b', 'c', 'd', 'e'],
columns=['A', 'B', 'C', 'D', 'E'])
z = df[['A', 'C', 'A']]
result = z.ix[['a', 'c', 'a']]
check(result,expected)
def test_column_dups_indexing2(self):
# GH 8363
# datetime ops with a non-unique index
df = DataFrame({'A' : np.arange(5,dtype='int64'),
'B' : np.arange(1,6,dtype='int64')},
index=[2,2,3,3,4])
result = df.B-df.A
expected = Series(1,index=[2,2,3,3,4])
assert_series_equal(result,expected)
df = DataFrame({'A' : date_range('20130101',periods=5), 'B' : date_range('20130101 09:00:00', periods=5)},index=[2,2,3,3,4])
result = df.B-df.A
expected = Series(Timedelta('9 hours'),index=[2,2,3,3,4])
assert_series_equal(result,expected)
def test_insert_benchmark(self):
# from the vb_suite/frame_methods/frame_insert_columns
N = 10
K = 5
df = DataFrame(index=lrange(N))
new_col = np.random.randn(N)
for i in range(K):
df[i] = new_col
expected = DataFrame(np.repeat(new_col,K).reshape(N,K),index=lrange(N))
assert_frame_equal(df,expected)
def test_constructor_single_value(self):
# expecting single value upcasting here
df = DataFrame(0., index=[1, 2, 3], columns=['a', 'b', 'c'])
assert_frame_equal(df, DataFrame(np.zeros(df.shape).astype('float64'), df.index,
df.columns))
df = DataFrame(0, index=[1, 2, 3], columns=['a', 'b', 'c'])
assert_frame_equal(df, DataFrame(np.zeros(df.shape).astype('int64'), df.index,
df.columns))
df = DataFrame('a', index=[1, 2], columns=['a', 'c'])
assert_frame_equal(df, DataFrame(np.array([['a', 'a'],
['a', 'a']],
dtype=object),
index=[1, 2],
columns=['a', 'c']))
self.assertRaises(com.PandasError, DataFrame, 'a', [1, 2])
self.assertRaises(com.PandasError, DataFrame, 'a', columns=['a', 'c'])
with tm.assertRaisesRegexp(TypeError, 'incompatible data and dtype'):
DataFrame('a', [1, 2], ['a', 'c'], float)
def test_constructor_with_datetimes(self):
intname = np.dtype(np.int_).name
floatname = np.dtype(np.float_).name
datetime64name = np.dtype('M8[ns]').name
objectname = np.dtype(np.object_).name
# single item
df = DataFrame({'A' : 1, 'B' : 'foo', 'C' : 'bar', 'D' : Timestamp("20010101"), 'E' : datetime(2001,1,2,0,0) },
index=np.arange(10))
result = df.get_dtype_counts()
expected = Series({'int64': 1, datetime64name: 2, objectname : 2})
result.sort_index()
expected.sort_index()
assert_series_equal(result, expected)
        # check with ndarray construction ndim==0 (e.g. we are passing an ndim-0 ndarray with a dtype specified)
df = DataFrame({'a': 1., 'b': 2, 'c': 'foo', floatname : np.array(1.,dtype=floatname),
intname : np.array(1,dtype=intname)}, index=np.arange(10))
result = df.get_dtype_counts()
expected = { objectname : 1 }
if intname == 'int64':
expected['int64'] = 2
else:
expected['int64'] = 1
expected[intname] = 1
if floatname == 'float64':
expected['float64'] = 2
else:
expected['float64'] = 1
expected[floatname] = 1
result.sort_index()
expected = Series(expected)
expected.sort_index()
assert_series_equal(result, expected)
# check with ndarray construction ndim>0
df = DataFrame({'a': 1., 'b': 2, 'c': 'foo', floatname : np.array([1.]*10,dtype=floatname),
intname : np.array([1]*10,dtype=intname)}, index=np.arange(10))
result = df.get_dtype_counts()
result.sort_index()
assert_series_equal(result, expected)
# GH 2809
ind = date_range(start="2000-01-01", freq="D", periods=10)
datetimes = [ts.to_pydatetime() for ts in ind]
datetime_s = Series(datetimes)
self.assertEqual(datetime_s.dtype, 'M8[ns]')
df = DataFrame({'datetime_s':datetime_s})
result = df.get_dtype_counts()
expected = Series({ datetime64name : 1 })
result.sort_index()
expected.sort_index()
assert_series_equal(result, expected)
# GH 2810
ind = date_range(start="2000-01-01", freq="D", periods=10)
datetimes = [ts.to_pydatetime() for ts in ind]
dates = [ts.date() for ts in ind]
df = DataFrame({'datetimes': datetimes, 'dates':dates})
result = df.get_dtype_counts()
expected = Series({ datetime64name : 1, objectname : 1 })
result.sort_index()
expected.sort_index()
assert_series_equal(result, expected)
# GH 7594
# don't coerce tz-aware
import pytz
tz = pytz.timezone('US/Eastern')
dt = tz.localize(datetime(2012, 1, 1))
df = DataFrame({'End Date': dt}, index=[0])
self.assertEqual(df.iat[0,0],dt)
assert_series_equal(df.dtypes,Series({'End Date' : np.dtype('object') }))
df = DataFrame([{'End Date': dt}])
self.assertEqual(df.iat[0,0],dt)
assert_series_equal(df.dtypes,Series({'End Date' : np.dtype('object') }))
# tz-aware (UTC and other tz's)
# GH 8411
dr = date_range('20130101',periods=3)
df = DataFrame({ 'value' : dr})
self.assertTrue(df.iat[0,0].tz is None)
dr = date_range('20130101',periods=3,tz='UTC')
df = DataFrame({ 'value' : dr})
self.assertTrue(str(df.iat[0,0].tz) == 'UTC')
dr = date_range('20130101',periods=3,tz='US/Eastern')
df = DataFrame({ 'value' : dr})
self.assertTrue(str(df.iat[0,0].tz) == 'US/Eastern')
# GH 7822
        # preserve an index with a tz on dict construction
i = date_range('1/1/2011', periods=5, freq='10s', tz = 'US/Eastern')
expected = DataFrame( {'a' : i.to_series(keep_tz=True).reset_index(drop=True) })
df = DataFrame()
df['a'] = i
assert_frame_equal(df, expected)
df = DataFrame( {'a' : i } )
assert_frame_equal(df, expected)
# multiples
i_no_tz = date_range('1/1/2011', periods=5, freq='10s')
df = DataFrame( {'a' : i, 'b' : i_no_tz } )
expected = DataFrame( {'a' : i.to_series(keep_tz=True).reset_index(drop=True), 'b': i_no_tz })
assert_frame_equal(df, expected)
def test_constructor_for_list_with_dtypes(self):
intname = np.dtype(np.int_).name
floatname = np.dtype(np.float_).name
datetime64name = np.dtype('M8[ns]').name
objectname = np.dtype(np.object_).name
# test list of lists/ndarrays
df = DataFrame([np.arange(5) for x in range(5)])
result = df.get_dtype_counts()
expected = Series({'int64' : 5})
df = DataFrame([np.array(np.arange(5),dtype='int32') for x in range(5)])
result = df.get_dtype_counts()
expected = Series({'int32' : 5})
        # overflow issue? (we always expect int64 upcasting here)
df = DataFrame({'a' : [2**31,2**31+1]})
result = df.get_dtype_counts()
expected = Series({'int64' : 1 })
assert_series_equal(result, expected)
# GH #2751 (construction with no index specified), make sure we cast to platform values
df = DataFrame([1, 2])
result = df.get_dtype_counts()
expected = Series({'int64': 1 })
assert_series_equal(result, expected)
df = DataFrame([1.,2.])
result = df.get_dtype_counts()
expected = Series({'float64' : 1 })
assert_series_equal(result, expected)
df = DataFrame({'a' : [1, 2]})
result = df.get_dtype_counts()
expected = Series({'int64' : 1})
assert_series_equal(result, expected)
df = DataFrame({'a' : [1., 2.]})
result = df.get_dtype_counts()
expected = Series({'float64' : 1})
assert_series_equal(result, expected)
df = DataFrame({'a' : 1 }, index=lrange(3))
result = df.get_dtype_counts()
expected = Series({'int64': 1})
assert_series_equal(result, expected)
df = DataFrame({'a' : 1. }, index=lrange(3))
result = df.get_dtype_counts()
expected = Series({'float64': 1 })
assert_series_equal(result, expected)
# with object list
df = DataFrame({'a':[1,2,4,7], 'b':[1.2, 2.3, 5.1, 6.3],
'c':list('abcd'), 'd':[datetime(2000,1,1) for i in range(4)],
'e' : [1.,2,4.,7]})
result = df.get_dtype_counts()
expected = Series({'int64': 1, 'float64' : 2, datetime64name: 1, objectname : 1})
result.sort_index()
expected.sort_index()
assert_series_equal(result, expected)
def test_not_hashable(self):
df = pd.DataFrame([1])
self.assertRaises(TypeError, hash, df)
self.assertRaises(TypeError, hash, self.empty)
def test_timedeltas(self):
df = DataFrame(dict(A = Series(date_range('2012-1-1', periods=3, freq='D')),
B = Series([ timedelta(days=i) for i in range(3) ])))
result = df.get_dtype_counts()
expected = Series({'datetime64[ns]': 1, 'timedelta64[ns]' : 1 })
result.sort()
expected.sort()
assert_series_equal(result, expected)
df['C'] = df['A'] + df['B']
expected = Series({'datetime64[ns]': 2, 'timedelta64[ns]' : 1 })
result = df.get_dtype_counts()
result.sort()
expected.sort()
assert_series_equal(result, expected)
# mixed int types
df['D'] = 1
expected = Series({'datetime64[ns]': 2, 'timedelta64[ns]' : 1, 'int64' : 1 })
result = df.get_dtype_counts()
result.sort()
expected.sort()
assert_series_equal(result, expected)
def test_operators_timedelta64(self):
from datetime import datetime, timedelta
df = DataFrame(dict(A = date_range('2012-1-1', periods=3, freq='D'),
B = date_range('2012-1-2', periods=3, freq='D'),
C = Timestamp('20120101')-timedelta(minutes=5,seconds=5)))
diffs = DataFrame(dict(A = df['A']-df['C'],
B = df['A']-df['B']))
# min
result = diffs.min()
self.assertEqual(result[0], diffs.ix[0,'A'])
self.assertEqual(result[1], diffs.ix[0,'B'])
result = diffs.min(axis=1)
self.assertTrue((result == diffs.ix[0,'B']).all() == True)
# max
result = diffs.max()
self.assertEqual(result[0], diffs.ix[2,'A'])
self.assertEqual(result[1], diffs.ix[2,'B'])
result = diffs.max(axis=1)
self.assertTrue((result == diffs['A']).all() == True)
# abs
result = diffs.abs()
result2 = abs(diffs)
expected = DataFrame(dict(A = df['A']-df['C'],
B = df['B']-df['A']))
assert_frame_equal(result,expected)
assert_frame_equal(result2, expected)
# mixed frame
mixed = diffs.copy()
mixed['C'] = 'foo'
mixed['D'] = 1
mixed['E'] = 1.
mixed['F'] = Timestamp('20130101')
# results in an object array
from pandas.tseries.timedeltas import _coerce_scalar_to_timedelta_type
result = mixed.min()
expected = Series([_coerce_scalar_to_timedelta_type(timedelta(seconds=5*60+5)),
_coerce_scalar_to_timedelta_type(timedelta(days=-1)),
'foo',
1,
1.0,
Timestamp('20130101')],
index=mixed.columns)
assert_series_equal(result,expected)
# excludes numeric
result = mixed.min(axis=1)
expected = Series([1, 1, 1.],index=[0, 1, 2])
assert_series_equal(result,expected)
# works when only those columns are selected
result = mixed[['A','B']].min(1)
expected = Series([ timedelta(days=-1) ] * 3)
assert_series_equal(result,expected)
result = mixed[['A','B']].min()
expected = Series([ timedelta(seconds=5*60+5), timedelta(days=-1) ],index=['A','B'])
assert_series_equal(result,expected)
# GH 3106
df = DataFrame({'time' : date_range('20130102',periods=5),
'time2' : date_range('20130105',periods=5) })
df['off1'] = df['time2']-df['time']
self.assertEqual(df['off1'].dtype, 'timedelta64[ns]')
df['off2'] = df['time']-df['time2']
df._consolidate_inplace()
self.assertTrue(df['off1'].dtype == 'timedelta64[ns]')
self.assertTrue(df['off2'].dtype == 'timedelta64[ns]')
def test_datetimelike_setitem_with_inference(self):
# GH 7592
# assignment of timedeltas with NaT
one_hour = timedelta(hours=1)
df = DataFrame(index=date_range('20130101',periods=4))
df['A'] = np.array([1*one_hour]*4, dtype='m8[ns]')
df.loc[:,'B'] = np.array([2*one_hour]*4, dtype='m8[ns]')
df.loc[:3,'C'] = np.array([3*one_hour]*3, dtype='m8[ns]')
df.ix[:,'D'] = np.array([4*one_hour]*4, dtype='m8[ns]')
df.ix[:3,'E'] = np.array([5*one_hour]*3, dtype='m8[ns]')
df['F'] = np.timedelta64('NaT')
df.ix[:-1,'F'] = np.array([6*one_hour]*3, dtype='m8[ns]')
df.ix[-3:,'G'] = date_range('20130101',periods=3)
df['H'] = np.datetime64('NaT')
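        # columns A-F should come out timedelta64[ns], G and H datetime64[ns]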
result = df.dtypes
expected = Series([np.dtype('timedelta64[ns]')]*6+[np.dtype('datetime64[ns]')]*2,index=list('ABCDEFGH'))
assert_series_equal(result,expected)
def test_new_empty_index(self):
df1 = DataFrame(randn(0, 3))
df2 = DataFrame(randn(0, 3))
df1.index.name = 'foo'
self.assertIsNone(df2.index.name)
def test_astype(self):
casted = self.frame.astype(int)
expected = DataFrame(self.frame.values.astype(int),
index=self.frame.index,
columns=self.frame.columns)
assert_frame_equal(casted, expected)
casted = self.frame.astype(np.int32)
expected = DataFrame(self.frame.values.astype(np.int32),
index=self.frame.index,
columns=self.frame.columns)
assert_frame_equal(casted, expected)
self.frame['foo'] = '5'
casted = self.frame.astype(int)
expected = DataFrame(self.frame.values.astype(int),
index=self.frame.index,
columns=self.frame.columns)
assert_frame_equal(casted, expected)
# mixed casting
def _check_cast(df, v):
self.assertEqual(list(set([ s.dtype.name for _, s in compat.iteritems(df) ]))[0], v)
mn = self.all_mixed._get_numeric_data().copy()
mn['little_float'] = np.array(12345.,dtype='float16')
mn['big_float'] = np.array(123456789101112.,dtype='float64')
casted = mn.astype('float64')
_check_cast(casted, 'float64')
casted = mn.astype('int64')
_check_cast(casted, 'int64')
casted = self.mixed_float.reindex(columns = ['A','B']).astype('float32')
_check_cast(casted, 'float32')
casted = mn.reindex(columns = ['little_float']).astype('float16')
_check_cast(casted, 'float16')
casted = self.mixed_float.reindex(columns = ['A','B']).astype('float16')
_check_cast(casted, 'float16')
casted = mn.astype('float32')
_check_cast(casted, 'float32')
casted = mn.astype('int32')
_check_cast(casted, 'int32')
# to object
casted = mn.astype('O')
_check_cast(casted, 'object')
def test_astype_with_exclude_string(self):
df = self.frame.copy()
expected = self.frame.astype(int)
df['string'] = 'foo'
casted = df.astype(int, raise_on_error = False)
expected['string'] = 'foo'
assert_frame_equal(casted, expected)
df = self.frame.copy()
expected = self.frame.astype(np.int32)
df['string'] = 'foo'
casted = df.astype(np.int32, raise_on_error = False)
expected['string'] = 'foo'
assert_frame_equal(casted, expected)
def test_astype_with_view(self):
tf = self.mixed_float.reindex(columns = ['A','B','C'])
casted = tf.astype(np.int64)
casted = tf.astype(np.float32)
# this is the only real reason to do it this way
tf = np.round(self.frame).astype(np.int32)
casted = tf.astype(np.float32, copy = False)
tf = self.frame.astype(np.float64)
casted = tf.astype(np.int64, copy = False)
def test_astype_cast_nan_int(self):
df = DataFrame(data={"Values": [1.0, 2.0, 3.0, np.nan]})
self.assertRaises(ValueError, df.astype, np.int64)
def test_array_interface(self):
result = np.sqrt(self.frame)
tm.assert_isinstance(result, type(self.frame))
self.assertIs(result.index, self.frame.index)
self.assertIs(result.columns, self.frame.columns)
assert_frame_equal(result, self.frame.apply(np.sqrt))
def test_pickle(self):
unpickled = self.round_trip_pickle(self.mixed_frame)
assert_frame_equal(self.mixed_frame, unpickled)
# buglet
self.mixed_frame._data.ndim
# empty
unpickled = self.round_trip_pickle(self.empty)
repr(unpickled)
def test_to_dict(self):
test_data = {
'A': {'1': 1, '2': 2},
'B': {'1': '1', '2': '2', '3': '3'},
}
recons_data = DataFrame(test_data).to_dict()
for k, v in compat.iteritems(test_data):
for k2, v2 in compat.iteritems(v):
self.assertEqual(v2, recons_data[k][k2])
recons_data = DataFrame(test_data).to_dict("l")
for k, v in compat.iteritems(test_data):
for k2, v2 in compat.iteritems(v):
self.assertEqual(v2, recons_data[k][int(k2) - 1])
recons_data = DataFrame(test_data).to_dict("s")
for k, v in compat.iteritems(test_data):
for k2, v2 in compat.iteritems(v):
self.assertEqual(v2, recons_data[k][k2])
recons_data = DataFrame(test_data).to_dict("sp")
expected_split = {'columns': ['A', 'B'], 'index': ['1', '2', '3'],
'data': [[1.0, '1'], [2.0, '2'], [nan, '3']]}
tm.assert_almost_equal(recons_data, expected_split)
recons_data = DataFrame(test_data).to_dict("r")
expected_records = [{'A': 1.0, 'B': '1'},
{'A': 2.0, 'B': '2'},
{'A': nan, 'B': '3'}]
tm.assert_almost_equal(recons_data, expected_records)
def test_to_dict_invalid_orient(self):
df = DataFrame({'A':[0, 1]})
self.assertRaises(ValueError, df.to_dict, orient='invalid')
def test_to_records_dt64(self):
df = DataFrame([["one", "two", "three"],
["four", "five", "six"]],
index=date_range("2012-01-01", "2012-01-02"))
self.assertEqual(df.to_records()['index'][0], df.index[0])
rs = df.to_records(convert_datetime64=False)
self.assertEqual(rs['index'][0], df.index.values[0])
def test_to_records_with_multindex(self):
# GH3189
index = [['bar', 'bar', 'baz', 'baz', 'foo', 'foo', 'qux', 'qux'],
['one', 'two', 'one', 'two', 'one', 'two', 'one', 'two']]
data = np.zeros((8, 4))
df = DataFrame(data, index=index)
r = df.to_records(index=True)['level_0']
self.assertTrue('bar' in r)
self.assertTrue('one' not in r)
def test_to_records_with_Mapping_type(self):
import email
from email.parser import Parser
import collections
collections.Mapping.register(email.message.Message)
headers = Parser().parsestr('From: <[email protected]>\n'
'To: <[email protected]>\n'
'Subject: Test message\n'
'\n'
'Body would go here\n')
frame = DataFrame.from_records([headers])
all( x in frame for x in ['Type','Subject','From'])
def test_from_records_to_records(self):
# from numpy documentation
arr = np.zeros((2,), dtype=('i4,f4,a10'))
arr[:] = [(1, 2., 'Hello'), (2, 3., "World")]
frame = DataFrame.from_records(arr)
index = np.arange(len(arr))[::-1]
indexed_frame = DataFrame.from_records(arr, index=index)
self.assert_numpy_array_equal(indexed_frame.index, index)
        # without field names, construction should fall back to the default ndarray path
arr2 = np.zeros((2,3))
tm.assert_frame_equal(DataFrame.from_records(arr2), DataFrame(arr2))
# wrong length
msg = r'Shape of passed values is \(3, 2\), indices imply \(3, 1\)'
with assertRaisesRegexp(ValueError, msg):
DataFrame.from_records(arr, index=index[:-1])
indexed_frame = DataFrame.from_records(arr, index='f1')
# what to do?
records = indexed_frame.to_records()
self.assertEqual(len(records.dtype.names), 3)
records = indexed_frame.to_records(index=False)
self.assertEqual(len(records.dtype.names), 2)
self.assertNotIn('index', records.dtype.names)
def test_from_records_nones(self):
tuples = [(1, 2, None, 3),
(1, 2, None, 3),
(None, 2, 5, 3)]
df = DataFrame.from_records(tuples, columns=['a', 'b', 'c', 'd'])
self.assertTrue(np.isnan(df['c'][0]))
def test_from_records_iterator(self):
arr = np.array([(1.0, 1.0, 2, 2), (3.0, 3.0, 4, 4), (5., 5., 6, 6), (7., 7., 8, 8)],
dtype=[('x', np.float64), ('u', np.float32), ('y', np.int64), ('z', np.int32) ])
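        # nrows=2 should consume only the first two records from the iterator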
df = DataFrame.from_records(iter(arr), nrows=2)
xp = DataFrame({'x': np.array([1.0, 3.0], dtype=np.float64),
'u': np.array([1.0, 3.0], dtype=np.float32),
'y': np.array([2, 4], dtype=np.int64),
'z': np.array([2, 4], dtype=np.int32)})
assert_frame_equal(df.reindex_like(xp), xp)
# no dtypes specified here, so just compare with the default
arr = [(1.0, 2), (3.0, 4), (5., 6), (7., 8)]
df = DataFrame.from_records(iter(arr), columns=['x', 'y'],
nrows=2)
assert_frame_equal(df, xp.reindex(columns=['x','y']), check_dtype=False)
def test_from_records_tuples_generator(self):
def tuple_generator(length):
for i in range(length):
letters = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
yield (i, letters[i % len(letters)], i/length)
columns_names = ['Integer', 'String', 'Float']
columns = [[i[j] for i in tuple_generator(10)] for j in range(len(columns_names))]
data = {'Integer': columns[0], 'String': columns[1], 'Float': columns[2]}
expected = DataFrame(data, columns=columns_names)
generator = tuple_generator(10)
result = DataFrame.from_records(generator, columns=columns_names)
assert_frame_equal(result, expected)
def test_from_records_lists_generator(self):
def list_generator(length):
for i in range(length):
letters = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
yield [i, letters[i % len(letters)], i/length]
columns_names = ['Integer', 'String', 'Float']
columns = [[i[j] for i in list_generator(10)] for j in range(len(columns_names))]
data = {'Integer': columns[0], 'String': columns[1], 'Float': columns[2]}
expected = DataFrame(data, columns=columns_names)
generator = list_generator(10)
result = DataFrame.from_records(generator, columns=columns_names)
assert_frame_equal(result, expected)
def test_from_records_columns_not_modified(self):
tuples = [(1, 2, 3),
(1, 2, 3),
(2, 5, 3)]
columns = ['a', 'b', 'c']
original_columns = list(columns)
df = DataFrame.from_records(tuples, columns=columns, index='a')
self.assertEqual(columns, original_columns)
def test_from_records_decimal(self):
from decimal import Decimal
tuples = [(Decimal('1.5'),), (Decimal('2.5'),), (None,)]
df = DataFrame.from_records(tuples, columns=['a'])
self.assertEqual(df['a'].dtype, object)
df = DataFrame.from_records(tuples, columns=['a'], coerce_float=True)
self.assertEqual(df['a'].dtype, np.float64)
self.assertTrue(np.isnan(df['a'].values[-1]))
def test_from_records_duplicates(self):
result = DataFrame.from_records([(1, 2, 3), (4, 5, 6)],
columns=['a', 'b', 'a'])
expected = DataFrame([(1, 2, 3), (4, 5, 6)],
columns=['a', 'b', 'a'])
assert_frame_equal(result, expected)
def test_from_records_set_index_name(self):
def create_dict(order_id):
return {'order_id': order_id, 'quantity': np.random.randint(1, 10),
'price': np.random.randint(1, 10)}
documents = [create_dict(i) for i in range(10)]
# demo missing data
documents.append({'order_id': 10, 'quantity': 5})
result = DataFrame.from_records(documents, index='order_id')
self.assertEqual(result.index.name, 'order_id')
# MultiIndex
result = DataFrame.from_records(documents,
index=['order_id', 'quantity'])
self.assertEqual(result.index.names, ('order_id', 'quantity'))
def test_from_records_misc_brokenness(self):
# #2179
data = {1: ['foo'], 2: ['bar']}
result = DataFrame.from_records(data, columns=['a', 'b'])
exp = DataFrame(data, columns=['a', 'b'])
assert_frame_equal(result, exp)
# overlap in index/index_names
data = {'a': [1, 2, 3], 'b': [4, 5, 6]}
result = DataFrame.from_records(data, index=['a', 'b', 'c'])
exp = DataFrame(data, index=['a', 'b', 'c'])
assert_frame_equal(result, exp)
# GH 2623
rows = []
rows.append([datetime(2010, 1, 1), 1])
rows.append([datetime(2010, 1, 2), 'hi']) # test col upconverts to obj
df2_obj = DataFrame.from_records(rows, columns=['date', 'test'])
results = df2_obj.get_dtype_counts()
expected = Series({ 'datetime64[ns]' : 1, 'object' : 1 })
rows = []
rows.append([datetime(2010, 1, 1), 1])
rows.append([datetime(2010, 1, 2), 1])
df2_obj = DataFrame.from_records(rows, columns=['date', 'test'])
results = df2_obj.get_dtype_counts()
expected = Series({ 'datetime64[ns]' : 1, 'int64' : 1 })
def test_from_records_empty(self):
# 3562
result = DataFrame.from_records([], columns=['a','b','c'])
expected = DataFrame(columns=['a','b','c'])
assert_frame_equal(result, expected)
result = DataFrame.from_records([], columns=['a','b','b'])
expected = DataFrame(columns=['a','b','b'])
assert_frame_equal(result, expected)
def test_from_records_empty_with_nonempty_fields_gh3682(self):
a = np.array([(1, 2)], dtype=[('id', np.int64), ('value', np.int64)])
df = DataFrame.from_records(a, index='id')
assert_array_equal(df.index, Index([1], name='id'))
self.assertEqual(df.index.name, 'id')
assert_array_equal(df.columns, Index(['value']))
b = np.array([], dtype=[('id', np.int64), ('value', np.int64)])
df = DataFrame.from_records(b, index='id')
assert_array_equal(df.index, Index([], name='id'))
self.assertEqual(df.index.name, 'id')
def test_from_records_with_datetimes(self):
if sys.version < LooseVersion('2.7'):
            raise nose.SkipTest("rec arrays don't work properly with py2.6")
# this may fail on certain platforms because of a numpy issue
# related GH6140
if not is_little_endian():
raise nose.SkipTest("known failure of test on non-little endian")
# construction with a null in a recarray
# GH 6140
expected = DataFrame({ 'EXPIRY' : [datetime(2005, 3, 1, 0, 0), None ]})
arrdata = [np.array([datetime(2005, 3, 1, 0, 0), None])]
dtypes = [('EXPIRY', '<M8[ns]')]
try:
recarray = np.core.records.fromarrays(arrdata, dtype=dtypes)
except (ValueError):
raise nose.SkipTest("known failure of numpy rec array creation")
result = DataFrame.from_records(recarray)
assert_frame_equal(result,expected)
# coercion should work too
arrdata = [np.array([datetime(2005, 3, 1, 0, 0), None])]
dtypes = [('EXPIRY', '<M8[m]')]
recarray = np.core.records.fromarrays(arrdata, dtype=dtypes)
result = DataFrame.from_records(recarray)
assert_frame_equal(result,expected)
def test_to_records_floats(self):
df = DataFrame(np.random.rand(10, 10))
df.to_records()
    def test_to_records_index_name(self):
df = DataFrame(np.random.randn(3, 3))
df.index.name = 'X'
rs = df.to_records()
self.assertIn('X', rs.dtype.fields)
df = DataFrame(np.random.randn(3, 3))
rs = df.to_records()
self.assertIn('index', rs.dtype.fields)
df.index = MultiIndex.from_tuples([('a', 'x'), ('a', 'y'), ('b', 'z')])
df.index.names = ['A', None]
rs = df.to_records()
self.assertIn('level_0', rs.dtype.fields)
def test_join_str_datetime(self):
str_dates = ['20120209', '20120222']
dt_dates = [datetime(2012, 2, 9), datetime(2012, 2, 22)]
A = DataFrame(str_dates, index=lrange(2), columns=['aa'])
C = DataFrame([[1, 2], [3, 4]], index=str_dates, columns=dt_dates)
tst = A.join(C, on='aa')
self.assertEqual(len(tst.columns), 3)
def test_from_records_sequencelike(self):
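        # round-trip a mixed-dtype frame through tuples, record arrays and plain lists;
        # dtype information survives only where the input carries it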
df = DataFrame({'A' : np.array(np.random.randn(6), dtype = np.float64),
'A1': np.array(np.random.randn(6), dtype = np.float64),
'B' : np.array(np.arange(6), dtype = np.int64),
'C' : ['foo'] * 6,
'D' : np.array([True, False] * 3, dtype=bool),
'E' : np.array(np.random.randn(6), dtype = np.float32),
'E1': np.array(np.random.randn(6), dtype = np.float32),
'F' : np.array(np.arange(6), dtype = np.int32) })
        # it is actually tricky to create the record-like arrays and keep the dtypes intact
blocks = df.blocks
tuples = []
columns = []
dtypes = []
for dtype, b in compat.iteritems(blocks):
columns.extend(b.columns)
dtypes.extend([ (c,np.dtype(dtype).descr[0][1]) for c in b.columns ])
for i in range(len(df.index)):
tup = []
for _, b in compat.iteritems(blocks):
tup.extend(b.irow(i).values)
tuples.append(tuple(tup))
recarray = np.array(tuples, dtype=dtypes).view(np.recarray)
recarray2 = df.to_records()
lists = [list(x) for x in tuples]
# tuples (lose the dtype info)
result = DataFrame.from_records(tuples, columns=columns).reindex(columns=df.columns)
# created recarray and with to_records recarray (have dtype info)
result2 = DataFrame.from_records(recarray, columns=columns).reindex(columns=df.columns)
result3 = DataFrame.from_records(recarray2, columns=columns).reindex(columns=df.columns)
        # list of tuples (no dtype info)
result4 = DataFrame.from_records(lists, columns=columns).reindex(columns=df.columns)
assert_frame_equal(result, df, check_dtype=False)
assert_frame_equal(result2, df)
assert_frame_equal(result3, df)
assert_frame_equal(result4, df, check_dtype=False)
# tuples is in the order of the columns
result = DataFrame.from_records(tuples)
self.assert_numpy_array_equal(result.columns, lrange(8))
# test exclude parameter & we are casting the results here (as we don't have dtype info to recover)
columns_to_test = [ columns.index('C'), columns.index('E1') ]
exclude = list(set(range(8))-set(columns_to_test))
result = DataFrame.from_records(tuples, exclude=exclude)
result.columns = [ columns[i] for i in sorted(columns_to_test) ]
assert_series_equal(result['C'], df['C'])
assert_series_equal(result['E1'], df['E1'].astype('float64'))
# empty case
result = DataFrame.from_records([], columns=['foo', 'bar', 'baz'])
self.assertEqual(len(result), 0)
self.assert_numpy_array_equal(result.columns, ['foo', 'bar', 'baz'])
result = DataFrame.from_records([])
self.assertEqual(len(result), 0)
self.assertEqual(len(result.columns), 0)
def test_from_records_dictlike(self):
# test the dict methods
df = DataFrame({'A' : np.array(np.random.randn(6), dtype = np.float64),
'A1': np.array(np.random.randn(6), dtype = np.float64),
'B' : np.array(np.arange(6), dtype = np.int64),
'C' : ['foo'] * 6,
'D' : np.array([True, False] * 3, dtype=bool),
'E' : np.array(np.random.randn(6), dtype = np.float32),
'E1': np.array(np.random.randn(6), dtype = np.float32),
'F' : np.array(np.arange(6), dtype = np.int32) })
        # the columns are in a different order here than the actual items iterated from the dict
columns = []
for dtype, b in compat.iteritems(df.blocks):
columns.extend(b.columns)
asdict = dict((x, y) for x, y in compat.iteritems(df))
asdict2 = dict((x, y.values) for x, y in compat.iteritems(df))
# dict of series & dict of ndarrays (have dtype info)
results = []
results.append(DataFrame.from_records(asdict).reindex(columns=df.columns))
results.append(DataFrame.from_records(asdict, columns=columns).reindex(columns=df.columns))
results.append(DataFrame.from_records(asdict2, columns=columns).reindex(columns=df.columns))
for r in results:
assert_frame_equal(r, df)
def test_from_records_with_index_data(self):
df = DataFrame(np.random.randn(10, 3), columns=['A', 'B', 'C'])
data = np.random.randn(10)
df1 = DataFrame.from_records(df, index=data)
assert(df1.index.equals(Index(data)))
def test_from_records_bad_index_column(self):
df = DataFrame(np.random.randn(10, 3), columns=['A', 'B', 'C'])
# should pass
df1 = DataFrame.from_records(df, index=['C'])
assert(df1.index.equals(Index(df.C)))
df1 = DataFrame.from_records(df, index='C')
assert(df1.index.equals(Index(df.C)))
# should fail
self.assertRaises(ValueError, DataFrame.from_records, df, index=[2])
self.assertRaises(KeyError, DataFrame.from_records, df, index=2)
def test_from_records_non_tuple(self):
class Record(object):
def __init__(self, *args):
self.args = args
def __getitem__(self, i):
return self.args[i]
def __iter__(self):
return iter(self.args)
recs = [Record(1, 2, 3), Record(4, 5, 6), Record(7, 8, 9)]
tups = lmap(tuple, recs)
result = DataFrame.from_records(recs)
expected = DataFrame.from_records(tups)
assert_frame_equal(result, expected)
def test_from_records_len0_with_columns(self):
# #2633
result = DataFrame.from_records([], index='foo',
columns=['foo', 'bar'])
self.assertTrue(np.array_equal(result.columns, ['bar']))
self.assertEqual(len(result), 0)
self.assertEqual(result.index.name, 'foo')
def test_get_agg_axis(self):
cols = self.frame._get_agg_axis(0)
self.assertIs(cols, self.frame.columns)
idx = self.frame._get_agg_axis(1)
self.assertIs(idx, self.frame.index)
self.assertRaises(ValueError, self.frame._get_agg_axis, 2)
def test_nonzero(self):
self.assertTrue(self.empty.empty)
self.assertFalse(self.frame.empty)
self.assertFalse(self.mixed_frame.empty)
# corner case
df = DataFrame({'A': [1., 2., 3.],
'B': ['a', 'b', 'c']},
index=np.arange(3))
del df['A']
self.assertFalse(df.empty)
def test_repr_empty(self):
buf = StringIO()
# empty
foo = repr(self.empty)
# empty with index
frame = DataFrame(index=np.arange(1000))
foo = repr(frame)
def test_repr_mixed(self):
buf = StringIO()
# mixed
foo = repr(self.mixed_frame)
self.mixed_frame.info(verbose=False, buf=buf)
@slow
def test_repr_mixed_big(self):
# big mixed
biggie = DataFrame({'A': randn(200),
'B': tm.makeStringIndex(200)},
index=lrange(200))
biggie.loc[:20,'A'] = nan
biggie.loc[:20,'B'] = nan
foo = repr(biggie)
def test_repr(self):
buf = StringIO()
# small one
foo = repr(self.frame)
self.frame.info(verbose=False, buf=buf)
# even smaller
self.frame.reindex(columns=['A']).info(verbose=False, buf=buf)
self.frame.reindex(columns=['A', 'B']).info(verbose=False, buf=buf)
# exhausting cases in DataFrame.info
# columns but no index
no_index = DataFrame(columns=[0, 1, 3])
foo = repr(no_index)
# no columns or index
self.empty.info(buf=buf)
df = DataFrame(["a\n\r\tb"], columns=["a\n\r\td"], index=["a\n\r\tf"])
self.assertFalse("\t" in repr(df))
self.assertFalse("\r" in repr(df))
self.assertFalse("a\n" in repr(df))
def test_repr_dimensions(self):
df = DataFrame([[1, 2,], [3, 4]])
with option_context('display.show_dimensions', True):
self.assertTrue("2 rows x 2 columns" in repr(df))
with option_context('display.show_dimensions', False):
self.assertFalse("2 rows x 2 columns" in repr(df))
with option_context('display.show_dimensions', 'truncate'):
self.assertFalse("2 rows x 2 columns" in repr(df))
@slow
def test_repr_big(self):
buf = StringIO()
# big one
biggie = DataFrame(np.zeros((200, 4)), columns=lrange(4),
index=lrange(200))
foo = repr(biggie)
def test_repr_unsortable(self):
# columns are not sortable
import warnings
warn_filters = warnings.filters
warnings.filterwarnings('ignore',
category=FutureWarning,
module=".*format")
unsortable = DataFrame({'foo': [1] * 50,
datetime.today(): [1] * 50,
'bar': ['bar'] * 50,
datetime.today(
) + timedelta(1): ['bar'] * 50},
index=np.arange(50))
foo = repr(unsortable)
fmt.set_option('display.precision', 3, 'display.column_space', 10)
repr(self.frame)
fmt.set_option('display.max_rows', 10, 'display.max_columns', 2)
repr(self.frame)
fmt.set_option('display.max_rows', 1000, 'display.max_columns', 1000)
repr(self.frame)
self.reset_display_options()
warnings.filters = warn_filters
def test_repr_unicode(self):
uval = u('\u03c3\u03c3\u03c3\u03c3')
bval = uval.encode('utf-8')
df = DataFrame({'A': [uval, uval]})
result = repr(df)
ex_top = ' A'
self.assertEqual(result.split('\n')[0].rstrip(), ex_top)
df = DataFrame({'A': [uval, uval]})
result = repr(df)
self.assertEqual(result.split('\n')[0].rstrip(), ex_top)
def test_unicode_string_with_unicode(self):
df = DataFrame({'A': [u("\u05d0")]})
if compat.PY3:
str(df)
else:
compat.text_type(df)
def test_bytestring_with_unicode(self):
df = DataFrame({'A': [u("\u05d0")]})
if compat.PY3:
bytes(df)
else:
str(df)
def test_very_wide_info_repr(self):
df = DataFrame(np.random.randn(10, 20),
columns=tm.rands_array(10, 20))
repr(df)
def test_repr_column_name_unicode_truncation_bug(self):
# #1906
df = DataFrame({'Id': [7117434],
'StringCol': ('Is it possible to modify drop plot code'
' so that the output graph is displayed '
'in iphone simulator, Is it possible to '
'modify drop plot code so that the '
'output graph is \xe2\x80\xa8displayed '
'in iphone simulator.Now we are adding '
'the CSV file externally. I want to Call'
' the File through the code..')})
result = repr(df)
self.assertIn('StringCol', result)
def test_head_tail(self):
assert_frame_equal(self.frame.head(), self.frame[:5])
assert_frame_equal(self.frame.tail(), self.frame[-5:])
assert_frame_equal(self.frame.head(0), self.frame)
assert_frame_equal(self.frame.tail(0), self.frame)
assert_frame_equal(self.frame.head(-1), self.frame[:-1])
assert_frame_equal(self.frame.tail(-1), self.frame[1:])
assert_frame_equal(self.frame.head(1), self.frame[:1])
assert_frame_equal(self.frame.tail(1), self.frame[-1:])
# with a float index
df = self.frame.copy()
df.index = np.arange(len(self.frame)) + 0.1
assert_frame_equal(df.head(), df.iloc[:5])
assert_frame_equal(df.tail(), df.iloc[-5:])
assert_frame_equal(df.head(0), df)
assert_frame_equal(df.tail(0), df)
assert_frame_equal(df.head(-1), df.iloc[:-1])
assert_frame_equal(df.tail(-1), df.iloc[1:])
        # test empty dataframe
empty_df = DataFrame()
assert_frame_equal(empty_df.tail(), empty_df)
assert_frame_equal(empty_df.head(), empty_df)
def test_insert(self):
df = DataFrame(np.random.randn(5, 3), index=np.arange(5),
columns=['c', 'b', 'a'])
df.insert(0, 'foo', df['a'])
self.assert_numpy_array_equal(df.columns, ['foo', 'c', 'b', 'a'])
assert_almost_equal(df['a'], df['foo'])
df.insert(2, 'bar', df['c'])
self.assert_numpy_array_equal(df.columns, ['foo', 'c', 'bar', 'b', 'a'])
assert_almost_equal(df['c'], df['bar'])
# diff dtype
# new item
df['x'] = df['a'].astype('float32')
result = Series(dict(float64 = 5, float32 = 1))
self.assertTrue((df.get_dtype_counts() == result).all())
# replacing current (in different block)
df['a'] = df['a'].astype('float32')
result = Series(dict(float64 = 4, float32 = 2))
self.assertTrue((df.get_dtype_counts() == result).all())
df['y'] = df['a'].astype('int32')
result = Series(dict(float64 = 4, float32 = 2, int32 = 1))
self.assertTrue((df.get_dtype_counts() == result).all())
with assertRaisesRegexp(ValueError, 'already exists'):
df.insert(1, 'a', df['b'])
self.assertRaises(ValueError, df.insert, 1, 'c', df['b'])
df.columns.name = 'some_name'
# preserve columns name field
df.insert(0, 'baz', df['c'])
self.assertEqual(df.columns.name, 'some_name')
def test_delitem(self):
del self.frame['A']
self.assertNotIn('A', self.frame)
def test_pop(self):
self.frame.columns.name = 'baz'
A = self.frame.pop('A')
self.assertNotIn('A', self.frame)
self.frame['foo'] = 'bar'
foo = self.frame.pop('foo')
self.assertNotIn('foo', self.frame)
# TODO self.assertEqual(self.frame.columns.name, 'baz')
def test_pop_non_unique_cols(self):
df = DataFrame({0: [0, 1], 1: [0, 1], 2: [4, 5]})
df.columns = ["a", "b", "a"]
res = df.pop("a")
self.assertEqual(type(res), DataFrame)
self.assertEqual(len(res), 2)
self.assertEqual(len(df.columns), 1)
self.assertTrue("b" in df.columns)
self.assertFalse("a" in df.columns)
self.assertEqual(len(df.index), 2)
def test_iter(self):
self.assertTrue(tm.equalContents(list(self.frame), self.frame.columns))
def test_iterrows(self):
for i, (k, v) in enumerate(self.frame.iterrows()):
exp = self.frame.xs(self.frame.index[i])
assert_series_equal(v, exp)
for i, (k, v) in enumerate(self.mixed_frame.iterrows()):
exp = self.mixed_frame.xs(self.mixed_frame.index[i])
assert_series_equal(v, exp)
def test_itertuples(self):
for i, tup in enumerate(self.frame.itertuples()):
s = Series(tup[1:])
s.name = tup[0]
expected = self.frame.ix[i, :].reset_index(drop=True)
assert_series_equal(s, expected)
df = DataFrame({'floats': np.random.randn(5),
'ints': lrange(5)}, columns=['floats', 'ints'])
for tup in df.itertuples(index=False):
tm.assert_isinstance(tup[1], np.integer)
df = DataFrame(data={"a": [1, 2, 3], "b": [4, 5, 6]})
dfaa = df[['a', 'a']]
self.assertEqual(list(dfaa.itertuples()), [(0, 1, 1), (1, 2, 2), (2, 3, 3)])
def test_len(self):
self.assertEqual(len(self.frame), len(self.frame.index))
def test_operators(self):
garbage = random.random(4)
colSeries = Series(garbage, index=np.array(self.frame.columns))
idSum = self.frame + self.frame
seriesSum = self.frame + colSeries
for col, series in compat.iteritems(idSum):
for idx, val in compat.iteritems(series):
origVal = self.frame[col][idx] * 2
if not np.isnan(val):
self.assertEqual(val, origVal)
else:
self.assertTrue(np.isnan(origVal))
for col, series in compat.iteritems(seriesSum):
for idx, val in compat.iteritems(series):
origVal = self.frame[col][idx] + colSeries[col]
if not np.isnan(val):
self.assertEqual(val, origVal)
else:
self.assertTrue(np.isnan(origVal))
added = self.frame2 + self.frame2
expected = self.frame2 * 2
assert_frame_equal(added, expected)
df = DataFrame({'a': ['a', None, 'b']})
assert_frame_equal(df + df, DataFrame({'a': ['aa', np.nan, 'bb']}))
def test_ops_np_scalar(self):
vals, xs = np.random.rand(5, 3), [nan, 7, -23, 2.718, -3.14, np.inf]
f = lambda x: DataFrame(x, index=list('ABCDE'),
columns=['jim', 'joe', 'jolie'])
df = f(vals)
for x in xs:
assert_frame_equal(df / np.array(x), f(vals / x))
assert_frame_equal(np.array(x) * df, f(vals * x))
assert_frame_equal(df + np.array(x), f(vals + x))
assert_frame_equal(np.array(x) - df, f(x - vals))
def test_operators_boolean(self):
# GH 5808
# empty frames, non-mixed dtype
result = DataFrame(index=[1]) & DataFrame(index=[1])
assert_frame_equal(result,DataFrame(index=[1]))
result = DataFrame(index=[1]) | DataFrame(index=[1])
assert_frame_equal(result,DataFrame(index=[1]))
result = DataFrame(index=[1]) & DataFrame(index=[1,2])
assert_frame_equal(result,DataFrame(index=[1,2]))
result = DataFrame(index=[1],columns=['A']) & DataFrame(index=[1],columns=['A'])
assert_frame_equal(result,DataFrame(index=[1],columns=['A']))
result = DataFrame(True,index=[1],columns=['A']) & DataFrame(True,index=[1],columns=['A'])
assert_frame_equal(result,DataFrame(True,index=[1],columns=['A']))
result = DataFrame(True,index=[1],columns=['A']) | DataFrame(True,index=[1],columns=['A'])
assert_frame_equal(result,DataFrame(True,index=[1],columns=['A']))
# boolean ops
result = DataFrame(1,index=[1],columns=['A']) | DataFrame(True,index=[1],columns=['A'])
assert_frame_equal(result,DataFrame(1,index=[1],columns=['A']))
def f():
DataFrame(1.0,index=[1],columns=['A']) | DataFrame(True,index=[1],columns=['A'])
self.assertRaises(TypeError, f)
def f():
DataFrame('foo',index=[1],columns=['A']) | DataFrame(True,index=[1],columns=['A'])
self.assertRaises(TypeError, f)
def test_operators_none_as_na(self):
df = DataFrame({"col1": [2, 5.0, 123, None],
"col2": [1, 2, 3, 4]}, dtype=object)
ops = [operator.add, operator.sub, operator.mul, operator.truediv]
# since filling converts dtypes from object, changed expected to be object
for op in ops:
filled = df.fillna(np.nan)
result = op(df, 3)
expected = op(filled, 3).astype(object)
expected[com.isnull(expected)] = None
assert_frame_equal(result, expected)
result = op(df, df)
expected = op(filled, filled).astype(object)
expected[com.isnull(expected)] = None
assert_frame_equal(result, expected)
result = op(df, df.fillna(7))
assert_frame_equal(result, expected)
result = op(df.fillna(7), df)
assert_frame_equal(result, expected, check_dtype=False)
def test_comparison_invalid(self):
def check(df,df2):
for (x, y) in [(df,df2),(df2,df)]:
self.assertRaises(TypeError, lambda : x == y)
self.assertRaises(TypeError, lambda : x != y)
self.assertRaises(TypeError, lambda : x >= y)
self.assertRaises(TypeError, lambda : x > y)
self.assertRaises(TypeError, lambda : x < y)
self.assertRaises(TypeError, lambda : x <= y)
# GH4968
# invalid date/int comparisons
df = DataFrame(np.random.randint(10, size=(10, 1)), columns=['a'])
df['dates'] = date_range('20010101', periods=len(df))
df2 = df.copy()
df2['dates'] = df['a']
check(df,df2)
df = DataFrame(np.random.randint(10, size=(10, 2)), columns=['a', 'b'])
df2 = DataFrame({'a': date_range('20010101', periods=len(df)), 'b': date_range('20100101', periods=len(df))})
check(df,df2)
def test_timestamp_compare(self):
# make sure we can compare Timestamps on the right AND left hand side
# GH4982
df = DataFrame({'dates1': date_range('20010101', periods=10),
'dates2': date_range('20010102', periods=10),
'intcol': np.random.randint(1000000000, size=10),
'floatcol': np.random.randn(10),
'stringcol': list(tm.rands(10))})
df.loc[np.random.rand(len(df)) > 0.5, 'dates2'] = pd.NaT
ops = {'gt': 'lt', 'lt': 'gt', 'ge': 'le', 'le': 'ge', 'eq': 'eq',
'ne': 'ne'}
for left, right in ops.items():
left_f = getattr(operator, left)
right_f = getattr(operator, right)
# no nats
expected = left_f(df, Timestamp('20010109'))
result = right_f(Timestamp('20010109'), df)
tm.assert_frame_equal(result, expected)
# nats
expected = left_f(df, Timestamp('nat'))
result = right_f(Timestamp('nat'), df)
tm.assert_frame_equal(result, expected)
def test_modulo(self):
# GH3590, modulo as ints
p = DataFrame({ 'first' : [3,4,5,8], 'second' : [0,0,0,3] })
### this is technically wrong as the integer portion is coerced to float ###
expected = DataFrame({ 'first' : Series([0,0,0,0],dtype='float64'), 'second' : Series([np.nan,np.nan,np.nan,0]) })
result = p % p
assert_frame_equal(result,expected)
        # numpy has a slightly different (wrong) treatment
result2 = DataFrame(p.values % p.values,index=p.index,columns=p.columns,dtype='float64')
result2.iloc[0:3,1] = np.nan
assert_frame_equal(result2,expected)
result = p % 0
expected = DataFrame(np.nan,index=p.index,columns=p.columns)
assert_frame_equal(result,expected)
        # numpy has a slightly different (wrong) treatment
result2 = DataFrame(p.values.astype('float64') % 0,index=p.index,columns=p.columns)
assert_frame_equal(result2,expected)
# not commutative with series
p = DataFrame(np.random.randn(10, 5))
s = p[0]
res = s % p
res2 = p % s
self.assertFalse(np.array_equal(res.fillna(0), res2.fillna(0)))
def test_div(self):
# integer div, but deal with the 0's
p = DataFrame({ 'first' : [3,4,5,8], 'second' : [0,0,0,3] })
result = p / p
### this is technically wrong as the integer portion is coerced to float ###
expected = DataFrame({ 'first' : Series([1,1,1,1],dtype='float64'), 'second' : Series([np.inf,np.inf,np.inf,1]) })
assert_frame_equal(result,expected)
result2 = DataFrame(p.values.astype('float64')/p.values,index=p.index,columns=p.columns).fillna(np.inf)
assert_frame_equal(result2,expected)
result = p / 0
expected = DataFrame(np.inf,index=p.index,columns=p.columns)
assert_frame_equal(result,expected)
        # numpy has a slightly different (wrong) treatment
result2 = DataFrame(p.values.astype('float64')/0,index=p.index,columns=p.columns).fillna(np.inf)
assert_frame_equal(result2,expected)
p = DataFrame(np.random.randn(10, 5))
s = p[0]
res = s / p
res2 = p / s
self.assertFalse(np.array_equal(res.fillna(0), res2.fillna(0)))
def test_logical_operators(self):
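        # elementwise &, | and ^ between boolean frames (plus unary inversion) should
        # match the same op applied to the underlying values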
def _check_bin_op(op):
result = op(df1, df2)
expected = DataFrame(op(df1.values, df2.values), index=df1.index,
columns=df1.columns)
self.assertEqual(result.values.dtype, np.bool_)
assert_frame_equal(result, expected)
def _check_unary_op(op):
result = op(df1)
expected = DataFrame(op(df1.values), index=df1.index,
columns=df1.columns)
self.assertEqual(result.values.dtype, np.bool_)
assert_frame_equal(result, expected)
df1 = {'a': {'a': True, 'b': False, 'c': False, 'd': True, 'e': True},
'b': {'a': False, 'b': True, 'c': False,
'd': False, 'e': False},
'c': {'a': False, 'b': False, 'c': True,
'd': False, 'e': False},
'd': {'a': True, 'b': False, 'c': False, 'd': True, 'e': True},
'e': {'a': True, 'b': False, 'c': False, 'd': True, 'e': True}}
df2 = {'a': {'a': True, 'b': False, 'c': True, 'd': False, 'e': False},
'b': {'a': False, 'b': True, 'c': False,
'd': False, 'e': False},
'c': {'a': True, 'b': False, 'c': True, 'd': False, 'e': False},
'd': {'a': False, 'b': False, 'c': False,
'd': True, 'e': False},
'e': {'a': False, 'b': False, 'c': False,
'd': False, 'e': True}}
df1 = DataFrame(df1)
df2 = DataFrame(df2)
_check_bin_op(operator.and_)
_check_bin_op(operator.or_)
_check_bin_op(operator.xor)
# operator.neg is deprecated in numpy >= 1.9
_check_unary_op(operator.inv)
def test_logical_typeerror(self):
if not compat.PY3:
self.assertRaises(TypeError, self.frame.__eq__, 'foo')
self.assertRaises(TypeError, self.frame.__lt__, 'foo')
self.assertRaises(TypeError, self.frame.__gt__, 'foo')
self.assertRaises(TypeError, self.frame.__ne__, 'foo')
else:
raise nose.SkipTest('test_logical_typeerror not tested on PY3')
def test_constructor_lists_to_object_dtype(self):
# from #1074
d = DataFrame({'a': [np.nan, False]})
self.assertEqual(d['a'].dtype, np.object_)
self.assertFalse(d['a'][1])
def test_constructor_with_nas(self):
# GH 5016
        # NaN values in indices
def check(df):
for i in range(len(df.columns)):
df.iloc[:,i]
# allow single nans to succeed
indexer = np.arange(len(df.columns))[isnull(df.columns)]
if len(indexer) == 1:
assert_series_equal(df.iloc[:,indexer[0]],df.loc[:,np.nan])
# multiple nans should fail
else:
def f():
df.loc[:,np.nan]
self.assertRaises(ValueError, f)
df = DataFrame([[1,2,3],[4,5,6]], index=[1,np.nan])
check(df)
df = DataFrame([[1,2,3],[4,5,6]], columns=[1.1,2.2,np.nan])
check(df)
df = DataFrame([[0,1,2,3],[4,5,6,7]], columns=[np.nan,1.1,2.2,np.nan])
check(df)
df = DataFrame([[0.0,1,2,3.0],[4,5,6,7]], columns=[np.nan,1.1,2.2,np.nan])
check(df)
def test_logical_with_nas(self):
d = DataFrame({'a': [np.nan, False], 'b': [True, True]})
# GH4947
# bool comparisons should return bool
result = d['a'] | d['b']
expected = Series([False, True])
assert_series_equal(result, expected)
# GH4604, automatic casting here
result = d['a'].fillna(False) | d['b']
expected = Series([True, True])
assert_series_equal(result, expected)
result = d['a'].fillna(False,downcast=False) | d['b']
expected = Series([True, True])
assert_series_equal(result, expected)
def test_neg(self):
# what to do?
assert_frame_equal(-self.frame, -1 * self.frame)
def test_invert(self):
assert_frame_equal(-(self.frame < 0), ~(self.frame < 0))
def test_first_last_valid(self):
N = len(self.frame.index)
mat = randn(N)
mat[:5] = nan
mat[-5:] = nan
frame = DataFrame({'foo': mat}, index=self.frame.index)
index = frame.first_valid_index()
self.assertEqual(index, frame.index[5])
index = frame.last_valid_index()
self.assertEqual(index, frame.index[-6])
def test_arith_flex_frame(self):
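        # the flex arithmetic methods (add, sub, mul, ...) and their reversed 'r' variants
        # should match the plain operators for float, mixed-float and mixed-int frames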
ops = ['add', 'sub', 'mul', 'div', 'truediv', 'pow', 'floordiv', 'mod']
if not compat.PY3:
aliases = {}
else:
aliases = {'div': 'truediv'}
for op in ops:
try:
alias = aliases.get(op, op)
f = getattr(operator, alias)
result = getattr(self.frame, op)(2 * self.frame)
exp = f(self.frame, 2 * self.frame)
assert_frame_equal(result, exp)
# vs mix float
result = getattr(self.mixed_float, op)(2 * self.mixed_float)
exp = f(self.mixed_float, 2 * self.mixed_float)
assert_frame_equal(result, exp)
_check_mixed_float(result, dtype = dict(C = None))
# vs mix int
if op in ['add','sub','mul']:
result = getattr(self.mixed_int, op)(2 + self.mixed_int)
exp = f(self.mixed_int, 2 + self.mixed_int)
# overflow in the uint
dtype = None
if op in ['sub']:
dtype = dict(B = 'object', C = None)
elif op in ['add','mul']:
dtype = dict(C = None)
assert_frame_equal(result, exp)
_check_mixed_int(result, dtype = dtype)
# rops
r_f = lambda x, y: f(y, x)
result = getattr(self.frame, 'r' + op)(2 * self.frame)
exp = r_f(self.frame, 2 * self.frame)
assert_frame_equal(result, exp)
# vs mix float
result = getattr(self.mixed_float, op)(2 * self.mixed_float)
exp = f(self.mixed_float, 2 * self.mixed_float)
assert_frame_equal(result, exp)
_check_mixed_float(result, dtype = dict(C = None))
result = getattr(self.intframe, op)(2 * self.intframe)
exp = f(self.intframe, 2 * self.intframe)
assert_frame_equal(result, exp)
# vs mix int
if op in ['add','sub','mul']:
result = getattr(self.mixed_int, op)(2 + self.mixed_int)
exp = f(self.mixed_int, 2 + self.mixed_int)
# overflow in the uint
dtype = None
if op in ['sub']:
dtype = dict(B = 'object', C = None)
elif op in ['add','mul']:
dtype = dict(C = None)
assert_frame_equal(result, exp)
_check_mixed_int(result, dtype = dtype)
except:
com.pprint_thing("Failing operation %r" % op)
raise
# ndim >= 3
ndim_5 = np.ones(self.frame.shape + (3, 4, 5))
with assertRaisesRegexp(ValueError, 'shape'):
f(self.frame, ndim_5)
with assertRaisesRegexp(ValueError, 'shape'):
getattr(self.frame, op)(ndim_5)
# res_add = self.frame.add(self.frame)
# res_sub = self.frame.sub(self.frame)
# res_mul = self.frame.mul(self.frame)
# res_div = self.frame.div(2 * self.frame)
# assert_frame_equal(res_add, self.frame + self.frame)
# assert_frame_equal(res_sub, self.frame - self.frame)
# assert_frame_equal(res_mul, self.frame * self.frame)
# assert_frame_equal(res_div, self.frame / (2 * self.frame))
const_add = self.frame.add(1)
assert_frame_equal(const_add, self.frame + 1)
# corner cases
result = self.frame.add(self.frame[:0])
assert_frame_equal(result, self.frame * np.nan)
result = self.frame[:0].add(self.frame)
assert_frame_equal(result, self.frame * np.nan)
with assertRaisesRegexp(NotImplementedError, 'fill_value'):
self.frame.add(self.frame.irow(0), fill_value=3)
with assertRaisesRegexp(NotImplementedError, 'fill_value'):
self.frame.add(self.frame.irow(0), axis='index', fill_value=3)
def test_binary_ops_align(self):
# test aligning binary ops
# GH 6681
index=MultiIndex.from_product([list('abc'),
['one','two','three'],
[1,2,3]],
names=['first','second','third'])
df = DataFrame(np.arange(27*3).reshape(27,3),
index=index,
columns=['value1','value2','value3']).sortlevel()
idx = pd.IndexSlice
for op in ['add','sub','mul','div','truediv']:
opa = getattr(operator,op,None)
if opa is None:
continue
x = Series([ 1.0, 10.0, 100.0], [1,2,3])
result = getattr(df,op)(x,level='third',axis=0)
expected = pd.concat([ opa(df.loc[idx[:,:,i],:],v) for i, v in x.iteritems() ]).sortlevel()
assert_frame_equal(result, expected)
x = Series([ 1.0, 10.0], ['two','three'])
result = getattr(df,op)(x,level='second',axis=0)
expected = pd.concat([ opa(df.loc[idx[:,i],:],v) for i, v in x.iteritems() ]).reindex_like(df).sortlevel()
assert_frame_equal(result, expected)
def test_arith_mixed(self):
left = DataFrame({'A': ['a', 'b', 'c'],
'B': [1, 2, 3]})
result = left + left
expected = DataFrame({'A': ['aa', 'bb', 'cc'],
'B': [2, 4, 6]})
assert_frame_equal(result, expected)
def test_arith_getitem_commute(self):
df = DataFrame({'A': [1.1, 3.3], 'B': [2.5, -3.9]})
self._test_op(df, operator.add)
self._test_op(df, operator.sub)
self._test_op(df, operator.mul)
self._test_op(df, operator.truediv)
self._test_op(df, operator.floordiv)
self._test_op(df, operator.pow)
self._test_op(df, lambda x, y: y + x)
self._test_op(df, lambda x, y: y - x)
self._test_op(df, lambda x, y: y * x)
self._test_op(df, lambda x, y: y / x)
self._test_op(df, lambda x, y: y ** x)
self._test_op(df, lambda x, y: x + y)
self._test_op(df, lambda x, y: x - y)
self._test_op(df, lambda x, y: x * y)
self._test_op(df, lambda x, y: x / y)
self._test_op(df, lambda x, y: x ** y)
@staticmethod
def _test_op(df, op):
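        # applying `op` to the whole frame must match applying it column by column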
result = op(df, 1)
if not df.columns.is_unique:
raise ValueError("Only unique columns supported by this test")
for col in result.columns:
assert_series_equal(result[col], op(df[col], 1))
def test_bool_flex_frame(self):
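        # flex comparison methods (eq/ne/gt/lt/ge/le) against another frame, an ndarray,
        # a scalar and a Series, both aligned and unaligned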
data = np.random.randn(5, 3)
other_data = np.random.randn(5, 3)
df = DataFrame(data)
other = DataFrame(other_data)
ndim_5 = np.ones(df.shape + (1, 3))
# Unaligned
def _check_unaligned_frame(meth, op, df, other):
part_o = other.ix[3:, 1:].copy()
rs = meth(part_o)
xp = op(df, part_o.reindex(index=df.index, columns=df.columns))
assert_frame_equal(rs, xp)
# DataFrame
self.assertTrue(df.eq(df).values.all())
self.assertFalse(df.ne(df).values.any())
for op in ['eq', 'ne', 'gt', 'lt', 'ge', 'le']:
f = getattr(df, op)
o = getattr(operator, op)
# No NAs
assert_frame_equal(f(other), o(df, other))
_check_unaligned_frame(f, o, df, other)
# ndarray
assert_frame_equal(f(other.values), o(df, other.values))
# scalar
assert_frame_equal(f(0), o(df, 0))
# NAs
assert_frame_equal(f(np.nan), o(df, np.nan))
with assertRaisesRegexp(ValueError, 'shape'):
f(ndim_5)
# Series
def _test_seq(df, idx_ser, col_ser):
idx_eq = df.eq(idx_ser, axis=0)
col_eq = df.eq(col_ser)
idx_ne = df.ne(idx_ser, axis=0)
col_ne = df.ne(col_ser)
assert_frame_equal(col_eq, df == Series(col_ser))
assert_frame_equal(col_eq, -col_ne)
assert_frame_equal(idx_eq, -idx_ne)
assert_frame_equal(idx_eq, df.T.eq(idx_ser).T)
assert_frame_equal(col_eq, df.eq(list(col_ser)))
assert_frame_equal(idx_eq, df.eq(Series(idx_ser), axis=0))
assert_frame_equal(idx_eq, df.eq(list(idx_ser), axis=0))
idx_gt = df.gt(idx_ser, axis=0)
col_gt = df.gt(col_ser)
idx_le = df.le(idx_ser, axis=0)
col_le = df.le(col_ser)
assert_frame_equal(col_gt, df > Series(col_ser))
assert_frame_equal(col_gt, -col_le)
assert_frame_equal(idx_gt, -idx_le)
assert_frame_equal(idx_gt, df.T.gt(idx_ser).T)
idx_ge = df.ge(idx_ser, axis=0)
col_ge = df.ge(col_ser)
idx_lt = df.lt(idx_ser, axis=0)
col_lt = df.lt(col_ser)
assert_frame_equal(col_ge, df >= Series(col_ser))
assert_frame_equal(col_ge, -col_lt)
assert_frame_equal(idx_ge, -idx_lt)
assert_frame_equal(idx_ge, df.T.ge(idx_ser).T)
idx_ser = Series(np.random.randn(5))
col_ser = Series(np.random.randn(3))
_test_seq(df, idx_ser, col_ser)
# list/tuple
_test_seq(df, idx_ser.values, col_ser.values)
# NA
df.ix[0, 0] = np.nan
rs = df.eq(df)
self.assertFalse(rs.ix[0, 0])
rs = df.ne(df)
self.assertTrue(rs.ix[0, 0])
rs = df.gt(df)
self.assertFalse(rs.ix[0, 0])
rs = df.lt(df)
self.assertFalse(rs.ix[0, 0])
rs = df.ge(df)
self.assertFalse(rs.ix[0, 0])
rs = df.le(df)
self.assertFalse(rs.ix[0, 0])
# complex
arr = np.array([np.nan, 1, 6, np.nan])
arr2 = np.array([2j, np.nan, 7, None])
df = DataFrame({'a': arr})
df2 = DataFrame({'a': arr2})
rs = df.gt(df2)
self.assertFalse(rs.values.any())
rs = df.ne(df2)
self.assertTrue(rs.values.all())
arr3 = np.array([2j, np.nan, None])
df3 = DataFrame({'a': arr3})
rs = df3.gt(2j)
self.assertFalse(rs.values.any())
# corner, dtype=object
df1 = DataFrame({'col': ['foo', np.nan, 'bar']})
df2 = DataFrame({'col': ['foo', datetime.now(), 'bar']})
result = df1.ne(df2)
exp = DataFrame({'col': [False, True, False]})
assert_frame_equal(result, exp)
def test_arith_flex_series(self):
df = self.simple
row = df.xs('a')
col = df['two']
# after arithmetic refactor, add truediv here
ops = ['add', 'sub', 'mul', 'mod']
for op in ops:
f = getattr(df, op)
op = getattr(operator, op)
assert_frame_equal(f(row), op(df, row))
assert_frame_equal(f(col, axis=0), op(df.T, col).T)
# special case for some reason
assert_frame_equal(df.add(row, axis=None), df + row)
# cases which will be refactored after big arithmetic refactor
assert_frame_equal(df.div(row), df / row)
assert_frame_equal(df.div(col, axis=0), (df.T / col).T)
# broadcasting issue in GH7325
df = DataFrame(np.arange(3*2).reshape((3,2)),dtype='int64')
expected = DataFrame([[np.inf,np.inf],[1.0,1.5],[1.0,1.25]])
result = df.div(df[0],axis='index')
assert_frame_equal(result,expected)
df = DataFrame(np.arange(3*2).reshape((3,2)),dtype='float64')
expected = DataFrame([[np.nan,np.inf],[1.0,1.5],[1.0,1.25]])
result = df.div(df[0],axis='index')
assert_frame_equal(result,expected)
def test_arith_non_pandas_object(self):
df = self.simple
val1 = df.xs('a').values
added = DataFrame(df.values + val1, index=df.index, columns=df.columns)
assert_frame_equal(df + val1, added)
added = DataFrame((df.values.T + val1).T,
index=df.index, columns=df.columns)
assert_frame_equal(df.add(val1, axis=0), added)
val2 = list(df['two'])
added = DataFrame(df.values + val2, index=df.index, columns=df.columns)
assert_frame_equal(df + val2, added)
added = DataFrame((df.values.T + val2).T, index=df.index,
columns=df.columns)
assert_frame_equal(df.add(val2, axis='index'), added)
val3 = np.random.rand(*df.shape)
added = DataFrame(df.values + val3, index=df.index, columns=df.columns)
assert_frame_equal(df.add(val3), added)
def test_combineFrame(self):
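        # frame + frame aligns on both index and columns; missing entries become NaN
        # and mixed dtypes upcast as needed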
frame_copy = self.frame.reindex(self.frame.index[::2])
del frame_copy['D']
frame_copy['C'][:5] = nan
added = self.frame + frame_copy
tm.assert_dict_equal(added['A'].valid(),
self.frame['A'] * 2,
compare_keys=False)
self.assertTrue(np.isnan(added['C'].reindex(frame_copy.index)[:5]).all())
# assert(False)
self.assertTrue(np.isnan(added['D']).all())
self_added = self.frame + self.frame
self.assertTrue(self_added.index.equals(self.frame.index))
added_rev = frame_copy + self.frame
self.assertTrue(np.isnan(added['D']).all())
# corner cases
# empty
plus_empty = self.frame + self.empty
self.assertTrue(np.isnan(plus_empty.values).all())
empty_plus = self.empty + self.frame
self.assertTrue(np.isnan(empty_plus.values).all())
empty_empty = self.empty + self.empty
self.assertTrue(empty_empty.empty)
# out of order
reverse = self.frame.reindex(columns=self.frame.columns[::-1])
assert_frame_equal(reverse + self.frame, self.frame * 2)
# mix vs float64, upcast
added = self.frame + self.mixed_float
_check_mixed_float(added, dtype = 'float64')
added = self.mixed_float + self.frame
_check_mixed_float(added, dtype = 'float64')
# mix vs mix
added = self.mixed_float + self.mixed_float2
_check_mixed_float(added, dtype = dict(C = None))
added = self.mixed_float2 + self.mixed_float
_check_mixed_float(added, dtype = dict(C = None))
# with int
added = self.frame + self.mixed_int
_check_mixed_float(added, dtype = 'float64')
def test_combineSeries(self):
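        # frame + series matches on the columns and broadcasts down the rows; extra
        # series entries show up as all-NaN columns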
# Series
series = self.frame.xs(self.frame.index[0])
added = self.frame + series
for key, s in compat.iteritems(added):
assert_series_equal(s, self.frame[key] + series[key])
larger_series = series.to_dict()
larger_series['E'] = 1
larger_series = Series(larger_series)
larger_added = self.frame + larger_series
for key, s in compat.iteritems(self.frame):
assert_series_equal(larger_added[key], s + series[key])
self.assertIn('E', larger_added)
self.assertTrue(np.isnan(larger_added['E']).all())
# vs mix (upcast) as needed
added = self.mixed_float + series
_check_mixed_float(added, dtype = 'float64')
added = self.mixed_float + series.astype('float32')
_check_mixed_float(added, dtype = dict(C = None))
added = self.mixed_float + series.astype('float16')
_check_mixed_float(added, dtype = dict(C = None))
        #### these raise with numexpr, as we are adding an int64 to a uint64 ####
# vs int
#added = self.mixed_int + (100*series).astype('int64')
#_check_mixed_int(added, dtype = dict(A = 'int64', B = 'float64', C = 'int64', D = 'int64'))
#added = self.mixed_int + (100*series).astype('int32')
#_check_mixed_int(added, dtype = dict(A = 'int32', B = 'float64', C = 'int32', D = 'int64'))
# TimeSeries
buf = StringIO()
tmp = sys.stderr
sys.stderr = buf
try:
ts = self.tsframe['A']
added = self.tsframe + ts
for key, col in compat.iteritems(self.tsframe):
assert_series_equal(added[key], col + ts)
smaller_frame = self.tsframe[:-5]
smaller_added = smaller_frame + ts
self.assertTrue(smaller_added.index.equals(self.tsframe.index))
smaller_ts = ts[:-5]
smaller_added2 = self.tsframe + smaller_ts
assert_frame_equal(smaller_added, smaller_added2)
# length 0
result = self.tsframe + ts[:0]
# Frame is length 0
result = self.tsframe[:0] + ts
self.assertEqual(len(result), 0)
# empty but with non-empty index
frame = self.tsframe[:1].reindex(columns=[])
result = frame * ts
self.assertEqual(len(result), len(ts))
finally:
sys.stderr = tmp
def test_combineFunc(self):
result = self.frame * 2
self.assert_numpy_array_equal(result.values, self.frame.values * 2)
# vs mix
result = self.mixed_float * 2
for c, s in compat.iteritems(result):
self.assert_numpy_array_equal(s.values, self.mixed_float[c].values * 2)
_check_mixed_float(result, dtype = dict(C = None))
result = self.empty * 2
self.assertIs(result.index, self.empty.index)
self.assertEqual(len(result.columns), 0)
def test_comparisons(self):
df1 = tm.makeTimeDataFrame()
df2 = tm.makeTimeDataFrame()
row = self.simple.xs('a')
ndim_5 = np.ones(df1.shape + (1, 1, 1))
def test_comp(func):
result = func(df1, df2)
self.assert_numpy_array_equal(result.values,
func(df1.values, df2.values))
with assertRaisesRegexp(ValueError, 'Wrong number of dimensions'):
func(df1, ndim_5)
result2 = func(self.simple, row)
self.assert_numpy_array_equal(result2.values,
func(self.simple.values, row.values))
result3 = func(self.frame, 0)
self.assert_numpy_array_equal(result3.values,
func(self.frame.values, 0))
with assertRaisesRegexp(ValueError, 'Can only compare '
'identically-labeled DataFrame'):
func(self.simple, self.simple[:2])
test_comp(operator.eq)
test_comp(operator.ne)
test_comp(operator.lt)
test_comp(operator.gt)
test_comp(operator.ge)
test_comp(operator.le)
def test_string_comparison(self):
df = DataFrame([{"a": 1, "b": "foo"}, {"a": 2, "b": "bar"}])
mask_a = df.a > 1
assert_frame_equal(df[mask_a], df.ix[1:1, :])
assert_frame_equal(df[-mask_a], df.ix[0:0, :])
mask_b = df.b == "foo"
assert_frame_equal(df[mask_b], df.ix[0:0, :])
assert_frame_equal(df[-mask_b], df.ix[1:1, :])
def test_float_none_comparison(self):
df = DataFrame(np.random.randn(8, 3), index=lrange(8),
columns=['A', 'B', 'C'])
self.assertRaises(TypeError, df.__eq__, None)
def test_boolean_comparison(self):
# GH 4576
# boolean comparisons with a tuple/list give unexpected results
df = DataFrame(np.arange(6).reshape((3,2)))
b = np.array([2, 2])
b_r = np.atleast_2d([2,2])
b_c = b_r.T
l = (2,2,2)
tup = tuple(l)
# gt
expected = DataFrame([[False,False],[False,True],[True,True]])
result = df>b
assert_frame_equal(result,expected)
result = df.values>b
assert_array_equal(result,expected.values)
result = df>l
assert_frame_equal(result,expected)
result = df>tup
assert_frame_equal(result,expected)
result = df>b_r
assert_frame_equal(result,expected)
result = df.values>b_r
assert_array_equal(result,expected.values)
self.assertRaises(ValueError, df.__gt__, b_c)
self.assertRaises(ValueError, df.values.__gt__, b_c)
# ==
expected = DataFrame([[False,False],[True,False],[False,False]])
result = df == b
assert_frame_equal(result,expected)
result = df==l
assert_frame_equal(result,expected)
result = df==tup
assert_frame_equal(result,expected)
result = df == b_r
assert_frame_equal(result,expected)
result = df.values == b_r
assert_array_equal(result,expected.values)
self.assertRaises(ValueError, lambda : df == b_c)
self.assertFalse((df.values == b_c))
# with alignment
df = DataFrame(np.arange(6).reshape((3,2)),columns=list('AB'),index=list('abc'))
expected.index=df.index
expected.columns=df.columns
result = df==l
assert_frame_equal(result,expected)
result = df==tup
assert_frame_equal(result,expected)
# not shape compatible
self.assertRaises(ValueError, lambda : df == (2,2))
self.assertRaises(ValueError, lambda : df == [2,2])
def test_to_csv_deprecated_options(self):
pname = '__tmp_to_csv_deprecated_options__'
with ensure_clean(pname) as path:
self.tsframe[1:3] = np.nan
self.tsframe.to_csv(path, nanRep='foo')
recons = read_csv(path,index_col=0,parse_dates=[0],na_values=['foo'])
assert_frame_equal(self.tsframe, recons)
with tm.assert_produces_warning(FutureWarning):
self.frame.to_csv(path, cols=['A', 'B'])
with tm.assert_produces_warning(False):
self.frame.to_csv(path, columns=['A', 'B'])
def test_to_csv_from_csv(self):
pname = '__tmp_to_csv_from_csv__'
with ensure_clean(pname) as path:
self.frame['A'][:5] = nan
self.frame.to_csv(path)
self.frame.to_csv(path, columns=['A', 'B'])
self.frame.to_csv(path, header=False)
self.frame.to_csv(path, index=False)
# test roundtrip
self.tsframe.to_csv(path)
recons = DataFrame.from_csv(path)
assert_frame_equal(self.tsframe, recons)
self.tsframe.to_csv(path, index_label='index')
recons = DataFrame.from_csv(path, index_col=None)
assert(len(recons.columns) == len(self.tsframe.columns) + 1)
# no index
self.tsframe.to_csv(path, index=False)
recons = DataFrame.from_csv(path, index_col=None)
assert_almost_equal(self.tsframe.values, recons.values)
# corner case
dm = DataFrame({'s1': Series(lrange(3), lrange(3)),
's2': Series(lrange(2), lrange(2))})
dm.to_csv(path)
recons = DataFrame.from_csv(path)
assert_frame_equal(dm, recons)
with ensure_clean(pname) as path:
# duplicate index
df = DataFrame(np.random.randn(3, 3), index=['a', 'a', 'b'],
columns=['x', 'y', 'z'])
df.to_csv(path)
result = DataFrame.from_csv(path)
assert_frame_equal(result, df)
midx = MultiIndex.from_tuples([('A', 1, 2), ('A', 1, 2), ('B', 1, 2)])
df = DataFrame(np.random.randn(3, 3), index=midx,
columns=['x', 'y', 'z'])
df.to_csv(path)
result = DataFrame.from_csv(path, index_col=[0, 1, 2],
parse_dates=False)
assert_frame_equal(result, df, check_names=False) # TODO from_csv names index ['Unnamed: 1', 'Unnamed: 2'] should it ?
# column aliases
col_aliases = Index(['AA', 'X', 'Y', 'Z'])
self.frame2.to_csv(path, header=col_aliases)
rs = DataFrame.from_csv(path)
xp = self.frame2.copy()
xp.columns = col_aliases
assert_frame_equal(xp, rs)
self.assertRaises(ValueError, self.frame2.to_csv, path,
header=['AA', 'X'])
with ensure_clean(pname) as path:
import pandas as pd
df1 = DataFrame(np.random.randn(3, 1))
df2 = DataFrame(np.random.randn(3, 1))
df1.to_csv(path)
df2.to_csv(path,mode='a',header=False)
xp = pd.concat([df1,df2])
rs = pd.read_csv(path,index_col=0)
rs.columns = lmap(int,rs.columns)
xp.columns = lmap(int,xp.columns)
assert_frame_equal(xp,rs)
def test_to_csv_cols_reordering(self):
# GH3454
import pandas as pd
def _check_df(df,cols=None):
with ensure_clean() as path:
df.to_csv(path,columns = cols,engine='python')
rs_p = pd.read_csv(path,index_col=0)
df.to_csv(path,columns = cols,chunksize=chunksize)
rs_c = pd.read_csv(path,index_col=0)
if cols:
df = df[cols]
assert (rs_c.columns==rs_p.columns).all()
assert_frame_equal(df,rs_c,check_names=False)
chunksize=5
N = int(chunksize*2.5)
df= mkdf(N, 3)
cs = df.columns
cols = [cs[2],cs[0]]
_check_df(df,cols)
def test_to_csv_legacy_raises_on_dupe_cols(self):
df= mkdf(10, 3)
df.columns = ['a','a','b']
with ensure_clean() as path:
self.assertRaises(NotImplementedError,df.to_csv,path,engine='python')
def test_to_csv_new_dupe_cols(self):
import pandas as pd
def _check_df(df,cols=None):
with ensure_clean() as path:
df.to_csv(path,columns = cols,chunksize=chunksize)
rs_c = pd.read_csv(path,index_col=0)
# we wrote them in a different order
# so compare them in that order
if cols is not None:
if df.columns.is_unique:
rs_c.columns = cols
else:
indexer, missing = df.columns.get_indexer_non_unique(cols)
rs_c.columns = df.columns.take(indexer)
for c in cols:
obj_df = df[c]
obj_rs = rs_c[c]
if isinstance(obj_df,Series):
assert_series_equal(obj_df,obj_rs)
else:
assert_frame_equal(obj_df,obj_rs,check_names=False)
# wrote in the same order
else:
rs_c.columns = df.columns
assert_frame_equal(df,rs_c,check_names=False)
chunksize=5
N = int(chunksize*2.5)
# dupe cols
df= mkdf(N, 3)
df.columns = ['a','a','b']
_check_df(df,None)
# dupe cols with selection
cols = ['b','a']
_check_df(df,cols)
@slow
def test_to_csv_moar(self):
path = '__tmp_to_csv_moar__'
def _do_test(df,path,r_dtype=None,c_dtype=None,rnlvl=None,cnlvl=None,
dupe_col=False):
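            # write `df` with to_csv (chunked), read it back with from_csv, coerce the
            # index/column dtypes according to the r_dtype/c_dtype codes and compare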
kwargs = dict(parse_dates=False)
if cnlvl:
if rnlvl is not None:
kwargs['index_col'] = lrange(rnlvl)
kwargs['header'] = lrange(cnlvl)
with ensure_clean(path) as path:
df.to_csv(path,encoding='utf8',chunksize=chunksize,tupleize_cols=False)
recons = DataFrame.from_csv(path,tupleize_cols=False,**kwargs)
else:
kwargs['header'] = 0
with ensure_clean(path) as path:
df.to_csv(path,encoding='utf8',chunksize=chunksize)
recons = DataFrame.from_csv(path,**kwargs)
def _to_uni(x):
if not isinstance(x, compat.text_type):
return x.decode('utf8')
return x
if dupe_col:
                # read_csv disambiguates the columns by
                # labeling them dupe.1, dupe.2, etc.; monkey-patch the columns back
recons.columns = df.columns
if rnlvl and not cnlvl:
delta_lvl = [recons.icol(i).values for i in range(rnlvl-1)]
ix=MultiIndex.from_arrays([list(recons.index)]+delta_lvl)
recons.index = ix
recons = recons.iloc[:,rnlvl-1:]
type_map = dict(i='i',f='f',s='O',u='O',dt='O',p='O')
if r_dtype:
if r_dtype == 'u': # unicode
r_dtype='O'
recons.index = np.array(lmap(_to_uni,recons.index),
dtype=r_dtype)
df.index = np.array(lmap(_to_uni,df.index),dtype=r_dtype)
                elif r_dtype == 'dt': # datetime
r_dtype='O'
recons.index = np.array(lmap(Timestamp,recons.index),
dtype=r_dtype)
df.index = np.array(lmap(Timestamp,df.index),dtype=r_dtype)
elif r_dtype == 'p':
r_dtype='O'
recons.index = np.array(list(map(Timestamp,
recons.index.to_datetime())),
dtype=r_dtype)
df.index = np.array(list(map(Timestamp,
df.index.to_datetime())),
dtype=r_dtype)
else:
r_dtype= type_map.get(r_dtype)
recons.index = np.array(recons.index,dtype=r_dtype )
df.index = np.array(df.index,dtype=r_dtype )
if c_dtype:
if c_dtype == 'u':
c_dtype='O'
recons.columns = np.array(lmap(_to_uni,recons.columns),
dtype=c_dtype)
df.columns = np.array(lmap(_to_uni,df.columns),dtype=c_dtype )
elif c_dtype == 'dt':
c_dtype='O'
recons.columns = np.array(lmap(Timestamp,recons.columns),
dtype=c_dtype )
df.columns = np.array(lmap(Timestamp,df.columns),dtype=c_dtype)
elif c_dtype == 'p':
c_dtype='O'
recons.columns = np.array(lmap(Timestamp,recons.columns.to_datetime()),
dtype=c_dtype)
df.columns = np.array(lmap(Timestamp,df.columns.to_datetime()),dtype=c_dtype )
else:
c_dtype= type_map.get(c_dtype)
recons.columns = np.array(recons.columns,dtype=c_dtype )
df.columns = np.array(df.columns,dtype=c_dtype )
assert_frame_equal(df,recons,check_names=False,check_less_precise=True)
N = 100
chunksize=1000
# GH3437
from pandas import NaT
def make_dtnat_arr(n,nnat=None):
if nnat is None:
nnat= int(n*0.1) # 10%
s=list(date_range('2000',freq='5min',periods=n))
if nnat:
for i in np.random.randint(0,len(s),nnat):
s[i] = NaT
i = np.random.randint(100)
s[-i] = NaT
s[i] = NaT
return s
# N=35000
s1=make_dtnat_arr(chunksize+5)
s2=make_dtnat_arr(chunksize+5,0)
path = '1.csv'
        # s3=make_dtnat_arr(chunksize+5,0)
with ensure_clean('.csv') as pth:
df=DataFrame(dict(a=s1,b=s2))
df.to_csv(pth,chunksize=chunksize)
recons = DataFrame.from_csv(pth).convert_objects('coerce')
assert_frame_equal(df, recons,check_names=False,check_less_precise=True)
for ncols in [4]:
base = int((chunksize// ncols or 1) or 1)
for nrows in [2,10,N-1,N,N+1,N+2,2*N-2,2*N-1,2*N,2*N+1,2*N+2,
base-1,base,base+1]:
_do_test(mkdf(nrows, ncols,r_idx_type='dt',
c_idx_type='s'),path, 'dt','s')
for r_idx_type,c_idx_type in [('i','i'),('s','s'),('u','dt'),('p','p')]:
for ncols in [1,2,3,4]:
base = int((chunksize// ncols or 1) or 1)
for nrows in [2,10,N-1,N,N+1,N+2,2*N-2,2*N-1,2*N,2*N+1,2*N+2,
base-1,base,base+1]:
_do_test(mkdf(nrows, ncols,r_idx_type=r_idx_type,
c_idx_type=c_idx_type),path,r_idx_type,c_idx_type)
for ncols in [1,2,3,4]:
base = int((chunksize// ncols or 1) or 1)
for nrows in [10,N-2,N-1,N,N+1,N+2,2*N-2,2*N-1,2*N,2*N+1,2*N+2,
base-1,base,base+1]:
_do_test(mkdf(nrows, ncols),path)
for nrows in [10,N-2,N-1,N,N+1,N+2]:
df = mkdf(nrows, 3)
cols = list(df.columns)
cols[:2] = ["dupe","dupe"]
cols[-2:] = ["dupe","dupe"]
ix = list(df.index)
ix[:2] = ["rdupe","rdupe"]
ix[-2:] = ["rdupe","rdupe"]
df.index=ix
df.columns=cols
_do_test(df,path,dupe_col=True)
_do_test(DataFrame(index=lrange(10)),path)
_do_test(mkdf(chunksize//2+1, 2,r_idx_nlevels=2),path,rnlvl=2)
for ncols in [2,3,4]:
base = int(chunksize//ncols)
for nrows in [10,N-2,N-1,N,N+1,N+2,2*N-2,2*N-1,2*N,2*N+1,2*N+2,
base-1,base,base+1]:
_do_test(mkdf(nrows, ncols,r_idx_nlevels=2),path,rnlvl=2)
_do_test(mkdf(nrows, ncols,c_idx_nlevels=2),path,cnlvl=2)
_do_test(mkdf(nrows, ncols,r_idx_nlevels=2,c_idx_nlevels=2),
path,rnlvl=2,cnlvl=2)
def test_to_csv_from_csv_w_some_infs(self):
# test roundtrip with inf, -inf, nan, as full columns and mix
self.frame['G'] = np.nan
f = lambda x: [np.inf, np.nan][np.random.rand() < .5]
self.frame['H'] = self.frame.index.map(f)
with ensure_clean() as path:
self.frame.to_csv(path)
recons = DataFrame.from_csv(path)
assert_frame_equal(self.frame, recons, check_names=False) # TODO to_csv drops column name
assert_frame_equal(np.isinf(self.frame), np.isinf(recons), check_names=False)
def test_to_csv_from_csv_w_all_infs(self):
# test roundtrip with inf, -inf, nan, as full columns and mix
self.frame['E'] = np.inf
self.frame['F'] = -np.inf
with ensure_clean() as path:
self.frame.to_csv(path)
recons = DataFrame.from_csv(path)
assert_frame_equal(self.frame, recons, check_names=False) # TODO to_csv drops column name
assert_frame_equal(np.isinf(self.frame), np.isinf(recons), check_names=False)
def test_to_csv_no_index(self):
# GH 3624, after appending columns, to_csv fails
pname = '__tmp_to_csv_no_index__'
with ensure_clean(pname) as path:
df = DataFrame({'c1':[1,2,3], 'c2':[4,5,6]})
df.to_csv(path, index=False)
result = read_csv(path)
assert_frame_equal(df,result)
df['c3'] = Series([7,8,9],dtype='int64')
df.to_csv(path, index=False)
result = read_csv(path)
assert_frame_equal(df,result)
def test_to_csv_headers(self):
# GH6186, the presence or absence of `index` incorrectly
# causes to_csv to have different header semantics.
pname = '__tmp_to_csv_headers__'
from_df = DataFrame([[1, 2], [3, 4]], columns=['A', 'B'])
to_df = DataFrame([[1, 2], [3, 4]], columns=['X', 'Y'])
with ensure_clean(pname) as path:
from_df.to_csv(path, header=['X', 'Y'])
recons = DataFrame.from_csv(path)
assert_frame_equal(to_df, recons)
from_df.to_csv(path, index=False, header=['X', 'Y'])
recons = DataFrame.from_csv(path)
recons.reset_index(inplace=True)
assert_frame_equal(to_df, recons)
def test_to_csv_multiindex(self):
pname = '__tmp_to_csv_multiindex__'
frame = self.frame
old_index = frame.index
arrays = np.arange(len(old_index) * 2).reshape(2, -1)
new_index = MultiIndex.from_arrays(arrays, names=['first', 'second'])
frame.index = new_index
with ensure_clean(pname) as path:
frame.to_csv(path, header=False)
frame.to_csv(path, columns=['A', 'B'])
# round trip
frame.to_csv(path)
df = DataFrame.from_csv(path, index_col=[0, 1], parse_dates=False)
assert_frame_equal(frame, df, check_names=False) # TODO to_csv drops column name
self.assertEqual(frame.index.names, df.index.names)
            self.frame.index = old_index  # needed if setUp becomes a classmethod
# try multiindex with dates
tsframe = self.tsframe
old_index = tsframe.index
new_index = [old_index, np.arange(len(old_index))]
tsframe.index = MultiIndex.from_arrays(new_index)
tsframe.to_csv(path, index_label=['time', 'foo'])
recons = DataFrame.from_csv(path, index_col=[0, 1])
assert_frame_equal(tsframe, recons, check_names=False) # TODO to_csv drops column name
# do not load index
tsframe.to_csv(path)
recons = DataFrame.from_csv(path, index_col=None)
np.testing.assert_equal(len(recons.columns), len(tsframe.columns) + 2)
# no index
tsframe.to_csv(path, index=False)
recons = DataFrame.from_csv(path, index_col=None)
assert_almost_equal(recons.values, self.tsframe.values)
            self.tsframe.index = old_index  # needed if setUp becomes a classmethod
with ensure_clean(pname) as path:
# GH3571, GH1651, GH3141
def _make_frame(names=None):
if names is True:
names = ['first','second']
return DataFrame(np.random.randint(0,10,size=(3,3)),
columns=MultiIndex.from_tuples([('bah', 'foo'),
('bah', 'bar'),
('ban', 'baz')],
names=names),
dtype='int64')
# column & index are multi-index
df = mkdf(5,3,r_idx_nlevels=2,c_idx_nlevels=4)
df.to_csv(path,tupleize_cols=False)
result = read_csv(path,header=[0,1,2,3],index_col=[0,1],tupleize_cols=False)
assert_frame_equal(df,result)
# column is mi
df = mkdf(5,3,r_idx_nlevels=1,c_idx_nlevels=4)
df.to_csv(path,tupleize_cols=False)
result = read_csv(path,header=[0,1,2,3],index_col=0,tupleize_cols=False)
assert_frame_equal(df,result)
# dup column names?
df = mkdf(5,3,r_idx_nlevels=3,c_idx_nlevels=4)
df.to_csv(path,tupleize_cols=False)
result = read_csv(path,header=[0,1,2,3],index_col=[0,1,2],tupleize_cols=False)
assert_frame_equal(df,result)
# writing with no index
df = _make_frame()
df.to_csv(path,tupleize_cols=False,index=False)
result = read_csv(path,header=[0,1],tupleize_cols=False)
assert_frame_equal(df,result)
# we lose the names here
df = _make_frame(True)
df.to_csv(path,tupleize_cols=False,index=False)
result = read_csv(path,header=[0,1],tupleize_cols=False)
self.assertTrue(all([ x is None for x in result.columns.names ]))
result.columns.names = df.columns.names
assert_frame_equal(df,result)
# tupleize_cols=True and index=False
df = _make_frame(True)
df.to_csv(path,tupleize_cols=True,index=False)
result = read_csv(path,header=0,tupleize_cols=True,index_col=None)
result.columns = df.columns
assert_frame_equal(df,result)
# whatsnew example
df = _make_frame()
df.to_csv(path,tupleize_cols=False)
result = read_csv(path,header=[0,1],index_col=[0],tupleize_cols=False)
assert_frame_equal(df,result)
df = _make_frame(True)
df.to_csv(path,tupleize_cols=False)
result = read_csv(path,header=[0,1],index_col=[0],tupleize_cols=False)
assert_frame_equal(df,result)
# column & index are multi-index (compatibility)
df = mkdf(5,3,r_idx_nlevels=2,c_idx_nlevels=4)
df.to_csv(path,tupleize_cols=True)
result = read_csv(path,header=0,index_col=[0,1],tupleize_cols=True)
result.columns = df.columns
assert_frame_equal(df,result)
# invalid options
df = _make_frame(True)
df.to_csv(path,tupleize_cols=False)
# catch invalid headers
with assertRaisesRegexp(CParserError, 'Passed header=\[0,1,2\] are too many rows for this multi_index of columns'):
read_csv(path,tupleize_cols=False,header=lrange(3),index_col=0)
with assertRaisesRegexp(CParserError, 'Passed header=\[0,1,2,3,4,5,6\], len of 7, but only 6 lines in file'):
read_csv(path,tupleize_cols=False,header=lrange(7),index_col=0)
for i in [4,5,6]:
with tm.assertRaises(CParserError):
read_csv(path, tupleize_cols=False, header=lrange(i), index_col=0)
# write with cols
with assertRaisesRegexp(TypeError, 'cannot specify cols with a MultiIndex'):
df.to_csv(path, tupleize_cols=False, columns=['foo', 'bar'])
with ensure_clean(pname) as path:
# empty
tsframe[:0].to_csv(path)
recons = DataFrame.from_csv(path)
exp = tsframe[:0]
exp.index = []
self.assertTrue(recons.columns.equals(exp.columns))
self.assertEqual(len(recons), 0)
def test_to_csv_float32_nanrep(self):
df = DataFrame(np.random.randn(1, 4).astype(np.float32))
df[1] = np.nan
with ensure_clean('__tmp_to_csv_float32_nanrep__.csv') as path:
df.to_csv(path, na_rep=999)
with open(path) as f:
lines = f.readlines()
self.assertEqual(lines[1].split(',')[2], '999')
def test_to_csv_withcommas(self):
# Commas inside fields should be correctly escaped when saving as CSV.
df = DataFrame({'A': [1, 2, 3], 'B': ['5,6', '7,8', '9,0']})
with ensure_clean('__tmp_to_csv_withcommas__.csv') as path:
df.to_csv(path)
df2 = DataFrame.from_csv(path)
assert_frame_equal(df2, df)
def test_to_csv_mixed(self):
def create_cols(name):
return [ "%s%03d" % (name,i) for i in range(5) ]
df_float = DataFrame(np.random.randn(100, 5),dtype='float64',columns=create_cols('float'))
df_int = DataFrame(np.random.randn(100, 5),dtype='int64',columns=create_cols('int'))
df_bool = DataFrame(True,index=df_float.index,columns=create_cols('bool'))
df_object = DataFrame('foo',index=df_float.index,columns=create_cols('object'))
df_dt = DataFrame(Timestamp('20010101'),index=df_float.index,columns=create_cols('date'))
# add in some nans
df_float.ix[30:50,1:3] = np.nan
#### this is a bug in read_csv right now ####
#df_dt.ix[30:50,1:3] = np.nan
df = pd.concat([ df_float, df_int, df_bool, df_object, df_dt ], axis=1)
# dtype
dtypes = dict()
for n,dtype in [('float',np.float64),('int',np.int64),('bool',np.bool),('object',np.object)]:
for c in create_cols(n):
dtypes[c] = dtype
with ensure_clean() as filename:
df.to_csv(filename)
rs = read_csv(filename, index_col=0, dtype=dtypes, parse_dates=create_cols('date'))
assert_frame_equal(rs, df)
def test_to_csv_dups_cols(self):
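# frames with duplicate column labels should round-trip through to_csv/read_csv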
df = DataFrame(np.random.randn(1000, 30),columns=lrange(15)+lrange(15),dtype='float64')
with ensure_clean() as filename:
df.to_csv(filename) # single dtype, fine
result = read_csv(filename,index_col=0)
result.columns = df.columns
assert_frame_equal(result,df)
df_float = DataFrame(np.random.randn(1000, 3),dtype='float64')
df_int = DataFrame(np.random.randn(1000, 3),dtype='int64')
df_bool = DataFrame(True,index=df_float.index,columns=lrange(3))
df_object = DataFrame('foo',index=df_float.index,columns=lrange(3))
df_dt = DataFrame(Timestamp('20010101'),index=df_float.index,columns=lrange(3))
df = pd.concat([ df_float, df_int, df_bool, df_object, df_dt ], axis=1, ignore_index=True)
cols = []
for i in range(5):
cols.extend([0,1,2])
df.columns = cols
from pandas import to_datetime
with ensure_clean() as filename:
df.to_csv(filename)
result = read_csv(filename,index_col=0)
# date cols
for i in ['0.4','1.4','2.4']:
result[i] = to_datetime(result[i])
result.columns = df.columns
assert_frame_equal(result,df)
# GH3457
from pandas.util.testing import makeCustomDataframe as mkdf
N=10
df= mkdf(N, 3)
df.columns = ['a','a','b']
with ensure_clean() as filename:
df.to_csv(filename)
# read_csv will rename the duplicate columns
result = read_csv(filename,index_col=0)
result = result.rename(columns={ 'a.1' : 'a' })
assert_frame_equal(result,df)
def test_to_csv_chunking(self):
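# to_csv output should not depend on the chunksize used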
aa=DataFrame({'A':lrange(100000)})
aa['B'] = aa.A + 1.0
aa['C'] = aa.A + 2.0
aa['D'] = aa.A + 3.0
for chunksize in [10000,50000,100000]:
with ensure_clean() as filename:
aa.to_csv(filename,chunksize=chunksize)
rs = read_csv(filename,index_col=0)
assert_frame_equal(rs, aa)
def test_to_csv_wide_frame_formatting(self):
# Issue #8621
df = DataFrame(np.random.randn(1, 100010), columns=None, index=None)
with ensure_clean() as filename:
df.to_csv(filename, header=False, index=False)
rs = read_csv(filename, header=None)
assert_frame_equal(rs, df)
def test_to_csv_bug(self):
f1 = StringIO('a,1.0\nb,2.0')
df = DataFrame.from_csv(f1, header=None)
newdf = DataFrame({'t': df[df.columns[0]]})
with ensure_clean() as path:
newdf.to_csv(path)
recons = read_csv(path, index_col=0)
assert_frame_equal(recons, newdf, check_names=False) # don't check_names as t != 1
def test_to_csv_unicode(self):
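# unicode column names should round-trip with an explicit encoding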
df = DataFrame({u('c/\u03c3'): [1, 2, 3]})
with ensure_clean() as path:
df.to_csv(path, encoding='UTF-8')
df2 = read_csv(path, index_col=0, encoding='UTF-8')
assert_frame_equal(df, df2)
df.to_csv(path, encoding='UTF-8', index=False)
df2 = read_csv(path, index_col=None, encoding='UTF-8')
assert_frame_equal(df, df2)
def test_to_csv_unicode_index_col(self):
buf = StringIO('')
df = DataFrame(
[[u("\u05d0"), "d2", "d3", "d4"], ["a1", "a2", "a3", "a4"]],
columns=[u("\u05d0"),
u("\u05d1"), u("\u05d2"), u("\u05d3")],
index=[u("\u05d0"), u("\u05d1")])
df.to_csv(buf, encoding='UTF-8')
buf.seek(0)
df2 = read_csv(buf, index_col=0, encoding='UTF-8')
assert_frame_equal(df, df2)
def test_to_csv_stringio(self):
buf = StringIO()
self.frame.to_csv(buf)
buf.seek(0)
recons = read_csv(buf, index_col=0)
assert_frame_equal(recons, self.frame, check_names=False) # TODO to_csv drops column name
def test_to_csv_float_format(self):
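# float_format controls the precision written to the file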
df = DataFrame([[0.123456, 0.234567, 0.567567],
[12.32112, 123123.2, 321321.2]],
index=['A', 'B'], columns=['X', 'Y', 'Z'])
with ensure_clean() as filename:
df.to_csv(filename, float_format='%.2f')
rs = read_csv(filename, index_col=0)
xp = DataFrame([[0.12, 0.23, 0.57],
[12.32, 123123.20, 321321.20]],
index=['A', 'B'], columns=['X', 'Y', 'Z'])
assert_frame_equal(rs, xp)
def test_to_csv_quoting(self):
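# QUOTE_NONNUMERIC should quote only the string values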
df = DataFrame({'A': [1, 2, 3], 'B': ['foo', 'bar', 'baz']})
buf = StringIO()
df.to_csv(buf, index=False, quoting=csv.QUOTE_NONNUMERIC)
result = buf.getvalue()
expected = ('"A","B"\n'
'1,"foo"\n'
'2,"bar"\n'
'3,"baz"\n')
self.assertEqual(result, expected)
# quoting of Windows line terminators when an encoding is specified
# #3503
text = 'a,b,c\n1,"test \r\n",3\n'
df = pd.read_csv(StringIO(text))
buf = StringIO()
df.to_csv(buf, encoding='utf-8', index=False)
self.assertEqual(buf.getvalue(), text)
def test_to_csv_unicodewriter_quoting(self):
df = DataFrame({'A': [1, 2, 3], 'B': ['foo', 'bar', 'baz']})
buf = StringIO()
df.to_csv(buf, index=False, quoting=csv.QUOTE_NONNUMERIC,
encoding='utf-8')
result = buf.getvalue()
expected = ('"A","B"\n'
'1,"foo"\n'
'2,"bar"\n'
'3,"baz"\n')
self.assertEqual(result, expected)
def test_to_csv_quote_none(self):
# GH4328
df = DataFrame({'A': ['hello', '{"hello"}']})
for encoding in (None, 'utf-8'):
buf = StringIO()
df.to_csv(buf, quoting=csv.QUOTE_NONE,
encoding=encoding, index=False)
result = buf.getvalue()
expected = 'A\nhello\n{"hello"}\n'
self.assertEqual(result, expected)
def test_to_csv_index_no_leading_comma(self):
df = DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6]},
index=['one', 'two', 'three'])
buf = StringIO()
df.to_csv(buf, index_label=False)
expected = ('A,B\n'
'one,1,4\n'
'two,2,5\n'
'three,3,6\n')
self.assertEqual(buf.getvalue(), expected)
def test_to_csv_line_terminators(self):
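# line_terminator controls the row separator; the default stays \n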
df = DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6]},
index=['one', 'two', 'three'])
buf = StringIO()
df.to_csv(buf, line_terminator='\r\n')
expected = (',A,B\r\n'
'one,1,4\r\n'
'two,2,5\r\n'
'three,3,6\r\n')
self.assertEqual(buf.getvalue(), expected)
buf = StringIO()
df.to_csv(buf) # The default line terminator remains \n
expected = (',A,B\n'
'one,1,4\n'
'two,2,5\n'
'three,3,6\n')
self.assertEqual(buf.getvalue(), expected)
def test_to_csv_from_csv_categorical(self):
# CSV with categoricals should result in the same output as when one would add a "normal"
# Series/DataFrame.
s = Series(pd.Categorical(['a', 'b', 'b', 'a', 'a', 'c', 'c', 'c']))
s2 = Series(['a', 'b', 'b', 'a', 'a', 'c', 'c', 'c'])
res = StringIO()
s.to_csv(res)
exp = StringIO()
s2.to_csv(exp)
self.assertEqual(res.getvalue(), exp.getvalue())
df = DataFrame({"s":s})
df2 = DataFrame({"s":s2})
res = StringIO()
df.to_csv(res)
exp = StringIO()
df2.to_csv(exp)
self.assertEqual(res.getvalue(), exp.getvalue())
def test_to_csv_path_is_none(self):
# GH 8215
# Make sure we return string for consistency with
# Series.to_csv()
csv_str = self.frame.to_csv(path=None)
self.assertIsInstance(csv_str, str)
recons = pd.read_csv(StringIO(csv_str), index_col=0)
assert_frame_equal(self.frame, recons)
def test_info(self):
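# info() should work with an explicit buffer and with stdout, verbose or not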
io = StringIO()
self.frame.info(buf=io)
self.tsframe.info(buf=io)
frame = DataFrame(np.random.randn(5, 3))
import sys
sys.stdout = StringIO()
frame.info()
frame.info(verbose=False)
sys.stdout = sys.__stdout__
def test_info_wide(self):
from pandas import set_option, reset_option
io = StringIO()
df = DataFrame(np.random.randn(5, 101))
df.info(buf=io)
io = StringIO()
df.info(buf=io, max_cols=101)
rs = io.getvalue()
self.assertTrue(len(rs.splitlines()) > 100)
xp = rs
set_option('display.max_info_columns', 101)
io = StringIO()
df.info(buf=io)
self.assertEqual(rs, xp)
reset_option('display.max_info_columns')
def test_info_duplicate_columns(self):
io = StringIO()
# it works!
frame = DataFrame(np.random.randn(1500, 4),
columns=['a', 'a', 'b', 'b'])
frame.info(buf=io)
def test_info_shows_column_dtypes(self):
dtypes = ['int64', 'float64', 'datetime64[ns]', 'timedelta64[ns]',
'complex128', 'object', 'bool']
data = {}
n = 10
for i, dtype in enumerate(dtypes):
data[i] = np.random.randint(2, size=n).astype(dtype)
df = DataFrame(data)
buf = StringIO()
df.info(buf=buf)
res = buf.getvalue()
for i, dtype in enumerate(dtypes):
name = '%d %d non-null %s' % (i, n, dtype)
assert name in res
def test_info_max_cols(self):
df = DataFrame(np.random.randn(10, 5))
for len_, verbose in [(5, None), (5, False), (10, True)]:
# with max_info_columns below the column count: verbose=None/False summarize, verbose=True shows full output
with option_context('max_info_columns', 4):
buf = StringIO()
df.info(buf=buf, verbose=verbose)
res = buf.getvalue()
self.assertEqual(len(res.strip().split('\n')), len_)
for len_, verbose in [(10, None), (5, False), (10, True)]:
# max_cols not exceeded
with option_context('max_info_columns', 5):
buf = StringIO()
df.info(buf=buf, verbose=verbose)
res = buf.getvalue()
self.assertEqual(len(res.strip().split('\n')), len_)
for len_, max_cols in [(10, 5), (5, 4)]:
# setting truncates
with option_context('max_info_columns', 4):
buf = StringIO()
df.info(buf=buf, max_cols=max_cols)
res = buf.getvalue()
self.assertEqual(len(res.strip().split('\n')), len_)
# setting wouldn't truncate
with option_context('max_info_columns', 5):
buf = StringIO()
df.info(buf=buf, max_cols=max_cols)
res = buf.getvalue()
self.assertEqual(len(res.strip().split('\n')), len_)
def test_info_memory_usage(self):
# Ensure memory usage is displayed, when requested, on the last line
dtypes = ['int64', 'float64', 'datetime64[ns]', 'timedelta64[ns]',
'complex128', 'object', 'bool']
data = {}
n = 10
for i, dtype in enumerate(dtypes):
data[i] = np.random.randint(2, size=n).astype(dtype)
df = DataFrame(data)
buf = StringIO()
# display memory usage case
df.info(buf=buf, memory_usage=True)
res = buf.getvalue().splitlines()
self.assertTrue("memory usage: " in res[-1])
# do not display memory usage case
df.info(buf=buf, memory_usage=False)
res = buf.getvalue().splitlines()
self.assertTrue("memory usage: " not in res[-1])
df.info(buf=buf, memory_usage=True)
res = buf.getvalue().splitlines()
# memory usage is a lower bound, so print it as XYZ+ MB
self.assertTrue(re.match(r"memory usage: [^+]+\+", res[-1]))
df.iloc[:, :5].info(buf=buf, memory_usage=True)
res = buf.getvalue().splitlines()
# excluded column with object dtype, so estimate is accurate
self.assertFalse(re.match(r"memory usage: [^+]+\+", res[-1]))
df_with_object_index = pd.DataFrame({'a': [1]}, index=['foo'])
df_with_object_index.info(buf=buf, memory_usage=True)
res = buf.getvalue().splitlines()
self.assertTrue(re.match(r"memory usage: [^+]+\+", res[-1]))
# Test a DataFrame with duplicate columns
dtypes = ['int64', 'int64', 'int64', 'float64']
data = {}
n = 100
for i, dtype in enumerate(dtypes):
data[i] = np.random.randint(2, size=n).astype(dtype)
df = DataFrame(data)
df.columns = dtypes
# Ensure df size is as expected
df_size = df.memory_usage().sum()
exp_size = len(dtypes) * n * 8 # cols * rows * bytes
self.assertEqual(df_size, exp_size)
# Ensure number of cols in memory_usage is the same as df
size_df = np.size(df.columns.values)  # memory_usage() defaults to index=False
self.assertEqual(size_df, np.size(df.memory_usage()))
# test for validity
DataFrame(1,index=['a'],columns=['A']).memory_usage(index=True)
DataFrame(1,index=['a'],columns=['A']).index.nbytes
DataFrame(1,index=pd.MultiIndex.from_product([['a'],range(1000)]),columns=['A']).index.nbytes
DataFrame(1,index=pd.MultiIndex.from_product([['a'],range(1000)]),columns=['A']).index.values.nbytes
DataFrame(1,index=pd.MultiIndex.from_product([['a'],range(1000)]),columns=['A']).memory_usage(index=True)
DataFrame(1,index=pd.MultiIndex.from_product([['a'],range(1000)]),columns=['A']).index.nbytes
DataFrame(1,index=pd.MultiIndex.from_product([['a'],range(1000)]),columns=['A']).index.values.nbytes
def test_dtypes(self):
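# .dtypes should report the dtype of each column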
self.mixed_frame['bool'] = self.mixed_frame['A'] > 0
result = self.mixed_frame.dtypes
expected = Series(dict((k, v.dtype)
for k, v in compat.iteritems(self.mixed_frame)),
index=result.index)
assert_series_equal(result, expected)
# compat, GH 8722
with option_context('use_inf_as_null',True):
df = DataFrame([[1]])
result = df.dtypes
assert_series_equal(result,Series({0:np.dtype('int64')}))
def test_convert_objects(self):
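# double transpose coerces to object; convert_objects should recover the original dtypes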
oops = self.mixed_frame.T.T
converted = oops.convert_objects()
assert_frame_equal(converted, self.mixed_frame)
self.assertEqual(converted['A'].dtype, np.float64)
# force numeric conversion
self.mixed_frame['H'] = '1.'
self.mixed_frame['I'] = '1'
# add in some items that will be nan
l = len(self.mixed_frame)
self.mixed_frame['J'] = '1.'
self.mixed_frame['K'] = '1'
self.mixed_frame.ix[0:5,['J','K']] = 'garbled'
converted = self.mixed_frame.convert_objects(convert_numeric=True)
self.assertEqual(converted['H'].dtype, 'float64')
self.assertEqual(converted['I'].dtype, 'int64')
self.assertEqual(converted['J'].dtype, 'float64')
self.assertEqual(converted['K'].dtype, 'float64')
self.assertEqual(len(converted['J'].dropna()), l-5)
self.assertEqual(len(converted['K'].dropna()), l-5)
# via astype
converted = self.mixed_frame.copy()
converted['H'] = converted['H'].astype('float64')
converted['I'] = converted['I'].astype('int64')
self.assertEqual(converted['H'].dtype, 'float64')
self.assertEqual(converted['I'].dtype, 'int64')
# via astype, but errors
converted = self.mixed_frame.copy()
with assertRaisesRegexp(ValueError, 'invalid literal'):
converted['H'].astype('int32')
# mixed in a single column
df = DataFrame(dict(s = Series([1, 'na', 3 ,4])))
result = df.convert_objects(convert_numeric=True)
expected = DataFrame(dict(s = Series([1, np.nan, 3 ,4])))
assert_frame_equal(result, expected)
def test_convert_objects_no_conversion(self):
mixed1 = DataFrame(
{'a': [1, 2, 3], 'b': [4.0, 5, 6], 'c': ['x', 'y', 'z']})
mixed2 = mixed1.convert_objects()
assert_frame_equal(mixed1, mixed2)
def test_append_series_dict(self):
df = DataFrame(np.random.randn(5, 4),
columns=['foo', 'bar', 'baz', 'qux'])
series = df.ix[4]
with assertRaisesRegexp(ValueError, 'Indexes have overlapping values'):
df.append(series, verify_integrity=True)
series.name = None
with assertRaisesRegexp(TypeError, 'Can only append a Series if '
'ignore_index=True'):
df.append(series, verify_integrity=True)
result = df.append(series[::-1], ignore_index=True)
expected = df.append(DataFrame({0: series[::-1]}, index=df.columns).T,
ignore_index=True)
assert_frame_equal(result, expected)
# dict
result = df.append(series.to_dict(), ignore_index=True)
assert_frame_equal(result, expected)
result = df.append(series[::-1][:3], ignore_index=True)
expected = df.append(DataFrame({0: series[::-1][:3]}).T,
ignore_index=True)
assert_frame_equal(result, expected.ix[:, result.columns])
# can append when name set
row = df.ix[4]
row.name = 5
result = df.append(row)
expected = df.append(df[-1:], ignore_index=True)
assert_frame_equal(result, expected)
def test_append_list_of_series_dicts(self):
df = DataFrame(np.random.randn(5, 4),
columns=['foo', 'bar', 'baz', 'qux'])
dicts = [x.to_dict() for idx, x in df.iterrows()]
result = df.append(dicts, ignore_index=True)
expected = df.append(df, ignore_index=True)
assert_frame_equal(result, expected)
# different columns
dicts = [{'foo': 1, 'bar': 2, 'baz': 3, 'peekaboo': 4},
{'foo': 5, 'bar': 6, 'baz': 7, 'peekaboo': 8}]
result = df.append(dicts, ignore_index=True)
expected = df.append(DataFrame(dicts), ignore_index=True)
assert_frame_equal(result, expected)
def test_append_empty_dataframe(self):
# Empty df append empty df
df1 = DataFrame([])
df2 = DataFrame([])
result = df1.append(df2)
expected = df1.copy()
assert_frame_equal(result, expected)
# Non-empty df append empty df
df1 = DataFrame(np.random.randn(5, 2))
df2 = DataFrame()
result = df1.append(df2)
expected = df1.copy()
assert_frame_equal(result, expected)
# Empty df with columns append empty df
df1 = DataFrame(columns=['bar', 'foo'])
df2 = DataFrame()
result = df1.append(df2)
expected = df1.copy()
assert_frame_equal(result, expected)
# Non-Empty df with columns append empty df
df1 = DataFrame(np.random.randn(5, 2), columns=['bar', 'foo'])
df2 = DataFrame()
result = df1.append(df2)
expected = df1.copy()
assert_frame_equal(result, expected)
def test_append_dtypes(self):
# GH 5754
# row appends of different dtypes (so need to do by-item)
# can sometimes infer the correct type
df1 = DataFrame({ 'bar' : Timestamp('20130101') }, index=lrange(5))
df2 = DataFrame()
result = df1.append(df2)
expected = df1.copy()
assert_frame_equal(result, expected)
df1 = DataFrame({ 'bar' : Timestamp('20130101') }, index=lrange(1))
df2 = DataFrame({ 'bar' : 'foo' }, index=lrange(1,2))
result = df1.append(df2)
expected = DataFrame({ 'bar' : [ Timestamp('20130101'), 'foo' ]})
assert_frame_equal(result, expected)
df1 = DataFrame({ 'bar' : Timestamp('20130101') }, index=lrange(1))
df2 = DataFrame({ 'bar' : np.nan }, index=lrange(1,2))
result = df1.append(df2)
expected = DataFrame({ 'bar' : Series([ Timestamp('20130101'), np.nan ],dtype='M8[ns]') })
assert_frame_equal(result, expected)
df1 = DataFrame({ 'bar' : Timestamp('20130101') }, index=lrange(1))
df2 = DataFrame({ 'bar' : np.nan }, index=lrange(1,2), dtype=object)
result = df1.append(df2)
expected = DataFrame({ 'bar' : Series([ Timestamp('20130101'), np.nan ],dtype='M8[ns]') })
assert_frame_equal(result, expected)
df1 = DataFrame({ 'bar' : np.nan }, index=lrange(1))
df2 = DataFrame({ 'bar' : Timestamp('20130101') }, index=lrange(1,2))
result = df1.append(df2)
expected = DataFrame({ 'bar' : Series([ np.nan, Timestamp('20130101')] ,dtype='M8[ns]') })
assert_frame_equal(result, expected)
df1 = DataFrame({ 'bar' : Timestamp('20130101') }, index=lrange(1))
df2 = DataFrame({ 'bar' : 1 }, index=lrange(1,2), dtype=object)
result = df1.append(df2)
expected = DataFrame({ 'bar' : Series([ Timestamp('20130101'), 1 ]) })
assert_frame_equal(result, expected)
def test_asfreq(self):
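# asfreq by offset object and by rule string should give the same result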
offset_monthly = self.tsframe.asfreq(datetools.bmonthEnd)
rule_monthly = self.tsframe.asfreq('BM')
assert_almost_equal(offset_monthly['A'], rule_monthly['A'])
filled = rule_monthly.asfreq('B', method='pad')
# TODO: actually check that this worked.
# don't forget!
filled_dep = rule_monthly.asfreq('B', method='pad')
# test does not blow up on length-0 DataFrame
zero_length = self.tsframe.reindex([])
result = zero_length.asfreq('BM')
self.assertIsNot(result, zero_length)
def test_asfreq_datetimeindex(self):
df = DataFrame({'A': [1, 2, 3]},
index=[datetime(2011, 11, 1), datetime(2011, 11, 2),
datetime(2011, 11, 3)])
df = df.asfreq('B')
tm.assert_isinstance(df.index, DatetimeIndex)
ts = df['A'].asfreq('B')
tm.assert_isinstance(ts.index, DatetimeIndex)
def test_at_time_between_time_datetimeindex(self):
index = date_range("2012-01-01", "2012-01-05", freq='30min')
df = DataFrame(randn(len(index), 5), index=index)
akey = time(12, 0, 0)
bkey = slice(time(13, 0, 0), time(14, 0, 0))
ainds = [24, 72, 120, 168]
binds = [26, 27, 28, 74, 75, 76, 122, 123, 124, 170, 171, 172]
result = df.at_time(akey)
expected = df.ix[akey]
expected2 = df.ix[ainds]
assert_frame_equal(result, expected)
assert_frame_equal(result, expected2)
self.assertEqual(len(result), 4)
result = df.between_time(bkey.start, bkey.stop)
expected = df.ix[bkey]
expected2 = df.ix[binds]
assert_frame_equal(result, expected)
assert_frame_equal(result, expected2)
self.assertEqual(len(result), 12)
result = df.copy()
result.ix[akey] = 0
result = result.ix[akey]
expected = df.ix[akey].copy()
expected.ix[:] = 0
assert_frame_equal(result, expected)
result = df.copy()
result.ix[akey] = 0
result.ix[akey] = df.ix[ainds]
assert_frame_equal(result, df)
result = df.copy()
result.ix[bkey] = 0
result = result.ix[bkey]
expected = df.ix[bkey].copy()
expected.ix[:] = 0
assert_frame_equal(result, expected)
result = df.copy()
result.ix[bkey] = 0
result.ix[bkey] = df.ix[binds]
assert_frame_equal(result, df)
def test_as_matrix(self):
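# as_matrix should reproduce the frame values, including NaNs and mixed dtypes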
frame = self.frame
mat = frame.as_matrix()
frameCols = frame.columns
for i, row in enumerate(mat):
for j, value in enumerate(row):
col = frameCols[j]
if np.isnan(value):
self.assertTrue(np.isnan(frame[col][i]))
else:
self.assertEqual(value, frame[col][i])
# mixed type
mat = self.mixed_frame.as_matrix(['foo', 'A'])
self.assertEqual(mat[0, 0], 'bar')
df = DataFrame({'real': [1, 2, 3], 'complex': [1j, 2j, 3j]})
mat = df.as_matrix()
self.assertEqual(mat[0, 0], 1j)
# single block corner case
mat = self.frame.as_matrix(['A', 'B'])
expected = self.frame.reindex(columns=['A', 'B']).values
assert_almost_equal(mat, expected)
def test_as_matrix_duplicates(self):
df = DataFrame([[1, 2, 'a', 'b'],
[1, 2, 'a', 'b']],
columns=['one', 'one', 'two', 'two'])
result = df.values
expected = np.array([[1, 2, 'a', 'b'], [1, 2, 'a', 'b']],
dtype=object)
self.assertTrue(np.array_equal(result, expected))
def test_ftypes(self):
frame = self.mixed_float
expected = Series(dict(A = 'float32:dense', B = 'float32:dense', C = 'float16:dense', D = 'float64:dense'))
expected.sort()
result = frame.ftypes
result.sort()
assert_series_equal(result,expected)
def test_values(self):
self.frame.values[:, 0] = 5.
self.assertTrue((self.frame.values[:, 0] == 5).all())
def test_deepcopy(self):
cp = deepcopy(self.frame)
series = cp['A']
series[:] = 10
for idx, value in compat.iteritems(series):
self.assertNotEqual(self.frame['A'][idx], value)
def test_copy(self):
cop = self.frame.copy()
cop['E'] = cop['A']
self.assertNotIn('E', self.frame)
# copy objects
copy = self.mixed_frame.copy()
self.assertIsNot(copy._data, self.mixed_frame._data)
def _check_method(self, method='pearson', check_minp=False):
if not check_minp:
correls = self.frame.corr(method=method)
exp = self.frame['A'].corr(self.frame['C'], method=method)
assert_almost_equal(correls['A']['C'], exp)
else:
result = self.frame.corr(min_periods=len(self.frame) - 8)
expected = self.frame.corr()
expected.ix['A', 'B'] = expected.ix['B', 'A'] = nan
def test_corr_pearson(self):
tm._skip_if_no_scipy()
self.frame['A'][:5] = nan
self.frame['B'][5:10] = nan
self._check_method('pearson')
def test_corr_kendall(self):
tm._skip_if_no_scipy()
self.frame['A'][:5] = nan
self.frame['B'][5:10] = nan
self._check_method('kendall')
def test_corr_spearman(self):
tm._skip_if_no_scipy()
self.frame['A'][:5] = nan
self.frame['B'][5:10] = nan
self._check_method('spearman')
def test_corr_non_numeric(self):
tm._skip_if_no_scipy()
self.frame['A'][:5] = nan
self.frame['B'][5:10] = nan
# exclude non-numeric types
result = self.mixed_frame.corr()
expected = self.mixed_frame.ix[:, ['A', 'B', 'C', 'D']].corr()
assert_frame_equal(result, expected)
def test_corr_nooverlap(self):
tm._skip_if_no_scipy()
# nothing in common
for meth in ['pearson', 'kendall', 'spearman']:
df = DataFrame({'A': [1, 1.5, 1, np.nan, np.nan, np.nan],
'B': [np.nan, np.nan, np.nan, 1, 1.5, 1]})
rs = df.corr(meth)
self.assertTrue(isnull(rs.ix['A', 'B']))
self.assertTrue(isnull(rs.ix['B', 'A']))
self.assertEqual(rs.ix['A', 'A'], 1)
self.assertEqual(rs.ix['B', 'B'], 1)
def test_corr_constant(self):
tm._skip_if_no_scipy()
# constant --> all NA
for meth in ['pearson', 'spearman']:
df = DataFrame({'A': [1, 1, 1, np.nan, np.nan, np.nan],
'B': [np.nan, np.nan, np.nan, 1, 1, 1]})
rs = df.corr(meth)
self.assertTrue(isnull(rs.values).all())
def test_corr_int(self):
# dtypes other than float64 #1761
df3 = DataFrame({"a": [1, 2, 3, 4], "b": [1, 2, 3, 4]})
# it works!
df3.cov()
df3.corr()
def test_cov(self):
# min_periods with no NAs (corner case)
expected = self.frame.cov()
result = self.frame.cov(min_periods=len(self.frame))
assert_frame_equal(expected, result)
result = self.frame.cov(min_periods=len(self.frame) + 1)
self.assertTrue(isnull(result.values).all())
# with NAs
frame = self.frame.copy()
frame['A'][:5] = nan
frame['B'][5:10] = nan
result = self.frame.cov(min_periods=len(self.frame) - 8)
expected = self.frame.cov()
expected.ix['A', 'B'] = np.nan
expected.ix['B', 'A'] = np.nan
# regular
self.frame['A'][:5] = nan
self.frame['B'][:10] = nan
cov = self.frame.cov()
assert_almost_equal(cov['A']['C'],
self.frame['A'].cov(self.frame['C']))
# exclude non-numeric types
result = self.mixed_frame.cov()
expected = self.mixed_frame.ix[:, ['A', 'B', 'C', 'D']].cov()
assert_frame_equal(result, expected)
# Single column frame
df = DataFrame(np.linspace(0.0,1.0,10))
result = df.cov()
expected = DataFrame(np.cov(df.values.T).reshape((1,1)),
index=df.columns,columns=df.columns)
assert_frame_equal(result, expected)
df.ix[0] = np.nan
result = df.cov()
expected = DataFrame(np.cov(df.values[1:].T).reshape((1,1)),
index=df.columns,columns=df.columns)
assert_frame_equal(result, expected)
def test_corrwith(self):
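# corrwith should match pairwise Series.corr along both axes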
a = self.tsframe
noise = Series(randn(len(a)), index=a.index)
b = self.tsframe + noise
# make sure order does not matter
b = b.reindex(columns=b.columns[::-1], index=b.index[::-1][10:])
del b['B']
colcorr = a.corrwith(b, axis=0)
assert_almost_equal(colcorr['A'], a['A'].corr(b['A']))
rowcorr = a.corrwith(b, axis=1)
assert_series_equal(rowcorr, a.T.corrwith(b.T, axis=0))
dropped = a.corrwith(b, axis=0, drop=True)
assert_almost_equal(dropped['A'], a['A'].corr(b['A']))
self.assertNotIn('B', dropped)
dropped = a.corrwith(b, axis=1, drop=True)
self.assertNotIn(a.index[-1], dropped.index)
# non time-series data
index = ['a', 'b', 'c', 'd', 'e']
columns = ['one', 'two', 'three', 'four']
df1 = DataFrame(randn(5, 4), index=index, columns=columns)
df2 = DataFrame(randn(4, 4), index=index[:4], columns=columns)
correls = df1.corrwith(df2, axis=1)
for row in index[:4]:
assert_almost_equal(correls[row], df1.ix[row].corr(df2.ix[row]))
def test_corrwith_with_objects(self):
df1 = tm.makeTimeDataFrame()
df2 = tm.makeTimeDataFrame()
cols = ['A', 'B', 'C', 'D']
df1['obj'] = 'foo'
df2['obj'] = 'bar'
result = df1.corrwith(df2)
expected = df1.ix[:, cols].corrwith(df2.ix[:, cols])
assert_series_equal(result, expected)
result = df1.corrwith(df2, axis=1)
expected = df1.ix[:, cols].corrwith(df2.ix[:, cols], axis=1)
assert_series_equal(result, expected)
def test_corrwith_series(self):
result = self.tsframe.corrwith(self.tsframe['A'])
expected = self.tsframe.apply(self.tsframe['A'].corr)
assert_series_equal(result, expected)
def test_corrwith_matches_corrcoef(self):
df1 = DataFrame(np.arange(10000), columns=['a'])
df2 = DataFrame(np.arange(10000)**2, columns=['a'])
c1 = df1.corrwith(df2)['a']
c2 = np.corrcoef(df1['a'],df2['a'])[0][1]
assert_almost_equal(c1, c2)
self.assertTrue(c1 < 1)
def test_drop_names(self):
df = DataFrame([[1, 2, 3],[3, 4, 5],[5, 6, 7]], index=['a', 'b', 'c'],
columns=['d', 'e', 'f'])
df.index.name, df.columns.name = 'first', 'second'
df_dropped_b = df.drop('b')
df_dropped_e = df.drop('e', axis=1)
df_inplace_b, df_inplace_e = df.copy(), df.copy()
df_inplace_b.drop('b', inplace=True)
df_inplace_e.drop('e', axis=1, inplace=True)
for obj in (df_dropped_b, df_dropped_e, df_inplace_b, df_inplace_e):
self.assertEqual(obj.index.name, 'first')
self.assertEqual(obj.columns.name, 'second')
self.assertEqual(list(df.columns), ['d', 'e', 'f'])
def test_dropEmptyRows(self):
N = len(self.frame.index)
mat = randn(N)
mat[:5] = nan
frame = DataFrame({'foo': mat}, index=self.frame.index)
original = Series(mat, index=self.frame.index)
expected = original.dropna()
inplace_frame1, inplace_frame2 = frame.copy(), frame.copy()
smaller_frame = frame.dropna(how='all')
# check that original was preserved
assert_series_equal(frame['foo'], original)
inplace_frame1.dropna(how='all', inplace=True)
assert_series_equal(smaller_frame['foo'], expected)
assert_series_equal(inplace_frame1['foo'], expected)
smaller_frame = frame.dropna(how='all', subset=['foo'])
inplace_frame2.dropna(how='all', subset=['foo'], inplace=True)
assert_series_equal(smaller_frame['foo'], expected)
assert_series_equal(inplace_frame2['foo'], expected)
def test_dropIncompleteRows(self):
N = len(self.frame.index)
mat = randn(N)
mat[:5] = nan
frame = DataFrame({'foo': mat}, index=self.frame.index)
frame['bar'] = 5
original = Series(mat, index=self.frame.index)
inp_frame1, inp_frame2 = frame.copy(), frame.copy()
smaller_frame = frame.dropna()
assert_series_equal(frame['foo'], original)
inp_frame1.dropna(inplace=True)
self.assert_numpy_array_equal(smaller_frame['foo'], mat[5:])
self.assert_numpy_array_equal(inp_frame1['foo'], mat[5:])
samesize_frame = frame.dropna(subset=['bar'])
assert_series_equal(frame['foo'], original)
self.assertTrue((frame['bar'] == 5).all())
inp_frame2.dropna(subset=['bar'], inplace=True)
self.assertTrue(samesize_frame.index.equals(self.frame.index))
self.assertTrue(inp_frame2.index.equals(self.frame.index))
def test_dropna(self):
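# dropna should honor axis, thresh, subset and how, with and without inplace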
df = DataFrame(np.random.randn(6, 4))
df[2][:2] = nan
dropped = df.dropna(axis=1)
expected = df.ix[:, [0, 1, 3]]
inp = df.copy()
inp.dropna(axis=1, inplace=True)
assert_frame_equal(dropped, expected)
assert_frame_equal(inp, expected)
dropped = df.dropna(axis=0)
expected = df.ix[lrange(2, 6)]
inp = df.copy()
inp.dropna(axis=0, inplace=True)
assert_frame_equal(dropped, expected)
assert_frame_equal(inp, expected)
# threshold
dropped = df.dropna(axis=1, thresh=5)
expected = df.ix[:, [0, 1, 3]]
inp = df.copy()
inp.dropna(axis=1, thresh=5, inplace=True)
assert_frame_equal(dropped, expected)
assert_frame_equal(inp, expected)
dropped = df.dropna(axis=0, thresh=4)
expected = df.ix[lrange(2, 6)]
inp = df.copy()
inp.dropna(axis=0, thresh=4, inplace=True)
assert_frame_equal(dropped, expected)
assert_frame_equal(inp, expected)
dropped = df.dropna(axis=1, thresh=4)
assert_frame_equal(dropped, df)
dropped = df.dropna(axis=1, thresh=3)
assert_frame_equal(dropped, df)
# subset
dropped = df.dropna(axis=0, subset=[0, 1, 3])
inp = df.copy()
inp.dropna(axis=0, subset=[0, 1, 3], inplace=True)
assert_frame_equal(dropped, df)
assert_frame_equal(inp, df)
# all
dropped = df.dropna(axis=1, how='all')
assert_frame_equal(dropped, df)
df[2] = nan
dropped = df.dropna(axis=1, how='all')
expected = df.ix[:, [0, 1, 3]]
assert_frame_equal(dropped, expected)
# bad input
self.assertRaises(ValueError, df.dropna, axis=3)
def test_drop_and_dropna_caching(self):
# test that the cacher updates
original = Series([1, 2, np.nan])
expected = Series([1, 2], dtype=original.dtype)
df = pd.DataFrame({'A': original.values.copy()})
df2 = df.copy()
df['A'].dropna()
assert_series_equal(df['A'], original)
df['A'].dropna(inplace=True)
assert_series_equal(df['A'], expected)
df2['A'].drop([1])
assert_series_equal(df2['A'], original)
df2['A'].drop([1], inplace=True)
assert_series_equal(df2['A'], original.drop([1]))
def test_dropna_corner(self):
# bad input
self.assertRaises(ValueError, self.frame.dropna, how='foo')
self.assertRaises(TypeError, self.frame.dropna, how=None)
# non-existent column - 8303
self.assertRaises(KeyError, self.frame.dropna, subset=['A','X'])
def test_dropna_multiple_axes(self):
df = DataFrame([[1, np.nan, 2, 3],
[4, np.nan, 5, 6],
[np.nan, np.nan, np.nan, np.nan],
[7, np.nan, 8, 9]])
cp = df.copy()
result = df.dropna(how='all', axis=[0, 1])
result2 = df.dropna(how='all', axis=(0, 1))
expected = df.dropna(how='all').dropna(how='all', axis=1)
assert_frame_equal(result, expected)
assert_frame_equal(result2, expected)
assert_frame_equal(df, cp)
inp = df.copy()
inp.dropna(how='all', axis=(0, 1), inplace=True)
assert_frame_equal(inp, expected)
def test_drop_duplicates(self):
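# drop_duplicates by a single column, multiple columns and all columns, keeping first or last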
df = DataFrame({'AAA': ['foo', 'bar', 'foo', 'bar',
'foo', 'bar', 'bar', 'foo'],
'B': ['one', 'one', 'two', 'two',
'two', 'two', 'one', 'two'],
'C': [1, 1, 2, 2, 2, 2, 1, 2],
'D': lrange(8)})
# single column
result = df.drop_duplicates('AAA')
expected = df[:2]
assert_frame_equal(result, expected)
result = df.drop_duplicates('AAA', take_last=True)
expected = df.ix[[6, 7]]
assert_frame_equal(result, expected)
# multi column
expected = df.ix[[0, 1, 2, 3]]
result = df.drop_duplicates(np.array(['AAA', 'B']))
assert_frame_equal(result, expected)
result = df.drop_duplicates(['AAA', 'B'])
assert_frame_equal(result, expected)
result = df.drop_duplicates(('AAA', 'B'), take_last=True)
expected = df.ix[[0, 5, 6, 7]]
assert_frame_equal(result, expected)
# consider everything
df2 = df.ix[:, ['AAA', 'B', 'C']]
result = df2.drop_duplicates()
# in this case only
expected = df2.drop_duplicates(['AAA', 'B'])
assert_frame_equal(result, expected)
result = df2.drop_duplicates(take_last=True)
expected = df2.drop_duplicates(['AAA', 'B'], take_last=True)
assert_frame_equal(result, expected)
def test_drop_duplicates_deprecated_warning(self):
df = DataFrame({'AAA': ['foo', 'bar', 'foo', 'bar',
'foo', 'bar', 'bar', 'foo'],
'B': ['one', 'one', 'two', 'two',
'two', 'two', 'one', 'two'],
'C': [1, 1, 2, 2, 2, 2, 1, 2],
'D': lrange(8)})
expected = df[:2]
# subset kwarg raises no warning; deprecated cols kwarg raises a FutureWarning
with tm.assert_produces_warning(False):
result = df.drop_duplicates(subset='AAA')
assert_frame_equal(result, expected)
with tm.assert_produces_warning(FutureWarning):
result = df.drop_duplicates(cols='AAA')
assert_frame_equal(result, expected)
# Does not allow both subset and cols
self.assertRaises(TypeError, df.drop_duplicates, subset='B', cols='AAA')
# Does not allow unknown kwargs
self.assertRaises(TypeError, df.drop_duplicates, subset='AAA', bad_arg=True)
def test_drop_duplicates_tuple(self):
df = DataFrame({('AA', 'AB'): ['foo', 'bar', 'foo', 'bar',
'foo', 'bar', 'bar', 'foo'],
'B': ['one', 'one', 'two', 'two',
'two', 'two', 'one', 'two'],
'C': [1, 1, 2, 2, 2, 2, 1, 2],
'D': lrange(8)})
# single column
result = df.drop_duplicates(('AA', 'AB'))
expected = df[:2]
assert_frame_equal(result, expected)
result = df.drop_duplicates(('AA', 'AB'), take_last=True)
expected = df.ix[[6, 7]]
assert_frame_equal(result, expected)
# multi column
expected = df.ix[[0, 1, 2, 3]]
result = df.drop_duplicates((('AA', 'AB'), 'B'))
assert_frame_equal(result, expected)
def test_drop_duplicates_NA(self):
# none
df = DataFrame({'A': [None, None, 'foo', 'bar',
'foo', 'bar', 'bar', 'foo'],
'B': ['one', 'one', 'two', 'two',
'two', 'two', 'one', 'two'],
'C': [1.0, np.nan, np.nan, np.nan, 1., 1., 1, 1.],
'D': lrange(8)})
# single column
result = df.drop_duplicates('A')
expected = df.ix[[0, 2, 3]]
assert_frame_equal(result, expected)
result = df.drop_duplicates('A', take_last=True)
expected = df.ix[[1, 6, 7]]
assert_frame_equal(result, expected)
# multi column
result = df.drop_duplicates(['A', 'B'])
expected = df.ix[[0, 2, 3, 6]]
assert_frame_equal(result, expected)
result = df.drop_duplicates(['A', 'B'], take_last=True)
expected = df.ix[[1, 5, 6, 7]]
assert_frame_equal(result, expected)
# nan
df = DataFrame({'A': ['foo', 'bar', 'foo', 'bar',
'foo', 'bar', 'bar', 'foo'],
'B': ['one', 'one', 'two', 'two',
'two', 'two', 'one', 'two'],
'C': [1.0, np.nan, np.nan, np.nan, 1., 1., 1, 1.],
'D': lrange(8)})
# single column
result = df.drop_duplicates('C')
expected = df[:2]
assert_frame_equal(result, expected)
result = df.drop_duplicates('C', take_last=True)
expected = df.ix[[3, 7]]
assert_frame_equal(result, expected)
# multi column
result = df.drop_duplicates(['C', 'B'])
expected = df.ix[[0, 1, 2, 4]]
assert_frame_equal(result, expected)
result = df.drop_duplicates(['C', 'B'], take_last=True)
expected = df.ix[[1, 3, 6, 7]]
assert_frame_equal(result, expected)
def test_drop_duplicates_inplace(self):
orig = DataFrame({'A': ['foo', 'bar', 'foo', 'bar',
'foo', 'bar', 'bar', 'foo'],
'B': ['one', 'one', 'two', 'two',
'two', 'two', 'one', 'two'],
'C': [1, 1, 2, 2, 2, 2, 1, 2],
'D': lrange(8)})
# single column
df = orig.copy()
df.drop_duplicates('A', inplace=True)
expected = orig[:2]
result = df
assert_frame_equal(result, expected)
df = orig.copy()
df.drop_duplicates('A', take_last=True, inplace=True)
expected = orig.ix[[6, 7]]
result = df
assert_frame_equal(result, expected)
# multi column
df = orig.copy()
df.drop_duplicates(['A', 'B'], inplace=True)
expected = orig.ix[[0, 1, 2, 3]]
result = df
assert_frame_equal(result, expected)
df = orig.copy()
df.drop_duplicates(['A', 'B'], take_last=True, inplace=True)
expected = orig.ix[[0, 5, 6, 7]]
result = df
assert_frame_equal(result, expected)
# consider everything
orig2 = orig.ix[:, ['A', 'B', 'C']].copy()
df2 = orig2.copy()
df2.drop_duplicates(inplace=True)
# in this case only
expected = orig2.drop_duplicates(['A', 'B'])
result = df2
assert_frame_equal(result, expected)
df2 = orig2.copy()
df2.drop_duplicates(take_last=True, inplace=True)
expected = orig2.drop_duplicates(['A', 'B'], take_last=True)
result = df2
assert_frame_equal(result, expected)
def test_duplicated_deprecated_warning(self):
df = DataFrame({'AAA': ['foo', 'bar', 'foo', 'bar',
'foo', 'bar', 'bar', 'foo'],
'B': ['one', 'one', 'two', 'two',
'two', 'two', 'one', 'two'],
'C': [1, 1, 2, 2, 2, 2, 1, 2],
'D': lrange(8)})
# subset kwarg raises no warning; deprecated cols kwarg raises a FutureWarning
with tm.assert_produces_warning(False):
result = df.duplicated(subset='AAA')
with tm.assert_produces_warning(FutureWarning):
result = df.duplicated(cols='AAA')
# Does not allow both subset and cols
self.assertRaises(TypeError, df.duplicated, subset='B', cols='AAA')
# Does not allow unknown kwargs
self.assertRaises(TypeError, df.duplicated, subset='AAA', bad_arg=True)
def test_drop_col_still_multiindex(self):
arrays = [['a', 'b', 'c', 'top'],
['', '', '', 'OD'],
['', '', '', 'wx']]
tuples = sorted(zip(*arrays))
index = MultiIndex.from_tuples(tuples)
df = DataFrame(randn(3, 4), columns=index)
del df[('a', '', '')]
assert(isinstance(df.columns, MultiIndex))
def test_drop(self):
simple = DataFrame({"A": [1, 2, 3, 4], "B": [0, 1, 2, 3]})
assert_frame_equal(simple.drop("A", axis=1), simple[['B']])
assert_frame_equal(simple.drop(["A", "B"], axis='columns'),
simple[[]])
assert_frame_equal(simple.drop([0, 1, 3], axis=0), simple.ix[[2], :])
assert_frame_equal(simple.drop([0, 3], axis='index'), simple.ix[[1, 2], :])
# non-unique column labels
nu_df = DataFrame(lzip(range(3), range(-3, 1), list('abc')),
columns=['a', 'a', 'b'])
assert_frame_equal(nu_df.drop('a', axis=1), nu_df[['b']])
assert_frame_equal(nu_df.drop('b', axis='columns'), nu_df['a'])
nu_df = nu_df.set_index(pd.Index(['X', 'Y', 'X']))
nu_df.columns = list('abc')
assert_frame_equal(nu_df.drop('X', axis='rows'), nu_df.ix[["Y"], :])
assert_frame_equal(nu_df.drop(['X', 'Y'], axis=0), nu_df.ix[[], :])
# inplace cache issue
# GH 5628
df = pd.DataFrame(np.random.randn(10,3), columns=list('abc'))
expected = df[~(df.b>0)]
df.drop(labels=df[df.b>0].index, inplace=True)
assert_frame_equal(df,expected)
def test_fillna(self):
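# fillna with a scalar value and with method='pad' on numeric and mixed frames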
self.tsframe.ix[:5,'A'] = nan
self.tsframe.ix[-5:,'A'] = nan
zero_filled = self.tsframe.fillna(0)
self.assertTrue((zero_filled.ix[:5,'A'] == 0).all())
padded = self.tsframe.fillna(method='pad')
self.assertTrue(np.isnan(padded.ix[:5,'A']).all())
self.assertTrue((padded.ix[-5:,'A'] == padded.ix[-5,'A']).all())
# mixed type
self.mixed_frame.ix[5:20,'foo'] = nan
self.mixed_frame.ix[-10:,'A'] = nan
result = self.mixed_frame.fillna(value=0)
result = self.mixed_frame.fillna(method='pad')
self.assertRaises(ValueError, self.tsframe.fillna)
self.assertRaises(ValueError, self.tsframe.fillna, 5, method='ffill')
# mixed numeric (but no float16)
mf = self.mixed_float.reindex(columns=['A','B','D'])
mf.ix[-10:,'A'] = nan
result = mf.fillna(value=0)
_check_mixed_float(result, dtype = dict(C = None))
result = mf.fillna(method='pad')
_check_mixed_float(result, dtype = dict(C = None))
# empty frame (GH #2778)
df = DataFrame(columns=['x'])
for m in ['pad','backfill']:
df.x.fillna(method=m, inplace=True)
df.x.fillna(method=m)
# with different dtype (GH3386)
df = DataFrame([['a','a',np.nan,'a'],['b','b',np.nan,'b'],['c','c',np.nan,'c']])
result = df.fillna({ 2: 'foo' })
expected = DataFrame([['a','a','foo','a'],['b','b','foo','b'],['c','c','foo','c']])
assert_frame_equal(result, expected)
df.fillna({ 2: 'foo' }, inplace=True)
assert_frame_equal(df, expected)
# limit and value
df = DataFrame(np.random.randn(10,3))
df.iloc[2:7,0] = np.nan
df.iloc[3:5,2] = np.nan
expected = df.copy()
expected.iloc[2,0] = 999
expected.iloc[3,2] = 999
result = df.fillna(999,limit=1)
assert_frame_equal(result, expected)
# with datelike
# GH 6344
df = DataFrame({
'Date':[pd.NaT, Timestamp("2014-1-1")],
'Date2':[ Timestamp("2013-1-1"), pd.NaT]
})
expected = df.copy()
expected['Date'] = expected['Date'].fillna(df.ix[0,'Date2'])
result = df.fillna(value={'Date':df['Date2']})
assert_frame_equal(result, expected)
def test_fillna_dtype_conversion(self):
# make sure that fillna on an empty frame works
df = DataFrame(index=["A","B","C"], columns = [1,2,3,4,5])
result = df.get_dtype_counts().order()
expected = Series({ 'object' : 5 })
assert_series_equal(result, expected)
result = df.fillna(1)
expected = DataFrame(1, index=["A","B","C"], columns = [1,2,3,4,5])
result = result.get_dtype_counts().order()
expected = Series({ 'int64' : 5 })
assert_series_equal(result, expected)
# empty block
df = DataFrame(index=lrange(3),columns=['A','B'],dtype='float64')
result = df.fillna('nan')
expected = DataFrame('nan',index=lrange(3),columns=['A','B'])
assert_frame_equal(result, expected)
# equiv of replace
df = DataFrame(dict(A = [1,np.nan], B = [1.,2.]))
for v in ['',1,np.nan,1.0]:
expected = df.replace(np.nan,v)
result = df.fillna(v)
assert_frame_equal(result, expected)
def test_ffill(self):
self.tsframe['A'][:5] = nan
self.tsframe['A'][-5:] = nan
assert_frame_equal(self.tsframe.ffill(),
self.tsframe.fillna(method='ffill'))
def test_bfill(self):
self.tsframe['A'][:5] = nan
self.tsframe['A'][-5:] = nan
assert_frame_equal(self.tsframe.bfill(),
self.tsframe.fillna(method='bfill'))
def test_fillna_skip_certain_blocks(self):
# don't try to fill boolean, int blocks
df = DataFrame(np.random.randn(10, 4).astype(int))
# it works!
df.fillna(np.nan)
def test_fillna_inplace(self):
df = DataFrame(np.random.randn(10, 4))
df[1][:4] = np.nan
df[3][-4:] = np.nan
expected = df.fillna(value=0)
self.assertIsNot(expected, df)
df.fillna(value=0, inplace=True)
assert_frame_equal(df, expected)
df[1][:4] = np.nan
df[3][-4:] = np.nan
expected = df.fillna(method='ffill')
self.assertIsNot(expected, df)
df.fillna(method='ffill', inplace=True)
assert_frame_equal(df, expected)
def test_fillna_dict_series(self):
df = DataFrame({'a': [nan, 1, 2, nan, nan],
'b': [1, 2, 3, nan, nan],
'c': [nan, 1, 2, 3, 4]})
result = df.fillna({'a': 0, 'b': 5})
expected = df.copy()
expected['a'] = expected['a'].fillna(0)
expected['b'] = expected['b'].fillna(5)
assert_frame_equal(result, expected)
# it works
result = df.fillna({'a': 0, 'b': 5, 'd': 7})
# Series treated same as dict
result = df.fillna(df.max())
expected = df.fillna(df.max().to_dict())
assert_frame_equal(result, expected)
# disable this for now
with assertRaisesRegexp(NotImplementedError, 'column by column'):
df.fillna(df.max(1), axis=1)
def test_fillna_dataframe(self):
# GH 8377
df = DataFrame({'a': [nan, 1, 2, nan, nan],
'b': [1, 2, 3, nan, nan],
'c': [nan, 1, 2, 3, 4]},
index = list('VWXYZ'))
# df2 may have different index and columns
df2 = DataFrame({'a': [nan, 10, 20, 30, 40],
'b': [50, 60, 70, 80, 90],
'foo': ['bar']*5},
index = list('VWXuZ'))
result = df.fillna(df2)
# only those columns and indices which are shared get filled
expected = DataFrame({'a': [nan, 1, 2, nan, 40],
'b': [1, 2, 3, nan, 90],
'c': [nan, 1, 2, 3, 4]},
index = list('VWXYZ'))
assert_frame_equal(result, expected)
def test_fillna_columns(self):
df = DataFrame(np.random.randn(10, 10))
df.values[:, ::2] = np.nan
result = df.fillna(method='ffill', axis=1)
expected = df.T.fillna(method='pad').T
assert_frame_equal(result, expected)
df.insert(6, 'foo', 5)
result = df.fillna(method='ffill', axis=1)
expected = df.astype(float).fillna(method='ffill', axis=1)
assert_frame_equal(result, expected)
def test_fillna_invalid_method(self):
with assertRaisesRegexp(ValueError, 'ffil'):
self.frame.fillna(method='ffil')
def test_fillna_invalid_value(self):
# list
self.assertRaises(TypeError, self.frame.fillna, [1, 2])
# tuple
self.assertRaises(TypeError, self.frame.fillna, (1, 2))
# frame with series
self.assertRaises(ValueError, self.frame.iloc[:,0].fillna, self.frame)
def test_replace_inplace(self):
self.tsframe['A'][:5] = nan
self.tsframe['A'][-5:] = nan
tsframe = self.tsframe.copy()
tsframe.replace(nan, 0, inplace=True)
assert_frame_equal(tsframe, self.tsframe.fillna(0))
self.assertRaises(TypeError, self.tsframe.replace, nan, inplace=True)
self.assertRaises(TypeError, self.tsframe.replace, nan)
# mixed type
self.mixed_frame.ix[5:20,'foo'] = nan
self.mixed_frame.ix[-10:,'A'] = nan
result = self.mixed_frame.replace(np.nan, 0)
expected = self.mixed_frame.fillna(value=0)
assert_frame_equal(result, expected)
tsframe = self.tsframe.copy()
tsframe.replace([nan], [0], inplace=True)
assert_frame_equal(tsframe, self.tsframe.fillna(0))
def test_regex_replace_scalar(self):
obj = {'a': list('ab..'), 'b': list('efgh')}
dfobj = DataFrame(obj)
mix = {'a': lrange(4), 'b': list('ab..')}
dfmix = DataFrame(mix)
### simplest cases
## regex -> value
# obj frame
res = dfobj.replace(r'\s*\.\s*', nan, regex=True)
assert_frame_equal(dfobj, res.fillna('.'))
# mixed
res = dfmix.replace(r'\s*\.\s*', nan, regex=True)
assert_frame_equal(dfmix, res.fillna('.'))
## regex -> regex
# obj frame
res = dfobj.replace(r'\s*(\.)\s*', r'\1\1\1', regex=True)
objc = obj.copy()
objc['a'] = ['a', 'b', '...', '...']
expec = DataFrame(objc)
assert_frame_equal(res, expec)
# with mixed
res = dfmix.replace(r'\s*(\.)\s*', r'\1\1\1', regex=True)
mixc = mix.copy()
mixc['b'] = ['a', 'b', '...', '...']
expec = DataFrame(mixc)
assert_frame_equal(res, expec)
# everything with compiled regexs as well
res = dfobj.replace(re.compile(r'\s*\.\s*'), nan, regex=True)
assert_frame_equal(dfobj, res.fillna('.'))
# mixed
res = dfmix.replace(re.compile(r'\s*\.\s*'), nan, regex=True)
assert_frame_equal(dfmix, res.fillna('.'))
## regex -> regex
# obj frame
res = dfobj.replace(re.compile(r'\s*(\.)\s*'), r'\1\1\1')
objc = obj.copy()
objc['a'] = ['a', 'b', '...', '...']
expec = DataFrame(objc)
assert_frame_equal(res, expec)
# with mixed
res = dfmix.replace(re.compile(r'\s*(\.)\s*'), r'\1\1\1')
mixc = mix.copy()
mixc['b'] = ['a', 'b', '...', '...']
expec = DataFrame(mixc)
assert_frame_equal(res, expec)
res = dfmix.replace(regex=re.compile(r'\s*(\.)\s*'), value=r'\1\1\1')
mixc = mix.copy()
mixc['b'] = ['a', 'b', '...', '...']
expec = DataFrame(mixc)
assert_frame_equal(res, expec)
res = dfmix.replace(regex=r'\s*(\.)\s*', value=r'\1\1\1')
mixc = mix.copy()
mixc['b'] = ['a', 'b', '...', '...']
expec = DataFrame(mixc)
assert_frame_equal(res, expec)
def test_regex_replace_scalar_inplace(self):
obj = {'a': list('ab..'), 'b': list('efgh')}
dfobj = DataFrame(obj)
mix = {'a': lrange(4), 'b': list('ab..')}
dfmix = DataFrame(mix)
### simplest cases
## regex -> value
# obj frame
res = dfobj.copy()
res.replace(r'\s*\.\s*', nan, regex=True, inplace=True)
assert_frame_equal(dfobj, res.fillna('.'))
# mixed
res = dfmix.copy()
res.replace(r'\s*\.\s*', nan, regex=True, inplace=True)
assert_frame_equal(dfmix, res.fillna('.'))
## regex -> regex
# obj frame
res = dfobj.copy()
res.replace(r'\s*(\.)\s*', r'\1\1\1', regex=True, inplace=True)
objc = obj.copy()
objc['a'] = ['a', 'b', '...', '...']
expec = DataFrame(objc)
assert_frame_equal(res, expec)
# with mixed
res = dfmix.copy()
res.replace(r'\s*(\.)\s*', r'\1\1\1', regex=True, inplace=True)
mixc = mix.copy()
mixc['b'] = ['a', 'b', '...', '...']
expec = DataFrame(mixc)
assert_frame_equal(res, expec)
# everything with compiled regexs as well
res = dfobj.copy()
res.replace(re.compile(r'\s*\.\s*'), nan, regex=True, inplace=True)
assert_frame_equal(dfobj, res.fillna('.'))
# mixed
res = dfmix.copy()
res.replace(re.compile(r'\s*\.\s*'), nan, regex=True, inplace=True)
assert_frame_equal(dfmix, res.fillna('.'))
## regex -> regex
# obj frame
res = dfobj.copy()
res.replace(re.compile(r'\s*(\.)\s*'), r'\1\1\1', regex=True,
inplace=True)
objc = obj.copy()
objc['a'] = ['a', 'b', '...', '...']
expec = DataFrame(objc)
assert_frame_equal(res, expec)
# with mixed
res = dfmix.copy()
res.replace(re.compile(r'\s*(\.)\s*'), r'\1\1\1', regex=True,
inplace=True)
mixc = mix.copy()
mixc['b'] = ['a', 'b', '...', '...']
expec = DataFrame(mixc)
assert_frame_equal(res, expec)
res = dfobj.copy()
res.replace(regex=r'\s*\.\s*', value=nan, inplace=True)
assert_frame_equal(dfobj, res.fillna('.'))
# mixed
res = dfmix.copy()
res.replace(regex=r'\s*\.\s*', value=nan, inplace=True)
assert_frame_equal(dfmix, res.fillna('.'))
## regex -> regex
# obj frame
res = dfobj.copy()
res.replace(regex=r'\s*(\.)\s*', value=r'\1\1\1', inplace=True)
objc = obj.copy()
objc['a'] = ['a', 'b', '...', '...']
expec = DataFrame(objc)
assert_frame_equal(res, expec)
# with mixed
res = dfmix.copy()
res.replace(regex=r'\s*(\.)\s*', value=r'\1\1\1', inplace=True)
mixc = mix.copy()
mixc['b'] = ['a', 'b', '...', '...']
expec = DataFrame(mixc)
assert_frame_equal(res, expec)
# everything with compiled regexs as well
res = dfobj.copy()
res.replace(regex=re.compile(r'\s*\.\s*'), value=nan, inplace=True)
assert_frame_equal(dfobj, res.fillna('.'))
# mixed
res = dfmix.copy()
res.replace(regex=re.compile(r'\s*\.\s*'), value=nan, inplace=True)
assert_frame_equal(dfmix, res.fillna('.'))
## regex -> regex
# obj frame
res = dfobj.copy()
res.replace(regex=re.compile(r'\s*(\.)\s*'), value=r'\1\1\1',
inplace=True)
objc = obj.copy()
objc['a'] = ['a', 'b', '...', '...']
expec = DataFrame(objc)
assert_frame_equal(res, expec)
# with mixed
res = dfmix.copy()
res.replace(regex=re.compile(r'\s*(\.)\s*'), value=r'\1\1\1',
inplace=True)
mixc = mix.copy()
mixc['b'] = ['a', 'b', '...', '...']
expec = DataFrame(mixc)
assert_frame_equal(res, expec)
def test_regex_replace_list_obj(self):
obj = {'a': list('ab..'), 'b': list('efgh'), 'c': list('helo')}
dfobj = DataFrame(obj)
## lists of regexes and values
# list of [re1, re2, ..., reN] -> [v1, v2, ..., vN]
to_replace_res = [r'\s*\.\s*', r'e|f|g']
values = [nan, 'crap']
res = dfobj.replace(to_replace_res, values, regex=True)
expec = DataFrame({'a': ['a', 'b', nan, nan], 'b': ['crap'] * 3 +
['h'], 'c': ['h', 'crap', 'l', 'o']})
assert_frame_equal(res, expec)
# list of [re1, re2, ..., reN] -> [re1, re2, .., reN]
to_replace_res = [r'\s*(\.)\s*', r'(e|f|g)']
values = [r'\1\1', r'\1_crap']
res = dfobj.replace(to_replace_res, values, regex=True)
expec = DataFrame({'a': ['a', 'b', '..', '..'], 'b': ['e_crap',
'f_crap',
'g_crap', 'h'],
'c': ['h', 'e_crap', 'l', 'o']})
assert_frame_equal(res, expec)
# list of [re1, re2, ..., reN] -> [(re1 or v1), (re2 or v2), ..., (reN
# or vN)]
to_replace_res = [r'\s*(\.)\s*', r'e']
values = [r'\1\1', r'crap']
res = dfobj.replace(to_replace_res, values, regex=True)
expec = DataFrame({'a': ['a', 'b', '..', '..'], 'b': ['crap', 'f', 'g',
'h'],
'c': ['h', 'crap', 'l', 'o']})
assert_frame_equal(res, expec)
to_replace_res = [r'\s*(\.)\s*', r'e']
values = [r'\1\1', r'crap']
res = dfobj.replace(value=values, regex=to_replace_res)
expec = DataFrame({'a': ['a', 'b', '..', '..'], 'b': ['crap', 'f', 'g',
'h'],
'c': ['h', 'crap', 'l', 'o']})
assert_frame_equal(res, expec)
def test_regex_replace_list_obj_inplace(self):
### same as above with inplace=True
## lists of regexes and values
obj = {'a': list('ab..'), 'b': list('efgh'), 'c': list('helo')}
dfobj = DataFrame(obj)
## lists of regexes and values
# list of [re1, re2, ..., reN] -> [v1, v2, ..., vN]
to_replace_res = [r'\s*\.\s*', r'e|f|g']
values = [nan, 'crap']
res = dfobj.copy()
res.replace(to_replace_res, values, inplace=True, regex=True)
expec = DataFrame({'a': ['a', 'b', nan, nan], 'b': ['crap'] * 3 +
['h'], 'c': ['h', 'crap', 'l', 'o']})
assert_frame_equal(res, expec)
# list of [re1, re2, ..., reN] -> [re1, re2, .., reN]
to_replace_res = [r'\s*(\.)\s*', r'(e|f|g)']
values = [r'\1\1', r'\1_crap']
res = dfobj.copy()
res.replace(to_replace_res, values, inplace=True, regex=True)
expec = DataFrame({'a': ['a', 'b', '..', '..'], 'b': ['e_crap',
'f_crap',
'g_crap', 'h'],
'c': ['h', 'e_crap', 'l', 'o']})
assert_frame_equal(res, expec)
# list of [re1, re2, ..., reN] -> [(re1 or v1), (re2 or v2), ..., (reN
# or vN)]
to_replace_res = [r'\s*(\.)\s*', r'e']
values = [r'\1\1', r'crap']
res = dfobj.copy()
res.replace(to_replace_res, values, inplace=True, regex=True)
expec = DataFrame({'a': ['a', 'b', '..', '..'], 'b': ['crap', 'f', 'g',
'h'],
'c': ['h', 'crap', 'l', 'o']})
assert_frame_equal(res, expec)
to_replace_res = [r'\s*(\.)\s*', r'e']
values = [r'\1\1', r'crap']
res = dfobj.copy()
res.replace(value=values, regex=to_replace_res, inplace=True)
expec = DataFrame({'a': ['a', 'b', '..', '..'], 'b': ['crap', 'f', 'g',
'h'],
'c': ['h', 'crap', 'l', 'o']})
assert_frame_equal(res, expec)
def test_regex_replace_list_mixed(self):
## mixed frame to make sure this doesn't break things
mix = {'a': lrange(4), 'b': list('ab..')}
dfmix = DataFrame(mix)
## lists of regexes and values
# list of [re1, re2, ..., reN] -> [v1, v2, ..., vN]
to_replace_res = [r'\s*\.\s*', r'a']
values = [nan, 'crap']
mix2 = {'a': lrange(4), 'b': list('ab..'), 'c': list('halo')}
dfmix2 = DataFrame(mix2)
res = dfmix2.replace(to_replace_res, values, regex=True)
expec = DataFrame({'a': mix2['a'], 'b': ['crap', 'b', nan, nan],
'c': ['h', 'crap', 'l', 'o']})
assert_frame_equal(res, expec)
# list of [re1, re2, ..., reN] -> [re1, re2, .., reN]
to_replace_res = [r'\s*(\.)\s*', r'(a|b)']
values = [r'\1\1', r'\1_crap']
res = dfmix.replace(to_replace_res, values, regex=True)
expec = DataFrame({'a': mix['a'], 'b': ['a_crap', 'b_crap', '..',
'..']})
assert_frame_equal(res, expec)
# list of [re1, re2, ..., reN] -> [(re1 or v1), (re2 or v2), ..., (reN
# or vN)]
to_replace_res = [r'\s*(\.)\s*', r'a', r'(b)']
values = [r'\1\1', r'crap', r'\1_crap']
res = dfmix.replace(to_replace_res, values, regex=True)
expec = DataFrame({'a': mix['a'], 'b': ['crap', 'b_crap', '..', '..']})
assert_frame_equal(res, expec)
to_replace_res = [r'\s*(\.)\s*', r'a', r'(b)']
values = [r'\1\1', r'crap', r'\1_crap']
res = dfmix.replace(regex=to_replace_res, value=values)
expec = DataFrame({'a': mix['a'], 'b': ['crap', 'b_crap', '..', '..']})
assert_frame_equal(res, expec)
def test_regex_replace_list_mixed_inplace(self):
mix = {'a': lrange(4), 'b': list('ab..')}
dfmix = DataFrame(mix)
# the same inplace
## lists of regexes and values
# list of [re1, re2, ..., reN] -> [v1, v2, ..., vN]
to_replace_res = [r'\s*\.\s*', r'a']
values = [nan, 'crap']
res = dfmix.copy()
res.replace(to_replace_res, values, inplace=True, regex=True)
expec = DataFrame({'a': mix['a'], 'b': ['crap', 'b', nan, nan]})
assert_frame_equal(res, expec)
# list of [re1, re2, ..., reN] -> [re1, re2, .., reN]
to_replace_res = [r'\s*(\.)\s*', r'(a|b)']
values = [r'\1\1', r'\1_crap']
res = dfmix.copy()
res.replace(to_replace_res, values, inplace=True, regex=True)
expec = DataFrame({'a': mix['a'], 'b': ['a_crap', 'b_crap', '..',
'..']})
assert_frame_equal(res, expec)
# list of [re1, re2, ..., reN] -> [(re1 or v1), (re2 or v2), ..., (reN
# or vN)]
to_replace_res = [r'\s*(\.)\s*', r'a', r'(b)']
values = [r'\1\1', r'crap', r'\1_crap']
res = dfmix.copy()
res.replace(to_replace_res, values, inplace=True, regex=True)
expec = DataFrame({'a': mix['a'], 'b': ['crap', 'b_crap', '..', '..']})
assert_frame_equal(res, expec)
to_replace_res = [r'\s*(\.)\s*', r'a', r'(b)']
values = [r'\1\1', r'crap', r'\1_crap']
res = dfmix.copy()
res.replace(regex=to_replace_res, value=values, inplace=True)
expec = DataFrame({'a': mix['a'], 'b': ['crap', 'b_crap', '..', '..']})
assert_frame_equal(res, expec)
def test_regex_replace_dict_mixed(self):
mix = {'a': lrange(4), 'b': list('ab..'), 'c': ['a', 'b', nan, 'd']}
dfmix = DataFrame(mix)
## dicts
# single dict {re1: v1}, search the whole frame
# need test for this...
# list of dicts {re1: v1, re2: v2, ..., re3: v3}, search the whole
# frame
res = dfmix.replace({'b': r'\s*\.\s*'}, {'b': nan}, regex=True)
res2 = dfmix.copy()
res2.replace({'b': r'\s*\.\s*'}, {'b': nan}, inplace=True, regex=True)
expec = DataFrame({'a': mix['a'], 'b': ['a', 'b', nan, nan], 'c':
mix['c']})
assert_frame_equal(res, expec)
assert_frame_equal(res2, expec)
# list of dicts {re1: re11, re2: re12, ..., reN: re1N}, search the
# whole frame
res = dfmix.replace({'b': r'\s*(\.)\s*'}, {'b': r'\1ty'}, regex=True)
res2 = dfmix.copy()
res2.replace({'b': r'\s*(\.)\s*'}, {'b': r'\1ty'}, inplace=True,
regex=True)
expec = DataFrame({'a': mix['a'], 'b': ['a', 'b', '.ty', '.ty'], 'c':
mix['c']})
assert_frame_equal(res, expec)
assert_frame_equal(res2, expec)
res = dfmix.replace(regex={'b': r'\s*(\.)\s*'}, value={'b': r'\1ty'})
res2 = dfmix.copy()
res2.replace(regex={'b': r'\s*(\.)\s*'}, value={'b': r'\1ty'},
inplace=True)
expec = DataFrame({'a': mix['a'], 'b': ['a', 'b', '.ty', '.ty'], 'c':
mix['c']})
assert_frame_equal(res, expec)
assert_frame_equal(res2, expec)
# scalar -> dict
# to_replace regex, {value: value}
expec = DataFrame({'a': mix['a'], 'b': [nan, 'b', '.', '.'], 'c':
mix['c']})
res = dfmix.replace('a', {'b': nan}, regex=True)
res2 = dfmix.copy()
res2.replace('a', {'b': nan}, regex=True, inplace=True)
assert_frame_equal(res, expec)
assert_frame_equal(res2, expec)
res = dfmix.replace('a', {'b': nan}, regex=True)
res2 = dfmix.copy()
res2.replace(regex='a', value={'b': nan}, inplace=True)
expec = DataFrame({'a': mix['a'], 'b': [nan, 'b', '.', '.'], 'c':
mix['c']})
assert_frame_equal(res, expec)
assert_frame_equal(res2, expec)
def test_regex_replace_dict_nested(self):
# nested dicts will not work until this is implemented for Series
mix = {'a': lrange(4), 'b': list('ab..'), 'c': ['a', 'b', nan, 'd']}
dfmix = DataFrame(mix)
res = dfmix.replace({'b': {r'\s*\.\s*': nan}}, regex=True)
res2 = dfmix.copy()
res4 = dfmix.copy()
res2.replace({'b': {r'\s*\.\s*': nan}}, inplace=True, regex=True)
res3 = dfmix.replace(regex={'b': {r'\s*\.\s*': nan}})
res4.replace(regex={'b': {r'\s*\.\s*': nan}}, inplace=True)
expec = DataFrame({'a': mix['a'], 'b': ['a', 'b', nan, nan], 'c':
mix['c']})
assert_frame_equal(res, expec)
assert_frame_equal(res2, expec)
assert_frame_equal(res3, expec)
assert_frame_equal(res4, expec)
def test_regex_replace_dict_nested_gh4115(self):
df = pd.DataFrame({'Type':['Q','T','Q','Q','T'], 'tmp':2})
expected = DataFrame({'Type': [0,1,0,0,1], 'tmp': 2})
assert_frame_equal(df.replace({'Type': {'Q':0,'T':1}}), expected)
def test_regex_replace_list_to_scalar(self):
mix = {'a': lrange(4), 'b': list('ab..'), 'c': ['a', 'b', nan, 'd']}
df = DataFrame(mix)
res = df.replace([r'\s*\.\s*', 'a|b'], nan, regex=True)
res2 = df.copy()
res3 = df.copy()
res2.replace([r'\s*\.\s*', 'a|b'], nan, regex=True, inplace=True)
res3.replace(regex=[r'\s*\.\s*', 'a|b'], value=nan, inplace=True)
expec = DataFrame({'a': mix['a'], 'b': np.array([nan] * 4),
'c': [nan, nan, nan, 'd']})
assert_frame_equal(res, expec)
assert_frame_equal(res2, expec)
assert_frame_equal(res3, expec)
def test_regex_replace_str_to_numeric(self):
# what happens when you try to replace a numeric value with a regex?
mix = {'a': lrange(4), 'b': list('ab..'), 'c': ['a', 'b', nan, 'd']}
df = DataFrame(mix)
res = df.replace(r'\s*\.\s*', 0, regex=True)
res2 = df.copy()
res2.replace(r'\s*\.\s*', 0, inplace=True, regex=True)
res3 = df.copy()
res3.replace(regex=r'\s*\.\s*', value=0, inplace=True)
expec = DataFrame({'a': mix['a'], 'b': ['a', 'b', 0, 0], 'c':
mix['c']})
assert_frame_equal(res, expec)
assert_frame_equal(res2, expec)
assert_frame_equal(res3, expec)
def test_regex_replace_regex_list_to_numeric(self):
mix = {'a': lrange(4), 'b': list('ab..'), 'c': ['a', 'b', nan, 'd']}
df = DataFrame(mix)
res = df.replace([r'\s*\.\s*', 'b'], 0, regex=True)
res2 = df.copy()
res2.replace([r'\s*\.\s*', 'b'], 0, regex=True, inplace=True)
res3 = df.copy()
res3.replace(regex=[r'\s*\.\s*', 'b'], value=0, inplace=True)
expec = DataFrame({'a': mix['a'], 'b': ['a', 0, 0, 0], 'c': ['a', 0,
nan,
'd']})
assert_frame_equal(res, expec)
assert_frame_equal(res2, expec)
assert_frame_equal(res3, expec)
def test_regex_replace_series_of_regexes(self):
mix = {'a': lrange(4), 'b': list('ab..'), 'c': ['a', 'b', nan, 'd']}
df = DataFrame(mix)
s1 = Series({'b': r'\s*\.\s*'})
s2 = Series({'b': nan})
res = df.replace(s1, s2, regex=True)
res2 = df.copy()
res2.replace(s1, s2, inplace=True, regex=True)
res3 = df.copy()
res3.replace(regex=s1, value=s2, inplace=True)
expec = DataFrame({'a': mix['a'], 'b': ['a', 'b', nan, nan], 'c':
mix['c']})
assert_frame_equal(res, expec)
assert_frame_equal(res2, expec)
assert_frame_equal(res3, expec)
def test_regex_replace_numeric_to_object_conversion(self):
mix = {'a': lrange(4), 'b': list('ab..'), 'c': ['a', 'b', nan, 'd']}
df = DataFrame(mix)
res = df.replace(0, 'a')
expec = DataFrame({'a': ['a', 1, 2, 3], 'b': mix['b'], 'c': mix['c']})
assert_frame_equal(res, expec)
self.assertEqual(res.a.dtype, np.object_)
def test_replace_regex_metachar(self):
metachars = '[]', '()', r'\d', r'\w', r'\s'
for metachar in metachars:
df = DataFrame({'a': [metachar, 'else']})
result = df.replace({'a': {metachar: 'paren'}})
expected = DataFrame({'a': ['paren', 'else']})
tm.assert_frame_equal(result, expected)
def test_replace(self):
self.tsframe['A'][:5] = nan
self.tsframe['A'][-5:] = nan
zero_filled = self.tsframe.replace(nan, -1e8)
assert_frame_equal(zero_filled, self.tsframe.fillna(-1e8))
assert_frame_equal(zero_filled.replace(-1e8, nan), self.tsframe)
self.tsframe['A'][:5] = nan
self.tsframe['A'][-5:] = nan
self.tsframe['B'][:5] = -1e8
# empty
df = DataFrame(index=['a', 'b'])
assert_frame_equal(df, df.replace(5, 7))
def test_replace_list(self):
obj = {'a': list('ab..'), 'b': list('efgh'), 'c': list('helo')}
dfobj = DataFrame(obj)
## lists of regexes and values
# list of [v1, v2, ..., vN] -> [v1, v2, ..., vN]
to_replace_res = [r'.', r'e']
values = [nan, 'crap']
res = dfobj.replace(to_replace_res, values)
expec = DataFrame({'a': ['a', 'b', nan, nan],
'b': ['crap', 'f', 'g', 'h'], 'c': ['h', 'crap',
'l', 'o']})
assert_frame_equal(res, expec)
# list of [v1, v2, ..., vN] -> [v1, v2, .., vN]
to_replace_res = [r'.', r'f']
values = [r'..', r'crap']
res = dfobj.replace(to_replace_res, values)
expec = DataFrame({'a': ['a', 'b', '..', '..'], 'b': ['e', 'crap', 'g',
'h'],
'c': ['h', 'e', 'l', 'o']})
assert_frame_equal(res, expec)
def test_replace_series_dict(self):
# from GH 3064
df = DataFrame({'zero': {'a': 0.0, 'b': 1}, 'one': {'a': 2.0, 'b': 0}})
result = df.replace(0, {'zero': 0.5, 'one': 1.0})
expected = DataFrame({'zero': {'a': 0.5, 'b': 1}, 'one': {'a': 2.0, 'b': 1.0}})
assert_frame_equal(result, expected)
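# df.mean() equals the dict above ({'zero': 0.5, 'one': 1.0}), so a Series of
# per-column replacement values should behave the same way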
result = df.replace(0, df.mean())
assert_frame_equal(result, expected)
# series to series/dict
df = DataFrame({'zero': {'a': 0.0, 'b': 1}, 'one': {'a': 2.0, 'b': 0}})
s = Series({'zero': 0.0, 'one': 2.0})
result = df.replace(s, {'zero': 0.5, 'one': 1.0})
expected = DataFrame({'zero': {'a': 0.5, 'b': 1}, 'one': {'a': 1.0, 'b': 0.0}})
assert_frame_equal(result, expected)
result = df.replace(s, df.mean())
assert_frame_equal(result, expected)
def test_replace_convert(self):
# gh 3907
df = DataFrame([['foo', 'bar', 'bah'], ['bar', 'foo', 'bah']])
m = {'foo': 1, 'bar': 2, 'bah': 3}
rep = df.replace(m)
expec = Series([ np.int64] * 3)
res = rep.dtypes
assert_series_equal(expec, res)
def test_replace_mixed(self):
self.mixed_frame.ix[5:20,'foo'] = nan
self.mixed_frame.ix[-10:,'A'] = nan
result = self.mixed_frame.replace(np.nan, -18)
expected = self.mixed_frame.fillna(value=-18)
assert_frame_equal(result, expected)
assert_frame_equal(result.replace(-18, nan), self.mixed_frame)
result = self.mixed_frame.replace(np.nan, -1e8)
expected = self.mixed_frame.fillna(value=-1e8)
assert_frame_equal(result, expected)
assert_frame_equal(result.replace(-1e8, nan), self.mixed_frame)
# int block upcasting
df = DataFrame({ 'A' : Series([1.0,2.0],dtype='float64'), 'B' : Series([0,1],dtype='int64') })
expected = DataFrame({ 'A' : Series([1.0,2.0],dtype='float64'), 'B' : Series([0.5,1],dtype='float64') })
result = df.replace(0, 0.5)
assert_frame_equal(result,expected)
df.replace(0, 0.5, inplace=True)
assert_frame_equal(df,expected)
# int block splitting
df = DataFrame({ 'A' : Series([1.0,2.0],dtype='float64'), 'B' : Series([0,1],dtype='int64'), 'C' : Series([1,2],dtype='int64') })
expected = DataFrame({ 'A' : Series([1.0,2.0],dtype='float64'), 'B' : Series([0.5,1],dtype='float64'), 'C' : Series([1,2],dtype='int64') })
result = df.replace(0, 0.5)
assert_frame_equal(result,expected)
# to object block upcasting
df = DataFrame({ 'A' : Series([1.0,2.0],dtype='float64'), 'B' : Series([0,1],dtype='int64') })
expected = DataFrame({ 'A' : Series([1,'foo'],dtype='object'), 'B' : Series([0,1],dtype='int64') })
result = df.replace(2, 'foo')
assert_frame_equal(result,expected)
expected = DataFrame({ 'A' : Series(['foo','bar'],dtype='object'), 'B' : Series([0,'foo'],dtype='object') })
result = df.replace([1,2], ['foo','bar'])
assert_frame_equal(result,expected)
# test case from
from pandas.util.testing import makeCustomDataframe as mkdf
df = DataFrame({'A' : Series([3,0],dtype='int64'), 'B' : Series([0,3],dtype='int64') })
result = df.replace(3, df.mean().to_dict())
expected = df.copy().astype('float64')
m = df.mean()
expected.iloc[0,0] = m[0]
expected.iloc[1,1] = m[1]
assert_frame_equal(result,expected)
def test_replace_simple_nested_dict(self):
df = DataFrame({'col': range(1, 5)})
expected = DataFrame({'col': ['a', 2, 3, 'b']})
result = df.replace({'col': {1: 'a', 4: 'b'}})
tm.assert_frame_equal(expected, result)
# in this case, should be the same as the not nested version
result = df.replace({1: 'a', 4: 'b'})
tm.assert_frame_equal(expected, result)
def test_replace_simple_nested_dict_with_nonexistent_value(self):
df = DataFrame({'col': range(1, 5)})
expected = DataFrame({'col': ['a', 2, 3, 'b']})
result = df.replace({-1: '-', 1: 'a', 4: 'b'})
tm.assert_frame_equal(expected, result)
result = df.replace({'col': {-1: '-', 1: 'a', 4: 'b'}})
tm.assert_frame_equal(expected, result)
def test_interpolate(self):
pass
def test_replace_value_is_none(self):
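# with value=None a bare scalar to_replace raises, while a dict to_replace
# supplies its own replacement values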
self.assertRaises(TypeError, self.tsframe.replace, nan)
orig_value = self.tsframe.iloc[0, 0]
orig2 = self.tsframe.iloc[1, 0]
self.tsframe.iloc[0, 0] = nan
self.tsframe.iloc[1, 0] = 1
result = self.tsframe.replace(to_replace={nan: 0})
expected = self.tsframe.T.replace(to_replace={nan: 0}).T
assert_frame_equal(result, expected)
result = self.tsframe.replace(to_replace={nan: 0, 1: -1e8})
tsframe = self.tsframe.copy()
tsframe.iloc[0, 0] = 0
tsframe.iloc[1, 0] = -1e8
expected = tsframe
assert_frame_equal(expected, result)
self.tsframe.iloc[0, 0] = orig_value
self.tsframe.iloc[1, 0] = orig2
def test_replace_for_new_dtypes(self):
# dtypes
tsframe = self.tsframe.copy().astype(np.float32)
tsframe['A'][:5] = nan
tsframe['A'][-5:] = nan
zero_filled = tsframe.replace(nan, -1e8)
assert_frame_equal(zero_filled, tsframe.fillna(-1e8))
assert_frame_equal(zero_filled.replace(-1e8, nan), tsframe)
tsframe['A'][:5] = nan
tsframe['A'][-5:] = nan
tsframe['B'][:5] = -1e8
b = tsframe['B']
b[b == -1e8] = nan
tsframe['B'] = b
result = tsframe.fillna(method='bfill')
assert_frame_equal(result, tsframe.fillna(method='bfill'))
def test_replace_dtypes(self):
# int
df = DataFrame({'ints': [1, 2, 3]})
result = df.replace(1, 0)
expected = DataFrame({'ints': [0, 2, 3]})
assert_frame_equal(result, expected)
df = DataFrame({'ints': [1, 2, 3]}, dtype=np.int32)
result = df.replace(1, 0)
expected = DataFrame({'ints': [0, 2, 3]}, dtype=np.int32)
assert_frame_equal(result, expected)
df = DataFrame({'ints': [1, 2, 3]}, dtype=np.int16)
result = df.replace(1, 0)
expected = DataFrame({'ints': [0, 2, 3]}, dtype=np.int16)
assert_frame_equal(result, expected)
# bools
df = DataFrame({'bools': [True, False, True]})
result = df.replace(False, True)
self.assertTrue(result.values.all())
# complex blocks
df = DataFrame({'complex': [1j, 2j, 3j]})
result = df.replace(1j, 0j)
expected = DataFrame({'complex': [0j, 2j, 3j]})
assert_frame_equal(result, expected)
# datetime blocks
prev = datetime.today()
now = datetime.today()
df = DataFrame({'datetime64': Index([prev, now, prev])})
result = df.replace(prev, now)
expected = DataFrame({'datetime64': Index([now] * 3)})
assert_frame_equal(result, expected)
def test_replace_input_formats(self):
# both dicts
to_rep = {'A': np.nan, 'B': 0, 'C': ''}
values = {'A': 0, 'B': -1, 'C': 'missing'}
df = DataFrame({'A': [np.nan, 0, np.inf], 'B': [0, 2, 5],
'C': ['', 'asdf', 'fd']})
filled = df.replace(to_rep, values)
expected = {}
for k, v in compat.iteritems(df):
expected[k] = v.replace(to_rep[k], values[k])
assert_frame_equal(filled, DataFrame(expected))
result = df.replace([0, 2, 5], [5, 2, 0])
expected = DataFrame({'A': [np.nan, 5, np.inf], 'B': [5, 2, 0],
'C': ['', 'asdf', 'fd']})
assert_frame_equal(result, expected)
# dict to scalar
filled = df.replace(to_rep, 0)
expected = {}
for k, v in compat.iteritems(df):
expected[k] = v.replace(to_rep[k], 0)
assert_frame_equal(filled, DataFrame(expected))
self.assertRaises(TypeError, df.replace, to_rep, [np.nan, 0, ''])
# scalar to dict
values = {'A': 0, 'B': -1, 'C': 'missing'}
df = DataFrame({'A': [np.nan, 0, np.nan], 'B': [0, 2, 5],
'C': ['', 'asdf', 'fd']})
filled = df.replace(np.nan, values)
expected = {}
for k, v in compat.iteritems(df):
expected[k] = v.replace(np.nan, values[k])
assert_frame_equal(filled, DataFrame(expected))
# list to list
to_rep = [np.nan, 0, '']
values = [-2, -1, 'missing']
result = df.replace(to_rep, values)
expected = df.copy()
for i in range(len(to_rep)):
expected.replace(to_rep[i], values[i], inplace=True)
assert_frame_equal(result, expected)
self.assertRaises(ValueError, df.replace, to_rep, values[1:])
# list to scalar
to_rep = [np.nan, 0, '']
result = df.replace(to_rep, -1)
expected = df.copy()
for i in range(len(to_rep)):
expected.replace(to_rep[i], -1, inplace=True)
assert_frame_equal(result, expected)
def test_replace_limit(self):
pass
def test_replace_dict_no_regex(self):
answer = Series({0: 'Strongly Agree', 1: 'Agree', 2: 'Neutral', 3:
'Disagree', 4: 'Strongly Disagree'})
weights = {'Agree': 4, 'Disagree': 2, 'Neutral': 3, 'Strongly Agree':
5, 'Strongly Disagree': 1}
expected = Series({0: 5, 1: 4, 2: 3, 3: 2, 4: 1})
result = answer.replace(weights)
tm.assert_series_equal(result, expected)
def test_replace_series_no_regex(self):
answer = Series({0: 'Strongly Agree', 1: 'Agree', 2: 'Neutral', 3:
'Disagree', 4: 'Strongly Disagree'})
weights = Series({'Agree': 4, 'Disagree': 2, 'Neutral': 3,
'Strongly Agree': 5, 'Strongly Disagree': 1})
expected = Series({0: 5, 1: 4, 2: 3, 3: 2, 4: 1})
result = answer.replace(weights)
tm.assert_series_equal(result, expected)
def test_replace_dict_tuple_list_ordering_remains_the_same(self):
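# the same mapping passed as a dict, a tuple or a list should give identical results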
df = DataFrame(dict(A=[nan, 1]))
res1 = df.replace(to_replace={nan: 0, 1: -1e8})
res2 = df.replace(to_replace=(1, nan), value=[-1e8, 0])
res3 = df.replace(to_replace=[1, nan], value=[-1e8, 0])
expected = DataFrame({'A': [0, -1e8]})
tm.assert_frame_equal(res1, res2)
tm.assert_frame_equal(res2, res3)
tm.assert_frame_equal(res3, expected)
def test_replace_doesnt_replace_without_regex(self):
from pandas.compat import StringIO
raw = """fol T_opp T_Dir T_Enh
0 1 0 0 vo
1 2 vr 0 0
2 2 0 0 0
3 3 0 bt 0"""
df = read_csv(StringIO(raw), sep=r'\s+')
res = df.replace({r'\D': 1})
tm.assert_frame_equal(df, res)
def test_replace_bool_with_string(self):
df = DataFrame({'a': [True, False], 'b': list('ab')})
result = df.replace(True, 'a')
expected = DataFrame({'a': ['a', False], 'b': df.b})
tm.assert_frame_equal(result, expected)
def test_replace_pure_bool_with_string_no_op(self):
df = DataFrame(np.random.rand(2, 2) > 0.5)
result = df.replace('asdf', 'fdsa')
tm.assert_frame_equal(df, result)
def test_replace_bool_with_bool(self):
df = DataFrame(np.random.rand(2, 2) > 0.5)
result = df.replace(False, True)
expected = DataFrame(np.ones((2, 2), dtype=bool))
tm.assert_frame_equal(result, expected)
def test_replace_with_dict_with_bool_keys(self):
df = DataFrame({0: [True, False], 1: [False, True]})
with tm.assertRaisesRegexp(TypeError, 'Cannot compare types .+'):
df.replace({'asdf': 'asdb', True: 'yes'})
def test_replace_truthy(self):
df = DataFrame({'a': [True, True]})
r = df.replace([np.inf, -np.inf], np.nan)
e = df
tm.assert_frame_equal(r, e)
def test_replace_int_to_int_chain(self):
df = DataFrame({'a': lrange(1, 5)})
with tm.assertRaisesRegexp(ValueError, "Replacement not allowed .+"):
df.replace({'a': dict(zip(range(1, 5), range(2, 6)))})
def test_replace_str_to_str_chain(self):
a = np.arange(1, 5)
astr = a.astype(str)
bstr = np.arange(2, 6).astype(str)
df = DataFrame({'a': astr})
with tm.assertRaisesRegexp(ValueError, "Replacement not allowed .+"):
df.replace({'a': dict(zip(astr, bstr))})
def test_replace_swapping_bug(self):
df = pd.DataFrame({'a': [True, False, True]})
res = df.replace({'a': {True: 'Y', False: 'N'}})
expect = pd.DataFrame({'a': ['Y', 'N', 'Y']})
tm.assert_frame_equal(res, expect)
df = pd.DataFrame({'a': [0, 1, 0]})
res = df.replace({'a': {0: 'Y', 1: 'N'}})
expect = pd.DataFrame({'a': ['Y', 'N', 'Y']})
tm.assert_frame_equal(res, expect)
def test_replace_period(self):
d = {'fname':
{'out_augmented_AUG_2011.json': pd.Period(year=2011, month=8, freq='M'),
'out_augmented_JAN_2011.json': pd.Period(year=2011, month=1, freq='M'),
'out_augmented_MAY_2012.json': pd.Period(year=2012, month=5, freq='M'),
'out_augmented_SUBSIDY_WEEK.json': pd.Period(year=2011, month=4, freq='M'),
'out_augmented_AUG_2012.json': pd.Period(year=2012, month=8, freq='M'),
'out_augmented_MAY_2011.json': pd.Period(year=2011, month=5, freq='M'),
'out_augmented_SEP_2013.json': pd.Period(year=2013, month=9, freq='M')}}
df = pd.DataFrame(['out_augmented_AUG_2012.json',
'out_augmented_SEP_2013.json',
'out_augmented_SUBSIDY_WEEK.json',
'out_augmented_MAY_2012.json',
'out_augmented_MAY_2011.json',
'out_augmented_AUG_2011.json',
'out_augmented_JAN_2011.json'], columns=['fname'])
tm.assert_equal(set(df.fname.values), set(d['fname'].keys()))
expected = DataFrame({'fname': [d['fname'][k]
for k in df.fname.values]})
result = df.replace(d)
tm.assert_frame_equal(result, expected)
def test_replace_datetime(self):
d = {'fname':
{'out_augmented_AUG_2011.json': pd.Timestamp('2011-08'),
'out_augmented_JAN_2011.json': pd.Timestamp('2011-01'),
'out_augmented_MAY_2012.json': pd.Timestamp('2012-05'),
'out_augmented_SUBSIDY_WEEK.json': pd.Timestamp('2011-04'),
'out_augmented_AUG_2012.json': pd.Timestamp('2012-08'),
'out_augmented_MAY_2011.json': pd.Timestamp('2011-05'),
'out_augmented_SEP_2013.json': pd.Timestamp('2013-09')}}
df = pd.DataFrame(['out_augmented_AUG_2012.json',
'out_augmented_SEP_2013.json',
'out_augmented_SUBSIDY_WEEK.json',
'out_augmented_MAY_2012.json',
'out_augmented_MAY_2011.json',
'out_augmented_AUG_2011.json',
'out_augmented_JAN_2011.json'], columns=['fname'])
tm.assert_equal(set(df.fname.values), set(d['fname'].keys()))
expected = DataFrame({'fname': [d['fname'][k]
for k in df.fname.values]})
result = df.replace(d)
tm.assert_frame_equal(result, expected)
def test_combine_multiple_frames_dtypes(self):
# GH 2759
A = DataFrame(data=np.ones((10, 2)), columns=['foo', 'bar'], dtype=np.float64)
B = DataFrame(data=np.ones((10, 2)), dtype=np.float32)
results = pd.concat((A, B), axis=1).get_dtype_counts()
expected = Series(dict( float64 = 2, float32 = 2 ))
assert_series_equal(results,expected)
def test_ops(self):
# test ops and reversed ops in evaluation
# GH7198
# smaller hits python, larger hits numexpr
for n in [ 4, 4000 ]:
df = DataFrame(1,index=range(n),columns=list('abcd'))
df.iloc[0] = 2
m = df.mean()
for op_str, op, rop in [('+','__add__','__radd__'),
('-','__sub__','__rsub__'),
('*','__mul__','__rmul__'),
('/','__truediv__','__rtruediv__')]:
base = DataFrame(np.tile(m.values,n).reshape(n,-1),columns=list('abcd'))
expected = eval("base{op}df".format(op=op_str))
# ops as strings
result = eval("m{op}df".format(op=op_str))
assert_frame_equal(result,expected)
# these are commutative
if op in ['+','*']:
result = getattr(df,op)(m)
assert_frame_equal(result,expected)
# these are not
elif op in ['-','/']:
result = getattr(df,rop)(m)
assert_frame_equal(result,expected)
# GH7192
df = DataFrame(dict(A=np.random.randn(25000)))
df.iloc[0:5] = np.nan
expected = (1-np.isnan(df.iloc[0:25]))
result = (1-np.isnan(df)).iloc[0:25]
assert_frame_equal(result,expected)
def test_truncate(self):
offset = datetools.bday
ts = self.tsframe[::3]
start, end = self.tsframe.index[3], self.tsframe.index[6]
start_missing = self.tsframe.index[2]
end_missing = self.tsframe.index[7]
# neither specified
truncated = ts.truncate()
assert_frame_equal(truncated, ts)
# both specified
expected = ts[1:3]
truncated = ts.truncate(start, end)
assert_frame_equal(truncated, expected)
truncated = ts.truncate(start_missing, end_missing)
assert_frame_equal(truncated, expected)
# start specified
expected = ts[1:]
truncated = ts.truncate(before=start)
assert_frame_equal(truncated, expected)
truncated = ts.truncate(before=start_missing)
assert_frame_equal(truncated, expected)
# end specified
expected = ts[:3]
truncated = ts.truncate(after=end)
assert_frame_equal(truncated, expected)
truncated = ts.truncate(after=end_missing)
assert_frame_equal(truncated, expected)
self.assertRaises(ValueError, ts.truncate,
before=ts.index[-1] - 1,
after=ts.index[0] + 1)
def test_truncate_copy(self):
index = self.tsframe.index
truncated = self.tsframe.truncate(index[5], index[10])
truncated.values[:] = 5.
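# mutating the truncated copy must not write back into the original frame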
self.assertFalse((self.tsframe.values[5:11] == 5).any())
def test_xs(self):
idx = self.frame.index[5]
xs = self.frame.xs(idx)
for item, value in compat.iteritems(xs):
if np.isnan(value):
self.assertTrue(np.isnan(self.frame[item][idx]))
else:
self.assertEqual(value, self.frame[item][idx])
# mixed-type xs
test_data = {
'A': {'1': 1, '2': 2},
'B': {'1': '1', '2': '2', '3': '3'},
}
frame = DataFrame(test_data)
xs = frame.xs('1')
self.assertEqual(xs.dtype, np.object_)
self.assertEqual(xs['A'], 1)
self.assertEqual(xs['B'], '1')
with tm.assertRaises(KeyError):
self.tsframe.xs(self.tsframe.index[0] - datetools.bday)
# xs get column
series = self.frame.xs('A', axis=1)
expected = self.frame['A']
assert_series_equal(series, expected)
# view is returned if possible
series = self.frame.xs('A', axis=1)
series[:] = 5
self.assertTrue((expected == 5).all())
def test_xs_corner(self):
# pathological mixed-type reordering case
df = DataFrame(index=[0])
df['A'] = 1.
df['B'] = 'foo'
df['C'] = 2.
df['D'] = 'bar'
df['E'] = 3.
xs = df.xs(0)
assert_almost_equal(xs, [1., 'foo', 2., 'bar', 3.])
# no columns but index
df = DataFrame(index=['a', 'b', 'c'])
result = df.xs('a')
expected = Series([])
assert_series_equal(result, expected)
def test_xs_duplicates(self):
df = DataFrame(randn(5, 2), index=['b', 'b', 'c', 'b', 'a'])
cross = df.xs('c')
exp = df.irow(2)
assert_series_equal(cross, exp)
def test_xs_keep_level(self):
df = DataFrame({'day': {0: 'sat', 1: 'sun'},
'flavour': {0: 'strawberry', 1: 'strawberry'},
'sales': {0: 10, 1: 12},
'year': {0: 2008, 1: 2008}}).set_index(['year','flavour','day'])
result = df.xs('sat', level='day', drop_level=False)
expected = df[:1]
assert_frame_equal(result, expected)
result = df.xs([2008, 'sat'], level=['year', 'day'], drop_level=False)
assert_frame_equal(result, expected)
def test_pivot(self):
data = {
'index': ['A', 'B', 'C', 'C', 'B', 'A'],
'columns': ['One', 'One', 'One', 'Two', 'Two', 'Two'],
'values': [1., 2., 3., 3., 2., 1.]
}
frame = DataFrame(data)
pivoted = frame.pivot(
index='index', columns='columns', values='values')
expected = DataFrame({
'One': {'A': 1., 'B': 2., 'C': 3.},
'Two': {'A': 1., 'B': 2., 'C': 3.}
})
expected.index.name, expected.columns.name = 'index', 'columns'
assert_frame_equal(pivoted, expected)
# name tracking
self.assertEqual(pivoted.index.name, 'index')
self.assertEqual(pivoted.columns.name, 'columns')
# don't specify values
pivoted = frame.pivot(index='index', columns='columns')
self.assertEqual(pivoted.index.name, 'index')
self.assertEqual(pivoted.columns.names, (None, 'columns'))
# pivot multiple columns
wp = tm.makePanel()
lp = wp.to_frame()
df = lp.reset_index()
assert_frame_equal(df.pivot('major', 'minor'), lp.unstack())
def test_pivot_duplicates(self):
data = DataFrame({'a': ['bar', 'bar', 'foo', 'foo', 'foo'],
'b': ['one', 'two', 'one', 'one', 'two'],
'c': [1., 2., 3., 3., 4.]})
with assertRaisesRegexp(ValueError, 'duplicate entries'):
data.pivot('a', 'b', 'c')
def test_pivot_empty(self):
df = DataFrame({}, columns=['a', 'b', 'c'])
result = df.pivot('a', 'b', 'c')
expected = DataFrame({})
assert_frame_equal(result, expected, check_names=False)
def test_pivot_integer_bug(self):
df = DataFrame(data=[("A", "1", "A1"), ("B", "2", "B2")])
result = df.pivot(index=1, columns=0, values=2)
repr(result)
self.assert_numpy_array_equal(result.columns, ['A', 'B'])
def test_reindex(self):
newFrame = self.frame.reindex(self.ts1.index)
for col in newFrame.columns:
for idx, val in compat.iteritems(newFrame[col]):
if idx in self.frame.index:
if np.isnan(val):
self.assertTrue(np.isnan(self.frame[col][idx]))
else:
self.assertEqual(val, self.frame[col][idx])
else:
self.assertTrue(np.isnan(val))
for col, series in compat.iteritems(newFrame):
self.assertTrue(tm.equalContents(series.index, newFrame.index))
emptyFrame = self.frame.reindex(Index([]))
self.assertEqual(len(emptyFrame.index), 0)
# Cython code should be unit-tested directly
nonContigFrame = self.frame.reindex(self.ts1.index[::2])
for col in nonContigFrame.columns:
for idx, val in compat.iteritems(nonContigFrame[col]):
if idx in self.frame.index:
if np.isnan(val):
self.assertTrue(np.isnan(self.frame[col][idx]))
else:
self.assertEqual(val, self.frame[col][idx])
else:
self.assertTrue(np.isnan(val))
for col, series in compat.iteritems(nonContigFrame):
self.assertTrue(tm.equalContents(series.index,
nonContigFrame.index))
# corner cases
# Same index, copies values but not index if copy=False
newFrame = self.frame.reindex(self.frame.index, copy=False)
self.assertIs(newFrame.index, self.frame.index)
# length zero
newFrame = self.frame.reindex([])
self.assertTrue(newFrame.empty)
self.assertEqual(len(newFrame.columns), len(self.frame.columns))
# length zero with columns reindexed with non-empty index
newFrame = self.frame.reindex([])
newFrame = newFrame.reindex(self.frame.index)
self.assertEqual(len(newFrame.index), len(self.frame.index))
self.assertEqual(len(newFrame.columns), len(self.frame.columns))
# pass non-Index
newFrame = self.frame.reindex(list(self.ts1.index))
self.assertTrue(newFrame.index.equals(self.ts1.index))
# copy with no axes
result = self.frame.reindex()
assert_frame_equal(result,self.frame)
self.assertFalse(result is self.frame)
def test_reindex_name_remains(self):
s = Series(random.rand(10))
df = DataFrame(s, index=np.arange(len(s)))
i = Series(np.arange(10), name='iname')
df = df.reindex(i)
self.assertEqual(df.index.name, 'iname')
df = df.reindex(Index(np.arange(10), name='tmpname'))
self.assertEqual(df.index.name, 'tmpname')
s = Series(random.rand(10))
df = DataFrame(s.T, index=np.arange(len(s)))
i = Series(np.arange(10), name='iname')
df = df.reindex(columns=i)
self.assertEqual(df.columns.name, 'iname')
def test_reindex_int(self):
smaller = self.intframe.reindex(self.intframe.index[::2])
self.assertEqual(smaller['A'].dtype, np.int64)
bigger = smaller.reindex(self.intframe.index)
self.assertEqual(bigger['A'].dtype, np.float64)
smaller = self.intframe.reindex(columns=['A', 'B'])
self.assertEqual(smaller['A'].dtype, np.int64)
def test_reindex_like(self):
other = self.frame.reindex(index=self.frame.index[:10],
columns=['C', 'B'])
assert_frame_equal(other, self.frame.reindex_like(other))
def test_reindex_columns(self):
newFrame = self.frame.reindex(columns=['A', 'B', 'E'])
assert_series_equal(newFrame['B'], self.frame['B'])
self.assertTrue(np.isnan(newFrame['E']).all())
self.assertNotIn('C', newFrame)
# length zero
newFrame = self.frame.reindex(columns=[])
self.assertTrue(newFrame.empty)
def test_reindex_axes(self):
# GH 3317, reindexing by both axes loses freq of the index
from datetime import datetime
df = DataFrame(np.ones((3, 3)), index=[datetime(2012, 1, 1), datetime(2012, 1, 2), datetime(2012, 1, 3)], columns=['a', 'b', 'c'])
time_freq = date_range('2012-01-01', '2012-01-03', freq='d')
some_cols = ['a', 'b']
index_freq = df.reindex(index=time_freq).index.freq
both_freq = df.reindex(index=time_freq, columns=some_cols).index.freq
seq_freq = df.reindex(index=time_freq).reindex(columns=some_cols).index.freq
self.assertEqual(index_freq, both_freq)
self.assertEqual(index_freq, seq_freq)
def test_reindex_fill_value(self):
df = DataFrame(np.random.randn(10, 4))
# axis=0
result = df.reindex(lrange(15))
self.assertTrue(np.isnan(result.values[-5:]).all())
result = df.reindex(lrange(15), fill_value=0)
expected = df.reindex(lrange(15)).fillna(0)
assert_frame_equal(result, expected)
# axis=1
result = df.reindex(columns=lrange(5), fill_value=0.)
expected = df.copy()
expected[4] = 0.
assert_frame_equal(result, expected)
result = df.reindex(columns=lrange(5), fill_value=0)
expected = df.copy()
expected[4] = 0
assert_frame_equal(result, expected)
result = df.reindex(columns=lrange(5), fill_value='foo')
expected = df.copy()
expected[4] = 'foo'
assert_frame_equal(result, expected)
# reindex_axis
result = df.reindex_axis(lrange(15), fill_value=0., axis=0)
expected = df.reindex(lrange(15)).fillna(0)
assert_frame_equal(result, expected)
result = df.reindex_axis(lrange(5), fill_value=0., axis=1)
expected = df.reindex(columns=lrange(5)).fillna(0)
assert_frame_equal(result, expected)
# other dtypes
df['foo'] = 'foo'
result = df.reindex(lrange(15), fill_value=0)
expected = df.reindex(lrange(15)).fillna(0)
assert_frame_equal(result, expected)
def test_reindex_dups(self):
# GH4746, reindex on duplicate index error messages
arr = np.random.randn(10)
df = DataFrame(arr,index=[1,2,3,4,5,1,2,3,4,5])
# set index is ok
result = df.copy()
result.index = list(range(len(df)))
expected = DataFrame(arr,index=list(range(len(df))))
assert_frame_equal(result,expected)
# reindex fails
self.assertRaises(ValueError, df.reindex, index=list(range(len(df))))
def test_align(self):
af, bf = self.frame.align(self.frame)
self.assertIsNot(af._data, self.frame._data)
af, bf = self.frame.align(self.frame, copy=False)
self.assertIs(af._data, self.frame._data)
# axis = 0
other = self.frame.ix[:-5, :3]
af, bf = self.frame.align(other, axis=0, fill_value=-1)
self.assertTrue(bf.columns.equals(other.columns))
# test fill value
join_idx = self.frame.index.join(other.index)
diff_a = self.frame.index.difference(join_idx)
diff_b = other.index.difference(join_idx)
diff_a_vals = af.reindex(diff_a).values
diff_b_vals = bf.reindex(diff_b).values
self.assertTrue((diff_a_vals == -1).all())
af, bf = self.frame.align(other, join='right', axis=0)
self.assertTrue(bf.columns.equals(other.columns))
self.assertTrue(bf.index.equals(other.index))
self.assertTrue(af.index.equals(other.index))
# axis = 1
other = self.frame.ix[:-5, :3].copy()
af, bf = self.frame.align(other, axis=1)
self.assertTrue(bf.columns.equals(self.frame.columns))
self.assertTrue(bf.index.equals(other.index))
# test fill value
join_idx = self.frame.index.join(other.index)
diff_a = self.frame.index.difference(join_idx)
diff_b = other.index.difference(join_idx)
diff_a_vals = af.reindex(diff_a).values
diff_b_vals = bf.reindex(diff_b).values
self.assertTrue((diff_a_vals == -1).all())
af, bf = self.frame.align(other, join='inner', axis=1)
self.assertTrue(bf.columns.equals(other.columns))
af, bf = self.frame.align(other, join='inner', axis=1, method='pad')
self.assertTrue(bf.columns.equals(other.columns))
# test other non-float types
af, bf = self.intframe.align(other, join='inner', axis=1, method='pad')
self.assertTrue(bf.columns.equals(other.columns))
af, bf = self.mixed_frame.align(self.mixed_frame,
join='inner', axis=1, method='pad')
self.assertTrue(bf.columns.equals(self.mixed_frame.columns))
af, bf = self.frame.align(other.ix[:, 0], join='inner', axis=1,
method=None, fill_value=None)
self.assertTrue(bf.index.equals(Index([])))
af, bf = self.frame.align(other.ix[:, 0], join='inner', axis=1,
method=None, fill_value=0)
self.assertTrue(bf.index.equals(Index([])))
# mixed floats/ints
af, bf = self.mixed_float.align(other.ix[:, 0], join='inner', axis=1,
method=None, fill_value=0)
self.assertTrue(bf.index.equals(Index([])))
af, bf = self.mixed_int.align(other.ix[:, 0], join='inner', axis=1,
method=None, fill_value=0)
self.assertTrue(bf.index.equals(Index([])))
# try to align dataframe to series along bad axis
self.assertRaises(ValueError, self.frame.align, af.ix[0, :3],
join='inner', axis=2)
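# helper: build the expected alignment by reindexing both frames on the joined
# axis and filling, then compare with the output of DataFrame.align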
def _check_align(self, a, b, axis, fill_axis, how, method, limit=None):
aa, ab = a.align(b, axis=axis, join=how, method=method, limit=limit,
fill_axis=fill_axis)
join_index, join_columns = None, None
ea, eb = a, b
if axis is None or axis == 0:
join_index = a.index.join(b.index, how=how)
ea = ea.reindex(index=join_index)
eb = eb.reindex(index=join_index)
if axis is None or axis == 1:
join_columns = a.columns.join(b.columns, how=how)
ea = ea.reindex(columns=join_columns)
eb = eb.reindex(columns=join_columns)
ea = ea.fillna(axis=fill_axis, method=method, limit=limit)
eb = eb.fillna(axis=fill_axis, method=method, limit=limit)
assert_frame_equal(aa, ea)
assert_frame_equal(ab, eb)
def test_align_fill_method_inner(self):
for meth in ['pad', 'bfill']:
for ax in [0, 1, None]:
for fax in [0, 1]:
self._check_align_fill('inner', meth, ax, fax)
def test_align_fill_method_outer(self):
for meth in ['pad', 'bfill']:
for ax in [0, 1, None]:
for fax in [0, 1]:
self._check_align_fill('outer', meth, ax, fax)
def test_align_fill_method_left(self):
for meth in ['pad', 'bfill']:
for ax in [0, 1, None]:
for fax in [0, 1]:
self._check_align_fill('left', meth, ax, fax)
def test_align_fill_method_right(self):
for meth in ['pad', 'bfill']:
for ax in [0, 1, None]:
for fax in [0, 1]:
self._check_align_fill('right', meth, ax, fax)
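# helper: run one join/method/axis combination on full and empty frames,
# with and without a fill limit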
def _check_align_fill(self, kind, meth, ax, fax):
left = self.frame.ix[0:4, :10]
right = self.frame.ix[2:, 6:]
empty = self.frame.ix[:0, :0]
self._check_align(left, right, axis=ax, fill_axis=fax,
how=kind, method=meth)
self._check_align(left, right, axis=ax, fill_axis=fax,
how=kind, method=meth, limit=1)
# empty left
self._check_align(empty, right, axis=ax, fill_axis=fax,
how=kind, method=meth)
self._check_align(empty, right, axis=ax, fill_axis=fax,
how=kind, method=meth, limit=1)
# empty right
self._check_align(left, empty, axis=ax, fill_axis=fax,
how=kind, method=meth)
self._check_align(left, empty, axis=ax, fill_axis=fax,
how=kind, method=meth, limit=1)
# both empty
self._check_align(empty, empty, axis=ax, fill_axis=fax,
how=kind, method=meth)
self._check_align(empty, empty, axis=ax, fill_axis=fax,
how=kind, method=meth, limit=1)
def test_align_int_fill_bug(self):
# GH #910
X = np.arange(10*10, dtype='float64').reshape(10, 10)
Y = np.ones((10, 1), dtype=int)
df1 = DataFrame(X)
df1['0.X'] = Y.squeeze()
df2 = df1.astype(float)
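# subtracting the column means should upcast the int column so the result
# matches the all-float frame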
result = df1 - df1.mean()
expected = df2 - df2.mean()
assert_frame_equal(result, expected)
def test_where(self):
default_frame = DataFrame(np.random.randn(5, 3),columns=['A','B','C'])
def _safe_add(df):
# only add to the numeric items
def is_ok(s):
return issubclass(s.dtype.type, (np.integer,np.floating)) and s.dtype != 'uint8'
return DataFrame(dict([ (c,s+1) if is_ok(s) else (c,s) for c, s in compat.iteritems(df) ]))
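# helper: verify df.where(cond, other) column by column against np.where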
def _check_get(df, cond, check_dtypes = True):
other1 = _safe_add(df)
rs = df.where(cond, other1)
rs2 = df.where(cond.values, other1)
for k, v in rs.iteritems():
assert_series_equal(v, Series(np.where(cond[k], df[k], other1[k]),index=v.index))
assert_frame_equal(rs, rs2)
# dtypes
if check_dtypes:
self.assertTrue((rs.dtypes == df.dtypes).all() == True)
# check getting
for df in [ default_frame, self.mixed_frame, self.mixed_float, self.mixed_int ]:
cond = df > 0
_check_get(df, cond)
# upcasting case (GH 2794)
df = DataFrame(dict([ (c,Series([1]*3,dtype=c)) for c in ['int64','int32','float32','float64'] ]))
df.ix[1,:] = 0
result = df.where(df>=0).get_dtype_counts()
#### when we don't preserve boolean casts ####
#expected = Series({ 'float32' : 1, 'float64' : 3 })
expected = Series({ 'float32' : 1, 'float64' : 1, 'int32' : 1, 'int64' : 1 })
assert_series_equal(result, expected)
# aligning
def _check_align(df, cond, other, check_dtypes = True):
rs = df.where(cond, other)
for i, k in enumerate(rs.columns):
result = rs[k]
d = df[k].values
c = cond[k].reindex(df[k].index).fillna(False).values
if np.isscalar(other):
o = other
else:
if isinstance(other,np.ndarray):
o = Series(other[:,i],index=result.index).values
else:
o = other[k].values
new_values = d if c.all() else np.where(c, d, o)
expected = Series(new_values,index=result.index)
# since we can't always have the correct numpy dtype
# as numpy doesn't know how to downcast, don't check
assert_series_equal(result, expected, check_dtype=False)
# dtypes
# can't check dtype when other is an ndarray
if check_dtypes and not isinstance(other,np.ndarray):
self.assertTrue((rs.dtypes == df.dtypes).all() == True)
for df in [ self.mixed_frame, self.mixed_float, self.mixed_int ]:
# other is a frame
cond = (df > 0)[1:]
_check_align(df, cond, _safe_add(df))
# check other is ndarray
cond = df > 0
_check_align(df, cond, (_safe_add(df).values))
# integers are upcast, so don't check the dtypes
cond = df > 0
check_dtypes = all([ not issubclass(s.type,np.integer) for s in df.dtypes ])
_check_align(df, cond, np.nan, check_dtypes = check_dtypes)
# invalid conditions
df = default_frame
err1 = (df + 1).values[0:2, :]
self.assertRaises(ValueError, df.where, cond, err1)
err2 = cond.ix[:2, :].values
other1 = _safe_add(df)
self.assertRaises(ValueError, df.where, err2, other1)
self.assertRaises(ValueError, df.mask, True)
self.assertRaises(ValueError, df.mask, 0)
# where inplace
def _check_set(df, cond, check_dtypes = True):
dfi = df.copy()
econd = cond.reindex_like(df).fillna(True)
expected = dfi.mask(~econd)
dfi.where(cond, np.nan, inplace=True)
assert_frame_equal(dfi, expected)
# dtypes (and confirm upcasts)
if check_dtypes:
for k, v in compat.iteritems(df.dtypes):
if issubclass(v.type,np.integer) and not cond[k].all():
v = np.dtype('float64')
self.assertEqual(dfi[k].dtype, v)
for df in [ default_frame, self.mixed_frame, self.mixed_float, self.mixed_int ]:
cond = df > 0
_check_set(df, cond)
cond = df >= 0
_check_set(df, cond)
# aligning
cond = (df >= 0)[1:]
_check_set(df, cond)
def test_where_bug(self):
# GH 2793
df = DataFrame({'a': [1.0, 2.0, 3.0, 4.0], 'b': [4.0, 3.0, 2.0, 1.0]}, dtype = 'float64')
expected = DataFrame({'a': [np.nan, np.nan, 3.0, 4.0], 'b': [4.0, 3.0, np.nan, np.nan]}, dtype = 'float64')
result = df.where(df > 2, np.nan)
assert_frame_equal(result, expected)
result = df.copy()
result.where(result > 2, np.nan, inplace=True)
assert_frame_equal(result, expected)
# mixed
for dtype in ['int16','int8','int32','int64']:
df = DataFrame({'a': np.array([1, 2, 3, 4],dtype=dtype), 'b': np.array([4.0, 3.0, 2.0, 1.0], dtype = 'float64') })
expected = DataFrame({'a': [np.nan, np.nan, 3.0, 4.0], 'b': [4.0, 3.0, np.nan, np.nan]}, dtype = 'float64')
result = df.where(df > 2, np.nan)
assert_frame_equal(result, expected)
result = df.copy()
result.where(result > 2, np.nan, inplace=True)
assert_frame_equal(result, expected)
# transpositional issue
# GH7506
a = DataFrame({ 0 : [1,2], 1 : [3,4], 2 : [5,6]})
b = DataFrame({ 0 : [np.nan,8], 1:[9,np.nan], 2:[np.nan,np.nan]})
do_not_replace = b.isnull() | (a > b)
expected = a.copy()
expected[~do_not_replace] = b
result = a.where(do_not_replace,b)
assert_frame_equal(result,expected)
a = DataFrame({ 0 : [4,6], 1 : [1,0]})
b = DataFrame({ 0 : [np.nan,3],1:[3,np.nan]})
do_not_replace = b.isnull() | (a > b)
expected = a.copy()
expected[~do_not_replace] = b
result = a.where(do_not_replace,b)
assert_frame_equal(result,expected)
def test_where_datetime(self):
# GH 3311
df = DataFrame(dict(A = date_range('20130102',periods=5),
B = date_range('20130104',periods=5),
C = np.random.randn(5)))
stamp = datetime(2013,1,3)
result = df[df>stamp]
expected = df.copy()
expected.loc[[0,1],'A'] = np.nan
assert_frame_equal(result,expected)
def test_where_none(self):
# GH 4667
# setting with None changes dtype
df = DataFrame({'series': Series(range(10))}).astype(float)
df[df > 7] = None
expected = DataFrame({'series': Series([0,1,2,3,4,5,6,7,np.nan,np.nan]) })
assert_frame_equal(df, expected)
# GH 7656
df = DataFrame([{'A': 1, 'B': np.nan, 'C': 'Test'}, {'A': np.nan, 'B': 'Test', 'C': np.nan}])
expected = df.where(~isnull(df), None)
with tm.assertRaisesRegexp(TypeError, 'boolean setting on mixed-type'):
df.where(~isnull(df), None, inplace=True)
def test_where_align(self):
def create():
df = DataFrame(np.random.randn(10,3))
df.iloc[3:5,0] = np.nan
df.iloc[4:6,1] = np.nan
df.iloc[5:8,2] = np.nan
return df
# series
df = create()
expected = df.fillna(df.mean())
result = df.where(pd.notnull(df),df.mean(),axis='columns')
assert_frame_equal(result, expected)
df.where(pd.notnull(df),df.mean(),inplace=True,axis='columns')
assert_frame_equal(df, expected)
df = create().fillna(0)
expected = df.apply(lambda x, y: x.where(x>0,y), y=df[0])
result = df.where(df>0,df[0],axis='index')
assert_frame_equal(result, expected)
result = df.where(df>0,df[0],axis='rows')
assert_frame_equal(result, expected)
# frame
df = create()
expected = df.fillna(1)
result = df.where(pd.notnull(df),DataFrame(1,index=df.index,columns=df.columns))
assert_frame_equal(result, expected)
def test_where_complex(self):
# GH 6345
expected = DataFrame([[1+1j, 2], [np.nan, 4+1j]], columns=['a', 'b'])
df = DataFrame([[1+1j, 2], [5+1j, 4+1j]], columns=['a', 'b'])
df[df.abs() >= 5] = np.nan
assert_frame_equal(df,expected)
def test_mask(self):
df = DataFrame(np.random.randn(5, 3))
cond = df > 0
rs = df.where(cond, np.nan)
assert_frame_equal(rs, df.mask(df <= 0))
assert_frame_equal(rs, df.mask(~cond))
def test_mask_edge_case_1xN_frame(self):
# GH4071
df = DataFrame([[1, 2]])
res = df.mask(DataFrame([[True, False]]))
expec = DataFrame([[nan, 2]])
assert_frame_equal(res, expec)
#----------------------------------------------------------------------
# Transposing
def test_transpose(self):
frame = self.frame
dft = frame.T
for idx, series in compat.iteritems(dft):
for col, value in compat.iteritems(series):
if np.isnan(value):
self.assertTrue(np.isnan(frame[col][idx]))
else:
self.assertEqual(value, frame[col][idx])
# mixed type
index, data = tm.getMixedTypeDict()
mixed = DataFrame(data, index=index)
mixed_T = mixed.T
for col, s in compat.iteritems(mixed_T):
self.assertEqual(s.dtype, np.object_)
def test_transpose_get_view(self):
dft = self.frame.T
dft.values[:, 5:10] = 5
self.assertTrue((self.frame.values[5:10] == 5).all())
#----------------------------------------------------------------------
# Renaming
def test_rename(self):
mapping = {
'A': 'a',
'B': 'b',
'C': 'c',
'D': 'd'
}
renamed = self.frame.rename(columns=mapping)
renamed2 = self.frame.rename(columns=str.lower)
assert_frame_equal(renamed, renamed2)
assert_frame_equal(renamed2.rename(columns=str.upper),
self.frame, check_names=False)
# index
data = {
'A': {'foo': 0, 'bar': 1}
}
# gets sorted alphabetical
df = DataFrame(data)
renamed = df.rename(index={'foo': 'bar', 'bar': 'foo'})
self.assert_numpy_array_equal(renamed.index, ['foo', 'bar'])
renamed = df.rename(index=str.upper)
self.assert_numpy_array_equal(renamed.index, ['BAR', 'FOO'])
# have to pass something
self.assertRaises(TypeError, self.frame.rename)
# partial columns
renamed = self.frame.rename(columns={'C': 'foo', 'D': 'bar'})
self.assert_numpy_array_equal(renamed.columns, ['A', 'B', 'foo', 'bar'])
# other axis
renamed = self.frame.T.rename(index={'C': 'foo', 'D': 'bar'})
self.assert_numpy_array_equal(renamed.index, ['A', 'B', 'foo', 'bar'])
# index with name
index = Index(['foo', 'bar'], name='name')
renamer = DataFrame(data, index=index)
renamed = renamer.rename(index={'foo': 'bar', 'bar': 'foo'})
self.assert_numpy_array_equal(renamed.index, ['bar', 'foo'])
self.assertEqual(renamed.index.name, renamer.index.name)
# MultiIndex
tuples_index = [('foo1', 'bar1'), ('foo2', 'bar2')]
tuples_columns = [('fizz1', 'buzz1'), ('fizz2', 'buzz2')]
index = MultiIndex.from_tuples(tuples_index, names=['foo', 'bar'])
columns = MultiIndex.from_tuples(tuples_columns, names=['fizz', 'buzz'])
renamer = DataFrame([(0,0),(1,1)], index=index, columns=columns)
renamed = renamer.rename(index={'foo1': 'foo3', 'bar2': 'bar3'},
columns={'fizz1': 'fizz3', 'buzz2': 'buzz3'})
new_index = MultiIndex.from_tuples([('foo3', 'bar1'), ('foo2', 'bar3')])
new_columns = MultiIndex.from_tuples([('fizz3', 'buzz1'), ('fizz2', 'buzz3')])
self.assert_numpy_array_equal(renamed.index, new_index)
self.assert_numpy_array_equal(renamed.columns, new_columns)
self.assertEqual(renamed.index.names, renamer.index.names)
self.assertEqual(renamed.columns.names, renamer.columns.names)
def test_rename_nocopy(self):
renamed = self.frame.rename(columns={'C': 'foo'}, copy=False)
renamed['foo'] = 1.
self.assertTrue((self.frame['C'] == 1.).all())
def test_rename_inplace(self):
self.frame.rename(columns={'C': 'foo'})
self.assertIn('C', self.frame)
self.assertNotIn('foo', self.frame)
c_id = id(self.frame['C'])
frame = self.frame.copy()
frame.rename(columns={'C': 'foo'}, inplace=True)
self.assertNotIn('C', frame)
self.assertIn('foo', frame)
self.assertNotEqual(id(frame['foo']), c_id)
def test_rename_bug(self):
# GH 5344
# rename set ref_locs, and set_index was not resetting
df = DataFrame({ 0 : ['foo','bar'], 1 : ['bah','bas'], 2 : [1,2]})
df = df.rename(columns={0 : 'a'})
df = df.rename(columns={1 : 'b'})
df = df.set_index(['a','b'])
df.columns = ['2001-01-01']
expected = DataFrame([[1],[2]],index=MultiIndex.from_tuples([('foo','bah'),('bar','bas')],
names=['a','b']),
columns=['2001-01-01'])
assert_frame_equal(df,expected)
#----------------------------------------------------------------------
# Time series related
def test_diff(self):
the_diff = self.tsframe.diff(1)
assert_series_equal(the_diff['A'],
self.tsframe['A'] - self.tsframe['A'].shift(1))
# int dtype
a = 10000000000000000
b = a + 1
s = Series([a, b])
rs = DataFrame({'s': s}).diff()
self.assertEqual(rs.s[1], 1)
# mixed numeric
tf = self.tsframe.astype('float32')
the_diff = tf.diff(1)
assert_series_equal(the_diff['A'],
tf['A'] - tf['A'].shift(1))
def test_diff_timedelta(self):
# GH 4533
df = DataFrame(dict(time=[Timestamp('20130101 9:01'),
Timestamp('20130101 9:02')],
value=[1.0,2.0]))
res = df.diff()
exp = DataFrame([[pd.NaT, np.nan],
[Timedelta('00:01:00'), 1]],
columns=['time', 'value'])
assert_frame_equal(res, exp)
def test_diff_mixed_dtype(self):
df = DataFrame(np.random.randn(5, 3))
df['A'] = np.array([1, 2, 3, 4, 5], dtype=object)
result = df.diff()
self.assertEqual(result[0].dtype, np.float64)
def test_diff_neg_n(self):
rs = self.tsframe.diff(-1)
xp = self.tsframe - self.tsframe.shift(-1)
assert_frame_equal(rs, xp)
def test_diff_float_n(self):
rs = self.tsframe.diff(1.)
xp = self.tsframe.diff(1)
assert_frame_equal(rs, xp)
def test_pct_change(self):
rs = self.tsframe.pct_change(fill_method=None)
assert_frame_equal(rs, self.tsframe / self.tsframe.shift(1) - 1)
rs = self.tsframe.pct_change(2)
filled = self.tsframe.fillna(method='pad')
assert_frame_equal(rs, filled / filled.shift(2) - 1)
rs = self.tsframe.pct_change(fill_method='bfill', limit=1)
filled = self.tsframe.fillna(method='bfill', limit=1)
assert_frame_equal(rs, filled / filled.shift(1) - 1)
rs = self.tsframe.pct_change(freq='5D')
filled = self.tsframe.fillna(method='pad')
assert_frame_equal(rs, filled / filled.shift(freq='5D') - 1)
def test_pct_change_shift_over_nas(self):
s = Series([1., 1.5, np.nan, 2.5, 3.])
df = DataFrame({'a': s, 'b': s})
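# the default fill_method pads over the NaN, so row 3 is measured against the
# padded 1.5 while the NaN row itself stays NaN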
chg = df.pct_change()
expected = Series([np.nan, 0.5, np.nan, 2.5 / 1.5 - 1, .2])
edf = DataFrame({'a': expected, 'b': expected})
assert_frame_equal(chg, edf)
def test_shift(self):
# naive shift
shiftedFrame = self.tsframe.shift(5)
self.assertTrue(shiftedFrame.index.equals(self.tsframe.index))
shiftedSeries = self.tsframe['A'].shift(5)
assert_series_equal(shiftedFrame['A'], shiftedSeries)
shiftedFrame = self.tsframe.shift(-5)
self.assertTrue(shiftedFrame.index.equals(self.tsframe.index))
shiftedSeries = self.tsframe['A'].shift(-5)
assert_series_equal(shiftedFrame['A'], shiftedSeries)
# shift by 0
unshifted = self.tsframe.shift(0)
assert_frame_equal(unshifted, self.tsframe)
# shift by DateOffset
shiftedFrame = self.tsframe.shift(5, freq=datetools.BDay())
self.assertEqual(len(shiftedFrame), len(self.tsframe))
shiftedFrame2 = self.tsframe.shift(5, freq='B')
assert_frame_equal(shiftedFrame, shiftedFrame2)
d = self.tsframe.index[0]
shifted_d = d + datetools.BDay(5)
assert_series_equal(self.tsframe.xs(d),
shiftedFrame.xs(shifted_d))
# shift int frame
int_shifted = self.intframe.shift(1)
# Shifting with PeriodIndex
ps = tm.makePeriodFrame()
shifted = ps.shift(1)
unshifted = shifted.shift(-1)
self.assertTrue(shifted.index.equals(ps.index))
tm.assert_dict_equal(unshifted.ix[:, 0].valid(), ps.ix[:, 0],
compare_keys=False)
shifted2 = ps.shift(1, 'B')
shifted3 = ps.shift(1, datetools.bday)
assert_frame_equal(shifted2, shifted3)
assert_frame_equal(ps, shifted2.shift(-1, 'B'))
assertRaisesRegexp(ValueError, 'does not match PeriodIndex freq',
ps.shift, freq='D')
# shift other axis
# GH 6371
df = DataFrame(np.random.rand(10,5))
expected = pd.concat([DataFrame(np.nan,index=df.index,columns=[0]),df.iloc[:,0:-1]],ignore_index=True,axis=1)
result = df.shift(1,axis=1)
assert_frame_equal(result,expected)
# shift named axis
df = DataFrame(np.random.rand(10,5))
expected = pd.concat([DataFrame(np.nan,index=df.index,columns=[0]),df.iloc[:,0:-1]],ignore_index=True,axis=1)
result = df.shift(1,axis='columns')
assert_frame_equal(result,expected)
def test_shift_bool(self):
df = DataFrame({'high': [True, False],
'low': [False, False]})
rs = df.shift(1)
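# shifting a bool frame introduces NaN, so the result is upcast to object dtype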
xp = DataFrame(np.array([[np.nan, np.nan],
[True, False]], dtype=object),
columns=['high', 'low'])
assert_frame_equal(rs, xp)
def test_shift_empty(self):
# Regression test for #8019
df = DataFrame({'foo': []})
rs = df.shift(-1)
assert_frame_equal(df, rs)
def test_tshift(self):
# PeriodIndex
ps = tm.makePeriodFrame()
shifted = ps.tshift(1)
unshifted = shifted.tshift(-1)
assert_frame_equal(unshifted, ps)
shifted2 = ps.tshift(freq='B')
assert_frame_equal(shifted, shifted2)
shifted3 = ps.tshift(freq=datetools.bday)
assert_frame_equal(shifted, shifted3)
assertRaisesRegexp(ValueError, 'does not match', ps.tshift, freq='M')
# DatetimeIndex
shifted = self.tsframe.tshift(1)
unshifted = shifted.tshift(-1)
assert_frame_equal(self.tsframe, unshifted)
shifted2 = self.tsframe.tshift(freq=self.tsframe.index.freq)
assert_frame_equal(shifted, shifted2)
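# rebuild the frame with a plain Index of datetimes so tshift has to infer the frequency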
inferred_ts = DataFrame(self.tsframe.values,
Index(np.asarray(self.tsframe.index)),
columns=self.tsframe.columns)
shifted = inferred_ts.tshift(1)
unshifted = shifted.tshift(-1)
assert_frame_equal(shifted, self.tsframe.tshift(1))
assert_frame_equal(unshifted, inferred_ts)
no_freq = self.tsframe.ix[[0, 5, 7], :]
self.assertRaises(ValueError, no_freq.tshift)
def test_apply(self):
# ufunc
applied = self.frame.apply(np.sqrt)
assert_series_equal(np.sqrt(self.frame['A']), applied['A'])
# aggregator
applied = self.frame.apply(np.mean)
self.assertEqual(applied['A'], np.mean(self.frame['A']))
d = self.frame.index[0]
applied = self.frame.apply(np.mean, axis=1)
self.assertEqual(applied[d], np.mean(self.frame.xs(d)))
self.assertIs(applied.index, self.frame.index) # want this
# invalid axis
df = DataFrame(
[[1, 2, 3], [4, 5, 6], [7, 8, 9]], index=['a', 'a', 'c'])
self.assertRaises(ValueError, df.apply, lambda x: x, 2)
def test_apply_mixed_datetimelike(self):
# mixed datetimelike
# GH 7778
df = DataFrame({ 'A' : date_range('20130101',periods=3), 'B' : pd.to_timedelta(np.arange(3),unit='s') })
result = df.apply(lambda x: x, axis=1)
assert_frame_equal(result, df)
def test_apply_empty(self):
# empty
applied = self.empty.apply(np.sqrt)
self.assertTrue(applied.empty)
applied = self.empty.apply(np.mean)
self.assertTrue(applied.empty)
no_rows = self.frame[:0]
result = no_rows.apply(lambda x: x.mean())
expected = Series(np.nan, index=self.frame.columns)
assert_series_equal(result, expected)
no_cols = self.frame.ix[:, []]
result = no_cols.apply(lambda x: x.mean(), axis=1)
expected = Series(np.nan, index=self.frame.index)
assert_series_equal(result, expected)
# 2476
xp = DataFrame(index=['a'])
rs = xp.apply(lambda x: x['a'], axis=1)
assert_frame_equal(xp, rs)
# reduce with an empty DataFrame
x = []
result = self.empty.apply(x.append, axis=1, reduce=False)
assert_frame_equal(result, self.empty)
result = self.empty.apply(x.append, axis=1, reduce=True)
assert_series_equal(result, Series([]))
empty_with_cols = DataFrame(columns=['a', 'b', 'c'])
result = empty_with_cols.apply(x.append, axis=1, reduce=False)
assert_frame_equal(result, empty_with_cols)
result = empty_with_cols.apply(x.append, axis=1, reduce=True)
assert_series_equal(result, Series([]))
# Ensure that x.append hasn't been called
self.assertEqual(x, [])
def test_apply_standard_nonunique(self):
df = DataFrame(
[[1, 2, 3], [4, 5, 6], [7, 8, 9]], index=['a', 'a', 'c'])
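# row-wise apply must still return one value per row when index labels are duplicated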
rs = df.apply(lambda s: s[0], axis=1)
xp = Series([1, 4, 7], ['a', 'a', 'c'])
assert_series_equal(rs, xp)
rs = df.T.apply(lambda s: s[0], axis=0)
assert_series_equal(rs, xp)
def test_apply_broadcast(self):
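# broadcast=True returns a frame shaped like the input, repeating the
# aggregate along the applied axis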
broadcasted = self.frame.apply(np.mean, broadcast=True)
agged = self.frame.apply(np.mean)
for col, ts in compat.iteritems(broadcasted):
self.assertTrue((ts == agged[col]).all())
broadcasted = self.frame.apply(np.mean, axis=1, broadcast=True)
agged = self.frame.apply(np.mean, axis=1)
for idx in broadcasted.index:
self.assertTrue((broadcasted.xs(idx) == agged[idx]).all())
def test_apply_raw(self):
result0 = self.frame.apply(np.mean, raw=True)
result1 = self.frame.apply(np.mean, axis=1, raw=True)
expected0 = self.frame.apply(lambda x: x.values.mean())
expected1 = self.frame.apply(lambda x: x.values.mean(), axis=1)
assert_series_equal(result0, expected0)
assert_series_equal(result1, expected1)
# no reduction
result = self.frame.apply(lambda x: x * 2, raw=True)
expected = self.frame * 2
assert_frame_equal(result, expected)
def test_apply_axis1(self):
d = self.frame.index[0]
tapplied = self.frame.apply(np.mean, axis=1)
self.assertEqual(tapplied[d], np.mean(self.frame.xs(d)))
def test_apply_ignore_failures(self):
result = self.mixed_frame._apply_standard(np.mean, 0,
ignore_failures=True)
expected = self.mixed_frame._get_numeric_data().apply(np.mean)
assert_series_equal(result, expected)
def test_apply_mixed_dtype_corner(self):
df = DataFrame({'A': ['foo'],
'B': [1.]})
result = df[:0].apply(np.mean, axis=1)
# the result here is actually kind of ambiguous, should it be a Series
# or a DataFrame?
expected = Series(np.nan, index=[])
assert_series_equal(result, expected)
df = DataFrame({'A': ['foo'],
'B': [1.]})
result = df.apply(lambda x: x['A'], axis=1)
expected = Series(['foo'],index=[0])
assert_series_equal(result, expected)
result = df.apply(lambda x: x['B'], axis=1)
expected = Series([1.],index=[0])
assert_series_equal(result, expected)
def test_apply_empty_infer_type(self):
no_cols = DataFrame(index=['a', 'b', 'c'])
no_index = DataFrame(columns=['a', 'b', 'c'])
def _check(df, f):
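# probe f on an empty float array to decide whether it reduces to a scalar
# or maps elementwise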
test_res = f(np.array([], dtype='f8'))
is_reduction = not isinstance(test_res, np.ndarray)
def _checkit(axis=0, raw=False):
res = df.apply(f, axis=axis, raw=raw)
if is_reduction:
agg_axis = df._get_agg_axis(axis)
tm.assert_isinstance(res, Series)
self.assertIs(res.index, agg_axis)
else:
tm.assert_isinstance(res, DataFrame)
_checkit()
_checkit(axis=1)
_checkit(raw=True)
_checkit(axis=0, raw=True)
_check(no_cols, lambda x: x)
_check(no_cols, lambda x: x.mean())
_check(no_index, lambda x: x)
_check(no_index, lambda x: x.mean())
result = no_cols.apply(lambda x: x.mean(), broadcast=True)
tm.assert_isinstance(result, DataFrame)
def test_apply_with_args_kwds(self):
def add_some(x, howmuch=0):
return x + howmuch
def agg_and_add(x, howmuch=0):
return x.mean() + howmuch
def subtract_and_divide(x, sub, divide=1):
return (x - sub) / divide
result = self.frame.apply(add_some, howmuch=2)
exp = self.frame.apply(lambda x: x + 2)
assert_frame_equal(result, exp)
result = self.frame.apply(agg_and_add, howmuch=2)
exp = self.frame.apply(lambda x: x.mean() + 2)
assert_series_equal(result, exp)
res = self.frame.apply(subtract_and_divide, args=(2,), divide=2)
exp = self.frame.apply(lambda x: (x - 2.) / 2.)
assert_frame_equal(res, exp)
def test_apply_yield_list(self):
result = self.frame.apply(list)
assert_frame_equal(result, self.frame)
def test_apply_reduce_Series(self):
self.frame.ix[::2, 'A'] = np.nan
expected = self.frame.mean(1)
result = self.frame.apply(np.mean, axis=1)
assert_series_equal(result, expected)
def test_apply_differently_indexed(self):
df = DataFrame(np.random.randn(20, 10))
result0 = df.apply(Series.describe, axis=0)
expected0 = DataFrame(dict((i, v.describe())
for i, v in compat.iteritems(df)),
columns=df.columns)
assert_frame_equal(result0, expected0)
result1 = df.apply(Series.describe, axis=1)
expected1 = DataFrame(dict((i, v.describe())
for i, v in compat.iteritems(df.T)),
columns=df.index).T
assert_frame_equal(result1, expected1)
def test_apply_modify_traceback(self):
data = DataFrame({'A': ['foo', 'foo', 'foo', 'foo',
'bar', 'bar', 'bar', 'bar',
'foo', 'foo', 'foo'],
'B': ['one', 'one', 'one', 'two',
'one', 'one', 'one', 'two',
'two', 'two', 'one'],
'C': ['dull', 'dull', 'shiny', 'dull',
'dull', 'shiny', 'shiny', 'dull',
'shiny', 'shiny', 'shiny'],
'D': np.random.randn(11),
'E': np.random.randn(11),
'F': np.random.randn(11)})
data.loc[4,'C'] = np.nan
def transform(row):
if row['C'].startswith('shin') and row['A'] == 'foo':
row['D'] = 7
return row
def transform2(row):
if (notnull(row['C']) and row['C'].startswith('shin')
and row['A'] == 'foo'):
row['D'] = 7
return row
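# transform hits the NaN at row 4; the raised AttributeError should report
# the offending index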
try:
transformed = data.apply(transform, axis=1)
except AttributeError as e:
self.assertEqual(len(e.args), 2)
self.assertEqual(e.args[1], 'occurred at index 4')
self.assertEqual(e.args[0], "'float' object has no attribute 'startswith'")
def test_apply_bug(self):
# GH 6125
import datetime
positions = pd.DataFrame([[1, 'ABC0', 50], [1, 'YUM0', 20],
[1, 'DEF0', 20], [2, 'ABC1', 50],
[2, 'YUM1', 20], [2, 'DEF1', 20]],
columns=['a', 'market', 'position'])
def f(r):
return r['market']
expected = positions.apply(f, axis=1)
positions = DataFrame([[datetime.datetime(2013, 1, 1), 'ABC0', 50],
[datetime.datetime(2013, 1, 2), 'YUM0', 20],
[datetime.datetime(2013, 1, 3), 'DEF0', 20],
[datetime.datetime(2013, 1, 4), 'ABC1', 50],
[datetime.datetime(2013, 1, 5), 'YUM1', 20],
[datetime.datetime(2013, 1, 6), 'DEF1', 20]],
columns=['a', 'market', 'position'])
result = positions.apply(f, axis=1)
assert_series_equal(result,expected)
def test_swapaxes(self):
df = DataFrame(np.random.randn(10, 5))
assert_frame_equal(df.T, df.swapaxes(0, 1))
assert_frame_equal(df.T, df.swapaxes(1, 0))
assert_frame_equal(df, df.swapaxes(0, 0))
self.assertRaises(ValueError, df.swapaxes, 2, 5)
def test_apply_convert_objects(self):
data = DataFrame({'A': ['foo', 'foo', 'foo', 'foo',
'bar', 'bar', 'bar', 'bar',
'foo', 'foo', 'foo'],
'B': ['one', 'one', 'one', 'two',
'one', 'one', 'one', 'two',
'two', 'two', 'one'],
'C': ['dull', 'dull', 'shiny', 'dull',
'dull', 'shiny', 'shiny', 'dull',
'shiny', 'shiny', 'shiny'],
'D': np.random.randn(11),
'E': np.random.randn(11),
'F': np.random.randn(11)})
result = data.apply(lambda x: x, axis=1)
assert_frame_equal(result.convert_objects(), data)
def test_apply_attach_name(self):
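# the applied function should receive the column label (axis=0) or row label (axis=1) as x.name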
result = self.frame.apply(lambda x: x.name)
expected = Series(self.frame.columns, index=self.frame.columns)
assert_series_equal(result, expected)
result = self.frame.apply(lambda x: x.name, axis=1)
expected = Series(self.frame.index, index=self.frame.index)
assert_series_equal(result, expected)
# non-reductions
result = self.frame.apply(lambda x: np.repeat(x.name, len(x)))
expected = DataFrame(np.tile(self.frame.columns,
(len(self.frame.index), 1)),
index=self.frame.index,
columns=self.frame.columns)
assert_frame_equal(result, expected)
result = self.frame.apply(lambda x: np.repeat(x.name, len(x)),
axis=1)
expected = DataFrame(np.tile(self.frame.index,
(len(self.frame.columns), 1)).T,
index=self.frame.index,
columns=self.frame.columns)
assert_frame_equal(result, expected)
def test_apply_multi_index(self):
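# a Series-returning function applied row-wise should keep the frame's MultiIndex on the result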
s = DataFrame([[1,2], [3,4], [5,6]])
s.index = MultiIndex.from_arrays([['a','a','b'], ['c','d','d']])
s.columns = ['col1','col2']
res = s.apply(lambda x: Series({'min': min(x), 'max': max(x)}), 1)
tm.assert_isinstance(res.index, MultiIndex)
def test_applymap(self):
applied = self.frame.applymap(lambda x: x * 2)
assert_frame_equal(applied, self.frame * 2)
result = self.frame.applymap(type)
# GH #465, function returning tuples
result = self.frame.applymap(lambda x: (x, x))
tm.assert_isinstance(result['A'][0], tuple)
# GH 2909, object conversion to float in constructor?
df = DataFrame(data=[1,'a'])
result = df.applymap(lambda x: x)
self.assertEqual(result.dtypes[0], object)
df = DataFrame(data=[1.,'a'])
result = df.applymap(lambda x: x)
self.assertEqual(result.dtypes[0], object)
# GH2786
df = DataFrame(np.random.random((3,4)))
df2 = df.copy()
cols = ['a','a','a','a']
df.columns = cols
expected = df2.applymap(str)
expected.columns = cols
result = df.applymap(str)
assert_frame_equal(result,expected)
# datetime/timedelta
df['datetime'] = Timestamp('20130101')
df['timedelta'] = Timedelta('1 min')
result = df.applymap(str)
for f in ['datetime','timedelta']:
self.assertEqual(result.loc[0, f], str(df.loc[0, f]))
def test_filter(self):
# items
filtered = self.frame.filter(['A', 'B', 'E'])
self.assertEqual(len(filtered.columns), 2)
self.assertNotIn('E', filtered)
filtered = self.frame.filter(['A', 'B', 'E'], axis='columns')
self.assertEqual(len(filtered.columns), 2)
self.assertNotIn('E', filtered)
# other axis
idx = self.frame.index[0:4]
filtered = self.frame.filter(idx, axis='index')
expected = self.frame.reindex(index=idx)
assert_frame_equal(filtered,expected)
# like
fcopy = self.frame.copy()
fcopy['AA'] = 1
filtered = fcopy.filter(like='A')
self.assertEqual(len(filtered.columns), 2)
self.assertIn('AA', filtered)
# like with ints in column names
df = DataFrame(0., index=[0, 1, 2], columns=[0, 1, '_A', '_B'])
filtered = df.filter(like='_')
self.assertEqual(len(filtered.columns), 2)
# pass in None
with assertRaisesRegexp(TypeError, 'Must pass'):
self.frame.filter(items=None)
# objects
filtered = self.mixed_frame.filter(like='foo')
self.assertIn('foo', filtered)
# unicode columns, won't ascii-encode
df = self.frame.rename(columns={'B': u('\u2202')})
filtered = df.filter(like='C')
self.assertTrue('C' in filtered)
def test_filter_regex_search(self):
fcopy = self.frame.copy()
fcopy['AA'] = 1
# regex
filtered = fcopy.filter(regex='[A]+')
self.assertEqual(len(filtered.columns), 2)
self.assertIn('AA', filtered)
# doesn't have to be at beginning
df = DataFrame({'aBBa': [1, 2],
'BBaBB': [1, 2],
'aCCa': [1, 2],
'aCCaBB': [1, 2]})
result = df.filter(regex='BB')
exp = df[[x for x in df.columns if 'BB' in x]]
assert_frame_equal(result, exp)
def test_filter_corner(self):
empty = DataFrame()
result = empty.filter([])
assert_frame_equal(result, empty)
result = empty.filter(like='foo')
assert_frame_equal(result, empty)
def test_select(self):
f = lambda x: x.weekday() == 2
result = self.tsframe.select(f, axis=0)
expected = self.tsframe.reindex(
index=self.tsframe.index[[f(x) for x in self.tsframe.index]])
assert_frame_equal(result, expected)
result = self.frame.select(lambda x: x in ('B', 'D'), axis=1)
expected = self.frame.reindex(columns=['B', 'D'])
assert_frame_equal(result, expected, check_names=False) # TODO should reindex check_names?
def test_reorder_levels(self):
index = MultiIndex(levels=[['bar'], ['one', 'two', 'three'], [0, 1]],
labels=[[0, 0, 0, 0, 0, 0],
[0, 1, 2, 0, 1, 2],
[0, 1, 0, 1, 0, 1]],
names=['L0', 'L1', 'L2'])
df = DataFrame({'A': np.arange(6), 'B': np.arange(6)}, index=index)
# no change, position
result = df.reorder_levels([0, 1, 2])
assert_frame_equal(df, result)
# no change, labels
result = df.reorder_levels(['L0', 'L1', 'L2'])
assert_frame_equal(df, result)
# rotate, position
result = df.reorder_levels([1, 2, 0])
e_idx = MultiIndex(levels=[['one', 'two', 'three'], [0, 1], ['bar']],
labels=[[0, 1, 2, 0, 1, 2],
[0, 1, 0, 1, 0, 1],
[0, 0, 0, 0, 0, 0]],
names=['L1', 'L2', 'L0'])
expected = DataFrame({'A': np.arange(6), 'B': np.arange(6)},
index=e_idx)
assert_frame_equal(result, expected)
result = df.reorder_levels([0, 0, 0])
e_idx = MultiIndex(levels=[['bar'], ['bar'], ['bar']],
labels=[[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0]],
names=['L0', 'L0', 'L0'])
expected = DataFrame({'A': np.arange(6), 'B': np.arange(6)},
index=e_idx)
assert_frame_equal(result, expected)
result = df.reorder_levels(['L0', 'L0', 'L0'])
assert_frame_equal(result, expected)
def test_sort_index(self):
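# covers index sorting along both axes plus sorting by column values via by=/columns=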
frame = DataFrame(np.arange(16).reshape(4, 4), index=[1, 2, 3, 4],
columns=['A', 'B', 'C', 'D'])
# axis=0
unordered = frame.ix[[3, 2, 4, 1]]
sorted_df = unordered.sort_index()
expected = frame
assert_frame_equal(sorted_df, expected)
sorted_df = unordered.sort_index(ascending=False)
expected = frame[::-1]
assert_frame_equal(sorted_df, expected)
# axis=1
unordered = frame.ix[:, ['D', 'B', 'C', 'A']]
sorted_df = unordered.sort_index(axis=1)
expected = frame
assert_frame_equal(sorted_df, expected)
sorted_df = unordered.sort_index(axis=1, ascending=False)
expected = frame.ix[:, ::-1]
assert_frame_equal(sorted_df, expected)
# by column
sorted_df = frame.sort_index(by='A')
indexer = frame['A'].argsort().values
expected = frame.ix[frame.index[indexer]]
assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_index(by='A', ascending=False)
indexer = indexer[::-1]
expected = frame.ix[frame.index[indexer]]
assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort(columns='A', ascending=False)
assert_frame_equal(sorted_df, expected)
# GH4839
sorted_df = frame.sort(columns=['A'], ascending=[False])
assert_frame_equal(sorted_df, expected)
# default ascending sort gives the reverse of the descending result
sorted_df = frame.sort(columns='A')
assert_frame_equal(sorted_df, expected[::-1])
expected = frame.sort_index(by='A')
assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort(columns=['A', 'B'], ascending=False)
expected = frame.sort_index(by=['A', 'B'], ascending=False)
assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort(columns=['A', 'B'])
assert_frame_equal(sorted_df, expected[::-1])
self.assertRaises(ValueError, frame.sort_index, axis=2, inplace=True)
msg = 'When sorting by column, axis must be 0'
with assertRaisesRegexp(ValueError, msg):
frame.sort_index(by='A', axis=1)
msg = r'Length of ascending \(5\) != length of by \(2\)'
with assertRaisesRegexp(ValueError, msg):
frame.sort_index(by=['A', 'B'], axis=0, ascending=[True] * 5)
def test_sort_nan(self):
# GH3917
nan = np.nan
df = DataFrame({'A': [1, 2, nan, 1, 6, 8, 4],
'B': [9, nan, 5, 2, 5, 4, 5]})
# sort one column only
expected = DataFrame(
{'A': [nan, 1, 1, 2, 4, 6, 8],
'B': [5, 9, 2, nan, 5, 5, 4]},
index=[2, 0, 3, 1, 6, 4, 5])
sorted_df = df.sort(['A'], na_position='first')
assert_frame_equal(sorted_df, expected)
expected = DataFrame(
{'A': [nan, 8, 6, 4, 2, 1, 1],
'B': [5, 4, 5, 5, nan, 9, 2]},
index=[2, 5, 4, 6, 1, 0, 3])
sorted_df = df.sort(['A'], na_position='first', ascending=False)
assert_frame_equal(sorted_df, expected)
# na_position='last', both columns ascending
expected = DataFrame(
{'A': [1, 1, 2, 4, 6, 8, nan],
'B': [2, 9, nan, 5, 5, 4, 5]},
index=[3, 0, 1, 6, 4, 5, 2])
sorted_df = df.sort(['A','B'])
assert_frame_equal(sorted_df, expected)
# na_position='first', both columns ascending
expected = DataFrame(
{'A': [nan, 1, 1, 2, 4, 6, 8],
'B': [5, 2, 9, nan, 5, 5, 4]},
index=[2, 3, 0, 1, 6, 4, 5])
sorted_df = df.sort(['A','B'], na_position='first')
assert_frame_equal(sorted_df, expected)
# na_position='first', mixed ascending order
expected = DataFrame(
{'A': [nan, 1, 1, 2, 4, 6, 8],
'B': [5, 9, 2, nan, 5, 5, 4]},
index=[2, 0, 3, 1, 6, 4, 5])
sorted_df = df.sort(['A','B'], ascending=[1,0], na_position='first')
assert_frame_equal(sorted_df, expected)
# na_position='last', mixed ascending order
expected = DataFrame(
{'A': [8, 6, 4, 2, 1, 1, nan],
'B': [4, 5, 5, nan, 2, 9, 5]},
index=[5, 4, 6, 1, 3, 0, 2])
sorted_df = df.sort(['A','B'], ascending=[0,1], na_position='last')
assert_frame_equal(sorted_df, expected)
# Test DataFrame with nan label
df = DataFrame({'A': [1, 2, nan, 1, 6, 8, 4],
'B': [9, nan, 5, 2, 5, 4, 5]},
index = [1, 2, 3, 4, 5, 6, nan])
# NaN label, ascending=True, na_position='last'
sorted_df = df.sort(kind='quicksort', ascending=True, na_position='last')
expected = DataFrame({'A': [1, 2, nan, 1, 6, 8, 4],
'B': [9, nan, 5, 2, 5, 4, 5]},
index = [1, 2, 3, 4, 5, 6, nan])
assert_frame_equal(sorted_df, expected)
# NaN label, ascending=True, na_position='first'
sorted_df = df.sort(na_position='first')
expected = DataFrame({'A': [4, 1, 2, nan, 1, 6, 8],
'B': [5, 9, nan, 5, 2, 5, 4]},
index = [nan, 1, 2, 3, 4, 5, 6])
assert_frame_equal(sorted_df, expected)
# NaN label, ascending=False, na_position='last'
sorted_df = df.sort(kind='quicksort', ascending=False)
expected = DataFrame({'A': [8, 6, 1, nan, 2, 1, 4],
'B': [4, 5, 2, 5, nan, 9, 5]},
index = [6, 5, 4, 3, 2, 1, nan])
assert_frame_equal(sorted_df, expected)
# NaN label, ascending=False, na_position='first'
sorted_df = df.sort(kind='quicksort', ascending=False, na_position='first')
expected = DataFrame({'A': [4, 8, 6, 1, nan, 2, 1],
'B': [5, 4, 5, 2, 5, nan, 9]},
index = [nan, 6, 5, 4, 3, 2, 1])
assert_frame_equal(sorted_df, expected)
def test_stable_descending_sort(self):
# GH #6399
df = DataFrame([[2, 'first'], [2, 'second'], [1, 'a'], [1, 'b']],
columns=['sort_col', 'order'])
sorted_df = df.sort_index(by='sort_col', kind='mergesort',
ascending=False)
assert_frame_equal(df, sorted_df)
def test_stable_descending_multicolumn_sort(self):
nan = np.nan
df = DataFrame({'A': [1, 2, nan, 1, 6, 8, 4],
'B': [9, nan, 5, 2, 5, 4, 5]})
# test stable mergesort
expected = DataFrame(
{'A': [nan, 8, 6, 4, 2, 1, 1],
'B': [5, 4, 5, 5, nan, 2, 9]},
index=[2, 5, 4, 6, 1, 3, 0])
sorted_df = df.sort(['A','B'], ascending=[0,1], na_position='first',
kind='mergesort')
assert_frame_equal(sorted_df, expected)
expected = DataFrame(
{'A': [nan, 8, 6, 4, 2, 1, 1],
'B': [5, 4, 5, 5, nan, 9, 2]},
index=[2, 5, 4, 6, 1, 0, 3])
sorted_df = df.sort(['A','B'], ascending=[0,0], na_position='first',
kind='mergesort')
assert_frame_equal(sorted_df, expected)
def test_sort_index_multicolumn(self):
import random
A = np.arange(5).repeat(20)
B = np.tile(np.arange(5), 20)
random.shuffle(A)
random.shuffle(B)
frame = DataFrame({'A': A, 'B': B,
'C': np.random.randn(100)})
result = frame.sort_index(by=['A', 'B'])
indexer = np.lexsort((frame['B'], frame['A']))
expected = frame.take(indexer)
assert_frame_equal(result, expected)
result = frame.sort_index(by=['A', 'B'], ascending=False)
indexer = np.lexsort((frame['B'].rank(ascending=False),
frame['A'].rank(ascending=False)))
expected = frame.take(indexer)
assert_frame_equal(result, expected)
result = frame.sort_index(by=['B', 'A'])
indexer = np.lexsort((frame['A'], frame['B']))
expected = frame.take(indexer)
assert_frame_equal(result, expected)
def test_sort_index_inplace(self):
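# sorting in place should reorder the data and allocate a new column rather than reusing the old one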
frame = DataFrame(np.random.randn(4, 4), index=[1, 2, 3, 4],
columns=['A', 'B', 'C', 'D'])
# axis=0
unordered = frame.ix[[3, 2, 4, 1]]
a_id = id(unordered['A'])
df = unordered.copy()
df.sort_index(inplace=True)
expected = frame
assert_frame_equal(df, expected)
self.assertNotEqual(a_id, id(df['A']))
df = unordered.copy()
df.sort_index(ascending=False, inplace=True)
expected = frame[::-1]
assert_frame_equal(df, expected)
# axis=1
unordered = frame.ix[:, ['D', 'B', 'C', 'A']]
df = unordered.copy()
df.sort_index(axis=1, inplace=True)
expected = frame
assert_frame_equal(df, expected)
df = unordered.copy()
df.sort_index(axis=1, ascending=False, inplace=True)
expected = frame.ix[:, ::-1]
assert_frame_equal(df, expected)
def test_sort_index_different_sortorder(self):
import random
A = np.arange(20).repeat(5)
B = np.tile(np.arange(5), 20)
indexer = np.random.permutation(100)
A = A.take(indexer)
B = B.take(indexer)
df = DataFrame({'A': A, 'B': B,
'C': np.random.randn(100)})
result = df.sort_index(by=['A', 'B'], ascending=[1, 0])
ex_indexer = np.lexsort((df.B.max() - df.B, df.A))
expected = df.take(ex_indexer)
assert_frame_equal(result, expected)
# test with multiindex, too
idf = df.set_index(['A', 'B'])
result = idf.sort_index(ascending=[1, 0])
expected = idf.take(ex_indexer)
assert_frame_equal(result, expected)
# also, Series!
result = idf['C'].sort_index(ascending=[1, 0])
assert_series_equal(result, expected['C'])
def test_sort_inplace(self):
frame = DataFrame(np.random.randn(4, 4), index=[1, 2, 3, 4],
columns=['A', 'B', 'C', 'D'])
sorted_df = frame.copy()
sorted_df.sort(columns='A', inplace=True)
expected = frame.sort_index(by='A')
assert_frame_equal(sorted_df, expected)
sorted_df = frame.copy()
sorted_df.sort(columns='A', ascending=False, inplace=True)
expected = frame.sort_index(by='A', ascending=False)
assert_frame_equal(sorted_df, expected)
sorted_df = frame.copy()
sorted_df.sort(columns=['A', 'B'], ascending=False, inplace=True)
expected = frame.sort_index(by=['A', 'B'], ascending=False)
assert_frame_equal(sorted_df, expected)
def test_sort_index_duplicates(self):
df = DataFrame([lrange(5,9), lrange(4)],
columns=['a', 'a', 'b', 'b'])
with assertRaisesRegexp(ValueError, 'duplicate'):
df.sort_index(by='a')
with assertRaisesRegexp(ValueError, 'duplicate'):
df.sort_index(by=['a'])
with assertRaisesRegexp(ValueError, 'duplicate'):
# multi-column 'by' is separate codepath
df.sort_index(by=['a', 'b'])
# with multi-index
# GH4370
df = DataFrame(np.random.randn(4,2),columns=MultiIndex.from_tuples([('a',0),('a',1)]))
with assertRaisesRegexp(ValueError, 'levels'):
df.sort_index(by='a')
# convert tuples to a list of tuples
expected = df.sort_index(by=[('a',1)])
result = df.sort_index(by=('a',1))
assert_frame_equal(result, expected)
def test_sortlevel(self):
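# with sort_remaining=False, sorting on an already-ordered level should leave the frame unchanged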
mi = MultiIndex.from_tuples([[1, 1, 3], [1, 1, 1]], names=list('ABC'))
df = DataFrame([[1, 2], [3, 4]], mi)
res = df.sortlevel('A', sort_remaining=False)
assert_frame_equal(df, res)
res = df.sortlevel(['A', 'B'], sort_remaining=False)
assert_frame_equal(df, res)
def test_sort_datetimes(self):
# GH 3461, argsort / lexsort differences for a datetime column
df = DataFrame(['a','a','a','b','c','d','e','f','g'],
columns=['A'],
index=date_range('20130101',periods=9))
dts = [Timestamp(x)
for x in ['2004-02-11','2004-01-21','2004-01-26',
'2005-09-20','2010-10-04','2009-05-12',
'2008-11-12','2010-09-28','2010-09-28']]
df['B'] = dts[::2] + dts[1::2]
df['C'] = 2.
df['A1'] = 3.
df1 = df.sort(columns='A')
df2 = df.sort(columns=['A'])
assert_frame_equal(df1,df2)
df1 = df.sort(columns='B')
df2 = df.sort(columns=['B'])
assert_frame_equal(df1,df2)
def test_frame_column_inplace_sort_exception(self):
s = self.frame['A']
with assertRaisesRegexp(ValueError, "This Series is a view"):
s.sort()
cp = s.copy()
cp.sort() # it works!
def test_combine_first(self):
# disjoint
head, tail = self.frame[:5], self.frame[5:]
combined = head.combine_first(tail)
reordered_frame = self.frame.reindex(combined.index)
assert_frame_equal(combined, reordered_frame)
self.assertTrue(tm.equalContents(combined.columns, self.frame.columns))
assert_series_equal(combined['A'], reordered_frame['A'])
# same index
fcopy = self.frame.copy()
fcopy['A'] = 1
del fcopy['C']
fcopy2 = self.frame.copy()
fcopy2['B'] = 0
del fcopy2['D']
combined = fcopy.combine_first(fcopy2)
self.assertTrue((combined['A'] == 1).all())
assert_series_equal(combined['B'], fcopy['B'])
assert_series_equal(combined['C'], fcopy2['C'])
assert_series_equal(combined['D'], fcopy['D'])
# overlap
head, tail = reordered_frame[:10].copy(), reordered_frame
head['A'] = 1
combined = head.combine_first(tail)
self.assertTrue((combined['A'][:10] == 1).all())
# reverse overlap
tail['A'][:10] = 0
combined = tail.combine_first(head)
self.assertTrue((combined['A'][:10] == 0).all())
# no overlap
f = self.frame[:10]
g = self.frame[10:]
combined = f.combine_first(g)
assert_series_equal(combined['A'].reindex(f.index), f['A'])
assert_series_equal(combined['A'].reindex(g.index), g['A'])
# corner cases
comb = self.frame.combine_first(self.empty)
assert_frame_equal(comb, self.frame)
comb = self.empty.combine_first(self.frame)
assert_frame_equal(comb, self.frame)
comb = self.frame.combine_first(DataFrame(index=["faz", "boo"]))
self.assertTrue("faz" in comb.index)
# #2525
df = DataFrame({'a': [1]}, index=[datetime(2012, 1, 1)])
df2 = DataFrame({}, columns=['b'])
result = df.combine_first(df2)
self.assertTrue('b' in result)
def test_combine_first_mixed_bug(self):
idx = Index(['a', 'b', 'c', 'e'])
ser1 = Series([5.0, -9.0, 4.0, 100.], index=idx)
ser2 = Series(['a', 'b', 'c', 'e'], index=idx)
ser3 = Series([12, 4, 5, 97], index=idx)
frame1 = DataFrame({"col0": ser1,
"col2": ser2,
"col3": ser3})
idx = Index(['a', 'b', 'c', 'f'])
ser1 = Series([5.0, -9.0, 4.0, 100.], index=idx)
ser2 = Series(['a', 'b', 'c', 'f'], index=idx)
ser3 = Series([12, 4, 5, 97], index=idx)
frame2 = DataFrame({"col1": ser1,
"col2": ser2,
"col5": ser3})
combined = frame1.combine_first(frame2)
self.assertEqual(len(combined.columns), 5)
# gh 3016 (same as in update)
df = DataFrame([[1.,2.,False, True],[4.,5.,True,False]],
columns=['A','B','bool1','bool2'])
other = DataFrame([[45,45]],index=[0],columns=['A','B'])
result = df.combine_first(other)
assert_frame_equal(result, df)
df.ix[0,'A'] = np.nan
result = df.combine_first(other)
df.ix[0,'A'] = 45
assert_frame_equal(result, df)
# doc example
df1 = DataFrame({'A' : [1., np.nan, 3., 5., np.nan],
'B' : [np.nan, 2., 3., np.nan, 6.]})
df2 = DataFrame({'A' : [5., 2., 4., np.nan, 3., 7.],
'B' : [np.nan, np.nan, 3., 4., 6., 8.]})
result = df1.combine_first(df2)
expected = DataFrame({ 'A' : [1,2,3,5,3,7.], 'B' : [np.nan,2,3,4,6,8] })
assert_frame_equal(result,expected)
# GH3552, return object dtype with bools
df1 = DataFrame([[np.nan, 3.,True], [-4.6, np.nan, True], [np.nan, 7., False]])
df2 = DataFrame([[-42.6, np.nan, True], [-5., 1.6, False]], index=[1, 2])
result = df1.combine_first(df2)[2]
expected = Series([True,True,False])
assert_series_equal(result,expected)
# GH 3593, converting datetime64[ns] incorrectly
df0 = DataFrame({"a":[datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)]})
df1 = DataFrame({"a":[None, None, None]})
df2 = df1.combine_first(df0)
assert_frame_equal(df2,df0)
df2 = df0.combine_first(df1)
assert_frame_equal(df2,df0)
df0 = DataFrame({"a":[datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)]})
df1 = DataFrame({"a":[datetime(2000, 1, 2), None, None]})
df2 = df1.combine_first(df0)
result = df0.copy()
result.iloc[0,:] = df1.iloc[0,:]
assert_frame_equal(df2,result)
df2 = df0.combine_first(df1)
assert_frame_equal(df2,df0)
def test_update(self):
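# update should overwrite values with non-NaN entries from the other frame, aligned on index and columns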
df = DataFrame([[1.5, nan, 3.],
[1.5, nan, 3.],
[1.5, nan, 3],
[1.5, nan, 3]])
other = DataFrame([[3.6, 2., np.nan],
[np.nan, np.nan, 7]], index=[1, 3])
df.update(other)
expected = DataFrame([[1.5, nan, 3],
[3.6, 2, 3],
[1.5, nan, 3],
[1.5, nan, 7.]])
assert_frame_equal(df, expected)
def test_update_dtypes(self):
# gh 3016
df = DataFrame([[1.,2.,False, True],[4.,5.,True,False]],
columns=['A','B','bool1','bool2'])
other = DataFrame([[45,45]],index=[0],columns=['A','B'])
df.update(other)
expected = DataFrame([[45.,45.,False, True],[4.,5.,True,False]],
columns=['A','B','bool1','bool2'])
assert_frame_equal(df, expected)
def test_update_nooverwrite(self):
df = DataFrame([[1.5, nan, 3.],
[1.5, nan, 3.],
[1.5, nan, 3],
[1.5, nan, 3]])
other = DataFrame([[3.6, 2., np.nan],
[np.nan, np.nan, 7]], index=[1, 3])
df.update(other, overwrite=False)
expected = DataFrame([[1.5, nan, 3],
[1.5, 2, 3],
[1.5, nan, 3],
[1.5, nan, 3.]])
assert_frame_equal(df, expected)
def test_update_filtered(self):
df = DataFrame([[1.5, nan, 3.],
[1.5, nan, 3.],
[1.5, nan, 3],
[1.5, nan, 3]])
other = DataFrame([[3.6, 2., np.nan],
[np.nan, np.nan, 7]], index=[1, 3])
df.update(other, filter_func=lambda x: x > 2)
expected = DataFrame([[1.5, nan, 3],
[1.5, nan, 3],
[1.5, nan, 3],
[1.5, nan, 7.]])
assert_frame_equal(df, expected)
def test_update_raise(self):
df = DataFrame([[1.5, 1, 3.],
[1.5, nan, 3.],
[1.5, nan, 3],
[1.5, nan, 3]])
other = DataFrame([[2., nan],
[nan, 7]], index=[1, 3], columns=[1, 2])
with assertRaisesRegexp(ValueError, "Data overlaps"):
df.update(other, raise_conflict=True)
def test_update_from_non_df(self):
d = {'a': Series([1, 2, 3, 4]), 'b': Series([5, 6, 7, 8])}
df = DataFrame(d)
d['a'] = Series([5, 6, 7, 8])
df.update(d)
expected = DataFrame(d)
assert_frame_equal(df, expected)
d = {'a': [1, 2, 3, 4], 'b': [5, 6, 7, 8]}
df = DataFrame(d)
d['a'] = [5, 6, 7, 8]
df.update(d)
expected = DataFrame(d)
assert_frame_equal(df, expected)
def test_combineAdd(self):
# trivial
comb = self.frame.combineAdd(self.frame)
assert_frame_equal(comb, self.frame * 2)
# more rigorous
a = DataFrame([[1., nan, nan, 2., nan]],
columns=np.arange(5))
b = DataFrame([[2., 3., nan, 2., 6., nan]],
columns=np.arange(6))
expected = DataFrame([[3., 3., nan, 4., 6., nan]],
columns=np.arange(6))
result = a.combineAdd(b)
assert_frame_equal(result, expected)
result2 = a.T.combineAdd(b.T)
assert_frame_equal(result2, expected.T)
expected2 = a.combine(b, operator.add, fill_value=0.)
assert_frame_equal(expected, expected2)
# corner cases
comb = self.frame.combineAdd(self.empty)
assert_frame_equal(comb, self.frame)
comb = self.empty.combineAdd(self.frame)
assert_frame_equal(comb, self.frame)
# integer corner case
df1 = DataFrame({'x': [5]})
df2 = DataFrame({'x': [1]})
df3 = DataFrame({'x': [6]})
comb = df1.combineAdd(df2)
assert_frame_equal(comb, df3)
# mixed type GH2191
df1 = DataFrame({'A': [1, 2], 'B': [3, 4]})
df2 = DataFrame({'A': [1, 2], 'C': [5, 6]})
rs = df1.combineAdd(df2)
xp = DataFrame({'A': [2, 4], 'B': [3, 4.], 'C': [5, 6.]})
assert_frame_equal(xp, rs)
# TODO: test integer fill corner?
def test_combineMult(self):
# trivial
comb = self.frame.combineMult(self.frame)
assert_frame_equal(comb, self.frame ** 2)
# corner cases
comb = self.frame.combineMult(self.empty)
assert_frame_equal(comb, self.frame)
comb = self.empty.combineMult(self.frame)
assert_frame_equal(comb, self.frame)
def test_combine_generic(self):
df1 = self.frame
df2 = self.frame.ix[:-5, ['A', 'B', 'C']]
combined = df1.combine(df2, np.add)
combined2 = df2.combine(df1, np.add)
self.assertTrue(combined['D'].isnull().all())
self.assertTrue(combined2['D'].isnull().all())
chunk = combined.ix[:-5, ['A', 'B', 'C']]
chunk2 = combined2.ix[:-5, ['A', 'B', 'C']]
exp = self.frame.ix[:-5, ['A', 'B', 'C']].reindex_like(chunk) * 2
assert_frame_equal(chunk, exp)
assert_frame_equal(chunk2, exp)
def test_clip(self):
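# clip_upper / clip_lower / clip should bound the values at the given thresholds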
median = self.frame.median().median()
capped = self.frame.clip_upper(median)
self.assertFalse((capped.values > median).any())
floored = self.frame.clip_lower(median)
self.assertFalse((floored.values < median).any())
double = self.frame.clip(upper=median, lower=median)
self.assertFalse((double.values != median).any())
def test_dataframe_clip(self):
# GH #2747
df = DataFrame(np.random.randn(1000,2))
for lb, ub in [(-1,1),(1,-1)]:
clipped_df = df.clip(lb, ub)
lb, ub = min(lb,ub), max(ub,lb)
lb_mask = df.values <= lb
ub_mask = df.values >= ub
mask = ~lb_mask & ~ub_mask
self.assertTrue((clipped_df.values[lb_mask] == lb).all() == True)
self.assertTrue((clipped_df.values[ub_mask] == ub).all() == True)
self.assertTrue((clipped_df.values[mask] == df.values[mask]).all() == True)
def test_get_X_columns(self):
# numeric and object columns
df = DataFrame({'a': [1, 2, 3],
'b' : [True, False, True],
'c': ['foo', 'bar', 'baz'],
'd': [None, None, None],
'e': [3.14, 0.577, 2.773]})
self.assert_numpy_array_equal(df._get_numeric_data().columns,
['a', 'b', 'e'])
def test_is_mixed_type(self):
self.assertFalse(self.frame._is_mixed_type)
self.assertTrue(self.mixed_frame._is_mixed_type)
def test_get_numeric_data(self):
intname = np.dtype(np.int_).name
floatname = np.dtype(np.float_).name
datetime64name = np.dtype('M8[ns]').name
objectname = np.dtype(np.object_).name
df = DataFrame({'a': 1., 'b': 2, 'c': 'foo', 'f' : Timestamp('20010102')},
index=np.arange(10))
result = df.get_dtype_counts()
expected = Series({'int64': 1, 'float64' : 1, datetime64name: 1, objectname : 1})
result.sort_index()
expected.sort_index()
assert_series_equal(result, expected)
df = DataFrame({'a': 1., 'b': 2, 'c': 'foo',
'd' : np.array([1.]*10,dtype='float32'),
'e' : np.array([1]*10,dtype='int32'),
'f' : np.array([1]*10,dtype='int16'),
'g' : Timestamp('20010102')},
index=np.arange(10))
result = df._get_numeric_data()
expected = df.ix[:, ['a', 'b','d','e','f']]
assert_frame_equal(result, expected)
only_obj = df.ix[:, ['c','g']]
result = only_obj._get_numeric_data()
expected = df.ix[:, []]
assert_frame_equal(result, expected)
df = DataFrame.from_dict({'a':[1,2], 'b':['foo','bar'],'c':[np.pi,np.e]})
result = df._get_numeric_data()
expected = DataFrame.from_dict({'a':[1,2], 'c':[np.pi,np.e]})
assert_frame_equal(result, expected)
df = result.copy()
result = df._get_numeric_data()
expected = df
assert_frame_equal(result, expected)
def test_bool_describe_in_mixed_frame(self):
df = DataFrame({
'string_data': ['a', 'b', 'c', 'd', 'e'],
'bool_data': [True, True, False, False, False],
'int_data': [10, 20, 30, 40, 50],
})
# Boolean and integer data are included in .describe() output; string data is not
self.assert_numpy_array_equal(df.describe().columns, ['bool_data', 'int_data'])
bool_describe = df.describe()['bool_data']
# Both the min and the max values should stay booleans
self.assertEqual(bool_describe['min'].dtype, np.bool_)
self.assertEqual(bool_describe['max'].dtype, np.bool_)
self.assertFalse(bool_describe['min'])
self.assertTrue(bool_describe['max'])
# For numeric operations, like mean or median, the values True/False are cast to
# the integer values 1 and 0
assert_almost_equal(bool_describe['mean'], 0.4)
assert_almost_equal(bool_describe['50%'], 0)
def test_reduce_mixed_frame(self):
# GH 6806
df = DataFrame({
'bool_data': [True, True, False, False, False],
'int_data': [10, 20, 30, 40, 50],
'string_data': ['a', 'b', 'c', 'd', 'e'],
})
df.reindex(columns=['bool_data', 'int_data', 'string_data'])
test = df.sum(axis=0)
assert_almost_equal(test.values, [2, 150, 'abcde'])
assert_series_equal(test, df.T.sum(axis=1))
def test_count(self):
f = lambda s: notnull(s).sum()
self._check_stat_op('count', f,
has_skipna=False,
has_numeric_only=True,
check_dtype=False,
check_dates=True)
# corner case
frame = DataFrame()
ct1 = frame.count(1)
tm.assert_isinstance(ct1, Series)
ct2 = frame.count(0)
tm.assert_isinstance(ct2, Series)
# GH #423
df = DataFrame(index=lrange(10))
result = df.count(1)
expected = Series(0, index=df.index)
assert_series_equal(result, expected)
df = DataFrame(columns=lrange(10))
result = df.count(0)
expected = Series(0, index=df.columns)
assert_series_equal(result, expected)
df = DataFrame()
result = df.count()
expected = Series(0, index=[])
assert_series_equal(result, expected)
def test_sum(self):
self._check_stat_op('sum', np.sum, has_numeric_only=True)
# mixed types (with upcasting happening)
self._check_stat_op('sum', np.sum, frame=self.mixed_float.astype('float32'),
has_numeric_only=True, check_dtype=False, check_less_precise=True)
def test_stat_operators_attempt_obj_array(self):
data = {
'a': [-0.00049987540199591344, -0.0016467257772919831,
0.00067695870775883013],
'b': [-0, -0, 0.0],
'c': [0.00031111847529610595, 0.0014902627951905339,
-0.00094099200035979691]
}
df1 = DataFrame(data, index=['foo', 'bar', 'baz'],
dtype='O')
methods = ['sum', 'mean', 'prod', 'var', 'std', 'skew', 'min', 'max']
# GH #676
df2 = DataFrame({0: [np.nan, 2], 1: [np.nan, 3],
2: [np.nan, 4]}, dtype=object)
for df in [df1, df2]:
for meth in methods:
self.assertEqual(df.values.dtype, np.object_)
result = getattr(df, meth)(1)
expected = getattr(df.astype('f8'), meth)(1)
assert_series_equal(result, expected)
def test_mean(self):
self._check_stat_op('mean', np.mean, check_dates=True)
def test_product(self):
self._check_stat_op('product', np.prod)
def test_median(self):
def wrapper(x):
if isnull(x).any():
return np.nan
return np.median(x)
self._check_stat_op('median', wrapper, check_dates=True)
def test_min(self):
self._check_stat_op('min', np.min, check_dates=True)
self._check_stat_op('min', np.min, frame=self.intframe)
def test_cummin(self):
self.tsframe.ix[5:10, 0] = nan
self.tsframe.ix[10:15, 1] = nan
self.tsframe.ix[15:, 2] = nan
# axis = 0
cummin = self.tsframe.cummin()
expected = self.tsframe.apply(Series.cummin)
assert_frame_equal(cummin, expected)
# axis = 1
cummin = self.tsframe.cummin(axis=1)
expected = self.tsframe.apply(Series.cummin, axis=1)
assert_frame_equal(cummin, expected)
# works
df = DataFrame({'A': np.arange(20)}, index=np.arange(20))
result = df.cummin()
# regression check: axis=1 result keeps the original shape
cummin_xs = self.tsframe.cummin(axis=1)
self.assertEqual(np.shape(cummin_xs), np.shape(self.tsframe))
def test_cummax(self):
self.tsframe.ix[5:10, 0] = nan
self.tsframe.ix[10:15, 1] = nan
self.tsframe.ix[15:, 2] = nan
# axis = 0
cummax = self.tsframe.cummax()
expected = self.tsframe.apply(Series.cummax)
assert_frame_equal(cummax, expected)
# axis = 1
cummax = self.tsframe.cummax(axis=1)
expected = self.tsframe.apply(Series.cummax, axis=1)
assert_frame_equal(cummax, expected)
# works
df = DataFrame({'A': np.arange(20)}, index=np.arange(20))
result = df.cummax()
# regression check: axis=1 result keeps the original shape
cummax_xs = self.tsframe.cummax(axis=1)
self.assertEqual(np.shape(cummax_xs), np.shape(self.tsframe))
def test_max(self):
self._check_stat_op('max', np.max, check_dates=True)
self._check_stat_op('max', np.max, frame=self.intframe)
def test_mad(self):
f = lambda x: np.abs(x - x.mean()).mean()
self._check_stat_op('mad', f)
def test_var_std(self):
alt = lambda x: np.var(x, ddof=1)
self._check_stat_op('var', alt)
alt = lambda x: np.std(x, ddof=1)
self._check_stat_op('std', alt)
result = self.tsframe.std(ddof=4)
expected = self.tsframe.apply(lambda x: x.std(ddof=4))
assert_almost_equal(result, expected)
result = self.tsframe.var(ddof=4)
expected = self.tsframe.apply(lambda x: x.var(ddof=4))
assert_almost_equal(result, expected)
arr = np.repeat(np.random.random((1, 1000)), 1000, 0)
result = nanops.nanvar(arr, axis=0)
self.assertFalse((result < 0).any())
if nanops._USE_BOTTLENECK:
nanops._USE_BOTTLENECK = False
result = nanops.nanvar(arr, axis=0)
self.assertFalse((result < 0).any())
nanops._USE_BOTTLENECK = True
def test_sem(self):
alt = lambda x: np.std(x, ddof=1)/np.sqrt(len(x))
self._check_stat_op('sem', alt)
result = self.tsframe.sem(ddof=4)
expected = self.tsframe.apply(lambda x: x.std(ddof=4)/np.sqrt(len(x)))
assert_almost_equal(result, expected)
arr = np.repeat(np.random.random((1, 1000)), 1000, 0)
result = nanops.nansem(arr, axis=0)
self.assertFalse((result < 0).any())
if nanops._USE_BOTTLENECK:
nanops._USE_BOTTLENECK = False
result = nanops.nansem(arr, axis=0)
self.assertFalse((result < 0).any())
nanops._USE_BOTTLENECK = True
def test_skew(self):
tm._skip_if_no_scipy()
from scipy.stats import skew
def alt(x):
if len(x) < 3:
return np.nan
return skew(x, bias=False)
self._check_stat_op('skew', alt)
def test_kurt(self):
tm._skip_if_no_scipy()
from scipy.stats import kurtosis
def alt(x):
if len(x) < 4:
return np.nan
return kurtosis(x, bias=False)
self._check_stat_op('kurt', alt)
index = MultiIndex(levels=[['bar'], ['one', 'two', 'three'], [0, 1]],
labels=[[0, 0, 0, 0, 0, 0],
[0, 1, 2, 0, 1, 2],
[0, 1, 0, 1, 0, 1]])
df = DataFrame(np.random.randn(6, 3), index=index)
assert_series_equal(df.kurt(), df.kurt(level=0).xs('bar'))
def _check_stat_op(self, name, alternative, frame=None, has_skipna=True,
has_numeric_only=False, check_dtype=True, check_dates=False,
check_less_precise=False):
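# shared helper: compares a reduction method against an alternative implementation,
# checking skipna behavior, result dtypes, axis handling, mixed-type frames, and the all-NA case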
if frame is None:
frame = self.frame
# set some NAs
frame.ix[5:10] = np.nan
frame.ix[15:20, -2:] = np.nan
f = getattr(frame, name)
if check_dates:
df = DataFrame({'b': date_range('1/1/2001', periods=2)})
_f = getattr(df, name)
result = _f()
self.assertIsInstance(result, Series)
df['a'] = lrange(len(df))
result = getattr(df, name)()
self.assertIsInstance(result, Series)
self.assertTrue(len(result))
if has_skipna:
def skipna_wrapper(x):
nona = x.dropna()
if len(nona) == 0:
return np.nan
return alternative(nona)
def wrapper(x):
return alternative(x.values)
result0 = f(axis=0, skipna=False)
result1 = f(axis=1, skipna=False)
assert_series_equal(result0, frame.apply(wrapper),
check_dtype=check_dtype,
check_less_precise=check_less_precise)
assert_series_equal(result1, frame.apply(wrapper, axis=1),
check_dtype=False,
check_less_precise=check_less_precise) # HACK: win32
else:
skipna_wrapper = alternative
wrapper = alternative
result0 = f(axis=0)
result1 = f(axis=1)
assert_series_equal(result0, frame.apply(skipna_wrapper),
check_dtype=check_dtype,
check_less_precise=check_less_precise)
assert_series_equal(result1, frame.apply(skipna_wrapper, axis=1),
check_dtype=False,
check_less_precise=check_less_precise)
# check dtypes
if check_dtype:
lcd_dtype = frame.values.dtype
self.assertEqual(lcd_dtype, result0.dtype)
self.assertEqual(lcd_dtype, result1.dtype)
# result = f(axis=1)
# comp = frame.apply(alternative, axis=1).reindex(result.index)
# assert_series_equal(result, comp)
# bad axis
assertRaisesRegexp(ValueError, 'No axis named 2', f, axis=2)
# make sure works on mixed-type frame
getattr(self.mixed_frame, name)(axis=0)
getattr(self.mixed_frame, name)(axis=1)
if has_numeric_only:
getattr(self.mixed_frame, name)(axis=0, numeric_only=True)
getattr(self.mixed_frame, name)(axis=1, numeric_only=True)
getattr(self.frame, name)(axis=0, numeric_only=False)
getattr(self.frame, name)(axis=1, numeric_only=False)
# all NA case
if has_skipna:
all_na = self.frame * np.NaN
r0 = getattr(all_na, name)(axis=0)
r1 = getattr(all_na, name)(axis=1)
self.assertTrue(np.isnan(r0).all())
self.assertTrue(np.isnan(r1).all())
def test_mode(self):
df = pd.DataFrame({"A": [12, 12, 11, 12, 19, 11],
"B": [10, 10, 10, np.nan, 3, 4],
"C": [8, 8, 8, 9, 9, 9],
"D": range(6),
"E": [8, 8, 1, 1, 3, 3]})
assert_frame_equal(df[["A"]].mode(),
pd.DataFrame({"A": [12]}))
assert_frame_equal(df[["D"]].mode(),
pd.DataFrame(pd.Series([], dtype="int64"),
columns=["D"]))
assert_frame_equal(df[["E"]].mode(),
pd.DataFrame(pd.Series([1, 3, 8], dtype="int64"),
columns=["E"]))
assert_frame_equal(df[["A", "B"]].mode(),
pd.DataFrame({"A": [12], "B": [10.]}))
assert_frame_equal(df.mode(),
pd.DataFrame({"A": [12, np.nan, np.nan],
"B": [10, np.nan, np.nan],
"C": [8, 9, np.nan],
"D": [np.nan, np.nan, np.nan],
"E": [1, 3, 8]}))
# outputs in sorted order
df["C"] = list(reversed(df["C"]))
com.pprint_thing(df["C"])
com.pprint_thing(df["C"].mode())
a, b = (df[["A", "B", "C"]].mode(),
pd.DataFrame({"A": [12, np.nan],
"B": [10, np.nan],
"C": [8, 9]}))
com.pprint_thing(a)
com.pprint_thing(b)
assert_frame_equal(a, b)
# should work with heterogeneous types
df = pd.DataFrame({"A": range(6),
"B": pd.date_range('2011', periods=6),
"C": list('abcdef')})
exp = pd.DataFrame({"A": pd.Series([], dtype=df["A"].dtype),
"B": pd.Series([], dtype=df["B"].dtype),
"C": pd.Series([], dtype=df["C"].dtype)})
assert_frame_equal(df.mode(), exp)
# and also when not empty
df.loc[1, "A"] = 0
df.loc[4, "B"] = df.loc[3, "B"]
df.loc[5, "C"] = 'e'
exp = pd.DataFrame({"A": pd.Series([0], dtype=df["A"].dtype),
"B": pd.Series([df.loc[3, "B"]], dtype=df["B"].dtype),
"C": pd.Series(['e'], dtype=df["C"].dtype)})
assert_frame_equal(df.mode(), exp)
def test_sum_corner(self):
axis0 = self.empty.sum(0)
axis1 = self.empty.sum(1)
tm.assert_isinstance(axis0, Series)
tm.assert_isinstance(axis1, Series)
self.assertEqual(len(axis0), 0)
self.assertEqual(len(axis1), 0)
def test_sum_object(self):
values = self.frame.values.astype(int)
frame = DataFrame(values, index=self.frame.index,
columns=self.frame.columns)
deltas = frame * timedelta(1)
deltas.sum()
def test_sum_bool(self):
# ensure this works, bug report
bools = np.isnan(self.frame)
bools.sum(1)
bools.sum(0)
def test_mean_corner(self):
# unit test for the case where we have object data
the_mean = self.mixed_frame.mean(axis=0)
the_sum = self.mixed_frame.sum(axis=0, numeric_only=True)
self.assertTrue(the_sum.index.equals(the_mean.index))
self.assertTrue(len(the_mean.index) < len(self.mixed_frame.columns))
# cross-section sum on mixed types; just check that it runs
the_mean = self.mixed_frame.mean(axis=1)
the_sum = self.mixed_frame.sum(axis=1, numeric_only=True)
self.assertTrue(the_sum.index.equals(the_mean.index))
# take mean of boolean column
self.frame['bool'] = self.frame['A'] > 0
means = self.frame.mean(0)
self.assertEqual(means['bool'], self.frame['bool'].values.mean())
def test_stats_mixed_type(self):
# don't blow up
self.mixed_frame.std(1)
self.mixed_frame.var(1)
self.mixed_frame.mean(1)
self.mixed_frame.skew(1)
def test_median_corner(self):
def wrapper(x):
if isnull(x).any():
return np.nan
return np.median(x)
self._check_stat_op('median', wrapper, frame=self.intframe,
check_dtype=False, check_dates=True)
def test_quantile(self):
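# quantile should agree with numpy.percentile and skip non-numeric columns by default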
from numpy import percentile
q = self.tsframe.quantile(0.1, axis=0)
self.assertEqual(q['A'], percentile(self.tsframe['A'], 10))
q = self.tsframe.quantile(0.9, axis=1)
q = self.intframe.quantile(0.1)
self.assertEqual(q['A'], percentile(self.intframe['A'], 10))
# test degenerate case
q = DataFrame({'x': [], 'y': []}).quantile(0.1, axis=0)
assert(np.isnan(q['x']) and np.isnan(q['y']))
# non-numeric exclusion
df = DataFrame({'col1':['A','A','B','B'], 'col2':[1,2,3,4]})
rs = df.quantile(0.5)
xp = df.median()
assert_series_equal(rs, xp)
# axis
df = DataFrame({"A": [1, 2, 3], "B": [2, 3, 4]}, index=[1, 2, 3])
result = df.quantile(.5, axis=1)
expected = Series([1.5, 2.5, 3.5], index=[1, 2, 3])
assert_series_equal(result, expected)
result = df.quantile([.5, .75], axis=1)
expected = DataFrame({1: [1.5, 1.75], 2: [2.5, 2.75],
3: [3.5, 3.75]}, index=["0.5", "0.75"])
assert_frame_equal(result, expected)
# We may want to break API in the future to change this
# so that we exclude non-numeric along the same axis
# See GH #7312
df = DataFrame([[1, 2, 3],
['a', 'b', 4]])
result = df.quantile(.5, axis=1)
expected = Series([3., 4.], index=[0, 1])
assert_series_equal(result, expected)
def test_quantile_multi(self):
df = DataFrame([[1, 1, 1], [2, 2, 2], [3, 3, 3]],
columns=['a', 'b', 'c'])
result = df.quantile([.25, .5])
expected = DataFrame([[1.5, 1.5, 1.5], [2., 2., 2.]],
index=[.25, .5], columns=['a', 'b', 'c'])
assert_frame_equal(result, expected)
# axis = 1
result = df.quantile([.25, .5], axis=1)
expected = DataFrame([[1.5, 1.5, 1.5], [2., 2., 2.]],
index=[.25, .5], columns=[0, 1, 2])
# empty
result = DataFrame({'x': [], 'y': []}).quantile([0.1, .9], axis=0)
expected = DataFrame({'x': [np.nan, np.nan], 'y': [np.nan, np.nan]},
index=[.1, .9])
assert_frame_equal(result, expected)
def test_quantile_datetime(self):
df = DataFrame({'a': pd.to_datetime(['2010', '2011']), 'b': [0, 5]})
# exclude datetime
result = df.quantile(.5)
expected = Series([2.5], index=['b'])
# datetime
result = df.quantile(.5, numeric_only=False)
expected = Series([Timestamp('2010-07-02 12:00:00'), 2.5],
index=['a', 'b'])
assert_series_equal(result, expected)
# datetime w/ multi
result = df.quantile([.5], numeric_only=False)
expected = DataFrame([[Timestamp('2010-07-02 12:00:00'), 2.5]],
index=[.5], columns=['a', 'b'])
assert_frame_equal(result, expected)
# axis = 1
df['c'] = pd.to_datetime(['2011', '2012'])
result = df[['a', 'c']].quantile(.5, axis=1, numeric_only=False)
expected = Series([Timestamp('2010-07-02 12:00:00'),
Timestamp('2011-07-02 12:00:00')],
index=[0, 1])
assert_series_equal(result, expected)
result = df[['a', 'c']].quantile([.5], axis=1, numeric_only=False)
expected = DataFrame([[Timestamp('2010-07-02 12:00:00'),
Timestamp('2011-07-02 12:00:00')]],
index=[0.5], columns=[0, 1])
assert_frame_equal(result, expected)
def test_cumsum(self):
self.tsframe.ix[5:10, 0] = nan
self.tsframe.ix[10:15, 1] = nan
self.tsframe.ix[15:, 2] = nan
# axis = 0
cumsum = self.tsframe.cumsum()
expected = self.tsframe.apply(Series.cumsum)
assert_frame_equal(cumsum, expected)
# axis = 1
cumsum = self.tsframe.cumsum(axis=1)
expected = self.tsframe.apply(Series.cumsum, axis=1)
assert_frame_equal(cumsum, expected)
# works
df = DataFrame({'A': np.arange(20)}, index=np.arange(20))
result = df.cumsum()
# regression check: axis=1 result keeps the original shape
cumsum_xs = self.tsframe.cumsum(axis=1)
self.assertEqual(np.shape(cumsum_xs), np.shape(self.tsframe))
def test_cumprod(self):
self.tsframe.ix[5:10, 0] = nan
self.tsframe.ix[10:15, 1] = nan
self.tsframe.ix[15:, 2] = nan
# axis = 0
cumprod = self.tsframe.cumprod()
expected = self.tsframe.apply(Series.cumprod)
assert_frame_equal(cumprod, expected)
# axis = 1
cumprod = self.tsframe.cumprod(axis=1)
expected = self.tsframe.apply(Series.cumprod, axis=1)
assert_frame_equal(cumprod, expected)
# regression check: axis=1 result keeps the original shape
cumprod_xs = self.tsframe.cumprod(axis=1)
self.assertEqual(np.shape(cumprod_xs), np.shape(self.tsframe))
# ints
df = self.tsframe.fillna(0).astype(int)
df.cumprod(0)
df.cumprod(1)
# ints32
df = self.tsframe.fillna(0).astype(np.int32)
df.cumprod(0)
df.cumprod(1)
def test_rank(self):
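# rank should agree with scipy.stats.rankdata, with NaNs staying NaN in the result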
tm._skip_if_no_scipy()
from scipy.stats import rankdata
self.frame['A'][::2] = np.nan
self.frame['B'][::3] = np.nan
self.frame['C'][::4] = np.nan
self.frame['D'][::5] = np.nan
ranks0 = self.frame.rank()
ranks1 = self.frame.rank(1)
mask = np.isnan(self.frame.values)
fvals = self.frame.fillna(np.inf).values
exp0 = np.apply_along_axis(rankdata, 0, fvals)
exp0[mask] = np.nan
exp1 = np.apply_along_axis(rankdata, 1, fvals)
exp1[mask] = np.nan
assert_almost_equal(ranks0.values, exp0)
assert_almost_equal(ranks1.values, exp1)
# integers
df = DataFrame(np.random.randint(0, 5, size=40).reshape((10, 4)))
result = df.rank()
exp = df.astype(float).rank()
assert_frame_equal(result, exp)
result = df.rank(1)
exp = df.astype(float).rank(1)
assert_frame_equal(result, exp)
def test_rank2(self):
from datetime import datetime
df = DataFrame([[1, 3, 2], [1, 2, 3]])
expected = DataFrame([[1.0, 3.0, 2.0], [1, 2, 3]]) / 3.0
result = df.rank(1, pct=True)
assert_frame_equal(result, expected)
df = DataFrame([[1, 3, 2], [1, 2, 3]])
expected = df.rank(0) / 2.0
result = df.rank(0, pct=True)
assert_frame_equal(result, expected)
df = DataFrame([['b', 'c', 'a'], ['a', 'c', 'b']])
expected = DataFrame([[2.0, 3.0, 1.0], [1, 3, 2]])
result = df.rank(1, numeric_only=False)
assert_frame_equal(result, expected)
expected = DataFrame([[2.0, 1.5, 1.0], [1, 1.5, 2]])
result = df.rank(0, numeric_only=False)
assert_frame_equal(result, expected)
df = DataFrame([['b', np.nan, 'a'], ['a', 'c', 'b']])
expected = DataFrame([[2.0, nan, 1.0], [1.0, 3.0, 2.0]])
result = df.rank(1, numeric_only=False)
assert_frame_equal(result, expected)
expected = DataFrame([[2.0, nan, 1.0], [1.0, 1.0, 2.0]])
result = df.rank(0, numeric_only=False)
assert_frame_equal(result, expected)
# ranking datetime data (with NaN) requires a workaround internally
data = [[datetime(2001, 1, 5), nan, datetime(2001, 1, 2)],
[datetime(2000, 1, 2), datetime(2000, 1, 3),
datetime(2000, 1, 1)]]
df = DataFrame(data)
# check the rank
expected = DataFrame([[2., nan, 1.],
[2., 3., 1.]])
result = df.rank(1, numeric_only=False)
assert_frame_equal(result, expected)
# mixed-type frames
self.mixed_frame['datetime'] = datetime.now()
self.mixed_frame['timedelta'] = timedelta(days=1,seconds=1)
result = self.mixed_frame.rank(1)
expected = self.mixed_frame.rank(1, numeric_only=True)
assert_frame_equal(result, expected)
df = DataFrame({"a":[1e-20, -5, 1e-20+1e-40, 10, 1e60, 1e80, 1e-30]})
exp = DataFrame({"a":[ 3.5, 1. , 3.5, 5. , 6. , 7. , 2. ]})
assert_frame_equal(df.rank(), exp)
def test_rank_na_option(self):
tm._skip_if_no_scipy()
from scipy.stats import rankdata
self.frame['A'][::2] = np.nan
self.frame['B'][::3] = np.nan
self.frame['C'][::4] = np.nan
self.frame['D'][::5] = np.nan
# bottom
ranks0 = self.frame.rank(na_option='bottom')
ranks1 = self.frame.rank(1, na_option='bottom')
fvals = self.frame.fillna(np.inf).values
exp0 = np.apply_along_axis(rankdata, 0, fvals)
exp1 = np.apply_along_axis(rankdata, 1, fvals)
assert_almost_equal(ranks0.values, exp0)
assert_almost_equal(ranks1.values, exp1)
# top
ranks0 = self.frame.rank(na_option='top')
ranks1 = self.frame.rank(1, na_option='top')
fval0 = self.frame.fillna((self.frame.min() - 1).to_dict()).values
fval1 = self.frame.T
fval1 = fval1.fillna((fval1.min() - 1).to_dict()).T
fval1 = fval1.fillna(np.inf).values
exp0 = np.apply_along_axis(rankdata, 0, fval0)
exp1 = np.apply_along_axis(rankdata, 1, fval1)
assert_almost_equal(ranks0.values, exp0)
assert_almost_equal(ranks1.values, exp1)
# descending
# bottom
ranks0 = self.frame.rank(na_option='top', ascending=False)
ranks1 = self.frame.rank(1, na_option='top', ascending=False)
fvals = self.frame.fillna(np.inf).values
exp0 = np.apply_along_axis(rankdata, 0, -fvals)
exp1 = np.apply_along_axis(rankdata, 1, -fvals)
assert_almost_equal(ranks0.values, exp0)
assert_almost_equal(ranks1.values, exp1)
# descending
# top
ranks0 = self.frame.rank(na_option='bottom', ascending=False)
ranks1 = self.frame.rank(1, na_option='bottom', ascending=False)
fval0 = self.frame.fillna((self.frame.min() - 1).to_dict()).values
fval1 = self.frame.T
fval1 = fval1.fillna((fval1.min() - 1).to_dict()).T
fval1 = fval1.fillna(np.inf).values
exp0 = np.apply_along_axis(rankdata, 0, -fval0)
exp1 = np.apply_along_axis(rankdata, 1, -fval1)
assert_almost_equal(ranks0.values, exp0)
assert_almost_equal(ranks1.values, exp1)
def test_axis_aliases(self):
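# axis='index' and axis='columns' should behave as aliases for axis=0 and axis=1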
f = self.frame
# reg name
expected = f.sum(axis=0)
result = f.sum(axis='index')
assert_series_equal(result, expected)
expected = f.sum(axis=1)
result = f.sum(axis='columns')
assert_series_equal(result, expected)
def test_combine_first_mixed(self):
a = Series(['a', 'b'], index=lrange(2))
b = Series(lrange(2), index=lrange(2))
f = DataFrame({'A': a, 'B': b})
a = Series(['a', 'b'], index=lrange(5, 7))
b = Series(lrange(2), index=lrange(5, 7))
g = DataFrame({'A': a, 'B': b})
combined = f.combine_first(g)
def test_more_asMatrix(self):
values = self.mixed_frame.as_matrix()
self.assertEqual(values.shape[1], len(self.mixed_frame.columns))
def test_reindex_boolean(self):
frame = DataFrame(np.ones((10, 2), dtype=bool),
index=np.arange(0, 20, 2),
columns=[0, 2])
reindexed = frame.reindex(np.arange(10))
self.assertEqual(reindexed.values.dtype, np.object_)
self.assertTrue(isnull(reindexed[0][1]))
reindexed = frame.reindex(columns=lrange(3))
self.assertEqual(reindexed.values.dtype, np.object_)
self.assertTrue(isnull(reindexed[1]).all())
def test_reindex_objects(self):
reindexed = self.mixed_frame.reindex(columns=['foo', 'A', 'B'])
self.assertIn('foo', reindexed)
reindexed = self.mixed_frame.reindex(columns=['A', 'B'])
self.assertNotIn('foo', reindexed)
def test_reindex_corner(self):
index = Index(['a', 'b', 'c'])
dm = self.empty.reindex(index=[1, 2, 3])
reindexed = dm.reindex(columns=index)
self.assertTrue(reindexed.columns.equals(index))
# reindexing an int frame with a missing column upcasts that column to float64
smaller = self.intframe.reindex(columns=['A', 'B', 'E'])
self.assertEqual(smaller['E'].dtype, np.float64)
def test_reindex_axis(self):
cols = ['A', 'B', 'E']
reindexed1 = self.intframe.reindex_axis(cols, axis=1)
reindexed2 = self.intframe.reindex(columns=cols)
assert_frame_equal(reindexed1, reindexed2)
rows = self.intframe.index[0:5]
reindexed1 = self.intframe.reindex_axis(rows, axis=0)
reindexed2 = self.intframe.reindex(index=rows)
assert_frame_equal(reindexed1, reindexed2)
self.assertRaises(ValueError, self.intframe.reindex_axis, rows, axis=2)
# no-op case
cols = self.frame.columns.copy()
newFrame = self.frame.reindex_axis(cols, axis=1)
assert_frame_equal(newFrame, self.frame)
def test_reindex_with_nans(self):
df = DataFrame([[1, 2], [3, 4], [np.nan, np.nan], [7, 8], [9, 10]],
columns=['a', 'b'],
index=[100.0, 101.0, np.nan, 102.0, 103.0])
result = df.reindex(index=[101.0, 102.0, 103.0])
expected = df.iloc[[1, 3, 4]]
assert_frame_equal(result, expected)
result = df.reindex(index=[103.0])
expected = df.iloc[[4]]
assert_frame_equal(result, expected)
result = df.reindex(index=[101.0])
expected = df.iloc[[1]]
assert_frame_equal(result, expected)
def test_reindex_multi(self):
df = DataFrame(np.random.randn(3, 3))
result = df.reindex(lrange(4), lrange(4))
expected = df.reindex(lrange(4)).reindex(columns=lrange(4))
assert_frame_equal(result, expected)
df = DataFrame(np.random.randint(0, 10, (3, 3)))
result = df.reindex(lrange(4), lrange(4))
expected = df.reindex(lrange(4)).reindex(columns=lrange(4))
assert_frame_equal(result, expected)
df = DataFrame(np.random.randint(0, 10, (3, 3)))
result = df.reindex(lrange(2), lrange(2))
expected = df.reindex(lrange(2)).reindex(columns=lrange(2))
assert_frame_equal(result, expected)
df = DataFrame(np.random.randn(5, 3) + 1j, columns=['a', 'b', 'c'])
result = df.reindex(index=[0, 1], columns=['a', 'b'])
expected = df.reindex([0, 1]).reindex(columns=['a', 'b'])
assert_frame_equal(result, expected)
def test_rename_objects(self):
renamed = self.mixed_frame.rename(columns=str.upper)
self.assertIn('FOO', renamed)
self.assertNotIn('foo', renamed)
def test_fill_corner(self):
self.mixed_frame.ix[5:20,'foo'] = nan
self.mixed_frame.ix[-10:,'A'] = nan
filled = self.mixed_frame.fillna(value=0)
self.assertTrue((filled.ix[5:20,'foo'] == 0).all())
del self.mixed_frame['foo']
empty_float = self.frame.reindex(columns=[])
result = empty_float.fillna(value=0)
def test_count_objects(self):
dm = DataFrame(self.mixed_frame._series)
df = DataFrame(self.mixed_frame._series)
tm.assert_series_equal(dm.count(), df.count())
tm.assert_series_equal(dm.count(1), df.count(1))
def test_cumsum_corner(self):
dm = DataFrame(np.arange(20).reshape(4, 5),
index=lrange(4), columns=lrange(5))
result = dm.cumsum()
#----------------------------------------------------------------------
# Stacking / unstacking
def test_stack_unstack(self):
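# stack followed by unstack should round-trip the original frame (including unstacking level 0)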
stacked = self.frame.stack()
stacked_df = DataFrame({'foo': stacked, 'bar': stacked})
unstacked = stacked.unstack()
unstacked_df = stacked_df.unstack()
assert_frame_equal(unstacked, self.frame)
assert_frame_equal(unstacked_df['bar'], self.frame)
unstacked_cols = stacked.unstack(0)
unstacked_cols_df = stacked_df.unstack(0)
assert_frame_equal(unstacked_cols.T, self.frame)
assert_frame_equal(unstacked_cols_df['bar'].T, self.frame)
def test_stack_ints(self):
df = DataFrame(
np.random.randn(30, 27),
columns=MultiIndex.from_tuples(
list(itertools.product(range(3), repeat=3))
)
)
assert_frame_equal(
df.stack(level=[1, 2]),
df.stack(level=1).stack(level=1)
)
assert_frame_equal(
df.stack(level=[-2, -1]),
df.stack(level=1).stack(level=1)
)
df_named = df.copy()
df_named.columns.set_names(range(3), inplace=True)
assert_frame_equal(
df_named.stack(level=[1, 2]),
df_named.stack(level=1).stack(level=1)
)
def test_stack_mixed_levels(self):
columns = MultiIndex.from_tuples(
[('A', 'cat', 'long'), ('B', 'cat', 'long'),
('A', 'dog', 'short'), ('B', 'dog', 'short')],
names=['exp', 'animal', 'hair_length']
)
df = DataFrame(randn(4, 4), columns=columns)
animal_hair_stacked = df.stack(level=['animal', 'hair_length'])
exp_hair_stacked = df.stack(level=['exp', 'hair_length'])
# GH #8584: Need to check that stacking works when a number
# is passed that is both a level name and in the range of
# the level numbers
df2 = df.copy()
df2.columns.names = ['exp', 'animal', 1]
assert_frame_equal(df2.stack(level=['animal', 1]),
animal_hair_stacked, check_names=False)
assert_frame_equal(df2.stack(level=['exp', 1]),
exp_hair_stacked, check_names=False)
# When mixed types are passed and the ints are not level
# names, raise
self.assertRaises(ValueError, df2.stack, level=['animal', 0])
# GH #8584: Having 0 in the level names could raise a
# strange error about lexsort depth
df3 = df.copy()
df3.columns.names = ['exp', 'animal', 0]
assert_frame_equal(df3.stack(level=['animal', 0]),
animal_hair_stacked, check_names=False)
def test_stack_int_level_names(self):
columns = MultiIndex.from_tuples(
[('A', 'cat', 'long'), ('B', 'cat', 'long'),
('A', 'dog', 'short'), ('B', 'dog', 'short')],
names=['exp', 'animal', 'hair_length']
)
df = DataFrame(randn(4, 4), columns=columns)
exp_animal_stacked = df.stack(level=['exp', 'animal'])
animal_hair_stacked = df.stack(level=['animal', 'hair_length'])
exp_hair_stacked = df.stack(level=['exp', 'hair_length'])
df2 = df.copy()
df2.columns.names = [0, 1, 2]
assert_frame_equal(df2.stack(level=[1, 2]), animal_hair_stacked,
check_names=False )
assert_frame_equal(df2.stack(level=[0, 1]), exp_animal_stacked,
check_names=False)
assert_frame_equal(df2.stack(level=[0, 2]), exp_hair_stacked,
check_names=False)
# Out-of-order int column names
df3 = df.copy()
df3.columns.names = [2, 0, 1]
assert_frame_equal(df3.stack(level=[0, 1]), animal_hair_stacked,
check_names=False)
assert_frame_equal(df3.stack(level=[2, 0]), exp_animal_stacked,
check_names=False)
assert_frame_equal(df3.stack(level=[2, 1]), exp_hair_stacked,
check_names=False)
def test_unstack_bool(self):
df = DataFrame([False, False],
index=MultiIndex.from_arrays([['a', 'b'], ['c', 'l']]),
columns=['col'])
rs = df.unstack()
xp = DataFrame(np.array([[False, np.nan], [np.nan, False]],
dtype=object),
index=['a', 'b'],
columns=MultiIndex.from_arrays([['col', 'col'],
['c', 'l']]))
assert_frame_equal(rs, xp)
def test_unstack_to_series(self):
# check reversibility
data = self.frame.unstack()
self.assertTrue(isinstance(data, Series))
undo = data.unstack().T
assert_frame_equal(undo, self.frame)
# check NA handling
data = DataFrame({'x': [1, 2, np.NaN], 'y': [3.0, 4, np.NaN]})
data.index = Index(['a', 'b', 'c'])
result = data.unstack()
midx = MultiIndex(levels=[['x', 'y'], ['a', 'b', 'c']],
labels=[[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]])
expected = Series([1, 2, np.NaN, 3, 4, np.NaN], index=midx)
assert_series_equal(result, expected)
# check composability of unstack
old_data = data.copy()
for _ in range(4):
data = data.unstack()
assert_frame_equal(old_data, data)
def test_unstack_dtypes(self):
# GH 2929
rows = [[1, 1, 3, 4],
[1, 2, 3, 4],
[2, 1, 3, 4],
[2, 2, 3, 4]]
df = DataFrame(rows, columns=list('ABCD'))
result = df.get_dtype_counts()
expected = Series({'int64' : 4})
assert_series_equal(result, expected)
# single dtype
df2 = df.set_index(['A','B'])
df3 = df2.unstack('B')
result = df3.get_dtype_counts()
expected = Series({'int64' : 4})
assert_series_equal(result, expected)
# mixed
df2 = df.set_index(['A','B'])
df2['C'] = 3.
df3 = df2.unstack('B')
result = df3.get_dtype_counts()
expected = Series({'int64' : 2, 'float64' : 2})
assert_series_equal(result, expected)
df2['D'] = 'foo'
df3 = df2.unstack('B')
result = df3.get_dtype_counts()
expected = Series({'float64' : 2, 'object' : 2})
assert_series_equal(result, expected)
def test_unstack_non_unique_index_names(self):
idx = MultiIndex.from_tuples([('a', 'b'), ('c', 'd')],
names=['c1', 'c1'])
df = DataFrame([1, 2], index=idx)
with tm.assertRaises(ValueError):
df.unstack('c1')
with tm.assertRaises(ValueError):
df.T.stack('c1')
def test_stack_datetime_column_multiIndex(self):
# GH 8039
t = datetime(2014, 1, 1)
df = DataFrame([1, 2, 3, 4], columns=MultiIndex.from_tuples([(t, 'A', 'B')]))
result = df.stack()
eidx = MultiIndex.from_product([(0, 1, 2, 3), ('B',)])
ecols = MultiIndex.from_tuples([(t, 'A')])
expected = DataFrame([1, 2, 3, 4], index=eidx, columns=ecols)
assert_frame_equal(result, expected)
def test_stack_partial_multiIndex(self):
# GH 8844
def _test_stack_with_multiindex(multiindex):
df = DataFrame(np.arange(3 * len(multiindex)).reshape(3, len(multiindex)),
columns=multiindex)
for level in (-1, 0, 1, [0, 1], [1, 0]):
result = df.stack(level=level, dropna=False)
if isinstance(level, int):
# Stacking a single level should not make any all-NaN rows,
# so df.stack(level=level, dropna=False) should be the same
# as df.stack(level=level, dropna=True).
expected = df.stack(level=level, dropna=True)
if isinstance(expected, Series):
assert_series_equal(result, expected)
else:
assert_frame_equal(result, expected)
df.columns = MultiIndex.from_tuples(df.columns.get_values(),
names=df.columns.names)
expected = df.stack(level=level, dropna=False)
if isinstance(expected, Series):
assert_series_equal(result, expected)
else:
assert_frame_equal(result, expected)
full_multiindex = MultiIndex.from_tuples([('B', 'x'), ('B', 'z'),
('A', 'y'),
('C', 'x'), ('C', 'u')],
names=['Upper', 'Lower'])
for multiindex_columns in ([0, 1, 2, 3, 4],
[0, 1, 2, 3], [0, 1, 2, 4],
[0, 1, 2], [1, 2, 3], [2, 3, 4],
[0, 1], [0, 2], [0, 3],
[0], [2], [4]):
_test_stack_with_multiindex(full_multiindex[multiindex_columns])
if len(multiindex_columns) > 1:
multiindex_columns.reverse()
_test_stack_with_multiindex(full_multiindex[multiindex_columns])
df = DataFrame(np.arange(6).reshape(2, 3), columns=full_multiindex[[0, 1, 3]])
result = df.stack(dropna=False)
expected = DataFrame([[0, 2], [1, nan], [3, 5], [4, nan]],
index=MultiIndex(levels=[[0, 1], ['u', 'x', 'y', 'z']],
labels=[[0, 0, 1, 1], [1, 3, 1, 3]],
names=[None, 'Lower']),
columns=Index(['B', 'C'], name='Upper'),
dtype=df.dtypes[0])
assert_frame_equal(result, expected)
def test_repr_with_mi_nat(self):
df = DataFrame({'X': [1, 2]},
index=[[pd.NaT, pd.Timestamp('20130101')], ['a', 'b']])
res = repr(df)
        exp = '              X\nNaT        a  1\n2013-01-01 b  2'
nose.tools.assert_equal(res, exp)
def test_reset_index(self):
stacked = self.frame.stack()[::2]
stacked = DataFrame({'foo': stacked, 'bar': stacked})
names = ['first', 'second']
stacked.index.names = names
deleveled = stacked.reset_index()
for i, (lev, lab) in enumerate(zip(stacked.index.levels,
stacked.index.labels)):
values = lev.take(lab)
name = names[i]
assert_almost_equal(values, deleveled[name])
stacked.index.names = [None, None]
deleveled2 = stacked.reset_index()
self.assert_numpy_array_equal(deleveled['first'],
deleveled2['level_0'])
self.assert_numpy_array_equal(deleveled['second'],
deleveled2['level_1'])
# default name assigned
rdf = self.frame.reset_index()
self.assert_numpy_array_equal(rdf['index'], self.frame.index.values)
# default name assigned, corner case
df = self.frame.copy()
df['index'] = 'foo'
rdf = df.reset_index()
self.assert_numpy_array_equal(rdf['level_0'], self.frame.index.values)
# but this is ok
self.frame.index.name = 'index'
deleveled = self.frame.reset_index()
self.assert_numpy_array_equal(deleveled['index'],
self.frame.index.values)
self.assert_numpy_array_equal(deleveled.index,
np.arange(len(deleveled)))
# preserve column names
self.frame.columns.name = 'columns'
resetted = self.frame.reset_index()
self.assertEqual(resetted.columns.name, 'columns')
# only remove certain columns
frame = self.frame.reset_index().set_index(['index', 'A', 'B'])
rs = frame.reset_index(['A', 'B'])
assert_frame_equal(rs, self.frame, check_names=False) # TODO should reset_index check_names ?
rs = frame.reset_index(['index', 'A', 'B'])
assert_frame_equal(rs, self.frame.reset_index(), check_names=False)
rs = frame.reset_index('A')
xp = self.frame.reset_index().set_index(['index', 'B'])
assert_frame_equal(rs, xp, check_names=False)
# test resetting in place
df = self.frame.copy()
resetted = self.frame.reset_index()
df.reset_index(inplace=True)
assert_frame_equal(df, resetted, check_names=False)
frame = self.frame.reset_index().set_index(['index', 'A', 'B'])
rs = frame.reset_index('A', drop=True)
xp = self.frame.copy()
del xp['A']
xp = xp.set_index(['B'], append=True)
assert_frame_equal(rs, xp, check_names=False)
def test_reset_index_right_dtype(self):
time = np.arange(0.0, 10, np.sqrt(2) / 2)
s1 = Series((9.81 * time ** 2) / 2,
index=Index(time, name='time'),
name='speed')
df = DataFrame(s1)
resetted = s1.reset_index()
self.assertEqual(resetted['time'].dtype, np.float64)
resetted = df.reset_index()
self.assertEqual(resetted['time'].dtype, np.float64)
def test_reset_index_multiindex_col(self):
vals = np.random.randn(3, 3).astype(object)
idx = ['x', 'y', 'z']
full = np.hstack(([[x] for x in idx], vals))
df = DataFrame(vals, Index(idx, name='a'),
columns=[['b', 'b', 'c'], ['mean', 'median', 'mean']])
rs = df.reset_index()
xp = DataFrame(full, columns=[['a', 'b', 'b', 'c'],
['', 'mean', 'median', 'mean']])
assert_frame_equal(rs, xp)
rs = df.reset_index(col_fill=None)
xp = DataFrame(full, columns=[['a', 'b', 'b', 'c'],
['a', 'mean', 'median', 'mean']])
assert_frame_equal(rs, xp)
rs = df.reset_index(col_level=1, col_fill='blah')
xp = DataFrame(full, columns=[['blah', 'b', 'b', 'c'],
['a', 'mean', 'median', 'mean']])
assert_frame_equal(rs, xp)
df = DataFrame(vals,
MultiIndex.from_arrays([[0, 1, 2], ['x', 'y', 'z']],
names=['d', 'a']),
columns=[['b', 'b', 'c'], ['mean', 'median', 'mean']])
rs = df.reset_index('a', )
xp = DataFrame(full, Index([0, 1, 2], name='d'),
columns=[['a', 'b', 'b', 'c'],
['', 'mean', 'median', 'mean']])
assert_frame_equal(rs, xp)
rs = df.reset_index('a', col_fill=None)
xp = DataFrame(full, Index(lrange(3), name='d'),
columns=[['a', 'b', 'b', 'c'],
['a', 'mean', 'median', 'mean']])
assert_frame_equal(rs, xp)
rs = df.reset_index('a', col_fill='blah', col_level=1)
xp = DataFrame(full, Index(lrange(3), name='d'),
columns=[['blah', 'b', 'b', 'c'],
['a', 'mean', 'median', 'mean']])
assert_frame_equal(rs, xp)
def test_reset_index_with_datetimeindex_cols(self):
# GH5818
#
df = pd.DataFrame([[1, 2], [3, 4]],
columns=pd.date_range('1/1/2013', '1/2/2013'),
index=['A', 'B'])
result = df.reset_index()
expected = pd.DataFrame([['A', 1, 2], ['B', 3, 4]],
columns=['index', datetime(2013, 1, 1),
datetime(2013, 1, 2)])
assert_frame_equal(result, expected)
#----------------------------------------------------------------------
# Tests to cope with refactored internals
def test_as_matrix_numeric_cols(self):
self.frame['foo'] = 'bar'
values = self.frame.as_matrix(['A', 'B', 'C', 'D'])
self.assertEqual(values.dtype, np.float64)
def test_as_matrix_lcd(self):
# mixed lcd
values = self.mixed_float.as_matrix(['A', 'B', 'C', 'D'])
self.assertEqual(values.dtype, np.float64)
values = self.mixed_float.as_matrix(['A', 'B', 'C' ])
self.assertEqual(values.dtype, np.float32)
values = self.mixed_float.as_matrix(['C'])
self.assertEqual(values.dtype, np.float16)
values = self.mixed_int.as_matrix(['A','B','C','D'])
self.assertEqual(values.dtype, np.int64)
values = self.mixed_int.as_matrix(['A','D'])
self.assertEqual(values.dtype, np.int64)
# guess all ints are cast to uints....
values = self.mixed_int.as_matrix(['A','B','C'])
self.assertEqual(values.dtype, np.int64)
values = self.mixed_int.as_matrix(['A','C'])
self.assertEqual(values.dtype, np.int32)
values = self.mixed_int.as_matrix(['C','D'])
self.assertEqual(values.dtype, np.int64)
values = self.mixed_int.as_matrix(['A'])
self.assertEqual(values.dtype, np.int32)
values = self.mixed_int.as_matrix(['C'])
self.assertEqual(values.dtype, np.uint8)
def test_constructor_with_convert(self):
# this is actually mostly a test of lib.maybe_convert_objects
# #2845
df = DataFrame({'A' : [2**63-1] })
result = df['A']
expected = Series(np.asarray([2**63-1], np.int64))
assert_series_equal(result, expected)
df = DataFrame({'A' : [2**63] })
result = df['A']
expected = Series(np.asarray([2**63], np.object_))
assert_series_equal(result, expected)
df = DataFrame({'A' : [datetime(2005, 1, 1), True] })
result = df['A']
expected = Series(np.asarray([datetime(2005, 1, 1), True], np.object_))
assert_series_equal(result, expected)
df = DataFrame({'A' : [None, 1] })
result = df['A']
expected = Series(np.asarray([np.nan, 1], np.float_))
assert_series_equal(result, expected)
df = DataFrame({'A' : [1.0, 2] })
result = df['A']
expected = Series(np.asarray([1.0, 2], np.float_))
assert_series_equal(result, expected)
df = DataFrame({'A' : [1.0+2.0j, 3] })
result = df['A']
expected = Series(np.asarray([1.0+2.0j, 3], np.complex_))
assert_series_equal(result, expected)
df = DataFrame({'A' : [1.0+2.0j, 3.0] })
result = df['A']
expected = Series(np.asarray([1.0+2.0j, 3.0], np.complex_))
assert_series_equal(result, expected)
df = DataFrame({'A' : [1.0+2.0j, True] })
result = df['A']
expected = Series(np.asarray([1.0+2.0j, True], np.object_))
assert_series_equal(result, expected)
df = DataFrame({'A' : [1.0, None] })
result = df['A']
expected = Series(np.asarray([1.0, np.nan], np.float_))
assert_series_equal(result, expected)
df = DataFrame({'A' : [1.0+2.0j, None] })
result = df['A']
expected = Series(np.asarray([1.0+2.0j, np.nan], np.complex_))
assert_series_equal(result, expected)
df = DataFrame({'A' : [2.0, 1, True, None] })
result = df['A']
expected = Series(np.asarray([2.0, 1, True, None], np.object_))
assert_series_equal(result, expected)
df = DataFrame({'A' : [2.0, 1, datetime(2006, 1, 1), None] })
result = df['A']
expected = Series(np.asarray([2.0, 1, datetime(2006, 1, 1),
None], np.object_))
assert_series_equal(result, expected)
def test_construction_with_mixed(self):
# test construction edge cases with mixed types
# f7u12, this does not work without extensive workaround
data = [[datetime(2001, 1, 5), nan, datetime(2001, 1, 2)],
[datetime(2000, 1, 2), datetime(2000, 1, 3),
datetime(2000, 1, 1)]]
df = DataFrame(data)
# check dtypes
result = df.get_dtype_counts().order()
        expected = Series({'datetime64[ns]': 3})
        assert_series_equal(result, expected)
# mixed-type frames
self.mixed_frame['datetime'] = datetime.now()
self.mixed_frame['timedelta'] = timedelta(days=1,seconds=1)
self.assertEqual(self.mixed_frame['datetime'].dtype, 'M8[ns]')
self.assertEqual(self.mixed_frame['timedelta'].dtype, 'm8[ns]')
result = self.mixed_frame.get_dtype_counts().order()
expected = Series({ 'float64' : 4,
'object' : 1,
'datetime64[ns]' : 1,
'timedelta64[ns]' : 1}).order()
assert_series_equal(result,expected)
def test_construction_with_conversions(self):
# convert from a numpy array of non-ns timedelta64
arr = np.array([1,2,3],dtype='timedelta64[s]')
s = Series(arr)
expected = Series(timedelta_range('00:00:01',periods=3,freq='s'))
assert_series_equal(s,expected)
df = DataFrame(index=range(3))
df['A'] = arr
expected = DataFrame({'A' : timedelta_range('00:00:01',periods=3,freq='s')},
index=range(3))
assert_frame_equal(df,expected)
# convert from a numpy array of non-ns datetime64
#### note that creating a numpy datetime64 is in LOCAL time!!!!
#### seems to work for M8[D], but not for M8[s]
s = Series(np.array(['2013-01-01','2013-01-02','2013-01-03'],dtype='datetime64[D]'))
assert_series_equal(s,Series(date_range('20130101',periods=3,freq='D')))
#s = Series(np.array(['2013-01-01 00:00:01','2013-01-01 00:00:02','2013-01-01 00:00:03'],dtype='datetime64[s]'))
#assert_series_equal(s,date_range('20130101 00:00:01',period=3,freq='s'))
expected = DataFrame({
'dt1' : Timestamp('20130101'),
'dt2' : date_range('20130101',periods=3),
#'dt3' : date_range('20130101 00:00:01',periods=3,freq='s'),
},index=range(3))
df = DataFrame(index=range(3))
df['dt1'] = np.datetime64('2013-01-01')
df['dt2'] = np.array(['2013-01-01','2013-01-02','2013-01-03'],dtype='datetime64[D]')
#df['dt3'] = np.array(['2013-01-01 00:00:01','2013-01-01 00:00:02','2013-01-01 00:00:03'],dtype='datetime64[s]')
assert_frame_equal(df, expected)
def test_constructor_frame_copy(self):
cop = DataFrame(self.frame, copy=True)
cop['A'] = 5
self.assertTrue((cop['A'] == 5).all())
self.assertFalse((self.frame['A'] == 5).all())
def test_constructor_ndarray_copy(self):
df = DataFrame(self.frame.values)
self.frame.values[5] = 5
self.assertTrue((df.values[5] == 5).all())
df = DataFrame(self.frame.values, copy=True)
self.frame.values[6] = 6
self.assertFalse((df.values[6] == 6).all())
def test_constructor_series_copy(self):
series = self.frame._series
df = DataFrame({'A': series['A']})
df['A'][:] = 5
self.assertFalse((series['A'] == 5).all())
def test_constructor_compound_dtypes(self):
# GH 5191
        # compound dtypes should raise NotImplementedError
def f(dtype):
return DataFrame(data = list(itertools.repeat((datetime(2001, 1, 1), "aa", 20), 9)),
columns=["A", "B", "C"], dtype=dtype)
self.assertRaises(NotImplementedError, f, [("A","datetime64[h]"), ("B","str"), ("C","int32")])
# these work (though results may be unexpected)
f('int64')
f('float64')
f('M8[ns]')
def test_assign_columns(self):
self.frame['hi'] = 'there'
frame = self.frame.copy()
frame.columns = ['foo', 'bar', 'baz', 'quux', 'foo2']
assert_series_equal(self.frame['C'], frame['baz'])
assert_series_equal(self.frame['hi'], frame['foo2'])
def test_columns_with_dups(self):
# GH 3468 related
# basic
df = DataFrame([[1,2]], columns=['a','a'])
df.columns = ['a','a.1']
str(df)
expected = DataFrame([[1,2]], columns=['a','a.1'])
assert_frame_equal(df, expected)
df = DataFrame([[1,2,3]], columns=['b','a','a'])
df.columns = ['b','a','a.1']
str(df)
expected = DataFrame([[1,2,3]], columns=['b','a','a.1'])
assert_frame_equal(df, expected)
# with a dup index
df = DataFrame([[1,2]], columns=['a','a'])
df.columns = ['b','b']
str(df)
expected = DataFrame([[1,2]], columns=['b','b'])
assert_frame_equal(df, expected)
# multi-dtype
df = DataFrame([[1,2,1.,2.,3.,'foo','bar']], columns=['a','a','b','b','d','c','c'])
df.columns = list('ABCDEFG')
str(df)
expected = DataFrame([[1,2,1.,2.,3.,'foo','bar']], columns=list('ABCDEFG'))
assert_frame_equal(df, expected)
# this is an error because we cannot disambiguate the dup columns
        self.assertRaises(Exception, lambda: DataFrame([[1,2,'foo','bar']], columns=['a','a','a','a']))
# dups across blocks
df_float = DataFrame(np.random.randn(10, 3),dtype='float64')
df_int = DataFrame(np.random.randn(10, 3),dtype='int64')
df_bool = DataFrame(True,index=df_float.index,columns=df_float.columns)
df_object = DataFrame('foo',index=df_float.index,columns=df_float.columns)
df_dt = DataFrame(Timestamp('20010101'),index=df_float.index,columns=df_float.columns)
df = pd.concat([ df_float, df_int, df_bool, df_object, df_dt ], axis=1)
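        # after concatenating frames of different dtypes, the internal block
        # number/location maps should cover every column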
self.assertEqual(len(df._data._blknos), len(df.columns))
self.assertEqual(len(df._data._blklocs), len(df.columns))
# testing iget
for i in range(len(df.columns)):
df.iloc[:,i]
# dup columns across dtype GH 2079/2194
vals = [[1, -1, 2.], [2, -2, 3.]]
rs = DataFrame(vals, columns=['A', 'A', 'B'])
xp = DataFrame(vals)
xp.columns = ['A', 'A', 'B']
assert_frame_equal(rs, xp)
def test_insert_column_bug_4032(self):
# GH4032, inserting a column and renaming causing errors
df = DataFrame({'b': [1.1, 2.2]})
df = df.rename(columns={})
df.insert(0, 'a', [1, 2])
result = df.rename(columns={})
str(result)
expected = DataFrame([[1,1.1],[2, 2.2]],columns=['a','b'])
assert_frame_equal(result,expected)
df.insert(0, 'c', [1.3, 2.3])
result = df.rename(columns={})
str(result)
expected = DataFrame([[1.3,1,1.1],[2.3,2, 2.2]],columns=['c','a','b'])
assert_frame_equal(result,expected)
def test_cast_internals(self):
casted = DataFrame(self.frame._data, dtype=int)
expected = DataFrame(self.frame._series, dtype=int)
assert_frame_equal(casted, expected)
casted = DataFrame(self.frame._data, dtype=np.int32)
expected = DataFrame(self.frame._series, dtype=np.int32)
assert_frame_equal(casted, expected)
def test_consolidate(self):
self.frame['E'] = 7.
consolidated = self.frame.consolidate()
self.assertEqual(len(consolidated._data.blocks), 1)
# Ensure copy, do I want this?
recons = consolidated.consolidate()
self.assertIsNot(recons, consolidated)
assert_frame_equal(recons, consolidated)
self.frame['F'] = 8.
self.assertEqual(len(self.frame._data.blocks), 3)
self.frame.consolidate(inplace=True)
self.assertEqual(len(self.frame._data.blocks), 1)
def test_consolidate_inplace(self):
frame = self.frame.copy()
# triggers in-place consolidation
for letter in range(ord('A'), ord('Z')):
self.frame[chr(letter)] = chr(letter)
def test_as_matrix_consolidate(self):
self.frame['E'] = 7.
self.assertFalse(self.frame._data.is_consolidated())
_ = self.frame.as_matrix()
self.assertTrue(self.frame._data.is_consolidated())
def test_modify_values(self):
self.frame.values[5] = 5
self.assertTrue((self.frame.values[5] == 5).all())
# unconsolidated
self.frame['E'] = 7.
self.frame.values[6] = 6
self.assertTrue((self.frame.values[6] == 6).all())
def test_boolean_set_uncons(self):
self.frame['E'] = 7.
expected = self.frame.values.copy()
expected[expected > 1] = 2
self.frame[self.frame > 1] = 2
assert_almost_equal(expected, self.frame.values)
def test_xs_view(self):
"""
        In 0.14 this will return a view if possible, a copy otherwise;
        the behaviour is numpy dependent.
"""
dm = DataFrame(np.arange(20.).reshape(4, 5),
index=lrange(4), columns=lrange(5))
dm.xs(2)[:] = 10
self.assertTrue((dm.xs(2) == 10).all())
def test_boolean_indexing(self):
idx = lrange(3)
cols = ['A','B','C']
df1 = DataFrame(index=idx, columns=cols,
data=np.array([[0.0, 0.5, 1.0],
[1.5, 2.0, 2.5],
[3.0, 3.5, 4.0]],
dtype=float))
df2 = DataFrame(index=idx, columns=cols,
data=np.ones((len(idx), len(cols))))
expected = DataFrame(index=idx, columns=cols,
data=np.array([[0.0, 0.5, 1.0],
[1.5, 2.0, -1],
[-1, -1, -1]], dtype=float))
df1[df1 > 2.0 * df2] = -1
assert_frame_equal(df1, expected)
with assertRaisesRegexp(ValueError, 'Item wrong length'):
df1[df1.index[:-1] > 2] = -1
def test_boolean_indexing_mixed(self):
df = DataFrame(
{long(0): {35: np.nan, 40: np.nan, 43: np.nan, 49: np.nan, 50: np.nan},
long(1): {35: np.nan,
40: 0.32632316859446198,
43: np.nan,
49: 0.32632316859446198,
50: 0.39114724480578139},
long(2): {35: np.nan, 40: np.nan, 43: 0.29012581014105987, 49: np.nan, 50: np.nan},
long(3): {35: np.nan, 40: np.nan, 43: np.nan, 49: np.nan, 50: np.nan},
long(4): {35: 0.34215328467153283, 40: np.nan, 43: np.nan, 49: np.nan, 50: np.nan},
'y': {35: 0, 40: 0, 43: 0, 49: 0, 50: 1}})
# mixed int/float ok
df2 = df.copy()
df2[df2>0.3] = 1
expected = df.copy()
expected.loc[40,1] = 1
expected.loc[49,1] = 1
expected.loc[50,1] = 1
expected.loc[35,4] = 1
assert_frame_equal(df2,expected)
df['foo'] = 'test'
with tm.assertRaisesRegexp(TypeError, 'boolean setting on mixed-type'):
df[df > 0.3] = 1
def test_sum_bools(self):
df = DataFrame(index=lrange(1), columns=lrange(10))
bools = isnull(df)
self.assertEqual(bools.sum(axis=1)[0], 10)
def test_fillna_col_reordering(self):
idx = lrange(20)
cols = ["COL." + str(i) for i in range(5, 0, -1)]
data = np.random.rand(20, 5)
df = DataFrame(index=lrange(20), columns=cols, data=data)
filled = df.fillna(method='ffill')
self.assertEqual(df.columns.tolist(), filled.columns.tolist())
def test_take(self):
# homogeneous
#----------------------------------------
order = [3, 1, 2, 0]
for df in [self.frame]:
result = df.take(order, axis=0)
expected = df.reindex(df.index.take(order))
assert_frame_equal(result, expected)
# axis = 1
result = df.take(order, axis=1)
expected = df.ix[:, ['D', 'B', 'C', 'A']]
assert_frame_equal(result, expected, check_names=False)
        # neg indices
order = [2,1,-1]
for df in [self.frame]:
result = df.take(order, axis=0)
expected = df.reindex(df.index.take(order))
assert_frame_equal(result, expected)
# axis = 1
result = df.take(order, axis=1)
expected = df.ix[:, ['C', 'B', 'D']]
assert_frame_equal(result, expected, check_names=False)
# illegal indices
self.assertRaises(IndexError, df.take, [3,1,2,30], axis=0)
self.assertRaises(IndexError, df.take, [3,1,2,-31], axis=0)
self.assertRaises(IndexError, df.take, [3,1,2,5], axis=1)
self.assertRaises(IndexError, df.take, [3,1,2,-5], axis=1)
# mixed-dtype
#----------------------------------------
order = [4, 1, 2, 0, 3]
for df in [self.mixed_frame]:
result = df.take(order, axis=0)
expected = df.reindex(df.index.take(order))
assert_frame_equal(result, expected)
# axis = 1
result = df.take(order, axis=1)
expected = df.ix[:, ['foo', 'B', 'C', 'A', 'D']]
assert_frame_equal(result, expected)
        # neg indices
order = [4,1,-2]
for df in [self.mixed_frame]:
result = df.take(order, axis=0)
expected = df.reindex(df.index.take(order))
assert_frame_equal(result, expected)
# axis = 1
result = df.take(order, axis=1)
expected = df.ix[:, ['foo', 'B', 'D']]
assert_frame_equal(result, expected)
# by dtype
order = [1, 2, 0, 3]
for df in [self.mixed_float,self.mixed_int]:
result = df.take(order, axis=0)
expected = df.reindex(df.index.take(order))
assert_frame_equal(result, expected)
# axis = 1
result = df.take(order, axis=1)
expected = df.ix[:, ['B', 'C', 'A', 'D']]
assert_frame_equal(result, expected)
def test_iterkv_deprecation(self):
with tm.assert_produces_warning(DeprecationWarning):
self.mixed_float.iterkv()
def test_iterkv_names(self):
for k, v in compat.iteritems(self.mixed_frame):
self.assertEqual(v.name, k)
def test_series_put_names(self):
series = self.mixed_frame._series
for k, v in compat.iteritems(series):
self.assertEqual(v.name, k)
def test_dot(self):
a = DataFrame(np.random.randn(3, 4), index=['a', 'b', 'c'],
columns=['p', 'q', 'r', 's'])
b = DataFrame(np.random.randn(4, 2), index=['p', 'q', 'r', 's'],
columns=['one', 'two'])
result = a.dot(b)
expected = DataFrame(np.dot(a.values, b.values),
index=['a', 'b', 'c'],
columns=['one', 'two'])
# Check alignment
b1 = b.reindex(index=reversed(b.index))
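        # DataFrame.dot aligns the other frame's index with this frame's
        # columns, so the reversed index should give the same result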
        result = a.dot(b1)
assert_frame_equal(result, expected)
# Check series argument
result = a.dot(b['one'])
assert_series_equal(result, expected['one'])
result = a.dot(b1['one'])
assert_series_equal(result, expected['one'])
# can pass correct-length arrays
row = a.ix[0].values
result = a.dot(row)
exp = a.dot(a.ix[0])
assert_series_equal(result, exp)
with assertRaisesRegexp(ValueError, 'Dot product shape mismatch'):
a.dot(row[:-1])
a = np.random.rand(1, 5)
b = np.random.rand(5, 1)
A = DataFrame(a)
B = DataFrame(b)
# it works
result = A.dot(b)
# unaligned
df = DataFrame(randn(3, 4), index=[1, 2, 3], columns=lrange(4))
df2 = DataFrame(randn(5, 3), index=lrange(5), columns=[1, 2, 3])
assertRaisesRegexp(ValueError, 'aligned', df.dot, df2)
def test_idxmin(self):
frame = self.frame
frame.ix[5:10] = np.nan
frame.ix[15:20, -2:] = np.nan
for skipna in [True, False]:
for axis in [0, 1]:
for df in [frame, self.intframe]:
result = df.idxmin(axis=axis, skipna=skipna)
expected = df.apply(
Series.idxmin, axis=axis, skipna=skipna)
assert_series_equal(result, expected)
self.assertRaises(ValueError, frame.idxmin, axis=2)
def test_idxmax(self):
frame = self.frame
frame.ix[5:10] = np.nan
frame.ix[15:20, -2:] = np.nan
for skipna in [True, False]:
for axis in [0, 1]:
for df in [frame, self.intframe]:
result = df.idxmax(axis=axis, skipna=skipna)
expected = df.apply(
Series.idxmax, axis=axis, skipna=skipna)
assert_series_equal(result, expected)
self.assertRaises(ValueError, frame.idxmax, axis=2)
def test_stale_cached_series_bug_473(self):
# this is chained, but ok
with option_context('chained_assignment',None):
Y = DataFrame(np.random.random((4, 4)), index=('a', 'b', 'c', 'd'),
columns=('e', 'f', 'g', 'h'))
repr(Y)
Y['e'] = Y['e'].astype('object')
Y['g']['c'] = np.NaN
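            # the NaN written via chained assignment should be visible when
            # 'g' is read again below (i.e. the cached series is not stale)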
repr(Y)
result = Y.sum()
exp = Y['g'].sum()
self.assertTrue(isnull(Y['g']['c']))
def test_index_namedtuple(self):
from collections import namedtuple
IndexType = namedtuple("IndexType", ["a", "b"])
idx1 = IndexType("foo", "bar")
idx2 = IndexType("baz", "bof")
index = Index([idx1, idx2],
name="composite_index", tupleize_cols=False)
df = DataFrame([(1, 2), (3, 4)], index=index, columns=["A", "B"])
self.assertEqual(df.ix[IndexType("foo", "bar")]["A"], 1)
def test_empty_nonzero(self):
df = DataFrame([1, 2, 3])
self.assertFalse(df.empty)
df = DataFrame(index=['a', 'b'], columns=['c', 'd']).dropna()
self.assertTrue(df.empty)
self.assertTrue(df.T.empty)
def test_any_all(self):
self._check_bool_op('any', np.any, has_skipna=True, has_bool_only=True)
self._check_bool_op('all', np.all, has_skipna=True, has_bool_only=True)
df = DataFrame(randn(10, 4)) > 0
df.any(1)
df.all(1)
df.any(1, bool_only=True)
df.all(1, bool_only=True)
# skip pathological failure cases
# class CantNonzero(object):
# def __nonzero__(self):
# raise ValueError
# df[4] = CantNonzero()
# it works!
# df.any(1)
# df.all(1)
# df.any(1, bool_only=True)
# df.all(1, bool_only=True)
# df[4][4] = np.nan
# df.any(1)
# df.all(1)
# df.any(1, bool_only=True)
# df.all(1, bool_only=True)
def test_consolidate_datetime64(self):
# numpy vstack bug
data = """\
starting,ending,measure
2012-06-21 00:00,2012-06-23 07:00,77
2012-06-23 07:00,2012-06-23 16:30,65
2012-06-23 16:30,2012-06-25 08:00,77
2012-06-25 08:00,2012-06-26 12:00,0
2012-06-26 12:00,2012-06-27 08:00,77
"""
df = read_csv(StringIO(data), parse_dates=[0, 1])
ser_starting = df.starting
ser_starting.index = ser_starting.values
ser_starting = ser_starting.tz_localize('US/Eastern')
ser_starting = ser_starting.tz_convert('UTC')
ser_ending = df.ending
ser_ending.index = ser_ending.values
ser_ending = ser_ending.tz_localize('US/Eastern')
ser_ending = ser_ending.tz_convert('UTC')
df.starting = ser_starting.index
df.ending = ser_ending.index
tm.assert_index_equal(pd.DatetimeIndex(df.starting), ser_starting.index)
tm.assert_index_equal(pd.DatetimeIndex(df.ending), ser_ending.index)
def _check_bool_op(self, name, alternative, frame=None, has_skipna=True,
has_bool_only=False):
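        # shared checker for boolean reductions (any/all): compare the frame
        # method against the numpy alternative applied column- and row-wise,
        # with and without skipna, on mixed-type frames and on all-NA input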
if frame is None:
frame = self.frame > 0
# set some NAs
frame = DataFrame(frame.values.astype(object), frame.index,
frame.columns)
frame.ix[5:10] = np.nan
frame.ix[15:20, -2:] = np.nan
f = getattr(frame, name)
if has_skipna:
def skipna_wrapper(x):
nona = x.dropna().values
return alternative(nona)
def wrapper(x):
return alternative(x.values)
result0 = f(axis=0, skipna=False)
result1 = f(axis=1, skipna=False)
assert_series_equal(result0, frame.apply(wrapper))
assert_series_equal(result1, frame.apply(wrapper, axis=1),
check_dtype=False) # HACK: win32
else:
skipna_wrapper = alternative
wrapper = alternative
result0 = f(axis=0)
result1 = f(axis=1)
assert_series_equal(result0, frame.apply(skipna_wrapper))
assert_series_equal(result1, frame.apply(skipna_wrapper, axis=1),
check_dtype=False)
# result = f(axis=1)
# comp = frame.apply(alternative, axis=1).reindex(result.index)
# assert_series_equal(result, comp)
# bad axis
self.assertRaises(ValueError, f, axis=2)
# make sure works on mixed-type frame
mixed = self.mixed_frame
mixed['_bool_'] = np.random.randn(len(mixed)) > 0
getattr(mixed, name)(axis=0)
getattr(mixed, name)(axis=1)
class NonzeroFail:
def __nonzero__(self):
raise ValueError
mixed['_nonzero_fail_'] = NonzeroFail()
if has_bool_only:
getattr(mixed, name)(axis=0, bool_only=True)
getattr(mixed, name)(axis=1, bool_only=True)
getattr(frame, name)(axis=0, bool_only=False)
getattr(frame, name)(axis=1, bool_only=False)
# all NA case
if has_skipna:
all_na = frame * np.NaN
r0 = getattr(all_na, name)(axis=0)
r1 = getattr(all_na, name)(axis=1)
if name == 'any':
self.assertFalse(r0.any())
self.assertFalse(r1.any())
else:
self.assertTrue(r0.all())
self.assertTrue(r1.all())
def test_strange_column_corruption_issue(self):
df = DataFrame(index=[0, 1])
df[0] = nan
wasCol = {}
# uncommenting these makes the results match
# for col in xrange(100, 200):
# wasCol[col] = 1
# df[col] = nan
for i, dt in enumerate(df.index):
for col in range(100, 200):
if not col in wasCol:
wasCol[col] = 1
df[col] = nan
df[col][dt] = i
myid = 100
first = len(df.ix[isnull(df[myid]), [myid]])
second = len(df.ix[isnull(df[myid]), [myid]])
self.assertTrue(first == second == 0)
def test_inplace_return_self(self):
# re #1893
data = DataFrame({'a': ['foo', 'bar', 'baz', 'qux'],
'b': [0, 0, 1, 1],
'c': [1, 2, 3, 4]})
def _check_f(base, f):
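            # inplace=True operations should return None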
result = f(base)
self.assertTrue(result is None)
# -----DataFrame-----
# set_index
f = lambda x: x.set_index('a', inplace=True)
_check_f(data.copy(), f)
# reset_index
f = lambda x: x.reset_index(inplace=True)
_check_f(data.set_index('a'), f)
# drop_duplicates
f = lambda x: x.drop_duplicates(inplace=True)
_check_f(data.copy(), f)
# sort
f = lambda x: x.sort('b', inplace=True)
_check_f(data.copy(), f)
# sort_index
f = lambda x: x.sort_index(inplace=True)
_check_f(data.copy(), f)
# sortlevel
f = lambda x: x.sortlevel(0, inplace=True)
_check_f(data.set_index(['a', 'b']), f)
# fillna
f = lambda x: x.fillna(0, inplace=True)
_check_f(data.copy(), f)
# replace
f = lambda x: x.replace(1, 0, inplace=True)
_check_f(data.copy(), f)
# rename
f = lambda x: x.rename({1: 'foo'}, inplace=True)
_check_f(data.copy(), f)
# -----Series-----
d = data.copy()['c']
# reset_index
f = lambda x: x.reset_index(inplace=True, drop=True)
_check_f(data.set_index('a')['c'], f)
# fillna
f = lambda x: x.fillna(0, inplace=True)
_check_f(d.copy(), f)
# replace
f = lambda x: x.replace(1, 0, inplace=True)
_check_f(d.copy(), f)
# rename
f = lambda x: x.rename({1: 'foo'}, inplace=True)
_check_f(d.copy(), f)
def test_isin(self):
# GH #4211
df = DataFrame({'vals': [1, 2, 3, 4], 'ids': ['a', 'b', 'f', 'n'],
'ids2': ['a', 'n', 'c', 'n']},
index=['foo', 'bar', 'baz', 'qux'])
other = ['a', 'b', 'c']
result = df.isin(other)
expected = DataFrame([df.loc[s].isin(other) for s in df.index])
assert_frame_equal(result, expected)
def test_isin_empty(self):
df = DataFrame({'A': ['a', 'b', 'c'], 'B': ['a', 'e', 'f']})
result = df.isin([])
expected = pd.DataFrame(False, df.index, df.columns)
assert_frame_equal(result, expected)
def test_isin_dict(self):
df = DataFrame({'A': ['a', 'b', 'c'], 'B': ['a', 'e', 'f']})
d = {'A': ['a']}
expected = DataFrame(False, df.index, df.columns)
expected.loc[0, 'A'] = True
result = df.isin(d)
assert_frame_equal(result, expected)
# non unique columns
df = DataFrame({'A': ['a', 'b', 'c'], 'B': ['a', 'e', 'f']})
df.columns = ['A', 'A']
expected = DataFrame(False, df.index, df.columns)
expected.loc[0, 'A'] = True
result = df.isin(d)
assert_frame_equal(result, expected)
def test_isin_with_string_scalar(self):
#GH4763
df = DataFrame({'vals': [1, 2, 3, 4], 'ids': ['a', 'b', 'f', 'n'],
'ids2': ['a', 'n', 'c', 'n']},
index=['foo', 'bar', 'baz', 'qux'])
with tm.assertRaises(TypeError):
df.isin('a')
with tm.assertRaises(TypeError):
df.isin('aaa')
def test_isin_df(self):
df1 = DataFrame({'A': [1, 2, 3, 4], 'B': [2, np.nan, 4, 4]})
df2 = DataFrame({'A': [0, 2, 12, 4], 'B': [2, np.nan, 4, 5]})
expected = DataFrame(False, df1.index, df1.columns)
result = df1.isin(df2)
expected['A'].loc[[1, 3]] = True
expected['B'].loc[[0, 2]] = True
assert_frame_equal(result, expected)
# partial overlapping columns
df2.columns = ['A', 'C']
result = df1.isin(df2)
expected['B'] = False
assert_frame_equal(result, expected)
def test_isin_df_dupe_values(self):
df1 = DataFrame({'A': [1, 2, 3, 4], 'B': [2, np.nan, 4, 4]})
# just cols duped
df2 = DataFrame([[0, 2], [12, 4], [2, np.nan], [4, 5]],
columns=['B', 'B'])
with tm.assertRaises(ValueError):
df1.isin(df2)
# just index duped
df2 = DataFrame([[0, 2], [12, 4], [2, np.nan], [4, 5]],
columns=['A', 'B'], index=[0, 0, 1, 1])
with tm.assertRaises(ValueError):
df1.isin(df2)
# cols and index:
df2.columns = ['B', 'B']
with tm.assertRaises(ValueError):
df1.isin(df2)
def test_isin_dupe_self(self):
other = DataFrame({'A': [1, 0, 1, 0], 'B': [1, 1, 0, 0]})
df = DataFrame([[1, 1], [1, 0], [0, 0]], columns=['A','A'])
result = df.isin(other)
expected = DataFrame(False, index=df.index, columns=df.columns)
expected.loc[0] = True
expected.iloc[1, 1] = True
assert_frame_equal(result, expected)
def test_isin_against_series(self):
df = pd.DataFrame({'A': [1, 2, 3, 4], 'B': [2, np.nan, 4, 4]},
index=['a', 'b', 'c', 'd'])
s = pd.Series([1, 3, 11, 4], index=['a', 'b', 'c', 'd'])
expected = DataFrame(False, index=df.index, columns=df.columns)
expected['A'].loc['a'] = True
expected.loc['d'] = True
result = df.isin(s)
assert_frame_equal(result, expected)
def test_isin_multiIndex(self):
idx = MultiIndex.from_tuples([(0, 'a', 'foo'), (0, 'a', 'bar'),
(0, 'b', 'bar'), (0, 'b', 'baz'),
(2, 'a', 'foo'), (2, 'a', 'bar'),
(2, 'c', 'bar'), (2, 'c', 'baz'),
(1, 'b', 'foo'), (1, 'b', 'bar'),
(1, 'c', 'bar'), (1, 'c', 'baz')])
df1 = DataFrame({'A': np.ones(12),
'B': np.zeros(12)}, index=idx)
df2 = DataFrame({'A': [1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1],
'B': [1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1]})
# against regular index
expected = DataFrame(False, index=df1.index, columns=df1.columns)
result = df1.isin(df2)
assert_frame_equal(result, expected)
df2.index = idx
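        # with matching indexes isin compares element-wise: df1's A (all ones)
        # matches where df2's A is 1, and df1's B (all zeros) matches where
        # df2's B is 0, hence the inverted second column below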
expected = df2.values.astype(np.bool)
expected[:, 1] = ~expected[:, 1]
expected = DataFrame(expected, columns=['A', 'B'], index=idx)
result = df1.isin(df2)
assert_frame_equal(result, expected)
def test_to_csv_date_format(self):
from pandas import to_datetime
pname = '__tmp_to_csv_date_format__'
with ensure_clean(pname) as path:
for engine in [None, 'python']:
dt_index = self.tsframe.index
datetime_frame = DataFrame({'A': dt_index, 'B': dt_index.shift(1)}, index=dt_index)
datetime_frame.to_csv(path, date_format='%Y%m%d', engine=engine)
# Check that the data was put in the specified format
test = read_csv(path, index_col=0)
datetime_frame_int = datetime_frame.applymap(lambda x: int(x.strftime('%Y%m%d')))
datetime_frame_int.index = datetime_frame_int.index.map(lambda x: int(x.strftime('%Y%m%d')))
assert_frame_equal(test, datetime_frame_int)
datetime_frame.to_csv(path, date_format='%Y-%m-%d', engine=engine)
# Check that the data was put in the specified format
test = read_csv(path, index_col=0)
datetime_frame_str = datetime_frame.applymap(lambda x: x.strftime('%Y-%m-%d'))
datetime_frame_str.index = datetime_frame_str.index.map(lambda x: x.strftime('%Y-%m-%d'))
assert_frame_equal(test, datetime_frame_str)
# Check that columns get converted
datetime_frame_columns = datetime_frame.T
datetime_frame_columns.to_csv(path, date_format='%Y%m%d', engine=engine)
test = read_csv(path, index_col=0)
datetime_frame_columns = datetime_frame_columns.applymap(lambda x: int(x.strftime('%Y%m%d')))
# Columns don't get converted to ints by read_csv
datetime_frame_columns.columns = datetime_frame_columns.columns.map(lambda x: x.strftime('%Y%m%d'))
assert_frame_equal(test, datetime_frame_columns)
# test NaTs
nat_index = to_datetime(['NaT'] * 10 + ['2000-01-01', '1/1/2000', '1-1-2000'])
nat_frame = DataFrame({'A': nat_index}, index=nat_index)
nat_frame.to_csv(path, date_format='%Y-%m-%d', engine=engine)
test = read_csv(path, parse_dates=[0, 1], index_col=0)
assert_frame_equal(test, nat_frame)
def test_concat_empty_dataframe_dtypes(self):
df = DataFrame(columns=list("abc"))
df['a'] = df['a'].astype(np.bool_)
df['b'] = df['b'].astype(np.int32)
df['c'] = df['c'].astype(np.float64)
result = pd.concat([df, df])
self.assertEqual(result['a'].dtype, np.bool_)
self.assertEqual(result['b'].dtype, np.int32)
self.assertEqual(result['c'].dtype, np.float64)
result = pd.concat([df, df.astype(np.float64)])
self.assertEqual(result['a'].dtype, np.object_)
self.assertEqual(result['b'].dtype, np.float64)
self.assertEqual(result['c'].dtype, np.float64)
def test_empty_frame_dtypes_ftypes(self):
empty_df = pd.DataFrame()
assert_series_equal(empty_df.dtypes, pd.Series(dtype=np.object))
assert_series_equal(empty_df.ftypes, pd.Series(dtype=np.object))
nocols_df = pd.DataFrame(index=[1,2,3])
assert_series_equal(nocols_df.dtypes, pd.Series(dtype=np.object))
assert_series_equal(nocols_df.ftypes, pd.Series(dtype=np.object))
norows_df = pd.DataFrame(columns=list("abc"))
assert_series_equal(norows_df.dtypes, pd.Series(np.object, index=list("abc")))
assert_series_equal(norows_df.ftypes, pd.Series('object:dense', index=list("abc")))
norows_int_df = pd.DataFrame(columns=list("abc")).astype(np.int32)
assert_series_equal(norows_int_df.dtypes, pd.Series(np.dtype('int32'), index=list("abc")))
assert_series_equal(norows_int_df.ftypes, pd.Series('int32:dense', index=list("abc")))
odict = OrderedDict
df = pd.DataFrame(odict([('a', 1), ('b', True), ('c', 1.0)]), index=[1, 2, 3])
assert_series_equal(df.dtypes, pd.Series(odict([('a', np.int64),
('b', np.bool),
('c', np.float64)])))
assert_series_equal(df.ftypes, pd.Series(odict([('a', 'int64:dense'),
('b', 'bool:dense'),
('c', 'float64:dense')])))
# same but for empty slice of df
assert_series_equal(df[:0].dtypes, pd.Series(odict([('a', np.int64),
('b', np.bool),
('c', np.float64)])))
assert_series_equal(df[:0].ftypes, pd.Series(odict([('a', 'int64:dense'),
('b', 'bool:dense'),
('c', 'float64:dense')])))
def test_dtypes_are_correct_after_column_slice(self):
# GH6525
df = pd.DataFrame(index=range(5), columns=list("abc"), dtype=np.float_)
odict = OrderedDict
assert_series_equal(df.dtypes,
pd.Series(odict([('a', np.float_), ('b', np.float_),
('c', np.float_),])))
assert_series_equal(df.iloc[:,2:].dtypes,
pd.Series(odict([('c', np.float_)])))
assert_series_equal(df.dtypes,
pd.Series(odict([('a', np.float_), ('b', np.float_),
('c', np.float_),])))
def test_set_index_names(self):
df = pd.util.testing.makeDataFrame()
df.index.name = 'name'
self.assertEqual(df.set_index(df.index).index.names, ['name'])
mi = MultiIndex.from_arrays(df[['A', 'B']].T.values, names=['A', 'B'])
mi2 = MultiIndex.from_arrays(df[['A', 'B', 'A', 'B']].T.values,
names=['A', 'B', 'A', 'B'])
df = df.set_index(['A', 'B'])
self.assertEqual(df.set_index(df.index).index.names, ['A', 'B'])
# Check that set_index isn't converting a MultiIndex into an Index
self.assertTrue(isinstance(df.set_index(df.index).index, MultiIndex))
# Check actual equality
tm.assert_index_equal(df.set_index(df.index).index, mi)
# Check that [MultiIndex, MultiIndex] yields a MultiIndex rather
# than a pair of tuples
self.assertTrue(isinstance(df.set_index([df.index, df.index]).index, MultiIndex))
# Check equality
tm.assert_index_equal(df.set_index([df.index, df.index]).index, mi2)
def test_select_dtypes_include(self):
df = DataFrame({'a': list('abc'),
'b': list(range(1, 4)),
'c': np.arange(3, 6).astype('u1'),
'd': np.arange(4.0, 7.0, dtype='float64'),
'e': [True, False, True],
'f': pd.Categorical(list('abc'))})
ri = df.select_dtypes(include=[np.number])
ei = df[['b', 'c', 'd']]
tm.assert_frame_equal(ri, ei)
ri = df.select_dtypes(include=[np.number,'category'])
ei = df[['b', 'c', 'd', 'f']]
tm.assert_frame_equal(ri, ei)
def test_select_dtypes_exclude(self):
df = DataFrame({'a': list('abc'),
'b': list(range(1, 4)),
'c': np.arange(3, 6).astype('u1'),
'd': np.arange(4.0, 7.0, dtype='float64'),
'e': [True, False, True]})
re = df.select_dtypes(exclude=[np.number])
ee = df[['a', 'e']]
tm.assert_frame_equal(re, ee)
def test_select_dtypes_exclude_include(self):
df = DataFrame({'a': list('abc'),
'b': list(range(1, 4)),
'c': np.arange(3, 6).astype('u1'),
'd': np.arange(4.0, 7.0, dtype='float64'),
'e': [True, False, True],
'f': pd.date_range('now', periods=3).values})
exclude = np.datetime64,
include = np.bool_, 'integer'
r = df.select_dtypes(include=include, exclude=exclude)
e = df[['b', 'c', 'e']]
tm.assert_frame_equal(r, e)
exclude = 'datetime',
include = 'bool', 'int64', 'int32'
r = df.select_dtypes(include=include, exclude=exclude)
e = df[['b', 'e']]
tm.assert_frame_equal(r, e)
def test_select_dtypes_not_an_attr_but_still_valid_dtype(self):
df = DataFrame({'a': list('abc'),
'b': list(range(1, 4)),
'c': np.arange(3, 6).astype('u1'),
'd': np.arange(4.0, 7.0, dtype='float64'),
'e': [True, False, True],
'f': pd.date_range('now', periods=3).values})
df['g'] = df.f.diff()
assert not hasattr(np, 'u8')
r = df.select_dtypes(include=['i8', 'O'], exclude=['timedelta'])
e = df[['a', 'b']]
tm.assert_frame_equal(r, e)
r = df.select_dtypes(include=['i8', 'O', 'timedelta64[ns]'])
e = df[['a', 'b', 'g']]
tm.assert_frame_equal(r, e)
def test_select_dtypes_empty(self):
df = DataFrame({'a': list('abc'), 'b': list(range(1, 4))})
with tm.assertRaisesRegexp(ValueError, 'at least one of include or '
'exclude must be nonempty'):
df.select_dtypes()
def test_select_dtypes_raises_on_string(self):
df = DataFrame({'a': list('abc'), 'b': list(range(1, 4))})
with tm.assertRaisesRegexp(TypeError, 'include and exclude .+ non-'):
df.select_dtypes(include='object')
with tm.assertRaisesRegexp(TypeError, 'include and exclude .+ non-'):
df.select_dtypes(exclude='object')
with tm.assertRaisesRegexp(TypeError, 'include and exclude .+ non-'):
df.select_dtypes(include=int, exclude='object')
def test_select_dtypes_bad_datetime64(self):
df = DataFrame({'a': list('abc'),
'b': list(range(1, 4)),
'c': np.arange(3, 6).astype('u1'),
'd': np.arange(4.0, 7.0, dtype='float64'),
'e': [True, False, True],
'f': pd.date_range('now', periods=3).values})
with tm.assertRaisesRegexp(ValueError, '.+ is too specific'):
df.select_dtypes(include=['datetime64[D]'])
with tm.assertRaisesRegexp(ValueError, '.+ is too specific'):
df.select_dtypes(exclude=['datetime64[as]'])
def test_select_dtypes_str_raises(self):
df = DataFrame({'a': list('abc'),
'g': list(u('abc')),
'b': list(range(1, 4)),
'c': np.arange(3, 6).astype('u1'),
'd': np.arange(4.0, 7.0, dtype='float64'),
'e': [True, False, True],
'f': pd.date_range('now', periods=3).values})
string_dtypes = set((str, 'str', np.string_, 'S1',
'unicode', np.unicode_, 'U1'))
try:
string_dtypes.add(unicode)
except NameError:
pass
for dt in string_dtypes:
with tm.assertRaisesRegexp(TypeError,
'string dtypes are not allowed'):
df.select_dtypes(include=[dt])
with tm.assertRaisesRegexp(TypeError,
'string dtypes are not allowed'):
df.select_dtypes(exclude=[dt])
def test_select_dtypes_bad_arg_raises(self):
df = DataFrame({'a': list('abc'),
'g': list(u('abc')),
'b': list(range(1, 4)),
'c': np.arange(3, 6).astype('u1'),
'd': np.arange(4.0, 7.0, dtype='float64'),
'e': [True, False, True],
'f': pd.date_range('now', periods=3).values})
with tm.assertRaisesRegexp(TypeError, 'data type.*not understood'):
df.select_dtypes(['blargy, blarg, blarg'])
def skip_if_no_ne(engine='numexpr'):
if engine == 'numexpr':
try:
import numexpr as ne
except ImportError:
raise nose.SkipTest("cannot query engine numexpr when numexpr not "
"installed")
def skip_if_no_pandas_parser(parser):
if parser != 'pandas':
raise nose.SkipTest("cannot evaluate with parser {0!r}".format(parser))
class TestDataFrameQueryWithMultiIndex(object):
def check_query_with_named_multiindex(self, parser, engine):
tm.skip_if_no_ne(engine)
a = tm.choice(['red', 'green'], size=10)
b = tm.choice(['eggs', 'ham'], size=10)
index = MultiIndex.from_arrays([a, b], names=['color', 'food'])
df = DataFrame(randn(10, 2), index=index)
ind = Series(df.index.get_level_values('color').values, index=index,
name='color')
# equality
res1 = df.query('color == "red"', parser=parser, engine=engine)
res2 = df.query('"red" == color', parser=parser, engine=engine)
exp = df[ind == 'red']
assert_frame_equal(res1, exp)
assert_frame_equal(res2, exp)
# inequality
res1 = df.query('color != "red"', parser=parser, engine=engine)
res2 = df.query('"red" != color', parser=parser, engine=engine)
exp = df[ind != 'red']
assert_frame_equal(res1, exp)
assert_frame_equal(res2, exp)
# list equality (really just set membership)
res1 = df.query('color == ["red"]', parser=parser, engine=engine)
res2 = df.query('["red"] == color', parser=parser, engine=engine)
exp = df[ind.isin(['red'])]
assert_frame_equal(res1, exp)
assert_frame_equal(res2, exp)
res1 = df.query('color != ["red"]', parser=parser, engine=engine)
res2 = df.query('["red"] != color', parser=parser, engine=engine)
exp = df[~ind.isin(['red'])]
assert_frame_equal(res1, exp)
assert_frame_equal(res2, exp)
# in/not in ops
res1 = df.query('["red"] in color', parser=parser, engine=engine)
res2 = df.query('"red" in color', parser=parser, engine=engine)
exp = df[ind.isin(['red'])]
assert_frame_equal(res1, exp)
assert_frame_equal(res2, exp)
res1 = df.query('["red"] not in color', parser=parser, engine=engine)
res2 = df.query('"red" not in color', parser=parser, engine=engine)
exp = df[~ind.isin(['red'])]
assert_frame_equal(res1, exp)
assert_frame_equal(res2, exp)
def test_query_with_named_multiindex(self):
for parser, engine in product(['pandas'], ENGINES):
yield self.check_query_with_named_multiindex, parser, engine
def check_query_with_unnamed_multiindex(self, parser, engine):
tm.skip_if_no_ne(engine)
a = tm.choice(['red', 'green'], size=10)
b = tm.choice(['eggs', 'ham'], size=10)
index = MultiIndex.from_arrays([a, b])
df = DataFrame(randn(10, 2), index=index)
ind = Series(df.index.get_level_values(0).values, index=index)
res1 = df.query('ilevel_0 == "red"', parser=parser, engine=engine)
res2 = df.query('"red" == ilevel_0', parser=parser, engine=engine)
exp = df[ind == 'red']
assert_frame_equal(res1, exp)
assert_frame_equal(res2, exp)
# inequality
res1 = df.query('ilevel_0 != "red"', parser=parser, engine=engine)
res2 = df.query('"red" != ilevel_0', parser=parser, engine=engine)
exp = df[ind != 'red']
assert_frame_equal(res1, exp)
assert_frame_equal(res2, exp)
# list equality (really just set membership)
res1 = df.query('ilevel_0 == ["red"]', parser=parser, engine=engine)
res2 = df.query('["red"] == ilevel_0', parser=parser, engine=engine)
exp = df[ind.isin(['red'])]
assert_frame_equal(res1, exp)
assert_frame_equal(res2, exp)
res1 = df.query('ilevel_0 != ["red"]', parser=parser, engine=engine)
res2 = df.query('["red"] != ilevel_0', parser=parser, engine=engine)
exp = df[~ind.isin(['red'])]
assert_frame_equal(res1, exp)
assert_frame_equal(res2, exp)
# in/not in ops
res1 = df.query('["red"] in ilevel_0', parser=parser, engine=engine)
res2 = df.query('"red" in ilevel_0', parser=parser, engine=engine)
exp = df[ind.isin(['red'])]
assert_frame_equal(res1, exp)
assert_frame_equal(res2, exp)
res1 = df.query('["red"] not in ilevel_0', parser=parser, engine=engine)
res2 = df.query('"red" not in ilevel_0', parser=parser, engine=engine)
exp = df[~ind.isin(['red'])]
assert_frame_equal(res1, exp)
assert_frame_equal(res2, exp)
#### LEVEL 1 ####
ind = Series(df.index.get_level_values(1).values, index=index)
res1 = df.query('ilevel_1 == "eggs"', parser=parser, engine=engine)
res2 = df.query('"eggs" == ilevel_1', parser=parser, engine=engine)
exp = df[ind == 'eggs']
assert_frame_equal(res1, exp)
assert_frame_equal(res2, exp)
# inequality
res1 = df.query('ilevel_1 != "eggs"', parser=parser, engine=engine)
res2 = df.query('"eggs" != ilevel_1', parser=parser, engine=engine)
exp = df[ind != 'eggs']
assert_frame_equal(res1, exp)
assert_frame_equal(res2, exp)
# list equality (really just set membership)
res1 = df.query('ilevel_1 == ["eggs"]', parser=parser, engine=engine)
res2 = df.query('["eggs"] == ilevel_1', parser=parser, engine=engine)
exp = df[ind.isin(['eggs'])]
assert_frame_equal(res1, exp)
assert_frame_equal(res2, exp)
res1 = df.query('ilevel_1 != ["eggs"]', parser=parser, engine=engine)
res2 = df.query('["eggs"] != ilevel_1', parser=parser, engine=engine)
exp = df[~ind.isin(['eggs'])]
assert_frame_equal(res1, exp)
assert_frame_equal(res2, exp)
# in/not in ops
res1 = df.query('["eggs"] in ilevel_1', parser=parser, engine=engine)
res2 = df.query('"eggs" in ilevel_1', parser=parser, engine=engine)
exp = df[ind.isin(['eggs'])]
assert_frame_equal(res1, exp)
assert_frame_equal(res2, exp)
res1 = df.query('["eggs"] not in ilevel_1', parser=parser, engine=engine)
res2 = df.query('"eggs" not in ilevel_1', parser=parser, engine=engine)
exp = df[~ind.isin(['eggs'])]
assert_frame_equal(res1, exp)
assert_frame_equal(res2, exp)
def test_query_with_unnamed_multiindex(self):
for parser, engine in product(['pandas'], ENGINES):
yield self.check_query_with_unnamed_multiindex, parser, engine
def check_query_with_partially_named_multiindex(self, parser, engine):
tm.skip_if_no_ne(engine)
a = tm.choice(['red', 'green'], size=10)
b = np.arange(10)
index = MultiIndex.from_arrays([a, b])
index.names = [None, 'rating']
df = DataFrame(randn(10, 2), index=index)
res = df.query('rating == 1', parser=parser, engine=engine)
ind = Series(df.index.get_level_values('rating').values, index=index,
name='rating')
exp = df[ind == 1]
assert_frame_equal(res, exp)
res = df.query('rating != 1', parser=parser, engine=engine)
ind = Series(df.index.get_level_values('rating').values, index=index,
name='rating')
exp = df[ind != 1]
assert_frame_equal(res, exp)
res = df.query('ilevel_0 == "red"', parser=parser, engine=engine)
ind = Series(df.index.get_level_values(0).values, index=index)
exp = df[ind == "red"]
assert_frame_equal(res, exp)
res = df.query('ilevel_0 != "red"', parser=parser, engine=engine)
ind = Series(df.index.get_level_values(0).values, index=index)
exp = df[ind != "red"]
assert_frame_equal(res, exp)
def test_query_with_partially_named_multiindex(self):
for parser, engine in product(['pandas'], ENGINES):
yield self.check_query_with_partially_named_multiindex, parser, engine
def test_query_multiindex_get_index_resolvers(self):
for parser, engine in product(['pandas'], ENGINES):
yield self.check_query_multiindex_get_index_resolvers, parser, engine
def check_query_multiindex_get_index_resolvers(self, parser, engine):
df = mkdf(10, 3, r_idx_nlevels=2, r_idx_names=['spam', 'eggs'])
resolvers = df._get_index_resolvers()
def to_series(mi, level):
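            # turn one level of the MultiIndex into a Series indexed by the
            # full MultiIndex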
level_values = mi.get_level_values(level)
s = level_values.to_series()
s.index = mi
return s
col_series = df.columns.to_series()
expected = {'index': df.index,
'columns': col_series,
'spam': to_series(df.index, 'spam'),
'eggs': to_series(df.index, 'eggs'),
'C0': col_series}
for k, v in resolvers.items():
if isinstance(v, Index):
assert v.is_(expected[k])
elif isinstance(v, Series):
tm.assert_series_equal(v, expected[k])
else:
raise AssertionError("object must be a Series or Index")
def test_raise_on_panel_with_multiindex(self):
for parser, engine in product(PARSERS, ENGINES):
yield self.check_raise_on_panel_with_multiindex, parser, engine
def check_raise_on_panel_with_multiindex(self, parser, engine):
tm.skip_if_no_ne()
p = tm.makePanel(7)
p.items = tm.makeCustomIndex(len(p.items), nlevels=2)
with tm.assertRaises(NotImplementedError):
pd.eval('p + 1', parser=parser, engine=engine)
def test_raise_on_panel4d_with_multiindex(self):
for parser, engine in product(PARSERS, ENGINES):
yield self.check_raise_on_panel4d_with_multiindex, parser, engine
def check_raise_on_panel4d_with_multiindex(self, parser, engine):
tm.skip_if_no_ne()
p4d = tm.makePanel4D(7)
p4d.items = tm.makeCustomIndex(len(p4d.items), nlevels=2)
with tm.assertRaises(NotImplementedError):
pd.eval('p4d + 1', parser=parser, engine=engine)
class TestDataFrameQueryNumExprPandas(tm.TestCase):
@classmethod
def setUpClass(cls):
super(TestDataFrameQueryNumExprPandas, cls).setUpClass()
cls.engine = 'numexpr'
cls.parser = 'pandas'
tm.skip_if_no_ne(cls.engine)
@classmethod
def tearDownClass(cls):
super(TestDataFrameQueryNumExprPandas, cls).tearDownClass()
del cls.engine, cls.parser
def test_date_query_with_attribute_access(self):
engine, parser = self.engine, self.parser
skip_if_no_pandas_parser(parser)
df = DataFrame(randn(5, 3))
df['dates1'] = date_range('1/1/2012', periods=5)
df['dates2'] = date_range('1/1/2013', periods=5)
df['dates3'] = date_range('1/1/2014', periods=5)
res = df.query('@df.dates1 < 20130101 < @df.dates3', engine=engine,
parser=parser)
expec = df[(df.dates1 < '20130101') & ('20130101' < df.dates3)]
assert_frame_equal(res, expec)
def test_date_query_no_attribute_access(self):
engine, parser = self.engine, self.parser
df = DataFrame(randn(5, 3))
df['dates1'] = date_range('1/1/2012', periods=5)
df['dates2'] = date_range('1/1/2013', periods=5)
df['dates3'] = date_range('1/1/2014', periods=5)
res = df.query('dates1 < 20130101 < dates3', engine=engine,
parser=parser)
expec = df[(df.dates1 < '20130101') & ('20130101' < df.dates3)]
tm.assert_frame_equal(res, expec)
def test_date_query_with_NaT(self):
engine, parser = self.engine, self.parser
n = 10
df = DataFrame(randn(n, 3))
df['dates1'] = date_range('1/1/2012', periods=n)
df['dates2'] = date_range('1/1/2013', periods=n)
df['dates3'] = date_range('1/1/2014', periods=n)
df.loc[np.random.rand(n) > 0.5, 'dates1'] = pd.NaT
df.loc[np.random.rand(n) > 0.5, 'dates3'] = pd.NaT
res = df.query('dates1 < 20130101 < dates3', engine=engine,
parser=parser)
expec = df[(df.dates1 < '20130101') & ('20130101' < df.dates3)]
assert_frame_equal(res, expec)
def test_date_index_query(self):
engine, parser = self.engine, self.parser
n = 10
df = DataFrame(randn(n, 3))
df['dates1'] = date_range('1/1/2012', periods=n)
df['dates3'] = date_range('1/1/2014', periods=n)
df.set_index('dates1', inplace=True, drop=True)
res = df.query('index < 20130101 < dates3', engine=engine,
parser=parser)
expec = df[(df.index < '20130101') & ('20130101' < df.dates3)]
assert_frame_equal(res, expec)
def test_date_index_query_with_NaT(self):
engine, parser = self.engine, self.parser
n = 10
df = DataFrame(randn(n, 3))
df['dates1'] = date_range('1/1/2012', periods=n)
df['dates3'] = date_range('1/1/2014', periods=n)
df.iloc[0, 0] = pd.NaT
df.set_index('dates1', inplace=True, drop=True)
res = df.query('index < 20130101 < dates3', engine=engine,
parser=parser)
expec = df[(df.index < '20130101') & ('20130101' < df.dates3)]
assert_frame_equal(res, expec)
def test_date_index_query_with_NaT_duplicates(self):
engine, parser = self.engine, self.parser
n = 10
d = {}
d['dates1'] = date_range('1/1/2012', periods=n)
d['dates3'] = date_range('1/1/2014', periods=n)
df = DataFrame(d)
df.loc[np.random.rand(n) > 0.5, 'dates1'] = pd.NaT
df.set_index('dates1', inplace=True, drop=True)
res = df.query('index < 20130101 < dates3', engine=engine, parser=parser)
expec = df[(df.index.to_series() < '20130101') & ('20130101' < df.dates3)]
assert_frame_equal(res, expec)
def test_date_query_with_non_date(self):
engine, parser = self.engine, self.parser
n = 10
df = DataFrame({'dates': date_range('1/1/2012', periods=n),
'nondate': np.arange(n)})
ops = '==', '!=', '<', '>', '<=', '>='
for op in ops:
with tm.assertRaises(TypeError):
df.query('dates %s nondate' % op, parser=parser, engine=engine)
def test_query_syntax_error(self):
engine, parser = self.engine, self.parser
df = DataFrame({"i": lrange(10), "+": lrange(3, 13),
"r": lrange(4, 14)})
with tm.assertRaises(SyntaxError):
df.query('i - +', engine=engine, parser=parser)
def test_query_scope(self):
from pandas.computation.ops import UndefinedVariableError
engine, parser = self.engine, self.parser
skip_if_no_pandas_parser(parser)
df = DataFrame(np.random.randn(20, 2), columns=list('ab'))
a, b = 1, 2
res = df.query('a > b', engine=engine, parser=parser)
expected = df[df.a > df.b]
tm.assert_frame_equal(res, expected)
res = df.query('@a > b', engine=engine, parser=parser)
expected = df[a > df.b]
tm.assert_frame_equal(res, expected)
# no local variable c
with tm.assertRaises(UndefinedVariableError):
df.query('@a > b > @c', engine=engine, parser=parser)
# no column named 'c'
with tm.assertRaises(UndefinedVariableError):
df.query('@a > b > c', engine=engine, parser=parser)
def test_query_doesnt_pickup_local(self):
from pandas.computation.ops import UndefinedVariableError
engine, parser = self.engine, self.parser
n = m = 10
df = DataFrame(np.random.randint(m, size=(n, 3)), columns=list('abc'))
from numpy import sin
# we don't pick up the local 'sin'
with tm.assertRaises(UndefinedVariableError):
df.query('sin > 5', engine=engine, parser=parser)
def test_query_builtin(self):
from pandas.computation.engines import NumExprClobberingError
engine, parser = self.engine, self.parser
n = m = 10
df = DataFrame(np.random.randint(m, size=(n, 3)), columns=list('abc'))
df.index.name = 'sin'
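        # the index name 'sin' clashes with the numexpr builtin of the same
        # name, so query should refuse to evaluate the expression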
with tm.assertRaisesRegexp(NumExprClobberingError,
'Variables in expression.+'):
df.query('sin > 5', engine=engine, parser=parser)
def test_query(self):
engine, parser = self.engine, self.parser
df = DataFrame(np.random.randn(10, 3), columns=['a', 'b', 'c'])
assert_frame_equal(df.query('a < b', engine=engine, parser=parser),
df[df.a < df.b])
assert_frame_equal(df.query('a + b > b * c', engine=engine,
parser=parser),
df[df.a + df.b > df.b * df.c])
def test_query_index_with_name(self):
engine, parser = self.engine, self.parser
df = DataFrame(np.random.randint(10, size=(10, 3)),
index=Index(range(10), name='blob'),
columns=['a', 'b', 'c'])
res = df.query('(blob < 5) & (a < b)', engine=engine, parser=parser)
expec = df[(df.index < 5) & (df.a < df.b)]
assert_frame_equal(res, expec)
res = df.query('blob < b', engine=engine, parser=parser)
expec = df[df.index < df.b]
assert_frame_equal(res, expec)
def test_query_index_without_name(self):
engine, parser = self.engine, self.parser
df = DataFrame(np.random.randint(10, size=(10, 3)),
index=range(10), columns=['a', 'b', 'c'])
# "index" should refer to the index
res = df.query('index < b', engine=engine, parser=parser)
expec = df[df.index < df.b]
assert_frame_equal(res, expec)
# test against a scalar
res = df.query('index < 5', engine=engine, parser=parser)
expec = df[df.index < 5]
assert_frame_equal(res, expec)
def test_nested_scope(self):
engine = self.engine
parser = self.parser
skip_if_no_pandas_parser(parser)
df = DataFrame(np.random.randn(5, 3))
df2 = DataFrame(np.random.randn(5, 3))
expected = df[(df > 0) & (df2 > 0)]
result = df.query('(@df > 0) & (@df2 > 0)', engine=engine, parser=parser)
assert_frame_equal(result, expected)
result = pd.eval('df[df > 0 and df2 > 0]', engine=engine,
parser=parser)
assert_frame_equal(result, expected)
result = pd.eval('df[df > 0 and df2 > 0 and df[df > 0] > 0]',
engine=engine, parser=parser)
expected = df[(df > 0) & (df2 > 0) & (df[df > 0] > 0)]
assert_frame_equal(result, expected)
result = pd.eval('df[(df>0) & (df2>0)]', engine=engine, parser=parser)
expected = df.query('(@df>0) & (@df2>0)', engine=engine, parser=parser)
assert_frame_equal(result, expected)
def test_nested_raises_on_local_self_reference(self):
from pandas.computation.ops import UndefinedVariableError
df = DataFrame(np.random.randn(5, 3))
# can't reference ourself b/c we're a local so @ is necessary
with tm.assertRaises(UndefinedVariableError):
df.query('df > 0', engine=self.engine, parser=self.parser)
def test_local_syntax(self):
skip_if_no_pandas_parser(self.parser)
engine, parser = self.engine, self.parser
df = DataFrame(randn(100, 10), columns=list('abcdefghij'))
b = 1
expect = df[df.a < b]
result = df.query('a < @b', engine=engine, parser=parser)
assert_frame_equal(result, expect)
expect = df[df.a < df.b]
result = df.query('a < b', engine=engine, parser=parser)
assert_frame_equal(result, expect)
def test_chained_cmp_and_in(self):
skip_if_no_pandas_parser(self.parser)
engine, parser = self.engine, self.parser
cols = list('abc')
df = DataFrame(randn(100, len(cols)), columns=cols)
res = df.query('a < b < c and a not in b not in c', engine=engine,
parser=parser)
ind = (df.a < df.b) & (df.b < df.c) & ~df.b.isin(df.a) & ~df.c.isin(df.b)
expec = df[ind]
assert_frame_equal(res, expec)
def test_local_variable_with_in(self):
engine, parser = self.engine, self.parser
skip_if_no_pandas_parser(parser)
a = Series(np.random.randint(3, size=15), name='a')
b = Series(np.random.randint(10, size=15), name='b')
df = DataFrame({'a': a, 'b': b})
expected = df.loc[(df.b - 1).isin(a)]
result = df.query('b - 1 in a', engine=engine, parser=parser)
tm.assert_frame_equal(expected, result)
b = Series(np.random.randint(10, size=15), name='b')
expected = df.loc[(b - 1).isin(a)]
result = df.query('@b - 1 in a', engine=engine, parser=parser)
tm.assert_frame_equal(expected, result)
def test_at_inside_string(self):
engine, parser = self.engine, self.parser
skip_if_no_pandas_parser(parser)
c = 1
df = DataFrame({'a': ['a', 'a', 'b', 'b', '@c', '@c']})
result = df.query('a == "@c"', engine=engine, parser=parser)
expected = df[df.a == "@c"]
tm.assert_frame_equal(result, expected)
def test_query_undefined_local(self):
from pandas.computation.ops import UndefinedVariableError
engine, parser = self.engine, self.parser
skip_if_no_pandas_parser(parser)
df = DataFrame(np.random.rand(10, 2), columns=list('ab'))
with tm.assertRaisesRegexp(UndefinedVariableError,
"local variable 'c' is not defined"):
df.query('a == @c', engine=engine, parser=parser)
def test_index_resolvers_come_after_columns_with_the_same_name(self):
n = 1
a = np.r_[20:101:20]
df = DataFrame({'index': a, 'b': np.random.randn(a.size)})
df.index.name = 'index'
result = df.query('index > 5', engine=self.engine, parser=self.parser)
expected = df[df['index'] > 5]
tm.assert_frame_equal(result, expected)
df = DataFrame({'index': a, 'b': np.random.randn(a.size)})
result = df.query('ilevel_0 > 5', engine=self.engine, parser=self.parser)
expected = df.loc[df.index[df.index > 5]]
tm.assert_frame_equal(result, expected)
df = DataFrame({'a': a, 'b': np.random.randn(a.size)})
df.index.name = 'a'
result = df.query('a > 5', engine=self.engine, parser=self.parser)
expected = df[df.a > 5]
tm.assert_frame_equal(result, expected)
result = df.query('index > 5', engine=self.engine, parser=self.parser)
expected = df.loc[df.index[df.index > 5]]
tm.assert_frame_equal(result, expected)
def test_inf(self):
n = 10
df = DataFrame({'a': np.random.rand(n), 'b': np.random.rand(n)})
df.loc[::2, 0] = np.inf
ops = '==', '!='
d = dict(zip(ops, (operator.eq, operator.ne)))
for op, f in d.items():
q = 'a %s inf' % op
expected = df[f(df.a, np.inf)]
result = df.query(q, engine=self.engine, parser=self.parser)
tm.assert_frame_equal(result, expected)
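# Same query tests re-run with the numexpr engine but the plain 'python' parser; the
# overridden tests below exercise the behaviours that differ ('@' local references
# raise SyntaxError, chained comparisons raise NotImplementedError).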
class TestDataFrameQueryNumExprPython(TestDataFrameQueryNumExprPandas):
@classmethod
def setUpClass(cls):
super(TestDataFrameQueryNumExprPython, cls).setUpClass()
cls.engine = 'numexpr'
cls.parser = 'python'
tm.skip_if_no_ne(cls.engine)
cls.frame = _frame.copy()
def test_date_query_no_attribute_access(self):
engine, parser = self.engine, self.parser
df = DataFrame(randn(5, 3))
df['dates1'] = date_range('1/1/2012', periods=5)
df['dates2'] = date_range('1/1/2013', periods=5)
df['dates3'] = date_range('1/1/2014', periods=5)
res = df.query('(dates1 < 20130101) & (20130101 < dates3)',
engine=engine, parser=parser)
expec = df[(df.dates1 < '20130101') & ('20130101' < df.dates3)]
tm.assert_frame_equal(res, expec)
def test_date_query_with_NaT(self):
engine, parser = self.engine, self.parser
n = 10
df = DataFrame(randn(n, 3))
df['dates1'] = date_range('1/1/2012', periods=n)
df['dates2'] = date_range('1/1/2013', periods=n)
df['dates3'] = date_range('1/1/2014', periods=n)
df.loc[np.random.rand(n) > 0.5, 'dates1'] = pd.NaT
df.loc[np.random.rand(n) > 0.5, 'dates3'] = pd.NaT
res = df.query('(dates1 < 20130101) & (20130101 < dates3)',
engine=engine, parser=parser)
expec = df[(df.dates1 < '20130101') & ('20130101' < df.dates3)]
assert_frame_equal(res, expec)
def test_date_index_query(self):
engine, parser = self.engine, self.parser
n = 10
df = DataFrame(randn(n, 3))
df['dates1'] = date_range('1/1/2012', periods=n)
df['dates3'] = date_range('1/1/2014', periods=n)
df.set_index('dates1', inplace=True, drop=True)
res = df.query('(index < 20130101) & (20130101 < dates3)',
engine=engine, parser=parser)
expec = df[(df.index < '20130101') & ('20130101' < df.dates3)]
assert_frame_equal(res, expec)
def test_date_index_query_with_NaT(self):
engine, parser = self.engine, self.parser
n = 10
df = DataFrame(randn(n, 3))
df['dates1'] = date_range('1/1/2012', periods=n)
df['dates3'] = date_range('1/1/2014', periods=n)
df.iloc[0, 0] = pd.NaT
df.set_index('dates1', inplace=True, drop=True)
res = df.query('(index < 20130101) & (20130101 < dates3)',
engine=engine, parser=parser)
expec = df[(df.index < '20130101') & ('20130101' < df.dates3)]
assert_frame_equal(res, expec)
def test_date_index_query_with_NaT_duplicates(self):
engine, parser = self.engine, self.parser
n = 10
df = DataFrame(randn(n, 3))
df['dates1'] = date_range('1/1/2012', periods=n)
df['dates3'] = date_range('1/1/2014', periods=n)
df.loc[np.random.rand(n) > 0.5, 'dates1'] = pd.NaT
df.set_index('dates1', inplace=True, drop=True)
with tm.assertRaises(NotImplementedError):
df.query('index < 20130101 < dates3', engine=engine, parser=parser)
def test_nested_scope(self):
from pandas.computation.ops import UndefinedVariableError
engine = self.engine
parser = self.parser
# smoke test
x = 1
result = pd.eval('x + 1', engine=engine, parser=parser)
self.assertEqual(result, 2)
df = DataFrame(np.random.randn(5, 3))
df2 = DataFrame(np.random.randn(5, 3))
# don't have the pandas parser
with tm.assertRaises(SyntaxError):
df.query('(@df>0) & (@df2>0)', engine=engine, parser=parser)
with tm.assertRaises(UndefinedVariableError):
df.query('(df>0) & (df2>0)', engine=engine, parser=parser)
expected = df[(df > 0) & (df2 > 0)]
result = pd.eval('df[(df > 0) & (df2 > 0)]', engine=engine,
parser=parser)
tm.assert_frame_equal(expected, result)
expected = df[(df > 0) & (df2 > 0) & (df[df > 0] > 0)]
result = pd.eval('df[(df > 0) & (df2 > 0) & (df[df > 0] > 0)]',
engine=engine, parser=parser)
tm.assert_frame_equal(expected, result)
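# Same query tests with engine='python' and parser='pandas'; test_query_builtin is
# overridden because the python engine can query an index named after a builtin
# (e.g. 'sin') without raising NumExprClobberingError.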
class TestDataFrameQueryPythonPandas(TestDataFrameQueryNumExprPandas):
@classmethod
def setUpClass(cls):
super(TestDataFrameQueryPythonPandas, cls).setUpClass()
cls.engine = 'python'
cls.parser = 'pandas'
cls.frame = _frame.copy()
def test_query_builtin(self):
from pandas.computation.engines import NumExprClobberingError
engine, parser = self.engine, self.parser
n = m = 10
df = DataFrame(np.random.randint(m, size=(n, 3)), columns=list('abc'))
df.index.name = 'sin'
expected = df[df.index > 5]
result = df.query('sin > 5', engine=engine, parser=parser)
tm.assert_frame_equal(expected, result)
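# Pure-python engine/parser combination, with the same builtin-name override as above.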
class TestDataFrameQueryPythonPython(TestDataFrameQueryNumExprPython):
@classmethod
def setUpClass(cls):
super(TestDataFrameQueryPythonPython, cls).setUpClass()
cls.engine = cls.parser = 'python'
cls.frame = _frame.copy()
def test_query_builtin(self):
from pandas.computation.engines import NumExprClobberingError
engine, parser = self.engine, self.parser
n = m = 10
df = DataFrame(np.random.randint(m, size=(n, 3)), columns=list('abc'))
df.index.name = 'sin'
expected = df[df.index > 5]
result = df.query('sin > 5', engine=engine, parser=parser)
tm.assert_frame_equal(expected, result)
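# Generator-style (nose) string-query tests, yielded for every parser/engine pair below.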
PARSERS = 'python', 'pandas'
ENGINES = 'python', 'numexpr'
class TestDataFrameQueryStrings(object):
def check_str_query_method(self, parser, engine):
tm.skip_if_no_ne(engine)
df = DataFrame(randn(10, 1), columns=['b'])
df['strings'] = Series(list('aabbccddee'))
expect = df[df.strings == 'a']
if parser != 'pandas':
col = 'strings'
lst = '"a"'
lhs = [col] * 2 + [lst] * 2
rhs = lhs[::-1]
eq, ne = '==', '!='
ops = 2 * ([eq] + [ne])
for lhs, op, rhs in zip(lhs, ops, rhs):
ex = '{lhs} {op} {rhs}'.format(lhs=lhs, op=op, rhs=rhs)
assertRaises(NotImplementedError, df.query, ex, engine=engine,
parser=parser, local_dict={'strings': df.strings})
else:
res = df.query('"a" == strings', engine=engine, parser=parser)
assert_frame_equal(res, expect)
res = df.query('strings == "a"', engine=engine, parser=parser)
assert_frame_equal(res, expect)
assert_frame_equal(res, df[df.strings.isin(['a'])])
expect = df[df.strings != 'a']
res = df.query('strings != "a"', engine=engine, parser=parser)
assert_frame_equal(res, expect)
res = df.query('"a" != strings', engine=engine, parser=parser)
assert_frame_equal(res, expect)
assert_frame_equal(res, df[~df.strings.isin(['a'])])
def test_str_query_method(self):
for parser, engine in product(PARSERS, ENGINES):
yield self.check_str_query_method, parser, engine
def test_str_list_query_method(self):
for parser, engine in product(PARSERS, ENGINES):
yield self.check_str_list_query_method, parser, engine
def check_str_list_query_method(self, parser, engine):
tm.skip_if_no_ne(engine)
df = DataFrame(randn(10, 1), columns=['b'])
df['strings'] = Series(list('aabbccddee'))
expect = df[df.strings.isin(['a', 'b'])]
if parser != 'pandas':
col = 'strings'
lst = '["a", "b"]'
lhs = [col] * 2 + [lst] * 2
rhs = lhs[::-1]
eq, ne = '==', '!='
ops = 2 * ([eq] + [ne])
for lhs, op, rhs in zip(lhs, ops, rhs):
ex = '{lhs} {op} {rhs}'.format(lhs=lhs, op=op, rhs=rhs)
with tm.assertRaises(NotImplementedError):
df.query(ex, engine=engine, parser=parser)
else:
res = df.query('strings == ["a", "b"]', engine=engine,
parser=parser)
assert_frame_equal(res, expect)
res = df.query('["a", "b"] == strings', engine=engine,
parser=parser)
assert_frame_equal(res, expect)
expect = df[~df.strings.isin(['a', 'b'])]
res = df.query('strings != ["a", "b"]', engine=engine,
parser=parser)
assert_frame_equal(res, expect)
res = df.query('["a", "b"] != strings', engine=engine,
parser=parser)
assert_frame_equal(res, expect)
def check_query_with_string_columns(self, parser, engine):
tm.skip_if_no_ne(engine)
df = DataFrame({'a': list('aaaabbbbcccc'),
'b': list('aabbccddeeff'),
'c': np.random.randint(5, size=12),
'd': np.random.randint(9, size=12)})
if parser == 'pandas':
res = df.query('a in b', parser=parser, engine=engine)
expec = df[df.a.isin(df.b)]
assert_frame_equal(res, expec)
res = df.query('a in b and c < d', parser=parser, engine=engine)
expec = df[df.a.isin(df.b) & (df.c < df.d)]
assert_frame_equal(res, expec)
else:
with assertRaises(NotImplementedError):
df.query('a in b', parser=parser, engine=engine)
with assertRaises(NotImplementedError):
df.query('a in b and c < d', parser=parser, engine=engine)
def test_query_with_string_columns(self):
for parser, engine in product(PARSERS, ENGINES):
yield self.check_query_with_string_columns, parser, engine
def check_object_array_eq_ne(self, parser, engine):
tm.skip_if_no_ne(engine)
df = DataFrame({'a': list('aaaabbbbcccc'),
'b': list('aabbccddeeff'),
'c': np.random.randint(5, size=12),
'd': np.random.randint(9, size=12)})
res = df.query('a == b', parser=parser, engine=engine)
exp = df[df.a == df.b]
assert_frame_equal(res, exp)
res = df.query('a != b', parser=parser, engine=engine)
exp = df[df.a != df.b]
assert_frame_equal(res, exp)
def test_object_array_eq_ne(self):
for parser, engine in product(PARSERS, ENGINES):
yield self.check_object_array_eq_ne, parser, engine
def check_query_with_nested_strings(self, parser, engine):
tm.skip_if_no_ne(engine)
skip_if_no_pandas_parser(parser)
from pandas.compat import StringIO
raw = """id event timestamp
1 "page 1 load" 1/1/2014 0:00:01
1 "page 1 exit" 1/1/2014 0:00:31
2 "page 2 load" 1/1/2014 0:01:01
2 "page 2 exit" 1/1/2014 0:01:31
3 "page 3 load" 1/1/2014 0:02:01
3 "page 3 exit" 1/1/2014 0:02:31
4 "page 1 load" 2/1/2014 1:00:01
4 "page 1 exit" 2/1/2014 1:00:31
5 "page 2 load" 2/1/2014 1:01:01
5 "page 2 exit" 2/1/2014 1:01:31
6 "page 3 load" 2/1/2014 1:02:01
6 "page 3 exit" 2/1/2014 1:02:31
"""
df = pd.read_csv(StringIO(raw), sep=r'\s{2,}', engine='python',
parse_dates=['timestamp'])
expected = df[df.event == '"page 1 load"']
res = df.query("""'"page 1 load"' in event""", parser=parser,
engine=engine)
tm.assert_frame_equal(expected, res)
def test_query_with_nested_string(self):
for parser, engine in product(PARSERS, ENGINES):
yield self.check_query_with_nested_strings, parser, engine
def check_query_with_nested_special_character(self, parser, engine):
skip_if_no_pandas_parser(parser)
tm.skip_if_no_ne(engine)
df = DataFrame({'a': ['a', 'b', 'test & test'],
'b': [1, 2, 3]})
res = df.query('a == "test & test"', parser=parser, engine=engine)
expec = df[df.a == 'test & test']
tm.assert_frame_equal(res, expec)
def test_query_with_nested_special_character(self):
for parser, engine in product(PARSERS, ENGINES):
yield self.check_query_with_nested_special_character, parser, engine
def check_query_lex_compare_strings(self, parser, engine):
tm.skip_if_no_ne(engine=engine)
import operator as opr
a = Series(tm.choice(list('abcde'), 20))
b = Series(np.arange(a.size))
df = DataFrame({'X': a, 'Y': b})
ops = {'<': opr.lt, '>': opr.gt, '<=': opr.le, '>=': opr.ge}
for op, func in ops.items():
res = df.query('X %s "d"' % op, engine=engine, parser=parser)
expected = df[func(df.X, 'd')]
assert_frame_equal(res, expected)
def test_query_lex_compare_strings(self):
for parser, engine in product(PARSERS, ENGINES):
yield self.check_query_lex_compare_strings, parser, engine
def check_query_single_element_booleans(self, parser, engine):
tm.skip_if_no_ne(engine)
columns = 'bid', 'bidsize', 'ask', 'asksize'
data = np.random.randint(2, size=(1, len(columns))).astype(bool)
df = DataFrame(data, columns=columns)
res = df.query('bid & ask', engine=engine, parser=parser)
expected = df[df.bid & df.ask]
assert_frame_equal(res, expected)
def test_query_single_element_booleans(self):
for parser, engine in product(PARSERS, ENGINES):
yield self.check_query_single_element_booleans, parser, engine
def check_query_string_scalar_variable(self, parser, engine):
tm.skip_if_no_ne(engine)
df = pd.DataFrame({'Symbol': ['BUD US', 'BUD US', 'IBM US', 'IBM US'],
'Price': [109.70, 109.72, 183.30, 183.35]})
e = df[df.Symbol == 'BUD US']
symb = 'BUD US'
r = df.query('Symbol == @symb', parser=parser, engine=engine)
tm.assert_frame_equal(e, r)
def test_query_string_scalar_variable(self):
for parser, engine in product(['pandas'], ENGINES):
yield self.check_query_string_scalar_variable, parser, engine
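# DataFrame.eval tests; the subclasses that follow re-run them for the remaining
# engine/parser combinations.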
class TestDataFrameEvalNumExprPandas(tm.TestCase):
@classmethod
def setUpClass(cls):
super(TestDataFrameEvalNumExprPandas, cls).setUpClass()
cls.engine = 'numexpr'
cls.parser = 'pandas'
tm.skip_if_no_ne()
def setUp(self):
self.frame = DataFrame(randn(10, 3), columns=list('abc'))
def tearDown(self):
del self.frame
def test_simple_expr(self):
res = self.frame.eval('a + b', engine=self.engine, parser=self.parser)
expect = self.frame.a + self.frame.b
assert_series_equal(res, expect)
def test_bool_arith_expr(self):
res = self.frame.eval('a[a < 1] + b', engine=self.engine,
parser=self.parser)
expect = self.frame.a[self.frame.a < 1] + self.frame.b
assert_series_equal(res, expect)
def test_invalid_type_for_operator_raises(self):
df = DataFrame({'a': [1, 2], 'b': ['c', 'd']})
ops = '+', '-', '*', '/'
for op in ops:
with tm.assertRaisesRegexp(TypeError,
"unsupported operand type\(s\) for "
".+: '.+' and '.+'"):
df.eval('a {0} b'.format(op), engine=self.engine,
parser=self.parser)
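# numexpr engine paired with the plain 'python' parser for the eval tests.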
class TestDataFrameEvalNumExprPython(TestDataFrameEvalNumExprPandas):
@classmethod
def setUpClass(cls):
super(TestDataFrameEvalNumExprPython, cls).setUpClass()
cls.engine = 'numexpr'
cls.parser = 'python'
tm.skip_if_no_ne(cls.engine)
class TestDataFrameEvalPythonPandas(TestDataFrameEvalNumExprPandas):
@classmethod
def setUpClass(cls):
super(TestDataFrameEvalPythonPandas, cls).setUpClass()
cls.engine = 'python'
cls.parser = 'pandas'
class TestDataFrameEvalPythonPython(TestDataFrameEvalNumExprPython):
@classmethod
def setUpClass(cls):
        super(TestDataFrameEvalPythonPython, cls).setUpClass()
cls.engine = cls.parser = 'python'
if __name__ == '__main__':
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
| mit | -8,488,055,087,396,399,000 | 36.431438 | 155 | 0.523572 | false |
DIRACGrid/DIRAC | src/DIRAC/Core/Tornado/scripts/tornado_start_all.py | 1 | 2483 | #!/usr/bin/env python
########################################################################
# File : tornado-start-all
# Author : Louis MARTIN
########################################################################
# Just run this script to start Tornado and all services
# Use CS to change port
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
__RCSID__ = "$Id$"
import os
import sys
from DIRAC.Core.Utilities.DIRACScript import DIRACScript
@DIRACScript()
def main():
if os.environ.get('DIRAC_USE_TORNADO_IOLOOP', 'false').lower() not in ('yes', 'true'):
raise RuntimeError(
"DIRAC_USE_TORNADO_IOLOOP is not defined in the environment." + "\n" +
"It is necessary to run with Tornado." + "\n" +
"https://dirac.readthedocs.io/en/latest/DeveloperGuide/TornadoServices/index.html"
)
from DIRAC import gConfig
from DIRAC.ConfigurationSystem.Client import PathFinder
from DIRAC.ConfigurationSystem.Client.ConfigurationData import gConfigurationData
from DIRAC.ConfigurationSystem.Client.LocalConfiguration import LocalConfiguration
from DIRAC.Core.Tornado.Server.TornadoServer import TornadoServer
from DIRAC.Core.Utilities.DErrno import includeExtensionErrors
from DIRAC.FrameworkSystem.Client.Logger import gLogger
  # Check that no Configuration Server is already running as master in this instance:
  # a master CS must be started separately with Configuration_Server.cfg and
  # tornado-start-CS.py.
key = '/Systems/Configuration/%s/Services/Server/Protocol' % PathFinder.getSystemInstance('Configuration')
if gConfigurationData.isMaster() and gConfig.getValue(key, 'dips').lower() == 'https':
gLogger.fatal("You can't run the CS and services in the same server!")
sys.exit(0)
localCfg = LocalConfiguration()
localCfg.setConfigurationForServer('Tornado/Tornado')
localCfg.addMandatoryEntry("/DIRAC/Setup")
localCfg.addDefaultEntry("/DIRAC/Security/UseServerCertificate", "yes")
localCfg.addDefaultEntry("LogLevel", "INFO")
localCfg.addDefaultEntry("LogColor", True)
resultDict = localCfg.loadUserData()
if not resultDict['OK']:
gLogger.initialize("Tornado", "/")
gLogger.error("There were errors when loading configuration", resultDict['Message'])
sys.exit(1)
includeExtensionErrors()
gLogger.initialize('Tornado', "/")
serverToLaunch = TornadoServer()
serverToLaunch.startTornado()
if __name__ == "__main__":
main()
| gpl-3.0 | -5,539,749,864,250,538,000 | 36.059701 | 108 | 0.699557 | false |